From fa1a4c54c74c346b797c40f099c0599611c9af61 Mon Sep 17 00:00:00 2001
From: Waldemar Brodkorb
Date: Sun, 30 Oct 2016 16:51:47 +0100
Subject: linux: update older kernels

---
 .../linux/patches/3.4.113/bsd-compatibility.patch  |  2512 +
 target/linux/patches/3.4.113/defaults.patch        |    22 +
 target/linux/patches/3.4.113/gemalto.patch         |    11 +
 target/linux/patches/3.4.113/lemote-rfkill.patch   |    21 +
 target/linux/patches/3.4.113/linux-gcc-check.patch |    18 +
 target/linux/patches/3.4.113/mips-error.patch      |   254 +
 target/linux/patches/3.4.113/mkpiggy.patch         |    28 +
 .../patches/3.4.113/module-alloc-size-check.patch  |    21 +
 target/linux/patches/3.4.113/nds32.patch           | 72132 +++++++++++++++++++
 target/linux/patches/3.4.113/non-static.patch      |    33 +
 target/linux/patches/3.4.113/relocs.patch          |  3131 +
 target/linux/patches/3.4.113/sparc-aout.patch      |    24 +
 target/linux/patches/3.4.113/sparc-include.patch   |    11 +
 target/linux/patches/3.4.113/startup.patch         |    34 +
 .../linux/patches/3.4.113/usb-defaults-off.patch   |    32 +
 .../patches/3.4.113/vga-cons-default-off.patch     |    12 +
 target/linux/patches/3.4.113/wlan-cf.patch         |    11 +
 target/linux/patches/3.4.113/yaffs2.patch          | 16550 +++++
 target/linux/patches/3.4.113/zlib-inflate.patch    |    12 +
 19 files changed, 94869 insertions(+)
 create mode 100644 target/linux/patches/3.4.113/bsd-compatibility.patch
 create mode 100644 target/linux/patches/3.4.113/defaults.patch
 create mode 100644 target/linux/patches/3.4.113/gemalto.patch
 create mode 100644 target/linux/patches/3.4.113/lemote-rfkill.patch
 create mode 100644 target/linux/patches/3.4.113/linux-gcc-check.patch
 create mode 100644 target/linux/patches/3.4.113/mips-error.patch
 create mode 100644 target/linux/patches/3.4.113/mkpiggy.patch
 create mode 100644 target/linux/patches/3.4.113/module-alloc-size-check.patch
 create mode 100644 target/linux/patches/3.4.113/nds32.patch
 create mode 100644 target/linux/patches/3.4.113/non-static.patch
 create mode 100644 target/linux/patches/3.4.113/relocs.patch
 create mode 100644 target/linux/patches/3.4.113/sparc-aout.patch
 create mode 100644 target/linux/patches/3.4.113/sparc-include.patch
 create mode 100644 target/linux/patches/3.4.113/startup.patch
 create mode 100644 target/linux/patches/3.4.113/usb-defaults-off.patch
 create mode 100644 target/linux/patches/3.4.113/vga-cons-default-off.patch
 create mode 100644 target/linux/patches/3.4.113/wlan-cf.patch
 create mode 100644 target/linux/patches/3.4.113/yaffs2.patch
 create mode 100644 target/linux/patches/3.4.113/zlib-inflate.patch

(limited to 'target/linux/patches/3.4.113')

diff --git a/target/linux/patches/3.4.113/bsd-compatibility.patch b/target/linux/patches/3.4.113/bsd-compatibility.patch
new file mode 100644
index 000000000..9e91a62de
--- /dev/null
+++ b/target/linux/patches/3.4.113/bsd-compatibility.patch
@@ -0,0 +1,2512 @@
+diff -Nur linux-2.6.36.orig/scripts/Makefile.lib linux-2.6.36/scripts/Makefile.lib
+--- linux-2.6.36.orig/scripts/Makefile.lib	2010-10-20 22:30:22.000000000 +0200
++++ linux-2.6.36/scripts/Makefile.lib	2010-11-28 18:34:22.000000000 +0100
+@@ -216,7 +216,12 @@
+ size_append = printf $(shell \
+ dec_size=0; \
+ for F in $1; do \
+- fsize=$$(stat -c "%s" $$F); \
++ if stat -qs .>/dev/null 2>&1; then \
++ statcmd='stat -f %z'; \
++ else \
++ statcmd='stat -c %s'; \
++ fi; \
++ fsize=$$($$statcmd $$F); \
+ dec_size=$$(expr $$dec_size + $$fsize); \
+ done; \
+ printf "%08x\n" $$dec_size | \
+diff -Nur linux-2.6.36.orig/scripts/mod/mk_elfconfig.c linux-2.6.36/scripts/mod/mk_elfconfig.c
+--- linux-2.6.36.orig/scripts/mod/mk_elfconfig.c	2010-10-20 22:30:22.000000000 +0200
++++ linux-2.6.36/scripts/mod/mk_elfconfig.c	2010-11-28 18:33:24.000000000 +0100
+@@ -1,7 +1,18 @@
+ #include
+ #include
+ #include
+-#include
++
++#define EI_NIDENT (16)
++#define ELFMAG "\177ELF"
++
++#define SELFMAG 4
++#define EI_CLASS 4
++#define ELFCLASS32 1 /* 32-bit objects */
++#define ELFCLASS64 2 /* 64-bit objects */
++
++#define EI_DATA 5 /* Data encoding byte index */
++#define ELFDATA2LSB 1 /* 2's complement, little endian */
++#define ELFDATA2MSB 2 /* 2's complement, big endian */
+
+ int
+ main(int argc, char **argv)
+diff -Nur linux-2.6.36.orig/scripts/mod/modpost.h linux-2.6.36/scripts/mod/modpost.h
+--- linux-2.6.36.orig/scripts/mod/modpost.h	2010-10-20 22:30:22.000000000 +0200
++++ linux-2.6.36/scripts/mod/modpost.h	2010-11-28 18:33:24.000000000 +0100
+@@ -7,7 +7,2453 @@
+ #include
+ #include
+ #include
+-#include
++
++
++/* This file defines standard ELF types, structures, and macros.
++   Copyright (C) 1995-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, write to the Free
++   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307 USA.  */
++
++#ifndef _ELF_H
++#define _ELF_H 1
++
++__BEGIN_DECLS
++
++/* Standard ELF types.  */
++
++#include
++
++/* Type for a 16-bit quantity.  */
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++/* Types for signed and unsigned 32-bit quantities.  */
++typedef uint32_t Elf32_Word;
++typedef int32_t Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef int32_t Elf64_Sword;
++
++/* Types for signed and unsigned 64-bit quantities.  */
++typedef uint64_t Elf32_Xword;
++typedef int64_t Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef int64_t Elf64_Sxword;
++
++/* Type of addresses.  */
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++/* Type of file offsets.  */
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++/* Type for section indices, which are 16-bit quantities.  */
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++/* Type for version symbol information.  */
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++
++/* The ELF file header.  This appears at the start of every ELF file.
*/ ++ ++#define EI_NIDENT (16) ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf32_Half e_type; /* Object file type */ ++ Elf32_Half e_machine; /* Architecture */ ++ Elf32_Word e_version; /* Object file version */ ++ Elf32_Addr e_entry; /* Entry point virtual address */ ++ Elf32_Off e_phoff; /* Program header table file offset */ ++ Elf32_Off e_shoff; /* Section header table file offset */ ++ Elf32_Word e_flags; /* Processor-specific flags */ ++ Elf32_Half e_ehsize; /* ELF header size in bytes */ ++ Elf32_Half e_phentsize; /* Program header table entry size */ ++ Elf32_Half e_phnum; /* Program header table entry count */ ++ Elf32_Half e_shentsize; /* Section header table entry size */ ++ Elf32_Half e_shnum; /* Section header table entry count */ ++ Elf32_Half e_shstrndx; /* Section header string table index */ ++} Elf32_Ehdr; ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf64_Half e_type; /* Object file type */ ++ Elf64_Half e_machine; /* Architecture */ ++ Elf64_Word e_version; /* Object file version */ ++ Elf64_Addr e_entry; /* Entry point virtual address */ ++ Elf64_Off e_phoff; /* Program header table file offset */ ++ Elf64_Off e_shoff; /* Section header table file offset */ ++ Elf64_Word e_flags; /* Processor-specific flags */ ++ Elf64_Half e_ehsize; /* ELF header size in bytes */ ++ Elf64_Half e_phentsize; /* Program header table entry size */ ++ Elf64_Half e_phnum; /* Program header table entry count */ ++ Elf64_Half e_shentsize; /* Section header table entry size */ ++ Elf64_Half e_shnum; /* Section header table entry count */ ++ Elf64_Half e_shstrndx; /* Section header string table index */ ++} Elf64_Ehdr; ++ ++/* Fields in the e_ident array. The EI_* macros are indices into the ++ array. The macros under each EI_* macro are the values the byte ++ may have. */ ++ ++#define EI_MAG0 0 /* File identification byte 0 index */ ++#define ELFMAG0 0x7f /* Magic number byte 0 */ ++ ++#define EI_MAG1 1 /* File identification byte 1 index */ ++#define ELFMAG1 'E' /* Magic number byte 1 */ ++ ++#define EI_MAG2 2 /* File identification byte 2 index */ ++#define ELFMAG2 'L' /* Magic number byte 2 */ ++ ++#define EI_MAG3 3 /* File identification byte 3 index */ ++#define ELFMAG3 'F' /* Magic number byte 3 */ ++ ++/* Conglomeration of the identification bytes, for easy testing as a word. */ ++#define ELFMAG "\177ELF" ++#define SELFMAG 4 ++ ++#define EI_CLASS 4 /* File class byte index */ ++#define ELFCLASSNONE 0 /* Invalid class */ ++#define ELFCLASS32 1 /* 32-bit objects */ ++#define ELFCLASS64 2 /* 64-bit objects */ ++#define ELFCLASSNUM 3 ++ ++#define EI_DATA 5 /* Data encoding byte index */ ++#define ELFDATANONE 0 /* Invalid data encoding */ ++#define ELFDATA2LSB 1 /* 2's complement, little endian */ ++#define ELFDATA2MSB 2 /* 2's complement, big endian */ ++#define ELFDATANUM 3 ++ ++#define EI_VERSION 6 /* File version byte index */ ++ /* Value must be EV_CURRENT */ ++ ++#define EI_OSABI 7 /* OS ABI identification */ ++#define ELFOSABI_NONE 0 /* UNIX System V ABI */ ++#define ELFOSABI_SYSV 0 /* Alias. */ ++#define ELFOSABI_HPUX 1 /* HP-UX */ ++#define ELFOSABI_NETBSD 2 /* NetBSD. */ ++#define ELFOSABI_LINUX 3 /* Linux. */ ++#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */ ++#define ELFOSABI_AIX 7 /* IBM AIX. */ ++#define ELFOSABI_IRIX 8 /* SGI Irix. */ ++#define ELFOSABI_FREEBSD 9 /* FreeBSD. */ ++#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. 
*/ ++#define ELFOSABI_MODESTO 11 /* Novell Modesto. */ ++#define ELFOSABI_OPENBSD 12 /* OpenBSD. */ ++#define ELFOSABI_ARM 97 /* ARM */ ++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ ++ ++#define EI_ABIVERSION 8 /* ABI version */ ++ ++#define EI_PAD 9 /* Byte index of padding bytes */ ++ ++/* Legal values for e_type (object file type). */ ++ ++#define ET_NONE 0 /* No file type */ ++#define ET_REL 1 /* Relocatable file */ ++#define ET_EXEC 2 /* Executable file */ ++#define ET_DYN 3 /* Shared object file */ ++#define ET_CORE 4 /* Core file */ ++#define ET_NUM 5 /* Number of defined types */ ++#define ET_LOOS 0xfe00 /* OS-specific range start */ ++#define ET_HIOS 0xfeff /* OS-specific range end */ ++#define ET_LOPROC 0xff00 /* Processor-specific range start */ ++#define ET_HIPROC 0xffff /* Processor-specific range end */ ++ ++/* Legal values for e_machine (architecture). */ ++ ++#define EM_NONE 0 /* No machine */ ++#define EM_M32 1 /* AT&T WE 32100 */ ++#define EM_SPARC 2 /* SUN SPARC */ ++#define EM_386 3 /* Intel 80386 */ ++#define EM_68K 4 /* Motorola m68k family */ ++#define EM_88K 5 /* Motorola m88k family */ ++#define EM_860 7 /* Intel 80860 */ ++#define EM_MIPS 8 /* MIPS R3000 big-endian */ ++#define EM_S370 9 /* IBM System/370 */ ++#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */ ++ ++#define EM_PARISC 15 /* HPPA */ ++#define EM_VPP500 17 /* Fujitsu VPP500 */ ++#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ ++#define EM_960 19 /* Intel 80960 */ ++#define EM_PPC 20 /* PowerPC */ ++#define EM_PPC64 21 /* PowerPC 64-bit */ ++#define EM_S390 22 /* IBM S390 */ ++ ++#define EM_V800 36 /* NEC V800 series */ ++#define EM_FR20 37 /* Fujitsu FR20 */ ++#define EM_RH32 38 /* TRW RH-32 */ ++#define EM_RCE 39 /* Motorola RCE */ ++#define EM_ARM 40 /* ARM */ ++#define EM_FAKE_ALPHA 41 /* Digital Alpha */ ++#define EM_SH 42 /* Hitachi SH */ ++#define EM_SPARCV9 43 /* SPARC v9 64-bit */ ++#define EM_TRICORE 44 /* Siemens Tricore */ ++#define EM_ARC 45 /* Argonaut RISC Core */ ++#define EM_H8_300 46 /* Hitachi H8/300 */ ++#define EM_H8_300H 47 /* Hitachi H8/300H */ ++#define EM_H8S 48 /* Hitachi H8S */ ++#define EM_H8_500 49 /* Hitachi H8/500 */ ++#define EM_IA_64 50 /* Intel Merced */ ++#define EM_MIPS_X 51 /* Stanford MIPS-X */ ++#define EM_COLDFIRE 52 /* Motorola Coldfire */ ++#define EM_68HC12 53 /* Motorola M68HC12 */ ++#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/ ++#define EM_PCP 55 /* Siemens PCP */ ++#define EM_NCPU 56 /* Sony nCPU embeeded RISC */ ++#define EM_NDR1 57 /* Denso NDR1 microprocessor */ ++#define EM_STARCORE 58 /* Motorola Start*Core processor */ ++#define EM_ME16 59 /* Toyota ME16 processor */ ++#define EM_ST100 60 /* STMicroelectronic ST100 processor */ ++#define EM_TINYJ 61 /* Advanced Logic Corp. 
Tinyj emb.fam*/ ++#define EM_X86_64 62 /* AMD x86-64 architecture */ ++#define EM_PDSP 63 /* Sony DSP Processor */ ++ ++#define EM_FX66 66 /* Siemens FX66 microcontroller */ ++#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */ ++#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */ ++#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */ ++#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */ ++#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */ ++#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */ ++#define EM_SVX 73 /* Silicon Graphics SVx */ ++#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */ ++#define EM_VAX 75 /* Digital VAX */ ++#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ ++#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */ ++#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */ ++#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */ ++#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */ ++#define EM_HUANY 81 /* Harvard University machine-independent object files */ ++#define EM_PRISM 82 /* SiTera Prism */ ++#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */ ++#define EM_FR30 84 /* Fujitsu FR30 */ ++#define EM_D10V 85 /* Mitsubishi D10V */ ++#define EM_D30V 86 /* Mitsubishi D30V */ ++#define EM_V850 87 /* NEC v850 */ ++#define EM_M32R 88 /* Mitsubishi M32R */ ++#define EM_MN10300 89 /* Matsushita MN10300 */ ++#define EM_MN10200 90 /* Matsushita MN10200 */ ++#define EM_PJ 91 /* picoJava */ ++#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ ++#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */ ++#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */ ++#define EM_NUM 95 ++ ++/* If it is necessary to assign new unofficial EM_* values, please ++ pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the ++ chances of collision with official or non-GNU unofficial values. */ ++ ++#define EM_ALPHA 0x9026 ++ ++/* Legal values for e_version (version). */ ++ ++#define EV_NONE 0 /* Invalid ELF version */ ++#define EV_CURRENT 1 /* Current version */ ++#define EV_NUM 2 ++ ++/* Section header. */ ++ ++typedef struct ++{ ++ Elf32_Word sh_name; /* Section name (string tbl index) */ ++ Elf32_Word sh_type; /* Section type */ ++ Elf32_Word sh_flags; /* Section flags */ ++ Elf32_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf32_Off sh_offset; /* Section file offset */ ++ Elf32_Word sh_size; /* Section size in bytes */ ++ Elf32_Word sh_link; /* Link to another section */ ++ Elf32_Word sh_info; /* Additional section information */ ++ Elf32_Word sh_addralign; /* Section alignment */ ++ Elf32_Word sh_entsize; /* Entry size if section holds table */ ++} Elf32_Shdr; ++ ++typedef struct ++{ ++ Elf64_Word sh_name; /* Section name (string tbl index) */ ++ Elf64_Word sh_type; /* Section type */ ++ Elf64_Xword sh_flags; /* Section flags */ ++ Elf64_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf64_Off sh_offset; /* Section file offset */ ++ Elf64_Xword sh_size; /* Section size in bytes */ ++ Elf64_Word sh_link; /* Link to another section */ ++ Elf64_Word sh_info; /* Additional section information */ ++ Elf64_Xword sh_addralign; /* Section alignment */ ++ Elf64_Xword sh_entsize; /* Entry size if section holds table */ ++} Elf64_Shdr; ++ ++/* Special section indices. 
*/ ++ ++#define SHN_UNDEF 0 /* Undefined section */ ++#define SHN_LORESERVE 0xff00 /* Start of reserved indices */ ++#define SHN_LOPROC 0xff00 /* Start of processor-specific */ ++#define SHN_HIPROC 0xff1f /* End of processor-specific */ ++#define SHN_LOOS 0xff20 /* Start of OS-specific */ ++#define SHN_HIOS 0xff3f /* End of OS-specific */ ++#define SHN_ABS 0xfff1 /* Associated symbol is absolute */ ++#define SHN_COMMON 0xfff2 /* Associated symbol is common */ ++#define SHN_XINDEX 0xffff /* Index is in extra table. */ ++#define SHN_HIRESERVE 0xffff /* End of reserved indices */ ++ ++/* Legal values for sh_type (section type). */ ++ ++#define SHT_NULL 0 /* Section header table entry unused */ ++#define SHT_PROGBITS 1 /* Program data */ ++#define SHT_SYMTAB 2 /* Symbol table */ ++#define SHT_STRTAB 3 /* String table */ ++#define SHT_RELA 4 /* Relocation entries with addends */ ++#define SHT_HASH 5 /* Symbol hash table */ ++#define SHT_DYNAMIC 6 /* Dynamic linking information */ ++#define SHT_NOTE 7 /* Notes */ ++#define SHT_NOBITS 8 /* Program space with no data (bss) */ ++#define SHT_REL 9 /* Relocation entries, no addends */ ++#define SHT_SHLIB 10 /* Reserved */ ++#define SHT_DYNSYM 11 /* Dynamic linker symbol table */ ++#define SHT_INIT_ARRAY 14 /* Array of constructors */ ++#define SHT_FINI_ARRAY 15 /* Array of destructors */ ++#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */ ++#define SHT_GROUP 17 /* Section group */ ++#define SHT_SYMTAB_SHNDX 18 /* Extended section indeces */ ++#define SHT_NUM 19 /* Number of defined types. */ ++#define SHT_LOOS 0x60000000 /* Start OS-specific */ ++#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */ ++#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */ ++#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */ ++#define SHT_SUNW_move 0x6ffffffa ++#define SHT_SUNW_COMDAT 0x6ffffffb ++#define SHT_SUNW_syminfo 0x6ffffffc ++#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */ ++#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */ ++#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */ ++#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */ ++#define SHT_HIOS 0x6fffffff /* End OS-specific type */ ++#define SHT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define SHT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define SHT_LOUSER 0x80000000 /* Start of application-specific */ ++#define SHT_HIUSER 0x8fffffff /* End of application-specific */ ++ ++/* Legal values for sh_flags (section flags). */ ++ ++#define SHF_WRITE (1 << 0) /* Writable */ ++#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */ ++#define SHF_EXECINSTR (1 << 2) /* Executable */ ++#define SHF_MERGE (1 << 4) /* Might be merged */ ++#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */ ++#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */ ++#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */ ++#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling ++ required */ ++#define SHF_GROUP (1 << 9) /* Section is member of a group. */ ++#define SHF_TLS (1 << 10) /* Section hold thread-local data. */ ++#define SHF_MASKOS 0x0ff00000 /* OS-specific. */ ++#define SHF_MASKPROC 0xf0000000 /* Processor-specific */ ++ ++/* Section group handling. */ ++#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */ ++ ++/* Symbol table entry. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word st_name; /* Symbol name (string tbl index) */ ++ Elf32_Addr st_value; /* Symbol value */ ++ Elf32_Word st_size; /* Symbol size */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf32_Section st_shndx; /* Section index */ ++} Elf32_Sym; ++ ++typedef struct ++{ ++ Elf64_Word st_name; /* Symbol name (string tbl index) */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf64_Section st_shndx; /* Section index */ ++ Elf64_Addr st_value; /* Symbol value */ ++ Elf64_Xword st_size; /* Symbol size */ ++} Elf64_Sym; ++ ++/* The syminfo section if available contains additional information about ++ every dynamic symbol. */ ++ ++typedef struct ++{ ++ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf32_Half si_flags; /* Per symbol flags */ ++} Elf32_Syminfo; ++ ++typedef struct ++{ ++ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf64_Half si_flags; /* Per symbol flags */ ++} Elf64_Syminfo; ++ ++/* Possible values for si_boundto. */ ++#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */ ++#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */ ++#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */ ++ ++/* Possible bitmasks for si_flags. */ ++#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */ ++#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */ ++#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */ ++#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy ++ loaded */ ++/* Syminfo version values. */ ++#define SYMINFO_NONE 0 ++#define SYMINFO_CURRENT 1 ++#define SYMINFO_NUM 2 ++ ++ ++/* How to extract and insert information held in the st_info field. */ ++ ++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4) ++#define ELF32_ST_TYPE(val) ((val) & 0xf) ++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf)) ++ ++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */ ++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val) ++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val) ++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type)) ++ ++/* Legal values for ST_BIND subfield of st_info (symbol binding). */ ++ ++#define STB_LOCAL 0 /* Local symbol */ ++#define STB_GLOBAL 1 /* Global symbol */ ++#define STB_WEAK 2 /* Weak symbol */ ++#define STB_NUM 3 /* Number of defined types. */ ++#define STB_LOOS 10 /* Start of OS-specific */ ++#define STB_HIOS 12 /* End of OS-specific */ ++#define STB_LOPROC 13 /* Start of processor-specific */ ++#define STB_HIPROC 15 /* End of processor-specific */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_NOTYPE 0 /* Symbol type is unspecified */ ++#define STT_OBJECT 1 /* Symbol is a data object */ ++#define STT_FUNC 2 /* Symbol is a code object */ ++#define STT_SECTION 3 /* Symbol associated with a section */ ++#define STT_FILE 4 /* Symbol's name is file name */ ++#define STT_COMMON 5 /* Symbol is a common data object */ ++#define STT_TLS 6 /* Symbol is thread-local data object*/ ++#define STT_NUM 7 /* Number of defined types. 
*/ ++#define STT_LOOS 10 /* Start of OS-specific */ ++#define STT_HIOS 12 /* End of OS-specific */ ++#define STT_LOPROC 13 /* Start of processor-specific */ ++#define STT_HIPROC 15 /* End of processor-specific */ ++ ++ ++/* Symbol table indices are found in the hash buckets and chain table ++ of a symbol hash table section. This special index value indicates ++ the end of a chain, meaning no further symbols are found in that bucket. */ ++ ++#define STN_UNDEF 0 /* End of a chain. */ ++ ++ ++/* How to extract and insert information held in the st_other field. */ ++ ++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03) ++ ++/* For ELF64 the definitions are the same. */ ++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) ++ ++/* Symbol visibility specification encoded in the st_other field. */ ++#define STV_DEFAULT 0 /* Default symbol visibility rules */ ++#define STV_INTERNAL 1 /* Processor specific hidden class */ ++#define STV_HIDDEN 2 /* Sym unavailable in other modules */ ++#define STV_PROTECTED 3 /* Not preemptible, not exported */ ++ ++ ++/* Relocation table entry without addend (in section of type SHT_REL). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++} Elf32_Rel; ++ ++/* I have seen two different definitions of the Elf64_Rel and ++ Elf64_Rela structures, so we'll leave them out until Novell (or ++ whoever) gets their act together. */ ++/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */ ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++} Elf64_Rel; ++ ++/* Relocation table entry with addend (in section of type SHT_RELA). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++ Elf32_Sword r_addend; /* Addend */ ++} Elf32_Rela; ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++ Elf64_Sxword r_addend; /* Addend */ ++} Elf64_Rela; ++ ++/* How to extract and insert information held in the r_info field. */ ++ ++#define ELF32_R_SYM(val) ((val) >> 8) ++#define ELF32_R_TYPE(val) ((val) & 0xff) ++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff)) ++ ++#define ELF64_R_SYM(i) ((i) >> 32) ++#define ELF64_R_TYPE(i) ((i) & 0xffffffff) ++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type)) ++ ++/* Program segment header. */ ++ ++typedef struct ++{ ++ Elf32_Word p_type; /* Segment type */ ++ Elf32_Off p_offset; /* Segment file offset */ ++ Elf32_Addr p_vaddr; /* Segment virtual address */ ++ Elf32_Addr p_paddr; /* Segment physical address */ ++ Elf32_Word p_filesz; /* Segment size in file */ ++ Elf32_Word p_memsz; /* Segment size in memory */ ++ Elf32_Word p_flags; /* Segment flags */ ++ Elf32_Word p_align; /* Segment alignment */ ++} Elf32_Phdr; ++ ++typedef struct ++{ ++ Elf64_Word p_type; /* Segment type */ ++ Elf64_Word p_flags; /* Segment flags */ ++ Elf64_Off p_offset; /* Segment file offset */ ++ Elf64_Addr p_vaddr; /* Segment virtual address */ ++ Elf64_Addr p_paddr; /* Segment physical address */ ++ Elf64_Xword p_filesz; /* Segment size in file */ ++ Elf64_Xword p_memsz; /* Segment size in memory */ ++ Elf64_Xword p_align; /* Segment alignment */ ++} Elf64_Phdr; ++ ++/* Legal values for p_type (segment type). 
*/ ++ ++#define PT_NULL 0 /* Program header table entry unused */ ++#define PT_LOAD 1 /* Loadable program segment */ ++#define PT_DYNAMIC 2 /* Dynamic linking information */ ++#define PT_INTERP 3 /* Program interpreter */ ++#define PT_NOTE 4 /* Auxiliary information */ ++#define PT_SHLIB 5 /* Reserved */ ++#define PT_PHDR 6 /* Entry for header table itself */ ++#define PT_TLS 7 /* Thread-local storage segment */ ++#define PT_NUM 8 /* Number of defined types */ ++#define PT_LOOS 0x60000000 /* Start of OS-specific */ ++#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */ ++#define PT_GNU_STACK 0x6474e551 /* Indicates stack executability */ ++#define PT_LOSUNW 0x6ffffffa ++#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */ ++#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */ ++#define PT_HISUNW 0x6fffffff ++#define PT_HIOS 0x6fffffff /* End of OS-specific */ ++#define PT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define PT_HIPROC 0x7fffffff /* End of processor-specific */ ++ ++/* Legal values for p_flags (segment flags). */ ++ ++#define PF_X (1 << 0) /* Segment is executable */ ++#define PF_W (1 << 1) /* Segment is writable */ ++#define PF_R (1 << 2) /* Segment is readable */ ++#define PF_MASKOS 0x0ff00000 /* OS-specific */ ++#define PF_MASKPROC 0xf0000000 /* Processor-specific */ ++ ++/* Legal values for note segment descriptor types for core files. */ ++ ++#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ ++#define NT_FPREGSET 2 /* Contains copy of fpregset struct */ ++#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ ++#define NT_PRXREG 4 /* Contains copy of prxregset struct */ ++#define NT_TASKSTRUCT 4 /* Contains copy of task structure */ ++#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */ ++#define NT_AUXV 6 /* Contains copy of auxv array */ ++#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */ ++#define NT_ASRS 8 /* Contains copy of asrset struct */ ++#define NT_PSTATUS 10 /* Contains copy of pstatus struct */ ++#define NT_PSINFO 13 /* Contains copy of psinfo struct */ ++#define NT_PRCRED 14 /* Contains copy of prcred struct */ ++#define NT_UTSNAME 15 /* Contains copy of utsname struct */ ++#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */ ++#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */ ++#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct*/ ++ ++/* Legal values for the note segment descriptor types for object files. */ ++ ++#define NT_VERSION 1 /* Contains a version string. */ ++ ++ ++/* Dynamic section entry. */ ++ ++typedef struct ++{ ++ Elf32_Sword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf32_Word d_val; /* Integer value */ ++ Elf32_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf32_Dyn; ++ ++typedef struct ++{ ++ Elf64_Sxword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf64_Xword d_val; /* Integer value */ ++ Elf64_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf64_Dyn; ++ ++/* Legal values for d_tag (dynamic entry type). 
*/ ++ ++#define DT_NULL 0 /* Marks end of dynamic section */ ++#define DT_NEEDED 1 /* Name of needed library */ ++#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */ ++#define DT_PLTGOT 3 /* Processor defined value */ ++#define DT_HASH 4 /* Address of symbol hash table */ ++#define DT_STRTAB 5 /* Address of string table */ ++#define DT_SYMTAB 6 /* Address of symbol table */ ++#define DT_RELA 7 /* Address of Rela relocs */ ++#define DT_RELASZ 8 /* Total size of Rela relocs */ ++#define DT_RELAENT 9 /* Size of one Rela reloc */ ++#define DT_STRSZ 10 /* Size of string table */ ++#define DT_SYMENT 11 /* Size of one symbol table entry */ ++#define DT_INIT 12 /* Address of init function */ ++#define DT_FINI 13 /* Address of termination function */ ++#define DT_SONAME 14 /* Name of shared object */ ++#define DT_RPATH 15 /* Library search path (deprecated) */ ++#define DT_SYMBOLIC 16 /* Start symbol search here */ ++#define DT_REL 17 /* Address of Rel relocs */ ++#define DT_RELSZ 18 /* Total size of Rel relocs */ ++#define DT_RELENT 19 /* Size of one Rel reloc */ ++#define DT_PLTREL 20 /* Type of reloc in PLT */ ++#define DT_DEBUG 21 /* For debugging; unspecified */ ++#define DT_TEXTREL 22 /* Reloc might modify .text */ ++#define DT_JMPREL 23 /* Address of PLT relocs */ ++#define DT_BIND_NOW 24 /* Process relocations of object */ ++#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */ ++#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */ ++#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */ ++#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */ ++#define DT_RUNPATH 29 /* Library search path */ ++#define DT_FLAGS 30 /* Flags for the object being loaded */ ++#define DT_ENCODING 32 /* Start of encoded range */ ++#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/ ++#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */ ++#define DT_NUM 34 /* Number used */ ++#define DT_LOOS 0x6000000d /* Start of OS-specific */ ++#define DT_HIOS 0x6ffff000 /* End of OS-specific */ ++#define DT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define DT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */ ++ ++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the ++ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's ++ approach. */ ++#define DT_VALRNGLO 0x6ffffd00 ++#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */ ++#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */ ++#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */ ++#define DT_CHECKSUM 0x6ffffdf8 ++#define DT_PLTPADSZ 0x6ffffdf9 ++#define DT_MOVEENT 0x6ffffdfa ++#define DT_MOVESZ 0x6ffffdfb ++#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */ ++#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, effecting ++ the following DT_* entry. */ ++#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */ ++#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */ ++#define DT_VALRNGHI 0x6ffffdff ++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */ ++#define DT_VALNUM 12 ++ ++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the ++ Dyn.d_un.d_ptr field of the Elf*_Dyn structure. ++ ++ If any adjustment is made to the ELF object after it has been ++ built these entries will need to be adjusted. 
*/ ++#define DT_ADDRRNGLO 0x6ffffe00 ++#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */ ++#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */ ++#define DT_CONFIG 0x6ffffefa /* Configuration information. */ ++#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */ ++#define DT_AUDIT 0x6ffffefc /* Object auditing. */ ++#define DT_PLTPAD 0x6ffffefd /* PLT padding. */ ++#define DT_MOVETAB 0x6ffffefe /* Move table. */ ++#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */ ++#define DT_ADDRRNGHI 0x6ffffeff ++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */ ++#define DT_ADDRNUM 10 ++ ++/* The versioning entry types. The next are defined as part of the ++ GNU extension. */ ++#define DT_VERSYM 0x6ffffff0 ++ ++#define DT_RELACOUNT 0x6ffffff9 ++#define DT_RELCOUNT 0x6ffffffa ++ ++/* These were chosen by Sun. */ ++#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */ ++#define DT_VERDEF 0x6ffffffc /* Address of version definition ++ table */ ++#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */ ++#define DT_VERNEED 0x6ffffffe /* Address of table with needed ++ versions */ ++#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */ ++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */ ++#define DT_VERSIONTAGNUM 16 ++ ++/* Sun added these machine-independent extensions in the "processor-specific" ++ range. Be compatible. */ ++#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */ ++#define DT_FILTER 0x7fffffff /* Shared object to get values from */ ++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1) ++#define DT_EXTRANUM 3 ++ ++/* Values of `d_un.d_val' in the DT_FLAGS entry. */ ++#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */ ++#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */ ++#define DF_TEXTREL 0x00000004 /* Object contains text relocations */ ++#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */ ++#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */ ++ ++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 ++ entry in the dynamic section. */ ++#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */ ++#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */ ++#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */ ++#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/ ++#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/ ++#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/ ++#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */ ++#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */ ++#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */ ++#define DF_1_TRANS 0x00000200 ++#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */ ++#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */ ++#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */ ++#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/ ++#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */ ++#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */ ++#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */ ++ ++/* Flags for the feature selection in DT_FEATURE_1. 
*/ ++#define DTF_1_PARINIT 0x00000001 ++#define DTF_1_CONFEXP 0x00000002 ++ ++/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */ ++#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */ ++#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not ++ generally available. */ ++ ++/* Version definition sections. */ ++ ++typedef struct ++{ ++ Elf32_Half vd_version; /* Version revision */ ++ Elf32_Half vd_flags; /* Version information */ ++ Elf32_Half vd_ndx; /* Version Index */ ++ Elf32_Half vd_cnt; /* Number of associated aux entries */ ++ Elf32_Word vd_hash; /* Version name hash value */ ++ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf32_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf32_Verdef; ++ ++typedef struct ++{ ++ Elf64_Half vd_version; /* Version revision */ ++ Elf64_Half vd_flags; /* Version information */ ++ Elf64_Half vd_ndx; /* Version Index */ ++ Elf64_Half vd_cnt; /* Number of associated aux entries */ ++ Elf64_Word vd_hash; /* Version name hash value */ ++ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf64_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf64_Verdef; ++ ++ ++/* Legal values for vd_version (version revision). */ ++#define VER_DEF_NONE 0 /* No version */ ++#define VER_DEF_CURRENT 1 /* Current version */ ++#define VER_DEF_NUM 2 /* Given version number */ ++ ++/* Legal values for vd_flags (version information flags). */ ++#define VER_FLG_BASE 0x1 /* Version definition of file itself */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++/* Versym symbol index values. */ ++#define VER_NDX_LOCAL 0 /* Symbol is local. */ ++#define VER_NDX_GLOBAL 1 /* Symbol is global. */ ++#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */ ++#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */ ++ ++/* Auxialiary version information. */ ++ ++typedef struct ++{ ++ Elf32_Word vda_name; /* Version or dependency names */ ++ Elf32_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf32_Verdaux; ++ ++typedef struct ++{ ++ Elf64_Word vda_name; /* Version or dependency names */ ++ Elf64_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf64_Verdaux; ++ ++ ++/* Version dependency section. */ ++ ++typedef struct ++{ ++ Elf32_Half vn_version; /* Version of structure */ ++ Elf32_Half vn_cnt; /* Number of associated aux entries */ ++ Elf32_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf32_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf32_Verneed; ++ ++typedef struct ++{ ++ Elf64_Half vn_version; /* Version of structure */ ++ Elf64_Half vn_cnt; /* Number of associated aux entries */ ++ Elf64_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf64_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf64_Verneed; ++ ++ ++/* Legal values for vn_version (version revision). */ ++#define VER_NEED_NONE 0 /* No version */ ++#define VER_NEED_CURRENT 1 /* Current version */ ++#define VER_NEED_NUM 2 /* Given version number */ ++ ++/* Auxiliary needed version information. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word vna_hash; /* Hash value of dependency name */ ++ Elf32_Half vna_flags; /* Dependency specific information */ ++ Elf32_Half vna_other; /* Unused */ ++ Elf32_Word vna_name; /* Dependency name string offset */ ++ Elf32_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf32_Vernaux; ++ ++typedef struct ++{ ++ Elf64_Word vna_hash; /* Hash value of dependency name */ ++ Elf64_Half vna_flags; /* Dependency specific information */ ++ Elf64_Half vna_other; /* Unused */ ++ Elf64_Word vna_name; /* Dependency name string offset */ ++ Elf64_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf64_Vernaux; ++ ++ ++/* Legal values for vna_flags. */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++ ++/* Auxiliary vector. */ ++ ++/* This vector is normally only used by the program interpreter. The ++ usual definition in an ABI supplement uses the name auxv_t. The ++ vector is not usually defined in a standard file, but it ++ can't hurt. We rename it to avoid conflicts. The sizes of these ++ types are an arrangement between the exec server and the program ++ interpreter, so we don't fully specify them here. */ ++ ++typedef struct ++{ ++ int a_type; /* Entry type */ ++ union ++ { ++ long int a_val; /* Integer value */ ++ void *a_ptr; /* Pointer value */ ++ void (*a_fcn) (void); /* Function pointer value */ ++ } a_un; ++} Elf32_auxv_t; ++ ++typedef struct ++{ ++ long int a_type; /* Entry type */ ++ union ++ { ++ long int a_val; /* Integer value */ ++ void *a_ptr; /* Pointer value */ ++ void (*a_fcn) (void); /* Function pointer value */ ++ } a_un; ++} Elf64_auxv_t; ++ ++/* Legal values for a_type (entry type). */ ++ ++#define AT_NULL 0 /* End of vector */ ++#define AT_IGNORE 1 /* Entry should be ignored */ ++#define AT_EXECFD 2 /* File descriptor of program */ ++#define AT_PHDR 3 /* Program headers for program */ ++#define AT_PHENT 4 /* Size of program header entry */ ++#define AT_PHNUM 5 /* Number of program headers */ ++#define AT_PAGESZ 6 /* System page size */ ++#define AT_BASE 7 /* Base address of interpreter */ ++#define AT_FLAGS 8 /* Flags */ ++#define AT_ENTRY 9 /* Entry point of program */ ++#define AT_NOTELF 10 /* Program is not ELF */ ++#define AT_UID 11 /* Real uid */ ++#define AT_EUID 12 /* Effective uid */ ++#define AT_GID 13 /* Real gid */ ++#define AT_EGID 14 /* Effective gid */ ++#define AT_CLKTCK 17 /* Frequency of times() */ ++ ++/* Some more special a_type values describing the hardware. */ ++#define AT_PLATFORM 15 /* String identifying platform. */ ++#define AT_HWCAP 16 /* Machine dependent hints about ++ processor capabilities. */ ++ ++/* This entry gives some information about the FPU initialization ++ performed by the kernel. */ ++#define AT_FPUCW 18 /* Used FPU control word. */ ++ ++/* Cache block sizes. */ ++#define AT_DCACHEBSIZE 19 /* Data cache block size. */ ++#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */ ++#define AT_UCACHEBSIZE 21 /* Unified cache block size. */ ++ ++/* A special ignored value for PPC, used by the kernel to control the ++ interpretation of the AUXV. Must be > 16. */ ++#define AT_IGNOREPPC 22 /* Entry should be ignored. */ ++ ++#define AT_SECURE 23 /* Boolean, was exec setuid-like? */ ++ ++/* Pointer to the global system page used for system calls and other ++ nice things. */ ++#define AT_SYSINFO 32 ++#define AT_SYSINFO_EHDR 33 ++ ++ ++/* Note section contents. Each entry in the note section begins with ++ a header of a fixed form. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word n_namesz; /* Length of the note's name. */ ++ Elf32_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf32_Word n_type; /* Type of the note. */ ++} Elf32_Nhdr; ++ ++typedef struct ++{ ++ Elf64_Word n_namesz; /* Length of the note's name. */ ++ Elf64_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf64_Word n_type; /* Type of the note. */ ++} Elf64_Nhdr; ++ ++/* Known names of notes. */ ++ ++/* Solaris entries in the note section have this name. */ ++#define ELF_NOTE_SOLARIS "SUNW Solaris" ++ ++/* Note entries for GNU systems have this name. */ ++#define ELF_NOTE_GNU "GNU" ++ ++ ++/* Defined types of notes for Solaris. */ ++ ++/* Value of descriptor (one word) is desired pagesize for the binary. */ ++#define ELF_NOTE_PAGESIZE_HINT 1 ++ ++ ++/* Defined note types for GNU systems. */ ++ ++/* ABI information. The descriptor consists of words: ++ word 0: OS descriptor ++ word 1: major version of the ABI ++ word 2: minor version of the ABI ++ word 3: subminor version of the ABI ++*/ ++#define ELF_NOTE_ABI 1 ++ ++/* Known OSes. These value can appear in word 0 of an ELF_NOTE_ABI ++ note section entry. */ ++#define ELF_NOTE_OS_LINUX 0 ++#define ELF_NOTE_OS_GNU 1 ++#define ELF_NOTE_OS_SOLARIS2 2 ++#define ELF_NOTE_OS_FREEBSD 3 ++ ++ ++/* Move records. */ ++typedef struct ++{ ++ Elf32_Xword m_value; /* Symbol value. */ ++ Elf32_Word m_info; /* Size and index. */ ++ Elf32_Word m_poffset; /* Symbol offset. */ ++ Elf32_Half m_repeat; /* Repeat count. */ ++ Elf32_Half m_stride; /* Stride info. */ ++} Elf32_Move; ++ ++typedef struct ++{ ++ Elf64_Xword m_value; /* Symbol value. */ ++ Elf64_Xword m_info; /* Size and index. */ ++ Elf64_Xword m_poffset; /* Symbol offset. */ ++ Elf64_Half m_repeat; /* Repeat count. */ ++ Elf64_Half m_stride; /* Stride info. */ ++} Elf64_Move; ++ ++/* Macro to construct move records. */ ++#define ELF32_M_SYM(info) ((info) >> 8) ++#define ELF32_M_SIZE(info) ((unsigned char) (info)) ++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size)) ++ ++#define ELF64_M_SYM(info) ELF32_M_SYM (info) ++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info) ++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size) ++ ++ ++/* Motorola 68k specific definitions. */ ++ ++/* Values for Elf32_Ehdr.e_flags. */ ++#define EF_CPU32 0x00810000 ++ ++/* m68k relocs. 
*/ ++ ++#define R_68K_NONE 0 /* No reloc */ ++#define R_68K_32 1 /* Direct 32 bit */ ++#define R_68K_16 2 /* Direct 16 bit */ ++#define R_68K_8 3 /* Direct 8 bit */ ++#define R_68K_PC32 4 /* PC relative 32 bit */ ++#define R_68K_PC16 5 /* PC relative 16 bit */ ++#define R_68K_PC8 6 /* PC relative 8 bit */ ++#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */ ++#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */ ++#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */ ++#define R_68K_GOT32O 10 /* 32 bit GOT offset */ ++#define R_68K_GOT16O 11 /* 16 bit GOT offset */ ++#define R_68K_GOT8O 12 /* 8 bit GOT offset */ ++#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */ ++#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */ ++#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */ ++#define R_68K_PLT32O 16 /* 32 bit PLT offset */ ++#define R_68K_PLT16O 17 /* 16 bit PLT offset */ ++#define R_68K_PLT8O 18 /* 8 bit PLT offset */ ++#define R_68K_COPY 19 /* Copy symbol at runtime */ ++#define R_68K_GLOB_DAT 20 /* Create GOT entry */ ++#define R_68K_JMP_SLOT 21 /* Create PLT entry */ ++#define R_68K_RELATIVE 22 /* Adjust by program base */ ++/* Keep this the last entry. */ ++#define R_68K_NUM 23 ++ ++/* Intel 80386 specific definitions. */ ++ ++/* i386 relocs. */ ++ ++#define R_386_NONE 0 /* No reloc */ ++#define R_386_32 1 /* Direct 32 bit */ ++#define R_386_PC32 2 /* PC relative 32 bit */ ++#define R_386_GOT32 3 /* 32 bit GOT entry */ ++#define R_386_PLT32 4 /* 32 bit PLT address */ ++#define R_386_COPY 5 /* Copy symbol at runtime */ ++#define R_386_GLOB_DAT 6 /* Create GOT entry */ ++#define R_386_JMP_SLOT 7 /* Create PLT entry */ ++#define R_386_RELATIVE 8 /* Adjust by program base */ ++#define R_386_GOTOFF 9 /* 32 bit offset to GOT */ ++#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */ ++#define R_386_32PLT 11 ++#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */ ++#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS ++ block offset */ ++#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block ++ offset */ ++#define R_386_TLS_LE 17 /* Offset relative to static TLS ++ block */ ++#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of ++ general dynamic thread local data */ ++#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of ++ local dynamic thread local data ++ in LE code */ ++#define R_386_16 20 ++#define R_386_PC16 21 ++#define R_386_8 22 ++#define R_386_PC8 23 ++#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic ++ thread local data */ ++#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */ ++#define R_386_TLS_GD_CALL 26 /* Relocation for call to ++ __tls_get_addr() */ ++#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */ ++#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic ++ thread local data in LE code */ ++#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */ ++#define R_386_TLS_LDM_CALL 30 /* Relocation for call to ++ __tls_get_addr() in LDM code */ ++#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */ ++#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */ ++#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS ++ block offset */ ++#define R_386_TLS_LE_32 34 /* Negated offset relative to static ++ TLS block */ ++#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */ ++#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */ ++#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */ ++/* Keep this the 
last entry. */ ++#define R_386_NUM 38 ++ ++/* SUN SPARC specific definitions. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_REGISTER 13 /* Global register reserved to app. */ ++ ++/* Values for Elf64_Ehdr.e_flags. */ ++ ++#define EF_SPARCV9_MM 3 ++#define EF_SPARCV9_TSO 0 ++#define EF_SPARCV9_PSO 1 ++#define EF_SPARCV9_RMO 2 ++#define EF_SPARC_LEDATA 0x800000 /* little endian data */ ++#define EF_SPARC_EXT_MASK 0xFFFF00 ++#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */ ++#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */ ++#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */ ++#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */ ++ ++/* SPARC relocs. */ ++ ++#define R_SPARC_NONE 0 /* No reloc */ ++#define R_SPARC_8 1 /* Direct 8 bit */ ++#define R_SPARC_16 2 /* Direct 16 bit */ ++#define R_SPARC_32 3 /* Direct 32 bit */ ++#define R_SPARC_DISP8 4 /* PC relative 8 bit */ ++#define R_SPARC_DISP16 5 /* PC relative 16 bit */ ++#define R_SPARC_DISP32 6 /* PC relative 32 bit */ ++#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */ ++#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */ ++#define R_SPARC_HI22 9 /* High 22 bit */ ++#define R_SPARC_22 10 /* Direct 22 bit */ ++#define R_SPARC_13 11 /* Direct 13 bit */ ++#define R_SPARC_LO10 12 /* Truncated 10 bit */ ++#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */ ++#define R_SPARC_GOT13 14 /* 13 bit GOT entry */ ++#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */ ++#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */ ++#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */ ++#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */ ++#define R_SPARC_COPY 19 /* Copy symbol at runtime */ ++#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */ ++#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */ ++#define R_SPARC_RELATIVE 22 /* Adjust by program base */ ++#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */ ++ ++/* Additional Sparc64 relocs. */ ++ ++#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */ ++#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */ ++#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */ ++#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */ ++#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */ ++#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */ ++#define R_SPARC_10 30 /* Direct 10 bit */ ++#define R_SPARC_11 31 /* Direct 11 bit */ ++#define R_SPARC_64 32 /* Direct 64 bit */ ++#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */ ++#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */ ++#define R_SPARC_HM10 35 /* High middle 10 bits of ... */ ++#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */ ++#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */ ++#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */ ++#define R_SPARC_PC_LM22 39 /* Low miggle 22 bits of ... 
*/ ++#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */ ++#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */ ++#define R_SPARC_7 43 /* Direct 7 bit */ ++#define R_SPARC_5 44 /* Direct 5 bit */ ++#define R_SPARC_6 45 /* Direct 6 bit */ ++#define R_SPARC_DISP64 46 /* PC relative 64 bit */ ++#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */ ++#define R_SPARC_HIX22 48 /* High 22 bit complemented */ ++#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */ ++#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */ ++#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */ ++#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */ ++#define R_SPARC_REGISTER 53 /* Global register usage */ ++#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */ ++#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */ ++#define R_SPARC_TLS_GD_HI22 56 ++#define R_SPARC_TLS_GD_LO10 57 ++#define R_SPARC_TLS_GD_ADD 58 ++#define R_SPARC_TLS_GD_CALL 59 ++#define R_SPARC_TLS_LDM_HI22 60 ++#define R_SPARC_TLS_LDM_LO10 61 ++#define R_SPARC_TLS_LDM_ADD 62 ++#define R_SPARC_TLS_LDM_CALL 63 ++#define R_SPARC_TLS_LDO_HIX22 64 ++#define R_SPARC_TLS_LDO_LOX10 65 ++#define R_SPARC_TLS_LDO_ADD 66 ++#define R_SPARC_TLS_IE_HI22 67 ++#define R_SPARC_TLS_IE_LO10 68 ++#define R_SPARC_TLS_IE_LD 69 ++#define R_SPARC_TLS_IE_LDX 70 ++#define R_SPARC_TLS_IE_ADD 71 ++#define R_SPARC_TLS_LE_HIX22 72 ++#define R_SPARC_TLS_LE_LOX10 73 ++#define R_SPARC_TLS_DTPMOD32 74 ++#define R_SPARC_TLS_DTPMOD64 75 ++#define R_SPARC_TLS_DTPOFF32 76 ++#define R_SPARC_TLS_DTPOFF64 77 ++#define R_SPARC_TLS_TPOFF32 78 ++#define R_SPARC_TLS_TPOFF64 79 ++/* Keep this the last entry. */ ++#define R_SPARC_NUM 80 ++ ++/* For Sparc64, legal values for d_tag of Elf64_Dyn. */ ++ ++#define DT_SPARC_REGISTER 0x70000001 ++#define DT_SPARC_NUM 2 ++ ++/* Bits present in AT_HWCAP, primarily for Sparc32. */ ++ ++#define HWCAP_SPARC_FLUSH 1 /* The cpu supports flush insn. */ ++#define HWCAP_SPARC_STBAR 2 ++#define HWCAP_SPARC_SWAP 4 ++#define HWCAP_SPARC_MULDIV 8 ++#define HWCAP_SPARC_V9 16 /* The cpu is v9, so v8plus is ok. */ ++#define HWCAP_SPARC_ULTRA3 32 ++ ++/* MIPS R3000 specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */ ++#define EF_MIPS_PIC 2 /* Contains PIC code */ ++#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */ ++#define EF_MIPS_XGOT 8 ++#define EF_MIPS_64BIT_WHIRL 16 ++#define EF_MIPS_ABI2 32 ++#define EF_MIPS_ABI_ON32 64 ++#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */ ++ ++/* Legal values for MIPS architecture level. */ ++ ++#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ ++#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* The following are non-official names and should not be used. */ ++ ++#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ ++#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* Special section indices. 
*/ ++ ++#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */ ++#define SHN_MIPS_TEXT 0xff01 /* Allocated test symbols. */ ++#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */ ++#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */ ++#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */ ++#define SHT_MIPS_MSYM 0x70000001 ++#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */ ++#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */ ++#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */ ++#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/ ++#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */ ++#define SHT_MIPS_PACKAGE 0x70000007 ++#define SHT_MIPS_PACKSYM 0x70000008 ++#define SHT_MIPS_RELD 0x70000009 ++#define SHT_MIPS_IFACE 0x7000000b ++#define SHT_MIPS_CONTENT 0x7000000c ++#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */ ++#define SHT_MIPS_SHDR 0x70000010 ++#define SHT_MIPS_FDESC 0x70000011 ++#define SHT_MIPS_EXTSYM 0x70000012 ++#define SHT_MIPS_DENSE 0x70000013 ++#define SHT_MIPS_PDESC 0x70000014 ++#define SHT_MIPS_LOCSYM 0x70000015 ++#define SHT_MIPS_AUXSYM 0x70000016 ++#define SHT_MIPS_OPTSYM 0x70000017 ++#define SHT_MIPS_LOCSTR 0x70000018 ++#define SHT_MIPS_LINE 0x70000019 ++#define SHT_MIPS_RFDESC 0x7000001a ++#define SHT_MIPS_DELTASYM 0x7000001b ++#define SHT_MIPS_DELTAINST 0x7000001c ++#define SHT_MIPS_DELTACLASS 0x7000001d ++#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */ ++#define SHT_MIPS_DELTADECL 0x7000001f ++#define SHT_MIPS_SYMBOL_LIB 0x70000020 ++#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */ ++#define SHT_MIPS_TRANSLATE 0x70000022 ++#define SHT_MIPS_PIXIE 0x70000023 ++#define SHT_MIPS_XLATE 0x70000024 ++#define SHT_MIPS_XLATE_DEBUG 0x70000025 ++#define SHT_MIPS_WHIRL 0x70000026 ++#define SHT_MIPS_EH_REGION 0x70000027 ++#define SHT_MIPS_XLATE_OLD 0x70000028 ++#define SHT_MIPS_PDR_EXCEPTION 0x70000029 ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */ ++#define SHF_MIPS_MERGE 0x20000000 ++#define SHF_MIPS_ADDR 0x40000000 ++#define SHF_MIPS_STRINGS 0x80000000 ++#define SHF_MIPS_NOSTRIP 0x08000000 ++#define SHF_MIPS_LOCAL 0x04000000 ++#define SHF_MIPS_NAMES 0x02000000 ++#define SHF_MIPS_NODUPE 0x01000000 ++ ++ ++/* Symbol tables. */ ++ ++/* MIPS specific values for `st_other'. */ ++#define STO_MIPS_DEFAULT 0x0 ++#define STO_MIPS_INTERNAL 0x1 ++#define STO_MIPS_HIDDEN 0x2 ++#define STO_MIPS_PROTECTED 0x3 ++#define STO_MIPS_SC_ALIGN_UNUSED 0xff ++ ++/* MIPS specific values for `st_info'. */ ++#define STB_MIPS_SPLIT_COMMON 13 ++ ++/* Entries found in sections of type SHT_MIPS_GPTAB. */ ++ ++typedef union ++{ ++ struct ++ { ++ Elf32_Word gt_current_g_value; /* -G value used for compilation */ ++ Elf32_Word gt_unused; /* Not used */ ++ } gt_header; /* First entry in section */ ++ struct ++ { ++ Elf32_Word gt_g_value; /* If this value were used for -G */ ++ Elf32_Word gt_bytes; /* This many bytes would be used */ ++ } gt_entry; /* Subsequent entries in section */ ++} Elf32_gptab; ++ ++/* Entry found in sections of type SHT_MIPS_REGINFO. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word ri_gprmask; /* General registers used */ ++ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */ ++ Elf32_Sword ri_gp_value; /* $gp register value */ ++} Elf32_RegInfo; ++ ++/* Entries found in sections of type SHT_MIPS_OPTIONS. */ ++ ++typedef struct ++{ ++ unsigned char kind; /* Determines interpretation of the ++ variable part of descriptor. */ ++ unsigned char size; /* Size of descriptor, including header. */ ++ Elf32_Section section; /* Section header index of section affected, ++ 0 for global options. */ ++ Elf32_Word info; /* Kind-specific information. */ ++} Elf_Options; ++ ++/* Values for `kind' field in Elf_Options. */ ++ ++#define ODK_NULL 0 /* Undefined. */ ++#define ODK_REGINFO 1 /* Register usage information. */ ++#define ODK_EXCEPTIONS 2 /* Exception processing options. */ ++#define ODK_PAD 3 /* Section padding options. */ ++#define ODK_HWPATCH 4 /* Hardware workarounds performed */ ++#define ODK_FILL 5 /* record the fill value used by the linker. */ ++#define ODK_TAGS 6 /* reserve space for desktop tools to write. */ ++#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */ ++#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */ ++ ++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */ ++ ++#define OEX_FPU_MIN 0x1f /* FPE's which MUST be enabled. */ ++#define OEX_FPU_MAX 0x1f00 /* FPE's which MAY be enabled. */ ++#define OEX_PAGE0 0x10000 /* page zero must be mapped. */ ++#define OEX_SMM 0x20000 /* Force sequential memory mode? */ ++#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */ ++#define OEX_PRECISEFP OEX_FPDBUG ++#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */ ++ ++#define OEX_FPU_INVAL 0x10 ++#define OEX_FPU_DIV0 0x08 ++#define OEX_FPU_OFLO 0x04 ++#define OEX_FPU_UFLO 0x02 ++#define OEX_FPU_INEX 0x01 ++ ++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */ ++ ++#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */ ++#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */ ++#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */ ++#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */ ++ ++#define OPAD_PREFIX 0x1 ++#define OPAD_POSTFIX 0x2 ++#define OPAD_SYMBOL 0x4 ++ ++/* Entry found in `.options' section. */ ++ ++typedef struct ++{ ++ Elf32_Word hwp_flags1; /* Extra flags. */ ++ Elf32_Word hwp_flags2; /* Extra flags. */ ++} Elf_Options_Hw; ++ ++/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries. */ ++ ++#define OHWA0_R4KEOP_CHECKED 0x00000001 ++#define OHWA1_R4KEOP_CLEAN 0x00000002 ++ ++/* MIPS relocs. 
*/ ++ ++#define R_MIPS_NONE 0 /* No reloc */ ++#define R_MIPS_16 1 /* Direct 16 bit */ ++#define R_MIPS_32 2 /* Direct 32 bit */ ++#define R_MIPS_REL32 3 /* PC relative 32 bit */ ++#define R_MIPS_26 4 /* Direct 26 bit shifted */ ++#define R_MIPS_HI16 5 /* High 16 bit */ ++#define R_MIPS_LO16 6 /* Low 16 bit */ ++#define R_MIPS_GPREL16 7 /* GP relative 16 bit */ ++#define R_MIPS_LITERAL 8 /* 16 bit literal entry */ ++#define R_MIPS_GOT16 9 /* 16 bit GOT entry */ ++#define R_MIPS_PC16 10 /* PC relative 16 bit */ ++#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */ ++#define R_MIPS_GPREL32 12 /* GP relative 32 bit */ ++ ++#define R_MIPS_SHIFT5 16 ++#define R_MIPS_SHIFT6 17 ++#define R_MIPS_64 18 ++#define R_MIPS_GOT_DISP 19 ++#define R_MIPS_GOT_PAGE 20 ++#define R_MIPS_GOT_OFST 21 ++#define R_MIPS_GOT_HI16 22 ++#define R_MIPS_GOT_LO16 23 ++#define R_MIPS_SUB 24 ++#define R_MIPS_INSERT_A 25 ++#define R_MIPS_INSERT_B 26 ++#define R_MIPS_DELETE 27 ++#define R_MIPS_HIGHER 28 ++#define R_MIPS_HIGHEST 29 ++#define R_MIPS_CALL_HI16 30 ++#define R_MIPS_CALL_LO16 31 ++#define R_MIPS_SCN_DISP 32 ++#define R_MIPS_REL16 33 ++#define R_MIPS_ADD_IMMEDIATE 34 ++#define R_MIPS_PJUMP 35 ++#define R_MIPS_RELGOT 36 ++#define R_MIPS_JALR 37 ++/* Keep this the last entry. */ ++#define R_MIPS_NUM 38 ++ ++/* Legal values for p_type field of Elf32_Phdr. */ ++ ++#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ ++#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */ ++#define PT_MIPS_OPTIONS 0x70000002 ++ ++/* Special program header types. */ ++ ++#define PF_MIPS_LOCAL 0x10000000 ++ ++/* Legal values for d_tag field of Elf32_Dyn. */ ++ ++#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */ ++#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */ ++#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */ ++#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */ ++#define DT_MIPS_FLAGS 0x70000005 /* Flags */ ++#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */ ++#define DT_MIPS_MSYM 0x70000007 ++#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */ ++#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */ ++#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */ ++#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */ ++#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */ ++#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */ ++#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */ ++#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */ ++#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */ ++#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */ ++#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */ ++#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in ++ DT_MIPS_DELTA_CLASS. */ ++#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */ ++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in ++ DT_MIPS_DELTA_INSTANCE. */ ++#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */ ++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in ++ DT_MIPS_DELTA_RELOC. */ ++#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta ++ relocations refer to. */ ++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in ++ DT_MIPS_DELTA_SYM. 
*/ ++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the ++ class declaration. */ ++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in ++ DT_MIPS_DELTA_CLASSSYM. */ ++#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */ ++#define DT_MIPS_PIXIE_INIT 0x70000023 ++#define DT_MIPS_SYMBOL_LIB 0x70000024 ++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025 ++#define DT_MIPS_LOCAL_GOTIDX 0x70000026 ++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027 ++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028 ++#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */ ++#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */ ++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b ++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */ ++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_rsolve ++ function stored in GOT. */ ++#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added ++ by rld on dlopen() calls. */ ++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */ ++#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */ ++#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */ ++#define DT_MIPS_NUM 0x32 ++ ++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. */ ++ ++#define RHF_NONE 0 /* No flags */ ++#define RHF_QUICKSTART (1 << 0) /* Use quickstart */ ++#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */ ++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */ ++#define RHF_NO_MOVE (1 << 3) ++#define RHF_SGI_ONLY (1 << 4) ++#define RHF_GUARANTEE_INIT (1 << 5) ++#define RHF_DELTA_C_PLUS_PLUS (1 << 6) ++#define RHF_GUARANTEE_START_INIT (1 << 7) ++#define RHF_PIXIE (1 << 8) ++#define RHF_DEFAULT_DELAY_LOAD (1 << 9) ++#define RHF_REQUICKSTART (1 << 10) ++#define RHF_REQUICKSTARTED (1 << 11) ++#define RHF_CORD (1 << 12) ++#define RHF_NO_UNRES_UNDEF (1 << 13) ++#define RHF_RLD_ORDER_SAFE (1 << 14) ++ ++/* Entries found in sections of type SHT_MIPS_LIBLIST. */ ++ ++typedef struct ++{ ++ Elf32_Word l_name; /* Name (string table index) */ ++ Elf32_Word l_time_stamp; /* Timestamp */ ++ Elf32_Word l_checksum; /* Checksum */ ++ Elf32_Word l_version; /* Interface version */ ++ Elf32_Word l_flags; /* Flags */ ++} Elf32_Lib; ++ ++typedef struct ++{ ++ Elf64_Word l_name; /* Name (string table index) */ ++ Elf64_Word l_time_stamp; /* Timestamp */ ++ Elf64_Word l_checksum; /* Checksum */ ++ Elf64_Word l_version; /* Interface version */ ++ Elf64_Word l_flags; /* Flags */ ++} Elf64_Lib; ++ ++ ++/* Legal values for l_flags. */ ++ ++#define LL_NONE 0 ++#define LL_EXACT_MATCH (1 << 0) /* Require exact match */ ++#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */ ++#define LL_REQUIRE_MINOR (1 << 2) ++#define LL_EXPORTS (1 << 3) ++#define LL_DELAY_LOAD (1 << 4) ++#define LL_DELTA (1 << 5) ++ ++/* Entries found in sections of type SHT_MIPS_CONFLICT. */ ++ ++typedef Elf32_Addr Elf32_Conflict; ++ ++ ++/* HPPA specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ ++#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ ++#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ ++#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ ++#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch ++ prediction. */ ++#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. 
*/ ++#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ ++ ++/* Defined values for `e_flags & EF_PARISC_ARCH' are: */ ++ ++#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ ++#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ ++#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ ++ ++/* Additional section indeces. */ ++ ++#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared ++ symbols in ANSI C. */ ++#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ ++#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ ++#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ ++#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ ++#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ ++ ++#define STT_HP_OPAQUE (STT_LOOS + 0x1) ++#define STT_HP_STUB (STT_LOOS + 0x2) ++ ++/* HPPA relocs. */ ++ ++#define R_PARISC_NONE 0 /* No reloc. */ ++#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ ++#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ ++#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ ++#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ ++#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ ++#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ ++#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ ++#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ ++#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ ++#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ ++#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ ++#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ ++#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ ++#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ ++#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ ++#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ ++#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ ++#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ ++#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ ++#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ ++#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ ++#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ ++#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ ++#define R_PARISC_FPTR64 64 /* 64 bits function address. */ ++#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ ++#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ ++#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ ++#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ ++#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ ++#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. 
*/ ++#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ ++#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ ++#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ ++#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ ++#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ ++#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ ++#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ ++#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ ++#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ ++#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ ++#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ ++#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LORESERVE 128 ++#define R_PARISC_COPY 128 /* Copy relocation. */ ++#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ ++#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ ++#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ ++#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ ++#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ ++#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ ++#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ ++#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ ++#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. 
*/ ++#define R_PARISC_HIRESERVE 255 ++ ++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PT_HP_TLS (PT_LOOS + 0x0) ++#define PT_HP_CORE_NONE (PT_LOOS + 0x1) ++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) ++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) ++#define PT_HP_CORE_COMM (PT_LOOS + 0x4) ++#define PT_HP_CORE_PROC (PT_LOOS + 0x5) ++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) ++#define PT_HP_CORE_STACK (PT_LOOS + 0x7) ++#define PT_HP_CORE_SHM (PT_LOOS + 0x8) ++#define PT_HP_CORE_MMF (PT_LOOS + 0x9) ++#define PT_HP_PARALLEL (PT_LOOS + 0x10) ++#define PT_HP_FASTBIND (PT_LOOS + 0x11) ++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) ++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) ++#define PT_HP_STACK (PT_LOOS + 0x14) ++ ++#define PT_PARISC_ARCHEXT 0x70000000 ++#define PT_PARISC_UNWIND 0x70000001 ++ ++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PF_PARISC_SBP 0x08000000 ++ ++#define PF_HP_PAGE_SIZE 0x00100000 ++#define PF_HP_FAR_SHARED 0x00200000 ++#define PF_HP_NEAR_SHARED 0x00400000 ++#define PF_HP_CODE 0x01000000 ++#define PF_HP_MODIFY 0x02000000 ++#define PF_HP_LAZYSWAP 0x04000000 ++#define PF_HP_SBP 0x08000000 ++ ++ ++/* Alpha specific definitions. */ ++ ++/* Legal values for e_flags field of Elf64_Ehdr. */ ++ ++#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. */ ++#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */ ++ ++/* Legal values for sh_type field of Elf64_Shdr. */ ++ ++/* These two are primerily concerned with ECOFF debugging info. */ ++#define SHT_ALPHA_DEBUG 0x70000001 ++#define SHT_ALPHA_REGINFO 0x70000002 ++ ++/* Legal values for sh_flags field of Elf64_Shdr. */ ++ ++#define SHF_ALPHA_GPREL 0x10000000 ++ ++/* Legal values for st_other field of Elf64_Sym. */ ++#define STO_ALPHA_NOPV 0x80 /* No PV required. */ ++#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */ ++ ++/* Alpha relocs. */ ++ ++#define R_ALPHA_NONE 0 /* No reloc */ ++#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ ++#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ ++#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ ++#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ ++#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ ++#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ ++#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ ++#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ ++#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ ++#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ ++#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ ++#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ ++#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ ++#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ ++#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ ++#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ ++#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ ++#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ ++#define R_ALPHA_TLS_GD_HI 28 ++#define R_ALPHA_TLSGD 29 ++#define R_ALPHA_TLS_LDM 30 ++#define R_ALPHA_DTPMOD64 31 ++#define R_ALPHA_GOTDTPREL 32 ++#define R_ALPHA_DTPREL64 33 ++#define R_ALPHA_DTPRELHI 34 ++#define R_ALPHA_DTPRELLO 35 ++#define R_ALPHA_DTPREL16 36 ++#define R_ALPHA_GOTTPREL 37 ++#define R_ALPHA_TPREL64 38 ++#define R_ALPHA_TPRELHI 39 ++#define R_ALPHA_TPRELLO 40 ++#define R_ALPHA_TPREL16 41 ++/* Keep this the last entry. */ ++#define R_ALPHA_NUM 46 ++ ++/* Magic values of the LITUSE relocation addend. 
*/ ++#define LITUSE_ALPHA_ADDR 0 ++#define LITUSE_ALPHA_BASE 1 ++#define LITUSE_ALPHA_BYTOFF 2 ++#define LITUSE_ALPHA_JSR 3 ++#define LITUSE_ALPHA_TLS_GD 4 ++#define LITUSE_ALPHA_TLS_LDM 5 ++ ++ ++/* PowerPC specific declarations */ ++ ++/* Values for Elf32/64_Ehdr.e_flags. */ ++#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */ ++ ++/* Cygnus local bits below */ ++#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/ ++#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib ++ flag */ ++ ++/* PowerPC relocations defined by the ABIs */ ++#define R_PPC_NONE 0 ++#define R_PPC_ADDR32 1 /* 32bit absolute address */ ++#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */ ++#define R_PPC_ADDR16 3 /* 16bit absolute address */ ++#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ ++#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ ++#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ ++#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ ++#define R_PPC_ADDR14_BRTAKEN 8 ++#define R_PPC_ADDR14_BRNTAKEN 9 ++#define R_PPC_REL24 10 /* PC relative 26 bit */ ++#define R_PPC_REL14 11 /* PC relative 16 bit */ ++#define R_PPC_REL14_BRTAKEN 12 ++#define R_PPC_REL14_BRNTAKEN 13 ++#define R_PPC_GOT16 14 ++#define R_PPC_GOT16_LO 15 ++#define R_PPC_GOT16_HI 16 ++#define R_PPC_GOT16_HA 17 ++#define R_PPC_PLTREL24 18 ++#define R_PPC_COPY 19 ++#define R_PPC_GLOB_DAT 20 ++#define R_PPC_JMP_SLOT 21 ++#define R_PPC_RELATIVE 22 ++#define R_PPC_LOCAL24PC 23 ++#define R_PPC_UADDR32 24 ++#define R_PPC_UADDR16 25 ++#define R_PPC_REL32 26 ++#define R_PPC_PLT32 27 ++#define R_PPC_PLTREL32 28 ++#define R_PPC_PLT16_LO 29 ++#define R_PPC_PLT16_HI 30 ++#define R_PPC_PLT16_HA 31 ++#define R_PPC_SDAREL16 32 ++#define R_PPC_SECTOFF 33 ++#define R_PPC_SECTOFF_LO 34 ++#define R_PPC_SECTOFF_HI 35 ++#define R_PPC_SECTOFF_HA 36 ++ ++/* PowerPC relocations defined for the TLS access ABI. 
*/ ++#define R_PPC_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */ ++#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */ ++#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */ ++#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */ ++#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */ ++#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */ ++#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */ ++#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ ++#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ ++ ++/* Keep this the last entry. */ ++#define R_PPC_NUM 95 ++ ++/* The remaining relocs are from the Embedded ELF ABI, and are not ++ in the SVR4 ELF ABI. */ ++#define R_PPC_EMB_NADDR32 101 ++#define R_PPC_EMB_NADDR16 102 ++#define R_PPC_EMB_NADDR16_LO 103 ++#define R_PPC_EMB_NADDR16_HI 104 ++#define R_PPC_EMB_NADDR16_HA 105 ++#define R_PPC_EMB_SDAI16 106 ++#define R_PPC_EMB_SDA2I16 107 ++#define R_PPC_EMB_SDA2REL 108 ++#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */ ++#define R_PPC_EMB_MRKREF 110 ++#define R_PPC_EMB_RELSEC16 111 ++#define R_PPC_EMB_RELST_LO 112 ++#define R_PPC_EMB_RELST_HI 113 ++#define R_PPC_EMB_RELST_HA 114 ++#define R_PPC_EMB_BIT_FLD 115 ++#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */ ++ ++/* Diab tool relocations. */ ++#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */ ++#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */ ++#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */ ++#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */ ++#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */ ++#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */ ++ ++/* This is a phony reloc to handle any old fashioned TOC16 references ++ that may still be in object files. 
*/ ++#define R_PPC_TOC16 255 ++ ++ ++/* PowerPC64 relocations defined by the ABIs */ ++#define R_PPC64_NONE R_PPC_NONE ++#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */ ++#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */ ++#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */ ++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */ ++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */ ++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */ ++#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */ ++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN ++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN ++#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 26 bit, word aligned */ ++#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */ ++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN ++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN ++#define R_PPC64_GOT16 R_PPC_GOT16 ++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO ++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI ++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA ++ ++#define R_PPC64_COPY R_PPC_COPY ++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT ++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT ++#define R_PPC64_RELATIVE R_PPC_RELATIVE ++ ++#define R_PPC64_UADDR32 R_PPC_UADDR32 ++#define R_PPC64_UADDR16 R_PPC_UADDR16 ++#define R_PPC64_REL32 R_PPC_REL32 ++#define R_PPC64_PLT32 R_PPC_PLT32 ++#define R_PPC64_PLTREL32 R_PPC_PLTREL32 ++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO ++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI ++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA ++ ++#define R_PPC64_SECTOFF R_PPC_SECTOFF ++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO ++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI ++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA ++#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */ ++#define R_PPC64_ADDR64 38 /* doubleword64 S + A */ ++#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */ ++#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */ ++#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */ ++#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */ ++#define R_PPC64_UADDR64 43 /* doubleword64 S + A */ ++#define R_PPC64_REL64 44 /* doubleword64 S + A - P */ ++#define R_PPC64_PLT64 45 /* doubleword64 L + A */ ++#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */ ++#define R_PPC64_TOC16 47 /* half16* S + A - .TOC */ ++#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */ ++#define R_PPC64_TOC 51 /* doubleword64 .TOC */ ++#define R_PPC64_PLTGOT16 52 /* half16* M + A */ ++#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */ ++#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */ ++#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */ ++ ++#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */ ++#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */ ++#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */ ++#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */ ++#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */ ++#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */ ++#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */ ++#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */ ++#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) 
>> 2 */ ++#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */ ++#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */ ++ ++/* PowerPC64 relocations defined for the TLS access ABI. */ ++#define R_PPC64_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */ ++#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */ ++#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */ ++#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */ ++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */ ++#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */ ++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */ ++#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */ ++#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */ ++#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */ ++#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */ ++#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */ ++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */ ++#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */ ++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ ++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ ++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ ++ ++/* Keep this the last entry. */ ++#define R_PPC64_NUM 107 ++ ++/* PowerPC64 specific values for the Dyn d_tag field. */ ++#define DT_PPC64_GLINK (DT_LOPROC + 0) ++#define DT_PPC64_NUM 1 ++ ++ ++/* ARM specific declarations */ ++ ++/* Processor specific flags for the ELF header e_flags field. 
*/ ++#define EF_ARM_RELEXEC 0x01 ++#define EF_ARM_HASENTRY 0x02 ++#define EF_ARM_INTERWORK 0x04 ++#define EF_ARM_APCS_26 0x08 ++#define EF_ARM_APCS_FLOAT 0x10 ++#define EF_ARM_PIC 0x20 ++#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */ ++#define EF_ARM_NEW_ABI 0x80 ++#define EF_ARM_OLD_ABI 0x100 ++ ++/* Other constants defined in the ARM ELF spec. version B-01. */ ++/* NB. These conflict with values defined above. */ ++#define EF_ARM_SYMSARESORTED 0x04 ++#define EF_ARM_DYNSYMSUSESEGIDX 0x08 ++#define EF_ARM_MAPSYMSFIRST 0x10 ++#define EF_ARM_EABIMASK 0XFF000000 ++ ++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK) ++#define EF_ARM_EABI_UNKNOWN 0x00000000 ++#define EF_ARM_EABI_VER1 0x01000000 ++#define EF_ARM_EABI_VER2 0x02000000 ++ ++/* Additional symbol types for Thumb */ ++#define STT_ARM_TFUNC 0xd ++ ++/* ARM-specific values for sh_flags */ ++#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ ++#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined ++ in the input to a link step */ ++ ++/* ARM-specific program header flags */ ++#define PF_ARM_SB 0x10000000 /* Segment contains the location ++ addressed by the static base */ ++ ++/* ARM relocs. */ ++#define R_ARM_NONE 0 /* No reloc */ ++#define R_ARM_PC24 1 /* PC relative 26 bit branch */ ++#define R_ARM_ABS32 2 /* Direct 32 bit */ ++#define R_ARM_REL32 3 /* PC relative 32 bit */ ++#define R_ARM_PC13 4 ++#define R_ARM_ABS16 5 /* Direct 16 bit */ ++#define R_ARM_ABS12 6 /* Direct 12 bit */ ++#define R_ARM_THM_ABS5 7 ++#define R_ARM_ABS8 8 /* Direct 8 bit */ ++#define R_ARM_SBREL32 9 ++#define R_ARM_THM_PC22 10 ++#define R_ARM_THM_PC8 11 ++#define R_ARM_AMP_VCALL9 12 ++#define R_ARM_SWI24 13 ++#define R_ARM_THM_SWI8 14 ++#define R_ARM_XPC25 15 ++#define R_ARM_THM_XPC22 16 ++#define R_ARM_COPY 20 /* Copy symbol at runtime */ ++#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ ++#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ ++#define R_ARM_RELATIVE 23 /* Adjust by program base */ ++#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ ++#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ ++#define R_ARM_GOT32 26 /* 32 bit GOT entry */ ++#define R_ARM_PLT32 27 /* 32 bit PLT address */ ++#define R_ARM_ALU_PCREL_7_0 32 ++#define R_ARM_ALU_PCREL_15_8 33 ++#define R_ARM_ALU_PCREL_23_15 34 ++#define R_ARM_LDR_SBREL_11_0 35 ++#define R_ARM_ALU_SBREL_19_12 36 ++#define R_ARM_ALU_SBREL_27_20 37 ++#define R_ARM_GNU_VTENTRY 100 ++#define R_ARM_GNU_VTINHERIT 101 ++#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ ++#define R_ARM_THM_PC9 103 /* thumb conditional branch */ ++#define R_ARM_RXPC25 249 ++#define R_ARM_RSBREL32 250 ++#define R_ARM_THM_RPC22 251 ++#define R_ARM_RREL32 252 ++#define R_ARM_RABS22 253 ++#define R_ARM_RPC24 254 ++#define R_ARM_RBASE 255 ++/* Keep this the last entry. */ ++#define R_ARM_NUM 256 ++ ++/* IA-64 specific declarations. */ ++ ++/* Processor specific flags for the Ehdr e_flags field. */ ++#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ ++#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ ++#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ ++ ++/* Processor specific values for the Phdr p_type field. */ ++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ ++#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ ++ ++/* Processor specific flags for the Phdr p_flags field. 
*/ ++#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Shdr sh_type field. */ ++#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ ++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ ++ ++/* Processor specific flags for the Shdr sh_flags field. */ ++#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ ++#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Dyn d_tag field. */ ++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) ++#define DT_IA_64_NUM 1 ++ ++/* IA-64 relocations. */ ++#define R_IA64_NONE 0x00 /* none */ ++#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ ++#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ ++#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ ++#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ ++#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ ++#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ ++#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ ++#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ ++#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ ++#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ ++#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ ++#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ ++#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ ++#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ ++#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ ++#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ ++#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ ++#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ ++#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ ++#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ ++#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ ++#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ ++#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ ++#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ ++#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ ++#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ ++#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ ++#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ ++#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ ++#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ ++#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ ++#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ ++#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ ++#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ ++#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ ++#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ ++#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ ++#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ ++#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ ++#define 
R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ ++#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ ++#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ ++#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ ++#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ ++#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ ++#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ ++#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ ++#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ ++#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ ++#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ ++#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ ++#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ ++#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ ++#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ ++#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ ++#define R_IA64_COPY 0x84 /* copy relocation */ ++#define R_IA64_SUB 0x85 /* Addend and symbol difference */ ++#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ ++#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ ++#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ ++#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ ++#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ ++#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ ++#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ ++#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ ++#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ ++#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ ++#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ ++#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ ++#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ ++#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ ++#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ ++#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ ++ ++/* SH specific declarations */ ++ ++/* SH relocs. */ ++#define R_SH_NONE 0 ++#define R_SH_DIR32 1 ++#define R_SH_REL32 2 ++#define R_SH_DIR8WPN 3 ++#define R_SH_IND12W 4 ++#define R_SH_DIR8WPL 5 ++#define R_SH_DIR8WPZ 6 ++#define R_SH_DIR8BP 7 ++#define R_SH_DIR8W 8 ++#define R_SH_DIR8L 9 ++#define R_SH_SWITCH16 25 ++#define R_SH_SWITCH32 26 ++#define R_SH_USES 27 ++#define R_SH_COUNT 28 ++#define R_SH_ALIGN 29 ++#define R_SH_CODE 30 ++#define R_SH_DATA 31 ++#define R_SH_LABEL 32 ++#define R_SH_SWITCH8 33 ++#define R_SH_GNU_VTINHERIT 34 ++#define R_SH_GNU_VTENTRY 35 ++#define R_SH_TLS_GD_32 144 ++#define R_SH_TLS_LD_32 145 ++#define R_SH_TLS_LDO_32 146 ++#define R_SH_TLS_IE_32 147 ++#define R_SH_TLS_LE_32 148 ++#define R_SH_TLS_DTPMOD32 149 ++#define R_SH_TLS_DTPOFF32 150 ++#define R_SH_TLS_TPOFF32 151 ++#define R_SH_GOT32 160 ++#define R_SH_PLT32 161 ++#define R_SH_COPY 162 ++#define R_SH_GLOB_DAT 163 ++#define R_SH_JMP_SLOT 164 ++#define R_SH_RELATIVE 165 ++#define R_SH_GOTOFF 166 ++#define R_SH_GOTPC 167 ++/* Keep this the last entry. */ ++#define R_SH_NUM 256 ++ ++/* Additional s390 relocs */ ++ ++#define R_390_NONE 0 /* No reloc. 
*/ ++#define R_390_8 1 /* Direct 8 bit. */ ++#define R_390_12 2 /* Direct 12 bit. */ ++#define R_390_16 3 /* Direct 16 bit. */ ++#define R_390_32 4 /* Direct 32 bit. */ ++#define R_390_PC32 5 /* PC relative 32 bit. */ ++#define R_390_GOT12 6 /* 12 bit GOT offset. */ ++#define R_390_GOT32 7 /* 32 bit GOT offset. */ ++#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ ++#define R_390_COPY 9 /* Copy symbol at runtime. */ ++#define R_390_GLOB_DAT 10 /* Create GOT entry. */ ++#define R_390_JMP_SLOT 11 /* Create PLT entry. */ ++#define R_390_RELATIVE 12 /* Adjust by program base. */ ++#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ ++#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */ ++#define R_390_GOT16 15 /* 16 bit GOT offset. */ ++#define R_390_PC16 16 /* PC relative 16 bit. */ ++#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ ++#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ ++#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ ++#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ ++#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ ++#define R_390_64 22 /* Direct 64 bit. */ ++#define R_390_PC64 23 /* PC relative 64 bit. */ ++#define R_390_GOT64 24 /* 64 bit GOT offset. */ ++#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ ++#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ ++#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ ++#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ ++#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ ++#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ ++#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ ++#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ ++#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ ++#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */ ++#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ ++#define R_390_TLS_GDCALL 38 /* Tag for function call in general ++ dynamic TLS code. */ ++#define R_390_TLS_LDCALL 39 /* Tag for function call in local ++ dynamic TLS code. */ ++#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to ++ static TLS block. */ ++#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to ++ static TLS block. */ ++#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS ++ block. 
*/ ++#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS ++ block. */ ++#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ ++#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ ++#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS ++ block. */ ++ ++/* Keep this the last entry. */ ++#define R_390_NUM 57 ++ ++/* CRIS relocations. */ ++#define R_CRIS_NONE 0 ++#define R_CRIS_8 1 ++#define R_CRIS_16 2 ++#define R_CRIS_32 3 ++#define R_CRIS_8_PCREL 4 ++#define R_CRIS_16_PCREL 5 ++#define R_CRIS_32_PCREL 6 ++#define R_CRIS_GNU_VTINHERIT 7 ++#define R_CRIS_GNU_VTENTRY 8 ++#define R_CRIS_COPY 9 ++#define R_CRIS_GLOB_DAT 10 ++#define R_CRIS_JUMP_SLOT 11 ++#define R_CRIS_RELATIVE 12 ++#define R_CRIS_16_GOT 13 ++#define R_CRIS_32_GOT 14 ++#define R_CRIS_16_GOTPLT 15 ++#define R_CRIS_32_GOTPLT 16 ++#define R_CRIS_32_GOTREL 17 ++#define R_CRIS_32_PLT_GOTREL 18 ++#define R_CRIS_32_PLT_PCREL 19 ++ ++#define R_CRIS_NUM 20 ++ ++/* AMD x86-64 relocations. */ ++#define R_X86_64_NONE 0 /* No reloc */ ++#define R_X86_64_64 1 /* Direct 64 bit */ ++#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ ++#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ ++#define R_X86_64_PLT32 4 /* 32 bit PLT address */ ++#define R_X86_64_COPY 5 /* Copy symbol at runtime */ ++#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ ++#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ ++#define R_X86_64_RELATIVE 8 /* Adjust by program base */ ++#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative ++ offset to GOT */ ++#define R_X86_64_32 10 /* Direct 32 bit zero extended */ ++#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ ++#define R_X86_64_16 12 /* Direct 16 bit zero extended */ ++#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ ++#define R_X86_64_8 14 /* Direct 8 bit sign extended */ ++#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ ++#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */ ++#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */ ++#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */ ++#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset ++ to two GOT entries for GD symbol */ ++#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset ++ to two GOT entries for LD symbol */ ++#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */ ++#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset ++ to GOT entry for IE symbol */ ++#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */ ++ ++#define R_X86_64_NUM 24 ++ ++__END_DECLS ++ ++#endif /* elf.h */ + + #include "elfconfig.h" + +@@ -195,3 +2641,4 @@ + void fatal(const char *fmt, ...); + void warn(const char *fmt, ...); + void merror(const char *fmt, ...); ++ +diff -Nur linux-2.6.36.orig/scripts/mod/sumversion.c linux-2.6.36/scripts/mod/sumversion.c +--- linux-2.6.36.orig/scripts/mod/sumversion.c 2010-10-20 22:30:22.000000000 +0200 ++++ linux-2.6.36/scripts/mod/sumversion.c 2010-11-28 18:33:24.000000000 +0100 +@@ -1,4 +1,4 @@ +-#include ++/* #include */ + #ifdef __sun__ + #include + #else diff --git a/target/linux/patches/3.4.113/defaults.patch b/target/linux/patches/3.4.113/defaults.patch new file mode 100644 index 000000000..58aae610b --- /dev/null +++ b/target/linux/patches/3.4.113/defaults.patch @@ -0,0 +1,22 @@ +diff -Nur linux-3.0.4.orig/fs/Kconfig linux-3.0.4/fs/Kconfig +--- linux-3.0.4.orig/fs/Kconfig 2011-08-29 22:56:30.000000000 +0200 ++++ linux-3.0.4/fs/Kconfig 2011-10-15 22:08:44.000000000 +0200 +@@ -47,7 +47,7 @@ + def_bool n + + 
config EXPORTFS +- tristate ++ def_bool y + + config FILE_LOCKING + bool "Enable POSIX file locking API" if EXPERT +diff -Nur linux-3.0.4.orig/fs/notify/Kconfig linux-3.0.4/fs/notify/Kconfig +--- linux-3.0.4.orig/fs/notify/Kconfig 2011-08-29 22:56:30.000000000 +0200 ++++ linux-3.0.4/fs/notify/Kconfig 2011-10-15 22:02:00.000000000 +0200 +@@ -1,5 +1,5 @@ + config FSNOTIFY +- def_bool n ++ def_bool y + + source "fs/notify/dnotify/Kconfig" + source "fs/notify/inotify/Kconfig" diff --git a/target/linux/patches/3.4.113/gemalto.patch b/target/linux/patches/3.4.113/gemalto.patch new file mode 100644 index 000000000..65f7af1d7 --- /dev/null +++ b/target/linux/patches/3.4.113/gemalto.patch @@ -0,0 +1,11 @@ +diff -Nur linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c linux-2.6.36/drivers/serial/8250/serial_cs.c +--- linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c 2010-10-20 22:30:22.000000000 +0200 ++++ linux-2.6.36/drivers/tty/serial/8250/serial_cs.c 2010-12-13 23:03:40.000000000 +0100 +@@ -794,6 +794,7 @@ + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045), + PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052), ++ PCMCIA_DEVICE_MANF_CARD(0x0157, 0x0100), /* Gemalto SCR */ + PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */ + PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */ + PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae), diff --git a/target/linux/patches/3.4.113/lemote-rfkill.patch b/target/linux/patches/3.4.113/lemote-rfkill.patch new file mode 100644 index 000000000..a61488434 --- /dev/null +++ b/target/linux/patches/3.4.113/lemote-rfkill.patch @@ -0,0 +1,21 @@ +diff -Nur linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c +--- linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-19 00:15:34.000000000 +0100 ++++ linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-27 23:29:46.000000000 +0200 +@@ -22,6 +22,9 @@ + + static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) + { ++#ifdef CONFIG_LEMOTE_MACH2F ++ return 1; ++#else + u8 gpio; + + gpio = rtl818x_ioread8(priv, &priv->map->GPIO0); +@@ -29,6 +32,7 @@ + gpio = rtl818x_ioread8(priv, &priv->map->GPIO1); + + return gpio & priv->rfkill_mask; ++#endif + } + + void rtl8187_rfkill_init(struct ieee80211_hw *hw) diff --git a/target/linux/patches/3.4.113/linux-gcc-check.patch b/target/linux/patches/3.4.113/linux-gcc-check.patch new file mode 100644 index 000000000..7cc381845 --- /dev/null +++ b/target/linux/patches/3.4.113/linux-gcc-check.patch @@ -0,0 +1,18 @@ +diff -Nur linux-2.6.32.orig/arch/mips/include/asm/sgidefs.h linux-2.6.32/arch/mips/include/asm/sgidefs.h +--- linux-2.6.32.orig/arch/mips/include/asm/sgidefs.h 2009-12-03 04:51:21.000000000 +0100 ++++ linux-2.6.32/arch/mips/include/asm/sgidefs.h 2010-02-14 11:49:21.000000000 +0100 +@@ -11,14 +11,6 @@ + #define __ASM_SGIDEFS_H + + /* +- * Using a Linux compiler for building Linux seems logic but not to +- * everybody. +- */ +-#ifndef __linux__ +-#error Use a Linux compiler or give up. 
+-#endif +- +-/* + * Definitions for the ISA levels + * + * With the introduction of MIPS32 / MIPS64 instruction sets definitions diff --git a/target/linux/patches/3.4.113/mips-error.patch b/target/linux/patches/3.4.113/mips-error.patch new file mode 100644 index 000000000..800abc80d --- /dev/null +++ b/target/linux/patches/3.4.113/mips-error.patch @@ -0,0 +1,254 @@ +From c022630633624a75b3b58f43dd3c6cc896a56cff Mon Sep 17 00:00:00 2001 +From: Steven J. Hill +Date: Fri, 06 Jul 2012 19:56:01 +0000 +Subject: MIPS: Refactor 'clear_page' and 'copy_page' functions. + +Remove usage of the '__attribute__((alias("...")))' hack that aliased +to integer arrays containing micro-assembled instructions. This hack +breaks when building a microMIPS kernel. It also makes the code much +easier to understand. + +[ralf@linux-mips.org: Added back export of the clear_page and copy_page +symbols so certain modules will work again. Also fixed build with +CONFIG_SIBYTE_DMA_PAGEOPS enabled.] + +Signed-off-by: Steven J. Hill +Cc: linux-mips@linux-mips.org +Patchwork: https://patchwork.linux-mips.org/patch/3866/ +Acked-by: David Daney +Signed-off-by: Ralf Baechle +--- +diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c +index 57ba13e..3fc1691 100644 +--- a/arch/mips/kernel/mips_ksyms.c ++++ b/arch/mips/kernel/mips_ksyms.c +@@ -5,7 +5,7 @@ + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * +- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle ++ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle + * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. + */ + #include +@@ -35,6 +35,12 @@ EXPORT_SYMBOL(memmove); + EXPORT_SYMBOL(kernel_thread); + + /* ++ * Functions that operate on entire pages. Mostly used by memory management. ++ */ ++EXPORT_SYMBOL(clear_page); ++EXPORT_SYMBOL(copy_page); ++ ++/* + * Userspace access stuff. + */ + EXPORT_SYMBOL(__copy_user); +diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile +index 4aa2028..fd6203f 100644 +--- a/arch/mips/mm/Makefile ++++ b/arch/mips/mm/Makefile +@@ -3,8 +3,8 @@ + # + + obj-y += cache.o dma-default.o extable.o fault.o \ +- gup.o init.o mmap.o page.o tlbex.o \ +- tlbex-fault.o uasm.o ++ gup.o init.o mmap.o page.o page-funcs.o \ ++ tlbex.o tlbex-fault.o uasm.o + + obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o + obj-$(CONFIG_64BIT) += pgtable-64.o +diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S +new file mode 100644 +index 0000000..48a6b38 +--- /dev/null ++++ b/arch/mips/mm/page-funcs.S +@@ -0,0 +1,50 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Micro-assembler generated clear_page/copy_page functions. ++ * ++ * Copyright (C) 2012 MIPS Technologies, Inc. ++ * Copyright (C) 2012 Ralf Baechle ++ */ ++#include ++#include ++ ++#ifdef CONFIG_SIBYTE_DMA_PAGEOPS ++#define cpu_clear_page_function_name clear_page_cpu ++#define cpu_copy_page_function_name copy_page_cpu ++#else ++#define cpu_clear_page_function_name clear_page ++#define cpu_copy_page_function_name copy_page ++#endif ++ ++/* ++ * Maximum sizes: ++ * ++ * R4000 128 bytes S-cache: 0x058 bytes ++ * R4600 v1.7: 0x05c bytes ++ * R4600 v2.0: 0x060 bytes ++ * With prefetching, 16 word strides 0x120 bytes ++ */ ++EXPORT(__clear_page_start) ++LEAF(cpu_clear_page_function_name) ++1: j 1b /* Dummy, will be replaced. 
*/ ++ .space 288 ++END(cpu_clear_page_function_name) ++EXPORT(__clear_page_end) ++ ++/* ++ * Maximum sizes: ++ * ++ * R4000 128 bytes S-cache: 0x11c bytes ++ * R4600 v1.7: 0x080 bytes ++ * R4600 v2.0: 0x07c bytes ++ * With prefetching, 16 word strides 0x540 bytes ++ */ ++EXPORT(__copy_page_start) ++LEAF(cpu_copy_page_function_name) ++1: j 1b /* Dummy, will be replaced. */ ++ .space 1344 ++END(cpu_copy_page_function_name) ++EXPORT(__copy_page_end) +diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c +index cc0b626..98f530e 100644 +--- a/arch/mips/mm/page.c ++++ b/arch/mips/mm/page.c +@@ -6,6 +6,7 @@ + * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2008 Thiemo Seufer ++ * Copyright (C) 2012 MIPS Technologies, Inc. + */ + #include + #include +@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5]; + #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) + #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) + +-/* +- * Maximum sizes: +- * +- * R4000 128 bytes S-cache: 0x058 bytes +- * R4600 v1.7: 0x05c bytes +- * R4600 v2.0: 0x060 bytes +- * With prefetching, 16 word strides 0x120 bytes +- */ +- +-static u32 clear_page_array[0x120 / 4]; +- +-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS +-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array"))); +-#else +-void clear_page(void *page) __attribute__((alias("clear_page_array"))); +-#endif +- +-EXPORT_SYMBOL(clear_page); +- +-/* +- * Maximum sizes: +- * +- * R4000 128 bytes S-cache: 0x11c bytes +- * R4600 v1.7: 0x080 bytes +- * R4600 v2.0: 0x07c bytes +- * With prefetching, 16 word strides 0x540 bytes +- */ +-static u32 copy_page_array[0x540 / 4]; +- +-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS +-void +-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array"))); +-#else +-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array"))); +-#endif +- +-EXPORT_SYMBOL(copy_page); +- +- + static int pref_bias_clear_store __cpuinitdata; + static int pref_bias_copy_load __cpuinitdata; + static int pref_bias_copy_store __cpuinitdata; +@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) + } + } + ++extern u32 __clear_page_start; ++extern u32 __clear_page_end; ++extern u32 __copy_page_start; ++extern u32 __copy_page_end; ++ + void __cpuinit build_clear_page(void) + { + int off; +- u32 *buf = (u32 *)&clear_page_array; ++ u32 *buf = &__clear_page_start; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + int i; +@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void) + uasm_i_jr(&buf, RA); + uasm_i_nop(&buf); + +- BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array)); ++ BUG_ON(buf > &__clear_page_end); + + uasm_resolve_relocs(relocs, labels); + + pr_debug("Synthesized clear page handler (%u instructions).\n", +- (u32)(buf - clear_page_array)); ++ (u32)(buf - &__clear_page_start)); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); +- for (i = 0; i < (buf - clear_page_array); i++) +- pr_debug("\t.word 0x%08x\n", clear_page_array[i]); ++ for (i = 0; i < (buf - &__clear_page_start); i++) ++ pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]); + pr_debug("\t.set pop\n"); + } + +@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) + void __cpuinit build_copy_page(void) + { + int off; +- u32 *buf = (u32 *)©_page_array; ++ u32 *buf = &__copy_page_start; + struct uasm_label *l = labels; + struct 
uasm_reloc *r = relocs; + int i; +@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void) + uasm_i_jr(&buf, RA); + uasm_i_nop(&buf); + +- BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array)); ++ BUG_ON(buf > &__copy_page_end); + + uasm_resolve_relocs(relocs, labels); + + pr_debug("Synthesized copy page handler (%u instructions).\n", +- (u32)(buf - copy_page_array)); ++ (u32)(buf - &__copy_page_start)); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); +- for (i = 0; i < (buf - copy_page_array); i++) +- pr_debug("\t.word 0x%08x\n", copy_page_array[i]); ++ for (i = 0; i < (buf - &__copy_page_start); i++) ++ pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]); + pr_debug("\t.set pop\n"); + } + + #ifdef CONFIG_SIBYTE_DMA_PAGEOPS ++extern void clear_page_cpu(void *page); ++extern void copy_page_cpu(void *to, void *from); + + /* + * Pad descriptors to cacheline, since each is exclusively owned by a +-- +cgit v0.9.2 + diff --git a/target/linux/patches/3.4.113/mkpiggy.patch b/target/linux/patches/3.4.113/mkpiggy.patch new file mode 100644 index 000000000..d4e815cd2 --- /dev/null +++ b/target/linux/patches/3.4.113/mkpiggy.patch @@ -0,0 +1,28 @@ +diff -Nur linux-3.4.4.orig/arch/x86/boot/compressed/mkpiggy.c linux-3.4.4/arch/x86/boot/compressed/mkpiggy.c +--- linux-3.4.4.orig/arch/x86/boot/compressed/mkpiggy.c 2012-06-22 20:37:50.000000000 +0200 ++++ linux-3.4.4/arch/x86/boot/compressed/mkpiggy.c 2012-07-03 09:48:02.000000000 +0200 +@@ -29,7 +29,14 @@ + #include + #include + #include +-#include ++ ++static uint32_t getle32(const void *p) ++{ ++ const uint8_t *cp = p; ++ ++ return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) + ++ ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24); ++} + + int main(int argc, char *argv[]) + { +@@ -62,7 +69,7 @@ + } + + ilen = ftell(f); +- olen = get_unaligned_le32(&olen); ++ olen = getle32(&olen); + fclose(f); + + /* diff --git a/target/linux/patches/3.4.113/module-alloc-size-check.patch b/target/linux/patches/3.4.113/module-alloc-size-check.patch new file mode 100644 index 000000000..a792ac60a --- /dev/null +++ b/target/linux/patches/3.4.113/module-alloc-size-check.patch @@ -0,0 +1,21 @@ +https://dev.openwrt.org/browser/trunk/target/linux/generic/patches-3.3/340-module_alloc_size_check.patch?rev=30813 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -2322,12 +2322,15 @@ + + void * __weak module_alloc(unsigned long size) + { +- return size == 0 ? NULL : vmalloc_exec(size); ++ return vmalloc_exec(size); + } + + static void *module_alloc_update_bounds(unsigned long size) + { +- void *ret = module_alloc(size); ++ void *ret = NULL; ++ ++ if (size) ++ ret = module_alloc(size); + + if (ret) { + mutex_lock(&module_mutex); diff --git a/target/linux/patches/3.4.113/nds32.patch b/target/linux/patches/3.4.113/nds32.patch new file mode 100644 index 000000000..d0da6f7b3 --- /dev/null +++ b/target/linux/patches/3.4.113/nds32.patch @@ -0,0 +1,72132 @@ +diff -Nur linux-3.4.110.orig/arch/nds32/boot/install.sh linux-3.4.110/arch/nds32/boot/install.sh +--- linux-3.4.110.orig/arch/nds32/boot/install.sh 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/boot/install.sh 2016-04-07 10:20:50.862077930 +0200 +@@ -0,0 +1,47 @@ ++#!/bin/sh ++# ++# arch/nds32/boot/install.sh ++# ++# This file is subject to the terms and conditions of the GNU General Public ++# License. See the file "COPYING" in the main directory of this archive ++# for more details. 
++# ++# Copyright (C) 1995 by Linus Torvalds ++# Copyright (C) 2009 Andes Technology Corporation ++# ++# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin ++# Adapted from code in arch/i386/boot/install.sh by Russell King ++# ++# "make install" script for arm architecture ++# ++# Arguments: ++# $1 - kernel version ++# $2 - kernel image file ++# $3 - kernel map file ++# $4 - default install path (blank if root directory) ++# ++ ++# User may have a custom install script ++if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi ++if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi ++ ++# Normal install ++echo "Installing normal kernel" ++base=vmlinux ++ ++if [ -f $4/$base-$1 ]; then ++ mv $4/$base-$1 $4/$base-$1.old ++fi ++cat $2 > $4/$base-$1 ++ ++# Install system map file ++if [ -f $4/System.map-$1 ]; then ++ mv $4/System.map-$1 $4/System.map-$1.old ++fi ++cp $3 $4/System.map-$1 ++ ++if [ -x /sbin/loadmap ]; then ++ /sbin/loadmap ++else ++ echo "You have to install it yourself" ++fi +diff -Nur linux-3.4.110.orig/arch/nds32/boot/Makefile linux-3.4.110/arch/nds32/boot/Makefile +--- linux-3.4.110.orig/arch/nds32/boot/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/boot/Makefile 2016-04-07 10:20:50.862077930 +0200 +@@ -0,0 +1,22 @@ ++# ++# arch/nds32/boot/Makefile ++# ++# This file is subject to the terms and conditions of the GNU General Public ++# License. See the file "COPYING" in the main directory of this archive ++# for more details. ++# ++# Copyright (C) 1995-2002 Russell King ++# Copyright (C) 2009 Andes Technology Corporation ++# ++ ++targets := Image ++ ++$(obj)/Image: vmlinux FORCE ++ $(call if_changed,objcopy) ++ @echo ' Kernel: $@ is ready' ++ ++.PHONY: FORCE ++install: $(obj)/Image ++ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ ++ $(obj)/Image System.map "$(INSTALL_PATH)" ++ +diff -Nur linux-3.4.110.orig/arch/nds32/common/dmabounce.c linux-3.4.110/arch/nds32/common/dmabounce.c +--- linux-3.4.110.orig/arch/nds32/common/dmabounce.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/common/dmabounce.c 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,672 @@ ++/* ++ * arch/nds32/common/dmabounce.c ++ * ++ * Special dma_{map/unmap/dma_sync}_* routines for systems that have ++ * limited DMA windows. These functions utilize bounce buffers to ++ * copy data to/from buffers located outside the DMA region. This ++ * only works for systems in which DMA memory is at the bottom of ++ * RAM and the remainder of memory is at the top an the DMA memory ++ * can be marked as ZONE_DMA. Anything beyond that such as discontigous ++ * DMA windows will require custom implementations that reserve memory ++ * areas at early bootup. ++ * ++ * Original version by Brad Parker (brad@heeltoe.com) ++ * Re-written by Christopher Hoover ++ * Made generic by Deepak Saxena ++ * ++ * Copyright (C) 2002 Hewlett Packard Company. ++ * Copyright (C) 2004 MontaVista Software, Inc. ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#undef DEBUG ++ ++#undef STATS ++#ifdef STATS ++#define DO_STATS(X) do { X ; } while (0) ++#else ++#define DO_STATS(X) do { } while (0) ++#endif ++ ++/* ************************************************** */ ++ ++struct safe_buffer { ++ struct list_head node; ++ ++ /* original request */ ++ void *ptr; ++ size_t size; ++ int direction; ++ ++ /* safe buffer info */ ++ struct dma_pool *pool; ++ void *safe; ++ dma_addr_t safe_dma_addr; ++}; ++ ++struct dmabounce_device_info { ++ struct list_head node; ++ ++ struct device *dev; ++ struct dma_pool *small_buffer_pool; ++ struct dma_pool *large_buffer_pool; ++ struct list_head safe_buffers; ++ unsigned long small_buffer_size, large_buffer_size; ++#ifdef STATS ++ unsigned long sbp_allocs; ++ unsigned long lbp_allocs; ++ unsigned long total_allocs; ++ unsigned long map_op_count; ++ unsigned long bounce_count; ++#endif ++}; ++ ++static LIST_HEAD(dmabounce_devs); ++ ++#ifdef STATS ++static void print_alloc_stats(struct dmabounce_device_info *device_info) ++{ ++ printk(KERN_INFO ++ "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n", ++ device_info->dev->bus_id, ++ device_info->sbp_allocs, device_info->lbp_allocs, ++ device_info->total_allocs - device_info->sbp_allocs - ++ device_info->lbp_allocs, device_info->total_allocs); ++} ++#endif ++ ++/* find the given device in the dmabounce device list */ ++static inline struct dmabounce_device_info *find_dmabounce_dev(struct device ++ *dev) ++{ ++ struct list_head *entry; ++ ++ list_for_each(entry, &dmabounce_devs) { ++ struct dmabounce_device_info *d = ++ list_entry(entry, struct dmabounce_device_info, node); ++ ++ if (d->dev == dev) ++ return d; ++ } ++ return NULL; ++} ++ ++/* allocate a 'safe' buffer and keep track of it */ ++static inline struct safe_buffer *alloc_safe_buffer(struct dmabounce_device_info ++ *device_info, void *ptr, ++ size_t size, ++ enum dma_data_direction dir) ++{ ++ struct safe_buffer *buf; ++ struct dma_pool *pool; ++ struct device *dev = device_info->dev; ++ void *safe; ++ dma_addr_t safe_dma_addr; ++ ++ dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", __func__, ptr, size, dir); ++ ++ DO_STATS(device_info->total_allocs++); ++ ++ buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); ++ if (buf == NULL) { ++ dev_warn(dev, "%s: kmalloc failed\n", __func__); ++ return NULL; ++ } ++ ++ if (size <= device_info->small_buffer_size) { ++ pool = device_info->small_buffer_pool; ++ safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); ++ ++ DO_STATS(device_info->sbp_allocs++); ++ } else if (size <= device_info->large_buffer_size) { ++ pool = device_info->large_buffer_pool; ++ safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); ++ ++ DO_STATS(device_info->lbp_allocs++); ++ } else { ++ pool = NULL; ++ safe = ++ dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC); ++ } ++ ++ if (safe == NULL) { ++ dev_warn(device_info->dev, ++ "%s: could not alloc dma memory (size=%d)\n", ++ __func__, size); ++ kfree(buf); ++ return NULL; ++ } ++#ifdef STATS ++ if (device_info->total_allocs % 1000 == 0) ++ print_alloc_stats(device_info); ++#endif ++ ++ buf->ptr = ptr; ++ buf->size = size; ++ buf->direction = dir; ++ buf->pool = pool; ++ buf->safe = safe; ++ buf->safe_dma_addr = safe_dma_addr; ++ ++ list_add(&buf->node, &device_info->safe_buffers); ++ ++ return buf; ++} ++ ++/* determine if a buffer is from our "safe" pool */ ++static inline struct safe_buffer *find_safe_buffer(struct 
dmabounce_device_info ++ *device_info, ++ dma_addr_t safe_dma_addr) ++{ ++ struct list_head *entry; ++ ++ list_for_each(entry, &device_info->safe_buffers) { ++ struct safe_buffer *b = ++ list_entry(entry, struct safe_buffer, node); ++ ++ if (b->safe_dma_addr == safe_dma_addr) ++ return b; ++ } ++ ++ return NULL; ++} ++ ++static inline void ++free_safe_buffer(struct dmabounce_device_info *device_info, ++ struct safe_buffer *buf) ++{ ++ dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf); ++ ++ list_del(&buf->node); ++ ++ if (buf->pool) ++ dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr); ++ else ++ dma_free_coherent(device_info->dev, buf->size, buf->safe, ++ buf->safe_dma_addr); ++ ++ kfree(buf); ++} ++ ++/* ************************************************** */ ++ ++#ifdef STATS ++ ++static void print_map_stats(struct dmabounce_device_info *device_info) ++{ ++ printk(KERN_INFO ++ "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n", ++ device_info->dev->bus_id, ++ device_info->map_op_count, device_info->bounce_count); ++} ++#endif ++ ++static inline dma_addr_t ++map_single(struct device *dev, void *ptr, size_t size, ++ enum dma_data_direction dir) ++{ ++ struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); ++ dma_addr_t dma_addr; ++ int needs_bounce = 0; ++ ++ if (device_info) ++ DO_STATS(device_info->map_op_count++); ++ ++ dma_addr = virt_to_dma(dev, ptr); ++ ++ if (dev->dma_mask) { ++ unsigned long mask = *dev->dma_mask; ++ unsigned long limit; ++ ++ limit = (mask + 1) & ~mask; ++ if (limit && size > limit) { ++ dev_err(dev, "DMA mapping too big (requested %#x " ++ "mask %#Lx)\n", size, *dev->dma_mask); ++ return ~0; ++ } ++ ++ /* ++ * Figure out if we need to bounce from the DMA mask. ++ */ ++ needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; ++ } ++ ++ if (device_info ++ && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { ++ struct safe_buffer *buf; ++ ++ buf = alloc_safe_buffer(device_info, ptr, size, dir); ++ if (buf == 0) { ++ dev_err(dev, "%s: unable to map unsafe buffer %p!\n", ++ __func__, ptr); ++ return 0; ++ } ++ ++ dev_dbg(dev, ++ "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", ++ __func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr), ++ buf->safe, (void *)buf->safe_dma_addr); ++ ++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL)) { ++ dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", ++ __func__, ptr, buf->safe, size); ++ memcpy(buf->safe, ptr, size); ++ } ++ consistent_sync(buf->safe, size, dir); ++ ++ dma_addr = buf->safe_dma_addr; ++ } else { ++ consistent_sync(ptr, size, dir); ++ } ++ ++ return dma_addr; ++} ++ ++static inline void ++unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); ++ struct safe_buffer *buf = NULL; ++ ++ /* ++ * Trying to unmap an invalid mapping ++ */ ++ if (dma_addr == ~0) { ++ dev_err(dev, "Trying to unmap invalid mapping\n"); ++ return; ++ } ++ ++ if (device_info) ++ buf = find_safe_buffer(device_info, dma_addr); ++ ++ if (buf) { ++ BUG_ON(buf->size != size); ++ ++ dev_dbg(dev, ++ "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", ++ __func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr), ++ buf->safe, (void *)buf->safe_dma_addr); ++ ++ DO_STATS(device_info->bounce_count++); ++ ++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)) { ++ dev_dbg(dev, ++ "%s: copy back safe %p to unsafe %p size %d\n", ++ __func__, buf->safe, buf->ptr, size); 
++ memcpy(buf->ptr, buf->safe, size); ++ } ++ free_safe_buffer(device_info, buf); ++ } ++} ++ ++static inline void ++sync_single(struct device *dev, dma_addr_t dma_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); ++ struct safe_buffer *buf = NULL; ++ ++ if (device_info) ++ buf = find_safe_buffer(device_info, dma_addr); ++ ++ if (buf) { ++ /* ++ * Both of these checks from original code need to be ++ * commented out b/c some drivers rely on the following: ++ * ++ * 1) Drivers may map a large chunk of memory into DMA space ++ * but only sync a small portion of it. Good example is ++ * allocating a large buffer, mapping it, and then ++ * breaking it up into small descriptors. No point ++ * in syncing the whole buffer if you only have to ++ * touch one descriptor. ++ * ++ * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are ++ * usually only synced in one dir at a time. ++ * ++ * See drivers/net/eepro100.c for examples of both cases. ++ * ++ * -ds ++ * ++ * BUG_ON(buf->size != size); ++ * BUG_ON(buf->direction != dir); ++ */ ++ ++ dev_dbg(dev, ++ "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", ++ __func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr), ++ buf->safe, (void *)buf->safe_dma_addr); ++ ++ DO_STATS(device_info->bounce_count++); ++ ++ switch (dir) { ++ case DMA_FROM_DEVICE: ++ dev_dbg(dev, ++ "%s: copy back safe %p to unsafe %p size %d\n", ++ __func__, buf->safe, buf->ptr, size); ++ memcpy(buf->ptr, buf->safe, size); ++ break; ++ case DMA_TO_DEVICE: ++ dev_dbg(dev, ++ "%s: copy out unsafe %p to safe %p, size %d\n", ++ __func__, buf->ptr, buf->safe, size); ++ memcpy(buf->safe, buf->ptr, size); ++ break; ++ case DMA_BIDIRECTIONAL: ++ BUG(); /* is this allowed? what does it mean? */ ++ default: ++ BUG(); ++ } ++ consistent_sync(buf->safe, size, dir); ++ } else { ++ consistent_sync(dma_to_virt(dev, dma_addr), size, dir); ++ } ++} ++ ++/* ************************************************** */ ++ ++/* ++ * see if a buffer address is in an 'unsafe' range. if it is ++ * allocate a 'safe' buffer and copy the unsafe buffer into it. ++ * substitute the safe buffer for the unsafe one. ++ * (basically move the buffer from an unsafe area to a safe one) ++ */ ++dma_addr_t ++dma_map_single(struct device *dev, void *ptr, size_t size, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ dma_addr_t dma_addr; ++ ++ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", __func__, ptr, size, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ dma_addr = map_single(dev, ptr, size, dir); ++ ++ local_irq_restore(flags); ++ ++ return dma_addr; ++} ++ ++/* ++ * see if a mapped address was really a "safe" buffer and if so, copy ++ * the data from the safe buffer back to the unsafe buffer and free up ++ * the safe buffer. 
(basically return things back to the way they ++ * should be) ++ */ ++ ++void ++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ ++ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ++ __func__, (void *)dma_addr, size, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ unmap_single(dev, dma_addr, size, dir); ++ ++ local_irq_restore(flags); ++} ++ ++int ++dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ int i; ++ ++ dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ for (i = 0; i < nents; i++, sg++) { ++ struct page *page = sg->page; ++ unsigned int offset = sg->offset; ++ unsigned int length = sg->length; ++ void *ptr = page_address(page) + offset; ++ ++ sg->dma_address = map_single(dev, ptr, length, dir); ++ } ++ ++ local_irq_restore(flags); ++ ++ return nents; ++} ++ ++void ++dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ int i; ++ ++ dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ for (i = 0; i < nents; i++, sg++) { ++ dma_addr_t dma_addr = sg->dma_address; ++ unsigned int length = sg->length; ++ ++ unmap_single(dev, dma_addr, length, dir); ++ } ++ ++ local_irq_restore(flags); ++} ++ ++void ++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ ++ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ++ __func__, (void *)dma_addr, size, dir); ++ ++ local_irq_save(flags); ++ ++ sync_single(dev, dma_addr, size, dir); ++ ++ local_irq_restore(flags); ++} ++ ++void ++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ ++ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", ++ __func__, (void *)dma_addr, size, dir); ++ ++ local_irq_save(flags); ++ ++ sync_single(dev, dma_addr, size, dir); ++ ++ local_irq_restore(flags); ++} ++ ++void ++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ int i; ++ ++ dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ for (i = 0; i < nents; i++, sg++) { ++ dma_addr_t dma_addr = sg->dma_address; ++ unsigned int length = sg->length; ++ ++ sync_single(dev, dma_addr, length, dir); ++ } ++ ++ local_irq_restore(flags); ++} ++ ++void ++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ unsigned long flags; ++ int i; ++ ++ dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir); ++ ++ BUG_ON(dir == DMA_NONE); ++ ++ local_irq_save(flags); ++ ++ for (i = 0; i < nents; i++, sg++) { ++ dma_addr_t dma_addr = sg->dma_address; ++ unsigned int length = sg->length; ++ ++ sync_single(dev, dma_addr, length, dir); ++ } ++ ++ local_irq_restore(flags); ++} ++ ++int ++dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, ++ unsigned long large_buffer_size) ++{ ++ struct dmabounce_device_info *device_info; ++ ++ device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); ++ if (!device_info) { ++ printk(KERN_ERR ++ 
"Could not allocated dmabounce_device_info for %s", ++ dev->bus_id); ++ return -ENOMEM; ++ } ++ ++ device_info->small_buffer_pool = ++ dma_pool_create("small_dmabounce_pool", ++ dev, small_buffer_size, 0 /* byte alignment */ , ++ 0 /* no page-crossing issues */ ); ++ if (!device_info->small_buffer_pool) { ++ printk(KERN_ERR ++ "dmabounce: could not allocate small DMA pool for %s\n", ++ dev->bus_id); ++ kfree(device_info); ++ return -ENOMEM; ++ } ++ ++ if (large_buffer_size) { ++ device_info->large_buffer_pool = ++ dma_pool_create("large_dmabounce_pool", ++ dev, ++ large_buffer_size, 0 /* byte alignment */ , ++ 0 /* no page-crossing issues */ ); ++ if (!device_info->large_buffer_pool) { ++ printk(KERN_ERR ++ "dmabounce: could not allocate large DMA pool for %s\n", ++ dev->bus_id); ++ dma_pool_destroy(device_info->small_buffer_pool); ++ ++ return -ENOMEM; ++ } ++ } ++ ++ device_info->dev = dev; ++ device_info->small_buffer_size = small_buffer_size; ++ device_info->large_buffer_size = large_buffer_size; ++ INIT_LIST_HEAD(&device_info->safe_buffers); ++ ++#ifdef STATS ++ device_info->sbp_allocs = 0; ++ device_info->lbp_allocs = 0; ++ device_info->total_allocs = 0; ++ device_info->map_op_count = 0; ++ device_info->bounce_count = 0; ++#endif ++ ++ list_add(&device_info->node, &dmabounce_devs); ++ ++ printk(KERN_INFO "dmabounce: registered device %s on %s bus\n", ++ dev->bus_id, dev->bus->name); ++ ++ return 0; ++} ++ ++void dmabounce_unregister_dev(struct device *dev) ++{ ++ struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); ++ ++ if (!device_info) { ++ printk(KERN_WARNING ++ "%s: Never registered with dmabounce but attempting" ++ "to unregister!\n", dev->bus_id); ++ return; ++ } ++ ++ if (!list_empty(&device_info->safe_buffers)) { ++ printk(KERN_ERR ++ "%s: Removing from dmabounce with pending buffers!\n", ++ dev->bus_id); ++ BUG(); ++ } ++ ++ if (device_info->small_buffer_pool) ++ dma_pool_destroy(device_info->small_buffer_pool); ++ if (device_info->large_buffer_pool) ++ dma_pool_destroy(device_info->large_buffer_pool); ++ ++#ifdef STATS ++ print_alloc_stats(device_info); ++ print_map_stats(device_info); ++#endif ++ ++ list_del(&device_info->node); ++ ++ kfree(device_info); ++ ++ printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n", ++ dev->bus_id, dev->bus->name); ++} ++ ++EXPORT_SYMBOL(dma_map_single); ++EXPORT_SYMBOL(dma_unmap_single); ++EXPORT_SYMBOL(dma_map_sg); ++EXPORT_SYMBOL(dma_unmap_sg); ++EXPORT_SYMBOL(dma_sync_single); ++EXPORT_SYMBOL(dma_sync_sg); ++EXPORT_SYMBOL(dmabounce_register_dev); ++EXPORT_SYMBOL(dmabounce_unregister_dev); ++ ++MODULE_AUTHOR ++ ("Christopher Hoover , Deepak Saxena "); ++MODULE_DESCRIPTION ++ ("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/arch/nds32/common/Makefile linux-3.4.110/arch/nds32/common/Makefile +--- linux-3.4.110.orig/arch/nds32/common/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/common/Makefile 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,6 @@ ++# ++# Makefile for the linux kernel. 
++# ++ ++obj-y += rtctime.o ++obj-$(CONFIG_DMABOUNCE) += dmabounce.o +diff -Nur linux-3.4.110.orig/arch/nds32/common/rtctime.c linux-3.4.110/arch/nds32/common/rtctime.c +--- linux-3.4.110.orig/arch/nds32/common/rtctime.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/common/rtctime.c 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,441 @@ ++/* ++ * linux/arch/nds32/common/rtctime.c ++ * ++ * Copyright (C) 2003 Deep Blue Solutions Ltd. ++ * Based on sa1100-rtc.c, Nils Faerber, CIH, Nicolas Pitre. ++ * Based on rtc.c by Paul Gortmaker ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); ++static struct fasync_struct *rtc_async_queue; ++ ++/* ++ * rtc_lock protects rtc_irq_data ++ */ ++static DEFINE_SPINLOCK(rtc_lock); ++static unsigned long rtc_irq_data; ++ ++/* ++ * rtc_sem protects rtc_inuse and rtc_ops ++ */ ++static DEFINE_MUTEX(rtc_mutex); ++static unsigned long rtc_inuse; ++static struct rtc_ops *rtc_ops; ++ ++#define rtc_epoch 1900UL ++ ++/* ++ * Calculate the next alarm time given the requested alarm time mask ++ * and the current time. ++ */ ++void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, ++ struct rtc_time *alrm) ++{ ++ unsigned long next_time; ++ unsigned long now_time; ++ ++ next->tm_year = now->tm_year; ++ next->tm_mon = now->tm_mon; ++ next->tm_mday = now->tm_mday; ++ next->tm_hour = alrm->tm_hour; ++ next->tm_min = alrm->tm_min; ++ next->tm_sec = alrm->tm_sec; ++ ++ rtc_tm_to_time(now, &now_time); ++ rtc_tm_to_time(next, &next_time); ++ ++ if (next_time < now_time) { ++ /* Advance one day */ ++ next_time += 60 * 60 * 24; ++ rtc_time_to_tm(next_time, next); ++ } ++} ++ ++static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) ++{ ++ memset(tm, 0, sizeof(struct rtc_time)); ++ return ops->read_time(tm); ++} ++ ++static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm) ++{ ++ int ret; ++ ++ ret = rtc_valid_tm(tm); ++ if (ret == 0) ++ ret = ops->set_time(tm); ++ ++ return ret; ++} ++ ++static inline int rtc_arm_read_alarm(struct rtc_ops *ops, ++ struct rtc_wkalrm *alrm) ++{ ++ int ret = -EINVAL; ++ if (ops->read_alarm) { ++ memset(alrm, 0, sizeof(struct rtc_wkalrm)); ++ ret = ops->read_alarm(alrm); ++ } ++ return ret; ++} ++ ++static inline int rtc_arm_set_alarm(struct rtc_ops *ops, ++ struct rtc_wkalrm *alrm) ++{ ++ int ret = -EINVAL; ++ if (ops->set_alarm) ++ ret = ops->set_alarm(alrm); ++ return ret; ++} ++ ++void rtc_update(unsigned long num, unsigned long events) ++{ ++ spin_lock(&rtc_lock); ++ rtc_irq_data = (rtc_irq_data + (num << 8)) | events; ++ spin_unlock(&rtc_lock); ++ ++ wake_up_interruptible(&rtc_wait); ++ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); ++} ++ ++EXPORT_SYMBOL(rtc_update); ++ ++static ssize_t ++rtc_read(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ unsigned long data; ++ ssize_t ret; ++ ++ if (count < sizeof(unsigned long)) ++ return -EINVAL; ++ ++ add_wait_queue(&rtc_wait, &wait); ++ do { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ ++ spin_lock_irq(&rtc_lock); ++ data = rtc_irq_data; ++ rtc_irq_data = 
0; ++ spin_unlock_irq(&rtc_lock); ++ ++ if (data != 0) { ++ ret = 0; ++ break; ++ } ++ if (file->f_flags & O_NONBLOCK) { ++ ret = -EAGAIN; ++ break; ++ } ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ } while (1); ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&rtc_wait, &wait); ++ ++ if (ret == 0) { ++ ret = put_user(data, (unsigned long __user *)buf); ++ if (ret == 0) ++ ret = sizeof(unsigned long); ++ } ++ return ret; ++} ++ ++static unsigned int rtc_poll(struct file *file, poll_table * wait) ++{ ++ unsigned long data; ++ ++ poll_wait(file, &rtc_wait, wait); ++ ++ spin_lock_irq(&rtc_lock); ++ data = rtc_irq_data; ++ spin_unlock_irq(&rtc_lock); ++ ++ return data != 0 ? POLLIN | POLLRDNORM : 0; ++} ++ ++static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct rtc_ops *ops = file->private_data; ++ struct rtc_time tm; ++ struct rtc_wkalrm alrm; ++ void __user *uarg = (void __user *)arg; ++ int ret = -EINVAL; ++ ++ switch (cmd) { ++ case RTC_ALM_READ: ++ ret = rtc_arm_read_alarm(ops, &alrm); ++ if (ret) ++ break; ++ ret = copy_to_user(uarg, &alrm.time, sizeof(tm)); ++ if (ret) ++ ret = -EFAULT; ++ break; ++ ++ case RTC_ALM_SET: ++ ret = copy_from_user(&alrm.time, uarg, sizeof(tm)); ++ if (ret) { ++ ret = -EFAULT; ++ break; ++ } ++ alrm.enabled = 0; ++ alrm.pending = 0; ++ alrm.time.tm_mday = -1; ++ alrm.time.tm_mon = -1; ++ alrm.time.tm_year = -1; ++ alrm.time.tm_wday = -1; ++ alrm.time.tm_yday = -1; ++ alrm.time.tm_isdst = -1; ++ ret = rtc_arm_set_alarm(ops, &alrm); ++ break; ++ ++ case RTC_RD_TIME: ++ ret = rtc_arm_read_time(ops, &tm); ++ if (ret) ++ break; ++ ret = copy_to_user(uarg, &tm, sizeof(tm)); ++ if (ret) ++ ret = -EFAULT; ++ break; ++ ++ case RTC_SET_TIME: ++ if (!capable(CAP_SYS_TIME)) { ++ ret = -EACCES; ++ break; ++ } ++ ret = copy_from_user(&tm, uarg, sizeof(tm)); ++ if (ret) { ++ ret = -EFAULT; ++ break; ++ } ++ ret = rtc_arm_set_time(ops, &tm); ++ break; ++ ++ case RTC_EPOCH_SET: ++#ifndef rtc_epoch ++ /* ++ * There were no RTC clocks before 1900. ++ */ ++ if (arg < 1900) { ++ ret = -EINVAL; ++ break; ++ } ++ if (!capable(CAP_SYS_TIME)) { ++ ret = -EACCES; ++ break; ++ } ++ rtc_epoch = arg; ++ ret = 0; ++#endif ++ break; ++ ++ case RTC_EPOCH_READ: ++ ret = put_user(rtc_epoch, (unsigned long __user *)uarg); ++ break; ++ ++ case RTC_WKALM_SET: ++ ret = copy_from_user(&alrm, uarg, sizeof(alrm)); ++ if (ret) { ++ ret = -EFAULT; ++ break; ++ } ++ ret = rtc_arm_set_alarm(ops, &alrm); ++ break; ++ ++ case RTC_WKALM_RD: ++ ret = rtc_arm_read_alarm(ops, &alrm); ++ if (ret) ++ break; ++ ret = copy_to_user(uarg, &alrm, sizeof(alrm)); ++ if (ret) ++ ret = -EFAULT; ++ break; ++ ++ default: ++ if (ops->ioctl) ++ ret = ops->ioctl(cmd, arg); ++ break; ++ } ++ return ret; ++} ++ ++static int rtc_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ mutex_lock(&rtc_mutex); ++ ++ if (rtc_inuse) { ++ ret = -EBUSY; ++ } else if (!rtc_ops || !try_module_get(rtc_ops->owner)) { ++ ret = -ENODEV; ++ } else { ++ file->private_data = rtc_ops; ++ ++ ret = rtc_ops->open ? 
rtc_ops->open() : 0; ++ if (ret == 0) { ++ spin_lock_irq(&rtc_lock); ++ rtc_irq_data = 0; ++ spin_unlock_irq(&rtc_lock); ++ ++ rtc_inuse = 1; ++ } ++ } ++ mutex_unlock(&rtc_mutex); ++ ++ return ret; ++} ++ ++static int rtc_release(struct inode *inode, struct file *file) ++{ ++ struct rtc_ops *ops = file->private_data; ++ ++ if (ops->release) ++ ops->release(); ++ ++ spin_lock_irq(&rtc_lock); ++ rtc_irq_data = 0; ++ spin_unlock_irq(&rtc_lock); ++ ++ module_put(rtc_ops->owner); ++ rtc_inuse = 0; ++ ++ return 0; ++} ++ ++static int rtc_fasync(int fd, struct file *file, int on) ++{ ++ return fasync_helper(fd, file, on, &rtc_async_queue); ++} ++ ++static struct file_operations rtc_fops = { ++ .owner = THIS_MODULE, ++ .llseek = no_llseek, ++ .read = rtc_read, ++ .poll = rtc_poll, ++ .ioctl = rtc_ioctl, ++ .open = rtc_open, ++ .release = rtc_release, ++ .fasync = rtc_fasync, ++}; ++ ++static struct miscdevice rtc_miscdev = { ++ .minor = RTC_MINOR, ++ .name = "rtc", ++ .fops = &rtc_fops, ++}; ++ ++static int rtc_read_proc(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct rtc_ops *ops = data; ++ struct rtc_wkalrm alrm; ++ struct rtc_time tm; ++ char *p = page; ++ ++ if (rtc_arm_read_time(ops, &tm) == 0) { ++ p += sprintf(p, ++ "rtc_time\t: %02d:%02d:%02d\n" ++ "rtc_date\t: %04d-%02d-%02d\n" ++ "rtc_epoch\t: %04lu\n", ++ tm.tm_hour, tm.tm_min, tm.tm_sec, ++ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, ++ rtc_epoch); ++ } ++ ++ if (rtc_arm_read_alarm(ops, &alrm) == 0) { ++ p += sprintf(p, "alrm_time\t: "); ++ if ((unsigned int)alrm.time.tm_hour <= 24) ++ p += sprintf(p, "%02d:", alrm.time.tm_hour); ++ else ++ p += sprintf(p, "**:"); ++ if ((unsigned int)alrm.time.tm_min <= 59) ++ p += sprintf(p, "%02d:", alrm.time.tm_min); ++ else ++ p += sprintf(p, "**:"); ++ if ((unsigned int)alrm.time.tm_sec <= 59) ++ p += sprintf(p, "%02d\n", alrm.time.tm_sec); ++ else ++ p += sprintf(p, "**\n"); ++ ++ p += sprintf(p, "alrm_date\t: "); ++ if ((unsigned int)alrm.time.tm_year <= 200) ++ p += sprintf(p, "%04d-", alrm.time.tm_year + 1900); ++ else ++ p += sprintf(p, "****-"); ++ if ((unsigned int)alrm.time.tm_mon <= 11) ++ p += sprintf(p, "%02d-", alrm.time.tm_mon + 1); ++ else ++ p += sprintf(p, "**-"); ++ if ((unsigned int)alrm.time.tm_mday <= 31) ++ p += sprintf(p, "%02d\n", alrm.time.tm_mday); ++ else ++ p += sprintf(p, "**\n"); ++ p += sprintf(p, "alrm_wakeup\t: %s\n", ++ alrm.enabled ? "yes" : "no"); ++ p += sprintf(p, "alrm_pending\t: %s\n", ++ alrm.pending ? 
"yes" : "no"); ++ } ++ ++ if (ops->proc) ++ p += ops->proc(p); ++ ++ return p - page; ++} ++ ++int register_rtc(struct rtc_ops *ops) ++{ ++ int ret = -EBUSY; ++ ++ mutex_lock(&rtc_mutex); ++ if (rtc_ops == NULL) { ++ rtc_ops = ops; ++ ++ ret = misc_register(&rtc_miscdev); ++ if (ret == 0) ++ create_proc_read_entry("driver/rtc", 0, NULL, ++ rtc_read_proc, ops); ++ } ++ mutex_unlock(&rtc_mutex); ++ ++ return ret; ++} ++ ++EXPORT_SYMBOL(register_rtc); ++ ++void unregister_rtc(struct rtc_ops *rtc) ++{ ++ mutex_lock(&rtc_mutex); ++ if (rtc == rtc_ops) { ++ remove_proc_entry("driver/rtc", NULL); ++ misc_deregister(&rtc_miscdev); ++ rtc_ops = NULL; ++ } ++ mutex_unlock(&rtc_mutex); ++} ++ ++EXPORT_SYMBOL(unregister_rtc); +diff -Nur linux-3.4.110.orig/arch/nds32/configs/orca_8k_defconfig linux-3.4.110/arch/nds32/configs/orca_8k_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/orca_8k_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/orca_8k_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,132 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_CROSS_COMPILE="nds32le-linux-" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL_SYSCALL=y ++# CONFIG_HOTPLUG is not set ++# CONFIG_SIGNALFD is not set ++CONFIG_EMBEDDED=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_PLATFORM_AHBDMA=y ++CONFIG_PLATFORM_APBDMA=y ++CONFIG_SYS_CLK=30000000 ++CONFIG_UART_CLK=14745600 ++CONFIG_SDRAM_SIZE=0x40000000 ++CONFIG_CPU_CACHE_NONALIASING=y ++CONFIG_ANDES_PAGE_SIZE_8KB=y ++CONFIG_HIGHMEM=y ++CONFIG_HZ_100=y ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=1024M@0x0 initrd=0x1000000,8M earlyprintk=uart8250-32bit,0x99600000 console=ttyS0,38400n8 loglevel=7 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_BRIDGE=y ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_CHAR=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PHYSMAP_COMPAT=y ++CONFIG_MTD_PHYSMAP_START=0x80400000 ++CONFIG_MTD_PHYSMAP_LEN=0x2000000 ++CONFIG_MTD_PHYSMAP_BANKWIDTH=4 ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_NETDEVICES=y ++CONFIG_TUN=y ++CONFIG_FTMAC100=y ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_CPE_TS=y ++# CONFIG_SERIO is not set ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_HW_RANDOM is not set ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_FTGPIO010=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_FTWDT010_WATCHDOG=y ++CONFIG_FB=y ++CONFIG_FB_FTLCDC100=y ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y 
++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_PCM_OSS=y ++# CONFIG_SND_SUPPORT_OLD_API is not set ++# CONFIG_SND_VERBOSE_PROCFS is not set ++CONFIG_SND_FTSSP010=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_FTSDC010=y ++CONFIG_RTC_CLASS=y ++# CONFIG_RTC_HCTOSYS is not set ++CONFIG_RTC_DRV_FTRTC010=y ++CONFIG_EXT2_FS=y ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_TMPFS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=y ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_USE_LEGACY_DNS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_FS=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_SLUB_DEBUG_ON=y ++CONFIG_DEBUG_RT_MUTEXES=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_SG=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_USER=y ++CONFIG_DEBUG_ERRORS=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set +diff -Nur linux-3.4.110.orig/arch/nds32/configs/orca_defconfig linux-3.4.110/arch/nds32/configs/orca_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/orca_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/orca_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,125 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_CROSS_COMPILE="nds32le-linux-" ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_NAMESPACES=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_KALLSYMS_ALL=y ++# CONFIG_HOTPLUG is not set ++CONFIG_EMBEDDED=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_MEASURE_INTERRUPT_LATENCY=y ++CONFIG_PLATFORM_AHBDMA=y ++CONFIG_PLATFORM_APBDMA=y ++CONFIG_SYS_CLK=30000000 ++CONFIG_UART_CLK=14745600 ++CONFIG_SDRAM_SIZE=0x40000000 ++CONFIG_MEMORY_START=0x0 ++# CONFIG_HWZOL is not set ++CONFIG_IVIC=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_PREEMPT=y ++CONFIG_HZ_100=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_BRIDGE=y ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_CHAR=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PHYSMAP_COMPAT=y ++CONFIG_MTD_PHYSMAP_START=0x80400000 ++CONFIG_MTD_PHYSMAP_LEN=0x2000000 ++CONFIG_MTD_PHYSMAP_BANKWIDTH=4 ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_NETDEVICES=y ++CONFIG_TUN=y ++CONFIG_FTMAC100=y ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_CPE_TS=m ++# CONFIG_SERIO is not set ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_HW_RANDOM is not set ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_FTGPIO010=m ++# 
CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_FTWDT010_WATCHDOG=m ++CONFIG_FB=y ++CONFIG_FB_FTLCDC100=y ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_PCM_OSS=y ++# CONFIG_SND_SUPPORT_OLD_API is not set ++# CONFIG_SND_VERBOSE_PROCFS is not set ++CONFIG_SND_FTSSP010=m ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_FTSDC010=y ++CONFIG_RTC_CLASS=y ++# CONFIG_RTC_HCTOSYS is not set ++CONFIG_RTC_DRV_FTRTC010=y ++CONFIG_EXT2_FS=y ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_CONFIGFS_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=y ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_USE_LEGACY_DNS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_HEADERS_CHECK=y ++CONFIG_DEBUG_SECTION_MISMATCH=y ++# CONFIG_SCHED_DEBUG is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_USER=y ++CONFIG_DEBUG_ERRORS=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set +diff -Nur linux-3.4.110.orig/arch/nds32/configs/qemu_defconfig linux-3.4.110/arch/nds32/configs/qemu_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/qemu_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/qemu_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,98 @@ ++CONFIG_EXPERIMENTAL=y ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL_SYSCALL=y ++# CONFIG_HOTPLUG is not set ++# CONFIG_SIGNALFD is not set ++CONFIG_EMBEDDED=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_PLAT_QEMU=y ++CONFIG_SYS_CLK=40000000 ++CONFIG_UART_CLK=14745600 ++CONFIG_SDRAM_SIZE=0x10000000 ++CONFIG_HZ_100=y ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=1024M@0x0 initrd=0x1000000,8M earlyprintk=uart8250-32bit,0x99600000 console=ttyS0,38400n8 loglevel=7 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_FTSDC010=y ++CONFIG_NETDEVICES=y ++CONFIG_FTMAC100=y ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_SERIO is not set ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_HW_RANDOM is not set ++# CONFIG_HWMON is not set ++CONFIG_FB=y ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++CONFIG_EXT2_FS_XIP=y ++CONFIG_EXT3_FS=y ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_EXT3_FS_SECURITY=y 
++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_EXT4_DEBUG=y ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_TMPFS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_FS=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_SLUB_DEBUG_ON=y ++CONFIG_DEBUG_RT_MUTEXES=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_SG=y ++CONFIG_DEBUG_USER=y ++CONFIG_DEBUG_ERRORS=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set +diff -Nur linux-3.4.110.orig/arch/nds32/configs/vep-be_defconfig linux-3.4.110/arch/nds32/configs/vep-be_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/vep-be_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/vep-be_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,777 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.29 ++# Mon Jul 13 11:42:57 2009 ++# ++CONFIG_NDS32=y ++CONFIG_NO_IOPORT=y ++CONFIG_GENERIC_IOMAP=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_FIND_NEXT_BIT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++ ++# ++# RCU Subsystem ++# ++CONFIG_CLASSIC_RCU=y ++# CONFIG_TREE_RCU is not set ++# CONFIG_PREEMPT_RCU is not set ++# CONFIG_TREE_RCU_TRACE is not set ++# CONFIG_PREEMPT_RCU_TRACE is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_GROUP_SCHED is not set ++# CONFIG_CGROUPS is not set ++# CONFIG_SYSFS_DEPRECATED_V2 is not set ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_ANON_INODES=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++CONFIG_SYSCTL_SYSCALL=y ++# CONFIG_KALLSYMS is not set ++# CONFIG_HOTPLUG is not set ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++# CONFIG_ELF_CORE is not set ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_EPOLL=y ++# CONFIG_SIGNALFD is not set ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_SLUB_DEBUG=y ++CONFIG_COMPAT_BRK=y ++# CONFIG_SLAB is not set ++CONFIG_SLUB=y ++# CONFIG_SLOB is not set ++CONFIG_PROFILING=y ++CONFIG_TRACEPOINTS=y ++# CONFIG_MARKERS is not set ++CONFIG_OPROFILE=y ++CONFIG_HAVE_OPROFILE=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_MODULE_FORCE_UNLOAD is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_BLK_DEV_BSG is not set 
++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++# CONFIG_IOSCHED_AS is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++# CONFIG_DEFAULT_CFQ is not set ++CONFIG_DEFAULT_NOOP=y ++CONFIG_DEFAULT_IOSCHED="noop" ++# CONFIG_FREEZER is not set ++ ++# ++# System Type ++# ++# CONFIG_PLAT_FARADAY is not set ++CONFIG_PLAT_VEP=y ++# CONFIG_PLAT_AG101 is not set ++# CONFIG_PLAT_AG102 is not set ++# CONFIG_PLAT_AG101P is not set ++# CONFIG_PLAT_QEMU is not set ++CONFIG_PLATFORM_INTC=y ++ ++# ++# VEP Platform Options ++# ++# CONFIG_CACHE_L2 is not set ++ ++# ++# Common Platform Options ++# ++# CONFIG_PLATFORM_AHBDMA is not set ++# CONFIG_PLATFORM_APBDMA is not set ++CONFIG_SYS_CLK=67737600 ++CONFIG_UART_CLK=36864000 ++CONFIG_SDRAM_SIZE=0x10000000 ++ ++# ++# Processor Features ++# ++CONFIG_CPU_CUSTOM=y ++# CONFIG_FPU is not set ++# CONFIG_AUDIO is not set ++# CONFIG_EVIC is not set ++CONFIG_CPU_CONTEXT_ID=y ++CONFIG_ANDES_PAGE_SIZE_4KB=y ++# CONFIG_ANDES_PAGE_SIZE_8KB is not set ++# CONFIG_KERNEL_SPACE_LARGE_PAGE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_WRITETHROUGH is not set ++# CONFIG_ALIGNMENT_TRAP is not set ++CONFIG_MMU=y ++ ++# ++# Kernel Features ++# ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++CONFIG_FORCE_MAX_ZONEORDER=11 ++CONFIG_HZ_100=y ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++# CONFIG_HZ_1000 is not set ++CONFIG_HZ=100 ++# CONFIG_SCHED_HRTICK is not set ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=64M@0x0 initrd=0x1000000,8M console=ttyS0,38400n8 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++ ++# ++# Power management options ++# ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++# CONFIG_PM is not set ++ ++# ++# Bus options ++# ++# CONFIG_PCI is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++ ++# ++# Executable file formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_HAVE_AOUT is not set ++# CONFIG_BINFMT_MISC is not set ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_COMPAT_NET_DEV_OPS=y ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_IP_MROUTE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# 
CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETWORK_SECMARK is not set ++# CONFIG_NETFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++# CONFIG_DCB is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_PHONET is not set ++CONFIG_WIRELESS=y ++# CONFIG_CFG80211 is not set ++CONFIG_WIRELESS_OLD_REGULATORY=y ++# CONFIG_WIRELESS_EXT is not set ++# CONFIG_LIB80211 is not set ++# CONFIG_MAC80211 is not set ++# CONFIG_WIMAX is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++# CONFIG_MTD is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++# CONFIG_FTSDC010 is not set ++# CONFIG_FTCFC010 is not set ++# CONFIG_BLK_DEV_HD is not set ++# CONFIG_MISC_DEVICES is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++# CONFIG_SCSI is not set ++# CONFIG_SCSI_DMA is not set ++# CONFIG_SCSI_NETLINK is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_TUN is not set ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++# CONFIG_MII is not set ++# CONFIG_SMC91X is not set ++# CONFIG_DNET is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++CONFIG_FTMAC100=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++# CONFIG_WLAN_80211 is not set ++# CONFIG_IWLWIFI_LEDS is not set ++ ++# ++# Enable WiMAX (Networking options) to see the WiMAX drivers ++# ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++# CONFIG_PHONE is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland 
interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++# CONFIG_INPUT_EVDEV is not set ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++# CONFIG_INPUT_TOUCHSCREEN is not set ++# CONFIG_INPUT_MISC is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set ++CONFIG_LEGACY_PTYS=y ++CONFIG_LEGACY_PTY_COUNT=256 ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_HW_RANDOM is not set ++# CONFIG_R3964 is not set ++# CONFIG_GPIO_FTGPIO010 is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++# CONFIG_I2C is not set ++# CONFIG_SPI is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++# CONFIG_HWMON is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++ ++# ++# Sonics Silicon Backplane ++# ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_HTC_PASIC3 is not set ++# CONFIG_MFD_TMIO is not set ++# CONFIG_REGULATOR is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++# CONFIG_VIDEO_DEV is not set ++# CONFIG_DVB_CORE is not set ++# CONFIG_VIDEO_MEDIA is not set ++ ++# ++# Multimedia drivers ++# ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++# CONFIG_FB is not set ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display device support ++# ++# CONFIG_DISPLAY_SUPPORT is not set ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_SOUND is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_MMC is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_RTC_LIB=y ++# CONFIG_RTC_CLASS is not set ++# CONFIG_DMADEVICES is not set ++# CONFIG_UIO is not set ++# CONFIG_STAGING is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=y ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++# CONFIG_EXT3_FS is not set ++# CONFIG_EXT4_FS is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++# CONFIG_BTRFS_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++# CONFIG_QUOTA is not set ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=y ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++# CONFIG_MSDOS_FS is not set ++# 
CONFIG_VFAT_FS is not set ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_CRAMFS is not set ++# CONFIG_SQUASHFS is not set ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++# CONFIG_NFS_V4 is not set ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=y ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=y ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++# CONFIG_PARTITION_ADVANCED is not set ++CONFIG_MSDOS_PARTITION=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++# CONFIG_PRINTK_TIME is not set ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++# CONFIG_MAGIC_SYSRQ is not set ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++# CONFIG_DEBUG_KERNEL is not set ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_SLUB_STATS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_DEBUG_MEMORY_INIT is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_LATENCYTOP is not set 
++CONFIG_SYSCTL_SYSCALL_CHECK=y ++CONFIG_NOP_TRACER=y ++CONFIG_RING_BUFFER=y ++CONFIG_TRACING=y ++ ++# ++# Tracers ++# ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++CONFIG_DEBUG_USER=y ++# CONFIG_CCTL is not set ++CONFIG_ELFCHK_DEFAULT_ENABLE=y ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++# CONFIG_CRYPTO_MANAGER is not set ++# CONFIG_CRYPTO_MANAGER2 is not set ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++# CONFIG_CRYPTO_CBC is not set ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++# CONFIG_CRYPTO_ECB is not set ++# CONFIG_CRYPTO_LRW is not set ++# CONFIG_CRYPTO_PCBC is not set ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++# CONFIG_CRYPTO_MD5 is not set ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++# CONFIG_CRYPTO_AES is not set ++# CONFIG_CRYPTO_ANUBIS is not set ++# CONFIG_CRYPTO_ARC4 is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# CONFIG_CRYPTO_DES is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++# CONFIG_CRYPTO_DEFLATE is not set ++# CONFIG_CRYPTO_LZO is not set ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set ++ ++# ++# Library routines ++# ++CONFIG_GENERIC_FIND_LAST_BIT=y ++# CONFIG_CRC_CCITT is not set ++# CONFIG_CRC16 is not set ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++# CONFIG_CRC32 is not set ++# CONFIG_CRC7 is not set ++# CONFIG_LIBCRC32C is not set ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_DMA=y +diff -Nur linux-3.4.110.orig/arch/nds32/configs/vep-le_defconfig linux-3.4.110/arch/nds32/configs/vep-le_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/vep-le_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/vep-le_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,777 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.29 ++# Mon Jul 13 11:41:30 2009 ++# ++CONFIG_NDS32=y ++CONFIG_NO_IOPORT=y ++CONFIG_GENERIC_IOMAP=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_FIND_NEXT_BIT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y 
++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++ ++# ++# RCU Subsystem ++# ++CONFIG_CLASSIC_RCU=y ++# CONFIG_TREE_RCU is not set ++# CONFIG_PREEMPT_RCU is not set ++# CONFIG_TREE_RCU_TRACE is not set ++# CONFIG_PREEMPT_RCU_TRACE is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_GROUP_SCHED is not set ++# CONFIG_CGROUPS is not set ++# CONFIG_SYSFS_DEPRECATED_V2 is not set ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_ANON_INODES=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++CONFIG_SYSCTL_SYSCALL=y ++# CONFIG_KALLSYMS is not set ++# CONFIG_HOTPLUG is not set ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++# CONFIG_ELF_CORE is not set ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_EPOLL=y ++# CONFIG_SIGNALFD is not set ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_SLUB_DEBUG=y ++CONFIG_COMPAT_BRK=y ++# CONFIG_SLAB is not set ++CONFIG_SLUB=y ++# CONFIG_SLOB is not set ++CONFIG_PROFILING=y ++CONFIG_TRACEPOINTS=y ++# CONFIG_MARKERS is not set ++CONFIG_OPROFILE=y ++CONFIG_HAVE_OPROFILE=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_MODULE_FORCE_UNLOAD is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++# CONFIG_IOSCHED_AS is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++# CONFIG_DEFAULT_CFQ is not set ++CONFIG_DEFAULT_NOOP=y ++CONFIG_DEFAULT_IOSCHED="noop" ++# CONFIG_FREEZER is not set ++ ++# ++# System Type ++# ++# CONFIG_PLAT_FARADAY is not set ++CONFIG_PLAT_VEP=y ++# CONFIG_PLAT_AG101 is not set ++# CONFIG_PLAT_AG102 is not set ++# CONFIG_PLAT_AG101P is not set ++# CONFIG_PLAT_QEMU is not set ++CONFIG_PLATFORM_INTC=y ++ ++# ++# VEP Platform Options ++# ++# CONFIG_CACHE_L2 is not set ++ ++# ++# Common Platform Options ++# ++# CONFIG_PLATFORM_AHBDMA is not set ++# CONFIG_PLATFORM_APBDMA is not set ++CONFIG_SYS_CLK=67737600 ++CONFIG_UART_CLK=36864000 ++CONFIG_SDRAM_SIZE=0x10000000 ++ ++# ++# Processor Features ++# ++CONFIG_CPU_CUSTOM=y ++# CONFIG_FPU is not set ++# CONFIG_AUDIO is not set ++# CONFIG_EVIC is not set ++CONFIG_CPU_CONTEXT_ID=y ++CONFIG_ANDES_PAGE_SIZE_4KB=y ++# CONFIG_ANDES_PAGE_SIZE_8KB is not set ++# CONFIG_KERNEL_SPACE_LARGE_PAGE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_WRITETHROUGH is not set ++# CONFIG_ALIGNMENT_TRAP is 
not set ++CONFIG_MMU=y ++ ++# ++# Kernel Features ++# ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++CONFIG_FORCE_MAX_ZONEORDER=11 ++CONFIG_HZ_100=y ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++# CONFIG_HZ_1000 is not set ++CONFIG_HZ=100 ++# CONFIG_SCHED_HRTICK is not set ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=64M@0x0 initrd=0x1000000,8M console=ttyS0,38400n8 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++ ++# ++# Power management options ++# ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++# CONFIG_PM is not set ++ ++# ++# Bus options ++# ++# CONFIG_PCI is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++ ++# ++# Executable file formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_HAVE_AOUT is not set ++# CONFIG_BINFMT_MISC is not set ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_COMPAT_NET_DEV_OPS=y ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_IP_MROUTE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETWORK_SECMARK is not set ++# CONFIG_NETFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++# CONFIG_DCB is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_PHONET is not set ++CONFIG_WIRELESS=y ++# CONFIG_CFG80211 is not set ++CONFIG_WIRELESS_OLD_REGULATORY=y ++# CONFIG_WIRELESS_EXT is not set ++# CONFIG_LIB80211 is not set ++# CONFIG_MAC80211 is not set ++# CONFIG_WIMAX is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_STANDALONE=y 
++CONFIG_PREVENT_FIRMWARE_BUILD=y ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++# CONFIG_MTD is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++# CONFIG_FTSDC010 is not set ++# CONFIG_FTCFC010 is not set ++# CONFIG_BLK_DEV_HD is not set ++# CONFIG_MISC_DEVICES is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++# CONFIG_SCSI is not set ++# CONFIG_SCSI_DMA is not set ++# CONFIG_SCSI_NETLINK is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_TUN is not set ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++# CONFIG_MII is not set ++# CONFIG_SMC91X is not set ++# CONFIG_DNET is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++CONFIG_FTMAC100=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++# CONFIG_WLAN_80211 is not set ++# CONFIG_IWLWIFI_LEDS is not set ++ ++# ++# Enable WiMAX (Networking options) to see the WiMAX drivers ++# ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++# CONFIG_PHONE is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++# CONFIG_INPUT_EVDEV is not set ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++# CONFIG_INPUT_TOUCHSCREEN is not set ++# CONFIG_INPUT_MISC is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set ++CONFIG_LEGACY_PTYS=y ++CONFIG_LEGACY_PTY_COUNT=256 ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_HW_RANDOM is not set ++# CONFIG_R3964 is not set ++# CONFIG_GPIO_FTGPIO010 is not set ++# CONFIG_RAW_DRIVER is not set 
++# CONFIG_TCG_TPM is not set ++# CONFIG_I2C is not set ++# CONFIG_SPI is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++# CONFIG_HWMON is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++ ++# ++# Sonics Silicon Backplane ++# ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_HTC_PASIC3 is not set ++# CONFIG_MFD_TMIO is not set ++# CONFIG_REGULATOR is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++# CONFIG_VIDEO_DEV is not set ++# CONFIG_DVB_CORE is not set ++# CONFIG_VIDEO_MEDIA is not set ++ ++# ++# Multimedia drivers ++# ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++# CONFIG_FB is not set ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display device support ++# ++# CONFIG_DISPLAY_SUPPORT is not set ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_SOUND is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_MMC is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_RTC_LIB=y ++# CONFIG_RTC_CLASS is not set ++# CONFIG_DMADEVICES is not set ++# CONFIG_UIO is not set ++# CONFIG_STAGING is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=y ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++# CONFIG_EXT3_FS is not set ++# CONFIG_EXT4_FS is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++# CONFIG_BTRFS_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++# CONFIG_QUOTA is not set ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=y ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++# CONFIG_MSDOS_FS is not set ++# CONFIG_VFAT_FS is not set ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_CRAMFS is not set ++# CONFIG_SQUASHFS is not set ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++# CONFIG_NFS_V4 is not set ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=y ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=y ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS 
is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++# CONFIG_PARTITION_ADVANCED is not set ++CONFIG_MSDOS_PARTITION=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++# CONFIG_PRINTK_TIME is not set ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++# CONFIG_MAGIC_SYSRQ is not set ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++# CONFIG_DEBUG_KERNEL is not set ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_SLUB_STATS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_DEBUG_MEMORY_INIT is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_SYSCTL_SYSCALL_CHECK=y ++CONFIG_NOP_TRACER=y ++CONFIG_RING_BUFFER=y ++CONFIG_TRACING=y ++ ++# ++# Tracers ++# ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++CONFIG_DEBUG_USER=y ++# CONFIG_CCTL is not set ++CONFIG_ELFCHK_DEFAULT_ENABLE=y ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++# CONFIG_CRYPTO_MANAGER is not set ++# CONFIG_CRYPTO_MANAGER2 is not set ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++# CONFIG_CRYPTO_CBC is not set ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++# CONFIG_CRYPTO_ECB is not set ++# CONFIG_CRYPTO_LRW is not set ++# CONFIG_CRYPTO_PCBC is not set ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not 
set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++# CONFIG_CRYPTO_MD5 is not set ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++# CONFIG_CRYPTO_AES is not set ++# CONFIG_CRYPTO_ANUBIS is not set ++# CONFIG_CRYPTO_ARC4 is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# CONFIG_CRYPTO_DES is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++# CONFIG_CRYPTO_DEFLATE is not set ++# CONFIG_CRYPTO_LZO is not set ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set ++ ++# ++# Library routines ++# ++CONFIG_GENERIC_FIND_LAST_BIT=y ++# CONFIG_CRC_CCITT is not set ++# CONFIG_CRC16 is not set ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++# CONFIG_CRC32 is not set ++# CONFIG_CRC7 is not set ++# CONFIG_LIBCRC32C is not set ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_DMA=y +diff -Nur linux-3.4.110.orig/arch/nds32/configs/xc5_8k_defconfig linux-3.4.110/arch/nds32/configs/xc5_8k_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/xc5_8k_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/xc5_8k_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,1051 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.29 ++# Fri Oct 2 14:21:05 2009 ++# ++CONFIG_NDS32=y ++# CONFIG_GENERIC_GPIO is not set ++CONFIG_NO_IOPORT=y ++CONFIG_GENERIC_IOMAP=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_FIND_NEXT_BIT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++ ++# ++# RCU Subsystem ++# ++CONFIG_CLASSIC_RCU=y ++# CONFIG_TREE_RCU is not set ++# CONFIG_PREEMPT_RCU is not set ++# CONFIG_TREE_RCU_TRACE is not set ++# CONFIG_PREEMPT_RCU_TRACE is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_GROUP_SCHED is not set ++# CONFIG_CGROUPS is not set ++# CONFIG_SYSFS_DEPRECATED_V2 is not set ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_ANON_INODES=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_KALLSYMS=y ++# CONFIG_KALLSYMS_ALL is not set ++CONFIG_KALLSYMS_EXTRA_PASS=y ++# CONFIG_HOTPLUG is not set 
++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_EPOLL=y ++# CONFIG_SIGNALFD is not set ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_SLUB_DEBUG=y ++CONFIG_COMPAT_BRK=y ++# CONFIG_SLAB is not set ++CONFIG_SLUB=y ++# CONFIG_SLOB is not set ++CONFIG_PROFILING=y ++CONFIG_TRACEPOINTS=y ++# CONFIG_MARKERS is not set ++CONFIG_OPROFILE=y ++CONFIG_HAVE_OPROFILE=y ++# CONFIG_KPROBES is not set ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_MODULE_FORCE_UNLOAD is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++CONFIG_IOSCHED_AS=y ++CONFIG_IOSCHED_DEADLINE=y ++CONFIG_IOSCHED_CFQ=y ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++CONFIG_DEFAULT_CFQ=y ++# CONFIG_DEFAULT_NOOP is not set ++CONFIG_DEFAULT_IOSCHED="cfq" ++# CONFIG_FREEZER is not set ++ ++# ++# System Type ++# ++# CONFIG_PLAT_VEP is not set ++# CONFIG_PLAT_AG101 is not set ++# CONFIG_PLAT_AG102 is not set ++CONFIG_PLAT_AG101P=y ++# CONFIG_PLAT_QEMU is not set ++CONFIG_PLATFORM_INTC=y ++CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y ++ ++# ++# AG101P Platform Options ++# ++ ++# ++# Common Platform Options ++# ++CONFIG_PLATFORM_AHBDMA=y ++CONFIG_PLATFORM_APBDMA=y ++CONFIG_SYS_CLK=40000000 ++CONFIG_UART_CLK=14745600 ++CONFIG_SDRAM_SIZE=0x10000000 ++ ++# ++# Processor Features ++# ++CONFIG_CPU_CUSTOM=y ++# CONFIG_FPU is not set ++# CONFIG_AUDIO is not set ++# CONFIG_EVIC is not set ++CONFIG_CPU_CONTEXT_ID=y ++# CONFIG_CPU_CACHE_NONALIASING is not set ++# CONFIG_ANDES_PAGE_SIZE_4KB is not set ++CONFIG_ANDES_PAGE_SIZE_8KB=y ++CONFIG_KERNEL_SPACE_LARGE_PAGE=y ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_WRITETHROUGH is not set ++# CONFIG_ABI1 is not set ++CONFIG_ALIGNMENT_TRAP=y ++CONFIG_GENERIC_TIME=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_MMU=y ++ ++# ++# Kernel Features ++# ++# CONFIG_NO_HZ is not set ++# CONFIG_HIGH_RES_TIMERS is not set ++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++CONFIG_FORCE_MAX_ZONEORDER=11 ++CONFIG_HZ_100=y ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++# CONFIG_HZ_1000 is not set ++CONFIG_HZ=100 ++# CONFIG_SCHED_HRTICK is not set ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M@0x0 initrd=0x1000000,8M console=ttyS0,38400n8 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++ ++# ++# Power management options ++# ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++# CONFIG_PM is not set ++ ++# ++# Bus options ++# ++# CONFIG_PCI is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++ ++# ++# Executable 
file formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++# CONFIG_HAVE_AOUT is not set ++# CONFIG_BINFMT_MISC is not set ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_COMPAT_NET_DEV_OPS=y ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_IP_MROUTE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETWORK_SECMARK is not set ++# CONFIG_NETFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++# CONFIG_DCB is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_PHONET is not set ++CONFIG_WIRELESS=y ++# CONFIG_CFG80211 is not set ++CONFIG_WIRELESS_OLD_REGULATORY=y ++# CONFIG_WIRELESS_EXT is not set ++# CONFIG_LIB80211 is not set ++# CONFIG_MAC80211 is not set ++# CONFIG_WIMAX is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++CONFIG_MTD=y ++# CONFIG_MTD_DEBUG is not set ++# CONFIG_MTD_CONCAT is not set ++CONFIG_MTD_PARTITIONS=y ++# CONFIG_MTD_TESTS is not set ++# CONFIG_MTD_REDBOOT_PARTS is not set ++CONFIG_MTD_CMDLINE_PARTS=y ++# CONFIG_MTD_AR7_PARTS is not set ++ ++# ++# User Modules And Translation Layers ++# ++CONFIG_MTD_CHAR=y ++CONFIG_MTD_BLKDEVS=y ++CONFIG_MTD_BLOCK=y ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++# CONFIG_MTD_OOPS is not set ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++CONFIG_MTD_CFI=y ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_GEN_PROBE=y ++# CONFIG_MTD_CFI_ADV_OPTIONS is not set ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set ++# 
CONFIG_MTD_MAP_BANK_WIDTH_32 is not set ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++# CONFIG_MTD_CFI_I4 is not set ++# CONFIG_MTD_CFI_I8 is not set ++CONFIG_MTD_CFI_INTELEXT=y ++# CONFIG_MTD_CFI_AMDSTD is not set ++# CONFIG_MTD_CFI_STAA is not set ++CONFIG_MTD_CFI_UTIL=y ++# CONFIG_MTD_RAM is not set ++# CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++ ++# ++# Mapping drivers for chip access ++# ++# CONFIG_MTD_COMPLEX_MAPPINGS is not set ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PHYSMAP_COMPAT=y ++CONFIG_MTD_PHYSMAP_START=0x80400000 ++CONFIG_MTD_PHYSMAP_LEN=0x2000000 ++CONFIG_MTD_PHYSMAP_BANKWIDTH=4 ++# CONFIG_MTD_PLATRAM is not set ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++# CONFIG_MTD_BLOCK2MTD is not set ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOC2000 is not set ++# CONFIG_MTD_DOC2001 is not set ++# CONFIG_MTD_DOC2001PLUS is not set ++# CONFIG_MTD_NAND is not set ++# CONFIG_MTD_ONENAND is not set ++ ++# ++# LPDDR flash memory drivers ++# ++# CONFIG_MTD_LPDDR is not set ++ ++# ++# UBI - Unsorted block images ++# ++# CONFIG_MTD_UBI is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++CONFIG_FTSDC010=y ++# CONFIG_FTCFC010 is not set ++# CONFIG_BLK_DEV_HD is not set ++# CONFIG_MISC_DEVICES is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++# CONFIG_SCSI is not set ++# CONFIG_SCSI_DMA is not set ++# CONFIG_SCSI_NETLINK is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_TUN is not set ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++# CONFIG_MII is not set ++# CONFIG_SMC91X is not set ++# CONFIG_DNET is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++CONFIG_FTMAC100=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++# CONFIG_WLAN_80211 is not set ++# CONFIG_IWLWIFI_LEDS is not set ++ ++# ++# Enable WiMAX (Networking options) to see the WiMAX drivers ++# ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++# CONFIG_PHONE is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# 
CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_CPE_TS=y ++CONFIG_TOUCHSCREEN_CPE_TS_DEJITTER=y ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++# CONFIG_INPUT_MISC is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set ++CONFIG_LEGACY_PTYS=y ++CONFIG_LEGACY_PTY_COUNT=256 ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_HW_RANDOM is not set ++# CONFIG_R3964 is not set ++# CONFIG_GPIO_FTGPIO010_OLD is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++# CONFIG_I2C is not set ++# CONFIG_SPI is not set ++# CONFIG_GPIOLIB is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++# CONFIG_HWMON is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++ ++# ++# Sonics Silicon Backplane ++# ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_HTC_PASIC3 is not set ++# CONFIG_MFD_TMIO is not set ++# CONFIG_REGULATOR is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++# CONFIG_VIDEO_DEV is not set ++# CONFIG_DVB_CORE is not set ++# CONFIG_VIDEO_MEDIA is not set ++ ++# ++# Multimedia drivers ++# ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++CONFIG_FB=y ++# CONFIG_FIRMWARE_EDID is not set ++# CONFIG_FB_DDC is not set ++# CONFIG_FB_BOOT_VESA_SUPPORT is not set ++CONFIG_FB_CFB_FILLRECT=y ++CONFIG_FB_CFB_COPYAREA=y ++CONFIG_FB_CFB_IMAGEBLIT=y ++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set ++# CONFIG_FB_SYS_FILLRECT is not set ++# CONFIG_FB_SYS_COPYAREA is not set ++# CONFIG_FB_SYS_IMAGEBLIT is not set ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++# CONFIG_FB_SYS_FOPS is not set ++# CONFIG_FB_SVGALIB is not set ++# CONFIG_FB_MACMODES is not set ++# CONFIG_FB_BACKLIGHT is not set ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_S1D13XXX is not set ++CONFIG_FB_FTLCDC100=y ++CONFIG_PANEL_AUA036QN01=y ++# CONFIG_PANEL_CH7013A is not set ++# CONFIG_PANEL_AUA070VW04 is not set ++# CONFIG_PANEL_LW500AC9601 is not set ++CONFIG_FFB_MODE_RGB=y ++# CONFIG_FFB_MODE_YUV422 is not set ++# CONFIG_FFB_MODE_YUV420 is not set ++# CONFIG_FFB_MODE_8BPP is not set ++CONFIG_FFB_MODE_16BPP=y ++# CONFIG_FFB_MODE_24BPP is 
not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display device support ++# ++# CONFIG_DISPLAY_SUPPORT is not set ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set ++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set ++# CONFIG_FONTS is not set ++CONFIG_FONT_8x8=y ++CONFIG_FONT_8x16=y ++CONFIG_LOGO=y ++CONFIG_LOGO_LINUX_MONO=y ++CONFIG_LOGO_LINUX_VGA16=y ++CONFIG_LOGO_LINUX_CLUT224=y ++CONFIG_SOUND=y ++CONFIG_SOUND_OSS_CORE=y ++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++# CONFIG_SND_SEQUENCER is not set ++CONFIG_SND_OSSEMUL=y ++# CONFIG_SND_MIXER_OSS is not set ++CONFIG_SND_PCM_OSS=y ++CONFIG_SND_PCM_OSS_PLUGINS=y ++# CONFIG_SND_DYNAMIC_MINORS is not set ++# CONFIG_SND_SUPPORT_OLD_API is not set ++# CONFIG_SND_VERBOSE_PROCFS is not set ++# CONFIG_SND_VERBOSE_PRINTK is not set ++# CONFIG_SND_DEBUG is not set ++CONFIG_SND_DRIVERS=y ++# CONFIG_SND_DUMMY is not set ++# CONFIG_SND_MTPAV is not set ++# CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_MPU401 is not set ++ ++# ++# ALSA NDS32 devices ++# ++CONFIG_SND_FTSSP010=y ++CONFIG_SND_FTSSP010_AC97=y ++# CONFIG_SND_FTSSP010_I2S is not set ++# CONFIG_SND_SOC is not set ++# CONFIG_SOUND_PRIME is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_MMC is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_CLASS=y ++# CONFIG_RTC_HCTOSYS is not set ++# CONFIG_RTC_DEBUG is not set ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not set ++ ++# ++# SPI RTC drivers ++# ++ ++# ++# Platform RTC drivers ++# ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not set ++# CONFIG_RTC_DRV_M48T35 is not set ++# CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++CONFIG_RTC_DRV_FTRTC010=y ++# CONFIG_DMADEVICES is not set ++# CONFIG_UIO is not set ++# CONFIG_STAGING is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=y ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++# CONFIG_EXT3_FS is not set ++# CONFIG_EXT4_FS is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++# CONFIG_BTRFS_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++# CONFIG_QUOTA is not set ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=y ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_SYSFS=y 
++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++CONFIG_JFFS2_FS=y ++CONFIG_JFFS2_FS_DEBUG=0 ++CONFIG_JFFS2_FS_WRITEBUFFER=y ++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set ++# CONFIG_JFFS2_SUMMARY is not set ++# CONFIG_JFFS2_FS_XATTR is not set ++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set ++CONFIG_JFFS2_ZLIB=y ++# CONFIG_JFFS2_LZO is not set ++CONFIG_JFFS2_RTIME=y ++# CONFIG_JFFS2_RUBIN is not set ++# CONFIG_CRAMFS is not set ++# CONFIG_SQUASHFS is not set ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++# CONFIG_NFS_V4 is not set ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=y ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=y ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++# CONFIG_PARTITION_ADVANCED is not set ++CONFIG_MSDOS_PARTITION=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++# CONFIG_PRINTK_TIME is not set ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_SOFTLOCKUP=y ++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 ++CONFIG_SCHED_DEBUG=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_SLUB_STATS is not set ++CONFIG_DEBUG_RT_MUTEXES=y ++CONFIG_DEBUG_PI_LIST=y ++# CONFIG_RT_MUTEX_TESTER is not set ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++CONFIG_DEBUG_SPINLOCK_SLEEP=y ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_DEBUG_KOBJECT is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_WRITECOUNT is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_SG=y ++# CONFIG_DEBUG_NOTIFIERS is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_FAULT_INJECTION is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_SYSCTL_SYSCALL_CHECK=y ++CONFIG_NOP_TRACER=y ++CONFIG_RING_BUFFER=y ++CONFIG_TRACING=y ++ ++# ++# Tracers ++# ++# CONFIG_IRQSOFF_TRACER is not set ++# CONFIG_SCHED_TRACER is not set ++# CONFIG_CONTEXT_SWITCH_TRACER is not set ++# CONFIG_BOOT_TRACER is not set ++# CONFIG_TRACE_BRANCH_PROFILING is not set ++# CONFIG_FTRACE_STARTUP_TEST is not set ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++CONFIG_DEBUG_USER=y ++CONFIG_DEBUG_ERRORS=y ++# CONFIG_DEBUG_LL is not set ++# CONFIG_CCTL is not set ++# CONFIG_ELFCHK_DEFAULT_ENABLE is not set ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++# CONFIG_CRYPTO_MANAGER is not set ++# CONFIG_CRYPTO_MANAGER2 is not set ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++# CONFIG_CRYPTO_CBC is not set ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++# CONFIG_CRYPTO_ECB is not set ++# CONFIG_CRYPTO_LRW is not set ++# CONFIG_CRYPTO_PCBC is not set ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++# CONFIG_CRYPTO_MD5 is not set ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++# CONFIG_CRYPTO_AES is not set ++# CONFIG_CRYPTO_ANUBIS is not set ++# CONFIG_CRYPTO_ARC4 is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# 
CONFIG_CRYPTO_DES is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++# CONFIG_CRYPTO_DEFLATE is not set ++# CONFIG_CRYPTO_LZO is not set ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set ++ ++# ++# Library routines ++# ++CONFIG_BITREVERSE=y ++CONFIG_GENERIC_FIND_LAST_BIT=y ++# CONFIG_CRC_CCITT is not set ++# CONFIG_CRC16 is not set ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++CONFIG_CRC32=y ++# CONFIG_CRC7 is not set ++# CONFIG_LIBCRC32C is not set ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_DMA=y +diff -Nur linux-3.4.110.orig/arch/nds32/configs/xc5_defconfig linux-3.4.110/arch/nds32/configs/xc5_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/xc5_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/xc5_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,117 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_CROSS_COMPILE="nds32le-linux-" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="/home/users/greentime/os/ramdisk/disk-nds32le-linux-glibc-v3 /home/users/greentime/os/ramdisk/disk-nds32le-linux-glibc-v3/dev/initramfs.devnodes" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL_SYSCALL=y ++# CONFIG_HOTPLUG is not set ++# CONFIG_SIGNALFD is not set ++CONFIG_EMBEDDED=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_PLATFORM_AHBDMA=y ++CONFIG_PLATFORM_APBDMA=y ++CONFIG_SYS_CLK=30000000 ++CONFIG_UART_CLK=14745600 ++CONFIG_SDRAM_SIZE=0x8000000 ++CONFIG_HZ_100=y ++CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M@0x0 initrd=0x1000000,8M console=ttyS0,38400n8 earlyprintk=uart8250-32bit,0x99600000 rootfstype=ext2 init=/bin/busybox init -s user_debug=-1" ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_BRIDGE=y ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_CHAR=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PHYSMAP_COMPAT=y ++CONFIG_MTD_PHYSMAP_START=0x80400000 ++CONFIG_MTD_PHYSMAP_LEN=0x2000000 ++CONFIG_MTD_PHYSMAP_BANKWIDTH=4 ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_NETDEVICES=y ++CONFIG_TUN=y ++CONFIG_FTMAC100=y ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_SERIO is not set ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=3 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++# CONFIG_HW_RANDOM is not set ++# CONFIG_HWMON is not set ++CONFIG_FB=y ++# CONFIG_VGA_CONSOLE is not set 
++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_PCM_OSS=y ++# CONFIG_SND_SUPPORT_OLD_API is not set ++# CONFIG_SND_VERBOSE_PROCFS is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_FTSDC010=y ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_CLASS=y ++# CONFIG_RTC_HCTOSYS is not set ++CONFIG_EXT2_FS=y ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_TMPFS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_FS=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_RT_MUTEXES=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_SG=y ++CONFIG_DEBUG_USER=y ++CONFIG_DEBUG_ERRORS=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/amic.h linux-3.4.110/arch/nds32/include/asm/amic.h +--- linux-3.4.110.orig/arch/nds32/include/asm/amic.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/amic.h 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,60 @@ ++/* ++ * linux/arch/nds32/include/asm/amic.h ++ * ++ * Andes Multi-core Interrupt Controller Device Driver Interface ++ * ++ * Copyright (C) 2010 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Shawn Lin 09/02/2010 Created for Andes AG102 platform code. 
++ */ ++ ++#ifndef __NDS32_AMIC_HEADER__ ++#define __NDS32_AMIC_HEADER__ ++ ++#define IRQ_BASE 0x0 ++#define IRQ_TOTAL 0x20 ++#define DEFAULT_MODE 0x000ecc00 ++#define DEFAULT_LEVEL 0xffffffff ++#define AMIC_BASE AMIC_VA_BASE ++#define CONFIG 0x00 ++#define CPUDC 0x04 ++#define CPUID0 0x08 ++#define CPUID1 0x0c ++#define INTTRG 0x20 ++#define INTLVL 0x24 ++#define INTSRC 0x28 ++#define IPITRG 0x40 ++#define IPISTA 0x44 ++#define IPIPTR 0x48 ++#define IPIGST 0x4c ++#define IPIGPT 0x50 ++#define INTEN 0x80 ++#define INTSTA 0x84 ++#define HW0STA 0x88 ++#define HW1STA 0x8c ++#define HW2STA 0x90 ++#define HW3STA 0x94 ++#define HW4STA 0x98 ++#define HW5STA 0x9c ++#define PRITY0 0xa0 ++#define PRITY1 0xa4 ++#define PRITY2 0xa8 ++#define PRITY3 0xac ++ ++#endif /* __NDS32_AMIC_HEADER__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/asm-offsets.h linux-3.4.110/arch/nds32/include/asm/asm-offsets.h +--- linux-3.4.110.orig/arch/nds32/include/asm/asm-offsets.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/asm-offsets.h 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1 @@ ++#include +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/assembler.h linux-3.4.110/arch/nds32/include/asm/assembler.h +--- linux-3.4.110.orig/arch/nds32/include/asm/assembler.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/assembler.h 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,141 @@ ++/* ++ * linux/arch/nds32/include/asm/assembler.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_ASSEMBLER_H__ ++#define __NDS32_ASSEMBLER_H__ ++ ++/* ++ * Stack format ++ */ ++ ++#define S_PAD 0xac /* Padding for stack pointer 8-byte alignment. */ ++#define S_FUCOP_CTL 0xa8 ++#define S_LP 0xa4 ++#define S_GP 0xa0 ++#define S_FP 0x9c ++#define S_R25 0x98 ++#define S_R24 0x94 ++#define S_R23 0x90 ++#define S_R22 0x8c ++#define S_R21 0x88 ++#define S_R20 0x84 ++#define S_R19 0x80 ++#define S_R18 0x7c ++#define S_R17 0x78 ++#define S_R16 0x74 ++#define S_R15 0x70 ++#define S_R14 0x6c ++#define S_R13 0x68 ++#define S_R12 0x64 ++#define S_R11 0x60 ++#define S_R10 0x5c ++#define S_R9 0x58 ++#define S_R8 0x54 ++#define S_R7 0x50 ++#define S_R6 0x4c ++#define S_R5 0x48 ++#define S_R4 0x44 ++#define S_R3 0x40 ++#define S_R2 0x3c ++#define S_R1 0x38 ++#define S_R0 0x34 ++#define S_D1LO 0x30 ++#define S_D1HI 0x2c ++#define S_D0LO 0x28 ++#define S_D0HI 0x24 ++#define S_PP1 0x20 ++#define S_PP0 0x1c ++#define S_PIPC 0x18 ++#define S_PIPSW 0x14 ++#define S_ORIG_R0 0x10 ++#define S_SP 0xc ++#define S_IPC 0x8 ++#define S_IPSW 0x4 ++#define S_IR0 0x0 ++ ++#if !defined(CONFIG_ABI1) ++#define S_OFF 0 ++#else ++#define S_OFF 24 ++#endif ++ ++#ifdef __ASSEMBLY__ ++.macro get_thread_info, rd ++ srli \rd, $sp, #13 ++ slli \rd, \rd, #13 ++.endm ++ ++.macro gie_disable ++ setgie.d ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ isb ++#else ++ dsb ++#endif ++.endm ++ ++.macro gie_enable ++ setgie.e ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ isb ++#else ++ dsb ++#endif ++.endm ++ ++.macro gie_save oldpsw ++ mfsr \oldpsw, $ir0 ++ setgie.d ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ isb ++#else ++ dsb ++#endif ++.endm ++ ++.macro gie_restore oldpsw ++ andi \oldpsw, \oldpsw, #0x1 ++ beqz \oldpsw, 7001f ++ setgie.e ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ isb ++#else ++ dsb ++#endif ++7001: ++.endm ++ ++ ++#define USER(insn, reg, addr, opr) \ ++9999: insn reg, addr, opr; \ ++ .section __ex_table,"a"; \ ++ .align 3; \ ++ .long 9999b, 9001f; \ ++ .previous ++ ++#else /* __ASSEMBLY__ */ ++ ++__asm__ 
(".macro gie_disable\n\t" ++ "setgie.d\n\t" ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ "isb\n\t" ++#else ++ "dsb\n\t" ++#endif ++ ".endm\n\t" ++ ); ++ ++__asm__ (".macro gie_enable\n\t" ++ "setgie.e\n\t" ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ "isb\n\t" ++#else ++ "dsb\n\t" ++#endif ++ ".endm\n\t" ++ ); ++ ++#endif /* !__ASSEMBLY__ */ ++#endif /* __NDS32_ASSEMBLER_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/atomic.h linux-3.4.110/arch/nds32/include/asm/atomic.h +--- linux-3.4.110.orig/arch/nds32/include/asm/atomic.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/atomic.h 2016-04-07 10:20:50.890079013 +0200 +@@ -0,0 +1,267 @@ ++/* ++ * linux/arch/nds32/include/asm/atomic.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_ATOMIC_H__ ++#define __NDS32_ATOMIC_H__ ++ ++#include ++#include ++#include ++ ++#if defined(CONFIG_SMP) || !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ ++#define ATOMIC_INIT(i) { (i) } ++#define atomic_set(v,i) (((v)->counter) = (i)) ++#define atomic_read(v) (*(volatile int *)&(v)->counter) ++ ++static inline void atomic_add(int i, atomic_t *v) ++{ ++ int temp; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tadd\t%0, %0, %2\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (temp) : "r" (&v->counter), "r" (i)); ++} ++ ++static inline void atomic_sub(int i, atomic_t *v) ++{ ++ int temp; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tsub\t%0, %0, %2\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (temp) : "r" (&v->counter), "r" (i)); ++} ++ ++#define atomic_inc(v) atomic_add(1, v) ++#define atomic_dec(v) atomic_sub(1, v) ++ ++static inline int atomic_inc_return(atomic_t *v) ++{ ++ int temp1, temp2; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\taddi\t%1, %1, #1\n" ++ "\tori\t%0, %1, #0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter)); ++ return temp2; ++} ++ ++static inline int atomic_dec_return(atomic_t *v) ++{ ++ int temp1, temp2; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\taddi\t%1, %1, #-1\n" ++ "\tori\t%0, %1, #0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter)); ++ return temp2; ++} ++ ++static inline int atomic_add_return(int i, atomic_t *v) ++{ ++ int temp1, temp2; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\tadd\t%1, %1, %3\n" ++ "\tori\t%0, %1, #0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter), "r" (i)); ++ ++ smp_mb(); ++ ++ return temp2; ++} ++ ++static inline int atomic_sub_return(int i, atomic_t *v) ++{ ++ int temp1, temp2; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\tsub\t%1, %1, %3\n" ++ "\tori\t%0, %1, #0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter), "r" (i)); ++ ++ smp_mb(); ++ ++ return temp2; ++} ++ ++/* ++ * atomic_dec_if_positive - conditionally subtract one from atomic variable ++ * @v: pointer of type atomic_t ++ * ++ * Atomically test @v and subtract one if @v is greater or equal than one. ++ * The function returns the old value of @v minus one. 
++ */ ++static inline int atomic_dec_if_positive(atomic_t * v) ++{ ++ int temp1, temp2, temp3; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%2, [%3+$r15]\n" ++ "\taddi\t%0, %2, #-1\n" ++ "\tslts\t%1, $r15, %2\n" ++ "\tsub\t%2, %2, %1\n" ++ "\tscw\t%2, [%3+$r15]\n" ++ "\tbeqz\t%2, 1b\n" ++ : "=&r" (temp3), "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter)); ++ return temp3; ++} ++ ++ ++#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) ++#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) ++#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) ++#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0) ++ ++ ++static inline int atomic_cmpxchg(atomic_t *v, int old, int new) ++{ ++ int temp1, temp2, temp3; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%0, [%3+$r15]\n" ++ "\tsub\t%2, %0, %5\n" ++ "\tcmovz\t%1, %4, %2\n" ++ "\tcmovn\t%1, %0, %2\n" ++ "\tscw\t%1, [%3+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp3), "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter), "r" (new), "r" (old)); ++ ++ smp_mb(); ++ ++ return temp3; ++} ++ ++static inline int atomic_xchg(atomic_t *v, int new) ++{ ++ int temp1, temp2; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\tori\t%1, %3, #0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter), "r" (new)); ++ return temp2; ++} ++ ++static inline int __atomic_add_unless(atomic_t *v, int a, int u) ++{ ++ int c, old; ++ ++ c = atomic_read(v); ++ while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) ++ c = old; ++ return c; ++} ++ ++static inline int atomic_inc_not_zero(atomic_t *v) ++{ ++ int temp1, temp2; ++ __asm__ __volatile__( ++ "movi\t$r15, #0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\tslt\t%0, $r15, %1\n" ++ "\tadd\t%1, %1, %0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (temp2), "=&r" (temp1) ++ : "r" (&v->counter)); ++ return temp2; ++} ++ ++#define smp_mb__before_atomic_dec() barrier() ++#define smp_mb__after_atomic_dec() barrier() ++#define smp_mb__before_atomic_inc() barrier() ++#define smp_mb__after_atomic_inc() barrier() ++ ++//#include ++#else /* CONFIG_SMP*/ ++ ++ ++ ++#define ATOMIC_INIT(i) { (i) } ++ ++#ifdef __KERNEL__ ++ ++#include ++ ++static inline int atomic_dec_if_positive(atomic_t * v) ++{ ++ unsigned long flags; ++ int result; ++ ++ local_irq_save(flags); ++ result = v->counter; ++ result -= 1; ++ if (result >= 0) ++ v->counter = result; ++ local_irq_restore(flags); ++ return result; ++} ++ ++/* Atomic operations are already serializing on ARM */ ++#define smp_mb__before_atomic_dec() barrier() ++#define smp_mb__after_atomic_dec() barrier() ++#define smp_mb__before_atomic_inc() barrier() ++#define smp_mb__after_atomic_inc() barrier() ++ ++#endif ++ ++#endif /* CONFIG_SMP */ ++ ++#ifndef CONFIG_GENERIC_ATOMIC64 ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_t; ++#define ATOMIC64_INIT(i) { (i) } ++static inline u64 atomic64_add_return(u64 i, atomic64_t *v){ return 0; } ++#endif /* CONFIG_GENERIC_ATOMIC64 */ ++ ++#endif /* __NDS32_ATOMIC_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/audio.h linux-3.4.110/arch/nds32/include/asm/audio.h +--- linux-3.4.110.orig/arch/nds32/include/asm/audio.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/audio.h 2016-04-07 10:20:50.890079013 +0200 +@@ -0,0 +1,65 @@ ++/* ++ * linux/arch/nds32/include/asm/audio.h ++ * 
Copyright (C) 2009 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_AUDIO_H ++#define __ASM_NDS32_AUDIO_H ++ ++#ifndef __ASSEMBLY__ ++#include ++#include ++ ++extern void save_audio(struct task_struct *tsk); ++extern void audioload(struct audio_struct *audioregs); ++extern void save_audio(struct task_struct *__tsk); ++extern void do_audio_context_switch(unsigned long error_code, struct pt_regs *regs); ++ ++#ifdef CONFIG_AUDIO ++ ++#define test_tsk_audio(regs) (regs->NDS32_FUCOP_CTL & FUCOP_CTL_mskAUEN) ++ ++struct task_struct; ++ ++static inline void enable_audio(void) ++{ ++ SET_FUCOP_CTL(GET_FUCOP_CTL() | FUCOP_CTL_mskAUEN); ++} ++ ++static inline void disable_audio(void) ++{ ++ SET_FUCOP_CTL(GET_FUCOP_CTL() & ~FUCOP_CTL_mskAUEN); ++} ++ ++static inline void release_audio(struct pt_regs *regs) ++{ ++ regs->NDS32_FUCOP_CTL &= ~FUCOP_CTL_mskAUEN; ++ regs->NDS32_ipsw &= ~PSW_mskAEN; ++} ++ ++static inline void grab_audio(struct pt_regs *regs) ++{ ++ regs->NDS32_FUCOP_CTL |= FUCOP_CTL_mskAUEN; ++ regs->NDS32_ipsw |= PSW_mskAEN; ++} ++#ifdef CONFIG_UNLAZY_AUDIO ++static inline void unlazy_audio(struct task_struct *tsk) ++{ ++ preempt_disable(); ++ if (test_tsk_audio(task_pt_regs(tsk))) ++ save_audio(tsk); ++ preempt_enable(); ++} ++#endif ++static inline void clear_audio(struct pt_regs *regs) ++{ ++ preempt_disable(); ++ if (test_tsk_audio(regs)) { ++ release_audio(regs); ++ } ++ preempt_enable(); ++} ++#endif /* CONFIG_AUDIO */ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* __ASM_NDS32_AUDIO_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/auxvec.h linux-3.4.110/arch/nds32/include/asm/auxvec.h +--- linux-3.4.110.orig/arch/nds32/include/asm/auxvec.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/auxvec.h 2016-04-07 10:20:50.890079013 +0200 +@@ -0,0 +1,9 @@ ++/* ++ * linux/arch/nds32/include/asm/auxvec.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_AUXVEC_H__ ++#define __NDS32_AUXVEC_H__ ++ ++#endif /*__NDS32_AUXVEC_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/barrier.h linux-3.4.110/arch/nds32/include/asm/barrier.h +--- linux-3.4.110.orig/arch/nds32/include/asm/barrier.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/barrier.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,85 @@ ++/* ============================================================================ ++ * ++ * linux/arch/nds32/include/asm/system.h ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Nov.26.2007 Initial ported by Tom, Shawn, and Steven, ++ * patched for KGDB and refined code by Harry. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#ifndef __ASM_NDS32_BARRIER_H ++#define __ASM_NDS32_BARRIER_H ++ ++#ifdef __KERNEL__ ++ ++/* ++ * This is used to ensure the compiler did actually allocate the register we ++ * asked it for some inline assembly sequences. Apparently we can't trust ++ * the compiler from one version to another so a bit of paranoia won't hurt. ++ * This string is meant to be concatenated with the inline asm string and ++ * will cause compilation to stop on mismatch. 
++ * (for details, see gcc PR 15089) ++ */ ++#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++#include ++ ++struct thread_info; ++struct task_struct; ++ ++struct pt_regs; ++ ++void die(const char *msg, struct pt_regs *regs, int err) ++ __attribute__((noreturn)); ++ ++void die_if_kernel(const char *str, struct pt_regs *regs, int err); ++ ++#include ++ ++#define UDBG_UNDEFINED (1 << 0) ++#define UDBG_SYSCALL (1 << 1) ++#define UDBG_BADABORT (1 << 2) ++#define UDBG_SEGV (1 << 3) ++#define UDBG_BUS (1 << 4) ++ ++extern unsigned int user_debug; ++ ++#define mb() __asm__ __volatile__ ("" : : : "memory") ++#define rmb() mb() ++#define wmb() mb() ++#define read_barrier_depends() do { } while(0) ++#define set_mb(var, value) do { var = value; mb(); } while (0) ++#define set_wmb(var, value) do { var = value; wmb(); } while (0) ++ ++#ifdef CONFIG_SMP ++#define smp_mb() mb() ++#define smp_rmb() rmb() ++#define smp_wmb() wmb() ++#define smp_read_barrier_depends() read_barrier_depends() ++#else ++#define smp_mb() barrier() ++#define smp_rmb() barrier() ++#define smp_wmb() barrier() ++#define smp_read_barrier_depends() do { } while(0) ++#endif ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* __KERNEL__ */ ++ ++#endif //__ASM_NDS32_SYSTEM_H +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/bitfield.h linux-3.4.110/arch/nds32/include/asm/bitfield.h +--- linux-3.4.110.orig/arch/nds32/include/asm/bitfield.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/bitfield.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,895 @@ ++/* ++ * linux/arch/nds32/include/asm/bitfield.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_BITFIELD_H__ ++#define __NDS32_BITFIELD_H__ ++/****************************************************************************** ++ * cr0: CPU_VER (CPU Version Register) ++ *****************************************************************************/ ++#define CPU_VER_offCFGID 0 /* Minor configuration */ ++#define CPU_VER_offREV 16 /* Revision of the CPU version */ ++#define CPU_VER_offCPUID 24 /* Major CPU versions */ ++ ++#define CPU_VER_mskCFGID ( 0xFFFF << CPU_VER_offCFGID ) ++#define CPU_VER_mskREV ( 0xFF << CPU_VER_offREV ) ++#define CPU_VER_mskCPUID ( 0xFF << CPU_VER_offCPUID ) ++ ++/****************************************************************************** ++ * cr1: ICM_CFG (Instruction Cache/Memory Configuration Register) ++ *****************************************************************************/ ++#define ICM_CFG_offISET 0 /* I-cache sets (# of cache lines) per way */ ++#define ICM_CFG_offIWAY 3 /* I-cache ways */ ++#define ICM_CFG_offISZ 6 /* I-cache line size */ ++#define ICM_CFG_offILCK 9 /* I-cache locking support */ ++#define ICM_CFG_offILMB 10 /* On-chip ILM banks */ ++#define ICM_CFG_offBSAV 13 /* ILM base register alignment version */ ++/* bit 15:31 reserved */ ++ ++#define ICM_CFG_mskISET ( 0x7 << ICM_CFG_offISET ) ++#define ICM_CFG_mskIWAY ( 0x7 << ICM_CFG_offIWAY ) ++#define ICM_CFG_mskISZ ( 0x7 << ICM_CFG_offISZ ) ++#define ICM_CFG_mskILCK ( 0x1 << ICM_CFG_offILCK ) ++#define ICM_CFG_mskILMB ( 0x7 << ICM_CFG_offILMB ) ++#define ICM_CFG_mskBSAV ( 0x3 << ICM_CFG_offBSAV ) ++ ++/****************************************************************************** ++ * cr2: DCM_CFG (Data Cache/Memory Configuration Register) ++ *****************************************************************************/ ++#define DCM_CFG_offDSET 0 /* D-cache sets (# of cache 
lines) per way */ ++#define DCM_CFG_offDWAY 3 /* D-cache ways */ ++#define DCM_CFG_offDSZ 6 /* D-cache line size */ ++#define DCM_CFG_offDLCK 9 /* D-cache locking support */ ++#define DCM_CFG_offDLMB 10 /* On-chip DLM banks */ ++#define DCM_CFG_offBSAV 13 /* DLM base register alignment version */ ++/* bit 15:31 reserved */ ++ ++#define DCM_CFG_mskDSET ( 0x7 << DCM_CFG_offDSET ) ++#define DCM_CFG_mskDWAY ( 0x7 << DCM_CFG_offDWAY ) ++#define DCM_CFG_mskDSZ ( 0x7 << DCM_CFG_offDSZ ) ++#define DCM_CFG_mskDLCK ( 0x1 << DCM_CFG_offDLCK ) ++#define DCM_CFG_mskDLMB ( 0x7 << DCM_CFG_offDLMB ) ++#define DCM_CFG_mskBSAV ( 0x3 << DCM_CFG_offBSAV ) ++ ++/****************************************************************************** ++ * cr3: MMU_CFG (MMU Configuration Register) ++ *****************************************************************************/ ++#define MMU_CFG_offMMPS 0 /* Memory management protection scheme */ ++#define MMU_CFG_offMMPV 2 /* Memory management protection version number */ ++#define MMU_CFG_offFATB 7 /* Fully-associative or non-fully-associative TLB */ ++ ++#ifdef CONFIG_FULL_ASSOC ++#define MMU_CFG_offFATBSZ 8 /* TLB entries while using full-associative TLB */ ++#else ++#define MMU_CFG_offTBW 8 /* TLB ways(non-associative) TBS */ ++#define MMU_CFG_offTBS 11 /* TLB sets per way(non-associative) TBS */ ++/* bit 14:14 reserved */ ++#endif ++ ++#define MMU_CFG_offEP8MIN4 15 /* 8KB page supported while minimum page is 4KB */ ++#define MMU_CFG_offfEPSZ 16 /* Extra page size supported */ ++#define MMU_CFG_offTLBLCK 24 /* TLB locking support */ ++#define MMU_CFG_offHPTWK 25 /* Hardware Page Table Walker implemented */ ++#define MMU_CFG_offDE 26 /* Default endian */ ++#define MMU_CFG_offNTPT 27 /* Partitions for non-translated attributes */ ++#define MMU_CFG_offIVTB 28 /* Invisible TLB */ ++#define MMU_CFG_offVLPT 29 /* VLPT for fast TLB fill handling implemented */ ++#define MMU_CFG_offNTME 30 /* Non-translated VA to PA mapping */ ++/* bit 31 reserved */ ++ ++#define MMU_CFG_mskMMPS ( 0x3 << MMU_CFG_offMMPS ) ++#define MMU_CFG_mskMMPV ( 0x1F << MMU_CFG_offMMPV ) ++#define MMU_CFG_mskFATB ( 0x1 << MMU_CFG_offFATB ) ++#ifdef CONFIG_FULL_ASSOC ++#define MMU_CFG_mskFATBSZ ( 0x7f << MMU_CFG_offFATBSZ ) ++#else ++#define MMU_CFG_mskTBW ( 0x7 << MMU_CFG_offTBW ) ++#define MMU_CFG_mskTBS ( 0x7 << MMU_CFG_offTBS ) ++#endif ++#define MMU_CFG_mskEP8MIN4 ( 0x1 << MMU_CFG_offEP8MIN4 ) ++#define MMU_CFG_mskfEPSZ ( 0xFF << MMU_CFG_offfEPSZ ) ++#define MMU_CFG_mskTLBLCK ( 0x1 << MMU_CFG_offTLBLCK ) ++#define MMU_CFG_mskHPTWK ( 0x1 << MMU_CFG_offHPTWK ) ++#define MMU_CFG_mskDE ( 0x1 << MMU_CFG_offDE ) ++#define MMU_CFG_mskNTPT ( 0x1 << MMU_CFG_offNTPT ) ++#define MMU_CFG_mskIVTB ( 0x1 << MMU_CFG_offIVTB ) ++#define MMU_CFG_mskVLPT ( 0x1 << MMU_CFG_offVLPT ) ++#define MMU_CFG_mskNTME ( 0x1 << MMU_CFG_offNTME ) ++ ++/****************************************************************************** ++ * cr4: MSC_CFG (Misc Configuration Register) ++ *****************************************************************************/ ++#define MSC_CFG_offEDM 0 ++#define MSC_CFG_offLMDMA 1 ++#define MSC_CFG_offPFM 2 ++#define MSC_CFG_offHSMP 3 ++#define MSC_CFG_offTRACE 4 ++#define MSC_CFG_offDIV 5 ++#define MSC_CFG_offMAC 6 ++#define MSC_CFG_offAUDIO 7 ++#define MSC_CFG_offL2C 9 ++#define MSC_CFG_offRDREG 10 ++#define MSC_CFG_offADR24 11 ++#define MSC_CFG_offINTLC 12 ++#define MSC_CFG_offBASEV 13 ++#define MSC_CFG_offNOD 16 ++/* bit 13:31 reserved */ ++ ++#define MSC_CFG_mskEDM ( 0x1 << MSC_CFG_offEDM ) 
++#define MSC_CFG_mskLMDMA ( 0x1 << MSC_CFG_offLMDMA ) ++#define MSC_CFG_mskPFM ( 0x1 << MSC_CFG_offPFM ) ++#define MSC_CFG_mskHSMP ( 0x1 << MSC_CFG_offHSMP ) ++#define MSC_CFG_mskTRACE ( 0x1 << MSC_CFG_offTRACE ) ++#define MSC_CFG_mskDIV ( 0x1 << MSC_CFG_offDIV ) ++#define MSC_CFG_mskMAC ( 0x1 << MSC_CFG_offMAC ) ++#define MSC_CFG_mskAUDIO ( 0x3 << MSC_CFG_offAUDIO ) ++#define MSC_CFG_mskL2C ( 0x1 << MSC_CFG_offL2C ) ++#define MSC_CFG_mskRDREG ( 0x1 << MSC_CFG_offRDREG ) ++#define MSC_CFG_mskADR24 ( 0x1 << MSC_CFG_offADR24 ) ++#define MSC_CFG_mskINTLC ( 0x1 << MSC_CFG_offINTLC ) ++#define MSC_CFG_mskBASEV ( 0x7 << MSC_CFG_offBASEV ) ++#define MSC_CFG_mskNOD ( 0x1 << MSC_CFG_offNOD ) ++ ++/****************************************************************************** ++ * cr5: CORE_CFG (Core Identification Register) ++ *****************************************************************************/ ++#define CORE_ID_offCOREID 0 ++/* bit 4:31 reserved */ ++ ++#define CORE_ID_mskCOREID ( 0xF << CORE_ID_offCOREID ) ++ ++/****************************************************************************** ++ * cr6: FUCOP_EXIST (FPU and Coprocessor Existence Configuration Register) ++ *****************************************************************************/ ++#define FUCOP_EXIST_offCP0EX 0 ++#define FUCOP_EXIST_offCP1EX 1 ++#define FUCOP_EXIST_offCP2EX 2 ++#define FUCOP_EXIST_offCP3EX 3 ++#define FUCOP_EXIST_offCP0ISFPU 31 ++ ++#define FUCOP_EXIST_mskCP0EX ( 0x1 << FUCOP_EXIST_offCP0EX ) ++#define FUCOP_EXIST_mskCP1EX ( 0x1 << FUCOP_EXIST_offCP1EX ) ++#define FUCOP_EXIST_mskCP2EX ( 0x1 << FUCOP_EXIST_offCP2EX ) ++#define FUCOP_EXIST_mskCP3EX ( 0x1 << FUCOP_EXIST_offCP3EX ) ++#define FUCOP_EXIST_mskCP0ISFPU ( 0x1 << FUCOP_EXIST_offCP0ISFPU ) ++ ++/****************************************************************************** ++ * ir0: PSW (Processor Status Word Register) ++ * ir1: IPSW (Interruption PSW Register) ++ * ir2: P_IPSW (Previous IPSW Register) ++ *****************************************************************************/ ++#define PSW_offGIE 0 /* Global Interrupt Enable */ ++#define PSW_offINTL 1 /* Interruption Stack Level */ ++#define PSW_offPOM 3 /* Processor Operation Mode, User/Superuser */ ++#define PSW_offBE 5 /* Endianness for data memory access, 1:MSB, 0:LSB */ ++#define PSW_offIT 6 /* Enable instruction address translation */ ++#define PSW_offDT 7 /* Enable data address translation */ ++#define PSW_offIME 8 /* Instruction Machine Error flag */ ++#define PSW_offDME 9 /* Data Machine Error flag */ ++#define PSW_offDEX 10 /* Debug Exception */ ++#define PSW_offHSS 11 /* Hardware Single Stepping */ ++#define PSW_offDRBE 12 /* Device Register Endian Mode */ ++#define PSW_offAEN 13 /* Audio ISA special feature */ ++#define PSW_offWBNA 14 /* Write Back Non-Allocate */ ++#define PSW_offIFCON 15 /* IFC On */ ++#define PSW_offCPL 16 /* Current Priority Level */ ++/* bit 19:31 reserved */ ++ ++#define PSW_mskGIE ( 0x1 << PSW_offGIE ) ++#define PSW_mskINTL ( 0x3 << PSW_offINTL ) ++#define PSW_mskPOM ( 0x3 << PSW_offPOM ) ++#define PSW_mskBE ( 0x1 << PSW_offBE ) ++#define PSW_mskIT ( 0x1 << PSW_offIT ) ++#define PSW_mskDT ( 0x1 << PSW_offDT ) ++#define PSW_mskIME ( 0x1 << PSW_offIME ) ++#define PSW_mskDME ( 0x1 << PSW_offDME ) ++#define PSW_mskDEX ( 0x1 << PSW_offDEX ) ++#define PSW_mskHSS ( 0x1 << PSW_offHSS ) ++#define PSW_mskDRBE ( 0x1 << PSW_offDRBE ) ++#define PSW_mskAEN ( 0x1 << PSW_offAEN ) ++#define PSW_mskWBNA ( 0x1 << PSW_offWBNA ) ++#define PSW_mskIFCON ( 0x1 << PSW_offIFCON ) 
++#define PSW_mskCPL ( 0x7 << PSW_offCPL ) ++ ++#define PSW_SYSTEM ( 1 << PSW_offPOM ) ++#define PSW_INTL_1 ( 1 << PSW_offINTL ) ++#define PSW_CPL_NO ( 0 << PSW_offCPL ) ++#define PSW_CPL_ANY ( 7 << PSW_offCPL ) ++/****************************************************************************** ++ * ir3: IVB (Interruption Vector Base Register) ++ *****************************************************************************/ ++/* bit 0:12 reserved */ ++#define IVB_offNIVIC 1 /* Number of input for IVIC Controller */ ++#define IVB_offIVIC_VER 11 /* IVIC Version */ ++#define IVB_offEVIC 13 /* External Vector Interrupt Controller mode */ ++#define IVB_offESZ 14 /* Size of each vector entry */ ++#define IVB_offIVBASE 16 /* BasePA of interrupt vector table */ ++ ++#define IVB_mskNIVIC ( 0x7 << IVB_offNIVIC ) ++#define IVB_mskIVIC_VER ( 0x3 << IVB_offIVIC_VER ) ++#define IVB_mskEVIC ( 0x1 << IVB_offEVIC ) ++#define IVB_mskESZ ( 0x3 << IVB_offESZ ) ++#define IVB_mskIVBASE ( 0xFFFF << IVB_offIVBASE ) ++ ++/****************************************************************************** ++ * ir4: EVA (Exception Virtual Address Register) ++ * ir5: P_EVA (Previous EVA Register) ++ *****************************************************************************/ ++ ++ /* This register contains the VA that causes the exception */ ++ ++/****************************************************************************** ++ * ir6: ITYPE (Interruption Type Register) ++ * ir7: P_ITYPE (Previous ITYPE Register) ++ *****************************************************************************/ ++#define ITYPE_offETYPE 0 /* Exception Type */ ++#define ITYPE_offINST 4 /* Exception caused by insn fetch or data access */ ++/* bit 5:15 reserved */ ++#define ITYPE_offSWID 16 /* SWID of debugging exception */ ++/* bit 31:31 reserved */ ++ ++#define ITYPE_mskETYPE ( 0xF << ITYPE_offETYPE ) ++#define ITYPE_mskINST ( 0x1 << ITYPE_offINST ) ++#define ITYPE_mskSWID ( 0x7FFF << ITYPE_offSWID ) ++ ++/* Additional definitions for ITYPE register */ ++#define ITYPE_offSTYPE 16 /* Arithmetic Sub Type */ ++#define ITYPE_offCPID 20 /* Co-Processor ID which generate the exception */ ++ ++#define ITYPE_mskSTYPE ( 0xF << ITYPE_offSTYPE ) ++#define ITYPE_mskCPID ( 0x3 << ITYPE_offCPID ) ++ ++/****************************************************************************** ++ * ir8: MERR (Machine Error Log Register) ++ *****************************************************************************/ ++/* bit 0:30 reserved */ ++#define MERR_offBUSERR 31 /* Bus error caused by a load insn */ ++ ++#define MERR_mskBUSERR ( 0x1 << MERR_offBUSERR ) ++ ++/****************************************************************************** ++ * ir9: IPC (Interruption Program Counter Register) ++ * ir10: P_IPC (Previous IPC Register) ++ * ir11: OIPC (Overflow Interruption Program Counter Register) ++ *****************************************************************************/ ++ ++ /* This is the shadow stack register of the Program Counter */ ++ ++/****************************************************************************** ++ * ir12: P_P0 (Previous P0 Register) ++ * ir13: P_P1 (Previous P1 Register) ++ *****************************************************************************/ ++ ++ /* These are shadow registers of $p0 and $p1 */ ++ ++/****************************************************************************** ++ * ir14: INT_MASK (Interruption Masking Register) ++ *****************************************************************************/ ++#define 
INT_MASK_offH0IM 0 /* Hardware Interrupt 0 Mask bit */ ++#define INT_MASK_offH1IM 1 /* Hardware Interrupt 1 Mask bit */ ++#define INT_MASK_offH2IM 2 /* Hardware Interrupt 2 Mask bit */ ++#define INT_MASK_offH3IM 3 /* Hardware Interrupt 3 Mask bit */ ++#define INT_MASK_offH4IM 4 /* Hardware Interrupt 4 Mask bit */ ++#define INT_MASK_offH5IM 5 /* Hardware Interrupt 5 Mask bit */ ++/* bit 6:15 reserved */ ++#define INT_MASK_offSIM 16 /* Software Interrupt Mask bit */ ++/* bit 17:29 reserved */ ++#define INT_MASK_offIDIVZE 30 /* Enable detection for Divide-By-Zero */ ++#define INT_MASK_offDSSIM 31 /* Default Single Stepping Interruption Mask */ ++ ++#define INT_MASK_mskH0IM ( 0x1 << INT_MASK_offH0IM ) ++#define INT_MASK_mskH1IM ( 0x1 << INT_MASK_offH1IM ) ++#define INT_MASK_mskH2IM ( 0x1 << INT_MASK_offH2IM ) ++#define INT_MASK_mskH3IM ( 0x1 << INT_MASK_offH3IM ) ++#define INT_MASK_mskH4IM ( 0x1 << INT_MASK_offH4IM ) ++#define INT_MASK_mskH5IM ( 0x1 << INT_MASK_offH5IM ) ++#define INT_MASK_mskSIM ( 0x1 << INT_MASK_offSIM ) ++#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) ++#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) ++ ++/****************************************************************************** ++ * ir15: INT_PEND (Interrupt Pending Register) ++ *****************************************************************************/ ++#define INT_PEND_offH0I 0 /* Hardware Interrupt 0 pending bit */ ++#define INT_PEND_offH1I 1 /* Hardware Interrupt 1 pending bit */ ++#define INT_PEND_offH2I 2 /* Hardware Interrupt 2 pending bit */ ++#define INT_PEND_offH3I 3 /* Hardware Interrupt 3 pending bit */ ++#define INT_PEND_offH4I 4 /* Hardware Interrupt 4 pending bit */ ++#define INT_PEND_offH5I 5 /* Hardware Interrupt 5 pending bit */ ++ ++#define INT_PEND_offCIPL 0 /* Current Interrupt Priority Level */ ++ ++/* bit 6:15 reserved */ ++#define INT_PEND_offSWI 16 /* Software Interrupt pending bit */ ++/* bit 17:31 reserved */ ++ ++#define INT_PEND_mskH0I ( 0x1 << INT_PEND_offH0I ) ++#define INT_PEND_mskH1I ( 0x1 << INT_PEND_offH1I ) ++#define INT_PEND_mskH2I ( 0x1 << INT_PEND_offH2I ) ++#define INT_PEND_mskH3I ( 0x1 << INT_PEND_offH3I ) ++#define INT_PEND_mskH4I ( 0x1 << INT_PEND_offH4I ) ++#define INT_PEND_mskH5I ( 0x1 << INT_PEND_offH5I ) ++#define INT_PEND_mskCIPL ( 0x1 << INT_PEND_offCIPL ) ++#define INT_PEND_mskSWI ( 0x1 << INT_PEND_offSWI ) ++ ++/****************************************************************************** ++ * mr0: MMU_CTL (MMU Control Register) ++ *****************************************************************************/ ++#define MMU_CTL_offD 0 /* Default minimum page size */ ++#define MMU_CTL_offNTC0 1 /* Non-Translated Cachebility of partition 0 */ ++#define MMU_CTL_offNTC1 3 /* Non-Translated Cachebility of partition 1 */ ++#define MMU_CTL_offNTC2 5 /* Non-Translated Cachebility of partition 2 */ ++#define MMU_CTL_offNTC3 7 /* Non-Translated Cachebility of partition 3 */ ++#define MMU_CTL_offTBALCK 9 /* TLB all-lock resolution scheme */ ++#define MMU_CTL_offMPZIU 10 /* Multiple Page Size In Use bit */ ++#define MMU_CTL_offNTM0 11 /* Non-Translated VA to PA of partition 0 */ ++#define MMU_CTL_offNTM1 13 /* Non-Translated VA to PA of partition 1 */ ++#define MMU_CTL_offNTM2 15 /* Non-Translated VA to PA of partition 2 */ ++#define MMU_CTL_offNTM3 17 /* Non-Translated VA to PA of partition 3 */ ++/* bit 19:31 reserved */ ++ ++#define MMU_CTL_mskD ( 0x1 << MMU_CTL_offD ) ++#define MMU_CTL_mskNTC0 ( 0x3 << MMU_CTL_offNTC0 ) ++#define MMU_CTL_mskNTC1 ( 0x3 << 
MMU_CTL_offNTC1 ) ++#define MMU_CTL_mskNTC2 ( 0x3 << MMU_CTL_offNTC2 ) ++#define MMU_CTL_mskNTC3 ( 0x3 << MMU_CTL_offNTC3 ) ++#define MMU_CTL_mskTBALCK ( 0x1 << MMU_CTL_offTBALCK ) ++#define MMU_CTL_mskMPZIU ( 0x1 << MMU_CTL_offMPZIU ) ++#define MMU_CTL_mskNTM0 ( 0x3 << MMU_CTL_offNTM0 ) ++#define MMU_CTL_mskNTM1 ( 0x3 << MMU_CTL_offNTM1 ) ++#define MMU_CTL_mskNTM2 ( 0x3 << MMU_CTL_offNTM2 ) ++#define MMU_CTL_mskNTM3 ( 0x3 << MMU_CTL_offNTM3 ) ++ ++/****************************************************************************** ++ * mr1: L1_PPTB (L1 Physical Page Table Base Register) ++ *****************************************************************************/ ++#define L1_PPTB_offNV 0 /* Enable Hardware Page Table Walker (HPTWK) */ ++/* bit 1:11 reserved */ ++#define L1_PPTB_offBASE 12 /* First level physical page table base address */ ++ ++#define L1_PPTB_mskNV ( 0x1 << L1_PPTB_offNV ) ++#define L1_PPTB_mskBASE ( 0xFFFFF << L1_PPTB_offBASE ) ++ ++/****************************************************************************** ++ * mr2: TLB_VPN (TLB Access VPN Register) ++ *****************************************************************************/ ++/* bit 0:11 reserved */ ++#define TLB_VPN_offVPN 12 /* Virtual Page Number */ ++ ++#define TLB_VPN_mskVPN ( 0xFFFFF << TLB_VPN_offVPN ) ++ ++/****************************************************************************** ++ * mr3: TLB_DATA (TLB Access Data Register) ++ *****************************************************************************/ ++#define TLB_DATA_offV 0 /* PTE is valid and present */ ++#define TLB_DATA_offM 1 /* Page read/write access privilege */ ++#define TLB_DATA_offD 4 /* Dirty bit */ ++#define TLB_DATA_offX 5 /* Executable bit */ ++#define TLB_DATA_offA 6 /* Access bit */ ++#define TLB_DATA_offG 7 /* Global page (shared across contexts) */ ++#define TLB_DATA_offC 8 /* Cacheability atribute */ ++/* bit 11:11 reserved */ ++#define TLB_DATA_offPPN 12 /* Phisical Page Number */ ++ ++#define TLB_DATA_mskV ( 0x1 << TLB_DATA_offV ) ++#define TLB_DATA_mskM ( 0x7 << TLB_DATA_offM ) ++#define TLB_DATA_mskD ( 0x1 << TLB_DATA_offD ) ++#define TLB_DATA_mskX ( 0x1 << TLB_DATA_offX ) ++#define TLB_DATA_mskA ( 0x1 << TLB_DATA_offA ) ++#define TLB_DATA_mskG ( 0x1 << TLB_DATA_offG ) ++#define TLB_DATA_mskC ( 0x7 << TLB_DATA_offC ) ++#define TLB_DATA_mskPPN ( 0xFFFFF << TLB_DATA_offPPN ) ++ ++/****************************************************************************** ++ * mr4: TLB_MISC (TLB Access Misc Register) ++ *****************************************************************************/ ++#define TLB_MISC_offACC_PSZ 0 /* Page size of a PTE entry */ ++#define TLB_MISC_offCID 4 /* Context id */ ++/* bit 13:31 reserved */ ++ ++#define TLB_MISC_mskACC_PSZ ( 0xF << TLB_MISC_offACC_PSZ ) ++#define TLB_MISC_mskCID ( 0x1FF << TLB_MISC_offCID ) ++ ++/****************************************************************************** ++ * mr5: VLPT_IDX (Virtual Linear Page Table Index Register) ++ *****************************************************************************/ ++#define VLPT_IDX_offZERO 0 /* Always 0 */ ++#define VLPT_IDX_offEVPN 2 /* Exception Virtual Page Number */ ++#define VLPT_IDX_offVLPTB 22 /* Base VA of VLPT */ ++ ++#define VLPT_IDX_mskZERO ( 0x3 << VLPT_IDX_offZERO ) ++#define VLPT_IDX_mskEVPN ( 0xFFFFF << VLPT_IDX_offEVPN ) ++#define VLPT_IDX_mskVLPTB ( 0x3FF << VLPT_IDX_offVLPTB ) ++ ++/****************************************************************************** ++ * mr6: ILMB (Instruction Local Memory Base 
Register) ++ *****************************************************************************/ ++#define ILMB_offIEN 0 /* Enable ILM */ ++#define ILMB_offILMSZ 1 /* Size of ILM */ ++/* bit 5:19 reserved */ ++#define ILMB_offIBPA 20 /* Base PA of ILM */ ++ ++#define ILMB_mskIEN ( 0x1 << ILMB_offIEN ) ++#define ILMB_mskILMSZ ( 0xF << ILMB_offILMSZ ) ++#define ILMB_mskIBPA ( 0xFFF << ILMB_offIBPA ) ++ ++/****************************************************************************** ++ * mr7: DLMB (Data Local Memory Base Register) ++ *****************************************************************************/ ++#define DLMB_offDEN 0 /* Enable DLM */ ++#define DLMB_offDLMSZ 1 /* Size of DLM */ ++#define DLMB_offDBM 5 /* Enable Double-Buffer Mode for DLM */ ++#define DLMB_offDBB 6 /* Double-buffer bank which can be accessed by the processor */ ++/* bit 7:19 reserved */ ++#define DLMB_offDBPA 20 /* Base PA of DLM */ ++ ++#define DLMB_mskDEN ( 0x1 << DLMB_offDEN ) ++#define DLMB_mskDLMSZ ( 0xF << DLMB_offDLMSZ ) ++#define DLMB_mskDBM ( 0x1 << DLMB_offDBM ) ++#define DLMB_mskDBB ( 0x1 << DLMB_offDBB ) ++#define DLMB_mskDBPA ( 0xFFF << DLMB_offDBPA ) ++ ++/****************************************************************************** ++ * mr8: CACHE_CTL (Cache Control Register) ++ *****************************************************************************/ ++#define CACHE_CTL_offIC_EN 0 /* Enable I-cache */ ++#define CACHE_CTL_offDC_EN 1 /* Enable D-cache */ ++#define CACHE_CTL_offICALCK 2 /* I-cache all-lock resolution scheme */ ++#define CACHE_CTL_offDCALCK 3 /* D-cache all-lock resolution scheme */ ++#define CACHE_CTL_offDCCWF 4 /* Enable D-cache Critical Word Forwarding */ ++#define CACHE_CTL_offDCPMW 5 /* Enable D-cache concurrent miss and write-back processing */ ++/* bit 6:31 reserved */ ++ ++#define CACHE_CTL_mskIC_EN ( 0x1 << CACHE_CTL_offIC_EN ) ++#define CACHE_CTL_mskDC_EN ( 0x1 << CACHE_CTL_offDC_EN ) ++#define CACHE_CTL_mskICALCK ( 0x1 << CACHE_CTL_offICALCK ) ++#define CACHE_CTL_mskDCALCK ( 0x1 << CACHE_CTL_offDCALCK ) ++#define CACHE_CTL_mskDCCWF ( 0x1 << CACHE_CTL_offDCCWF ) ++#define CACHE_CTL_mskDCPMW ( 0x1 << CACHE_CTL_offDCPMW ) ++ ++/****************************************************************************** ++ * mr9: HSMP_SADDR (High Speed Memory Port Starting Address) ++ *****************************************************************************/ ++#define HSMP_SADDR_offEN 0 /* Enable control bit for the High Speed Memory port */ ++/* bit 1:19 reserved */ ++ ++#define HSMP_SADDR_offRANGE 1 /* Denote the address range (only defined in HSMP v2 ) */ ++#define HSMP_SADDR_offSADDR 20 /* Starting base PA of the High Speed Memory Port region */ ++ ++#define HSMP_SADDR_mskEN ( 0x1 << HSMP_SADDR_offEN ) ++#define HSMP_SADDR_mskRANGE ( 0xFFF << HSMP_SADDR_offRANGE ) ++#define HSMP_SADDR_mskSADDR ( 0xFFF << HSMP_SADDR_offSADDR ) ++ ++/****************************************************************************** ++ * mr10: HSMP_EADDR (High Speed Memory Port Ending Address) ++ *****************************************************************************/ ++/* bit 0:19 reserved */ ++#define HSMP_EADDR_offEADDR 20 ++ ++#define HSMP_EADDR_mskEADDR ( 0xFFF << HSMP_EADDR_offEADDR ) ++ ++/****************************************************************************** ++ * dr0+(n*5): BPCn (n=0-7) (Breakpoint Control Register) ++ *****************************************************************************/ ++#define BPC_offWP 0 /* Configuration of BPAn */ ++#define BPC_offEL 1 /* Enable 
BPAn */ ++#define BPC_offS 2 /* Data address comparison for a store instruction */ ++#define BPC_offP 3 /* Compared data address is PA */ ++#define BPC_offC 4 /* CID value is compared with the BPCIDn register */ ++#define BPC_offBE0 5 /* Enable byte mask for the comparison with register */ ++#define BPC_offBE1 6 /* Enable byte mask for the comparison with register */ ++#define BPC_offBE2 7 /* Enable byte mask for the comparison with register */ ++#define BPC_offBE3 8 /* Enable byte mask for the comparison with register */ ++#define BPC_offT 9 /* Enable breakpoint Embedded Tracer triggering operation */ ++ ++#define BPC_mskWP ( 0x1 << BPC_offWP ) ++#define BPC_mskEL ( 0x1 << BPC_offEL ) ++#define BPC_mskS ( 0x1 << BPC_offS ) ++#define BPC_mskP ( 0x1 << BPC_offP ) ++#define BPC_mskC ( 0x1 << BPC_offC ) ++#define BPC_mskBE0 ( 0x1 << BPC_offBE0 ) ++#define BPC_mskBE1 ( 0x1 << BPC_offBE1 ) ++#define BPC_mskBE2 ( 0x1 << BPC_offBE2 ) ++#define BPC_mskBE3 ( 0x1 << BPC_offBE3 ) ++#define BPC_mskT ( 0x1 << BPC_offT ) ++ ++/****************************************************************************** ++ * dr1+(n*5): BPAn (n=0-7) (Breakpoint Address Register) ++ *****************************************************************************/ ++ ++ /* These registers contain break point address */ ++ ++/****************************************************************************** ++ * dr2+(n*5): BPAMn (n=0-7) (Breakpoint Address Mask Register) ++ *****************************************************************************/ ++ ++ /* These registerd contain the address comparison mask for the BPAn register */ ++ ++/****************************************************************************** ++ * dr3+(n*5): BPVn (n=0-7) Breakpoint Data Value Register ++ *****************************************************************************/ ++ ++ /* The BPVn register contains the data value that will be compared with the ++ * incoming load/store data value */ ++ ++/****************************************************************************** ++ * dr4+(n*5): BPCIDn (n=0-7) (Breakpoint Context ID Register) ++ *****************************************************************************/ ++#define BPCID_offCID 0 /* CID that will be compared with a process's CID */ ++/* bit 9:31 reserved */ ++ ++#define BPCID_mskCID ( 0x1FF << BPCID_offCID ) ++ ++/****************************************************************************** ++ * dr40: EDM_CFG (EDM Configuration Register) ++ *****************************************************************************/ ++#define EDM_CFG_offBC 0 /* Number of hardware breakpoint sets implemented */ ++#define EDM_CFG_offDIMU 3 /* Debug Instruction Memory Unit exists */ ++/* bit 4:15 reserved */ ++#define EDM_CFG_offVER 16 /* EDM version */ ++ ++#define EDM_CFG_mskBC ( 0x7 << EDM_CFG_offBC ) ++#define EDM_CFG_mskDIMU ( 0x1 << EDM_CFG_offDIMU ) ++#define EDM_CFG_mskVER ( 0xFFFF << EDM_CFG_offVER ) ++ ++/****************************************************************************** ++ * dr41: EDMSW (EDM Status Word) ++ *****************************************************************************/ ++#define EDMSW_offWV 0 /* Write Valid */ ++#define EDMSW_offRV 1 /* Read Valid */ ++#define EDMSW_offDE 2 /* Debug exception has occurred for this core */ ++/* bit 3:31 reserved */ ++ ++#define EDMSW_mskWV ( 0x1 << EDMSW_offWV ) ++#define EDMSW_mskRV ( 0x1 << EDMSW_offRV ) ++#define EDMSW_mskDE ( 0x1 << EDMSW_offDE ) ++ 
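The control-register definitions above (CPU_VER through EDMSW) all follow one convention: an _offFIELD constant gives the bit position of a field, and the matching _mskFIELD constant gives the same field's mask already shifted into place. A minimal user-space sketch of how such pairs are normally consumed follows; the psw_get_pom()/psw_set_pom() helpers and the main() driver are illustrative and not part of the patch, only the two PSW constants are copied from the definitions above.

#include <stdio.h>

/* Two off/msk pairs copied from the PSW definitions above. */
#define PSW_offGIE 0                       /* Global Interrupt Enable  */
#define PSW_offPOM 3                       /* Processor Operation Mode */
#define PSW_mskGIE ( 0x1 << PSW_offGIE )
#define PSW_mskPOM ( 0x3 << PSW_offPOM )

/* Read a field: mask it out, then shift it down to bit 0. */
static unsigned int psw_get_pom(unsigned int psw)
{
	return (psw & PSW_mskPOM) >> PSW_offPOM;
}

/* Update a field: clear the old bits, then OR in the shifted new value. */
static unsigned int psw_set_pom(unsigned int psw, unsigned int pom)
{
	return (psw & ~PSW_mskPOM) | ((pom << PSW_offPOM) & PSW_mskPOM);
}

int main(void)
{
	unsigned int psw = PSW_mskGIE;  /* GIE set, POM field zero */

	psw = psw_set_pom(psw, 1);
	printf("POM=%u, GIE=%u\n", psw_get_pom(psw), psw & PSW_mskGIE);
	return 0;
}

The same mask-then-shift pattern applies to every off/msk pair in this header, which is why the file defines both forms for each field.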
++/****************************************************************************** ++ * dr42: EDM_CTL (EDM Control Register) ++ *****************************************************************************/ ++/* bit 0:30 reserved */ ++#define EDM_CTL_offV3_EDM_MODE 6 /* EDM compatibility control bit */ ++#define EDM_CTL_offDEH_SEL 31 /* Controls where debug exception is directed to */ ++ ++#define EDM_CTL_mskV3_EDM_MODE ( 0x1 << EDM_CTL_offV3_EDM_MODE ) ++#define EDM_CTL_mskDEH_SEL ( 0x1 << EDM_CTL_offDEH_SEL ) ++ ++/****************************************************************************** ++ * dr43: EDM_DTR (EDM Data Transfer Register) ++ *****************************************************************************/ ++ ++ /* This is used to exchange data between the embedded EDM logic ++ * and the processor core */ ++ ++/****************************************************************************** ++ * dr44: BPMTC (Breakpoint Match Trigger Counter Register) ++ *****************************************************************************/ ++#define BPMTC_offBPMTC 0 /* Breakpoint match trigger counter value */ ++/* bit 16:31 reserved */ ++ ++#define BPMTC_mskBPMTC ( 0xFFFF << BPMTC_offBPMTC ) ++ ++/****************************************************************************** ++ * dr45: DIMBR (Debug Instruction Memory Base Register) ++ *****************************************************************************/ ++/* bit 0:11 reserved */ ++#define DIMBR_offDIMB 12 /* Base address of the Debug Instruction Memory (DIM) */ ++#define DIMBR_mskDIMB ( 0xFFFFF << DIMBR_offDIMB ) ++ ++/****************************************************************************** ++ * dr46: TECR0(Trigger Event Control register 0) ++ * dr47: TECR1 (Trigger Event Control register 1) ++ *****************************************************************************/ ++#define TECR_offBP 0 /* Controld which BP is used as a trigger source */ ++#define TECR_offNMI 8 /* Use NMI as a trigger source */ ++#define TECR_offHWINT 9 /* Corresponding interrupt is used as a trigger source */ ++#define TECR_offEVIC 15 /* Enable HWINT as a trigger source in EVIC mode */ ++#define TECR_offSYS 16 /* Enable SYSCALL instruction as a trigger source */ ++#define TECR_offDBG 17 /* Enable debug exception as a trigger source */ ++#define TECR_offMRE 18 /* Enable MMU related exception as a trigger source */ ++#define TECR_offE 19 /* An exception is used as a trigger source */ ++/* bit 20:30 reserved */ ++#define TECR_offL 31 /* Link/Cascade TECR0 trigger event to TECR1 trigger event */ ++ ++#define TECR_mskBP ( 0xFF << TECR_offBP ) ++#define TECR_mskNMI ( 0x1 << TECR_offBNMI ) ++#define TECR_mskHWINT ( 0x3F << TECR_offBHWINT ) ++#define TECR_mskEVIC ( 0x1 << TECR_offBEVIC ) ++#define TECR_mskSYS ( 0x1 << TECR_offBSYS ) ++#define TECR_mskDBG ( 0x1 << TECR_offBDBG ) ++#define TECR_mskMRE ( 0x1 << TECR_offBMRE ) ++#define TECR_mskE ( 0x1 << TECR_offE ) ++#define TECR_mskL ( 0x1 << TECR_offL ) ++ ++/****************************************************************************** ++ * pfr0-2: PFMC0-2 (Performance Counter Register 0-2) ++ *****************************************************************************/ ++ ++ /* These registers contains performance event count */ ++ ++/****************************************************************************** ++ * pfr3: PFM_CTL (Performance Counter Control Register) ++ *****************************************************************************/ ++#define PFM_CTL_offEN0 0 /* Enable PFMC0 */ 
++#define PFM_CTL_offEN1 1 /* Enable PFMC1 */ ++#define PFM_CTL_offEN2 2 /* Enable PFMC2 */ ++#define PFM_CTL_offIE0 3 /* Enable interrupt for PFMC0 */ ++#define PFM_CTL_offIE1 4 /* Enable interrupt for PFMC1 */ ++#define PFM_CTL_offIE2 5 /* Enable interrupt for PFMC2 */ ++#define PFM_CTL_offOVF0 6 /* Overflow bit of PFMC0 */ ++#define PFM_CTL_offOVF1 7 /* Overflow bit of PFMC1 */ ++#define PFM_CTL_offOVF2 8 /* Overflow bit of PFMC2 */ ++#define PFM_CTL_offKS0 9 /* Enable superuser mode event counting for PFMC0 */ ++#define PFM_CTL_offKS1 10 /* Enable superuser mode event counting for PFMC1 */ ++#define PFM_CTL_offKS2 11 /* Enable superuser mode event counting for PFMC2 */ ++#define PFM_CTL_offKU0 12 /* Enable user mode event counting for PFMC0 */ ++#define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */ ++#define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */ ++#define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */ ++#define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */ ++#define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */ ++/* bit 28:31 reserved */ ++ ++#define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 ) ++#define PFM_CTL_mskEN1 ( 0x01 << PFM_CTL_offEN1 ) ++#define PFM_CTL_mskEN2 ( 0x01 << PFM_CTL_offEN2 ) ++#define PFM_CTL_mskIE0 ( 0x01 << PFM_CTL_offIE0 ) ++#define PFM_CTL_mskIE1 ( 0x01 << PFM_CTL_offIE1 ) ++#define PFM_CTL_mskIE2 ( 0x01 << PFM_CTL_offIE2 ) ++#define PFM_CTL_mskOVF0 ( 0x01 << PFM_CTL_offOVF0 ) ++#define PFM_CTL_mskOVF1 ( 0x01 << PFM_CTL_offOVF1 ) ++#define PFM_CTL_mskOVF2 ( 0x01 << PFM_CTL_offOVF2 ) ++#define PFM_CTL_mskKS0 ( 0x01 << PFM_CTL_offKS0 ) ++#define PFM_CTL_mskKS1 ( 0x01 << PFM_CTL_offKS1 ) ++#define PFM_CTL_mskKS2 ( 0x01 << PFM_CTL_offKS2 ) ++#define PFM_CTL_mskKU0 ( 0x01 << PFM_CTL_offKU0 ) ++#define PFM_CTL_mskKU1 ( 0x01 << PFM_CTL_offKU1 ) ++#define PFM_CTL_mskKU2 ( 0x01 << PFM_CTL_offKU2 ) ++#define PFM_CTL_mskSEL0 ( 0x01 << PFM_CTL_offSEL0 ) ++#define PFM_CTL_mskSEL1 ( 0x3F << PFM_CTL_offSEL1 ) ++#define PFM_CTL_mskSEL2 ( 0x3F << PFM_CTL_offSEL2 ) ++ ++/****************************************************************************** ++ * SDZ_CTL (Structure Downsizing Control Register) ++ *****************************************************************************/ ++#define SDZ_CTL_offICDZ 0 /* I-cache downsizing control */ ++#define SDZ_CTL_offDCDZ 3 /* D-cache downsizing control */ ++#define SDZ_CTL_offMTBDZ 6 /* MTLB downsizing control */ ++#define SDZ_CTL_offBTBDZ 9 /* Branch Target Table downsizing control */ ++/* bit 12:31 reserved */ ++#define SDZ_CTL_mskICDZ ( 0x07 << SDZ_CTL_offICDZ ) ++#define SDZ_CTL_mskDCDZ ( 0x07 << SDZ_CTL_offDCDZ ) ++#define SDZ_CTL_mskMTBDZ ( 0x07 << SDZ_CTL_offMTBDZ ) ++#define SDZ_CTL_mskBTBDZ ( 0x07 << SDZ_CTL_offBTBDZ ) ++ ++/****************************************************************************** ++ * N12MISC_CTL (N12 Miscellaneous Control Register) ++ *****************************************************************************/ ++#define N12MISC_CTL_offBTB 0 /* Disable Branch Target Buffer */ ++#define N12MISC_CTL_offRTP 1 /* Disable Return Target Predictor */ ++#define N12MISC_CTL_offPTEPF 2 /* Disable HPTWK L2 PTE pefetch */ ++/* bit 3:31 reserved */ ++ ++#define N12MISC_CTL_makBTB ( 0x1 << N12MISC_CTL_offBTB ) ++#define N12MISC_CTL_makRTP ( 0x1 << N12MISC_CTL_offRTP ) ++#define N12MISC_CTL_makPTEPF ( 0x1 << N12MISC_CTL_offPTEPF ) ++ ++/****************************************************************************** ++ * PRUSR_ACC_CTL (Privileged 
Resource User Access Control Registers) ++ *****************************************************************************/ ++#define PRUSR_ACC_CTL_offDMA_EN 0 /* Allow user mode access of DMA registers */ ++#define PRUSR_ACC_CTL_offPFM_EN 1 /* Allow user mode access of PFM registers */ ++ ++#define PRUSR_ACC_CTL_mskDMA_EN ( 0x1 << PRUSR_ACC_CTL_offDMA_EN ) ++#define PRUSR_ACC_CTL_mskPFM_EN ( 0x1 << PRUSR_ACC_CTL_offPFM_EN ) ++ ++/****************************************************************************** ++ * dmar0: DMA_CFG (DMA Configuration Register) ++ *****************************************************************************/ ++#define DMA_CFG_offNCHN 0 /* The number of DMA channels implemented */ ++#define DMA_CFG_offUNEA 2 /* Un-aligned External Address transfer feature */ ++#define DMA_CFG_off2DET 3 /* 2-D Element Transfer feature */ ++/* bit 4:15 reserved */ ++#define DMA_CFG_offVER 16 /* DMA architecture and implementation version */ ++ ++#define DMA_CFG_mskNCHN ( 0x3 << DMA_CFG_offNCHN ) ++#define DMA_CFG_mskUNEA ( 0x1 << DMA_CFG_offUNEA ) ++#define DMA_CFG_msk2DET ( 0x1 << DMA_CFG_off2DET ) ++#define DMA_CFG_mskVER ( 0xFFFF << DMA_CFG_offVER ) ++ ++/****************************************************************************** ++ * dmar1: DMA_GCSW (DMA Global Control and Status Word Register) ++ *****************************************************************************/ ++#define DMA_GCSW_offC0STAT 0 /* DMA channel 0 state */ ++#define DMA_GCSW_offC1STAT 3 /* DMA channel 1 state */ ++/* bit 6:11 reserved */ ++#define DMA_GCSW_offC0INT 12 /* DMA channel 0 generate interrupt */ ++#define DMA_GCSW_offC1INT 13 /* DMA channel 1 generate interrupt */ ++/* bit 14:30 reserved */ ++#define DMA_GCSW_offEN 31 /* Enable DMA engine */ ++ ++#define DMA_GCSW_mskC0STAT ( 0x7 << DMA_GCSW_offC0STAT ) ++#define DMA_GCSW_mskC1STAT ( 0x7 << DMA_GCSW_offC1STAT ) ++#define DMA_GCSW_mskC0INT ( 0x1 << DMA_GCSW_offC0INT ) ++#define DMA_GCSW_mskC1INT ( 0x1 << DMA_GCSW_offC1INT ) ++#define DMA_GCSW_mskEN ( 0x1 << DMA_GCSW_offEN ) ++ ++/****************************************************************************** ++ * dmar2: DMA_CHNSEL (DMA Channel Selection Register) ++ *****************************************************************************/ ++#define DMA_CHNSEL_offCHAN 0 /* Selected channel number */ ++/* bit 2:31 reserved */ ++ ++#define DMA_CHNSEL_mskCHAN ( 0x3 << DMA_CHNSEL_offCHAN ) ++ ++/****************************************************************************** ++ * dmar3: DMA_ACT (DMA Action Register) ++ *****************************************************************************/ ++#define DMA_ACT_offACMD 0 /* DMA Action Command */ ++/* bit 2:31 reserved */ ++#define DMA_ACT_mskACMD ( 0x3 << DMA_ACT_offACMD ) ++ ++/****************************************************************************** ++ * dmar4: DMA_SETUP (DMA Setup Register) ++ *****************************************************************************/ ++#define DMA_SETUP_offLM 0 /* Local Memory Selection */ ++#define DMA_SETUP_offTDIR 1 /* Transfer Direction */ ++#define DMA_SETUP_offTES 2 /* Transfer Element Size */ ++#define DMA_SETUP_offESTR 4 /* External memory transfer Stride */ ++#define DMA_SETUP_offCIE 16 /* Interrupt Enable on Completion */ ++#define DMA_SETUP_offSIE 17 /* Interrupt Enable on explicit Stop */ ++#define DMA_SETUP_offEIE 18 /* Interrupt Enable on Error */ ++#define DMA_SETUP_offUE 19 /* Enable the Un-aligned External Address */ ++#define DMA_SETUP_off2DE 20 /* Enable the 2-D External 
Transfer */ ++#define DMA_SETUP_offCOA 21 /* Transfer Coalescable */ ++/* bit 22:31 reserved */ ++ ++#define DMA_SETUP_mskLM ( 0x1 << DMA_SETUP_offLM ) ++#define DMA_SETUP_mskTDIR ( 0x1 << DMA_SETUP_offTDIR ) ++#define DMA_SETUP_mskTES ( 0x3 << DMA_SETUP_offTES ) ++#define DMA_SETUP_mskESTR ( 0xFFF << DMA_SETUP_offESTR ) ++#define DMA_SETUP_mskCIE ( 0x1 << DMA_SETUP_offCIE ) ++#define DMA_SETUP_mskSIE ( 0x1 << DMA_SETUP_offSIE ) ++#define DMA_SETUP_mskEIE ( 0x1 << DMA_SETUP_offEIE ) ++#define DMA_SETUP_mskUE ( 0x1 << DMA_SETUP_offUE ) ++#define DMA_SETUP_msk2DE ( 0x1 << DMA_SETUP_off2DE ) ++#define DMA_SETUP_mskCOA ( 0x1 << DMA_SETUP_offCOA ) ++ ++/****************************************************************************** ++ * dmar5: DMA_ISADDR (DMA Internal Start Address Register) ++ *****************************************************************************/ ++#define DMA_ISADDR_offISADDR 0 /* Internal Start Address */ ++/* bit 20:31 reserved */ ++#define DMA_ISADDR_mskISADDR ( 0xFFFFF << DMA_ISADDR_offISADDR ) ++ ++/****************************************************************************** ++ * dmar6: DMA_ESADDR (DMA External Start Address Register) ++ *****************************************************************************/ ++/* This register holds External Start Address */ ++ ++/****************************************************************************** ++ * dmar7: DMA_TCNT (DMA Transfer Element Count Register) ++ *****************************************************************************/ ++#define DMA_TCNT_offTCNT 0 /* DMA transfer element count */ ++/* bit 18:31 reserved */ ++#define DMA_TCNT_mskTCNT ( 0x3FFFF << DMA_TCNT_offTCNT ) ++ ++/****************************************************************************** ++ * dmar8: DMA_STATUS (DMA Status Register) ++ *****************************************************************************/ ++#define DMA_STATUS_offSTAT 0 /* DMA channel state */ ++#define DMA_STATUS_offSTUNA 3 /* Un-aligned error on External Stride value */ ++#define DMA_STATUS_offDERR 4 /* DMA Transfer Disruption Error */ ++#define DMA_STATUS_offEUNA 5 /* Un-aligned error on the External address */ ++#define DMA_STATUS_offIUNA 6 /* Un-aligned error on the Internal address */ ++#define DMA_STATUS_offIOOR 7 /* Out-Of-Range error on the Internal address */ ++#define DMA_STATUS_offEBUS 8 /* Bus Error on an External DMA transfer */ ++#define DMA_STATUS_offESUP 9 /* DMA setup error */ ++/* bit 10:31 reserved */ ++ ++#define DMA_STATUS_mskSTAT ( 0x7 << DMA_STATUS_offSTAT ) ++#define DMA_STATUS_mskSTUNA ( 0x1 << DMDMA_STATUS_offSTUNA ) ++#define DMA_STATUS_mskDERR ( 0x1 << DMDMA_STATUS_offDERR ) ++#define DMA_STATUS_mskEUNA ( 0x1 << DMDMA_STATUS_offEUNA ) ++#define DMA_STATUS_mskIUNA ( 0x1 << DMDMA_STATUS_offIUNA ) ++#define DMA_STATUS_mskIOOR ( 0x1 << DMDMA_STATUS_offIOOR ) ++#define DMA_STATUS_mskEBUS ( 0x1 << DMDMA_STATUS_offEBUS ) ++#define DMA_STATUS_mskESUP ( 0x1 << DMDMA_STATUS_offESUP ) ++ ++/****************************************************************************** ++ * dmar9: DMA_2DSET (DMA 2D Setup Register) ++ *****************************************************************************/ ++#define DMA_2DSET_offWECNT 0 /* The Width Element Count for a 2-D region */ ++#define DMA_2DSET_offHTSTR 16 /* The Height Stride for a 2-D region */ ++ ++#define DMA_2DSET_mskHTSTR ( 0xFFFF << DMA_2DSET_offHTSTR ) ++#define DMA_2DSET_mskWECNT ( 0xFFFF << DMA_2DSET_offWECNT ) ++ 
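The dmar0-dmar10 definitions above describe the local-memory DMA engine: DMA_SETUP carries the per-channel transfer parameters, DMA_ISADDR/DMA_ESADDR hold the internal and external start addresses, and DMA_TCNT the element count. The sketch below only shows how a DMA_SETUP word could be assembled from the off/msk pairs; the dma_setup_word() helper and the field values passed to it are illustrative assumptions, not taken from the patch.

#include <stdio.h>

/* Offsets/masks copied from the DMA_SETUP definitions above. */
#define DMA_SETUP_offLM   0                 /* Local Memory Selection         */
#define DMA_SETUP_offTDIR 1                 /* Transfer Direction             */
#define DMA_SETUP_offTES  2                 /* Transfer Element Size          */
#define DMA_SETUP_offESTR 4                 /* External memory transfer Stride */
#define DMA_SETUP_offCIE  16                /* Interrupt Enable on Completion */
#define DMA_SETUP_mskTES  ( 0x3 << DMA_SETUP_offTES )
#define DMA_SETUP_mskESTR ( 0xFFF << DMA_SETUP_offESTR )

/* Shift each field to its offset, mask it, and OR the pieces together. */
static unsigned long dma_setup_word(unsigned long lm, unsigned long dir,
				    unsigned long tes, unsigned long stride,
				    int irq_on_done)
{
	unsigned long w = 0;

	w |= (lm & 0x1) << DMA_SETUP_offLM;
	w |= (dir & 0x1) << DMA_SETUP_offTDIR;
	w |= (tes << DMA_SETUP_offTES) & DMA_SETUP_mskTES;
	w |= (stride << DMA_SETUP_offESTR) & DMA_SETUP_mskESTR;
	if (irq_on_done)
		w |= 1UL << DMA_SETUP_offCIE;
	return w;
}

int main(void)
{
	/* Illustrative field values only; prints DMA_SETUP = 0x00010049. */
	printf("DMA_SETUP = 0x%08lx\n", dma_setup_word(1, 0, 2, 4, 1));
	return 0;
}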
++/****************************************************************************** ++ * dmar10: DMA_2DSCTL (DMA 2D Startup Control Register) ++ *****************************************************************************/ ++#define DMA_2DSCTL_offSTWECNT 0 /* Startup Width Element Count for a 2-D region */ ++/* bit 16:31 reserved */ ++ ++#define DMA_2DSCTL_mskSTWECNT ( 0xFFFF << DMA_2DSCTL_offSTWECNT ) ++ ++/****************************************************************************** ++ * fpcsr: FPCSR (Floating-Point Control Status Register) ++ *****************************************************************************/ ++#define FPCSR_offRM 0 ++#define FPCSR_offIVO 2 ++#define FPCSR_offDBZ 3 ++#define FPCSR_offOVF 4 ++#define FPCSR_offUDF 5 ++#define FPCSR_offIEX 6 ++#define FPCSR_offIVOE 7 ++#define FPCSR_offDBZE 8 ++#define FPCSR_offOVFE 9 ++#define FPCSR_offUDFE 10 ++#define FPCSR_offIEXE 11 ++#define FPCSR_offDNZ 12 ++#define FPCSR_offIVOT 13 ++#define FPCSR_offDBZT 14 ++#define FPCSR_offOVFT 15 ++#define FPCSR_offUDFT 16 ++#define FPCSR_offIEXT 17 ++#define FPCSR_offDNIT 18 ++#define FPCSR_offRIT 19 ++ ++#define FPCSR_mskRM ( 0x3 << FPCSR_offRM ) ++#define FPCSR_mskIVO ( 0x1 << FPCSR_offIVO ) ++#define FPCSR_mskDBZ ( 0x1 << FPCSR_offDBZ ) ++#define FPCSR_mskOVF ( 0x1 << FPCSR_offOVF ) ++#define FPCSR_mskUDF ( 0x1 << FPCSR_offUDF ) ++#define FPCSR_mskIEX ( 0x1 << FPCSR_offIEX ) ++#define FPCSR_mskIVOE ( 0x1 << FPCSR_offIVOE ) ++#define FPCSR_mskDBZE ( 0x1 << FPCSR_offDBZE ) ++#define FPCSR_mskOVFE ( 0x1 << FPCSR_offOVFE ) ++#define FPCSR_mskUDFE ( 0x1 << FPCSR_offUDFE ) ++#define FPCSR_mskIEXE ( 0x1 << FPCSR_offIEXE ) ++#define FPCSR_mskDNZ ( 0x1 << FPCSR_offDNZ ) ++#define FPCSR_mskIVOT ( 0x1 << FPCSR_offIVOT ) ++#define FPCSR_mskDBZT ( 0x1 << FPCSR_offDBZT ) ++#define FPCSR_mskOVFT ( 0x1 << FPCSR_offOVFT ) ++#define FPCSR_mskUDFT ( 0x1 << FPCSR_offUDFT ) ++#define FPCSR_mskIEXT ( 0x1 << FPCSR_offIEXT ) ++#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT ) ++#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT ) ++#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX) ++#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE) ++#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT) ++ ++/****************************************************************************** ++ * fpcfg: FPCFG (Floating-Point Configuration Register) ++ *****************************************************************************/ ++#define FPCFG_offSP 0 ++#define FPCFG_offDP 1 ++#define FPCFG_offFREG 2 ++#define FPCFG_offFMA 4 ++#define FPCFG_offIMVER 22 ++#define FPCFG_offAVER 27 ++ ++#define FPCFG_mskSP ( 0x1 << FPCFG_offSP ) ++#define FPCFG_mskDP ( 0x1 << FPCFG_offDP ) ++#define FPCFG_mskFREG ( 0x3 << FPCFG_offFREG ) ++#define FPCFG_mskFMA ( 0x1 << FPCFG_offFMA ) ++#define FPCFG_mskIMVER ( 0x1F << FPCFG_offIMVER ) ++#define FPCFG_mskAVER ( 0x1F << FPCFG_offAVER ) ++ ++/****************************************************************************** ++ * fucpr: FUCOP_CTL (FPU and Coprocessor Enable Control Register) ++ *****************************************************************************/ ++#define FUCOP_CTL_offCP0EN 0 ++#define FUCOP_CTL_offCP1EN 1 ++#define FUCOP_CTL_offCP2EN 2 ++#define FUCOP_CTL_offCP3EN 3 ++#define FUCOP_CTL_offAUEN 31 ++ ++#define FUCOP_CTL_mskCP0EN ( 0x1 << FUCOP_CTL_offCP0EN ) ++#define FUCOP_CTL_mskCP1EN ( 0x1 << 
FUCOP_CTL_offCP1EN ) ++#define FUCOP_CTL_mskCP2EN ( 0x1 << FUCOP_CTL_offCP2EN ) ++#define FUCOP_CTL_mskCP3EN ( 0x1 << FUCOP_CTL_offCP3EN ) ++#define FUCOP_CTL_mskAUEN ( 0x1 << FUCOP_CTL_offAUEN ) ++ ++#endif /* __NDS32_BITFIELD_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/bitops.h linux-3.4.110/arch/nds32/include/asm/bitops.h +--- linux-3.4.110.orig/arch/nds32/include/asm/bitops.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/bitops.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,256 @@ ++/* ++ * linux/arch/nds32/include/asm/bitops.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_BITOPS_H__ ++#define __NDS32_BITOPS_H__ ++ ++#if defined(CONFIG_SMP) || !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ ++/* ++#include ++ ++static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ *p |= mask; ++ raw_local_irq_restore(flags); ++} ++ ++static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ *p &= ~mask; ++ raw_local_irq_restore(flags); ++} ++ ++static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ *p ^= mask; ++ raw_local_irq_restore(flags); ++} ++ ++static inline int ++____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned int res; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ res = *p; ++ *p = res | mask; ++ raw_local_irq_restore(flags); ++ ++ return (res & mask) != 0; ++} ++static inline int ++____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned int res; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ res = *p; ++ *p = res & ~mask; ++ raw_local_irq_restore(flags); ++ ++ return (res & mask) != 0; ++} ++ ++static inline int ++____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) ++{ ++ unsigned long flags; ++ unsigned int res; ++ unsigned long mask = 1UL << (bit & 31); ++ ++ p += bit >> 5; ++ ++ raw_local_irq_save(flags); ++ res = *p; ++ *p = res ^ mask; ++ raw_local_irq_restore(flags); ++ ++ return (res & mask) != 0; ++} ++ ++#include ++#ifndef CONFIG_SMP ++#define ATOMIC_BITOP(name,nr,p) \ ++ (__builtin_constant_p(nr) ? 
____atomic_##name(nr, p) : _##name(nr,p)) ++#else ++#define ATOMIC_BITOP(name,nr,p) _##name(nr,p) ++#endif ++ ++#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) ++*/ ++ ++static inline void set_bit(int nr,volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tor\t%0, %0, %2\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (p), "r" (mask) ++ : "memory"); ++} ++ ++static inline void clear_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp; ++ ++ mask = ~mask; ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tand\t%0, %0, %2\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (p), "r" (mask) ++ : "memory"); ++} ++ ++static inline void change_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\txor\t%0, %0, %2\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (p), "r" (mask) ++ : "memory"); ++} ++ ++static inline int test_and_set_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp ,ret; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\tor\t%1, %0, %3\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (p), "r" (mask) ++ : "memory"); ++ return (ret & mask) != 0; ++} ++ ++static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp, ret; ++ unsigned long mask2 = ~mask; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\tand\t%1, %0, %3\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (p), "r" (mask2) ++ : "memory"); ++ return (ret & mask) != 0; ++} ++ ++static inline int test_and_change_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long tmp, ret; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\txor\t%1, %0, %3\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (p), "r" (mask) ++ : "memory"); ++ return (ret & mask) != 0; ++} ++#else ++#include ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef __KERNEL__ ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* ++ * Ext2 is defined to use little-endian byte ordering. 
++ */ ++#include ++ ++#endif /* __KERNEL__ */ ++ ++#define smp_mb__before_clear_bit() barrier() ++#define smp_mb__after_clear_bit() barrier() ++ ++#endif /* __NDS32_BITOPS_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/bitsperlong.h linux-3.4.110/arch/nds32/include/asm/bitsperlong.h +--- linux-3.4.110.orig/arch/nds32/include/asm/bitsperlong.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/bitsperlong.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1 @@ ++#include +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/bug.h linux-3.4.110/arch/nds32/include/asm/bug.h +--- linux-3.4.110.orig/arch/nds32/include/asm/bug.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/bug.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,18 @@ ++/* ++ * linux/arch/nds32/include/asm/bug.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_BUG_H__ ++#define __NDS32_BUG_H__ ++ ++#define HAVE_ARCH_BUG ++#include ++ ++#define BUG() do { \ ++ dump_stack(); \ ++ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ ++ panic("BUG!"); \ ++} while (0) ++ ++#endif /* __NDS32_BUG_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/bugs.h linux-3.4.110/arch/nds32/include/asm/bugs.h +--- linux-3.4.110.orig/arch/nds32/include/asm/bugs.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/bugs.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/bugs.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_BUGS_H__ ++#define __NDS32_BUGS_H__ ++ ++static inline void check_bugs(void) {} ++ ++#endif /* __NDS32_BUGS_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/byteorder.h linux-3.4.110/arch/nds32/include/asm/byteorder.h +--- linux-3.4.110.orig/arch/nds32/include/asm/byteorder.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/byteorder.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,15 @@ ++/* ++ * linux/arch/nds32/include/asm/byteorder.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_BYTEORDER_H__ ++#define __NDS32_BYTEORDER_H__ ++ ++#ifdef __NDS32_EB__ ++#include ++#else ++#include ++#endif ++ ++#endif /* __NDS32_BYTEORDER_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cacheflush.h linux-3.4.110/arch/nds32/include/asm/cacheflush.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cacheflush.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cacheflush.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,55 @@ ++/* ++ * linux/arch/nds32/include/asm/cacheflush.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_CACHEFLUSH_H__ ++#define __NDS32_CACHEFLUSH_H__ ++ ++#include ++ ++#define PG_dcache_dirty PG_arch_1 ++ ++void flush_cache_mm(struct mm_struct *mm); ++ ++void flush_cache_dup_mm(struct mm_struct *mm); ++ ++void flush_cache_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end); ++ ++void flush_cache_page(struct vm_area_struct *vma, ++ unsigned long addr, unsigned long pfn); ++ ++void flush_cache_kmaps(void); ++ ++void flush_cache_vmap(unsigned long start, unsigned long end); ++ ++void flush_cache_vunmap(unsigned long start, unsigned long end); ++ ++ ++void flush_dcache_page(struct page *page); ++ ++void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len); ++ ++void 
copy_from_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len); ++ ++#ifndef CONFIG_CPU_CACHE_NONALIASING ++#define ARCH_HAS_FLUSH_ANON_PAGE ++void flush_anon_page(struct vm_area_struct *vma, ++ struct page *page, unsigned long vaddr); ++ ++#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE ++void flush_kernel_dcache_page(struct page *page); ++#endif ++ ++void flush_icache_range(unsigned long start, unsigned long end); ++ ++void flush_icache_page(struct vm_area_struct *vma, struct page *page); ++ ++#define flush_dcache_mmap_lock(mapping) spin_lock_irq(&(mapping)->tree_lock) ++#define flush_dcache_mmap_unlock(mapping) spin_unlock_irq(&(mapping)->tree_lock) ++ ++#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 ++#endif /* __NDS32_CACHEFLUSH_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cache.h linux-3.4.110/arch/nds32/include/asm/cache.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cache.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cache.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,22 @@ ++/* ++ * linux/arch/nds32/include/asm/cache.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_CACHE_H__ ++#define __NDS32_CACHE_H__ ++ ++#define L1_CACHE_BYTES 32 ++#define L1_CACHE_SHIFT 5 ++ ++/* ++ * * Memory returned by kmalloc() may be used for DMA, so we must make ++ * * sure that all such allocations are cache aligned. Otherwise, ++ * * unrelated code may cause parts of the buffer to be read into the ++ * * cache before the transfer is done, causing old data to be seen by ++ * * the CPU. ++ * */ ++#define ARCH_DMA_MINALIGN L1_CACHE_BYTES ++ ++ ++#endif /* __NDS32_CACHE_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cache_info.h linux-3.4.110/arch/nds32/include/asm/cache_info.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cache_info.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cache_info.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,17 @@ ++/* ++ * linux/arch/nds32/include/asm/cache.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++struct cache_info { ++ unsigned char cache_type; ++ unsigned char ways; ++ unsigned char way_bits; ++ unsigned char line_bits; ++ unsigned char line_size; ++ unsigned char set_bits; ++ unsigned short sets; ++ unsigned short size; ++ unsigned short aliasing_num; ++ unsigned int aliasing_mask; ++ unsigned int not_aliasing_mask; ++}; +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/checksum.h linux-3.4.110/arch/nds32/include/asm/checksum.h +--- linux-3.4.110.orig/arch/nds32/include/asm/checksum.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/checksum.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,173 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle ++ * Copyright (C) 1999 Silicon Graphics, Inc. ++ * Copyright (C) 2001 Thiemo Seufer. ++ * Copyright (C) 2002 Maciej W. 
Rozycki ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++#ifndef _ASM_CHECKSUM_H ++#define _ASM_CHECKSUM_H ++ ++#include ++ ++#include ++ ++/* ++ * computes the checksum of a memory block at buff, length len, ++ * and adds in "sum" (32-bit) ++ * ++ * returns a 32-bit number suitable for feeding into itself ++ * or csum_tcpudp_magic ++ * ++ * this function must be called with even lengths, except ++ * for the last fragment, which may be odd ++ * ++ * it's best to have buff aligned on a 32-bit boundary ++ */ ++unsigned int csum_partial(const void *buff, int len, unsigned int sum); ++ ++/* ++ * this is a new version of the above that records errors it finds in *errp, ++ * but continues and zeros the rest of the buffer. ++ */ ++unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, int len, ++ unsigned int sum, int *errp); ++ ++/* ++ * Copy and checksum to user ++ */ ++#define HAVE_CSUM_COPY_USER ++static inline unsigned int csum_and_copy_to_user (const unsigned char *src, ++ unsigned char __user *dst, ++ int len, int sum, ++ int *err_ptr) ++{ ++ sum = csum_partial(src, len, sum); ++ ++ if (copy_to_user(dst, src, len)) { ++ *err_ptr = -EFAULT; ++ return -1; ++ } ++ ++ return sum; ++} ++ ++/* ++ * the same as csum_partial, but copies from user space (but on MIPS ++ * we have just one address space, so this is identical to the above) ++ */ ++unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, ++ int len, unsigned int sum); ++ ++/* ++ * Fold a partial checksum without adding pseudo headers ++ */ ++static inline unsigned short int csum_fold(unsigned int sum) ++{ ++ __asm__( ++ "slli\t$p1,%0,16\n\t" ++ "add\t%0,%0,$p1\n\t" ++ "slt\t$p1,%0,$p1\n\t" ++ "srli\t%0,%0,16\n\t" ++ "add\t%0,%0,$p1\n\t" ++ "movi\t$p1,0xffff\n\t" ++ "xor\t%0,%0,$p1" ++ : "=r" (sum) ++ : "0" (sum)); ++ ++ return sum; ++} ++ ++/* ++ * This is a version of ip_compute_csum() optimized for IP headers, ++ * which always checksum on 4 octet boundaries. ++ * ++ * By Jorge Cwik , adapted for linux by ++ * Arnt Gulbrandsen. 
++ */ ++static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) ++{ ++ unsigned int *word = (unsigned int *) iph; ++ unsigned int *stop = word + ihl; ++ unsigned int csum; ++ int carry; ++ ++ csum = word[0]; ++ csum += word[1]; ++ carry = (csum < word[1]); ++ csum += carry; ++ ++ csum += word[2]; ++ carry = (csum < word[2]); ++ csum += carry; ++ ++ csum += word[3]; ++ carry = (csum < word[3]); ++ csum += carry; ++ ++ word += 4; ++ do { ++ csum += *word; ++ carry = (csum < *word); ++ csum += carry; ++ word++; ++ } while (word != stop); ++ ++ return csum_fold(csum); ++} ++ ++static inline unsigned int csum_tcpudp_nofold(unsigned long saddr, ++ unsigned long daddr, unsigned short len, unsigned short proto, ++ unsigned int sum) ++{ ++ __asm__( ++ "add\t%0, %0, %2\n\t" ++ "slt\t$p1, %0, %2\n\t" ++ "add\t%0, %0, $p1\n\t" ++ ++ "add\t%0, %0, %3\n\t" ++ "slt\t$p1, %0, %3\n\t" ++ "add\t%0, %0, $p1\n\t" ++ ++ "add\t%0, %0, %4\n\t" ++ "slt\t$p1, %0, %4\n\t" ++ "add\t%0, %0, $p1" ++ : "=r" (sum) ++ : "0" (daddr), "r"(saddr), ++#ifdef __NDS32_EL__ ++ "r" (((unsigned long)htons(len)<<16) + proto*256), ++#else ++ "r" (((unsigned long)(proto)<<16) + len), ++#endif ++ "r" ((__force unsigned long)sum)); ++ ++ return sum; ++} ++ ++/* ++ * computes the checksum of the TCP/UDP pseudo-header ++ * returns a 16-bit checksum, already complemented ++ */ ++static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, ++ unsigned long daddr, ++ unsigned short len, ++ unsigned short proto, ++ unsigned int sum) ++{ ++ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); ++} ++ ++/* ++ * this routine is used for miscellaneous IP-like checksums, mainly ++ * in icmp.c ++ */ ++static inline unsigned short ip_compute_csum(const void * buff, int len) ++{ ++ return csum_fold(csum_partial(buff, len, 0)); ++} ++ ++#endif /* _ASM_CHECKSUM_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cmpxchg.h linux-3.4.110/arch/nds32/include/asm/cmpxchg.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cmpxchg.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cmpxchg.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,88 @@ ++#ifndef __ASM_NDS32_CMPXCHG_H ++#define __ASM_NDS32_CMPXCHG_H ++ ++#include ++ ++#define xchg(ptr,x) \ ++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ++ ++static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) ++{ ++ extern void __bad_xchg(volatile void *, int); ++ unsigned long ret; ++ unsigned long flags; ++ ++ switch (size) { ++ case 4: ++#if defined(CONFIG_SMP) || !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\tori\t%1, %3, #0x0\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (ret), "=&r" (flags) ++ : "r" (ptr), "r" (x) ++ : "memory"); ++#else ++ raw_local_irq_save(flags); ++ ret = *(volatile unsigned long *)ptr; ++ *(volatile unsigned long *)ptr = x; ++ raw_local_irq_restore(flags); ++#endif ++ break; ++ default: ++ __bad_xchg(ptr, size); ++ ret = 0; ++ } ++ return ret; ++} ++ ++ ++#define __HAVE_ARCH_CMPXCHG 1 ++ ++static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, ++ unsigned long new, int size) ++{ ++ extern void __cmpxchg_called_with_bad_pointer(void); /*nonexistence */ ++ unsigned long retval, tmp; ++ unsigned long flags; ++ switch (size) { ++ case 4: ++#if defined(CONFIG_SMP) || !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ __asm__ 
__volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%3+$r15]\n" ++ "\tsub\t%2, %0, %5\n" ++ "\tcmovz\t%1, %4, %2\n" ++ "\tcmovn\t%1, %0, %2\n" ++ "\tscw\t%1, [%3+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (retval), "=&r" (flags), "=&r" (tmp) ++ : "r" (ptr), "r" (new), "r" (old)); ++#else ++ raw_local_irq_save(flags); ++ retval = *(volatile unsigned long *)ptr; ++ if (retval == old) ++ *(volatile unsigned long *)ptr = new; ++ raw_local_irq_restore(flags); ++#endif ++ break; ++ default: ++ __cmpxchg_called_with_bad_pointer(); ++ tmp = 0; ++ } ++ return retval; ++} ++ ++#define cmpxchg(ptr,o,n) \ ++ ({ \ ++ __typeof__(*(ptr)) _o_ = (o); \ ++ __typeof__(*(ptr)) _n_ = (n); \ ++ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ ++ (unsigned long)_n_, sizeof(*(ptr))); \ ++ }) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cpu.h linux-3.4.110/arch/nds32/include/asm/cpu.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cpu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cpu.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/include/asm/cpu.h ++ * ++ * Copyright (C) 2004-2005 ARM Ltd. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_NDS32_CPU_H ++#define __ASM_NDS32_CPU_H ++ ++#include ++ ++struct cpuinfo_nds32 { ++ struct cpu cpu; ++#ifdef CONFIG_SMP ++ unsigned int loops_per_jiffy; ++#endif ++}; ++ ++DECLARE_PER_CPU(struct cpuinfo_nds32, cpu_data); ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cputime.h linux-3.4.110/arch/nds32/include/asm/cputime.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cputime.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cputime.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/cputime.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_CPUTIME_H__ ++#define __NDS32_CPUTIME_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/cpuver.h linux-3.4.110/arch/nds32/include/asm/cpuver.h +--- linux-3.4.110.orig/arch/nds32/include/asm/cpuver.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/cpuver.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,34 @@ ++/* ++ * linux/arch/nds32/include/asm/cpuver.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_CPUVER_H__ ++#define __NDS32_CPUVER_H__ ++ ++#include ++#include ++ ++#define GET_CPU_ID()\ ++ (( GET_CPU_VER() & CPU_VER_mskCPUID) >> CPU_VER_offCPUID) ++ ++#define GET_CPU_REV()\ ++ (( GET_CPU_VER() & CPU_VER_mskREV) >> CPU_VER_offREV) ++ ++#define GET_CPU_CFGID()\ ++ (( GET_CPU_VER() & CPU_VER_mskCFGID) >> CPU_VER_offCFGID) ++ ++#define CPU_IS_N1213_43U1HA0()\ ++ (((GET_CPU_VER() & CPU_VER_mskCPUID) == 0x0c000000) &&\ ++ ((GET_CPU_VER() & CPU_VER_mskREV) == 0x00010000)) ++ ++#define CPU_IS_N1213_43U1HB0()\ ++ (((GET_CPU_VER() & CPU_VER_mskCPUID) == 0x0c000000) &&\ ++ ((GET_CPU_VER() & CPU_VER_mskREV) == 0x00020000)) ++ ++#define CPU_IS_N1033_S()\ ++ (((GET_CPU_VER() & CPU_VER_mskCPUID) == 0x0a000000) &&\ ++ ((GET_CPU_VER() & CPU_VER_mskREV) == 0x000c0000)) ++ ++#endif ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/current.h linux-3.4.110/arch/nds32/include/asm/current.h +--- 
linux-3.4.110.orig/arch/nds32/include/asm/current.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/current.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,18 @@ ++/* ++ * linux/arch/nds32/include/asm/current.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_CURRENT_H__ ++#define __NDS32_CURRENT_H__ ++ ++#include ++ ++static inline struct task_struct *get_current(void) ++{ ++ return current_thread_info()->task; ++} ++ ++#define current get_current() ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/delay.h linux-3.4.110/arch/nds32/include/asm/delay.h +--- linux-3.4.110.orig/arch/nds32/include/asm/delay.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/delay.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,39 @@ ++/* ++ * linux/arch/nds32/include/asm/delay.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_DELAY_H__ ++#define __NDS32_DELAY_H__ ++ ++#include ++ ++static inline void __delay(unsigned long loops) ++{ ++ __asm__ __volatile__ ( ++ "1:\n" ++ "\taddi\t%0, %0, -1\n" ++ "\tbgtz\t%0, 1b\n" ++ : "=r" (loops) : "0" (loops)); ++} ++ ++static inline void __udelay(unsigned long usecs, unsigned long lpj) ++{ ++ usecs *= (unsigned long) (((0x8000000000000000ULL / (500000 / HZ)) + ++ 0x80000000ULL) >> 32); ++ usecs=(unsigned long)(((unsigned long long)usecs*lpj)>>32); ++ __delay(usecs); ++} ++ ++#define udelay(usecs) __udelay((usecs), loops_per_jiffy) ++ ++/* make sure "usecs *= ..." in udelay do not overflow. */ ++#if HZ >= 1000 ++#define MAX_UDELAY_MS 1 ++#elif HZ <= 200 ++#define MAX_UDELAY_MS 5 ++#else ++#define MAX_UDELAY_MS (1000 / HZ) ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/device.h linux-3.4.110/arch/nds32/include/asm/device.h +--- linux-3.4.110.orig/arch/nds32/include/asm/device.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/device.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/device.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_DEVICE_H__ ++#define __NDS32_DEVICE_H__ ++ ++#include ++ ++#endif /* __NDS32_DEVICE_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/div64.h linux-3.4.110/arch/nds32/include/asm/div64.h +--- linux-3.4.110.orig/arch/nds32/include/asm/div64.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/div64.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/div64.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_DIV64_H__ ++#define __NDS32_DIV64_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/dmad.h linux-3.4.110/arch/nds32/include/asm/dmad.h +--- linux-3.4.110.orig/arch/nds32/include/asm/dmad.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/dmad.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,1071 @@ ++/* ++ * Copyright Andes Technology Corporation 2007-2008 ++ * All Rights Reserved. ++ * ++ * Revision History: ++ * ++ * Aug.21.2007 Created. ++ * ++ * DESCRIPTION ++ * ++ * DMA controller driver internal supplement library. 
++ * ++ */ ++ ++#ifndef __NDS_DMAD_INC__ ++#define __NDS_DMAD_INC__ ++ ++#include ++ ++/***************************************************************************** ++ * Configuration section ++*****************************************************************************/ ++ ++/* Debug trace enable switch */ ++#define DMAD_ERROR_TRACE 1 /* message for fatal errors */ ++//MOD by river 2010.10.19 ++#define DMAD_DEBUG_TRACE 0 /* message for debug trace */ ++//End MOD by river 2010.10.19 ++ ++//#ifndef addr_t ++typedef u32 addr_t; ++//#endif ++/* for amerald */ ++#define AMERALD_PRODUCT_ID 0x41471000 ++#define AMERALD_MASK 0xFFFFF000 ++ ++/***************************************************************************** ++ * DMAC - AG101 AHB ++*****************************************************************************/ ++/* Device base address */ ++#define DMAC_BASE DMAC_FTDMAC020_0_VA_BASE ++ ++/* DMA controller registers (8-bit width) */ ++#define DMAC_INT (DMAC_BASE + 0x00) ++#define DMAC_INT_TC (DMAC_BASE + 0x04) ++#define DMAC_INT_TC_CLR (DMAC_BASE + 0x08) ++#define DMAC_INT_ERRABT (DMAC_BASE + 0x0c) ++#define DMAC_INT_ERRABT_CLR (DMAC_BASE + 0x10) ++#define DMAC_TC (DMAC_BASE + 0x14) ++#define DMAC_ERRABT (DMAC_BASE + 0x18) ++#define DMAC_CH_EN (DMAC_BASE + 0x1c) ++#define DMAC_CH_BUSY (DMAC_BASE + 0x20) ++#define DMAC_CSR (DMAC_BASE + 0x24) ++#define DMAC_SYNC (DMAC_BASE + 0x28) ++ ++/* DMA channel registers base address */ ++#define DMAC_C0_OFFSET 0x100 ++#define DMAC_C1_OFFSET 0x120 ++#define DMAC_C2_OFFSET 0x140 ++#define DMAC_C3_OFFSET 0x160 ++#define DMAC_C4_OFFSET 0x180 ++#define DMAC_C5_OFFSET 0x1a0 ++#define DMAC_C6_OFFSET 0x1c0 ++#define DMAC_C7_OFFSET 0x1e0 ++ ++#define DMAC_C0_BASE (DMAC_BASE + DMAC_C0_OFFSET) ++#define DMAC_C1_BASE (DMAC_BASE + DMAC_C1_OFFSET) ++#define DMAC_C2_BASE (DMAC_BASE + DMAC_C2_OFFSET) ++#define DMAC_C3_BASE (DMAC_BASE + DMAC_C3_OFFSET) ++#define DMAC_C4_BASE (DMAC_BASE + DMAC_C4_OFFSET) ++#define DMAC_C5_BASE (DMAC_BASE + DMAC_C5_OFFSET) ++#define DMAC_C6_BASE (DMAC_BASE + DMAC_C6_OFFSET) ++#define DMAC_C7_BASE (DMAC_BASE + DMAC_C7_OFFSET) ++ ++#define DMAC_MAX_CHANNELS 8 ++#define DMAC_BASE_CH(n) (DMAC_C0_BASE + \ ++ (DMAC_C1_OFFSET - DMAC_C0_OFFSET) * \ ++ (addr_t)(n)) /* n = 0 ~ 3 */ ++ ++#define DMAC_CSR_OFFSET 0x00 ++#define DMAC_CFG_OFFSET 0x04 ++#define DMAC_SRC_ADDR_OFFSET 0x08 ++#define DMAC_DST_ADDR_OFFSET 0x0c ++#define DMAC_LLP_OFFSET 0x10 ++#define DMAC_SIZE_OFFSET 0x14 ++ ++/* DMA channel 0 registers (32-bit width) */ ++#define DMAC_C0_CSR (DMAC_C0_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C0_CFG (DMAC_C0_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C0_SRC_ADDR (DMAC_C0_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C0_DST_ADDR (DMAC_C0_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C0_LLP (DMAC_C0_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C0_SIZE (DMAC_C0_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 1 registers (32-bit width) */ ++#define DMAC_C1_CSR (DMAC_C1_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C1_CFG (DMAC_C1_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C1_SRC_ADDR (DMAC_C1_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C1_DST_ADDR (DMAC_C1_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C1_LLP (DMAC_C1_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C1_SIZE (DMAC_C1_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 2 registers (32-bit width) */ ++#define DMAC_C2_CSR (DMAC_C2_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C2_CFG (DMAC_C2_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C2_SRC_ADDR (DMAC_C2_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C2_DST_ADDR (DMAC_C2_BASE + 
DMAC_DST_ADDR_OFFSET) ++#define DMAC_C2_LLP (DMAC_C2_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C2_SIZE (DMAC_C2_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 3 registers (32-bit width) */ ++#define DMAC_C3_CSR (DMAC_C3_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C3_CFG (DMAC_C3_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C3_SRC_ADDR (DMAC_C3_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C3_DST_ADDR (DMAC_C3_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C3_LLP (DMAC_C3_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C3_SIZE (DMAC_C3_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 4 registers (32-bit width) */ ++#define DMAC_C4_CSR (DMAC_C4_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C4_CFG (DMAC_C4_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C4_SRC_ADDR (DMAC_C4_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C4_DST_ADDR (DMAC_C4_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C4_LLP (DMAC_C4_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C4_SIZE (DMAC_C4_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 5 registers (32-bit width) */ ++#define DMAC_C5_CSR (DMAC_C5_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C5_CFG (DMAC_C5_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C5_SRC_ADDR (DMAC_C5_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C5_DST_ADDR (DMAC_C5_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C5_LLP (DMAC_C5_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C5_SIZE (DMAC_C5_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 6 registers (32-bit width) */ ++#define DMAC_C6_CSR (DMAC_C6_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C6_CFG (DMAC_C6_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C6_SRC_ADDR (DMAC_C6_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C6_DST_ADDR (DMAC_C6_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C6_LLP (DMAC_C6_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C6_SIZE (DMAC_C6_BASE + DMAC_SIZE_OFFSET) ++ ++/* DMA channel 7 registers (32-bit width) */ ++#define DMAC_C7_CSR (DMAC_C7_BASE + DMAC_CSR_OFFSET) ++#define DMAC_C7_CFG (DMAC_C7_BASE + DMAC_CFG_OFFSET) ++#define DMAC_C7_SRC_ADDR (DMAC_C7_BASE + DMAC_SRC_ADDR_OFFSET) ++#define DMAC_C7_DST_ADDR (DMAC_C7_BASE + DMAC_DST_ADDR_OFFSET) ++#define DMAC_C7_LLP (DMAC_C7_BASE + DMAC_LLP_OFFSET) ++#define DMAC_C7_SIZE (DMAC_C7_BASE + DMAC_SIZE_OFFSET) ++ ++/***************************************************************************** ++ * DMAC defs - AG101 AHB ++*****************************************************************************/ ++ ++/* Interrupt status register (+00) */ ++#define DMAC_INT0_MASK 0x01 ++#define DMAC_INT0_BIT 0 ++#define DMAC_INT1_MASK 0x02 ++#define DMAC_INT1_BIT 1 ++#define DMAC_INT2_MASK 0x04 ++#define DMAC_INT2_BIT 2 ++#define DMAC_INT3_MASK 0x08 ++#define DMAC_INT3_BIT 3 ++ ++/* Interrupt for terminal count status register (+0x04) */ ++#define DMAC_INT_TC0_MASK 0x01 ++#define DMAC_INT_TC0_BIT 0 ++#define DMAC_INT_TC1_MASK 0x02 ++#define DMAC_INT_TC1_BIT 1 ++#define DMAC_INT_TC2_MASK 0x04 ++#define DMAC_INT_TC2_BIT 2 ++#define DMAC_INT_TC3_MASK 0x08 ++#define DMAC_INT_TC3_BIT 3 ++#define DMAC_INT_TC4_MASK 0x01 ++#define DMAC_INT_TC4_BIT 0 ++#define DMAC_INT_TC5_MASK 0x02 ++#define DMAC_INT_TC5_BIT 1 ++#define DMAC_INT_TC6_MASK 0x04 ++#define DMAC_INT_TC6_BIT 2 ++#define DMAC_INT_TC7_MASK 0x08 ++#define DMAC_INT_TC7_BIT 3 ++ ++#define DMAC_INT_TC_MASK 0xff ++#define DMAC_INT_TC_SHIFT 0 ++ ++/* Interrupt for terminal count clear register (+0x08) */ ++#define DMAC_INT_TC0_CLR_MASK 0x01 ++#define DMAC_INT_TC0_CLR_BIT 0 ++#define DMAC_INT_TC1_CLR_MASK 0x02 ++#define DMAC_INT_TC1_CLR_BIT 1 ++#define DMAC_INT_TC2_CLR_MASK 0x04 ++#define DMAC_INT_TC2_CLR_BIT 2 ++#define DMAC_INT_TC3_CLR_MASK 0x08 ++#define 
DMAC_INT_TC3_CLR_BIT 3 ++ ++#define DMAC_INT_TC_CLR_MASK 0x0f ++#define DMAC_INT_TC_CLR_SHIFT 0 ++ ++/* Interrupt for error/abort status register (+0x0c, 32-bits width) */ ++#define DMAC_INT_ERR0_MASK 0x00000001 ++#define DMAC_INT_ERR0_BIT 0 ++#define DMAC_INT_ERR1_MASK 0x00000002 ++#define DMAC_INT_ERR1_BIT 1 ++#define DMAC_INT_ERR2_MASK 0x00000004 ++#define DMAC_INT_ERR2_BIT 2 ++#define DMAC_INT_ERR3_MASK 0x00000008 ++#define DMAC_INT_ERR3_BIT 3 ++ ++#define DMAC_INT_ERR_MASK 0x0000000f ++#define DMAC_INT_ERR_SHIFT 0 ++ ++#define DMAC_INT_ABT0_MASK 0x00010000 ++#define DMAC_INT_ABT0_BIT 16 ++#define DMAC_INT_ABT1_MASK 0x00020000 ++#define DMAC_INT_ABT1_BIT 17 ++#define DMAC_INT_ABT2_MASK 0x00040000 ++#define DMAC_INT_ABT2_BIT 18 ++#define DMAC_INT_ABT3_MASK 0x00080000 ++#define DMAC_INT_ABT3_BIT 19 ++ ++#define DMAC_INT_ABT_MASK 0x000f0000 ++#define DMAC_INT_ABT_SHIFT 16 ++ ++/* Interrupt for error/abort status clear register (+0x10, 32-bits width) */ ++#define DMAC_INT_ERR0_CLR_MASK 0x00000001 ++#define DMAC_INT_ERR0_CLR_BIT 0 ++#define DMAC_INT_ERR1_CLR_MASK 0x00000002 ++#define DMAC_INT_ERR1_CLR_BIT 1 ++#define DMAC_INT_ERR2_CLR_MASK 0x00000004 ++#define DMAC_INT_ERR2_CLR_BIT 2 ++#define DMAC_INT_ERR3_CLR_MASK 0x00000008 ++#define DMAC_INT_ERR3_CLR_BIT 3 ++ ++#define DMAC_INT_ERR_CLR_MASK 0x0000000f ++#define DMAC_INT_ERR_CLR_SHIFT 0 ++ ++#define DMAC_INT_ABT0_CLR_MASK 0x00010000 ++#define DMAC_INT_ABT0_CLR_BIT 16 ++#define DMAC_INT_ABT1_CLR_MASK 0x00020000 ++#define DMAC_INT_ABT1_CLR_BIT 17 ++#define DMAC_INT_ABT2_CLR_MASK 0x00040000 ++#define DMAC_INT_ABT2_CLR_BIT 18 ++#define DMAC_INT_ABT3_CLR_MASK 0x00080000 ++#define DMAC_INT_ABT3_CLR_BIT 19 ++ ++#define DMAC_INT_ABT_CLR_MASK 0x000f0000 ++#define DMAC_INT_ABT_CLR_SHIFT 16 ++ ++/* Terminal count status register (+0x14) */ ++#define DMAC_TC0_MASK 0x01 ++#define DMAC_TC0_BIT 0 ++#define DMAC_TC1_MASK 0x02 ++#define DMAC_TC1_BIT 1 ++#define DMAC_TC2_MASK 0x04 ++#define DMAC_TC2_BIT 2 ++#define DMAC_TC3_MASK 0x08 ++#define DMAC_TC3_BIT 3 ++ ++/* Error/abort status register (+0x18, 32-bits width) */ ++#define DMAC_ERR0_MASK 0x00000001 ++#define DMAC_ERR0_BIT 0 ++#define DMAC_ERR1_MASK 0x00000002 ++#define DMAC_ERR1_BIT 1 ++#define DMAC_ERR2_MASK 0x00000004 ++#define DMAC_ERR2_BIT 2 ++#define DMAC_ERR3_MASK 0x00000008 ++#define DMAC_ERR3_BIT 3 ++ ++#define DMAC_ABT0_MASK 0x00010000 ++#define DMAC_ABT0_BIT 16 ++#define DMAC_ABT1_MASK 0x00020000 ++#define DMAC_ABT1_BIT 17 ++#define DMAC_ABT2_MASK 0x00040000 ++#define DMAC_ABT2_BIT 18 ++#define DMAC_ABT3_MASK 0x00080000 ++#define DMAC_ABT3_BIT 19 ++ ++/* Channel enable status register (+0x1c) */ ++#define DMAC_CH0_EN_MASK 0x01 ++#define DMAC_CH0_EN_BIT 0 ++#define DMAC_CH1_EN_MASK 0x02 ++#define DMAC_CH1_EN_BIT 1 ++#define DMAC_CH2_EN_MASK 0x04 ++#define DMAC_CH2_EN_BIT 2 ++#define DMAC_CH3_EN_MASK 0x08 ++#define DMAC_CH3_EN_BIT 3 ++ ++/* Channel busy status register (+0x20) */ ++#define DMAC_CH0_BUSY_MASK 0x01 ++#define DMAC_CH0_BUSY_BIT 0 ++#define DMAC_CH1_BUSY_MASK 0x02 ++#define DMAC_CH1_BUSY_BIT 1 ++#define DMAC_CH2_BUSY_MASK 0x04 ++#define DMAC_CH2_BUSY_BIT 2 ++#define DMAC_CH3_BUSY_MASK 0x08 ++#define DMAC_CH3_BUSY_BIT 3 ++ ++/* Main configuration status register (+0x24) */ ++#define DMAC_DMACEN_MASK 0x01 ++#define DMAC_DMACEN_BIT 0 ++#define DMAC_M0ENDIAN_MASK 0x02 ++#define DMAC_M0ENDIAN_BIT 1 ++#define DMAC_M1ENDIAN_MASK 0x04 ++#define DMAC_M1ENDIAN_BIT 2 ++ ++ #define DMAC_ENDIAN_LITTLE 0 ++ #define DMAC_ENDIAN_BIG 1 ++ ++/* Sync register (+0x28) */ ++#define DMAC_SYNC0_MASK 
0x01 ++#define DMAC_SYNC0_BIT 0 ++#define DMAC_SYNC1_MASK 0x02 ++#define DMAC_SYNC1_BIT 1 ++#define DMAC_SYNC2_MASK 0x04 ++#define DMAC_SYNC2_BIT 2 ++#define DMAC_SYNC3_MASK 0x08 ++#define DMAC_SYNC3_BIT 3 ++ ++/* DMA channel 0~n Control Registers (CH[n]_BASE + 0x00) */ ++#define DMAC_CSR_CH_EN_MASK 0x00000001 ++#define DMAC_CSR_CH_EN_BIT 0 ++ ++#define DMAC_CSR_DST_SEL_MASK 0x00000002 ++#define DMAC_CSR_DST_SEL_BIT 1 ++#define DMAC_CSR_SRC_SEL_MASK 0x00000004 ++#define DMAC_CSR_SRC_SEL_BIT 2 ++ #define DMAC_CSR_SEL_MASTER0 0x00 ++ #define DMAC_CSR_SEL_MASTER1 0x01 ++ ++#define DMAC_CSR_DSTAD_CTL_MASK 0x00000018 ++#define DMAC_CSR_DSTAD_CTL_SHIFT 3 ++#define DMAC_CSR_SRCAD_CTL_MASK 0x00000060 ++#define DMAC_CSR_SRCAD_CTL_SHIFT 5 ++ #define DMAC_CSR_AD_INC 0x00 ++ #define DMAC_CSR_AD_DEC 0x01 ++ #define DMAC_CSR_AD_FIX 0x02 ++ ++#define DMAC_CSR_MODE_MASK 0x00000080 ++#define DMAC_CSR_MODE_BIT 7 ++ #define DMAC_CSR_MODE_NORMAL 0x00 ++ #define DMAC_CSR_MODE_HSHK 0x01 ++ ++#define DMAC_CSR_DST_WIDTH_MASK 0x00000700 ++#define DMAC_CSR_DST_WIDTH_SHIFT 8 ++#define DMAC_CSR_SRC_WIDTH_MASK 0x00003800 ++#define DMAC_CSR_SRC_WIDTH_SHIFT 11 ++ #define DMAC_CSR_WIDTH_8 0x00 ++ #define DMAC_CSR_WIDTH_16 0x01 ++ #define DMAC_CSR_WIDTH_32 0x02 ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++#define DMAC_CYCLE_TO_BYTES(cycle, width) ((cycle) << (width)) ++#define DMAC_BYTES_TO_CYCLE(bytes, width) ((bytes) >> (width)) ++#else ++#define DMAC_CYCLE_TO_BYTES(cycle, width) 0 ++#define DMAC_BYTES_TO_CYCLE(bytes, width) 0 ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#define DMAC_CSR_ABT 0x00008000 ++#define DMAC_CSR_ABT_BIT 15 ++ ++#define DMAC_CSR_SRC_SIZE_MASK 0x00070000 ++#define DMAC_CSR_SRC_SIZE_SHIFT 16 ++ #define DMAC_CSR_SIZE_1 0x00 ++ #define DMAC_CSR_SIZE_4 0x01 ++ #define DMAC_CSR_SIZE_8 0x02 ++ #define DMAC_CSR_SIZE_16 0x03 ++ #define DMAC_CSR_SIZE_32 0x04 ++ #define DMAC_CSR_SIZE_64 0x05 ++ #define DMAC_CSR_SIZE_128 0x06 ++ #define DMAC_CSR_SIZE_256 0x07 ++ ++#define DMAC_CSR_PROT1 0x00080000 ++#define DMAC_CSR_PROT1_BIT 19 ++#define DMAC_CSR_PROT2 0x00100000 ++#define DMAC_CSR_PROT2_BIT 20 ++#define DMAC_CSR_PROT3 0x00200000 ++#define DMAC_CSR_PROT3_BIT 21 ++ ++#define DMAC_CSR_CHPRI_MASK 0x00c00000 ++#define DMAC_CSR_CHPRI_SHIFT 22 ++ #define DMAC_CSR_CHPRI_0 0x00 ++ #define DMAC_CSR_CHPRI_1 0x01 ++ #define DMAC_CSR_CHPRI_2 0x02 ++ #define DMAC_CSR_CHPRI_3 0x03 ++ ++#define DMAC_CSR_FF_TH_MASK 0x07000000 ++#define DMAC_CSR_FF_TH_SHIFT 24 ++ #define DMAC_CSR_FF_TH_1 0x00 ++ #define DMAC_CSR_FF_TH_2 0x01 ++ #define DMAC_CSR_FF_TH_4 0x02 ++ #define DMAC_CSR_FF_TH_8 0x03 ++ #define DMAC_CSR_FF_TH_16 0x04 ++ ++#define DMAC_CSR_TC_MSK_MSK 0x80000000 ++#define DMAC_CSR_TC_MSK_BIT 31 ++ ++/* DMA channel 0~n Configuration Registers (CH[n]_BASE + 0x04) */ ++#define DMAC_CFG_INT_TC_MSK 0x00000001 ++#define DMAC_CFG_INT_TC_MSK_BIT 0 ++#define DMAC_CFG_INT_ERR_MSK 0x00000002 ++#define DMAC_CFG_INT_ERR_MSK_BIT 1 ++#define DMAC_CFG_INT_ABT_MSK 0x00000004 ++#define DMAC_CFG_INT_ABT_MSK_BIT 2 ++ ++#define DMAC_CFG_INT_SRC_RS_MASK 0x00000078 ++#define DMAC_CFG_INT_SRC_RS_SHIFT 3 ++#define DMAC_CFG_INT_SRC_HE_MASK 0x00000080 ++#define DMAC_CFG_INT_SRC_HE_BIT 7 ++ ++#define DMAC_CFG_BUSY_MASK 0x00000100 ++#define DMAC_CFG_BUSY_BIT 8 ++ ++#define DMAC_CFG_INT_DST_RS_MASK 0x00001e00 ++#define DMAC_CFG_INT_DST_RS_SHIFT 9 ++#define DMAC_CFG_INT_DST_HE_MASK 0x00002000 ++#define DMAC_CFG_INT_DST_HE_BIT 13 ++ ++#ifdef CONFIG_PLAT_AG102 ++ #define DMAC_REQN_IDERX 0 ++ #define DMAC_REQN_IDETX 1 ++ #define DMAC_REQN_I2SAC97RX 2 ++ #define 
DMAC_REQN_I2SAC97TX 3 ++ #define DMAC_REQN_UART2RX 4 ++ #define DMAC_REQN_UART2TX 5 ++ #define DMAC_REQN_UART1RX 6 ++ #define DMAC_REQN_UART1TX 7 ++ #define DMAC_REQN_SDC 8 ++ #define DMAC_REQN_CFC 9 ++ #define DMAC_REQN_LPCREQ0 10 ++ #define DMAC_REQN_LPCREQ1 11 ++ #define DMAC_REQN_LPCREQ2 12 ++ #define DMAC_REQN_LPCREQ3 13 ++ #define DMAC_REQN_NONE 14 ++ #define DMAC_REQN_LPCREQ5 15 ++ #define DMAC_REQN_MAX 15 ++#else ++ #define DMAC_REQN_NONE PMU_REQN_NONE ++ #define DMAC_REQN_CFC PMU_REQN_CFC ++ #define DMAC_REQN_SSP PMU_REQN_SSP ++ #define DMAC_REQN_UART1TX PMU_REQN_UART1TX ++ #define DMAC_REQN_UART1RX PMU_REQN_UART1RX ++ #define DMAC_REQN_UART2TX PMU_REQN_UART2TX ++ #define DMAC_REQN_UART2RX PMU_REQN_UART2RX ++ #define DMAC_REQN_SDC PMU_REQN_SDC ++ #define DMAC_REQN_I2SAC97TX PMU_REQN_I2SAC97TX ++ #define DMAC_REQN_I2SAC97RX PMU_REQN_I2SAC97RX ++/* for amerald ac97 ssp2 */ ++ #define DMAC_REQN_I2SAC97TX_AMERALD PMU_REQN_I2SAC97TX_AMERALD ++ #define DMAC_REQN_I2SAC97RX_AMERALD PMU_REQN_I2SAC97RX_AMERALD ++ #define DMAC_REQN_USB PMU_REQN_USB ++ #define DMAC_REQN_EXT0 PMU_REQN_EXT0 ++ #define DMAC_REQN_EXT1 PMU_REQN_EXT1 ++ #define DMAC_REQN_MAX PMU_REQN_MAX ++#endif ++#define DMAC_CFG_INT_LLPCNT_MASK 0x000f0000 ++#define DMAC_CFG_INT_LLPCNT_SHIFT 16 ++ ++/* DMA channel 0~n Linked List Descriptor Registers (CH[n]_BASE + 0x10) */ ++#define DMAC_LLP_ADDR_MASK 0xfffffffc ++#define DMAC_LLP_ADDR_SHIFT 2 ++#define DMAC_LLP_MASTER_MASK 0x00000001 ++#define DMAC_LLP_MASTER_BIT 0 ++ #define DMAC_LLP_MASTER_0 0 ++ #define DMAC_LLP_MASTER_1 1 ++ ++/* DMA channel 0~3 Transfer Size Registers (CH[n]_BASE + 0x14) */ ++#define DMAC_TOT_SIZE_MASK 0x003fffff ++#define DMAC_TOT_SIZE_SHIFT 0 ++ ++ ++/***************************************************************************** ++ * APBBR - AG101 AHB to APB Bridge ++*****************************************************************************/ ++/* Device base address */ ++#ifdef CONFIG_PLAT_AG102 ++#define APBBR_BASE APBBR_VA_BASE ++#else ++#define APBBR_BASE APBBRG_FTAPBBRG020S_0_VA_BASE ++#endif ++ ++/* DMA channel A registers (32-bit width) */ ++#define APBBR_DMAA_BASE (APBBR_BASE + 0x80) ++#define APBBR_DMAB_BASE (APBBR_BASE + 0x90) ++#define APBBR_DMAC_BASE (APBBR_BASE + 0xa0) ++#define APBBR_DMAD_BASE (APBBR_BASE + 0xb0) ++ ++#define APBBR_DMA_MAX_CHANNELS APBBRG_FTAPBBRG020S_IRQ_COUNT ++/* n = 0 ~ APBBRG_FTAPBBRG020S_IRQ_COUNT */ ++#define APBBR_DMA_BASE_CH(n) (APBBR_DMAA_BASE + \ ++ (APBBR_DMAB_BASE - APBBR_DMAA_BASE) * \ ++ (addr_t)(n)) ++ ++#define APBBR_DMA_SAD_OFFSET 0x00 ++#define APBBR_DMA_DAD_OFFSET 0x04 ++#define APBBR_DMA_CYC_OFFSET 0x08 ++#define APBBR_DMA_CMD_OFFSET 0x0c ++ ++ ++/***************************************************************************** ++ * APBBR defs - AG101 AHB to APB Bridge ++*****************************************************************************/ ++ ++/* APBBR slave#n (n = 1~6, 8, 11, 16~23) base/size register */ ++#define APBBR_SLAVE_SIZE_MASK 0x000f0000 /* Size of address space */ ++#define APBBR_SLAVE_SIZE_SHIFT 16 ++ #define APBBR_SIZE_1M 0 ++ #define APBBR_SIZE_2M 1 ++ #define APBBR_SIZE_4M 2 ++ #define APBBR_SIZE_8M 3 ++ #define APBBR_SIZE_16M 4 ++ #define APBBR_SIZE_32M 5 ++ #define APBBR_SIZE_64M 6 ++ #define APBBR_SIZE_128M 7 ++ #define APBBR_SIZE_256M 8 ++ ++#define APBBR_SLAVE_BASE_MASK 0x3ff00000 ++#define APBBR_SLAVE_BASE_SHIFT 20 ++ ++/* APBBR DMA channel transfer cycles register ++ * DMA cycles (data size), 1 or 4 bus data transfer cycles per DMA cycle ++ * => transfer size = cycles * 
data_width * burst(1 or 4) ++ * so, max = 16M*4*4 = 256M ++ */ ++#define APBBR_DMA_CYC_MASK 0x00ffffff ++#define APBBR_DMA_CYC_SHIFT 0 ++ ++/* APBBR DMA channel command register */ ++#define APBBR_DMA_CHEN_MASK 0x00000001 ++#define APBBR_DMA_CHEN_BIT 0 ++ ++#define APBBR_DMA_FINTST_MASK 0x00000002 ++#define APBBR_DMA_FINTST_BIT 1 ++#define APBBR_DMA_FINTEN_MASK 0x00000004 ++#define APBBR_DMA_FINTEN_BIT 2 ++ ++#define APBBR_DMA_BURST_MASK 0x00000008 ++#define APBBR_DMA_BURST_BIT 3 ++ ++#define APBBR_DMA_ERRINTST_MASK 0x00000010 ++#define APBBR_DMA_ERRINTST_BIT 4 ++#define APBBR_DMA_ERRINTEN_MASK 0x00000020 ++#define APBBR_DMA_ERRINTEN_BIT 5 ++ ++#define APBBR_DMA_SRCADDRSEL_MASK 0x00000040 ++#define APBBR_DMA_SRCADDRSEL_BIT 6 ++#define APBBR_DMA_DSTADDRSEL_MASK 0x00000080 ++#define APBBR_DMA_DSTADDRSEL_BIT 7 ++ #define APBBR_ADDRSEL_APB 0 ++ #define APBBR_ADDRSEL_AHB 1 ++ ++#define APBBR_DMA_SRCADDRINC_MASK 0x00000700 ++#define APBBR_DMA_SRCADDRINC_SHIFT 8 ++#define APBBR_DMA_DSTADDRINC_MASK 0x00007000 ++#define APBBR_DMA_DSTADDRINC_SHIFT 12 ++ #define APBBR_ADDRINC_FIXED 0 /* no increment */ ++ #define APBBR_ADDRINC_I1X 1 /* +1, +4 (burst) */ ++ #define APBBR_ADDRINC_I2X 2 /* +2, +8 (burst) */ ++ #define APBBR_ADDRINC_I4X 3 /* +4, +16 (burst) */ ++ #define APBBR_ADDRINC_D1 5 /* -1 */ ++ #define APBBR_ADDRINC_D2 6 /* -2 */ ++ #define APBBR_ADDRINC_D4 7 /* -4 */ ++ ++#define APBBR_DMA_DREQSEL_MASK 0x000f0000 ++#define APBBR_DMA_DREQSEL_SHIFT 16 ++#define APBBR_DMA_SREQSEL_MASK 0x0f000000 ++#define APBBR_DMA_SREQSEL_SHIFT 24 ++ ++#ifdef CONFIG_PLAT_AG102 ++ #define APBBR_REQN_NONE 0 ++ #define APBBR_REQN_CFC 1 ++ #define APBBR_REQN_SSP 2 ++ #define APBBR_REQN_SDC 8 ++ #define APBBR_REQN_I2SAC97TX 6 ++ #define APBBR_REQN_SSP2 8 ++ #define APBBR_REQN_STUART 9 ++ #define APBBR_REQN_BTUART 10 ++ #define APBBR_REQN_IRDA 11 ++ #define APBBR_REQN_SMMC 12 ++// #define APBBR_REQN_USB 0 ++ #define APBBR_REQN_I2SAC97RX 13 ++ #define APBBR_REQN_FUSB220 14 ++ #define APBBR_REQN_MMSC 15 ++ #define APBBR_REQN_MAX 15 ++#else ++ #define APBBR_REQN_NONE 0 ++ #define APBBR_REQN_CFC 1 ++ #define APBBR_REQN_SSP 2 ++ #define APBBR_REQN_SDC 5 ++/* for amerald sd */ ++ #define APBBR_REQN_SDC_AMERALD 7 ++/* for amerald ac97 ssp2 */ ++ #define APBBR_REQN_I2SAC97TX_AMERALD 8 ++ #define APBBR_REQN_I2SAC97RX_AMERALD 9 ++ ++ #define APBBR_REQN_I2SAC97TX 6 ++ #define APBBR_REQN_SSP2 8 ++ #define APBBR_REQN_STUART 9 /* UART1 ? */ ++ #define APBBR_REQN_BTUART 10 /* UART2 ? 
*/ ++ #define APBBR_REQN_IRDA 11 ++ #define APBBR_REQN_SMMC 12 ++ //#define APBBR_REQN_USB 13 ++ #define APBBR_REQN_I2SAC97RX 13 ++ #define APBBR_REQN_FUSB220 14 ++ #define APBBR_REQN_MMSC 15 ++ #define APBBR_REQN_MAX 15 ++#endif ++ ++#define APBBR_DMA_DATAWIDTH_MASK 0x00300000 /* Data width of transfer */ ++#define APBBR_DMA_DATAWIDTH_SHIFT 20 ++ #define APBBR_DATAWIDTH_4 0 /* word */ ++ #define APBBR_DATAWIDTH_2 1 /* half-word */ ++ #define APBBR_DATAWIDTH_1 2 /* byte */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++#define APBBR_DMA_CYCLE_TO_BYTES(cycle, width) ((cycle) << (2-(width))) ++#define APBBR_DMA_BYTES_TO_CYCLE(bytes, width) ((bytes) >> (2-(width))) ++#else ++#define APBBR_DMA_CYCLE_TO_BYTES(cycle, width) 0 ++#define APBBR_DMA_BYTES_TO_CYCLE(bytes, width) 0 ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++ ++#ifdef CONFIG_PLAT_AG102 ++ ++/***************************************************************************** ++ * PCU - AG102 Core APB ++*****************************************************************************/ ++/* Device base address */ ++#define PCU_BASE PCU_VA_BASE ++/* PMU registers (32-bit width) */ ++/* Add by Dennis on 2011.03.09 */ ++#define PCU_DMA_SEL (PCU_BASE+ 0x38) ++ ++#else /* CONFIG_PLAT_AG102 */ ++ ++/***************************************************************************** ++ * PMU - AG101 Core APB ++*****************************************************************************/ ++/* Device base address */ ++#define PMU_BASE PMU_FTPMU010_0_VA_BASE ++ ++/* PMU registers (32-bit width) */ ++#define PMU_AHBDMA_REQACK (PMU_BASE + 0x90) ++ ++#define PMU_CFC_REQACK_CFG (PMU_BASE + 0xa0) ++#define PMU_SSP1_REQACK_CFG (PMU_BASE + 0xa4) ++#define PMU_UART1TX_REQACK_CFG (PMU_BASE + 0xa8) ++#define PMU_UART1RX_REQACK_CFG (PMU_BASE + 0xac) ++#define PMU_UART2TX_REQACK_CFG (PMU_BASE + 0xb0) ++#define PMU_UART2RX_REQACK_CFG (PMU_BASE + 0xb4) ++#define PMU_SDC_REQACK_CFG (PMU_BASE + 0xb8) ++#define PMU_I2SAC97TX_REQACK_CFG (PMU_BASE + 0xbc) ++#define PMU_I2SAC97RX_REQACK_CFG (PMU_BASE + 0xc4) ++#define PMU_UART3TX_REQACK_CFG (PMU_BASE + 0xc0) ++#define PMU_UART3RX_REQACK_CFG (PMU_BASE + 0xcc) ++#define PMU_USB_REQACK_CFG (PMU_BASE + 0xc8) ++#define PMU_IRDA_REQACK_CFG (PMU_BASE + 0xd0) ++#define PMU_EXT0_REQACK_CFG (PMU_BASE + 0xd4) ++#define PMU_EXT1_REQACK_CFG (PMU_BASE + 0xd8) ++ ++ ++/***************************************************************************** ++ * PMU - AG101 Core APB ++*****************************************************************************/ ++ ++/* Driving capability and slew rate control register 2 (+0x48) */ ++#define PMU_STUART_DCSR_MASK 0x0000000f ++#define PMU_STUART_DCSR_SHIFT 0 ++#define PMU_BTUART_DCSR_MASK 0x00000f00 ++#define PMU_BTUART_DCSR_SHIFT 8 ++/*#define PMU_FFUART_DCSR_MASK 0x0000f000*/ ++/*#define PMU_FFUART_DCSR_SHIFT 12 */ ++#define PMU_PMU_DCSR_MASK 0x000f0000 ++#define PMU_PMU_DCSR_SHIFT 16 ++#define PMU_I2SAC97_DCSR_MASK 0x00f00000 ++#define PMU_I2SAC97_DCSR_SHIFT 20 ++#define PMU_SSP_DCSR_MASK 0x0f000000 ++#define PMU_SSP_DCSR_SHIFT 24 ++#define PMU_SD_DCSR_MASK 0xf0000000 ++#define PMU_SD_DCSR_SHIFT 28 ++ ++/* AHB DMA REQ/ACK connection configuration status register (+0x90) */ ++#define PMU_CH0_REQACK_MASK 0x0000000f ++#define PMU_CH0_REQACK_SHIFT 0 ++#define PMU_CH1_REQACK_MASK 0x000000f0 ++#define PMU_CH1_REQACK_SHIFT 4 ++#define PMU_CH2_REQACK_MASK 0x00000f00 ++#define PMU_CH2_REQACK_SHIFT 8 ++#define PMU_CH3_REQACK_MASK 0x0000f000 ++#define PMU_CH3_REQACK_SHIFT 12 ++#define PMU_CH4_REQACK_MASK 0x000f0000 ++#define 
PMU_CH4_REQACK_SHIFT 16 ++#define PMU_CH5_REQACK_MASK 0x00f00000 ++#define PMU_CH5_REQACK_SHIFT 20 ++#define PMU_CH6_REQACK_MASK 0x0f000000 ++#define PMU_CH6_REQACK_SHIFT 24 ++#define PMU_CH7_REQACK_MASK 0xf0000000 ++#define PMU_CH7_REQACK_SHIFT 28 ++ ++ #define PMU_REQN_NONE 0 ++ #define PMU_REQN_CFC 1 ++ #define PMU_REQN_SSP 2 ++ #define PMU_REQN_UART1TX 3 ++ #define PMU_REQN_UART1RX 4 ++ #define PMU_REQN_UART2TX 5 ++ #define PMU_REQN_UART2RX 6 ++ #define PMU_REQN_SDC 7 ++ #define PMU_REQN_I2SAC97TX 8 ++ #define PMU_REQN_I2SAC97RX 10 ++/* for amerald ac97 ssp2 */ ++ #define PMU_REQN_I2SAC97TX_AMERALD 8 ++ #define PMU_REQN_I2SAC97RX_AMERALD 9 ++ #define PMU_REQN_USB 11 ++ #define PMU_REQN_EXT0 14 ++ #define PMU_REQN_EXT1 15 ++ #define PMU_REQN_MAX 15 ++ ++/* CFC ..., etc, REQ/ACK connection configuration registers (0xa0 ~ 0xd8) */ ++#define PMU_CHANNEL_MASK 0x00000007 ++#define PMU_CHANNEL_SHIFT 0 ++#define PMU_DMACUSED_MASK 0x00000008 ++#define PMU_DMACUSED_BIT 3 ++ ++#endif /* CONFIG_PLAT_AG102 */ ++ ++ ++/***************************************************************************** ++ * DMAD globals section ++ */ ++ ++enum DMAD_DMAC_CORE { ++ DMAD_DMAC_AHB_CORE, ++ DMAD_DMAC_APB_CORE ++}; ++ ++enum DMAD_CHREG_FLAGS { ++ DMAD_FLAGS_NON_BLOCK = 0x00000000, ++ DMAD_FLAGS_SLEEP_BLOCK = 0x00000001, ++ DMAD_FLAGS_SPIN_BLOCK = 0x00000002, ++ DMAD_FLAGS_RING_MODE = 0x00000008, /* ring submission mode */ ++ DMAD_FLAGS_BIDIRECTION = 0x00000010, /* indicates both tx and rx */ ++}; ++ ++enum DMAD_CHDIR ++{ ++ DMAD_DIR_A0_TO_A1 = 0, ++ DMAD_DIR_A1_TO_A0 = 1, ++}; ++ ++/* AHB Channel Request ++ * ++ * Notes for developers: ++ * These should be channel-only properties. Controller-specific properties ++ * should be separated as other driver structure or driver buildin-hardcode. ++ * If controller properties are embeded in this union, request for a channel ++ * may unexpectedly override the controller setting of the request of other ++ * channels. ++ */ ++typedef struct dmad_ahb_chreq ++{ ++ /* channel property */ ++ u32 sync; /* (in) different clock domain */ ++ u32 priority; /* (in) DMAC_CSR_CHPRI_xxx */ ++ u32 hw_handshake; /* (in) hardware handshaking on/off */ ++ u32 burst_size; /* (in) DMAC_CSR_SIZE_xxx */ ++ ++ /* source property */ ++ union { ++ u32 src_width; /* (in) DMAC_CSR_WIDTH_xxx */ ++ u32 addr0_width; /* (in) bi-direction mode alias */ ++ u32 ring_width; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 src_ctrl; /* (in) DMAC_CSR_AD_xxx */ ++ u32 addr0_ctrl; /* (in) bi-direction mode alias */ ++ u32 ring_ctrl; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 src_reqn; /* (in) DMAC_REQN_xxx */ ++ u32 addr0_reqn; /* (in) bi-direction mode alias */ ++ u32 ring_reqn; /* (in) ring-mode alias */ ++ }; ++ ++ /* destination property */ ++ union { ++ u32 dst_width; /* (in) DMAC_CSR_WIDTH_xxx */ ++ u32 addr1_width; /* (in) bi-direction mode alias */ ++ u32 dev_width; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 dst_ctrl; /* (in) DMAC_CSR_AD_xxx */ ++ u32 addr1_ctrl; /* (in) bi-direction mode alias */ ++ u32 dev_ctrl; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 dst_reqn; /* (in) DMAC_REQN_xxx */ ++ u32 addr1_reqn; /* (in) bi-direction mode alias */ ++ u32 dev_reqn; /* (in) ring-mode alias */ ++ }; ++ ++ /* (in) transfer direction, valid only if following flags were set ... 
++ * DMAD_FLAGS_BIDIRECTION or ++ * DMAD_FLAGS_RING_MODE ++ * value: ++ * 0 (addr0 -> addr1, or ring-buff to device) ++ * 1 (addr0 <- addr1, or device to ring-buff) ++ */ ++ u32 tx_dir; ++ ++} dmad_ahb_chreq; ++ ++/* APB Channel Request ++ * ++ * Notes for developers: ++ * These should be channel-only properties. Controller-specific properties ++ * should be separated as other driver structure or driver buildin-hardcode. ++ * If controller properties are embeded in this union, request for a channel ++ * may unexpectedly override the controller setting of the request of other ++ * channels. ++ */ ++typedef struct dmad_apb_chreq ++{ ++ /* controller property (removed! should not exist in this struct) */ ++ ++ /* channel property */ ++ u32 burst_mode; /* (in) Burst mode (0/1) */ ++ u32 data_width; /* (in) APBBR_DATAWIDTH_xxx */ ++ ++ /* source property */ ++ union { ++ u32 src_ctrl; /* (in) APBBR_ADDRINC_xxx */ ++ u32 addr0_ctrl; /* (in) bi-direction mode alias */ ++ u32 ring_ctrl; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 src_reqn; /* (in) APBBR_REQN_xxx */ ++ u32 addr0_reqn; /* (in) bi-direction mode alias */ ++ u32 ring_reqn; /* (in) ring-mode alias */ ++ }; ++ ++ /* destination property */ ++ union { ++ u32 dst_ctrl; /* (in) APBBR_ADDRINC_xxx */ ++ u32 addr1_ctrl; /* (in) bi-direction mode alias */ ++ u32 dev_ctrl; /* (in) ring-mode alias */ ++ }; ++ union { ++ u32 dst_reqn; /* (in) APBBR_REQN_xxx */ ++ u32 addr1_reqn; /* (in) bi-direction mode alias */ ++ u32 dev_reqn; /* (in) ring-mode alias */ ++ }; ++ ++ /* (in) transfer direction, valid only if following flags were set ... ++ * DMAD_FLAGS_BIDIRECTION or ++ * DMAD_FLAGS_RING_MODE ++ * value: ++ * 0 (addr0 -> addr1, or ring-buff to device) ++ * 1 (addr0 <- addr1, or device to ring-buff) ++ */ ++ u32 tx_dir; ++ ++} dmad_apb_chreq; ++ ++/* Channel Request Descriptor */ ++typedef struct dmad_chreq ++{ ++ /* common fields */ ++ u32 controller; /* (in) enum DMAD_DMAC_CORE */ ++ u32 flags; /* (in) enum DMAD_CHREQ_FLAGS */ ++ ++ /********************************************************************** ++ * ring mode specific fields (valid only for DMAD_FLAGS_RING_MODE) ++ * note: ++ * - size fields are in unit of data width ++ * * for AHB, ring size is limited to 4K * data_width of data if ++ * hw-LLP is not used ++ * * for AHB, ring size is limited to 4K * data_width * LLP-count ++ * hw-if LLP is used ++ * * for APB, ring size is limited to 16M * data_width of data ++ * - currently sw ring mode dma supports only fixed or incremental ++ * src/dst addressing ++ * - ring_size shoule >= periods * period_size ++ */ ++ dma_addr_t ring_base; /* (in) ring buffer base (pa) */ ++ dma_addr_t ring_size; /* (in) unit of data width */ ++ addr_t dev_addr; /* (in) device data port address */ ++ dma_addr_t periods; /* (in) number of ints per ring */ ++ dma_addr_t period_size; /* (in) size per int, data-width */ ++ ++ ++ /* channel-wise completion callback - called when hw-ptr catches sw-ptr ++ * (i.e., channel stops) ++ * ++ * completion_cb: (in) client supplied callback function, executed in ++ * interrupt context. ++ * completion_data: (in) client private data to be passed to data ++ * argument of completion_cb(). 
++ */ ++ void (*completion_cb)(int channel, u16 status, void *data); ++ void *completion_data; ++ /*********************************************************************/ ++ ++ /* channel allocation output */ ++ u32 channel; /* (out) allocated channel */ ++ void *drq; /* (out) internal use (DMAD_DRQ *)*/ ++ ++ /* channel-alloc parameters (channel-wise properties) */ ++ union { ++#ifdef CONFIG_PLATFORM_AHBDMA ++ dmad_ahb_chreq ahb_req; /* (in) for AHB DMA parameters */ ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ dmad_apb_chreq apb_req; /* (in) APB Bridge DMA params */ ++#endif ++ }; ++ ++} dmad_chreq; ++ ++/* drb states are mutual exclusive */ ++enum DMAD_DRB_STATE ++{ ++ DMAD_DRB_STATE_FREE = 0, ++ DMAD_DRB_STATE_READY = 0x00000001, ++ DMAD_DRB_STATE_SUBMITTED = 0x00000002, ++ DMAD_DRB_STATE_EXECUTED = 0x00000004, ++ DMAD_DRB_STATE_COMPLETED = 0x00000008, ++ //DMAD_DRB_STATE_ERROR = 0x00000010, ++ DMAD_DRB_STATE_ABORT = 0x00000020, ++}; ++ ++/* DMA request block ++ * todo: replaced link with kernel struct list_head ?? ++ */ ++typedef struct dmad_drb ++{ ++ u32 prev; /* (internal) previous node */ ++ u32 next; /* (internal) next node */ ++ u32 node; /* (internal) this node */ ++ ++ u32 state; /* (out) DRB's current state */ ++ ++ union { ++ dma_addr_t src_addr; /* (in) source pa */ ++ dma_addr_t addr0; /* (in) bi-direction mode alias */ ++ }; ++ ++ union { ++ dma_addr_t dst_addr; /* (in) destination pa */ ++ dma_addr_t addr1; /* (in) bi-direction mode alias */ ++ }; ++ ++ /* (in) AHB DMA (22 bits): 0 ~ 4M-1, unit is "data width" ++ * APB DMA (24 bits): 0 ~ 16M-1, unit is "data width * burst size" ++ * => for safe without mistakes, use dmad_make_req_cycles() to ++ * compose this value if the addressing mode is incremental ++ * mode (not working yet for decremental mode). ++ */ ++ dma_addr_t req_cycle; ++ ++ /* (in) if non-null, this sync object will be signaled upon dma ++ * completion (for blocked-waiting dma completion) ++ */ ++ struct completion *sync; ++ ++} dmad_drb; ++ ++ ++/****************************************************************************** ++ * Debug Trace Mechanism ++ */ ++#if (DMAD_ERROR_TRACE) ++#define dmad_err(format, arg...) printk(KERN_ERR format , ## arg) ++#else ++#define dmad_err(format, arg...) (void)(0) ++#endif ++ ++#if (DMAD_DEBUG_TRACE) ++#define dmad_dbg(format, arg...) printk(KERN_INFO format , ## arg) ++#else ++#define dmad_dbg(format, arg...) 
(void)(0) ++#endif ++ ++#if (defined(CONFIG_PLATFORM_AHBDMA) || defined(CONFIG_PLATFORM_APBDMA)) ++ ++/****************************************************************************** ++ * DMAD Driver Interface ++******************************************************************************/ ++ ++extern int dmad_channel_alloc(dmad_chreq *ch_req); ++extern int dmad_channel_free(dmad_chreq *ch_req); ++extern int dmad_channel_enable(const dmad_chreq *ch_req, u8 enable); ++extern u32 dmad_max_size_per_drb(dmad_chreq *ch_req); ++extern u32 dmad_bytes_to_cycles(dmad_chreq *ch_req, u32 byte_size); ++ ++extern int dmad_kickoff_requests(dmad_chreq *ch_req); ++extern int dmad_drain_requests(dmad_chreq *ch_req, u8 shutdown); ++ ++/* for performance reason, these two functions are platform-specific */ ++#ifdef CONFIG_PLATFORM_AHBDMA ++extern int dmad_probe_irq_source_ahb(void); ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++extern int dmad_probe_irq_source_apb(void); ++#endif ++ ++/* note: hw_ptr here is phyical address of dma source or destination */ ++extern dma_addr_t dmad_probe_hw_ptr_src(dmad_chreq *ch_req); ++extern dma_addr_t dmad_probe_hw_ptr_dst(dmad_chreq *ch_req); ++ ++/***************************************************************************** ++ * routines only valid in discrete (non-ring) mode ++ */ ++extern int dmad_config_channel_dir(dmad_chreq *ch_req, u8 dir); ++extern int dmad_alloc_drb(dmad_chreq *ch_req, dmad_drb **drb); ++extern int dmad_free_drb(dmad_chreq *ch_req, dmad_drb *drb); ++extern int dmad_submit_request(dmad_chreq *ch_req, ++ dmad_drb *drb, u8 keep_fired); ++extern int dmad_withdraw_request(dmad_chreq *ch_req, dmad_drb *drb); ++/****************************************************************************/ ++ ++/***************************************************************************** ++ * routines only valid in ring mode ++ * note: sw_ptr and hw_ptr are values offset from the ring buffer base ++ * unit of sw_ptr is data-width ++ * unit of hw_ptr returned is byte ++ */ ++extern int dmad_update_ring(dmad_chreq *ch_req); ++extern int dmad_update_ring_sw_ptr(dmad_chreq *ch_req, ++ dma_addr_t sw_ptr, u8 keep_fired); ++extern dma_addr_t dmad_probe_ring_hw_ptr(dmad_chreq *ch_req); ++/****************************************************************************/ ++ ++#else /* CONFIG_PLATFORM_AHBDMA || CONFIG_PLATFORM_APBDMA */ ++ ++static inline int dmad_channel_alloc(dmad_chreq *ch_req) { return -EFAULT; } ++static inline int dmad_channel_free(dmad_chreq *ch_req) { return -EFAULT; } ++static inline int dmad_channel_enable(const dmad_chreq *ch_req, u8 enable) ++ { return -EFAULT; } ++static inline u32 dmad_max_size_per_drb(dmad_chreq *ch_req) { return 0; } ++static inline u32 dmad_bytes_to_cycles(dmad_chreq *ch_req, u32 byte_size) ++ { return 0; } ++static inline int dmad_kickoff_requests(dmad_chreq *ch_req) { return -EFAULT; } ++static inline int dmad_drain_requests(dmad_chreq *ch_req, u8 shutdown) ++ { return -EFAULT; } ++static inline int dmad_probe_irq_source_ahb(void) { return -EFAULT; } ++static inline int dmad_probe_irq_source_apb(void) { return -EFAULT; } ++static inline dma_addr_t dmad_probe_hw_ptr_src(dmad_chreq *ch_req) ++ { return (dma_addr_t)NULL; } ++static inline dma_addr_t dmad_probe_hw_ptr_dst(dmad_chreq *ch_req) ++ { return (dma_addr_t)NULL; } ++static inline int dmad_config_channel_dir(dmad_chreq *ch_req, u8 dir) ++ { return -EFAULT; } ++static inline int dmad_alloc_drb(dmad_chreq *ch_req, dmad_drb **drb) ++ { return -EFAULT; } ++static inline int 
dmad_free_drb(dmad_chreq *ch_req, dmad_drb *drb) ++ { return -EFAULT; } ++static inline int dmad_submit_request(dmad_chreq *ch_req, ++ dmad_drb *drb, u8 keep_fired) { return -EFAULT; } ++static inline int dmad_withdraw_request(dmad_chreq *ch_req, dmad_drb *drb) ++ { return -EFAULT; } ++static inline int dmad_update_ring(dmad_chreq *ch_req) ++ { return -EFAULT; } ++static inline int dmad_update_ring_sw_ptr(dmad_chreq *ch_req, ++ dma_addr_t sw_ptr, u8 keep_fired) { return -EFAULT; } ++static inline dma_addr_t dmad_probe_ring_hw_ptr(dmad_chreq *ch_req) ++ { return (dma_addr_t)NULL; } ++ ++#endif /* CONFIG_PLATFORM_AHBDMA || CONFIG_PLATFORM_APBDMA */ ++ ++#endif /* __NDS_DMAD_INC__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/dma.h linux-3.4.110/arch/nds32/include/asm/dma.h +--- linux-3.4.110.orig/arch/nds32/include/asm/dma.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/dma.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,17 @@ ++/* ++ * linux/arch/nds32/include/asm/dma.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_DMA_H__ ++#define __NDS32_DMA_H__ ++ ++#define MAX_DMA_ADDRESS 0xffffffff ++ ++#ifdef CONFIG_PCI ++extern int isa_dma_bridge_buggy; ++#else ++#define isa_dma_bridge_buggy (0) ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/dma-mapping.h linux-3.4.110/arch/nds32/include/asm/dma-mapping.h +--- linux-3.4.110.orig/arch/nds32/include/asm/dma-mapping.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/dma-mapping.h 2016-04-07 10:20:50.894079168 +0200 +@@ -0,0 +1,453 @@ ++/* ++ * linux/arch/nds32/include/asm/dma-mapping.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef ASMNDS32_DMA_MAPPING_H ++#define ASMNDS32_DMA_MAPPING_H ++ ++#ifdef __KERNEL__ ++ ++#include /* need struct page */ ++#include ++ ++#include ++ ++/* ++ * DMA-consistent mapping functions. These allocate/free a region of ++ * uncached, unwrite-buffered mapped memory space for use with DMA ++ * devices. This is the "generic" version. The PCI specific version ++ * is in pci.h ++ */ ++extern void consistent_sync(void *kaddr, size_t size, int rw); ++ ++/* ++ * Return whether the given device DMA address mask can be supported ++ * properly. For example, if your device can only drive the low 24-bits ++ * during bus mastering, then you would pass 0x00ffffff as the mask ++ * to this function. ++ */ ++static inline int dma_supported(struct device *dev, u64 mask) ++{ ++ return dev->dma_mask && *dev->dma_mask != 0; ++} ++ ++static inline int dma_set_mask(struct device *dev, u64 dma_mask) ++{ ++ if (!dev->dma_mask || !dma_supported(dev, dma_mask)) ++ return -EIO; ++ ++ *dev->dma_mask = dma_mask; ++ ++ return 0; ++} ++ ++static inline int dma_is_consistent(dma_addr_t handle) ++{ ++ return 0; ++} ++ ++/* ++ * DMA errors are defined by all-bits-set in the DMA address. ++ */ ++static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ++{ ++ return dma_addr == ~0; ++} ++ ++/* ++ * Dummy noncoherent implementation. We don't provide a dma_cache_sync ++ * function so drivers using this API are highlighted with build warnings. 
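A short driver-side sketch of the mask negotiation implemented above; DMA_BIT_MASK() and dev_warn() come from the generic kernel headers, not from this file:

static int example_set_24bit_mask(struct device *dev)
{
        /* a device that can only drive the low 24 address bits, as in the
         * dma_supported() comment above */
        if (dma_set_mask(dev, DMA_BIT_MASK(24))) {
                dev_warn(dev, "no suitable DMA addressing available\n");
                return -EIO;
        }
        return 0;
}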
++ */ ++static inline void * ++dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) ++{ ++ return NULL; ++} ++ ++static inline void ++dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, ++ dma_addr_t handle) ++{ ++} ++ ++/** ++ * dma_alloc_coherent - allocate consistent memory for DMA ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @size: required memory size ++ * @handle: bus-specific DMA address ++ * ++ * Allocate some uncached, unbuffered memory for a device for ++ * performing DMA. This function allocates pages, and will ++ * return the CPU-viewed address, and sets @handle to be the ++ * device-viewed address. ++ */ ++extern void * ++dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); ++ ++/** ++ * dma_free_coherent - free memory allocated by dma_alloc_coherent ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @size: size of memory originally requested in dma_alloc_coherent ++ * @cpu_addr: CPU-view address returned from dma_alloc_coherent ++ * @handle: device-view address returned from dma_alloc_coherent ++ * ++ * Free (and unmap) a DMA buffer previously allocated by ++ * dma_alloc_coherent(). ++ * ++ * References to memory and mappings associated with cpu_addr/handle ++ * during and after this call executing are illegal. ++ */ ++extern void ++dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, ++ dma_addr_t handle); ++ ++/** ++ * dma_mmap_coherent - map a coherent DMA allocation into user space ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @vma: vm_area_struct describing requested user mapping ++ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent ++ * @handle: device-view address returned from dma_alloc_coherent ++ * @size: size of memory originally requested in dma_alloc_coherent ++ * ++ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent ++ * into user space. The coherent DMA buffer must not be freed by the ++ * driver until the user space mapping has been released. ++ */ ++int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, ++ void *cpu_addr, dma_addr_t handle, size_t size); ++ ++ ++/** ++ * dma_alloc_writecombine - allocate writecombining memory for DMA ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @size: required memory size ++ * @handle: bus-specific DMA address ++ * ++ * Allocate some uncached, buffered memory for a device for ++ * performing DMA. This function allocates pages, and will ++ * return the CPU-viewed address, and sets @handle to be the ++ * device-viewed address. ++ */ ++extern void * ++dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); ++ ++#define dma_free_writecombine(dev,size,cpu_addr,handle) \ ++ dma_free_coherent(dev,size,cpu_addr,handle) ++ ++int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, ++ void *cpu_addr, dma_addr_t handle, size_t size); ++ ++ ++/** ++ * dma_map_single - map a single buffer for streaming DMA ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @cpu_addr: CPU direct mapped address of buffer ++ * @size: size of buffer to map ++ * @dir: DMA transfer direction ++ * ++ * Ensure that any data held in the cache is appropriately discarded ++ * or written back. ++ * ++ * The device owns this memory once this call has completed. 
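A minimal sketch of the alloc/free pairing documented above, using an illustrative one-page descriptor ring:

static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
        /* uncached, unbuffered memory shared with the device; the CPU uses the
         * returned pointer, the hardware is programmed with *ring_dma */
        return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring, dma_addr_t ring_dma)
{
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}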
The CPU ++ * can regain ownership by calling dma_unmap_single() or ++ * dma_sync_single_for_cpu(). ++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline dma_addr_t ++dma_map_single(struct device *dev, void *cpu_addr, size_t size, ++ enum dma_data_direction dir) ++{ ++ consistent_sync(cpu_addr, size, dir); ++ return virt_to_dma(dev, (unsigned long)cpu_addr); ++} ++#else ++extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction); ++#endif ++ ++/** ++ * dma_map_page - map a portion of a page for streaming DMA ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @page: page that buffer resides in ++ * @offset: offset into page for start of buffer ++ * @size: size of buffer to map ++ * @dir: DMA transfer direction ++ * ++ * Ensure that any data held in the cache is appropriately discarded ++ * or written back. ++ * ++ * The device owns this memory once this call has completed. The CPU ++ * can regain ownership by calling dma_unmap_page() or ++ * dma_sync_single_for_cpu(). ++ */ ++static inline dma_addr_t ++dma_map_page(struct device *dev, struct page *page, ++ unsigned long offset, size_t size, ++ enum dma_data_direction dir) ++{ ++ return dma_map_single(dev, page_address(page) + offset, size, (int)dir); ++} ++ ++/** ++ * dma_unmap_single - unmap a single buffer previously mapped ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @handle: DMA address of buffer ++ * @size: size of buffer to map ++ * @dir: DMA transfer direction ++ * ++ * Unmap a single streaming mode DMA translation. The handle and size ++ * must match what was provided in the previous dma_map_single() call. ++ * All other usages are undefined. ++ * ++ * After this call, reads by the CPU to the buffer are guaranteed to see ++ * whatever the device wrote there. ++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline void ++dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, ++ enum dma_data_direction dir) ++{ ++ /* nothing to do */ ++} ++#else ++extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction); ++#endif ++ ++/** ++ * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @handle: DMA address of buffer ++ * @size: size of buffer to map ++ * @dir: DMA transfer direction ++ * ++ * Unmap a single streaming mode DMA translation. The handle and size ++ * must match what was provided in the previous dma_map_single() call. ++ * All other usages are undefined. ++ * ++ * After this call, reads by the CPU to the buffer are guaranteed to see ++ * whatever the device wrote there. ++ */ ++static inline void ++dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, ++ enum dma_data_direction dir) ++{ ++ dma_unmap_single(dev, handle, size, (int)dir); ++} ++ ++/** ++ * dma_map_sg - map a set of SG buffers for streaming mode DMA ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @sg: list of buffers ++ * @nents: number of buffers to map ++ * @dir: DMA transfer direction ++ * ++ * Map a set of buffers described by scatterlist in streaming ++ * mode for DMA. This is the scatter-gather version of the ++ * above dma_map_single interface. Here the scatter gather list ++ * elements are each tagged with the appropriate dma address ++ * and length. They are obtained via sg_dma_{address,length}(SG). 
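A sketch of the scatter-gather flow described above; example_program_desc() is a hypothetical per-descriptor hook, not part of this patch:

static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
        int i, count;

        count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (!count)
                return -EIO;
        for (i = 0; i < count; i++)     /* count may be smaller than nents */
                example_program_desc(i, sg_dma_address(&sgl[i]),
                                     sg_dma_len(&sgl[i]));
        /* ... once the transfer has completed: */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}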
++ * ++ * NOTE: An implementation may be able to use a smaller number of ++ * DMA address/length pairs than there are SG table elements. ++ * (for example via virtual mapping capabilities) ++ * The routine returns the number of addr/length pairs actually ++ * used, at most nents. ++ * ++ * Device ownership issues as mentioned above for dma_map_single are ++ * the same here. ++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline int ++dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ int i; ++ ++ for (i = 0; i < nents; i++, sg++) { ++ void *virt; ++ unsigned long pfn; ++ struct page *page = sg_page(sg); ++ ++ sg->dma_address = page_to_dma(dev, page) + sg->offset; ++ pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE; ++ page = pfn_to_page(pfn); ++ if (PageHighMem(page)) { ++ virt = kmap_atomic(page); ++ consistent_sync(virt, sg->length, dir); ++ kunmap_atomic(virt); ++ } else { ++ if (sg->offset > PAGE_SIZE) ++ panic("sg->offset:%08x > PAGE_SIZE\n", sg->offset); ++ virt = page_address(page) + sg->offset; ++ consistent_sync(virt, sg->length, dir); ++ } ++ } ++ return nents; ++} ++#else ++extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); ++#endif ++ ++/** ++ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @sg: list of buffers ++ * @nents: number of buffers to map ++ * @dir: DMA transfer direction ++ * ++ * Unmap a set of streaming mode DMA translations. ++ * Again, CPU read rules concerning calls here are the same as for ++ * dma_unmap_single() above. ++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline void ++dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ ++ /* nothing to do */ ++} ++#else ++extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); ++#endif ++ ++ ++/** ++ * dma_sync_single_for_cpu ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @handle: DMA address of buffer ++ * @size: size of buffer to map ++ * @dir: DMA transfer direction ++ * ++ * Make physical memory consistent for a single streaming mode DMA ++ * translation after a transfer. ++ * ++ * If you perform a dma_map_single() but wish to interrogate the ++ * buffer using the cpu, yet do not wish to teardown the PCI dma ++ * mapping, you must call this function before doing so. At the ++ * next point you give the PCI dma address back to the card, you ++ * must first the perform a dma_sync_for_device, and then the ++ * device again owns the buffer. 
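A sketch of the ownership hand-off described above, assuming a hypothetical receive buffer and frame consumer:

static void example_poll_rx(struct device *dev, dma_addr_t handle,
                            u8 *buf, size_t len)
{
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        if (buf[0] != 0)                        /* hypothetical "frame ready" marker */
                example_handle_frame(buf, len); /* hypothetical consumer */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}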
++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline void ++dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, ++ enum dma_data_direction dir) ++{ ++ consistent_sync((void *)dma_to_virt(dev, handle), size, dir); ++} ++ ++static inline void ++dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, ++ enum dma_data_direction dir) ++{ ++ consistent_sync((void *)dma_to_virt(dev, handle), size, dir); ++} ++#else ++extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction); ++extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction); ++#endif ++ ++ ++/** ++ * dma_sync_sg_for_cpu ++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices ++ * @sg: list of buffers ++ * @nents: number of buffers to map ++ * @dir: DMA transfer direction ++ * ++ * Make physical memory consistent for a set of streaming ++ * mode DMA translations after a transfer. ++ * ++ * The same as dma_sync_single_for_* but for a scatter-gather list, ++ * same rules and usage. ++ */ ++#ifndef CONFIG_DMABOUNCE ++static inline void ++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ int i; ++ ++ for (i = 0; i < nents; i++, sg++) { ++ char *virt = page_address( (struct page *)sg->page_link) + sg->offset; ++ consistent_sync(virt, sg->length, dir); ++ } ++} ++ ++static inline void ++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, ++ enum dma_data_direction dir) ++{ ++ int i; ++ ++ for (i = 0; i < nents; i++, sg++) { ++ char *virt = page_address( (struct page *)sg->page_link) + sg->offset; ++ consistent_sync(virt, sg->length, dir); ++ } ++} ++#else ++extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction); ++extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction); ++#endif ++ ++#ifdef CONFIG_DMABOUNCE ++/* ++ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" ++ * and utilize bounce buffers as needed to work around limited DMA windows. ++ * ++ * On the SA-1111, a bug limits DMA to only certain regions of RAM. ++ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM) ++ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM) ++ * ++ * The following are helper functions used by the dmabounce subystem ++ * ++ */ ++ ++/** ++ * dmabounce_register_dev ++ * ++ * @dev: valid struct device pointer ++ * @small_buf_size: size of buffers to use with small buffer pool ++ * @large_buf_size: size of buffers to use with large buffer pool (can be 0) ++ * ++ * This function should be called by low-level platform code to register ++ * a device as requireing DMA buffer bouncing. The function will allocate ++ * appropriate DMA pools for the device. ++ * ++ */ ++extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long); ++ ++/** ++ * dmabounce_unregister_dev ++ * ++ * @dev: valid struct device pointer ++ * ++ * This function should be called by low-level platform code when device ++ * that was previously registered with dmabounce_register_dev is removed ++ * from the system. 
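A sketch of how platform code might use these hooks; the pool sizes and the 64MB inbound-window limit are illustrative, and dma_needs_bounce() is the platform hook declared further below:

static int example_platform_dma_init(struct device *dev)
{
        return dmabounce_register_dev(dev, 512, 4096);
}

int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
        /* bounce anything that falls outside a 64MB (0x04000000) window */
        return (addr + size) > 0x04000000;
}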
++ * ++ */ ++extern void dmabounce_unregister_dev(struct device *); ++ ++/** ++ * dma_needs_bounce ++ * ++ * @dev: valid struct device pointer ++ * @dma_handle: dma_handle of unbounced buffer ++ * @size: size of region being mapped ++ * ++ * Platforms that utilize the dmabounce mechanism must implement ++ * this function. ++ * ++ * The dmabounce routines call this function whenever a dma-mapping ++ * is requested to determine whether a given buffer needs to be bounced ++ * or not. The function must return 0 if the buffer is OK for ++ * DMA access and 1 if the buffer needs to be bounced. ++ * ++ */ ++extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); ++#endif /* CONFIG_DMABOUNCE */ ++ ++#endif /* __KERNEL__ */ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/elf.h linux-3.4.110/arch/nds32/include/asm/elf.h +--- linux-3.4.110.orig/arch/nds32/include/asm/elf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/elf.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,145 @@ ++/* ++ * linux/arch/nds32/include/asm/elf.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASMNDS32_ELF_H ++#define __ASMNDS32_ELF_H ++ ++/* ++ * ELF register definitions.. ++ */ ++ ++#include ++#include ++ ++typedef unsigned long elf_greg_t; ++typedef unsigned long elf_freg_t[3]; ++ ++extern unsigned int elf_hwcap; ++ ++#define EM_NDS32 167 ++ ++ ++#define R_NDS32_NONE 0 ++#define R_NDS32_16_RELA 19 ++#define R_NDS32_32_RELA 20 ++#define R_NDS32_9_PCREL_RELA 22 ++#define R_NDS32_15_PCREL_RELA 23 ++#define R_NDS32_17_PCREL_RELA 24 ++#define R_NDS32_25_PCREL_RELA 25 ++#define R_NDS32_HI20_RELA 26 ++#define R_NDS32_LO12S3_RELA 27 ++#define R_NDS32_LO12S2_RELA 28 ++#define R_NDS32_LO12S1_RELA 29 ++#define R_NDS32_LO12S0_RELA 30 ++#define R_NDS32_SDA15S3_RELA 31 ++#define R_NDS32_SDA15S2_RELA 32 ++#define R_NDS32_SDA15S1_RELA 33 ++#define R_NDS32_SDA15S0_RELA 34 ++#define R_NDS32_GOT20 37 ++#define R_NDS32_25_PLTREL 38 ++#define R_NDS32_COPY 39 ++#define R_NDS32_GLOB_DAT 40 ++#define R_NDS32_JMP_SLOT 41 ++#define R_NDS32_RELATIVE 42 ++#define R_NDS32_GOTOFF 43 ++#define R_NDS32_GOTPC20 44 ++#define R_NDS32_GOT_HI20 45 ++#define R_NDS32_GOT_LO12 46 ++#define R_NDS32_GOTPC_HI20 47 ++#define R_NDS32_GOTPC_LO12 48 ++#define R_NDS32_GOTOFF_HI20 49 ++#define R_NDS32_GOTOFF_LO12 50 ++#define R_NDS32_INSN16 51 ++#define R_NDS32_LABEL 52 ++#define R_NDS32_LONGCALL1 53 ++#define R_NDS32_LONGCALL2 54 ++#define R_NDS32_LONGCALL3 55 ++#define R_NDS32_LONGJUMP1 56 ++#define R_NDS32_LONGJUMP2 57 ++#define R_NDS32_LONGJUMP3 58 ++#define R_NDS32_LOADSTORE 59 ++#define R_NDS32_9_FIXED_RELA 60 ++#define R_NDS32_15_FIXED_RELA 61 ++#define R_NDS32_17_FIXED_RELA 62 ++#define R_NDS32_25_FIXED_RELA 63 ++#define R_NDS32_PLTREL_HI20 64 ++#define R_NDS32_PLTREL_LO12 65 ++#define R_NDS32_PLT_GOTREL_HI20 66 ++#define R_NDS32_PLT_GOTREL_LO12 67 ++#define R_NDS32_LO12S0_ORI_RELA 72 ++#define R_NDS32_DWARF2_OP1_RELA 77 ++#define R_NDS32_DWARF2_OP2_RELA 78 ++#define R_NDS32_DWARF2_LEB_RELA 79 ++#define R_NDS32_WORD_9_PCREL_RELA 94 ++#define R_NDS32_LONGCALL4 107 ++#define R_NDS32_RELA_NOP_MIX 192 ++#define R_NDS32_RELA_NOP_MAX 255 ++ ++#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) ++typedef elf_greg_t elf_gregset_t[ELF_NGREG]; ++ ++typedef struct user_fp elf_fpregset_t; ++ ++struct elf32_hdr; ++extern int elf_check_arch(const struct elf32_hdr *hdr); ++ ++/* ++ * These are used to set parameters in the core dumps. 
++ */ ++#define ELF_CLASS ELFCLASS32 ++#ifdef __NDS32_EB__ ++#define ELF_DATA ELFDATA2MSB; ++#else ++#define ELF_DATA ELFDATA2LSB; ++#endif ++#define ELF_ARCH EM_NDS32 ++#define USE_ELF_CORE_DUMP ++#define ELF_EXEC_PAGESIZE PAGE_SIZE ++ ++/* This is the location that an ET_DYN program is loaded if exec'ed. Typical ++ use of this is to invoke "./ld.so someprog" to test out a new version of ++ the loader. We need to make sure that it is out of the way of the program ++ that it will "exec", and that there is sufficient room for the brk. */ ++ ++#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++ ++/* When the program starts, a1 contains a pointer to a function to be ++ registered with atexit, as per the SVR4 ABI. A value of 0 means we ++ have no such handler. */ ++#define ELF_PLAT_INIT(_r, load_addr) (_r)->NDS32_r0 = 0 ++ ++/* This yields a mask that user programs can use to figure out what ++ instruction set this cpu supports. */ ++ ++#define ELF_HWCAP (elf_hwcap) ++ ++/* This yields a string that ld.so will use to load implementation ++ specific libraries for optimization. This is more specific in ++ intent than poking at uname or /proc/cpuinfo. */ ++ ++/* For now we just provide a fairly general string that describes the ++ processor family. This could be made more specific later if someone ++ implemented optimisations that require it. 26-bit CPUs give you ++ "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't ++ supported). 32-bit CPUs give you "v3[lb]" for anything based on an ++ ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1 ++ core. */ ++ ++#define ELF_PLATFORM_SIZE 16 ++extern char elf_platform[]; ++#define ELF_PLATFORM (elf_platform) ++ ++#ifdef __KERNEL__ ++ ++/* Old NetWinder binaries were compiled in such a way that the iBCS ++ heuristic always trips on them. Until these binaries become uncommon ++ enough not to care, don't trust the `ibcs' flag here. In any case ++ there is no other ELF system currently supported by iBCS. ++ @@ Could print a warning message to encourage users to upgrade. 
*/ ++#define SET_PERSONALITY(ex) set_personality(PER_LINUX) ++ ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/emergency-restart.h linux-3.4.110/arch/nds32/include/asm/emergency-restart.h +--- linux-3.4.110.orig/arch/nds32/include/asm/emergency-restart.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/emergency-restart.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/emergency-restart.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_EMERGENCY_RESTART_H__ ++#define __NDS32_EMERGENCY_RESTART_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/errno.h linux-3.4.110/arch/nds32/include/asm/errno.h +--- linux-3.4.110.orig/arch/nds32/include/asm/errno.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/errno.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/errno.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_ERRNO_H__ ++#define __NDS32_ERRNO_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/exec.h linux-3.4.110/arch/nds32/include/asm/exec.h +--- linux-3.4.110.orig/arch/nds32/include/asm/exec.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/exec.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,6 @@ ++#ifndef __ASM_NDS32_EXEC_H ++#define __ASM_NDS32_EXEC_H ++ ++#define arch_align_stack(x) (x) ++ ++#endif /* __ASM_ARM_EXEC_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/fb.h linux-3.4.110/arch/nds32/include/asm/fb.h +--- linux-3.4.110.orig/arch/nds32/include/asm/fb.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/fb.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/include/asm/fb.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_FB_H__ ++#define __NDS32_FB_H__ ++ ++ ++#include ++#include ++#include ++ ++static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, ++ unsigned long off) ++{ ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ++} ++ ++static inline int fb_is_primary_device(struct fb_info *info) ++{ ++ return 0; ++} ++ ++#endif /* __NDS32_FB_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/fcntl.h linux-3.4.110/arch/nds32/include/asm/fcntl.h +--- linux-3.4.110.orig/arch/nds32/include/asm/fcntl.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/fcntl.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 1995, 96, 97, 98, 99, 2003, 05 Ralf Baechle ++ */ ++#ifndef _ASM_FCNTL_H ++#define _ASM_FCNTL_H ++ ++/* ++ * The flavours of struct flock. "struct flock" is the ABI compliant ++ * variant. Finally struct flock64 is the LFS variant of struct flock. As ++ * a historic accident and inconsistence with the ABI definition it doesn't ++ * contain all the same fields as struct flock. 
++ */ ++ ++#ifdef CONFIG_32BIT ++#include ++ ++struct flock { ++ short l_type; ++ short l_whence; ++ off_t l_start; ++ off_t l_len; ++ long l_sysid; ++ __kernel_pid_t l_pid; ++ long pad[4]; ++}; ++ ++#define HAVE_ARCH_STRUCT_FLOCK ++ ++#endif /* CONFIG_32BIT */ ++ ++#include ++ ++#endif /* _ASM_FCNTL_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/fixmap.h linux-3.4.110/arch/nds32/include/asm/fixmap.h +--- linux-3.4.110.orig/arch/nds32/include/asm/fixmap.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/fixmap.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,88 @@ ++/* ++ * fixmap.h: compile-time virtual memory allocation ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 1998 Ingo Molnar ++ * ++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 ++ */ ++ ++#ifndef __ASM_NDS32_FIXMAP_H ++#define __ASM_NDS32_FIXMAP_H ++ ++#ifdef CONFIG_HIGHMEM ++#include ++#include ++#endif ++ ++/* ++ * Here we define all the compile-time 'special' virtual ++ * addresses. The point is to have a constant address at ++ * compile time, but to set the physical address only ++ * in the boot process. We allocate these special addresses ++ * from the end of the consistent memory region backwards. ++ * Also this lets us do fail-safe vmalloc(), we ++ * can guarantee that these special addresses and ++ * vmalloc()-ed addresses never overlap. ++ * ++ * these 'compile-time allocated' memory buffers are ++ * fixed-size 4k pages. (or larger if used with an increment ++ * higher than 1) use fixmap_set(idx,phys) to associate ++ * physical memory with fixmap indices. ++ * ++ * TLB entries of such buffers will not be flushed across ++ * task switches. ++ */ ++enum fixed_addresses { ++ FIX_KMAP_RESERVED, ++ FIX_KMAP_BEGIN, ++#ifdef CONFIG_HIGHMEM ++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS), ++#endif ++#ifdef CONFIG_EARLY_PRINTK ++ FIX_EARLY_DEBUG, ++#endif ++ FIX_RETURN_SYSCALL, ++ __end_of_fixed_addresses ++}; ++#define FIXADDR_TOP ((unsigned long) (-(16 * PAGE_SIZE))) ++#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT) ++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) ++ ++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) ++#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) ++ ++#define __this_fixmap_does_not_exist() WARN_ON(1) ++/* ++ * 'index to address' translation. If anyone tries to use the idx ++ * directly without tranlation, we catch the bug with a NULL-deference ++ * kernel oops. Illegal ranges of incoming indices are caught too. ++ */ ++ ++static inline unsigned long fix_to_virt(const unsigned int idx) ++{ ++ /* ++ * this branch gets completely eliminated after inlining, ++ * except when someone tries to use fixaddr indices in an ++ * illegal way. (such as mixing up address types or using ++ * out-of-range indices). ++ * ++ * If it doesn't get removed, the linker will complain ++ * loudly with a reasonably clear error message.. 
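A short sketch of the index-to-address translation these helpers provide, using the unconditional FIX_RETURN_SYSCALL index from the enum above:

static unsigned long example_fixmap_va(void)
{
        /* resolves to a compile-time constant after inlining;
         * virt_to_fix() performs the reverse translation */
        return fix_to_virt(FIX_RETURN_SYSCALL);
}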
++ */ ++ if (idx >= __end_of_fixed_addresses) ++ __this_fixmap_does_not_exist(); ++ ++ return __fix_to_virt(idx); ++} ++ ++static inline unsigned long virt_to_fix(const unsigned long vaddr) ++{ ++ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); ++ return __virt_to_fix(vaddr); ++} ++ ++#endif /* __ASM_NDS32_FIXMAP_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/fpu.h linux-3.4.110/arch/nds32/include/asm/fpu.h +--- linux-3.4.110.orig/arch/nds32/include/asm/fpu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/fpu.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,100 @@ ++/* ++ * linux/arch/nds32/include/asm/fpu.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_FPU_H ++#define __ASM_NDS32_FPU_H ++ ++#ifndef __ASSEMBLY__ ++#include ++#include ++ ++extern void save_fpu(struct task_struct *__tsk); ++extern void fpload(struct fpu_struct *fpregs); ++extern void do_fpu_exception(unsigned long error_code, struct pt_regs *regs); ++extern int do_fpu_inst(unsigned short, struct pt_regs *); ++ ++#ifdef CONFIG_FPU ++ ++#define test_tsk_fpu(regs) (regs->NDS32_FUCOP_CTL & FUCOP_CTL_mskCP0EN) ++ ++struct task_struct; ++ ++static inline void release_fpu(struct pt_regs *regs) ++{ ++ regs->NDS32_FUCOP_CTL &= ~FUCOP_CTL_mskCP0EN; ++} ++ ++static inline void grab_fpu(struct pt_regs *regs) ++{ ++ regs->NDS32_FUCOP_CTL |= FUCOP_CTL_mskCP0EN; ++} ++ ++static inline void enable_fpu(void) ++{ ++ SET_FUCOP_CTL(GET_FUCOP_CTL() | FUCOP_CTL_mskCP0EN); ++} ++ ++static inline void disable_fpu(void) ++{ ++ SET_FUCOP_CTL(GET_FUCOP_CTL() & ~FUCOP_CTL_mskCP0EN); ++} ++ ++static inline void lose_fpu(int save) ++{ ++ preempt_disable(); ++ if (test_tsk_fpu(task_pt_regs(current))) { ++ if (save) ++ { ++ save_fpu(current); ++# ifndef CONFIG_UNLAZY_FPU ++ last_task_used_math=NULL; ++# endif ++ } ++ release_fpu(task_pt_regs(current)); ++ } ++ preempt_enable(); ++} ++ ++static inline void own_fpu(int restore) ++{ ++ preempt_disable(); ++ if (!test_tsk_fpu(task_pt_regs(current))) { ++ if (restore) ++ { ++# ifdef CONFIG_UNLAZY_FPU ++ fpload(¤t->thread.fpu); ++# else ++ if((last_task_used_math!=NULL) ++ &&(last_task_used_math!=current)) ++ save_fpu(last_task_used_math); ++ fpload(¤t->thread.fpu); ++ last_task_used_math=current; ++#endif ++ } ++ grab_fpu(task_pt_regs(current)); ++ } ++ preempt_enable(); ++} ++# ifdef CONFIG_UNLAZY_FPU ++static inline void unlazy_fpu(struct task_struct *tsk) ++{ ++ preempt_disable(); ++ if (test_tsk_fpu(task_pt_regs(tsk))) ++ save_fpu(tsk); ++ preempt_enable(); ++} ++# endif /* CONFIG_UNLAZY_FPU */ ++static inline void clear_fpu(struct pt_regs *regs) ++{ ++ preempt_disable(); ++ if (test_tsk_fpu(regs)) { ++ release_fpu(regs); ++ } ++ preempt_enable(); ++} ++#endif /* CONFIG_FPU */ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* __ASM_NDS32_FPU_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ftpci.h linux-3.4.110/arch/nds32/include/asm/ftpci.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ftpci.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ftpci.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,30 @@ ++/* ++ * linux/arch/nds32/include/asm/ftpci.h ++ * ++ * Faraday FTPCI010 PCI Bridge Device Driver Interface ++ * ++ * Copyright (C) 2005 Faraday Corp. 
(http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * ChangeLog ++ * ++ * Peter Liao 09/26/2005 Created, heavily modified from Faraday A320 platform code. ++ */ ++ ++#ifndef __FARADAY_PLATFORM_PCI_HEADER__ ++#define __FARADAY_PLATFORM_PCI_HEADER__ ++ ++ ++#define PCI_BRIDGE_DEVID 0x4321 ++#define PCI_BRIDGE_VENID 0x159b ++ ++extern int ftpci_probed; ++extern void ftpci_clear_irq(unsigned int irq); ++extern void ftpci_mask_irq(unsigned int irq); ++extern void ftpci_unmask_irq(unsigned int irq); ++extern int ftpci_get_irq(void); ++#endif /* __FARADAY_PLATFORM_PCI_HEADER__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ftrace.h linux-3.4.110/arch/nds32/include/asm/ftrace.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ftrace.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ftrace.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,17 @@ ++#ifndef _ASM_POWERPC_FTRACE ++#define _ASM_POWERPC_FTRACE ++ ++#ifdef CONFIG_FUNCTION_TRACER ++#define MCOUNT_ADDR ((long)(_mcount)) ++#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ ++ ++#ifdef __ASSEMBLY__ ++ ++#else /* !__ASSEMBLY__ */ ++extern void _mcount(void); ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif ++ ++#endif /* _ASM_POWERPC_FTRACE */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/futex.h linux-3.4.110/arch/nds32/include/asm/futex.h +--- linux-3.4.110.orig/arch/nds32/include/asm/futex.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/futex.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/futex.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_FUTEX_H__ ++#define __NDS32_FUTEX_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/glue.h linux-3.4.110/arch/nds32/include/asm/glue.h +--- linux-3.4.110.orig/arch/nds32/include/asm/glue.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/glue.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,26 @@ ++/* ++ * linux/arch/nds32/include/asm/glue.h ++ * ++ * Copyright (C) 1997-1999 Russell King ++ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This file provides the glue to stick the processor-specific bits ++ * into the kernel in an efficient manner. The idea is to use branches ++ * when we're only targetting one class of TLB, or indirect calls ++ * when we're targetting multiple classes of TLBs. 
++ */ ++#ifdef __KERNEL__ ++ ++#ifdef __STDC__ ++#define ____glue(name,fn) name##fn ++#else ++#define ____glue(name,fn) name/**/fn ++#endif ++#define __glue(name,fn) ____glue(name,fn) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/gpio.h linux-3.4.110/arch/nds32/include/asm/gpio.h +--- linux-3.4.110.orig/arch/nds32/include/asm/gpio.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/gpio.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,10 @@ ++#ifndef _ARCH_NDS32_GPIO_H ++#define _ARCH_NDS32_GPIO_H ++ ++#include ++#define gpio_get_value __gpio_get_value ++#define gpio_set_value __gpio_set_value ++#define gpio_cansleep __gpio_cansleep ++#define gpio_to_irq __gpio_to_irq ++ ++#endif /* _ARCH_NDS32_GPIO_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/hardirq.h linux-3.4.110/arch/nds32/include/asm/hardirq.h +--- linux-3.4.110.orig/arch/nds32/include/asm/hardirq.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/hardirq.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/include/asm/hardirq.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++#ifndef __ASM_HARDIRQ_H ++#define __ASM_HARDIRQ_H ++ ++#include ++#include ++#include ++ ++typedef struct { ++ unsigned int __softirq_pending; ++} ____cacheline_aligned irq_cpustat_t; ++ ++#include /* Standard mappings for irq_cpustat_t above */ ++ ++#if NR_IRQS > 256 ++#define HARDIRQ_BITS 9 ++#else ++#define HARDIRQ_BITS 8 ++#endif ++ ++/* ++ * The hardirq mask has to be large enough to have space ++ * for potentially all IRQ sources in the system nesting ++ * on a single CPU: ++ */ ++#if (1 << HARDIRQ_BITS) < NR_IRQS ++# error HARDIRQ_BITS is too low! ++#endif ++ ++#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 ++ ++extern void ack_bad_irq(unsigned int irq); ++#endif /* __ASM_HARDIRQ_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/hardware.h linux-3.4.110/arch/nds32/include/asm/hardware.h +--- linux-3.4.110.orig/arch/nds32/include/asm/hardware.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/hardware.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,61 @@ ++/* ++ * linux/arch/nds32/include/asm/hardware.h ++ * ++ * Faraday Platform Independent Hardware Configuration ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/16/2005 Created. ++ * Peter Liao 10/04/2005 Modified for uClinux ++ * Harry Pan 11/02/2007 Added REGxx macros. 
++ */ ++ ++#ifndef __FARADAY_PLATFORM_HARDWARE_HEADER__ ++#define __FARADAY_PLATFORM_HARDWARE_HEADER__ ++ ++#include ++ ++#ifndef PCIBIOS_MIN_IO ++/* the mini io address is 0x6000,that is IO will allocate from 0-0x6000 offset*/ ++#define PCIBIOS_MIN_IO 0x0 ++#endif ++ ++#ifndef PCIBIOS_MIN_MEM ++/* the mini MEM address is 0x100000,that is MEM will allocate from 0-0x100000 offset*/ ++#define PCIBIOS_MIN_MEM 0x0 ++#endif ++ ++#define pcibios_assign_all_busses() 1 ++ ++/* Pliauo add 5 to resolve __alloc_bootmem_core return NULL pointer in bootmem.c */ ++#if defined(CPU_MEM_PA_BASE) && defined(CPU_MEM_PA_SIZE) ++ #define PA_SDRAM_BASE (CPU_MEM_PA_BASE) ++#else ++ #define PA_SDRAM_BASE (0x00000000) ++#endif ++ ++/* ++ * Define a simple register accessing method by Harry@Nov.02.2007 ++ */ ++#define REG32(a) (*(volatile unsigned int *)(a)) ++#define REG16(a) (*(volatile unsigned short *)(a)) ++#define REG8(a) (*(volatile unsigned char *)(a)) ++ ++#endif /* __FARADAY_PLATFORM_HARDWARE_HEADER__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/highmem.h linux-3.4.110/arch/nds32/include/asm/highmem.h +--- linux-3.4.110.orig/arch/nds32/include/asm/highmem.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/highmem.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,61 @@ ++#ifndef _ASM_HIGHMEM_H ++#define _ASM_HIGHMEM_H ++ ++#include ++#include ++#include ++ ++/* ++ * Right now we initialize only a single pte table. It can be extended ++ * easily, subsequent pte tables have to be allocated in one physical ++ * chunk of RAM. ++ */ ++/* ++ * Ordering is (from lower to higher memory addresses): ++ * ++ * high_memory ++ * Persistent kmap area ++ * PKMAP_BASE ++ * fixed_addresses ++ * FIXADDR_START ++ * FIXADDR_TOP ++ * Vmalloc area ++ * VMALLOC_START ++ * VMALLOC_END ++ */ ++#define PKMAP_BASE ((FIXADDR_START - PGDIR_SIZE) & (PGDIR_MASK)) ++#define LAST_PKMAP PTRS_PER_PTE ++#define LAST_PKMAP_MASK (LAST_PKMAP - 1) ++#define PKMAP_NR(virt) (((virt) - (PKMAP_BASE)) >> PAGE_SHIFT) ++#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) ++#define kmap_prot PAGE_KERNEL ++ ++static inline void flush_cache_kmaps(void) ++{ ++ cpu_dcache_wbinval_all(); ++} ++ ++/* declarations for highmem.c */ ++extern unsigned long highstart_pfn, highend_pfn; ++ ++extern pte_t *pkmap_page_table; ++ ++extern void *kmap_high(struct page *page); ++extern void kunmap_high(struct page *page); ++ ++extern void kmap_init(void); ++ ++/* ++ * The following functions are already defined by ++ * when CONFIG_HIGHMEM is not set. 
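A minimal sketch of the kmap()/kunmap() pairing declared just below for CONFIG_HIGHMEM builds:

static void example_copy_from_page(struct page *page, void *dst,
                                   unsigned int offset, size_t len)
{
        void *va = kmap(page);  /* may sleep; use kmap_atomic() in atomic context */

        memcpy(dst, va + offset, len);
        kunmap(page);
}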
++ */ ++#ifdef CONFIG_HIGHMEM ++extern void *kmap(struct page *page); ++extern void kunmap(struct page *page); ++extern void *kmap_atomic(struct page *page); ++extern void __kunmap_atomic(void *kvaddr); ++extern void *kmap_atomic_pfn(unsigned long pfn); ++extern struct page *kmap_atomic_to_page(void *ptr); ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/hw_irq.h linux-3.4.110/arch/nds32/include/asm/hw_irq.h +--- linux-3.4.110.orig/arch/nds32/include/asm/hw_irq.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/hw_irq.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/hw_irq.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_HW_IRQ_H__ ++#define __NDS32_HW_IRQ_H__ ++ ++ ++#endif /* __NDS32_HW_IRQ_H__ */ ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/intc.h linux-3.4.110/arch/nds32/include/asm/intc.h +--- linux-3.4.110.orig/arch/nds32/include/asm/intc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/intc.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * linux/arch/nds32/include/asm/intc.h ++ * ++ * Faraday FTINTC010 Interrupt Controller Device Driver Interface ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/14/2005 Created, heavily modified from Faraday CPE platform code. 
++ */ ++ ++#ifndef __FARADAY_INTC_FTINTC010_HEADER__ ++#define __FARADAY_INTC_FTINTC010_HEADER__ ++ ++#define IRQ_SOURCE_REG 0 ++#define IRQ_MASK_REG 0x04 ++#define IRQ_CLEAR_REG 0x08 ++#define IRQ_MODE_REG 0x0c ++#define IRQ_LEVEL_REG 0x10 ++#define IRQ_STATUS_REG 0x14 ++ ++#define FIQ_SOURCE_REG 0x20 ++#define FIQ_MASK_REG 0x24 ++#define FIQ_CLEAR_REG 0x28 ++#define FIQ_MODE_REG 0x2c ++#define FIQ_LEVEL_REG 0x30 ++#define FIQ_STATUS_REG 0x34 ++ ++#endif /* __FARADAY_INTC_FTINTC010_HEADER__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ioctl.h linux-3.4.110/arch/nds32/include/asm/ioctl.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ioctl.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ioctl.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/ioctl.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_IOCTL_H__ ++#define __NDS32_IOCTL_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ioctls.h linux-3.4.110/arch/nds32/include/asm/ioctls.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ioctls.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ioctls.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,13 @@ ++/* ++ * linux/arch/nds32/include/asm/ioctls.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_IOCTLS_H ++#define __ASM_NDS32_IOCTLS_H ++ ++#define FIOQSIZE 0x545E ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/io.h linux-3.4.110/arch/nds32/include/asm/io.h +--- linux-3.4.110.orig/arch/nds32/include/asm/io.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/io.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,343 @@ ++/* ++ * linux/arch/nds32/include/asm/io.h ++ * ++ * Copyright (C) 1996-2000 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Modifications: ++ * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both ++ * constant addresses and variable addresses. ++ * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture ++ * specific IO header files. ++ * 27-Mar-1999 PJB Second parameter of memcpy_toio is const.. ++ * 04-Apr-1999 PJB Added check_signature. ++ * 12-Dec-1999 RMK More cleanups ++ * 18-Jun-2000 RMK Removed virt_to_* and friends definitions ++ * 05-Oct-2004 BJD Moved memory string functions to use void __iomem ++ */ ++#ifndef __ASM_NDS32_IO_H ++#define __ASM_NDS32_IO_H ++ ++#ifdef __KERNEL__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++ ++/* ++ * ISA I/O bus memory addresses are 1:1 with the physical address. ++ */ ++#define isa_virt_to_bus virt_to_phys ++#define isa_page_to_bus page_to_phys ++#define isa_bus_to_virt phys_to_virt ++ ++/* ++ * Generic IO read/write. These perform native-endian accesses. Note ++ * that some architectures will want to re-define __raw_{read,write}w. 
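A minimal sketch combining the FTINTC010 status offsets above with the REG32 accessor from hardware.h; the mapped controller base address is a hypothetical parameter:

static unsigned int example_pending_irqs(unsigned long intc_vbase)
{
        /* read-only status probe; offset from the register list above */
        return REG32(intc_vbase + IRQ_STATUS_REG);
}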
++ */ ++ ++#define __raw_writeb(v,a) (*(volatile unsigned char __force *)(a) = (v)) ++#define __raw_writew(v,a) (*(volatile unsigned short __force *)(a) = (v)) ++#define __raw_writel(v,a) (*(volatile unsigned int __force *)(a) = (v)) ++ ++#define __raw_readb(a) (*(volatile unsigned char __force *)(a)) ++#define __raw_readw(a) (*(volatile unsigned short __force *)(a)) ++#define __raw_readl(a) (*(volatile unsigned int __force *)(a)) ++ ++/* ++ * Bad read/write accesses... ++ */ ++extern void __readwrite_bug(const char *fn); ++ ++/* ++ * Now, pick up the machine-defined IO definitions ++ */ ++#ifndef __FARADAY_PLATFORM_IO_HEADER__ ++#define __FARADAY_PLATFORM_IO_HEADER__ ++ ++#include ++#include ++ ++#ifndef IO_SPACE_LIMIT ++#define IO_SPACE_LIMIT 0xffffffff ++#endif ++ ++#ifndef __io ++#define __io(a) ((void __iomem *)(a)) ++#endif ++#define IO_ADDRESS(a) __io(a) ++#ifndef __mem_pci ++#define __mem_pci(a) (a) ++#endif ++ ++#endif ++ ++#ifdef __io_pci ++#warning machine class uses buggy __io_pci ++#endif ++#if defined(__arch_putb) || defined(__arch_putw) || defined(__arch_putl) || \ ++ defined(__arch_getb) || defined(__arch_getw) || defined(__arch_getl) ++//-Tom for debug ++//#warning machine class uses old __arch_putw or __arch_getw ++#endif ++ ++/* ++ * IO port access primitives ++ * ------------------------- ++ * ++ * The ARM doesn't have special IO access instructions; all IO is memory ++ * mapped. Note that these are defined to perform little endian accesses ++ * only. Their primary purpose is to access PCI and ISA peripherals. ++ * ++ * Note that for a big endian machine, this implies that the following ++ * big endian mode connectivity is in place, as described by numerious ++ * ARM documents: ++ * ++ * PCI: D0-D7 D8-D15 D16-D23 D24-D31 ++ * ARM: D24-D31 D16-D23 D8-D15 D0-D7 ++ * ++ * The machine specific io.h include defines __io to translate an "IO" ++ * address to a memory address. ++ * ++ * Note that we prevent GCC re-ordering or caching values in expressions ++ * by introducing sequence points into the in*() definitions. Note that ++ * __raw_* do not guarantee this behaviour. ++ * ++ * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. 
++ */ ++#ifdef __io ++#ifdef __NDS32_EB__ ++#define inw(p) ({ unsigned int __v = be16_to_cpu(__raw_readw(__io(p))); __v; }) ++#define inl(p) ({ unsigned int __v = be32_to_cpu(__raw_readl(__io(p))); __v; }) ++#define outw(v,p) __raw_writew(cpu_to_be16(v),__io(p)) ++#define outl(v,p) __raw_writel(cpu_to_be32(v),__io(p)) ++#else ++#define inw(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; }) ++#define inl(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; }) ++#define outw(v,p) __raw_writew(cpu_to_le16(v),__io(p)) ++#define outl(v,p) __raw_writel(cpu_to_le32(v),__io(p)) ++#endif ++ ++#define inb(p) ({ unsigned int __v = __raw_readb(__io(p)); __v; }) ++#define outb(v,p) __raw_writeb(v,__io(p)) ++ ++#endif ++ ++#define outb_p(val,port) outb((val),(port)) ++#define outw_p(val,port) outw((val),(port)) ++#define outl_p(val,port) outl((val),(port)) ++#define inb_p(port) inb((port)) ++#define inw_p(port) inw((port)) ++#define inl_p(port) inl((port)) ++ ++/* ++ * String version of IO memory access ops: ++ */ ++extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t); ++extern void _memcpy_toio(volatile void __iomem *, const void *, size_t); ++extern void _memset_io(volatile void __iomem *, int, size_t); ++ ++#define mmiowb() ++ ++/* ++ * Memory access primitives ++ * ------------------------ ++ * ++ * These perform PCI memory accesses via an ioremap region. They don't ++ * take an address as such, but a cookie. ++ * ++ * Again, this are defined to perform little endian accesses. See the ++ * IO port primitives for more information. ++ */ ++#ifdef __mem_pci ++#ifdef __NDS32_EB__ ++#define readw(c) ({ unsigned int __v = be16_to_cpu(__raw_readw(__mem_pci(c))); __v; }) ++#define readl(c) ({ unsigned int __v = be32_to_cpu(__raw_readl(__mem_pci(c))); __v; }) ++#define writew(v,c) __raw_writew(cpu_to_be16(v),__mem_pci(c)) ++#define writel(v,c) __raw_writel(cpu_to_be32(v),__mem_pci(c)) ++#else ++#define readw(c) ({ unsigned int __v = le16_to_cpu(__raw_readw(__mem_pci(c))); __v; }) ++#define readl(c) ({ unsigned int __v = le32_to_cpu(__raw_readl(__mem_pci(c))); __v; }) ++#define writew(v,c) __raw_writew(cpu_to_le16(v),__mem_pci(c)) ++#define writel(v,c) __raw_writel(cpu_to_le32(v),__mem_pci(c)) ++#endif ++ ++#define readb(c) ({ unsigned int __v = __raw_readb(__mem_pci(c)); __v; }) ++#define writeb(v,c) __raw_writeb(v,__mem_pci(c)) ++ ++#define readb_relaxed(addr) readb(addr) ++#define readw_relaxed(addr) readw(addr) ++#define readl_relaxed(addr) readl(addr) ++ ++#define memset_io(c,v,l) _memset_io(__mem_pci(c),(v),(l)) ++#define memcpy_fromio(a,c,l) _memcpy_fromio((a),__mem_pci(c),(l)) ++#define memcpy_toio(c,a,l) _memcpy_toio(__mem_pci(c),(a),(l)) ++ ++#define eth_io_copy_and_sum(s,c,l,b) \ ++ eth_copy_and_sum((s),__mem_pci(c),(l),(b)) ++ ++#elif !defined(readb) ++ ++#define readb(c) (__readwrite_bug("readb"),0) ++#define readw(c) (__readwrite_bug("readw"),0) ++#define readl(c) (__readwrite_bug("readl"),0) ++#define writeb(v,c) __readwrite_bug("writeb") ++#define writew(v,c) __readwrite_bug("writew") ++#define writel(v,c) __readwrite_bug("writel") ++ ++#define eth_io_copy_and_sum(s,c,l,b) __readwrite_bug("eth_io_copy_and_sum") ++ ++#endif /* __mem_pci */ ++ ++/* ++ * If this architecture has ISA IO, then define the isa_read/isa_write ++ * macros. 
++ */ ++#ifdef __mem_isa ++ ++#define isa_readb(addr) __raw_readb(__mem_isa(addr)) ++#define isa_readw(addr) __raw_readw(__mem_isa(addr)) ++#define isa_readl(addr) __raw_readl(__mem_isa(addr)) ++#define isa_writeb(val,addr) __raw_writeb(val,__mem_isa(addr)) ++#define isa_writew(val,addr) __raw_writew(val,__mem_isa(addr)) ++#define isa_writel(val,addr) __raw_writel(val,__mem_isa(addr)) ++#define isa_memset_io(a,b,c) _memset_io(__mem_isa(a),(b),(c)) ++#define isa_memcpy_fromio(a,b,c) _memcpy_fromio((a),__mem_isa(b),(c)) ++#define isa_memcpy_toio(a,b,c) _memcpy_toio(__mem_isa((a)),(b),(c)) ++ ++#define isa_eth_io_copy_and_sum(a,b,c,d) \ ++ eth_copy_and_sum((a),__mem_isa(b),(c),(d)) ++ ++#else /* __mem_isa */ ++ ++#define isa_readb(addr) (__readwrite_bug("isa_readb"),0) ++#define isa_readw(addr) (__readwrite_bug("isa_readw"),0) ++#define isa_readl(addr) (__readwrite_bug("isa_readl"),0) ++#define isa_writeb(val,addr) __readwrite_bug("isa_writeb") ++#define isa_writew(val,addr) __readwrite_bug("isa_writew") ++#define isa_writel(val,addr) __readwrite_bug("isa_writel") ++#define isa_memset_io(a,b,c) __readwrite_bug("isa_memset_io") ++#define isa_memcpy_fromio(a,b,c) __readwrite_bug("isa_memcpy_fromio") ++#define isa_memcpy_toio(a,b,c) __readwrite_bug("isa_memcpy_toio") ++ ++#define isa_eth_io_copy_and_sum(a,b,c,d) \ ++ __readwrite_bug("isa_eth_io_copy_and_sum") ++ ++#endif /* __mem_isa */ ++ ++/* ++ * ioremap and friends. ++ * ++ * ioremap takes a PCI memory address, as specified in ++ * Documentation/IO-mapping.txt. ++ */ ++extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long); ++extern void __iounmap(void __iomem *addr); ++ ++#ifndef __arch_ioremap ++#define ioremap(cookie,size) __ioremap(cookie,size,0,1) ++#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0,1) ++#define iounmap(cookie) __iounmap(cookie) ++#else ++#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0,1) ++#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0,1) ++#define iounmap(cookie) __arch_iounmap(cookie) ++#endif ++ ++/* ++ * can the hardware map this into one segment or not, given no other ++ * constraints. 
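A minimal probe-time sketch of the ioremap()/readl()/writel() path defined above; the physical base address and register offsets are placeholders:

static int example_probe(unsigned long dev_pa)
{
        void __iomem *regs = ioremap(dev_pa, 0x1000);
        unsigned int id;

        if (!regs)
                return -ENOMEM;
        writel(1, regs + 0x00);         /* hypothetical enable register */
        id = readl(regs + 0x04);        /* hypothetical ID register */
        iounmap(regs);
        return id ? 0 : -ENODEV;
}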
++ */ ++#define BIOVEC_MERGEABLE(vec1, vec2) \ ++ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) ++ ++/* ++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem ++ * access ++ */ ++#define xlate_dev_mem_ptr(p) __va(p) ++ ++/* ++ * Convert a virtual cached pointer to an uncached pointer ++ */ ++#define xlate_dev_kmem_ptr(p) p ++static inline void readsb(const void __iomem *addr, void * data, int bytelen) ++{ ++ unsigned char *ptr = (unsigned char *)addr; ++ unsigned char *ptr2 = (unsigned char *)data; ++ while(bytelen) { ++ *ptr2 = *ptr; ++ ptr2++; ++ bytelen--; ++ } ++} ++ ++static inline void readsw(const void __iomem *addr, void * data, int wordlen) ++{ ++ unsigned short *ptr = (unsigned short *)addr; ++ unsigned short *ptr2 = (unsigned short *)data; ++ while(wordlen) { ++ *ptr2 = *ptr; ++ ptr2++; ++ wordlen--; ++ } ++} ++ ++static inline void readsl(const void __iomem *addr, void * data, int longlen) ++{ ++ unsigned int *ptr = (unsigned int *)addr; ++ unsigned int *ptr2 = (unsigned int *)data; ++ while(longlen) { ++ *ptr2 = *ptr; ++ ptr2++; ++ longlen--; ++ } ++} ++static inline void writesb(void __iomem *addr, const void * data, int bytelen) ++{ ++ unsigned char *ptr = (unsigned char *)addr; ++ unsigned char *ptr2 = (unsigned char *)data; ++ while(bytelen) { ++ *ptr = *ptr2; ++ ptr2++; ++ bytelen--; ++ } ++} ++static inline void writesw(void __iomem *addr, const void * data, int wordlen) ++{ ++ unsigned short *ptr = (unsigned short *)addr; ++ unsigned short *ptr2 = (unsigned short *)data; ++ while(wordlen) { ++ *ptr = *ptr2; ++ ptr2++; ++ wordlen--; ++ } ++} ++static inline void writesl(void __iomem *addr, const void * data, int longlen) ++{ ++ unsigned int *ptr = (unsigned int *)addr; ++ unsigned int *ptr2 = (unsigned int *)data; ++ while(longlen) { ++ *ptr = *ptr2; ++ ptr2++; ++ longlen--; ++ } ++} ++ ++ ++#define insb(p,d,l) BUG() ++#define insw(p,d,l) BUG() ++#define insl(p,d,l) BUG() ++#define outsb(p,d,l) BUG() ++#define outsw(p,d,l) BUG() ++#define outsl(p,d,l) BUG() ++ ++#endif /* __KERNEL__ */ ++#endif /* __ASM_NDS32_IO_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ipcbuf.h linux-3.4.110/arch/nds32/include/asm/ipcbuf.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ipcbuf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ipcbuf.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,34 @@ ++/* ++ * linux/arch/nds32/include/asm/ipcbuf.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASMNDS32_IPCBUF_H ++#define __ASMNDS32_IPCBUF_H ++ ++/* ++ * The ipc64_perm structure for arm architecture. ++ * Note extra padding because this structure is passed back and forth ++ * between kernel and user space. 
++ * ++ * Pad space is left for: ++ * - 32-bit mode_t and seq ++ * - 2 miscellaneous 32-bit values ++ */ ++ ++struct ipc64_perm ++{ ++ __kernel_key_t key; ++ __kernel_uid32_t uid; ++ __kernel_gid32_t gid; ++ __kernel_uid32_t cuid; ++ __kernel_gid32_t cgid; ++ __kernel_mode_t mode; ++ unsigned short __pad1; ++ unsigned short seq; ++ unsigned short __pad2; ++ unsigned long __unused1; ++ unsigned long __unused2; ++}; ++ ++#endif /* __ASMNDS32_IPCBUF_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/irqflags.h linux-3.4.110/arch/nds32/include/asm/irqflags.h +--- linux-3.4.110.orig/arch/nds32/include/asm/irqflags.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/irqflags.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,65 @@ ++/* ++ * linux/arch/nds32/include/asm/irqflags.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#include ++#include ++ ++#define arch_local_irq_disable() \ ++ GIE_DISABLE(); ++ ++#define arch_local_irq_enable() \ ++ GIE_ENABLE(); ++static inline unsigned long arch_local_irq_save(void) ++{ ++ unsigned long flags; ++ __asm__ __volatile__( ++ "mfsr %0, $PSW\n" ++ "andi %0, %0, #0x1\n" ++ "gie_disable\n" ++ : "=r" (flags) ); ++ return flags; ++} ++static inline unsigned long arch_local_save_flags(void) ++{ ++ unsigned long flags; ++ __asm__ __volatile__( ++ "mfsr %0, $PSW\n" ++ "andi %0, %0, #0x1" ++ : "=r" (flags) ); ++ return flags; ++} ++static inline void arch_local_irq_restore(unsigned long flags) ++{ ++ __asm__ __volatile__( ++ "beqz %0, 1f\n" ++ "gie_enable\n" ++ "1:" ++ :: "r" (flags) ); ++} ++static inline int arch_irqs_disabled_flags(unsigned long flags) ++{ ++ return !flags; ++} ++#if 0 ++#define raw_local_irq_save(x) \ ++ __asm__ __volatile__( \ ++ "mfsr %0, $PSW\n" \ ++ "andi %0, %0, #0x1\n" \ ++ "gie_disable\n" \ ++ : "=r" (x) ) ++#define raw_local_save_flags(x) \ ++ __asm__ __volatile__( \ ++ "mfsr %0, $PSW\n" \ ++ "andi %0, %0, #0x1" \ ++ : "=r" (x) ) ++#define raw_local_irq_restore(x) \ ++ __asm__ __volatile__( \ ++ "beqz %0, 1f\n" \ ++ "gie_enable\n" \ ++ "1:" \ ++ :: "r" (x) ) ++ ++#define raw_irqs_disabled_flags(x) !(x) ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/irq.h linux-3.4.110/arch/nds32/include/asm/irq.h +--- linux-3.4.110.orig/arch/nds32/include/asm/irq.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/irq.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,50 @@ ++/* ++ * linux/arch/nds32/include/asm/irq.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_IRQ_H ++#define __ASM_NDS32_IRQ_H ++ ++#include ++ ++#ifndef irq_canonicalize ++#define irq_canonicalize(i) (i) ++#endif ++ ++#ifndef NR_IRQS ++#define NR_IRQS 128 ++#endif ++ ++/* ++ * Use this value to indicate lack of interrupt ++ * capability ++ */ ++#ifndef NO_IRQ ++#define NO_IRQ ((unsigned int)(-1)) ++#endif ++ ++struct irqaction; ++ ++extern void disable_irq_nosync(unsigned int); ++extern void disable_irq(unsigned int); ++extern void enable_irq(unsigned int); ++ ++#define __IRQT_FALEDGE IRQ_TYPE_EDGE_FALLING ++#define __IRQT_RISEDGE IRQ_TYPE_EDGE_RISING ++#define __IRQT_LOWLVL IRQ_TYPE_LEVEL_LOW ++#define __IRQT_HIGHLVL IRQ_TYPE_LEVEL_HIGH ++ ++#define IRQT_NOEDGE (0) ++#define IRQT_RISING (__IRQT_RISEDGE) ++#define IRQT_FALLING (__IRQT_FALEDGE) ++#define IRQT_BOTHEDGE (__IRQT_RISEDGE|__IRQT_FALEDGE) ++#define IRQT_LOW (__IRQT_LOWLVL) ++#define IRQT_HIGH (__IRQT_HIGHLVL) ++#define IRQT_PROBE IRQ_TYPE_PROBE ++ ++struct irqaction; ++struct pt_regs; ++ 
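The irqflags.h helpers a few hunks above are what the generic local_irq_save()/local_irq_restore() machinery ends up calling; a minimal usage sketch (the function and the counter are hypothetical):

static void demo_update_counter(unsigned long *counter)
{
        unsigned long flags;

        /* arch_local_irq_save() captures the GIE bit from $PSW and
         * disables interrupts; arch_local_irq_restore() re-enables
         * them only if they were enabled on entry.                 */
        flags = arch_local_irq_save();
        (*counter)++;                   /* critical section */
        arch_local_irq_restore(flags);
}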
++#endif ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/irq_regs.h linux-3.4.110/arch/nds32/include/asm/irq_regs.h +--- linux-3.4.110.orig/arch/nds32/include/asm/irq_regs.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/irq_regs.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,9 @@ ++/* ++ * linux/arch/nds32/include/asm/irq_regs.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_IRQ_REGS_H__ ++#define __NDS32_IRQ_REGS_H__ ++#include ++#endif /* __NDS32_IRQ_REGS_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/Kbuild linux-3.4.110/arch/nds32/include/asm/Kbuild +--- linux-3.4.110.orig/arch/nds32/include/asm/Kbuild 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/Kbuild 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,3 @@ ++include include/asm-generic/Kbuild.asm ++ ++header-y += pfm.h +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kdebug.h linux-3.4.110/arch/nds32/include/asm/kdebug.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kdebug.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kdebug.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,14 @@ ++/* ++ * linux/arch/nds32/include/asm/kdebug.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_KDEBUG_H__ ++#define __NDS32_KDEBUG_H__ ++ ++enum die_val { ++ DIE_OOPS = 1, ++ DIE_DEBUG, ++}; ++ ++#endif /* __NDS32_KDEBUG_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kexec.h linux-3.4.110/arch/nds32/include/asm/kexec.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kexec.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kexec.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,31 @@ ++#ifndef _NDS32_KEXEC_H ++#define _NDS32_KEXEC_H ++ ++#ifdef CONFIG_KEXEC ++ ++/* Maximum physical address we can use pages from */ ++#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) ++/* Maximum address we can reach in physical address mode */ ++#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) ++/* Maximum address we can use for the control code buffer */ ++#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) ++ ++#define KEXEC_CONTROL_PAGE_SIZE 4096 ++ ++#define KEXEC_ARCH KEXEC_ARCH_NDS32 ++ ++#define KEXEC_NDS32_ATAGS_OFFSET 0x1000 ++#define KEXEC_NDS32_ZIMAGE_OFFSET 0x500000 ++ ++#ifndef __ASSEMBLY__ ++ ++struct kimage; ++/* Provide a dummy definition to avoid build failures. */ ++static inline void crash_setup_regs(struct pt_regs *newregs, ++ struct pt_regs *oldregs) { } ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* CONFIG_KEXEC */ ++ ++#endif /* _NDS32_KEXEC_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kgdb.h linux-3.4.110/arch/nds32/include/asm/kgdb.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kgdb.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kgdb.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,90 @@ ++/* ============================================================================ ++ * ++ * linux/arch/nds32/include/asm/kgdb.h ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for NDS32 KGDB support. ++ * ++ * Author: Harry Pan ++ * ++ * Revision History: ++ * ++ * Jul.14.2007 Initial ported by Harry. 
++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#ifndef __ASM_NDS32_KGDB_H__ ++#define __ASM_NDS32_KGDB_H__ ++ ++#include ++ ++#define BREAK_INSTR_SIZE 2 ++#define CACHE_FLUSH_IS_SAFE 1 ++ ++#ifndef __ASSEMBLY__ ++ ++/* ++ * Define numbers of registers we have in NDS32 arch ++ */ ++#define NDS32_NUM_GR 32 // general registers. ++#define NDS32_NUM_SPR 5 // special registers. (PC, D0, D1) ++#define NDS32_NUM_CR 6 // ctrl registers. ++#define NDS32_NUM_IR 16 // interruption registers. ++#define NDS32_NUM_MR 11 // MMU registers. ++#define NDS32_NUM_DR 48 // debug registers. ++#define NDS32_NUM_PFR 4 // performance monitoring registers. ++#define NDS32_NUM_DMAR 11 // local memory DMA registers ++#define NDS32_NUM_RACR 1 // resource access control registers. ++#define NDS32_NUM_IDR 2 // implementation dependent registers. ++#define NDS32_NUM_SR (NDS32_NUM_CR + NDS32_NUM_IR + NDS32_NUM_MR + \ ++ NDS32_NUM_DR + NDS32_NUM_PFR + NDS32_NUM_DMAR + \ ++ NDS32_NUM_RACR + NDS32_NUM_IDR) ++#define NDS32_NUM_REGS (NDS32_NUM_GR + NDS32_NUM_SPR + NDS32_NUM_SR) ++ ++#define KGDB_MAX_NO_CPUS 1 ++#define BUFMAX 2048 ++#define NUMREGBYTES (NDS32_NUM_REGS << 2) ++ ++/* ++ * NDS32 virtual registers layout for GDB. ++ */ ++enum nds32_regnum ++{ ++ NDS32_R0_REGNUM = 0, // first integer-like argument. ++ NDS32_R5_REGNUM = 5, // last integer-like argument. ++ NDS32_FP_REGNUM = 28, // frame register ++ NDS32_LP_REGNUM = 30, // link pointer ++ NDS32_SP_REGNUM = 31, // address of stack top. ++ NDS32_PC_REGNUM = 32, ++ NDS32_D0LO_REGNUM = 33, ++ NDS32_D0HI_REGNUM = 34, ++ NDS32_D1LO_REGNUM = 35, ++ NDS32_D1HI_REGNUM = 36, ++ NDS32_CR0_REGNUM = 37, ++ NDS32_IR0_REGNUM = (NDS32_CR0_REGNUM + NDS32_NUM_CR), ++ NDS32_MR0_REGNUM = (NDS32_IR0_REGNUM + NDS32_NUM_IR), ++ NDS32_DR0_REGNUM = (NDS32_MR0_REGNUM + NDS32_NUM_MR), ++ NDS32_PFR0_REGNUM = (NDS32_DR0_REGNUM + NDS32_NUM_DR), ++ NDS32_DMAR0_REGNUM = (NDS32_PFR0_REGNUM + NDS32_NUM_PFR), ++ NDS32_RACR0_REGNUM = (NDS32_DMAR0_REGNUM + NDS32_NUM_DMAR), ++ NDS32_IDR0_REGNUM = (NDS32_RACR0_REGNUM + NDS32_NUM_RACR), ++ /* nds32 calling convention. */ ++ NDS32_ARG0_REGNUM = NDS32_R0_REGNUM, ++ NDS32_ARGN_REGNUM = NDS32_R5_REGNUM, ++ NDS32_RET_REGNUM = NDS32_R0_REGNUM, ++}; ++ ++static inline void arch_kgdb_breakpoint(void) ++{ ++ asm __volatile__ ( "break 0x1ff\n" ); ++} ++ ++#endif /* !__ASSEMBLY__ */ ++#endif /* __ASM_NDS32_KGDB_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kmap_types.h linux-3.4.110/arch/nds32/include/asm/kmap_types.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kmap_types.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kmap_types.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,29 @@ ++/* ++ * linux/arch/nds32/include/asm/kmap-types.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_KMAP_TYPES_H ++#define __NDS32_KMAP_TYPES_H ++ ++/* ++ * This is the "bare minimum". AIO seems to require this. 
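For reference, the register-buffer sizing in the kgdb.h hunk above works out as follows; the packet-size remark is an inference from the standard GDB remote protocol rather than something this patch states:

/*
 *   NDS32_NUM_SR   = 6 + 16 + 11 + 48 + 4 + 11 + 1 + 2 = 99
 *   NDS32_NUM_REGS = 32 + 5 + 99                        = 136
 *   NUMREGBYTES    = 136 << 2                           = 544 bytes
 *
 * A full 'g' register packet therefore needs 544 * 2 = 1088 hex
 * characters, which fits comfortably inside BUFMAX (2048).
 */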
++ */ ++enum km_type { ++ KM_BOUNCE_READ, ++ KM_SKB_SUNRPC_DATA, ++ KM_SKB_DATA_SOFTIRQ, ++ KM_USER0, ++ KM_USER1, ++ KM_BIO_SRC_IRQ, ++ KM_BIO_DST_IRQ, ++ KM_PTE0, ++ KM_PTE1, ++ KM_IRQ0, ++ KM_IRQ1, ++ KM_SOFTIRQ0, ++ KM_SOFTIRQ1, ++ KM_TYPE_NR ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kprobes.h linux-3.4.110/arch/nds32/include/asm/kprobes.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kprobes.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kprobes.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,86 @@ ++#ifndef _ASM_ANDES_KPROBES_H ++#define _ASM_ANDES_KPROBES_H ++/* ++ * Kernel Probes (KProbes) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) IBM Corporation, 2002, 2004 ++ * ++ * See arch/x86/kernel/kprobes.c for x86 kprobes history. ++ */ ++#include ++#include ++#include ++ ++#define __ARCH_WANT_KPROBES_INSN_SLOT ++ ++struct pt_regs; ++struct kprobe; ++ ++typedef unsigned short kprobe_opcode_t; ++ ++#define MAX_INSN_SIZE 2 ++#define MAX_STACK_SIZE 64 ++#define MIN_STACK_SIZE(ADDR) \ ++ (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ ++ THREAD_SIZE - (unsigned long)(ADDR))) \ ++ ? (MAX_STACK_SIZE) \ ++ : (((unsigned long)current_thread_info()) + \ ++ THREAD_SIZE - (unsigned long)(ADDR))) ++#define regs_return_value(regs) ((regs)->NDS32_r0) ++#define flush_insn_slot(p) do { } while (0) ++#define kretprobe_blacklist_size 0 ++ ++void arch_remove_kprobe(struct kprobe *p); ++void kretprobe_trampoline(void); ++ ++/* Architecture specific copy of original instruction*/ ++struct arch_specific_insn { ++ /* copy of the original instruction */ ++ kprobe_opcode_t *insn; ++ /* ++ * boostable = -1: This instruction type is not boostable. ++ * boostable = 0: This instruction type is boostable. ++ * boostable = 1: This instruction has been boosted: we have ++ * added a relative jump after the instruction copy in insn, ++ * so no single-step and fixup are needed (unless there's ++ * a post_handler or break_handler). 
++ */ ++ int boostable; ++}; ++ ++struct prev_kprobe { ++ struct kprobe *kp; ++ unsigned long status; ++ unsigned long old_flags; ++ unsigned long saved_flags; ++}; ++ ++/* per-cpu kprobe control block */ ++struct kprobe_ctlblk { ++ unsigned long kprobe_status; ++ unsigned long kprobe_old_flags; ++ unsigned long kprobe_saved_flags; ++ unsigned long *jprobe_saved_sp; ++ struct pt_regs jprobe_saved_regs; ++ kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; ++ struct prev_kprobe prev_kprobe; ++}; ++ ++extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); ++extern int kprobe_exceptions_notify(struct notifier_block *self, ++ unsigned long val, void *data); ++#endif /* _ASM_ANDES_KPROBES_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/kvm.h linux-3.4.110/arch/nds32/include/asm/kvm.h +--- linux-3.4.110.orig/arch/nds32/include/asm/kvm.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/kvm.h 2016-04-07 10:20:50.898079322 +0200 +@@ -0,0 +1,9 @@ ++/* ++ * linux/arch/nds32/include/asm/kvm.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_KVM_H__ ++#define __NDS32_KVM_H__ ++ ++#endif /* __NDS32_KVM_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/l2_cache.h linux-3.4.110/arch/nds32/include/asm/l2_cache.h +--- linux-3.4.110.orig/arch/nds32/include/asm/l2_cache.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/l2_cache.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,134 @@ ++/* ++ * linux/arch/nds32/include/asm/l2_cache.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef L2_CACHE_H ++#define L2_CACHE_H ++ ++// CCTL_CMD_OP ++#define L2_CA_CONF_OFF 0x0 ++#define L2_IF_CONF_OFF 0x4 ++#define L2CC_SETUP_OFF 0x8 ++#define L2CC_PROT_OFF 0xC ++#define L2CC_CTRL_OFF 0x10 ++#define L2_INT_EN_OFF 0x20 ++#define L2_STA_OFF 0x24 ++#define RDERR_ADDR_OFF 0x28 ++#define WRERR_ADDR_OFF 0x2c ++#define EVDPTERR_ADDR_OFF 0x30 ++#define IMPL3ERR_ADDR_OFF 0x34 ++#define L2_CNT0_CTRL_OFF 0x40 ++#define L2_EVNT_CNT0_OFF 0x44 ++#define L2_CNT1_CTRL_OFF 0x48 ++#define L2_EVNT_CNT1_OFF 0x4c ++#define L2_CCTL_CMD_OFF 0x60 ++#define L2_CCTL_STATUS_OFF 0x64 ++#define L2_LINE_TAG_OFF 0x68 ++#define L2_LINE_DPT_OFF 0x70 ++ ++#define CCTL_CMD_L2_IX_INVAL 0x0 ++#define CCTL_CMD_L2_PA_INVAL 0x1 ++#define CCTL_CMD_L2_IX_WB 0x2 ++#define CCTL_CMD_L2_PA_WB 0x3 ++#define CCTL_CMD_L2_PA_WBINVAL 0x5 ++#define CCTL_CMD_L2_SYNC 0xa ++// CCTL_CMD_TYPE ++#define CCTL_SINGLE_CMD 0 ++#define CCTL_BLOCK_CMD 0x10 ++#define CCTL_ALL_CMD 0x10 ++ ++/****************************************************************************** ++ * L2_CA_CONF (Cache architecture configuration) ++ *****************************************************************************/ ++#define L2_CA_CONF_offL2SET 0 ++#define L2_CA_CONF_offL2WAY 4 ++#define L2_CA_CONF_offL2CLSZ 8 ++#define L2_CA_CONF_offL2DW 11 ++#define L2_CA_CONF_offL2PT 14 ++#define L2_CA_CONF_offL2VER 16 ++ ++#define L2_CA_CONF_mskL2SET (0xFUL << L2_CA_CONF_offL2SET) ++#define L2_CA_CONF_mskL2WAY (0xFUL << L2_CA_CONF_offL2WAY) ++#define L2_CA_CONF_mskL2CLSZ (0x7UL << L2_CA_CONF_offL2CLSZ) ++#define L2_CA_CONF_mskL2DW (0x7UL << L2_CA_CONF_offL2DW) ++#define L2_CA_CONF_mskL2PT (0x3UL << L2_CA_CONF_offL2PT) ++#define L2_CA_CONF_mskL2VER (0xFFFFUL << L2_CA_CONF_offL2VER) ++ ++/****************************************************************************** ++ * L2CC_SETUP (L2CC Setup register) ++ 
*****************************************************************************/ ++#define L2CC_SETUP_offPART 0 ++#define L2CC_SETUP_mskPART (0x3UL << L2CC_SETUP_offPART) ++#define L2CC_SETUP_offDDLATC 4 ++#define L2CC_SETUP_mskDDLATC (0x3UL << L2CC_SETUP_offDDLATC) ++#define L2CC_SETUP_offTDLATC 8 ++#define L2CC_SETUP_mskTDLATC (0x3UL << L2CC_SETUP_offTDLATC) ++ ++/****************************************************************************** ++ * L2CC_PROT (L2CC Protect register) ++ *****************************************************************************/ ++#define L2CC_PROT_offMRWEN 31 ++#define L2CC_PROT_mskMRWEN (0x1UL << L2CC_PROT_offMRWEN) ++//TODO finish this table ++// ++/****************************************************************************** ++ * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n) ++ *****************************************************************************/ ++#define L2CC_CTRL_offEN 31 ++#define L2CC_CTRL_mskEN (0x1UL << L2CC_CTRL_offEN) ++ ++/****************************************************************************** ++ * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n) ++ *****************************************************************************/ ++#define L2_CCTL_STATUS_offCMD_COMP 31 ++#define L2_CCTL_STATUS_mskCMD_COMP (0x1 << L2_CCTL_STATUS_offCMD_COMP) ++//TODO finish this table ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++#include ++#include ++ ++#define L2C_R_REG(offset) inl(L2CC_VA_BASE + offset) ++#define L2C_W_REG(offset, value) outl(value, L2CC_VA_BASE + offset) ++ ++#define L2_CMD_RDY() \ ++ do{;}while((L2C_R_REG(L2_CCTL_STATUS_OFF) & L2_CCTL_STATUS_mskCMD_COMP) == 0) ++ ++static inline unsigned long L2_CACHE_SET(void){ ++ return 64 << ( ( L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >> L2_CA_CONF_offL2SET); ++} ++ ++static inline unsigned long L2_CACHE_WAY(void){ ++ return 1 + ( ( L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >> L2_CA_CONF_offL2WAY); ++} ++ ++static inline unsigned long L2_CACHE_LINE_SIZE(void){ ++ ++ return 4 << ( ( L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >> L2_CA_CONF_offL2CLSZ); ++} ++ ++static inline unsigned long GET_L2CC_CTRL_CPU(unsigned long cpu){ ++ if(cpu == smp_processor_id()) ++ return L2C_R_REG(L2CC_CTRL_OFF); ++ return L2C_R_REG(L2CC_CTRL_OFF+(cpu<<8)); ++} ++ ++static inline void SET_L2CC_CTRL_CPU(unsigned long cpu , unsigned long val){ ++ if(cpu == smp_processor_id()) ++ L2C_W_REG(L2CC_CTRL_OFF,val); ++ else ++ L2C_W_REG(L2CC_CTRL_OFF+(cpu<<8),val); ++} ++ ++static inline unsigned long GET_L2CC_STATUS_CPU(unsigned long cpu){ ++ if(cpu == smp_processor_id()) ++ return L2C_R_REG(L2_CCTL_STATUS_OFF); ++ return L2C_R_REG(L2_CCTL_STATUS_OFF+(cpu<<8)); ++} ++ ++#endif ++ ++#endif //L2_CACHE_H +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/leds.h linux-3.4.110/arch/nds32/include/asm/leds.h +--- linux-3.4.110.orig/arch/nds32/include/asm/leds.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/leds.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,51 @@ ++/* ++ * linux/arch/nds32/include/asm/leds.h ++ * ++ * Copyright (C) 1998 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Event-driven interface for LEDs on machines ++ * Added led_start and led_stop- Alex Holden, 28th Dec 1998. 
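A short sketch of how the l2_cache.h accessors above fit together; whether a bare sync command is sufficient on real hardware is not something this patch states, so treat the first helper as illustrative only:

static void demo_l2_sync(void)
{
        /* Issue an L2 sync via the CCTL command register and spin on
         * the completion bit in L2_CCTL_STATUS.                      */
        L2C_W_REG(L2_CCTL_CMD_OFF, CCTL_CMD_L2_SYNC);
        L2_CMD_RDY();
}

static unsigned long demo_l2_size(void)
{
        /* Total L2 size = sets * ways * line size, all decoded from
         * the L2_CA_CONF configuration register.                    */
        return L2_CACHE_SET() * L2_CACHE_WAY() * L2_CACHE_LINE_SIZE();
}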
++ */ ++#ifndef ASM_NDS32_LEDS_H ++#define ASM_NDS32_LEDS_H ++ ++ ++typedef enum { ++ led_idle_start, ++ led_idle_end, ++ led_timer, ++ led_start, ++ led_stop, ++ led_claim, /* override idle & timer leds */ ++ led_release, /* restore idle & timer leds */ ++ led_start_timer_mode, ++ led_stop_timer_mode, ++ led_green_on, ++ led_green_off, ++ led_amber_on, ++ led_amber_off, ++ led_red_on, ++ led_red_off, ++ led_blue_on, ++ led_blue_off, ++ /* ++ * I want this between led_timer and led_start, but ++ * someone has decided to export this to user space ++ */ ++ led_halted ++} led_event_t; ++ ++/* Use this routine to handle LEDs */ ++ ++#ifdef CONFIG_LEDS ++extern void (*leds_event)(led_event_t); ++#else ++#define leds_event(e) ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/limits.h linux-3.4.110/arch/nds32/include/asm/limits.h +--- linux-3.4.110.orig/arch/nds32/include/asm/limits.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/limits.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,16 @@ ++/* ++ * linux/arch/nds32/include/asm/limits.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_PIPE_H ++#define __ASM_PIPE_H ++ ++#ifndef PAGE_SIZE ++#include ++#endif ++ ++#define PIPE_BUF PAGE_SIZE ++ ++#endif ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/linkage.h linux-3.4.110/arch/nds32/include/asm/linkage.h +--- linux-3.4.110.orig/arch/nds32/include/asm/linkage.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/linkage.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,12 @@ ++/* ++ * linux/arch/nds32/include/asm/linkage.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_LINKAGE_H__ ++#define __NDS32_LINKAGE_H__ ++ ++#define __ALIGN .align 2 ++#define __ALIGN_STR ".align 2" ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/local.h linux-3.4.110/arch/nds32/include/asm/local.h +--- linux-3.4.110.orig/arch/nds32/include/asm/local.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/local.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/local.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_LOCAL_H__ ++#define __NDS32_LOCAL_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/arch.h linux-3.4.110/arch/nds32/include/asm/mach/arch.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/arch.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/arch.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,86 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/arch.h ++ * ++ * Copyright (C) 2000 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#ifndef __ASSEMBLY__ ++ ++struct tag; ++struct meminfo; ++struct sys_timer; ++ ++struct machine_desc { ++ /* ++ * Note! 
The first four elements are used ++ * by assembler code in head-armv.S ++ */ ++ unsigned int nr; /* architecture number */ ++ ++ const char *name; /* architecture name */ ++ unsigned int param_offset; /* parameter page */ ++ ++ unsigned int video_start; /* start of video RAM */ ++ unsigned int video_end; /* end of video RAM */ ++ ++ unsigned int reserve_lp0 :1; /* never has lp0 */ ++ unsigned int reserve_lp1 :1; /* never has lp1 */ ++ unsigned int reserve_lp2 :1; /* never has lp2 */ ++ unsigned int soft_reboot :1; /* soft reboot */ ++ void (*fixup)(struct machine_desc *, ++ struct tag *, char **, ++ struct meminfo *); ++ void (*map_io)(void);/* IO mapping function */ ++ void (*init_irq)(void); ++ struct sys_timer *timer; /* system tick timer */ ++ void (*init_machine)(void); ++}; ++ ++/* ++ * * Current machine - only accessible during boot. ++ * */ ++extern struct machine_desc *machine_desc; ++ ++/* ++ * Set of macros to define architecture features. This is built into ++ * a table by the linker. ++ */ ++#define MACHINE_START(_type,_name) \ ++const struct machine_desc __mach_desc_##_type \ ++ __attribute__((__section__(".arch.info"))) = { \ ++ .nr = MACH_TYPE_##_type, \ ++ .name = _name, ++ ++#define MAINTAINER(n) ++ ++#define BOOT_PARAMS(_params) \ ++ .param_offset = _params, ++ ++#define VIDEO(_start,_end) \ ++ .video_start = _start, \ ++ .video_end = _end, ++ ++#define DISABLE_PARPORT(_n) \ ++ .reserve_lp##_n = 1, ++ ++#define SOFT_REBOOT \ ++ .soft_reboot = 1, ++ ++#define MAPIO(_func) \ ++ .map_io = _func, ++ ++#define INITIRQ(_func) \ ++ .init_irq = _func, ++ ++#define INIT_MACHINE(_func) \ ++ .init_machine = _func, ++ ++#define MACHINE_END \ ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/dma.h linux-3.4.110/arch/nds32/include/asm/mach/dma.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/dma.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/dma.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,56 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/dma.h ++ * ++ * Copyright (C) 1998-2000 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This header file describes the interface between the generic DMA handler ++ * (dma.c) and the architecture-specific DMA backends (dma-*.c) ++ */ ++ ++struct dma_struct; ++typedef struct dma_struct dma_t; ++ ++struct dma_ops { ++ int (*request)(dmach_t, dma_t *); /* optional */ ++ void (*free)(dmach_t, dma_t *); /* optional */ ++ void (*enable)(dmach_t, dma_t *); /* mandatory */ ++ void (*disable)(dmach_t, dma_t *); /* mandatory */ ++ int (*residue)(dmach_t, dma_t *); /* optional */ ++ int (*setspeed)(dmach_t, dma_t *, int); /* optional */ ++ char *type; ++}; ++ ++struct dma_struct { ++ struct scatterlist buf; /* single DMA */ ++ int sgcount; /* number of DMA SG */ ++ struct scatterlist *sg; /* DMA Scatter-Gather List */ ++ ++ unsigned int active:1; /* Transfer active */ ++ unsigned int invalid:1; /* Address/Count changed */ ++ unsigned int using_sg:1; /* using scatter list? 
*/ ++ dmamode_t dma_mode; /* DMA mode */ ++ int speed; /* DMA speed */ ++ ++ unsigned int lock; /* Device is allocated */ ++ const char *device_id; /* Device name */ ++ ++ unsigned int dma_base; /* Controller base address */ ++ int dma_irq; /* Controller IRQ */ ++ struct scatterlist cur_sg; /* Current controller buffer */ ++ unsigned int state; ++ ++ struct dma_ops *d_ops; ++}; ++ ++/* Prototype: void arch_dma_init(dma) ++ * Purpose : Initialise architecture specific DMA ++ * Params : dma - pointer to array of DMA structures ++ */ ++extern void arch_dma_init(dma_t *dma); ++ ++extern void isa_init_dma(dma_t *dma); +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/flash.h linux-3.4.110/arch/nds32/include/asm/mach/flash.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/flash.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/flash.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/flash.h ++ * ++ * Copyright (C) 2003 Russell King, All Rights Reserved. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef ASMNDS32_MACH_FLASH_H ++#define ASMNDS32_MACH_FLASH_H ++ ++struct mtd_partition; ++ ++/* ++ * map_name: the map probe function name ++ * width: width of mapped device ++ * init: method called at driver/device initialisation ++ * exit: method called at driver/device removal ++ * set_vpp: method called to enable or disable VPP ++ * parts: optional array of mtd_partitions for static partitioning ++ * nr_parts: number of mtd_partitions for static partitoning ++ */ ++struct flash_platform_data { ++ const char *map_name; ++ unsigned int width; ++ int (*init)(void); ++ void (*exit)(void); ++ void (*set_vpp)(int on); ++ struct mtd_partition *parts; ++ unsigned int nr_parts; ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/irda.h linux-3.4.110/arch/nds32/include/asm/mach/irda.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/irda.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/irda.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,21 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/irda.h ++ * ++ * Copyright (C) 2004 Russell King. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#ifndef __ASM_NDS32_MACH_IRDA_H ++#define __ASM_NDS32_MACH_IRDA_H ++ ++struct irda_platform_data { ++ int (*startup)(struct device *); ++ void (*shutdown)(struct device *); ++ int (*set_power)(struct device *, unsigned int state); ++ void (*set_speed)(struct device *, unsigned int speed); ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/map.h linux-3.4.110/arch/nds32/include/asm/mach/map.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/map.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/map.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/map.h ++ * ++ * Copyright (C) 1999-2000 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Page table mapping constructs and function prototypes ++ */ ++struct map_desc { ++ unsigned long virtual; ++ unsigned long physical; ++ unsigned long length; ++ unsigned int type; ++}; ++ ++struct meminfo; ++ ++#define MT_DEVICE_NCB MT_DEVICE ++#define MT_DEVICE_NCNB MT_DEVICE ++#define MT_DEVICE 0 ++#define MT_CACHECLEAN 1 ++#define MT_MINICLEAN 2 ++#define MT_CACHE_L1 3 ++#define MT_UXKRWX_V1 4 ++#define MT_UXKRWX_V2 5 ++#define MT_MEMORY 6 ++#define MT_ROM 7 ++#define MT_ILM 8 ++#define MT_DLM 9 ++ ++extern void create_memmap_holes(struct meminfo *); ++extern void iotable_init(struct map_desc *, int); ++extern void setup_io_desc(void); +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/mmc.h linux-3.4.110/arch/nds32/include/asm/mach/mmc.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/mmc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/mmc.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,16 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/mmc.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++#ifndef ASMNDS32_MACH_MMC_H ++#define ASMNDS32_MACH_MMC_H ++ ++#include ++ ++struct mmc_platform_data { ++ unsigned int ocr_mask; /* available voltages */ ++ u32 (*translate_vdd)(struct device *, unsigned int); ++ unsigned int (*status)(struct device *); ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/pci.h linux-3.4.110/arch/nds32/include/asm/mach/pci.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/pci.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/pci.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,76 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/pci.h ++ * ++ * Copyright (C) 2000 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++struct pci_sys_data; ++struct pci_bus; ++ ++struct hw_pci { ++ struct list_head buses; ++ int nr_controllers; ++ int (*setup)(int nr, struct pci_sys_data *); ++ struct pci_bus *(*scan)(int nr, struct pci_sys_data *); ++ void (*preinit)(void); ++ void (*postinit)(void); ++ u8 (*swizzle)(struct pci_dev *dev, u8 *pin); ++ int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin); ++}; ++ ++/* ++ * Per-controller structure ++ */ ++struct pci_sys_data { ++ struct list_head node; ++ int busnr; /* primary bus number */ ++ unsigned long mem_offset; /* bus->cpu memory mapping offset */ ++ unsigned long io_offset; /* bus->cpu IO mapping offset */ ++ struct pci_bus *bus; /* PCI bus */ ++ struct resource *resource[3]; /* Primary PCI bus resources */ ++ /* Bridge swizzling */ ++ u8 (*swizzle)(struct pci_dev *, u8 *); ++ /* IRQ mapping */ ++ int (*map_irq)(struct pci_dev *, u8, u8); ++ struct hw_pci *hw; ++}; ++ ++/* ++ * This is the standard PCI-PCI bridge swizzling algorithm. ++ */ ++u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp); ++ ++/* ++ * Call this with your hw_pci struct to initialise the PCI system. ++ */ ++void pci_common_init(struct hw_pci *); ++ ++/* ++ * PCI controllers ++ */ ++extern int iop321_setup(int nr, struct pci_sys_data *); ++extern struct pci_bus *iop321_scan_bus(int nr, struct pci_sys_data *); ++extern void iop321_init(void); ++ ++extern int iop331_setup(int nr, struct pci_sys_data *); ++extern struct pci_bus *iop331_scan_bus(int nr, struct pci_sys_data *); ++extern void iop331_init(void); ++ ++extern int dc21285_setup(int nr, struct pci_sys_data *); ++extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *); ++extern void dc21285_preinit(void); ++extern void dc21285_postinit(void); ++ ++extern int via82c505_setup(int nr, struct pci_sys_data *); ++extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *); ++extern void via82c505_init(void *sysdata); ++ ++extern int pci_v3_setup(int nr, struct pci_sys_data *); ++extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *); ++extern void pci_v3_preinit(void); ++extern void pci_v3_postinit(void); +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/serial_sa1100.h linux-3.4.110/arch/nds32/include/asm/mach/serial_sa1100.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/serial_sa1100.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/serial_sa1100.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,31 @@ ++/* ++ * linux/include/asm-arm/mach/serial_sa1100.h ++ * ++ * Author: Nicolas Pitre ++ * ++ * Moved to include/asm-arm/mach and changed lots, Russell King ++ * ++ * Low level machine dependent UART functions. ++ */ ++ ++struct uart_port; ++struct uart_info; ++ ++/* ++ * This is a temporary structure for registering these ++ * functions; it is intended to be discarded after boot. 
++ */ ++struct sa1100_port_fns { ++ void (*set_mctrl)(struct uart_port *, u_int); ++ u_int (*get_mctrl)(struct uart_port *); ++ void (*pm)(struct uart_port *, u_int, u_int); ++ int (*set_wake)(struct uart_port *, u_int); ++}; ++ ++#ifdef CONFIG_SERIAL_SA1100 ++void sa1100_register_uart_fns(struct sa1100_port_fns *fns); ++void sa1100_register_uart(int idx, int port); ++#else ++#define sa1100_register_uart_fns(fns) do { } while (0) ++#define sa1100_register_uart(idx,port) do { } while (0) ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach/time.h linux-3.4.110/arch/nds32/include/asm/mach/time.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach/time.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach/time.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,62 @@ ++/* ++ * linux/arch/nds32/include/asm/mach/time.h ++ * ++ * Copyright (C) 2004 MontaVista Software, Inc. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_NDS32_MACH_TIME_H ++#define __ASM_NDS32_MACH_TIME_H ++ ++//#include ++ ++/* ++ * This is our kernel timer structure. ++ * ++ * - init ++ * Initialise the kernels jiffy timer source, claim interrupt ++ * using setup_irq. This is called early on during initialisation ++ * while interrupts are still disabled on the local CPU. ++ * - suspend ++ * Suspend the kernel jiffy timer source, if necessary. This ++ * is called with interrupts disabled, after all normal devices ++ * have been suspended. If no action is required, set this to ++ * NULL. ++ * - resume ++ * Resume the kernel jiffy timer source, if necessary. This ++ * is called with interrupts disabled before any normal devices ++ * are resumed. If no action is required, set this to NULL. ++ * - offset ++ * Return the timer offset in microseconds since the last timer ++ * interrupt. Note: this must take account of any unprocessed ++ * timer interrupt which may be pending. ++ */ ++ ++/* + Tom from newlib ++ * Have the 32 bit jiffies value wrap 5 minutes after boot ++ * so jiffies wrap bugs show up earlier. ++ */ ++//#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) ++ ++struct sys_timer { ++// struct sys_device dev; ++ void (*init)(void); ++ void (*suspend)(void); ++ void (*resume)(void); ++ unsigned long (*offset)(void); ++}; ++ ++extern struct sys_timer *system_timer; ++extern void timer_tick( void); ++ ++/* ++ * Kernel time keeping support. 
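A hypothetical platform might hook into the sys_timer interface documented above roughly as follows; all demo_* names are invented, and only struct sys_timer, system_timer and timer_tick() come from this hunk:

static void demo_timer_init(void)
{
        /* program the hardware timer and request the tick IRQ here */
}

static unsigned long demo_timer_offset(void)
{
        return 0;       /* microseconds since the last tick */
}

static struct sys_timer demo_timer = {
        .init   = demo_timer_init,
        .offset = demo_timer_offset,
};

Board code would then typically reference demo_timer from its machine_desc (or assign it to system_timer) before the core starts calling timer_tick().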
++ */ ++extern int (*set_rtc)(void); ++extern void save_time_delta(struct timespec *delta, struct timespec *rtc); ++extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mach-types.h linux-3.4.110/arch/nds32/include/asm/mach-types.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mach-types.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mach-types.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,14 @@ ++#ifndef __ASSEMBLY__ ++/* The type of machine we're running on */ ++extern unsigned int __machine_arch_type; ++#endif ++ ++#define MACH_TYPE_FARADAY 758 ++ ++# ifdef machine_arch_type ++# undef machine_arch_type ++# define machine_arch_type __machine_arch_type ++# else ++# define machine_arch_type MACH_TYPE_FARADAY ++# endif ++# define machine_is_faraday() (machine_arch_type == MACH_TYPE_FARADAY) +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/memory.h linux-3.4.110/arch/nds32/include/asm/memory.h +--- linux-3.4.110.orig/arch/nds32/include/asm/memory.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/memory.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,218 @@ ++/* ++ * linux/arch/nds32/include/asm/memory.h ++ * ++ * Copyright (C) 2000-2002 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Note: this file should not be included by non-asm/.h files ++ */ ++#ifndef __ASM_NDS32_MEMORY_H ++#define __ASM_NDS32_MEMORY_H ++ ++#include ++#ifndef __FARADAY_PLATFORM_INDEPENDENT_MEMORY_HEADER__ ++#define __FARADAY_PLATFORM_INDEPENDENT_MEMORY_HEADER__ ++ ++#include ++ ++#ifndef __ASSEMBLY__ ++#include ++#endif ++ ++#ifndef PHYS_OFFSET ++#define PHYS_OFFSET CPU_MEM_PA_BASE ++#endif ++ ++#ifndef PAGE_OFFSET ++#define PAGE_OFFSET (0xC0000000) ++#endif ++ ++#ifndef END_MEM ++#define END_MEM (CPU_MEM_PA_LIMIT) ++#endif ++ ++#ifndef __virt_to_bus ++#define __virt_to_bus __virt_to_phys ++#endif ++ ++#ifndef __bus_to_virt ++#define __bus_to_virt __phys_to_virt ++#endif ++ ++#endif /* __FARADAY_PLATFORM_INDEPENDENT_MEMORY_HEADER__ */ ++ ++#ifndef TASK_SIZE ++/* ++ * TASK_SIZE - the maximum size of a user space task. ++ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area ++ */ ++#define TASK_SIZE (0xbf000000UL) ++#define TASK_UNMAPPED_BASE (0x40000000UL) ++#endif ++ ++/* ++ * Page offset: 3GB ++ */ ++#ifndef PAGE_OFFSET ++#define PAGE_OFFSET (0xc0000000) ++#endif ++ ++/* ++ * Physical vs virtual RAM address space conversion. These are ++ * private definitions which should NOT be used outside memory.h ++ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. ++ */ ++#ifndef __virt_to_phys ++#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) ++#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) ++#endif ++ ++/* ++ * The module space lives between the addresses given by TASK_SIZE ++ * and PAGE_OFFSET - it must be within 32MB of the kernel text. ++ */ ++#define MODULES_END (PAGE_OFFSET) ++#define MODULES_VADDR (MODULES_END - 16*1048576) ++ ++#if TASK_SIZE > MODULES_VADDR ++#error Top of user space clashes with start of module space ++#endif ++ ++#ifndef __ASSEMBLY__ ++ ++/* ++ * The DMA mask corresponding to the maximum bus address allocatable ++ * using GFP_DMA. 
The default here places no restriction on DMA ++ * allocations. This must be the smallest DMA mask in the system, ++ * so a successful GFP_DMA allocation will always satisfy this. ++ */ ++#ifndef ISA_DMA_THRESHOLD ++#define ISA_DMA_THRESHOLD (0xffffffffULL) ++#endif ++ ++#ifndef arch_adjust_zones ++#define arch_adjust_zones(node,size,holes) do { } while (0) ++#endif ++ ++/* ++ * PFNs are used to describe any physical page; this means ++ * PFN 0 == physical address 0. ++ * ++ * This is the PFN of the first RAM page in the kernel ++ * direct-mapped view. We assume this is the first page ++ * of RAM in the mem_map as well. ++ */ ++#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) ++ ++/* ++ * These are *only* valid on the kernel direct mapped RAM memory. ++ * Note: Drivers should NOT use these. They are the wrong ++ * translation for translating DMA addresses. Use the driver ++ * DMA support - see dma-mapping.h. ++ */ ++static inline unsigned long virt_to_phys(void *x) ++{ ++ return __virt_to_phys((unsigned long)(x)); ++} ++ ++static inline void *phys_to_virt(unsigned long x) ++{ ++ return (void *)(__phys_to_virt((unsigned long)(x))); ++} ++ ++/* ++ * Drivers should NOT use these either. ++ */ ++#define __pa(x) __virt_to_phys((unsigned long)(x)) ++#define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) ++ ++/* ++ * Virtual <-> DMA view memory address translations ++ * Again, these are *only* valid on the kernel direct mapped RAM ++ * memory. Use of these is *deprecated* (and that doesn't mean ++ * use the __ prefixed forms instead.) See dma-mapping.h. ++ */ ++static inline __deprecated unsigned long virt_to_bus(void *x) ++{ ++ return __virt_to_bus((unsigned long)x); ++} ++ ++static inline __deprecated void *bus_to_virt(unsigned long x) ++{ ++ return (void *)__bus_to_virt(x); ++} ++ ++/* ++ * Conversion between a struct page and a physical address. ++ * ++ * Note: when converting an unknown physical address to a ++ * struct page, the resulting pointer must be validated ++ * using VALID_PAGE(). It must return an invalid struct page ++ * for any physical address not corresponding to a system ++ * RAM address. ++ * ++ * pfn_valid(pfn) indicates whether a PFN number is valid ++ * ++ * virt_to_page(k) convert a _valid_ virtual address to struct page * ++ * virt_addr_valid(k) indicates whether a virtual address is valid ++ */ ++#ifndef CONFIG_DISCONTIGMEM ++ ++#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET ++#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) ++ ++#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) ++#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) ++ ++#define PHYS_TO_NID(addr) (0) ++ ++#else /* CONFIG_DISCONTIGMEM */ ++ ++/* ++ * This is more complex. We have a set of mem_map arrays spread ++ * around in memory. ++ */ ++#include ++ ++#define pfn_valid(pfn) (PFN_TO_NID(pfn) < MAX_NUMNODES) ++ ++#define virt_to_page(kaddr) \ ++ (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr)) ++#define virt_addr_valid(kaddr) (KVADDR_TO_NID(kaddr) < MAX_NUMNODES) ++ ++/* ++ * Common discontigmem stuff. ++ * PHYS_TO_NID is used by the NDS32 kernel/setup.c ++ */ ++#define PHYS_TO_NID(addr) PFN_TO_NID((addr) >> PAGE_SHIFT) ++ ++#endif /* !CONFIG_DISCONTIGMEM */ ++ ++/* ++ * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die. ++ */ ++#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) ++ ++/* ++ * Optional device DMA address remapping. 
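Assuming a platform where PHYS_OFFSET (CPU_MEM_PA_BASE) is 0x00000000 and PAGE_OFFSET keeps its 0xC0000000 default, the linear-mapping helpers above behave like this; the concrete PHYS_OFFSET value is an assumption, not something fixed by this patch:

/*
 *   __pa(0xC0123000) == 0xC0123000 - 0xC0000000 + 0x0 == 0x00123000
 *   __va(0x00123000) == 0x00123000 - 0x0 + 0xC0000000 == 0xC0123000
 *   PHYS_PFN_OFFSET  == 0x00000000 >> PAGE_SHIFT      == 0
 *
 * Only addresses inside the kernel direct mapping may be passed to
 * these helpers; ioremap()ed or vmalloc()ed pointers may not.
 */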
Do _not_ use directly! ++ * We should really eliminate virt_to_bus() here - it's deprecated. ++ */ ++#ifndef __arch_page_to_dma ++#define page_to_dma(dev, page) ((dma_addr_t)__virt_to_bus((unsigned long)page_address(page))) ++#define dma_to_virt(dev, addr) ((void *)__bus_to_virt(addr)) ++#define virt_to_dma(dev, addr) ((dma_addr_t)__virt_to_bus((unsigned long)(addr))) ++#else ++#define page_to_dma(dev, page) (__arch_page_to_dma(dev, page)) ++#define dma_to_virt(dev, addr) (__arch_dma_to_virt(dev, addr)) ++#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr)) ++#endif ++ ++#endif ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/misc_spec.h linux-3.4.110/arch/nds32/include/asm/misc_spec.h +--- linux-3.4.110.orig/arch/nds32/include/asm/misc_spec.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/misc_spec.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,75 @@ ++/* ++ * linux/arch/nds32/include/asm/misc_spec.h ++ * ++ * Faraday A320D platform dependent definitions ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/14/2005 Created ++ * Luke Lee 10/06/2005 Modified for automatic system clock rate ++ */ ++ ++#ifndef __A320_PLATFORM_MANUAL_DEFINITION__ ++#define __A320_PLATFORM_MANUAL_DEFINITION__ ++ ++#define BOOT_PARAMETER_PA_BASE (PHYS_OFFSET + 0x400) ++ ++#ifdef CONFIG_AUTO_SYS_CLK ++ ++#ifndef __ASSEMBLY__ ++extern int ag101_get_ahb_clk(void); ++extern int ag102_get_ahb_clk(void); ++ ++#if defined(CONFIG_PLAT_AG101) ++#define AHB_CLK_IN ag101_get_ahb_clk() ++#elif defined(CONFIG_PLAT_AG102) ++#define AHB_CLK_IN ag102_get_ahb_clk() ++ ++#endif ++ ++#endif ++#define TIMER_CLK_IN (CONFIG_SYS_CLK/2) ++ ++#else ++ ++/* Timer clock input is APB CLOCK */ ++#define TIMER_CLK_IN (CONFIG_SYS_CLK/2) ++#define AHB_CLK_IN (CONFIG_SYS_CLK) ++ ++#endif ++ ++#ifndef __ASSEMBLY__ ++#include ++ ++#ifdef CONFIG_PLATFORM_INTC ++extern void __init intc_ftintc010_init_irq(void); ++#define platform_init_irq intc_ftintc010_init_irq ++#endif ++ ++#ifdef CONFIG_PLATFORM_NOINTC ++extern void __init nointc_init_irq(void); ++#define platform_init_irq nointc_init_irq ++#endif ++ ++#endif ++ ++#define daughter_platform_init_irq(x) /* NOP */ ++ ++#endif /*__A320_PLATFORM_MANUAL_DEFINITION__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mman.h linux-3.4.110/arch/nds32/include/asm/mman.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mman.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mman.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,22 @@ ++/* ++ * linux/arch/nds32/include/asm/mman.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef 
__NDS32_MMAN_H__ ++#define __NDS32_MMAN_H__ ++ ++#include ++ ++#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ ++#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ ++#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ ++#define MAP_LOCKED 0x2000 /* pages are locked */ ++#define MAP_NORESERVE 0x4000 /* don't check for reservations */ ++#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ ++#define MAP_NONBLOCK 0x10000 /* do not block on IO */ ++ ++#define MCL_CURRENT 1 /* lock all current mappings */ ++#define MCL_FUTURE 2 /* lock all future mappings */ ++ ++#endif /* __NDS32_MMAN_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mmu_context.h linux-3.4.110/arch/nds32/include/asm/mmu_context.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mmu_context.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mmu_context.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,84 @@ ++/* ++ * linux/arch/nds32/include/asm/mmu_context.h ++ * ++ * Copyright (C) 1996 Russell King. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Changelog: ++ * 27-06-1996 RMK Created ++ */ ++#ifndef __ASM_NDS32_MMU_CONTEXT_H ++#define __ASM_NDS32_MMU_CONTEXT_H ++ ++#include ++#include ++#include ++ ++static inline int ++init_new_context(struct task_struct *tsk, struct mm_struct *mm) ++{ ++ mm->context.id = 0; ++ return 0; ++} ++ ++#define destroy_context(mm) do { } while(0) ++ ++#ifndef CONFIG_CPU_NO_CONTEXT_ID ++#define CID_BITS 9 ++extern spinlock_t cid_lock; ++extern unsigned int cpu_last_cid; ++ ++static inline void ++__new_context(struct mm_struct *mm) ++{ ++ unsigned int cid; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&cid_lock, flags); ++ cid = cpu_last_cid; ++ cpu_last_cid += 1 << TLB_MISC_offCID; ++ if (cpu_last_cid == 0) ++ cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS; ++ spin_unlock_irqrestore(&cid_lock, flags); ++ ++ if ((cid & TLB_MISC_mskCID ) == 0) ++ flush_tlb_all(); ++ ++ mm->context.id = cid; ++} ++ ++static inline void ++check_context(struct mm_struct *mm) ++{ ++ if (unlikely((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS)) ++ __new_context(mm); ++} ++#else ++#define check_context(m) ++#endif ++ ++static inline void ++enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ++{ ++} ++ ++static inline void ++switch_mm(struct mm_struct *prev, struct mm_struct *next, ++ struct task_struct *tsk) ++{ ++ unsigned int cpu = smp_processor_id(); ++ ++ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { ++ check_context(next); ++ cpu_switch_mm(next); ++ } ++} ++ ++#define deactivate_mm(tsk,mm) do { } while (0) ++#define activate_mm(prev,next) switch_mm(prev, next, NULL) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mmu.h linux-3.4.110/arch/nds32/include/asm/mmu.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mmu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mmu.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,42 @@ ++/* ++ * linux/arch/nds32/include/asm/mmu.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_MMU_H ++#define __NDS32_MMU_H ++ ++typedef struct { ++ unsigned int id; ++} mm_context_t; ++ ++#define ASID(mm) ((mm)->context.id & (TLB_MISC_mskCID >> TLB_MISC_offCID)) ++inline static unsigned long 
ACC_PSZ(unsigned long page_size) ++{ ++ switch(page_size) { ++ case(1<<12): ++ return 0; ++ case(1<<13): ++ return 1; ++ case(1<<14): ++ return 2; ++ case(1<<16): ++ return 3; ++ case(1<<18): ++ return 4; ++ case(1<<20): ++ return 5; ++ case(1<<22): ++ return 6; ++ case(1<<24): ++ return 7; ++ case(1<<26): ++ return 8; ++ case(1<<28): ++ return 9; ++ default: ++ printk("Huge Page Size is not supported \n"); ++ return 0xffffffff; ++ } ++} ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/module.h linux-3.4.110/arch/nds32/include/asm/module.h +--- linux-3.4.110.orig/arch/nds32/include/asm/module.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/module.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,23 @@ ++/* ++ * linux/arch/nds32/include/asm/module.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASM_NDS32_MODULE_H ++#define _ASM_NDS32_MODULE_H ++ ++struct mod_arch_specific ++{ ++ int foo; ++}; ++ ++#define Elf_Shdr Elf32_Shdr ++#define Elf_Sym Elf32_Sym ++#define Elf_Ehdr Elf32_Ehdr ++ ++/* ++ * Include the ARM architecture version. ++ */ ++#define MODULE_ARCH_VERMAGIC "NDS32vN10" __stringify(__LINUX_NDS32_ARCH__) " " ++ ++#endif /* _ASM_NDS32_MODULE_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/msgbuf.h linux-3.4.110/arch/nds32/include/asm/msgbuf.h +--- linux-3.4.110.orig/arch/nds32/include/asm/msgbuf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/msgbuf.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/include/asm/msgbuf.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_MSGBUF_H ++#define _ASMNDS32_MSGBUF_H ++ ++/* ++ * The msqid64_ds structure for arm architecture. ++ * Note extra padding because this structure is passed back and forth ++ * between kernel and user space. 
++ * ++ * Pad space is left for: ++ * - 64-bit time_t to solve y2038 problem ++ * - 2 miscellaneous 32-bit values ++ */ ++ ++struct msqid64_ds { ++ struct ipc64_perm msg_perm; ++ __kernel_time_t msg_stime; /* last msgsnd time */ ++ unsigned long __unused1; ++ __kernel_time_t msg_rtime; /* last msgrcv time */ ++ unsigned long __unused2; ++ __kernel_time_t msg_ctime; /* last change time */ ++ unsigned long __unused3; ++ unsigned long msg_cbytes; /* current number of bytes on queue */ ++ unsigned long msg_qnum; /* number of messages in queue */ ++ unsigned long msg_qbytes; /* max number of bytes on queue */ ++ __kernel_pid_t msg_lspid; /* pid of last msgsnd */ ++ __kernel_pid_t msg_lrpid; /* last receive pid */ ++ unsigned long __unused4; ++ unsigned long __unused5; ++}; ++ ++#endif /* _ASMNDS32_MSGBUF_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/mutex.h linux-3.4.110/arch/nds32/include/asm/mutex.h +--- linux-3.4.110.orig/arch/nds32/include/asm/mutex.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/mutex.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/mutex.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_MUTEX_H__ ++#define __NDS32_MUTEX_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/namei.h linux-3.4.110/arch/nds32/include/asm/namei.h +--- linux-3.4.110.orig/arch/nds32/include/asm/namei.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/namei.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/namei.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_NAMEI_H__ ++#define __NDS32_NAMEI_H__ ++ ++#define __emul_prefix() NULL ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/nds32.h linux-3.4.110/arch/nds32/include/asm/nds32.h +--- linux-3.4.110.orig/arch/nds32/include/asm/nds32.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/nds32.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,90 @@ ++/* ++ * linux/arch/nds32/include/asm/nds32.h -- Andes NDS32 processor register interface ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. 
++ * ++ * Copyright (C) 2006 Andes Technology Corporation ++ * ++ */ ++#ifndef _ASM_NDS32_NDS32_H_ ++#define _ASM_NDS32_NDS32_H_ ++ ++#include ++#include ++ ++#define MSYNC( subtype) __asm__ ("\n\tmsync "#subtype); ++#define STANDBY( cond) __asm__ ("\n\tstandby "#cond); ++ ++#ifndef __ASSEMBLY__ ++ ++static inline void ISB( void) { __asm__ ("\n\tisb"); } ++static inline void DSB( void) { __asm__ ("\n\tdsb"); } ++ ++static inline void GIE_ENABLE( void) ++{ ++ __asm__ ("gie_enable\n\t"); ++} ++ ++static inline void GIE_DISABLE( void) ++{ ++ __asm__ ("gie_disable\n\t"); ++} ++ ++enum cache_t{ ICACHE, DCACHE}; ++ ++static inline unsigned long CACHE_SET( enum cache_t cache){ ++ ++ if( cache == ICACHE) ++ return 64 << ( ( GET_ICM_CFG() & ICM_CFG_mskISET) >> ICM_CFG_offISET); ++ else ++ return 64 << ( ( GET_DCM_CFG() & DCM_CFG_mskDSET) >> DCM_CFG_offDSET); ++} ++ ++static inline unsigned long CACHE_WAY( enum cache_t cache){ ++ ++ if( cache == ICACHE) ++ return 1 + ( ( GET_ICM_CFG() & ICM_CFG_mskIWAY) >> ICM_CFG_offIWAY); ++ else ++ return 1 + ( ( GET_DCM_CFG() & DCM_CFG_mskDWAY) >> DCM_CFG_offDWAY); ++} ++ ++static inline unsigned long CACHE_LINE_SIZE( enum cache_t cache){ ++ ++ if( cache == ICACHE) ++ return 8 << ( ( ( GET_ICM_CFG() & ICM_CFG_mskISZ) >> ICM_CFG_offISZ) - 1); ++ else ++ return 8 << ( ( ( GET_DCM_CFG() & DCM_CFG_mskDSZ) >> DCM_CFG_offDSZ) - 1); ++} ++ ++static inline void GIE_SAVE( unsigned long *var){ ++ ++ *var = GET_PSW(); ++ GIE_DISABLE(); ++} ++ ++static inline void GIE_RESTORE( unsigned long var){ ++ ++ if( var & PSW_mskGIE){ ++ GIE_ENABLE(); ++ } ++} ++ ++#endif /* __ASSEMBLY__ */ ++ ++#define IVB_BASE PHYS_OFFSET /* in user space for intr/exc/trap/break table base, 64KB aligned ++ * We defined at the start of the physical memory */ ++ ++/* The following dispatching entry */ ++#define ENTRY_TLB_MISC (IVB_BASE + nrTLB_MISC*vENTRY_SZ) /* TLB misc eh# */ ++/* dispatched sub-entry exception handler numbering */ ++#define RD_PROT 0 /* read protrection */ ++#define WRT_PROT 1 /* write protection */ ++#define NOEXEC 2 /* non executable */ ++#define PAGE_MODIFY 3 /* page modified */ ++#define ACC_BIT 4 /* access bit */ ++#define RESVED_PTE 5 /* reserved PTE attribute */ ++/* reserved 6 ~ 16 */ ++ ++#endif /* _ASM_NDS32_NDS32_H_ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/numnodes.h linux-3.4.110/arch/nds32/include/asm/numnodes.h +--- linux-3.4.110.orig/arch/nds32/include/asm/numnodes.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/numnodes.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/include/asm/numnodes.h ++ * ++ * Copyright (C) 2002 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++/* This declaration for the size of the NUMA (CONFIG_DISCONTIGMEM) ++ * memory node table is the default. ++ * ++ * A good place to override this value is include/asm/arch/memory.h. 
++ */ ++ ++#ifndef __ASM_NDS32_NUMNODES_H ++#define __ASM_NDS32_NUMNODES_H ++ ++#ifndef NODES_SHIFT ++# define NODES_SHIFT 2 /* Normally, Max 4 Nodes */ ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/page.h linux-3.4.110/arch/nds32/include/asm/page.h +--- linux-3.4.110.orig/arch/nds32/include/asm/page.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/page.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,153 @@ ++/* ++ * linux/arch/nds32/include/asm/page.h ++ * ++ * Copyright (C) 1995-2003 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _ASMNDS32_PAGE_H ++#define _ASMNDS32_PAGE_H ++ ++ ++#ifdef CONFIG_ANDES_PAGE_SIZE_4KB ++#define PAGE_SHIFT 12 ++#endif ++#ifdef CONFIG_ANDES_PAGE_SIZE_8KB ++#define PAGE_SHIFT 13 ++#endif ++#include ++#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) ++#define PAGE_MASK (~(PAGE_SIZE-1)) ++#define PTE_MASK PAGE_MASK ++ ++/* PAGE_SHIFT determines the page size */ ++#define EXEC_PAGESIZE PAGE_SIZE ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_16KB ++#define LARGE_PAGE_SHIFT 14 ++#define HPAGE_SHIFT 14 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_64KB ++#define LARGE_PAGE_SHIFT 16 ++#define HPAGE_SHIFT 16 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_256KB ++#define LARGE_PAGE_SHIFT 18 ++#define HPAGE_SHIFT 18 ++#endif ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_1MB ++#define LARGE_PAGE_SHIFT 20 ++#define HPAGE_SHIFT 20 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_4MB ++#define LARGE_PAGE_SHIFT 22 ++#define HPAGE_SHIFT 22 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_16MB ++#define LARGE_PAGE_SHIFT 24 ++#define HPAGE_SHIFT 24 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_64MB ++#define LARGE_PAGE_SHIFT 26 ++#define HPAGE_SHIFT 26 ++#endif ++ ++#ifdef CONFIG_ANDES_HUGETLB_PAGE_SIZE_256MB ++#define LARGE_PAGE_SHIFT 28 ++#define HPAGE_SHIFT 28 ++#endif ++ ++#ifdef CONFIG_HUGETLB_PAGE ++// taken for i386 style ++#define LARGE_PAGE_SIZE (1UL << LARGE_PAGE_SHIFT) ++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) ++// taken for SH style ++#define HPAGE_SIZE (1UL << HPAGE_SHIFT) ++#define HPAGE_MASK (~(HPAGE_SIZE-1)) ++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) ++#endif ++ ++ ++#ifdef __KERNEL__ ++ ++#ifndef __ASSEMBLY__ ++ ++struct page; ++struct vm_area_struct; ++#ifndef CONFIG_CPU_CACHE_NONALIASING ++extern void copy_user_highpage(struct page *to, struct page *from, ++ unsigned long vaddr, struct vm_area_struct *vma); ++extern void clear_user_highpage(struct page *page, unsigned long vaddr); ++ ++#define __HAVE_ARCH_COPY_USER_HIGHPAGE ++#define clear_user_highpage clear_user_highpage ++#else ++#define clear_user_page(page, vaddr, pg) clear_page(page) ++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) ++#endif ++ ++void clear_page(void *page); ++void copy_page(void *to, void *from); ++ ++#undef STRICT_MM_TYPECHECKS ++ ++#ifdef STRICT_MM_TYPECHECKS ++/* ++ * These are used to make use of C type-checking.. 
++ */ ++typedef struct { unsigned long pte; } pte_t; ++typedef struct { unsigned long pmd; } pmd_t; ++typedef struct { unsigned long pgd; } pgd_t; ++typedef struct { unsigned long pgprot; } pgprot_t; ++ ++#define pte_val(x) ((x).pte) ++#define pmd_val(x) ((x).pmd) ++#define pgd_val(x) ((x).pgd) ++#define pgprot_val(x) ((x).pgprot) ++ ++#define __pte(x) ((pte_t) { (x) } ) ++#define __pmd(x) ((pmd_t) { (x) } ) ++#define __pgd(x) ((pgd_t) { (x) } ) ++#define __pgprot(x) ((pgprot_t) { (x) } ) ++ ++#else ++/* ++ * .. while these make it easier on the compiler ++ */ ++typedef unsigned long pte_t; ++typedef unsigned long pmd_t; ++typedef unsigned long pgd_t; ++typedef unsigned long pgprot_t; ++ ++#define pte_val(x) (x) ++#define pmd_val(x) (x) ++#define pgd_val(x) (x) ++#define pgprot_val(x) (x) ++ ++#define __pte(x) (x) ++#define __pmd(x) (x) ++#define __pgd(x) (x) ++#define __pgprot(x) (x) ++ ++#endif /* STRICT_MM_TYPECHECKS */ ++typedef struct page *pgtable_t; ++ ++#include ++#include ++ ++#endif /* !__ASSEMBLY__ */ ++ ++#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ ++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++ ++#endif /* __KERNEL__ */ ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/param.h linux-3.4.110/arch/nds32/include/asm/param.h +--- linux-3.4.110.orig/arch/nds32/include/asm/param.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/param.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/include/asm/param.h ++ * ++ * Copyright (C) 1995-1999 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_PARAM_H ++#define __ASM_PARAM_H ++ ++#ifdef __KERNEL__ ++ ++# ifndef HZ ++# define HZ CONFIG_HZ /* Internal kernel timer frequency */ ++# endif ++ ++# define USER_HZ HZ /* User interfaces are in "ticks" */ ++# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ ++#else ++# define HZ 100 ++#endif ++ ++//#define EXEC_PAGESIZE 4096 ++ ++#ifndef NOGROUP ++#define NOGROUP (-1) ++#endif ++ ++/* max length of hostname */ ++#define MAXHOSTNAMELEN 64 ++ ++#endif ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/parport.h linux-3.4.110/arch/nds32/include/asm/parport.h +--- linux-3.4.110.orig/arch/nds32/include/asm/parport.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/parport.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,19 @@ ++/* ++ * linux/arch/nds32/include/asm/parport.h: NDS32-specific parport initialisation ++ * ++ * Copyright (C) 1999, 2000 Tim Waugh ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This file should only be included by drivers/parport/parport_pc.c. 
++ */ ++ ++#ifndef __ASMNDS32_PARPORT_H ++#define __ASMNDS32_PARPORT_H ++ ++static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); ++static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) ++{ ++ return parport_pc_find_isa_ports (autoirq, autodma); ++} ++ ++#endif /* !(_ASMNDS32_PARPORT_H) */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/pci.h linux-3.4.110/arch/nds32/include/asm/pci.h +--- linux-3.4.110.orig/arch/nds32/include/asm/pci.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/pci.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,77 @@ ++/* ++ * linux/arch/nds32/include/asm/pci.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef ASMNDS32_PCI_H ++#define ASMNDS32_PCI_H ++ ++#ifdef __KERNEL__ ++#include ++ ++#include /* for PCIBIOS_MIN_* */ ++ ++#define pcibios_scan_all_fns(a, b) 0 ++ ++static inline void pcibios_set_master(struct pci_dev *dev) ++{ ++ /* No special bus mastering setup handling */ ++} ++ ++static inline void pcibios_penalize_isa_irq(int irq) ++{ ++ /* We don't do dynamic PCI IRQ allocation */ ++} ++ ++/* ++ * The PCI address space does equal the physical memory address space. ++ * The networking and block device layers use this boolean for bounce ++ * buffer decisions. ++ */ ++#define PCI_DMA_BUS_IS_PHYS (0) ++ ++/* ++ * Whether pci_unmap_{single,page} is a nop depends upon the ++ * configuration. ++ */ ++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; ++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; ++#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) ++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) ++#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) ++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) ++ ++#define HAVE_PCI_MMAP ++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ enum pci_mmap_state mmap_state, int write_combine); ++ ++extern void ++pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, ++ struct resource *res); ++ ++extern void ++pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, ++ struct pci_bus_region *region); ++ ++static inline struct resource * ++pcibios_select_root(struct pci_dev *pdev, struct resource *res) ++{ ++ struct resource *root = NULL; ++ ++ if (res->flags & IORESOURCE_IO) ++ root = &ioport_resource; ++ if (res->flags & IORESOURCE_MEM) ++ root = &iomem_resource; ++ ++ return root; ++} ++ ++#endif /* __KERNEL__ */ ++ ++/* Chances are this interrupt is wired PC-style ... */ ++static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) ++{ ++ return channel ? 
15 : 14; ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/percpu.h linux-3.4.110/arch/nds32/include/asm/percpu.h +--- linux-3.4.110.orig/arch/nds32/include/asm/percpu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/percpu.h 2016-04-07 10:20:50.902079477 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/percpu.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_PERCPU ++#define __NDS32_PERCPU ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/pfm.h linux-3.4.110/arch/nds32/include/asm/pfm.h +--- linux-3.4.110.orig/arch/nds32/include/asm/pfm.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/pfm.h 2016-04-07 10:20:50.906079632 +0200 +@@ -0,0 +1,16 @@ ++#ifndef __PFM_H_ ++#define __PFM_H_ ++ ++struct pcounter { ++ unsigned long long pfm0; /* value of $PFMC0 */ ++ unsigned long long pfm1; /* value of $PFMC1 */ ++ unsigned long long pfm2; /* value of $PFMC2 */ ++}; ++ ++#ifdef __KERNEL__ ++void sys_pfmctl(int event0, int event1, int event2, int start); ++int sys_getpfm(struct pcounter __user *p); ++int sys_setpfm(int pfm0, int pfm1, int pfm2, struct pcounter __user *p); ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/pgalloc.h linux-3.4.110/arch/nds32/include/asm/pgalloc.h +--- linux-3.4.110.orig/arch/nds32/include/asm/pgalloc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/pgalloc.h 2016-04-07 10:20:50.906079632 +0200 +@@ -0,0 +1,104 @@ ++/* ++ * linux/arch/nds32/include/asm/pgalloc.h ++ * ++ * Copyright (C) 2000-2001 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _ASMNDS32_PGALLOC_H ++#define _ASMNDS32_PGALLOC_H ++ ++#include ++#include ++#include ++ ++/* ++ * Since we have only two-level page tables, these are trivial ++ */ ++#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) ++#define pmd_free(mm, pmd) do { } while (0) ++#define pgd_populate(mm, pmd, pte) BUG() ++#define pmd_pgtable(pmd) pmd_page(pmd) ++ ++extern pgd_t *get_pgd_slow(struct mm_struct *mm); ++extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); ++ ++#define pgd_alloc(mm) get_pgd_slow(mm) ++#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) ++ ++#define check_pgt_cache() do { } while (0) ++ ++static inline pte_t * ++pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) ++{ ++ pte_t *pte; ++ ++ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); ++ ++ return pte; ++} ++ ++static inline pgtable_t ++pte_alloc_one(struct mm_struct *mm, unsigned long addr) ++{ ++ pgtable_t pte; ++ ++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); ++ if (pte) ++ cpu_dcache_wb_page((unsigned long)page_address(pte)); ++ ++ return pte; ++} ++ ++/* ++ * Free one PTE table. ++ */ ++static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) ++{ ++ if (pte) { ++ free_page((unsigned long)pte); ++ } ++} ++ ++static inline void pte_free(struct mm_struct *mm, pgtable_t pte) ++{ ++ __free_page(pte); ++} ++ ++/* ++ * Populate the pmdp entry with a pointer to the pte. This pmd is part ++ * of the mm address space. ++ * ++ * Ensure that we always set both PMD entries. 
++ */ ++static inline void ++pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) ++{ ++ unsigned long pte_ptr = (unsigned long)ptep; ++ unsigned long pmdval; ++ ++ BUG_ON(mm != &init_mm); ++ ++ /* ++ * The pmd must be loaded with the physical ++ * address of the PTE table ++ */ ++ pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; ++ set_pmd(pmdp, __pmd(pmdval)); ++} ++ ++static inline void ++pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) ++{ ++ unsigned long pmdval; ++ ++ BUG_ON(mm == &init_mm); ++ ++ pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; ++ set_pmd(pmdp, __pmd(pmdval)); ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/pgtable.h linux-3.4.110/arch/nds32/include/asm/pgtable.h +--- linux-3.4.110.orig/arch/nds32/include/asm/pgtable.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/pgtable.h 2016-04-07 10:20:50.910079787 +0200 +@@ -0,0 +1,429 @@ ++/* ++ * linux/arch/nds32/include/asm/pgtable.h ++ * ++ * Copyright (C) 1995-2002 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _ASMNDS32_PGTABLE_H ++#define _ASMNDS32_PGTABLE_H ++ ++#include ++#include ++ ++#include ++#include ++#include ++#ifndef __ASSEMBLY__ ++#include ++#endif ++ ++#ifdef CONFIG_CACHE_L2 ++#include ++#endif ++ ++#ifdef CONFIG_ANDES_PAGE_SIZE_4KB ++#define PGDIR_SHIFT 22 ++#define PTRS_PER_PGD 1024 ++#define PMD_SHIFT 22 ++#define PTRS_PER_PMD 1 ++#define PTRS_PER_PTE 1024 ++#endif ++ ++#ifdef CONFIG_ANDES_PAGE_SIZE_8KB ++#define PGDIR_SHIFT 24 ++#define PTRS_PER_PGD 256 ++#define PMD_SHIFT 24 ++#define PTRS_PER_PMD 1 ++#define PTRS_PER_PTE 2048 ++#endif ++ ++#ifndef __ASSEMBLY__ ++extern void __pte_error(const char *file, int line, unsigned long val); ++extern void __pmd_error(const char *file, int line, unsigned long val); ++extern void __pgd_error(const char *file, int line, unsigned long val); ++ ++#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) ++#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) ++#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) ++#endif /* !__ASSEMBLY__ */ ++ ++#define PMD_SIZE (1UL << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++#define PGDIR_SIZE (1UL << PGDIR_SHIFT) ++#define PGDIR_MASK (~(PGDIR_SIZE-1)) ++ ++/* ++ * This is the lowest virtual address we can permit any user space ++ * mapping to be mapped at. This is particularly important for ++ * non-high vector CPUs. 
++ */ ++#define FIRST_USER_ADDRESS 0x8000 ++ ++#define VMALLOC_OFFSET (8 * 1024 * 1024) ++#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) ++#define VMALLOC_VMADDR(x) ((unsigned long)(x)) ++ ++ ++#ifdef CONFIG_HIGHMEM ++#define CONSISTENT_BASE ((PKMAP_BASE) - (SZ_2M)) ++#define CONSISTENT_END (PKMAP_BASE) ++#else ++#define CONSISTENT_BASE (FIXADDR_START - SZ_2M) ++#define CONSISTENT_END (FIXADDR_START) ++#endif ++#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) ++ ++#ifdef CONFIG_HIGHMEM ++#ifndef __ASSEMBLY__ ++#include ++#endif ++#endif ++ ++//# define VMALLOC_END (CONSISTENT_START - PAGE_SIZE) ++# define VMALLOC_END (0xf9000000) ++ ++#define VMALLOC_RESERVE (128 << 20) ++#define MAXMEM (VMALLOC_END - PAGE_OFFSET - VMALLOC_RESERVE) ++#define MAXMEM_PFN PFN_DOWN(MAXMEM) ++ ++#define FIRST_USER_PGD_NR 0 ++#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) + FIRST_USER_PGD_NR) ++ ++/* L2 PTE */ ++#define _PAGE_V (1UL << 0) ++ ++#define _PAGE_M_XKRW (0UL << 1) ++#define _PAGE_M_UR_KR (1UL << 1) ++#define _PAGE_M_UR_KRW (2UL << 1) ++#define _PAGE_M_URW_KRW (3UL << 1) ++#define _PAGE_M_KR (5UL << 1) ++#define _PAGE_M_KRW (7UL << 1) ++ ++#define _PAGE_D (1UL << 4) ++#define _PAGE_E (1UL << 5) ++#define _PAGE_A (1UL << 6) ++#define _PAGE_G (1UL << 7) ++ ++#define _PAGE_C_DEV (0UL << 8) ++#define _PAGE_C_DEV_WB (1UL << 8) ++#define _PAGE_C_MEM (2UL << 8) ++#define _PAGE_C_MEM_SHRD_WB (4UL << 8) ++#define _PAGE_C_MEM_SHRD_WT (5UL << 8) ++#define _PAGE_C_MEM_WB (6UL << 8) ++#define _PAGE_C_MEM_WT (7UL << 8) ++ ++#define _PAGE_L (1UL << 11) ++ ++#ifndef CONFIG_NO_KERNEL_LARGE_PAGE ++#define _HAVE_PAGE_L (_PAGE_L) ++#else ++#define _HAVE_PAGE_L 0 ++#endif ++#define _PAGE_FILE (1UL << 1) ++#define _PAGE_YOUNG 0 ++#define _PAGE_M_MASK _PAGE_M_KRW ++#define _PAGE_C_MASK _PAGE_C_MEM_WT ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WT ++#else ++#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WB ++#endif ++#else ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WT ++#else ++#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WB ++#endif ++#endif ++ ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++#define _PAGE_CACHE _PAGE_C_MEM_WT ++#else ++#define _PAGE_CACHE _PAGE_C_MEM_WB ++#endif ++ ++/* ++ * + Level 1 descriptor (PMD) ++ */ ++#define PMD_TYPE_TABLE 0 ++ ++#ifndef __ASSEMBLY__ ++ ++#define _PAGE_USER_TABLE PMD_TYPE_TABLE ++#define _PAGE_KERNEL_TABLE PMD_TYPE_TABLE ++ ++#define PAGE_EXEC __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E) ++#define PAGE_NONE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A) ++#define PAGE_READ __pgprot(_PAGE_V | _PAGE_M_UR_KR) ++#define PAGE_RDWR __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D) ++#define PAGE_COPY __pgprot(_PAGE_V | _PAGE_M_UR_KR) ++ ++#define PAGE_UXKRWX_V1 __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) ++#define PAGE_UXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) ++#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) ++#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) ++#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) ++#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) ++#endif /* __ASSEMBLY__ */ ++ ++/* xwr */ ++#define __P000 (PAGE_NONE | 
_PAGE_CACHE_SHRD) ++#define __P001 (PAGE_READ | _PAGE_CACHE_SHRD) ++#define __P010 (PAGE_COPY | _PAGE_CACHE_SHRD) ++#define __P011 (PAGE_COPY | _PAGE_CACHE_SHRD) ++#define __P100 (PAGE_EXEC | _PAGE_CACHE_SHRD) ++#define __P101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) ++#define __P110 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) ++#define __P111 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) ++ ++#define __S000 (PAGE_NONE | _PAGE_CACHE_SHRD) ++#define __S001 (PAGE_READ | _PAGE_CACHE_SHRD) ++#define __S010 (PAGE_RDWR | _PAGE_CACHE_SHRD) ++#define __S011 (PAGE_RDWR | _PAGE_CACHE_SHRD) ++#define __S100 (PAGE_EXEC | _PAGE_CACHE_SHRD) ++#define __S101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) ++#define __S110 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) ++#define __S111 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) ++ ++#ifndef __ASSEMBLY__ ++/* ++ * ZERO_PAGE is a global shared page that is always zero: used ++ * for zero-mapped memory areas etc.. ++ */ ++extern struct page *empty_zero_page; ++#define ZERO_PAGE(vaddr) (empty_zero_page) ++ ++#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) ++#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) ++ ++#define pte_none(pte) !(pte_val(pte)) ++#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) ++#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) ++ ++#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) ++#define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address)) ++#define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) ++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) ++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) ++ ++#define pte_unmap(pte) do { } while (0) ++#define pte_unmap_nested(pte) do { } while (0) ++ ++#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) ++ ++static inline pgd_t *get_pgd( void){ ++ ++ return ( pgd_t *)phys_to_virt( GET_L1_PPTB() & L1_PPTB_mskBASE); ++} ++ ++static inline void set_pgd( pgd_t *pgdp, pgd_t pgd){ ++ ++ /* TODO */ ++} ++/* ++ * Set a level 1 translation table entry, and clean it out of ++ * any caches such that the MMUs can load it correctly. ++ */ ++static inline void set_pmd( pmd_t *pmdp, pmd_t pmd){ ++ ++ *pmdp = pmd; ++#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ __asm__ volatile ( "\n\tcctl %0, L1D_VA_WB" ::"r" ( pmdp) :"memory"); ++ MSYNC( all); ++ DSB(); ++#endif ++} ++ ++/* ++ * Set a PTE and flush it out ++ */ ++static inline void set_pte( pte_t *ptep, pte_t pte){ ++ ++ *ptep = pte; ++#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) ++ __asm__ volatile ( "\n\tcctl %0, L1D_VA_WB" ::"r" ( ptep) :"memory"); ++ MSYNC( all); ++ DSB(); ++#endif ++} ++ ++ ++/* ++ * The following only work if pte_present() is true. ++ * Undefined behaviour if not.. ++ */ ++ ++/* ++ * pte_write: this page is writeable for user mode ++ * pte_read: this page is readable for user mode ++ * pte_kernel_write: this page is writeable for kernel mode ++ * ++ * We don't have pte_kernel_read because kernel always can read. 
++ * ++ * */ ++ ++#define pte_present(pte) (pte_val(pte) & _PAGE_V) ++#define pte_write(pte) ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) ++#define pte_read(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \ ++ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ ++ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)) ++#define pte_kernel_write(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \ ++ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ ++ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \ ++ (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte))) ++#define pte_exec(pte) (pte_val(pte) & _PAGE_E) ++#define pte_dirty(pte) (pte_val(pte) & _PAGE_D) ++#define pte_young(pte) (pte_val(pte) & _PAGE_YOUNG) ++ ++/* ++ * The following only works if pte_present() is not true. ++ */ ++#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) ++#define pte_to_pgoff(x) (pte_val(x) >> 2) ++#define pgoff_to_pte(x) __pte(((x) << 2) | _PAGE_FILE) ++ ++#define PTE_FILE_MAX_BITS 29 ++ ++#define PTE_BIT_FUNC(fn,op) \ ++static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } ++ ++static inline pte_t pte_wrprotect(pte_t pte) ++{ ++ pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; ++ pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR; ++ return pte; ++} ++ ++static inline pte_t pte_mkwrite(pte_t pte) ++{ ++ pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; ++ pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW; ++ return pte; ++} ++ ++PTE_BIT_FUNC(exprotect, &= ~_PAGE_E); ++PTE_BIT_FUNC(mkexec, |= _PAGE_E); ++PTE_BIT_FUNC(mkclean, &= ~_PAGE_D); ++PTE_BIT_FUNC(mkdirty, |= _PAGE_D); ++PTE_BIT_FUNC(mkold, &= ~_PAGE_YOUNG); ++PTE_BIT_FUNC(mkyoung, |= _PAGE_YOUNG); ++static inline int pte_special(pte_t pte) { return 0; } ++static inline pte_t pte_mkspecial(pte_t pte) { return pte; } ++ ++/* ++ * Mark the prot value as uncacheable and unbufferable. ++ */ ++#define pgprot_noncached(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV) ++#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV_WB) ++ ++#define pmd_none(pmd) (pmd_val(pmd)&0x1) ++#define pmd_present(pmd) (!pmd_none(pmd)) ++#define pmd_bad(pmd) pmd_none(pmd) ++ ++#define copy_pmd(pmdpd,pmdps) set_pmd((pmdpd), *(pmdps)) ++#define pmd_clear(pmdp) set_pmd((pmdp), __pmd(1)) ++ ++static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot) ++{ ++ unsigned long ptr = (unsigned long)ptep; ++ pmd_t pmd; ++ ++ /* ++ * The pmd must be loaded with the physical ++ * address of the PTE table ++ */ ++ ++ pmd_val(pmd) = __virt_to_phys(ptr) | prot; ++ return pmd; ++} ++ ++ ++#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) ++ ++/* ++ * Permanent address of a page. We never have highmem, so this is trivial. ++ */ ++#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) ++ ++/* ++ * Conversion functions: convert a page and protection to a page entry, ++ * and a page entry and page directory to the page they refer to. 
++ */ ++#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) ++ ++/* ++ * The "pgd_xxx()" functions here are trivial for a folded two-level ++ * setup: the pgd is never bad, and a pmd always exists (as it's folded ++ * into the pgd entry) ++ */ ++#define pgd_none(pgd) (0) ++#define pgd_bad(pgd) (0) ++#define pgd_present(pgd) (1) ++#define pgd_clear(pgdp) do { } while (0) ++ ++#define page_pte_prot(page,prot) mk_pte(page, prot) ++#define page_pte(page) mk_pte(page, __pgprot(0)) ++/* Tom: ++ * L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2); ++ * L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE -1 )) << 2); ++ * PPN = (phys & 0xfffff000); ++ * ++*/ ++ ++/* to find an entry in a page-table-directory */ ++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++/* to find an entry in a kernel page-table-directory */ ++#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) ++ ++/* Find an entry in the second-level page table.. */ ++#define pmd_offset(dir, addr) ((pmd_t *)(dir)) ++ ++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ++{ ++ const unsigned long mask = 0xfff; ++ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); ++ return pte; ++} ++ ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++ ++/* Encode and decode a swap entry. ++ * ++ * We support up to 32GB of swap on 4k machines ++ */ ++#define __swp_type(x) (((x).val >> 2) & 0x7f) ++#define __swp_offset(x) ((x).val >> 9) ++#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) }) ++#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) ++#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) ++ ++/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ ++/* FIXME: this is not correct */ ++#define kern_addr_valid(addr) (1) ++ ++#include ++ ++/* ++ * We provide our own arch_get_unmapped_area to cope with VIPT caches. ++ */ ++#define HAVE_ARCH_UNMAPPED_AREA ++ ++/* ++ * remap a physical address `phys' of size `size' with page protection `prot' ++ * into virtual address `from' ++ */ ++#define io_remap_pfn_range(vma,from,pfn,size,prot) \ ++ remap_pfn_range(vma, from, pfn, size, prot) ++ ++#define pgtable_cache_init() do { } while (0) ++ ++#endif /* !__ASSEMBLY__ */ ++ ++#endif /* _ASMNDS32_PGTABLE_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/poll.h linux-3.4.110/arch/nds32/include/asm/poll.h +--- linux-3.4.110.orig/arch/nds32/include/asm/poll.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/poll.h 2016-04-07 10:20:50.910079787 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/poll.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASMNDS32_POLL_H ++#define __ASMNDS32_POLL_H ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/posix_types.h linux-3.4.110/arch/nds32/include/asm/posix_types.h +--- linux-3.4.110.orig/arch/nds32/include/asm/posix_types.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/posix_types.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,40 @@ ++/* ++ * arch/arm/include/asm/posix_types.h ++ * ++ * Copyright (C) 1996-1998 Russell King. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * Changelog: ++ * 27-06-1996 RMK Created ++ */ ++#ifndef __ARCH_NDS32_POSIX_TYPES_H ++#define __ARCH_NDS32_POSIX_TYPES_H ++ ++/* ++ * This file is generally used by user-level software, so you need to ++ * be a little careful about namespace pollution etc. Also, we cannot ++ * assume GCC is being used. ++ */ ++ ++typedef unsigned short __kernel_mode_t; ++#define __kernel_mode_t __kernel_mode_t ++ ++typedef unsigned short __kernel_nlink_t; ++#define __kernel_nlink_t __kernel_nlink_t ++ ++typedef unsigned short __kernel_ipc_pid_t; ++#define __kernel_ipc_pid_t __kernel_ipc_pid_t ++ ++typedef unsigned short __kernel_uid_t; ++typedef unsigned short __kernel_gid_t; ++#define __kernel_uid_t __kernel_uid_t ++ ++typedef unsigned short __kernel_old_dev_t; ++#define __kernel_old_dev_t __kernel_old_dev_t ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/processor.h linux-3.4.110/arch/nds32/include/asm/processor.h +--- linux-3.4.110.orig/arch/nds32/include/asm/processor.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/processor.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,133 @@ ++/* ++ * linux/arch/nds32/include/asm/processor.h ++ */ ++/* Copyright (C) 1995-1999 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is ptrace relative code for Andes NDS32 architecture. ++ * Original referred from ARM, fit to NDS32. ++ * ++ * Revision History: ++ * ++ * Oct.03.2007 Original from Tom, Shawn and Steven, refined by Harry. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#ifndef __ASM_NDS32_PROCESSOR_H ++#define __ASM_NDS32_PROCESSOR_H ++ ++/* ++ * Default implementation of macro that returns current ++ * instruction pointer ("program counter"). ++ */ ++#define current_text_addr() ({ __label__ _l; _l: &&_l;}) ++ ++#ifdef __KERNEL__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define KERNEL_STACK_SIZE PAGE_SIZE ++#define STACK_TOP TASK_SIZE ++#define STACK_TOP_MAX TASK_SIZE ++ ++struct debug_info { ++ u32 address; ++ u16 insn; ++ u8 valid; ++}; ++ ++struct thread_struct { ++ /* fault info */ ++ unsigned long address; ++ unsigned long trap_no; ++ unsigned long error_code; ++ ++ struct fpu_struct fpu; /* Saved fpu/fpu emulator stuff. 
*/ ++ struct audio_struct audio; ++ struct debug_info debug; /* debugging */ ++}; ++ ++#define INIT_THREAD { } ++ ++#ifdef __NDS32_EB__ ++#define PSW_DE PSW_mskBE ++#else ++#define PSW_DE 0x0 ++#endif ++ ++#ifdef CONFIG_WBNA ++#define PSW_valWBNA PSW_mskWBNA ++#else ++#define PSW_valWBNA 0x0 ++#endif ++ ++#define start_thread(regs,pc,sp) \ ++({ \ ++ unsigned long *stack = (unsigned long *)sp; \ ++ set_fs(USER_DS); \ ++ memzero(regs->uregs, sizeof(regs->uregs)); \ ++ regs->NDS32_ipsw = (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE); \ ++ regs->NDS32_ir0 = (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_SYSTEM | PSW_INTL_1); \ ++ regs->NDS32_ipc = pc; /* pc */ \ ++ regs->NDS32_sp = sp; /* $sp */ \ ++ regs->NDS32_r2 = stack[4]; /* $r2 (envp) */ \ ++ regs->NDS32_r1 = stack[1]; /* $r1 (argv) */ \ ++ regs->NDS32_r0 = stack[0]; /* $r0 (argc) */ \ ++}) ++ ++ ++/* Forward declaration, a strange C thing */ ++struct task_struct; ++ ++/* Free all resources held by a thread. */ ++extern void release_thread(struct task_struct *); ++#ifdef CONFIG_FPU ++#ifndef CONFIG_UNLAZU_FPU //lazy fpu ++extern struct task_struct *last_task_used_math; ++#endif ++#endif ++#ifdef CONFIG_AUDIO ++#ifndef CONFIG_UNLAZY_AUDIO //lazy audio ++extern struct task_struct *last_task_used_audio; ++#endif ++#endif ++ ++/* Prepare to copy thread state - unlazy all lazy status */ ++#define prepare_to_copy(tsk) do { } while (0) ++ ++unsigned long get_wchan(struct task_struct *p); ++ ++#define cpu_relax() barrier() ++ ++#define task_pt_regs(task) \ ++ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ ++ - 8) - 1) ++ ++/* ++ * Create a new kernel thread ++ */ ++extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); ++ ++#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)task_thread_info(tsk)))[1019]) ++#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)task_thread_info(tsk)))[1017]) ++ ++ ++#endif ++ ++#endif /* __ASM_NDS32_PROCESSOR_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/proc-fns.h linux-3.4.110/arch/nds32/include/asm/proc-fns.h +--- linux-3.4.110.orig/arch/nds32/include/asm/proc-fns.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/proc-fns.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,88 @@ ++/* ++ * linux/arch/nds32/include/asm/proc-fns.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_PROCFNS_H__ ++#define __NDS32_PROCFNS_H__ ++ ++#define CPU_NAME n12 ++ ++#ifdef __KERNEL__ ++ ++#ifdef __STDC__ ++#define ____cpu_fn(name,fn) name##fn ++#else ++#define ____cpu_fn(name,fn) name/**/fn ++#endif ++#define __cpu_fn(name,fn) ____cpu_fn(name,fn) ++ ++#define cpu_proc_init __cpu_fn( CPU_NAME, _proc_init) ++#define cpu_proc_fin __cpu_fn( CPU_NAME, _proc_fin) ++#define cpu_do_idle __cpu_fn( CPU_NAME, _do_idle) ++#define cpu_reset __cpu_fn( CPU_NAME, _reset) ++#define cpu_switch_mm __cpu_fn( CPU_NAME, _switch_mm) ++ ++#define cpu_dcache_inval_all __cpu_fn( CPU_NAME, _dcache_inval_all) ++#define cpu_dcache_wbinval_all __cpu_fn( CPU_NAME, _dcache_wbinval_all) ++#define cpu_dcache_inval_page __cpu_fn( CPU_NAME, _dcache_inval_page) ++#define cpu_dcache_wb_page __cpu_fn( CPU_NAME, _dcache_wb_page) ++#define cpu_dcache_wbinval_page __cpu_fn( CPU_NAME, _dcache_wbinval_page) ++#define cpu_dcache_inval_range __cpu_fn( CPU_NAME, _dcache_inval_range) ++#define cpu_dcache_wb_range __cpu_fn( CPU_NAME, _dcache_wb_range) ++#define cpu_dcache_wbinval_range __cpu_fn( CPU_NAME, 
_dcache_wbinval_range) ++ ++#define cpu_icache_inval_all __cpu_fn( CPU_NAME, _icache_inval_all) ++#define cpu_icache_inval_page __cpu_fn( CPU_NAME, _icache_inval_page) ++#define cpu_icache_inval_range __cpu_fn( CPU_NAME, _icache_inval_range) ++ ++#define cpu_cache_wbinval_page __cpu_fn( CPU_NAME, _cache_wbinval_page) ++#define cpu_cache_wbinval_range __cpu_fn( CPU_NAME, _cache_wbinval_range) ++#define cpu_cache_wbinval_range_check __cpu_fn( CPU_NAME, _cache_wbinval_range_check) ++ ++#define cpu_dma_wb_range __cpu_fn( CPU_NAME, _dma_wb_range) ++#define cpu_dma_inval_range __cpu_fn( CPU_NAME, _dma_inval_range) ++#define cpu_dma_wbinval_range __cpu_fn( CPU_NAME, _dma_wbinval_range) ++ ++#include ++ ++struct mm_struct; ++struct vm_area_struct; ++extern void cpu_proc_init(void); ++extern void cpu_proc_fin(void); ++extern void cpu_do_idle(void); ++extern void cpu_reset(unsigned long reset); ++extern void cpu_switch_mm(struct mm_struct *mm); ++ ++extern void cpu_dcache_inval_all(void); ++extern void cpu_dcache_wbinval_all(void); ++extern void cpu_dcache_inval_page(unsigned long page); ++extern void cpu_dcache_wb_page(unsigned long page); ++extern void cpu_dcache_wbinval_page(unsigned long page); ++extern void cpu_dcache_inval_range(unsigned long start, unsigned long end); ++extern void cpu_dcache_wb_range(unsigned long start, unsigned long end); ++extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end); ++ ++extern void cpu_icache_inval_all(void); ++extern void cpu_icache_inval_page(unsigned long page); ++extern void cpu_icache_inval_range(unsigned long start, unsigned long end); ++ ++extern void cpu_cache_wbinval_page(unsigned long page, int flushi); ++extern void cpu_cache_wbinval_range(unsigned long start, ++ unsigned long end, int flushi); ++extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end); ++ ++extern void cpu_dma_wb_range(unsigned long start, unsigned long end); ++extern void cpu_dma_inval_range(unsigned long start, unsigned long end); ++extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end); ++ ++#ifdef CONFIG_CACHE_L2 ++#define cpu_L2cache_inval __cpu_fn(CPU_NAME, _L2cache_inval) ++#define cpu_L2cache_wb __cpu_fn(CPU_NAME, _L2cache_wb) ++extern void cpu_L2cache_inval(void); ++extern void cpu_L2cache_wb(void); ++#endif ++ ++#endif /* __KERNEL__ */ ++#endif /* __NDS32_PROCFNS_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/procinfo.h linux-3.4.110/arch/nds32/include/asm/procinfo.h +--- linux-3.4.110.orig/arch/nds32/include/asm/procinfo.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/procinfo.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,71 @@ ++/* ++ * linux/arch/nds32/include/asm/procinfo.h ++ * ++ * Copyright (C) 1996-1999 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_PROCINFO_H ++#define __ASM_PROCINFO_H ++ ++#ifndef __ASSEMBLY__ ++ ++/* ++ * These structure are defined in assembly ++ * ( nds32/mm/proc-nds32.S) ++ */ ++struct proc_info_item { ++ ++ const char *manufacturer; ++ const char *cpu_name; ++}; ++ ++/* ++ * Note! struct processor is always defined if we're ++ * using MULTI_CPU, otherwise this entry is unused, ++ * but still exists. ++ * ++ * NOTE! 
The following structure is defined by assembly ++ * language, NOT C code. For more information, check: ++ * arch/arm/mm/proc-*.S and arch/arm/kernel/head.S ++ */ ++struct proc_info_list { ++ ++ unsigned int cpu_val; ++ unsigned int cpu_mask; ++ const char *arch_name; ++ const char *elf_name; ++ unsigned int elf_hwcap; ++ struct proc_info_item *info; ++}; ++ ++extern unsigned int elf_hwcap; ++ ++#endif /* __ASSEMBLY__ */ ++ ++#define HWCAP_MFUSR_PC 0x000001 ++#define HWCAP_EXT 0x000002 ++#define HWCAP_EXT2 0x000004 ++#define HWCAP_FPU 0x000008 ++#define HWCAP_AUDIO 0x000010 ++#define HWCAP_BASE16 0x000020 ++#define HWCAP_STRING 0x000040 ++#define HWCAP_REDUCED_REGS 0x000080 ++#define HWCAP_VIDEO 0x000100 ++#define HWCAP_ENCRYPT 0x000200 ++#define HWCAP_EDM 0x000400 ++#define HWCAP_LMDMA 0x000800 ++#define HWCAP_PFM 0x001000 ++#define HWCAP_HSMP 0x002000 ++#define HWCAP_TRACE 0x004000 ++#define HWCAP_DIV 0x008000 ++#define HWCAP_MAC 0x010000 ++#define HWCAP_L2C 0x020000 ++#define HWCAP_FPU_DP 0x040000 ++#define HWCAP_V2 0x080000 ++#define HWCAP_DX_REGS 0x100000 ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ptrace.h linux-3.4.110/arch/nds32/include/asm/ptrace.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ptrace.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ptrace.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,119 @@ ++/* ++ * linux/arch/nds32/include/asm/ptrace.h ++ * ++ * Copyright (C) 1996-2003 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_NDS32_PTRACE_H ++#define __ASM_NDS32_PTRACE_H ++ ++#define PTRACE_GETREGS 12 ++#define PTRACE_SETREGS 13 ++#define PTRACE_GETFPREGS 14 ++#define PTRACE_SETFPREGS 15 ++#define PTRACE_GETAUREGS 18 ++#define PTRACE_SETAUREGS 19 ++ ++#define PTRACE_OLDSETOPTIONS 21 ++ ++#define PTRACE_GET_THREAD_AREA 22 ++ ++#ifndef __ASSEMBLY__ ++/* this struct defines the way the registers are stored on the ++ stack during a system call. 
*/ ++ ++struct pt_regs { ++#if defined(CONFIG_ABI1) ++ long dummy[6]; ++#endif ++ long uregs[44]; ++}; ++#define NDS32_osp uregs[43] ++#define NDS32_FUCOP_CTL uregs[42] ++#define NDS32_lp uregs[41] ++#define NDS32_gp uregs[40] ++#define NDS32_fp uregs[39] ++#define NDS32_r25 uregs[38] ++#define NDS32_r24 uregs[37] ++#define NDS32_r23 uregs[36] ++#define NDS32_r22 uregs[35] ++#define NDS32_r21 uregs[34] ++#define NDS32_r20 uregs[33] ++#define NDS32_r19 uregs[32] ++#define NDS32_r18 uregs[31] ++#define NDS32_r17 uregs[30] ++#define NDS32_r16 uregs[29] ++#define NDS32_r15 uregs[28] ++#define NDS32_r14 uregs[27] ++#define NDS32_r13 uregs[26] ++#define NDS32_r12 uregs[25] ++#define NDS32_r11 uregs[24] ++#define NDS32_r10 uregs[23] ++#define NDS32_r9 uregs[22] ++#define NDS32_r8 uregs[21] ++#define NDS32_r7 uregs[20] ++#define NDS32_r6 uregs[19] ++#define NDS32_r5 uregs[18] ++#define NDS32_r4 uregs[17] ++#define NDS32_r3 uregs[16] ++#define NDS32_r2 uregs[15] ++#define NDS32_r1 uregs[14] ++#define NDS32_r0 uregs[13] ++#if defined(CONFIG_HWZOL) ++#define NDS32_lc uregs[11] ++#define NDS32_le uregs[10] ++#define NDS32_lb uregs[9] ++#endif ++#define NDS32_pp1 uregs[8] ++#define NDS32_pp0 uregs[7] ++#define NDS32_pipc uregs[6] ++#define NDS32_pipsw uregs[5] ++#define NDS32_ORIG_r0 uregs[4] ++#define NDS32_sp uregs[3] ++#define NDS32_ipc uregs[2] ++#define NDS32_ipsw uregs[1] ++#define NDS32_ir0 uregs[0] ++ ++#ifdef __KERNEL__ ++#include ++ ++#define arch_has_single_step() (1) ++struct task_struct; ++extern void user_enable_single_step(struct task_struct *); ++extern void user_disable_single_step(struct task_struct *); ++ ++#define user_mode(regs) (((regs)->NDS32_ipsw & PSW_mskPOM) == 0) ++ ++#define interrupts_enabled(regs) (!((regs)->NDS32_ipsw & ~PSW_mskGIE)) ++ ++extern void show_regs(struct pt_regs *); ++ ++/* Are the current registers suitable for user mode? 
++ * (used to maintain security in signal handlers) ++ */ ++static inline int valid_user_regs(struct pt_regs *regs) ++{ ++ return user_mode(regs) && ((regs->NDS32_ipsw & PSW_mskGIE) == 1); ++} ++ ++static inline unsigned long regs_return_value(struct pt_regs *regs) ++{ ++ return regs->NDS32_r0; ++} ++ ++ ++#define instruction_pointer(regs) ((regs)->NDS32_ipc) ++ ++#ifdef CONFIG_SMP ++extern unsigned long profile_pc(struct pt_regs *regs); ++#else ++#define profile_pc(regs) instruction_pointer(regs) ++#endif ++ ++#endif /* __KERNEL__ */ ++#endif /* __ASSEMBLY__ */ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/reg_access.h linux-3.4.110/arch/nds32/include/asm/reg_access.h +--- linux-3.4.110.orig/arch/nds32/include/asm/reg_access.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/reg_access.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,153 @@ ++/* ++ * linux/arch/nds32/include/asm/reg_access.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_REG_ACCESS_H__ ++#define __NDS32_REG_ACCESS_H__ ++ ++#ifndef __ASSEMBLY__ ++#define DEFINE_GET_SYS_REG( reg) \ ++inline static unsigned long GET_##reg( void){ \ ++ unsigned long val; \ ++ __asm__ volatile ( "mfsr %0, $"#reg :"=&r" (val) ::"memory"); \ ++ return val; \ ++} ++ ++#define DEFINE_PUT_SYS_REG( reg) \ ++inline static void SET_##reg( unsigned long val){ \ ++ __asm__ volatile ( "\n\tmtsr %0, $"#reg \ ++ "\n\tdsb" ::"r" ( val) :"memory"); \ ++} ++ ++#define DEFINE_PUT_SYS_REG_i( reg) \ ++inline static void SET_##reg( unsigned long val){ \ ++ __asm__ volatile ( "\n\tmtsr %0, $"#reg \ ++ "\n\tisb" ::"r" ( val) :"memory"); \ ++} ++#define DEFINE_SYS_REG_OP( reg) \ ++DEFINE_GET_SYS_REG( reg); \ ++DEFINE_PUT_SYS_REG( reg); ++ ++#define DEFINE_SYS_REG_OP_i( reg) \ ++DEFINE_GET_SYS_REG( reg); \ ++DEFINE_PUT_SYS_REG_i( reg); ++ ++DEFINE_SYS_REG_OP( CPU_VER); ++DEFINE_SYS_REG_OP( ICM_CFG); ++DEFINE_SYS_REG_OP( DCM_CFG); ++DEFINE_SYS_REG_OP( MMU_CFG); ++DEFINE_SYS_REG_OP( MSC_CFG); ++DEFINE_SYS_REG_OP( CORE_ID); ++DEFINE_SYS_REG_OP( FUCOP_EXIST); ++ ++DEFINE_SYS_REG_OP_i( PSW); ++DEFINE_SYS_REG_OP( IPSW); ++DEFINE_SYS_REG_OP( P_IPSW); ++DEFINE_SYS_REG_OP( IVB); ++DEFINE_SYS_REG_OP( EVA); ++DEFINE_SYS_REG_OP( P_EVA); ++DEFINE_SYS_REG_OP( ITYPE); ++DEFINE_SYS_REG_OP( P_ITYPE); ++DEFINE_SYS_REG_OP( MERR); ++DEFINE_SYS_REG_OP( IPC); ++DEFINE_SYS_REG_OP( P_IPC); ++DEFINE_SYS_REG_OP( OIPC); ++DEFINE_SYS_REG_OP( P_P0); ++DEFINE_SYS_REG_OP( P_P1); ++DEFINE_SYS_REG_OP( INT_MASK); ++DEFINE_SYS_REG_OP( INT_PEND); ++DEFINE_SYS_REG_OP( INT_MASK2); ++DEFINE_SYS_REG_OP( INT_PEND2); ++DEFINE_SYS_REG_OP( INT_TRIGGER); ++ ++DEFINE_SYS_REG_OP( MMU_CTL); ++DEFINE_SYS_REG_OP( L1_PPTB); ++DEFINE_SYS_REG_OP( TLB_VPN); ++DEFINE_SYS_REG_OP( TLB_DATA); ++DEFINE_SYS_REG_OP( TLB_MISC); ++DEFINE_SYS_REG_OP( VLPT_IDX); ++DEFINE_SYS_REG_OP( ILMB); ++DEFINE_SYS_REG_OP( DLMB); ++DEFINE_SYS_REG_OP( CACHE_CTL); ++DEFINE_SYS_REG_OP( HSMP_SADDR); ++DEFINE_SYS_REG_OP( HSMP_EADDR); ++ ++DEFINE_SYS_REG_OP( EDM_CFG); ++DEFINE_SYS_REG_OP( EDMSW); ++DEFINE_SYS_REG_OP( EDM_CTL); ++DEFINE_SYS_REG_OP( EDM_DTR); ++DEFINE_SYS_REG_OP( BPMTC); ++DEFINE_SYS_REG_OP( DIMBR); ++DEFINE_SYS_REG_OP( TECR0); ++DEFINE_SYS_REG_OP( TECR1); ++ ++DEFINE_SYS_REG_OP( BPC0); ++DEFINE_SYS_REG_OP( BPA0); ++DEFINE_SYS_REG_OP( BPAM0); ++DEFINE_SYS_REG_OP( BPV0); ++DEFINE_SYS_REG_OP( BPCID0); ++DEFINE_SYS_REG_OP( BPC1); ++DEFINE_SYS_REG_OP( BPA1); ++DEFINE_SYS_REG_OP( BPAM1); ++DEFINE_SYS_REG_OP( BPV1); ++DEFINE_SYS_REG_OP( BPCID1); 
++DEFINE_SYS_REG_OP( BPC2); ++DEFINE_SYS_REG_OP( BPA2); ++DEFINE_SYS_REG_OP( BPAM2); ++DEFINE_SYS_REG_OP( BPV2); ++DEFINE_SYS_REG_OP( BPCID2); ++DEFINE_SYS_REG_OP( BPC3); ++DEFINE_SYS_REG_OP( BPA3); ++DEFINE_SYS_REG_OP( BPAM3); ++DEFINE_SYS_REG_OP( BPV3); ++DEFINE_SYS_REG_OP( BPCID3); ++DEFINE_SYS_REG_OP( BPC4); ++DEFINE_SYS_REG_OP( BPA4); ++DEFINE_SYS_REG_OP( BPAM4); ++DEFINE_SYS_REG_OP( BPV4); ++DEFINE_SYS_REG_OP( BPCID4); ++DEFINE_SYS_REG_OP( BPC5); ++DEFINE_SYS_REG_OP( BPA5); ++DEFINE_SYS_REG_OP( BPAM5); ++DEFINE_SYS_REG_OP( BPV5); ++DEFINE_SYS_REG_OP( BPCID5); ++DEFINE_SYS_REG_OP( BPC6); ++DEFINE_SYS_REG_OP( BPA6); ++DEFINE_SYS_REG_OP( BPAM6); ++DEFINE_SYS_REG_OP( BPV6); ++DEFINE_SYS_REG_OP( BPCID6); ++DEFINE_SYS_REG_OP( BPC7); ++DEFINE_SYS_REG_OP( BPA7); ++DEFINE_SYS_REG_OP( BPAM7); ++DEFINE_SYS_REG_OP( BPV7); ++DEFINE_SYS_REG_OP( BPCID7); ++ ++DEFINE_SYS_REG_OP( PFMC0); ++DEFINE_SYS_REG_OP( PFMC1); ++DEFINE_SYS_REG_OP( PFMC2); ++DEFINE_SYS_REG_OP( PFM_CTL); ++ ++DEFINE_SYS_REG_OP( SDZ_CTL); ++DEFINE_SYS_REG_OP( N12MISC_CTL); ++DEFINE_SYS_REG_OP( PRUSR_ACC_CTL); ++ ++DEFINE_SYS_REG_OP( DMA_CFG); ++DEFINE_SYS_REG_OP( DMA_GCSW); ++DEFINE_SYS_REG_OP( DMA_CHNSEL); ++DEFINE_SYS_REG_OP( DMA_ACT); ++DEFINE_SYS_REG_OP( DMA_SETUP); ++DEFINE_SYS_REG_OP( DMA_ISADDR); ++DEFINE_SYS_REG_OP( DMA_ESADDR); ++DEFINE_SYS_REG_OP( DMA_TCNT); ++DEFINE_SYS_REG_OP( DMA_STATUS); ++DEFINE_SYS_REG_OP( DMA_2DSET); ++DEFINE_SYS_REG_OP( DMA_2DSCTL); ++ ++DEFINE_SYS_REG_OP( FPCSR); ++DEFINE_SYS_REG_OP( FPCFG); ++DEFINE_SYS_REG_OP( FUCOP_CTL); ++ ++#endif /* !__ASSEMBLY__ */ ++ ++#endif /* __NDS32_REG_ACCESS_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/resource.h linux-3.4.110/arch/nds32/include/asm/resource.h +--- linux-3.4.110.orig/arch/nds32/include/asm/resource.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/resource.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/resource.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_RESOURCE_H__ ++#define __NDS32_RESOURCE_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/rtc.h linux-3.4.110/arch/nds32/include/asm/rtc.h +--- linux-3.4.110.orig/arch/nds32/include/asm/rtc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/rtc.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,43 @@ ++/* ++ * linux/arch/nds32/include/asm/rtc.h ++ * ++ * Copyright (C) 2003 Deep Blue Solutions Ltd. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#ifndef ASMNDS32_RTC_H ++#define ASMNDS32_RTC_H ++ ++struct module; ++ ++struct rtc_ops { ++ struct module *owner; ++ int (*open)(void); ++ void (*release)(void); ++ int (*ioctl)(unsigned int, unsigned long); ++ ++ int (*read_time)(struct rtc_time *); ++ int (*set_time)(struct rtc_time *); ++ int (*read_alarm)(struct rtc_wkalrm *); ++ int (*set_alarm)(struct rtc_wkalrm *); ++ int (*proc)(char *buf); ++}; ++ ++void rtc_update(unsigned long, unsigned long); ++int register_rtc(struct rtc_ops *); ++void unregister_rtc(struct rtc_ops *); ++ ++static inline int rtc_periodic_alarm(struct rtc_time *tm) ++{ ++ return (tm->tm_year == -1) || ++ ((unsigned)tm->tm_mon >= 12) || ++ ((unsigned)(tm->tm_mday - 1) >= 31) || ++ ((unsigned)tm->tm_hour > 23) || ++ ((unsigned)tm->tm_min > 59) || ++ ((unsigned)tm->tm_sec > 59); ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/scatterlist.h linux-3.4.110/arch/nds32/include/asm/scatterlist.h +--- linux-3.4.110.orig/arch/nds32/include/asm/scatterlist.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/scatterlist.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * linux/arch/nds32/include/asm/scatterlist.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SCATTERLIST_H ++#define _ASMNDS32_SCATTERLIST_H ++ ++#include ++#include ++#include ++ ++#if 0 ++struct scatterlist { ++#ifdef CONFIG_DEBUG_SG ++ unsigned long sg_magic; ++#endif ++ unsigned long page_link; ++ unsigned int offset; /* buffer offset */ ++ dma_addr_t dma_address; /* dma address */ ++ unsigned int length; /* length */ ++}; ++ ++/* ++ * These macros should be used after a pci_map_sg call has been done ++ * to get bus addresses of each of the SG entries and their lengths. ++ * You should only work with the number of sg entries pci_map_sg ++ * returns, or alternatively stop on the first sg_dma_len(sg) which ++ * is 0. 
++ */ ++#define sg_dma_address(sg) ((sg)->dma_address) ++#define sg_dma_len(sg) ((sg)->length) ++#endif ++ ++#endif /* _ASMNDS32_SCATTERLIST_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/sections.h linux-3.4.110/arch/nds32/include/asm/sections.h +--- linux-3.4.110.orig/arch/nds32/include/asm/sections.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/sections.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/sections.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __NDS32_SECTIONS_H__ ++#define __NDS32_SECTIONS_H__ ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/segment.h linux-3.4.110/arch/nds32/include/asm/segment.h +--- linux-3.4.110.orig/arch/nds32/include/asm/segment.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/segment.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,16 @@ ++/* ++ * linux/arch/nds32/include/asm/segment.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_SEGMENT_H ++#define __ASM_NDS32_SEGMENT_H ++ ++#define __KERNEL_CS 0x0 ++#define __KERNEL_DS 0x0 ++ ++#define __USER_CS 0x1 ++#define __USER_DS 0x1 ++ ++#endif /* __ASM_NDS32_SEGMENT_H */ ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/semaphore.h linux-3.4.110/arch/nds32/include/asm/semaphore.h +--- linux-3.4.110.orig/arch/nds32/include/asm/semaphore.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/semaphore.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,6 @@ ++/* ++ * linux/arch/nds32/include/asm/semaphore.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#include +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/semaphore-helper.h linux-3.4.110/arch/nds32/include/asm/semaphore-helper.h +--- linux-3.4.110.orig/arch/nds32/include/asm/semaphore-helper.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/semaphore-helper.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * linux/arch/nds32/include/asm/semaphore-helper.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef ASMNDS32_SEMAPHORE_HELPER_H ++#define ASMNDS32_SEMAPHORE_HELPER_H ++ ++/* ++ * These two _must_ execute atomically wrt each other. ++ */ ++static inline void wake_one_more(struct semaphore * sem) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&semaphore_wake_lock, flags); ++ if (atomic_read(&sem->count) <= 0) ++ sem->waking++; ++ spin_unlock_irqrestore(&semaphore_wake_lock, flags); ++} ++ ++static inline int waking_non_zero(struct semaphore *sem) ++{ ++ unsigned long flags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&semaphore_wake_lock, flags); ++ if (sem->waking > 0) { ++ sem->waking--; ++ ret = 1; ++ } ++ spin_unlock_irqrestore(&semaphore_wake_lock, flags); ++ return ret; ++} ++ ++/* ++ * waking non zero interruptible ++ * 1 got the lock ++ * 0 go to sleep ++ * -EINTR interrupted ++ * ++ * We must undo the sem->count down_interruptible() increment while we are ++ * protected by the spinlock in order to make this atomic_inc() with the ++ * atomic_read() in wake_one_more(), otherwise we can race. 
-arca ++ */ ++static inline int waking_non_zero_interruptible(struct semaphore *sem, ++ struct task_struct *tsk) ++{ ++ unsigned long flags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&semaphore_wake_lock, flags); ++ if (sem->waking > 0) { ++ sem->waking--; ++ ret = 1; ++ } else if (signal_pending(tsk)) { ++ atomic_inc(&sem->count); ++ ret = -EINTR; ++ } ++ spin_unlock_irqrestore(&semaphore_wake_lock, flags); ++ return ret; ++} ++ ++/* ++ * waking_non_zero_try_lock: ++ * 1 failed to lock ++ * 0 got the lock ++ * ++ * We must undo the sem->count down_interruptible() increment while we are ++ * protected by the spinlock in order to make this atomic_inc() with the ++ * atomic_read() in wake_one_more(), otherwise we can race. -arca ++ */ ++static inline int waking_non_zero_trylock(struct semaphore *sem) ++{ ++ unsigned long flags; ++ int ret = 1; ++ ++ spin_lock_irqsave(&semaphore_wake_lock, flags); ++ if (sem->waking <= 0) ++ atomic_inc(&sem->count); ++ else { ++ sem->waking--; ++ ret = 0; ++ } ++ spin_unlock_irqrestore(&semaphore_wake_lock, flags); ++ return ret; ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/sembuf.h linux-3.4.110/arch/nds32/include/asm/sembuf.h +--- linux-3.4.110.orig/arch/nds32/include/asm/sembuf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/sembuf.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,30 @@ ++/* ++ * linux/arch/nds32/include/asm/sembuf.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SEMBUF_H ++#define _ASMNDS32_SEMBUF_H ++ ++/* ++ * The semid64_ds structure for arm architecture. ++ * Note extra padding because this structure is passed back and forth ++ * between kernel and user space. ++ * ++ * Pad space is left for: ++ * - 64-bit time_t to solve y2038 problem ++ * - 2 miscellaneous 32-bit values ++ */ ++ ++struct semid64_ds { ++ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ ++ __kernel_time_t sem_otime; /* last semop time */ ++ unsigned long __unused1; ++ __kernel_time_t sem_ctime; /* last change time */ ++ unsigned long __unused2; ++ unsigned long sem_nsems; /* no. of semaphores in array */ ++ unsigned long __unused3; ++ unsigned long __unused4; ++}; ++ ++#endif /* _ASMNDS32_SEMBUF_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/serial.h linux-3.4.110/arch/nds32/include/asm/serial.h +--- linux-3.4.110.orig/arch/nds32/include/asm/serial.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/serial.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,20 @@ ++/* ++ * linux/arch/nds32/include/asm/serial.h ++ * ++ * Copyright (C) 1996 Russell King. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * Changelog: ++ * 15-10-1996 RMK Created ++ */ ++ ++#ifndef __ASM_SERIAL_H ++#define __ASM_SERIAL_H ++ ++#define BASE_BAUD (CONFIG_UART_CLK / 16) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/setup.h linux-3.4.110/arch/nds32/include/asm/setup.h +--- linux-3.4.110.orig/arch/nds32/include/asm/setup.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/setup.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,232 @@ ++/* ++ * linux/arch/nds32/include/asm/setup.h ++ * ++ * Copyright (C) 1997-1999 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Structure passed to kernel to tell it about the ++ * hardware it's running on. See Documentation/arm/Setup ++ * for more info. ++ */ ++#ifndef __ASMNDS32_SETUP_H ++#define __ASMNDS32_SETUP_H ++ ++#define COMMAND_LINE_SIZE 256 ++ ++/* The list ends with an ATAG_NONE node. */ ++#define ATAG_NONE 0x00000000 ++ ++struct tag_header { ++ u32 size; ++ u32 tag; ++}; ++ ++/* The list must start with an ATAG_CORE node */ ++#define ATAG_CORE 0x54410001 ++ ++struct tag_core { ++ u32 flags; /* bit 0 = read-only */ ++ u32 pagesize; ++ u32 rootdev; ++}; ++ ++/* it is allowed to have multiple ATAG_MEM nodes */ ++#define ATAG_MEM 0x54410002 ++ ++struct tag_mem32 { ++ u32 size; ++ u32 start; /* physical start address */ ++}; ++ ++/* VGA text type displays */ ++#define ATAG_VIDEOTEXT 0x54410003 ++ ++struct tag_videotext { ++ u8 x; ++ u8 y; ++ u16 video_page; ++ u8 video_mode; ++ u8 video_cols; ++ u16 video_ega_bx; ++ u8 video_lines; ++ u8 video_isvga; ++ u16 video_points; ++}; ++ ++/* describes how the ramdisk will be used in kernel */ ++#define ATAG_RAMDISK 0x54410004 ++ ++struct tag_ramdisk { ++ u32 flags; /* bit 0 = load, bit 1 = prompt */ ++ u32 size; /* decompressed ramdisk size in _kilo_ bytes */ ++ u32 start; /* starting block of floppy-based RAM disk image */ ++}; ++ ++/*M Tom ++ * this one accidentally used virtual addresses - as such, ++ * it's deprecated. ++ * describes where the compressed ramdisk image lives (virtual address) ++ */ ++#define ATAG_INITRD 0x54410005 ++ ++/* describes where the compressed ramdisk image lives (physical address) */ ++#define ATAG_INITRD2 0x54420005 ++ ++struct tag_initrd { ++ u32 start; //M Tom va of start addr /* physical start address */ ++ u32 size; //M Tom unzipped size /* size of compressed ramdisk image in bytes */ ++}; ++ ++/* board serial number. "64 bits should be enough for everybody" */ ++#define ATAG_SERIAL 0x54410006 ++ ++struct tag_serialnr { ++ u32 low; ++ u32 high; ++}; ++ ++/* board revision */ ++#define ATAG_REVISION 0x54410007 ++ ++struct tag_revision { ++ u32 rev; ++}; ++ ++/* initial values for vesafb-type framebuffers. 
see struct screen_info ++ * in include/linux/tty.h ++ */ ++#define ATAG_VIDEOLFB 0x54410008 ++ ++struct tag_videolfb { ++ u16 lfb_width; ++ u16 lfb_height; ++ u16 lfb_depth; ++ u16 lfb_linelength; ++ u32 lfb_base; ++ u32 lfb_size; ++ u8 red_size; ++ u8 red_pos; ++ u8 green_size; ++ u8 green_pos; ++ u8 blue_size; ++ u8 blue_pos; ++ u8 rsvd_size; ++ u8 rsvd_pos; ++}; ++ ++/* command line: \0 terminated string */ ++#define ATAG_CMDLINE 0x54410009 ++ ++struct tag_cmdline { ++ char cmdline[COMMAND_LINE_SIZE];//M Tom ++}; ++ ++/* acorn RiscPC specific information */ ++/*-d Tom ++#define ATAG_ACORN 0x41000101 ++ ++struct tag_acorn { ++ u32 memc_control_reg; ++ u32 vram_pages; ++ u8 sounddefault; ++ u8 adfsdrives; ++}; ++*/ ++#define ATAG_CPE 0x41000101 ++struct tag_cpe { ++ u32 memc_control_reg; ++ u32 vram_pages; ++ u8 sounddefault; ++ u8 adfsdrives; ++}; ++ ++ ++/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ ++#define ATAG_MEMCLK 0x41000402 ++ ++struct tag_memclk { ++ u32 fmemclk; ++}; ++ ++struct tag { ++ struct tag_header hdr; ++ union { ++ struct tag_core core; ++ struct tag_mem32 mem; ++ struct tag_videotext videotext; ++ struct tag_ramdisk ramdisk; ++ struct tag_initrd initrd; ++ struct tag_serialnr serialnr; ++ struct tag_revision revision; ++ struct tag_videolfb videolfb; ++ struct tag_cmdline cmdline; ++ ++ /* ++ * Andes specific ++ */ ++ struct tag_cpe cpe; ++ ++ /* ++ * DC21285 specific ++ */ ++ struct tag_memclk memclk; ++ } u; ++}; ++ ++struct tagtable { ++ u32 tag; ++ int (*parse)(const struct tag *); ++}; ++ ++#define tag_member_present(tag,member) \ ++ ((unsigned long)(&((struct tag *)0L)->member + 1) \ ++ <= (tag)->hdr.size * 4) ++ ++#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size)) ++#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2) ++ ++#define for_each_tag(t,base) \ ++ for (t = base; t->hdr.size; t = tag_next(t)) ++ ++#ifdef __KERNEL__ ++ ++#define __tag __used __attribute__((__section__(".taglist"))) ++#define __tagtable(tag, fn) \ ++static struct tagtable __tagtable_##fn __tag = { tag, fn } ++ ++/* ++ * Memory map description ++ */ ++#ifdef CONFIG_ARCH_LH7A40X ++# define NR_BANKS 16 ++#else ++# define NR_BANKS 8 ++#endif ++ ++struct meminfo { ++ int nr_banks; ++ struct { ++ unsigned long start; ++ unsigned long size; ++ int node; ++ } bank[NR_BANKS]; ++}; ++ ++/* ++ * Early command line parameters. ++ */ ++struct early_params { ++ const char *arg; ++ void (*fn)(char **p); ++}; ++ ++#define __early_param(name,fn) \ ++static struct early_params __early_##fn __used \ ++__attribute__((__section__("__early_param"))) = { name, fn } ++ ++#endif ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/shmbuf.h linux-3.4.110/arch/nds32/include/asm/shmbuf.h +--- linux-3.4.110.orig/arch/nds32/include/asm/shmbuf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/shmbuf.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,47 @@ ++/* ++ * linux/arch/nds32/include/asm/shmbuf.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SHMBUF_H ++#define _ASMNDS32_SHMBUF_H ++ ++/* ++ * The shmid64_ds structure for arm architecture. ++ * Note extra padding because this structure is passed back and forth ++ * between kernel and user space. 
++ * ++ * Pad space is left for: ++ * - 64-bit time_t to solve y2038 problem ++ * - 2 miscellaneous 32-bit values ++ */ ++ ++struct shmid64_ds { ++ struct ipc64_perm shm_perm; /* operation perms */ ++ size_t shm_segsz; /* size of segment (bytes) */ ++ __kernel_time_t shm_atime; /* last attach time */ ++ unsigned long __unused1; ++ __kernel_time_t shm_dtime; /* last detach time */ ++ unsigned long __unused2; ++ __kernel_time_t shm_ctime; /* last change time */ ++ unsigned long __unused3; ++ __kernel_pid_t shm_cpid; /* pid of creator */ ++ __kernel_pid_t shm_lpid; /* pid of last operator */ ++ unsigned long shm_nattch; /* no. of current attaches */ ++ unsigned long __unused4; ++ unsigned long __unused5; ++}; ++ ++struct shminfo64 { ++ unsigned long shmmax; ++ unsigned long shmmin; ++ unsigned long shmmni; ++ unsigned long shmseg; ++ unsigned long shmall; ++ unsigned long __unused1; ++ unsigned long __unused2; ++ unsigned long __unused3; ++ unsigned long __unused4; ++}; ++ ++#endif /* _ASMNDS32_SHMBUF_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/shmparam.h linux-3.4.110/arch/nds32/include/asm/shmparam.h +--- linux-3.4.110.orig/arch/nds32/include/asm/shmparam.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/shmparam.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,27 @@ ++/* ++ * linux/arch/nds32/include/asm/shmparam.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SHMPARAM_H ++#define _ASMNDS32_SHMPARAM_H ++ ++/* ++ * This should be the size of the virtually indexed cache/ways, ++ * whichever is greater since the cache aliases every size/ways ++ * bytes. ++ */ ++/* ++ * Reference ARM architecture, retain the previous code ++ * #define SHMLBA 0x4000 ++ * #define REALSHMLBA (CACHE_SET( DCACHE) * CACHE_LINE_SIZE( DCACHE)) ++ */ ++#define SHMLBA (4 * PAGE_SIZE) /* attach addr a multiple of this */ ++#define REALSHMLBA SHMLBA ++ ++/* ++ * Enforce SHMLBA in shmat ++ */ ++#define __ARCH_FORCE_SHMLBA ++ ++#endif /* _ASMNDS32_SHMPARAM_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/sigcontext.h linux-3.4.110/arch/nds32/include/asm/sigcontext.h +--- linux-3.4.110.orig/arch/nds32/include/asm/sigcontext.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/sigcontext.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,81 @@ ++/* ++ * linux/arch/nds32/include/asm/sigcontext.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SIGCONTEXT_H ++#define _ASMNDS32_SIGCONTEXT_H ++ ++/* ++ * Signal context structure - contains all info to do with the state ++ * before the signal handler was invoked. Note: only add new entries ++ * to the end of the structure. 
++ */ ++struct fpu_struct { ++ unsigned long fs_regs[32]; ++ unsigned long long fd_regs[16]; ++ unsigned long fpcsr; ++}; ++ ++struct audio_struct { ++ unsigned long auregs[32]; ++}; ++ ++struct zol_struct { ++ unsigned long nds32_lc; /* $LC */ ++ unsigned long nds32_le; /* $LE */ ++ unsigned long nds32_lb; /* $LB */ ++}; ++ ++struct sigcontext { ++ unsigned long trap_no; ++ unsigned long error_code; ++ unsigned long oldmask; ++ unsigned long nds32_r0; ++ unsigned long nds32_r1; ++ unsigned long nds32_r2; ++ unsigned long nds32_r3; ++ unsigned long nds32_r4; ++ unsigned long nds32_r5; ++ unsigned long nds32_r6; ++ unsigned long nds32_r7; ++ unsigned long nds32_r8; ++ unsigned long nds32_r9; ++ unsigned long nds32_r10; ++ unsigned long nds32_r11; ++ unsigned long nds32_r12; ++ unsigned long nds32_r13; ++ unsigned long nds32_r14; ++ unsigned long nds32_r15; ++ unsigned long nds32_r16; ++ unsigned long nds32_r17; ++ unsigned long nds32_r18; ++ unsigned long nds32_r19; ++ unsigned long nds32_r20; ++ unsigned long nds32_r21; ++ unsigned long nds32_r22; ++ unsigned long nds32_r23; ++ unsigned long nds32_r24; ++ unsigned long nds32_r25; ++ unsigned long nds32_fp; /* $r28 */ ++ unsigned long nds32_gp; /* $r29 */ ++ unsigned long nds32_lr; /* $r30 */ ++ unsigned long nds32_sp; /* $r31 */ ++ unsigned long nds32_ipc; ++ unsigned long fault_address; ++#if defined(CONFIG_FPU) ++ unsigned long used_math_flag; ++ /* FPU Registers */ ++ struct fpu_struct fpu; ++#endif ++ /* Audio Registers */ ++#if defined(CONFIG_AUDIO) ++ unsigned long used_audio_flag; ++ struct audio_struct audio; ++#endif ++#if defined(CONFIG_HWZOL) ++ struct zol_struct zol; ++#endif ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/siginfo.h linux-3.4.110/arch/nds32/include/asm/siginfo.h +--- linux-3.4.110.orig/arch/nds32/include/asm/siginfo.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/siginfo.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/siginfo.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SIGINFO_H ++#define _ASMNDS32_SIGINFO_H ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/signal.h linux-3.4.110/arch/nds32/include/asm/signal.h +--- linux-3.4.110.orig/arch/nds32/include/asm/signal.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/signal.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,173 @@ ++/* ++ * linux/arch/nds32/include/asm/signal.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SIGNAL_H ++#define _ASMNDS32_SIGNAL_H ++ ++#include ++ ++/* Avoid too many header ordering problems. */ ++struct siginfo; ++ ++#ifdef __KERNEL__ ++/* Most things should be clean enough to redefine this at will, if care ++ is taken to make libc match. */ ++ ++#define _NSIG 64 ++#define _NSIG_BPW 32 ++#define _NSIG_WORDS (_NSIG / _NSIG_BPW) ++ ++typedef unsigned long old_sigset_t; /* at least 32 bits */ ++ ++typedef struct { ++ unsigned long sig[_NSIG_WORDS]; ++} sigset_t; ++ ++#else ++/* Here we must cater to libcs that poke about in kernel headers. 
*/ ++ ++#define NSIG 32 ++typedef unsigned long sigset_t; ++ ++#endif /* __KERNEL__ */ ++ ++#define SIGHUP 1 ++#define SIGINT 2 ++#define SIGQUIT 3 ++#define SIGILL 4 ++#define SIGTRAP 5 ++#define SIGABRT 6 ++#define SIGIOT 6 ++#define SIGBUS 7 ++#define SIGFPE 8 ++#define SIGKILL 9 ++#define SIGUSR1 10 ++#define SIGSEGV 11 ++#define SIGUSR2 12 ++#define SIGPIPE 13 ++#define SIGALRM 14 ++#define SIGTERM 15 ++#define SIGSTKFLT 16 ++#define SIGCHLD 17 ++#define SIGCONT 18 ++#define SIGSTOP 19 ++#define SIGTSTP 20 ++#define SIGTTIN 21 ++#define SIGTTOU 22 ++#define SIGURG 23 ++#define SIGXCPU 24 ++#define SIGXFSZ 25 ++#define SIGVTALRM 26 ++#define SIGPROF 27 ++#define SIGWINCH 28 ++#define SIGIO 29 ++#define SIGPOLL SIGIO ++/* ++#define SIGLOST 29 ++*/ ++#define SIGPWR 30 ++#define SIGSYS 31 ++#define SIGUNUSED 31 ++ ++/* These should not be considered constants from userland. */ ++#define SIGRTMIN 32 ++#define SIGRTMAX _NSIG ++ ++#define SIGSWI 32 ++ ++/* ++ * SA_FLAGS values: ++ * ++ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. ++ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. ++ * SA_SIGINFO deliver the signal with SIGINFO structs ++ * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task ++ * is running in 26-bit. ++ * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). ++ * SA_RESTART flag to get restarting signals (which were the default long ago) ++ * SA_NODEFER prevents the current signal from being masked in the handler. ++ * SA_RESETHAND clears the handler when the signal is delivered. ++ * ++ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single ++ * Unix names RESETHAND and NODEFER respectively. ++ */ ++#define SA_NOCLDSTOP 0x00000001 ++#define SA_NOCLDWAIT 0x00000002 ++#define SA_SIGINFO 0x00000004 ++#define SA_THIRTYTWO 0x02000000 ++#define SA_RESTORER 0x04000000 ++#define SA_ONSTACK 0x08000000 ++#define SA_RESTART 0x10000000 ++#define SA_NODEFER 0x40000000 ++#define SA_RESETHAND 0x80000000 ++ ++#define SA_NOMASK SA_NODEFER ++#define SA_ONESHOT SA_RESETHAND ++ ++ ++/* ++ * sigaltstack controls ++ */ ++#define SS_ONSTACK 1 ++#define SS_DISABLE 2 ++ ++#define MINSIGSTKSZ 2048 ++#define SIGSTKSZ 8192 ++ ++#ifdef __KERNEL__ ++#define SA_IRQNOMASK 0x08000000 ++#endif ++ ++#include ++ ++#ifdef __KERNEL__ ++struct old_sigaction { ++ __sighandler_t sa_handler; ++ old_sigset_t sa_mask; ++ unsigned long sa_flags; ++ __sigrestore_t sa_restorer; ++}; ++ ++struct sigaction { ++ __sighandler_t sa_handler; ++ unsigned long sa_flags; ++ __sigrestore_t sa_restorer; ++ sigset_t sa_mask; /* mask last for extensibility */ ++}; ++ ++struct k_sigaction { ++ struct sigaction sa; ++}; ++ ++#else ++/* Here we must cater to libcs that poke about in kernel headers. 
*/ ++ ++struct sigaction { ++ union { ++ __sighandler_t _sa_handler; ++ void (*_sa_sigaction)(int, struct siginfo *, void *); ++ } _u; ++ sigset_t sa_mask; ++ unsigned long sa_flags; ++ void (*sa_restorer)(void); ++}; ++ ++#define sa_handler _u._sa_handler ++#define sa_sigaction _u._sa_sigaction ++ ++#endif /* __KERNEL__ */ ++ ++typedef struct sigaltstack { ++ void __user *ss_sp; ++ int ss_flags; ++ size_t ss_size; ++} stack_t; ++ ++#ifdef __KERNEL__ ++#include ++#define ptrace_signal_deliver(regs, cookie) do { } while (0) ++#endif ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/sizes.h linux-3.4.110/arch/nds32/include/asm/sizes.h +--- linux-3.4.110.orig/arch/nds32/include/asm/sizes.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/sizes.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,53 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++/* DO NOT EDIT!! - this file automatically generated ++ * from .s file by awk -f s2h.awk ++ */ ++/* Size definitions ++ * Copyright (C) ARM Limited 1998. All rights reserved. ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __sizes_h ++#define __sizes_h 1 ++ ++/* handy sizes */ ++#define SZ_1K 0x00000400 ++#define SZ_4K 0x00001000 ++#define SZ_8K 0x00002000 ++#define SZ_16K 0x00004000 ++#define SZ_64K 0x00010000 ++#define SZ_128K 0x00020000 ++#define SZ_256K 0x00040000 ++#define SZ_512K 0x00080000 ++ ++#define SZ_1M 0x00100000 ++#define SZ_2M 0x00200000 ++#define SZ_4M 0x00400000 ++#define SZ_8M 0x00800000 ++#define SZ_16M 0x01000000 ++#define SZ_32M 0x02000000 ++#define SZ_64M 0x04000000 ++#define SZ_128M 0x08000000 ++#define SZ_256M 0x10000000 ++#define SZ_512M 0x20000000 ++ ++#define SZ_1G 0x40000000 ++#define SZ_2G 0x80000000 ++ ++#endif ++ ++/* END */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/smp.h linux-3.4.110/arch/nds32/include/asm/smp.h +--- linux-3.4.110.orig/arch/nds32/include/asm/smp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/smp.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,42 @@ ++/* ++ * linux/arch/nds32/include/asm/smp.h ++ * ++ * Copyright (C) 2004-2005 ARM Ltd. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_NDS32_SMP_H ++#define __ASM_NDS32_SMP_H ++ ++#include ++#include ++#include ++ ++#ifndef CONFIG_SMP ++# error " included in non-SMP build" ++#endif ++ ++/* ++ * at the moment, there's not a big penalty for changing CPUs ++ * (the >big< penalty is running SMP in the first place) ++ */ ++#define PROC_CHANGE_PENALTY 15 ++ ++struct seq_file; ++ ++/* ++ * Move global data into per-processor storage. 
++ */ ++extern void smp_store_cpu_info(unsigned int cpuid); ++ ++#define raw_smp_processor_id() (unsigned int)(GET_CORE_ID()&CORE_ID_mskCOREID) ++ ++extern void smp_init_cpus(void); ++extern void smp_send_timer(void); ++extern void arch_send_call_function_single_ipi(int cpu); ++extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); ++ ++#endif /* ifndef __ASM_NDS32_SMP_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/socket.h linux-3.4.110/arch/nds32/include/asm/socket.h +--- linux-3.4.110.orig/arch/nds32/include/asm/socket.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/socket.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/socket.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SOCKET_H ++#define _ASMNDS32_SOCKET_H ++ ++#include ++ ++#endif /* _ASMNDS32_SOCKET_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/sockios.h linux-3.4.110/arch/nds32/include/asm/sockios.h +--- linux-3.4.110.orig/arch/nds32/include/asm/sockios.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/sockios.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,18 @@ ++/* ++ * linux/arch/nds32/include/asm/sockios.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ARCH_NDS32_SOCKIOS_H ++#define __ARCH_NDS32_SOCKIOS_H ++ ++/* Socket-level I/O control calls. */ ++#define FIOSETOWN 0x8901 ++#define SIOCSPGRP 0x8902 ++#define FIOGETOWN 0x8903 ++#define SIOCGPGRP 0x8904 ++#define SIOCATMARK 0x8905 ++#define SIOCGSTAMP 0x8906 /* Get stamp */ ++#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/spec-ag101.h linux-3.4.110/arch/nds32/include/asm/spec-ag101.h +--- linux-3.4.110.orig/arch/nds32/include/asm/spec-ag101.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/spec-ag101.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,237 @@ ++#ifndef __NDS32_AG101_SPECIFICATION_H__ ++#define __NDS32_AG101_SPECIFICATION_H__ ++ ++#define UART0_PA_BASE 0x99600000 ++#define UART0_VA_BASE 0xF9960000 ++#define UART0_IRQ 7 ++#define UART1_PA_BASE 0x98300000 ++#define UART1_VA_BASE 0xF9830000 ++#define UART1_IRQ 11 ++#define CFC_FTCFC010_0_PA_BASE 0x98D00000 ++#define CFC_FTCFC010_0_VA_BASE 0xF98D0000 ++#define SDC_FTSDC010_0_PA_BASE 0x98e00000 ++#define SDC_FTSDC010_0_VA_BASE 0xF98e0000 ++#define SDMC_FTSDMC021_PA_BASE 0x90300000 ++#define SDMC_FTSDMC021_VA_BASE 0xF9030000 ++#define PCIC_FTPCI100_0_PA_BASE 0x90C00000 ++#define PCIC_FTPCI100_0_VA_BASE 0xF90C0000 ++#define PCIC_FTPCI100_0_VA_LIMIT 0xF90CFFFF ++#define PCIIO_PA_BASE 0x90C01000 ++#define PCIIO_VA_BASE 0xFE901000 ++#define PCIIO_0_PA_BASE 0x90C01000 ++#define PCIIO_0_VA_BASE 0xFE901000 ++#define PCIIO_VA_LIMIT 0xFE9FFFFF ++#define AHB_ATFAHBC020S_0_PA_BASE 0x90100000 ++#define AHB_ATFAHBC020S_0_VA_BASE 0xF9010000 ++ ++/* DMAC */ ++#define DMAC_FTDMAC020_PA_COUNT 1 ++#define DMAC_FTDMAC020_PA_BASE 0x90400000 ++#define DMAC_FTDMAC020_PA_LIMIT 0x90400FFF ++#define DMAC_FTDMAC020_PA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_0_PA_BASE 0x90400000 ++#define DMAC_FTDMAC020_0_PA_LIMIT 0x90400FFF ++#define DMAC_FTDMAC020_0_PA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_VA_COUNT 1 ++#define DMAC_FTDMAC020_VA_BASE 0xF9040000 ++#define DMAC_FTDMAC020_VA_LIMIT 0xF9040FFF ++#define DMAC_FTDMAC020_VA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_0_VA_BASE 0xF9040000 ++#define DMAC_FTDMAC020_0_VA_LIMIT 0xF9040FFF 
++#define DMAC_FTDMAC020_0_VA_SIZE 0x00001000 ++ ++/* TIMER */ ++#define TIMER_FTTMR010_PA_COUNT 1 ++#define TIMER_FTTMR010_PA_BASE 0x98400000 ++#define TIMER_FTTMR010_PA_LIMIT 0x98400FFF ++#define TIMER_FTTMR010_PA_SIZE 0x00001000 ++#define TIMER_FTTMR010_0_PA_BASE 0x98400000 ++#define TIMER_FTTMR010_0_PA_LIMIT 0x98400FFF ++#define TIMER_FTTMR010_0_PA_SIZE 0x00001000 ++#define TIMER_FTTMR010_VA_COUNT 1 ++#define TIMER_FTTMR010_VA_BASE 0xF9840000 ++#define TIMER_FTTMR010_VA_LIMIT 0xF9840FFF ++#define TIMER_FTTMR010_VA_SIZE 0x00001000 ++#define TIMER_FTTMR010_0_VA_BASE 0xF9840000 ++#define TIMER_FTTMR010_0_VA_LIMIT 0xF9840FFF ++#define TIMER_FTTMR010_0_VA_SIZE 0x00001000 ++ ++/* RTC */ ++#define RTC_FTRTC010_PA_COUNT 1 ++#define RTC_FTRTC010_PA_BASE 0x98600000 ++#define RTC_FTRTC010_PA_LIMIT 0x98600FFF ++#define RTC_FTRTC010_PA_SIZE 0x00001000 ++#define RTC_FTRTC010_0_PA_BASE 0x98600000 ++#define RTC_FTRTC010_0_PA_LIMIT 0x98600FFF ++#define RTC_FTRTC010_0_PA_SIZE 0x00001000 ++#define RTC_FTRTC010_VA_COUNT 1 ++#define RTC_FTRTC010_VA_BASE 0xF9860000 ++#define RTC_FTRTC010_VA_LIMIT 0xF9860FFF ++#define RTC_FTRTC010_VA_SIZE 0x00001000 ++#define RTC_FTRTC010_0_VA_BASE 0xF9860000 ++#define RTC_FTRTC010_0_VA_LIMIT 0xF9860FFF ++#define RTC_FTRTC010_0_VA_SIZE 0x00001000 ++ ++/* WDT */ ++#define WDT_FTWDT010_PA_COUNT 1 ++#define WDT_FTWDT010_PA_BASE 0x98500000 ++#define WDT_FTWDT010_PA_LIMIT 0x98500FFF ++#define WDT_FTWDT010_PA_SIZE 0x00001000 ++#define WDT_FTWDT010_0_PA_BASE 0x98500000 ++#define WDT_FTWDT010_0_PA_LIMIT 0x98500FFF ++#define WDT_FTWDT010_0_PA_SIZE 0x00001000 ++#define WDT_FTWDT010_VA_COUNT 1 ++#define WDT_FTWDT010_VA_BASE 0xF9850000 ++#define WDT_FTWDT010_VA_LIMIT 0xF9850FFF ++#define WDT_FTWDT010_VA_SIZE 0x00001000 ++#define WDT_FTWDT010_0_VA_BASE 0xF9850000 ++#define WDT_FTWDT010_0_VA_LIMIT 0xF9850FFF ++#define WDT_FTWDT010_0_VA_SIZE 0x00001000 ++ ++/* GPIO */ ++#define GPIO_FTGPIO010_PA_COUNT 1 ++#define GPIO_FTGPIO010_PA_BASE 0x98700000 ++#define GPIO_FTGPIO010_PA_LIMIT 0x98700FFF ++#define GPIO_FTGPIO010_PA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_0_PA_BASE 0x98700000 ++#define GPIO_FTGPIO010_0_PA_LIMIT 0x98700FFF ++#define GPIO_FTGPIO010_0_PA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_VA_COUNT 1 ++#define GPIO_FTGPIO010_VA_BASE 0xF9870000 ++#define GPIO_FTGPIO010_VA_LIMIT 0xF9870FFF ++#define GPIO_FTGPIO010_VA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_0_VA_BASE 0xF9870000 ++#define GPIO_FTGPIO010_0_VA_LIMIT 0xF9870FFF ++#define GPIO_FTGPIO010_0_VA_SIZE 0x00001000 ++ ++ ++/* USB OTG */ ++#define USB_FOTG2XX_0_PA_COUNT 1 ++#define USB_FOTG2XX_0_PA_BASE 0x90B00000 ++#define USB_FOTG2XX_0_PA_LIMIT 0x90B00FFF ++#define USB_FOTG2XX_0_PA_SIZE 0x00001000 ++#define USB_FOTG2XX_0_VA_BASE 0xF90B0000 ++#define USB_FOTG2XX_0_VA_LIMIT 0xF90B0FFF ++#define USB_FOTG2XX_0_VA_SIZE 0x00001000 ++#define USB_FOTG2XX_0_IRQ 26 ++#define USB_FOTG2XX_PA_COUNT 1 ++#define USB_FOTG2XX_PA_BASE 0x90B00000 ++#define USB_FOTG2XX_PA_LIMIT 0x90B00FFF ++#define USB_FOTG2XX_PA_SIZE 0x00001000 ++#define USB_FOTG2XX_VA_BASE 0xF90B0000 ++#define USB_FOTG2XX_VA_LIMIT 0xF90B0FFF ++#define USB_FOTG2XX_VA_SIZE 0x00001000 ++#define USB_FOTG2XX_IRQ 26 ++#define USB_FOTG2XX_IRQ_COUNT 1 ++ ++/* SSP */ ++#define SSP_FTSSP010_PA_COUNT 1 ++#define SSP_FTSSP010_PA_BASE 0x99400000 ++#define SSP_FTSSP010_PA_LIMIT 0x99400FFF ++#define SSP_FTSSP010_PA_SIZE 0x00001000 ++#define SSP_FTSSP010_0_PA_BASE 0x99400000 ++#define SSP_FTSSP010_0_PA_LIMIT 0x99400FFF ++#define SSP_FTSSP010_0_PA_SIZE 0x00001000 ++#define SSP_FTSSP010_VA_COUNT 1 
++#define SSP_FTSSP010_VA_BASE 0xF9940000 ++#define SSP_FTSSP010_VA_LIMIT 0xF9940FFF ++#define SSP_FTSSP010_VA_SIZE 0x00001000 ++#define SSP_FTSSP010_0_VA_BASE 0xF9940000 ++#define SSP_FTSSP010_0_VA_LIMIT 0xF9940FFF ++#define SSP_FTSSP010_0_VA_SIZE 0x00001000 ++ ++/* APBBRG */ ++#define APBBRG_FTAPBBRG020S_PA_COUNT 1 ++#define APBBRG_FTAPBBRG020S_PA_BASE 0x90500000 ++#define APBBRG_FTAPBBRG020S_PA_LIMIT 0x90500FFF ++#define APBBRG_FTAPBBRG020S_PA_SIZE 0x00001000 ++#define APBBRG_FTAPBBRG020S_0_PA_BASE 0x90500000 ++#define APBBRG_FTAPBBRG020S_0_PA_LIMIT 0x90500FFF ++#define APBBRG_FTAPBBRG020S_0_PA_SIZE 0x00001000 ++#define APBBRG_FTAPBBRG020S_1_PA_BASE 0x90E00000 ++#define APBBRG_FTAPBBRG020S_1_PA_LIMIT 0x90E00FFF ++#define APBBRG_FTAPBBRG020S_1_PA_SIZE 0x00001000 ++#define APBBRG_FTAPBBRG020S_VA_COUNT 1 ++#define APBBRG_FTAPBBRG020S_VA_BASE 0xF9050000 ++#define APBBRG_FTAPBBRG020S_VA_LIMIT 0xF9050FFF ++#define APBBRG_FTAPBBRG020S_VA_SIZE 0x00001000 ++#define APBBRG_FTAPBBRG020S_0_VA_BASE 0xF9050000 ++#define APBBRG_FTAPBBRG020S_0_VA_LIMIT 0xF9050FFF ++#define APBBRG_FTAPBBRG020S_0_VA_SIZE 0x00001000 ++#define APBBRG_FTAPBBRG020S_1_VA_BASE 0xF90E0000 ++#define APBBRG_FTAPBBRG020S_1_VA_LIMIT 0xF90E0FFF ++#define APBBRG_FTAPBBRG020S_1_VA_SIZE 0x00001000 ++ ++/* I2C */ ++#define I2C_FTI2C010_PA_COUNT 1 ++#define I2C_FTI2C010_PA_BASE 0x98A00000 ++#define I2C_FTI2C010_PA_LIMIT 0x98A00FFF ++#define I2C_FTI2C010_PA_SIZE 0x00001000 ++#define I2C_FTI2C010_0_PA_BASE 0x98A00000 ++#define I2C_FTI2C010_0_PA_LIMIT 0x98A00FFF ++#define I2C_FTI2C010_0_PA_SIZE 0x00001000 ++#define I2C_FTI2C010_VA_COUNT 1 ++#define I2C_FTI2C010_VA_BASE 0xF98A0000 ++#define I2C_FTI2C010_VA_LIMIT 0xF98A0FFF ++#define I2C_FTI2C010_VA_SIZE 0x00001000 ++#define I2C_FTI2C010_0_VA_BASE 0xF98A0000 ++#define I2C_FTI2C010_0_VA_LIMIT 0xF98A0FFF ++#define I2C_FTI2C010_0_VA_SIZE 0x00001000 ++ ++/* L2CC */ ++#define L2CC_PA_BASE 0x90F00000 /* reserved */ ++#define L2CC_VA_BASE 0xF90F0000 /* FIXME */ ++ ++#define LED_PA_COUNT 1 ++#define LED_PA_BASE 0x902FF000 ++#define LED_PA_LIMIT 0x90200FFF ++#define LED_PA_SIZE 0x00001000 ++#define LED_0_PA_BASE 0x90200000 ++#define LED_0_PA_LIMIT 0x90200FFF ++#define LED_0_PA_SIZE 0x00001000 ++#define LED_VA_COUNT 1 ++#define LED_VA_BASE 0xF9020000 ++#define LED_VA_LIMIT 0xF9020000 ++#define LED_VA_SIZE 0x00001000 ++#define LED_0_VA_BASE 0xF9020000 ++#define LED_0_VA_LIMIT 0xF9020000 ++#define LED_0_VA_SIZE 0x00001000 ++ ++/* MAC */ ++#define MAC_FTMAC100_PA_COUNT 1 ++#define MAC_FTMAC100_PA_BASE 0x90900000 ++#define MAC_FTMAC100_PA_LIMIT 0x90900FFF ++#define MAC_FTMAC100_PA_SIZE 0x00001000 ++#define MAC_FTMAC100_0_PA_BASE 0x90900000 ++#define MAC_FTMAC100_0_PA_LIMIT 0x90900FFF ++#define MAC_FTMAC100_0_PA_SIZE 0x00001000 ++#define MAC_FTMAC100_1_PA_BASE 0x92000000 ++#define MAC_FTMAC100_1_PA_LIMIT 0x92000FFF ++#define MAC_FTMAC100_1_PA_SIZE 0x00001000 ++#define MAC_FTMAC100_VA_COUNT 1 ++#define MAC_FTMAC100_VA_BASE 0xF9090000 ++#define MAC_FTMAC100_VA_LIMIT 0xF9090FFF ++#define MAC_FTMAC100_VA_SIZE 0x00001000 ++#define MAC_FTMAC100_0_VA_BASE 0xF9090000 ++#define MAC_FTMAC100_0_VA_LIMIT 0xF9090FFF ++#define MAC_FTMAC100_0_VA_SIZE 0x00001000 ++#define MAC_FTMAC100_1_VA_BASE 0xF9200000 ++#define MAC_FTMAC100_1_VA_LIMIT 0xF9200FFF ++#define MAC_FTMAC100_1_VA_SIZE 0x00001000 ++ ++/* LCD */ ++#define LCD_FTLCDC100_PA_COUNT 1 ++#define LCD_FTLCDC100_PA_BASE 0x90600000 ++#define LCD_FTLCDC100_PA_LIMIT 0x90600FFF ++#define LCD_FTLCDC100_PA_SIZE 0x00001000 ++#define LCD_FTLCDC100_0_PA_BASE 0x90600000 ++#define 
LCD_FTLCDC100_0_PA_LIMIT 0x90600FFF ++#define LCD_FTLCDC100_0_PA_SIZE 0x00001000 ++#define LCD_FTLCDC100_VA_COUNT 1 ++#define LCD_FTLCDC100_VA_BASE 0xF9060000 ++#define LCD_FTLCDC100_VA_LIMIT 0xF9060FFF ++#define LCD_FTLCDC100_VA_SIZE 0x00001000 ++#define LCD_FTLCDC100_0_VA_BASE 0xF9060000 ++#define LCD_FTLCDC100_0_VA_LIMIT 0xF9060FFF ++#define LCD_FTLCDC100_0_VA_SIZE 0x00001000 ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/spec-ag102.h linux-3.4.110/arch/nds32/include/asm/spec-ag102.h +--- linux-3.4.110.orig/arch/nds32/include/asm/spec-ag102.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/spec-ag102.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,167 @@ ++#ifndef __NDS32_AG102_SPECIFICATION_H__ ++#define __NDS32_AG102_SPECIFICATION_H__ ++ ++#define UART0_PA_BASE 0x94200000 ++#define UART0_VA_BASE 0xF9420000 ++#define UART0_IRQ 10 ++#define UART1_PA_BASE 0x94600000 ++#define UART1_VA_BASE 0xF9460000 ++#define UART1_IRQ 11 ++#define AMIC_PA_BASE 0x90F00000 ++#define AMIC_VA_BASE 0xF90F0000 ++#define GMAC_PA_BASE 0x90B00000 ++#define GMAC_VA_BASE 0xF90B0000 ++#define APBBR_PA_BASE 0x90D00000 ++#define APBBR_VA_BASE 0xF90D0000 ++#define GMAC_IRQ 25 ++#define TIMER_PA_BASE 0x94900000 ++#define TIMER_VA_BASE 0xF9490000 ++#define L2CC_PA_BASE 0x90900000 ++#define L2CC_VA_BASE 0xF9090000 ++#define CFC_FTCFC010_0_PA_BASE 0x94000000 ++#define CFC_FTCFC010_0_VA_BASE 0xF9400000 ++#define SDC_FTSDC010_0_PA_BASE 0x94400000 ++#define SDC_FTSDC010_0_VA_BASE 0xF9440000 ++#define PCU_PA_BASE 0x94800000 ++#define PCU_VA_BASE 0xF9480000 ++#define PCIC_FTPCI100_0_PA_BASE 0x90000000 ++#define PCIC_FTPCI100_0_VA_BASE 0xF9000000 ++#define PCIC_FTPCI100_0_VA_LIMIT 0xF9000FFF ++#define PCIIO_PA_BASE 0x90001000 ++#define PCIIO_VA_BASE 0xF8901000 ++#define PCIIO_0_PA_BASE 0x90001000 ++#define PCIIO_0_VA_BASE 0xF8901000 ++#define PCIIO_VA_LIMIT 0xF89FFFFF ++#define LPC_IO_PA_BASE 0x90100000 ++#define LPC_IO_VA_BASE 0xF9010000 ++#define LPC_REG_PA_BASE 0x90200000 ++#define LPC_REG_VA_BASE 0xF9020000 ++#define LPC_IRQ 29 ++#define GPU_PA_BASE 0x90A00000 ++#define GPU_VA_BASE 0xFEA00000 ++#define IDE_FTIDE020_VA_BASE 0xF9070000 ++#define IDE_FTIDE020_PA_BASE 0x90700000 ++#define IDE_FTIDE020_IRQ 1 ++#define USB_FOTG2XX_0_PA_BASE 0x90800000 ++#define USB_FOTG2XX_0_VA_BASE 0xF9080000 ++#define USB_FOTG2XX_0_IRQ 26 ++#define DDR2C_PA_BASE 0x90500000 ++#define DDR2C_VA_BASE 0xF9050000 ++#define GPIO_VA_BASE 0xF94C0000 ++#define GPIO_PA_BASE 0x94C00000 ++ ++#define PLATFORM_LPC_IRQ_BASE 180 ++#define PLATFORM_LPC_IRQ_TOTALCOUNT 22 ++ ++/* DMAC */ ++#define DMAC_FTDMAC020_PA_COUNT 1 ++#define DMAC_FTDMAC020_PA_BASE 0x90600000 ++#define DMAC_FTDMAC020_PA_LIMIT 0x90600FFF ++#define DMAC_FTDMAC020_PA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_0_PA_BASE 0x90600000 ++#define DMAC_FTDMAC020_0_PA_LIMIT 0x90600FFF ++#define DMAC_FTDMAC020_0_PA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_VA_COUNT 1 ++#define DMAC_FTDMAC020_VA_BASE 0xF9060000 ++#define DMAC_FTDMAC020_VA_LIMIT 0xF9060FFF ++#define DMAC_FTDMAC020_VA_SIZE 0x00001000 ++#define DMAC_FTDMAC020_0_VA_BASE 0xF9060000 ++#define DMAC_FTDMAC020_0_VA_LIMIT 0xF9060FFF ++#define DMAC_FTDMAC020_0_VA_SIZE 0x00001000 ++ ++/* TIMER */ ++#define TIMER_FTTMR010_PA_COUNT 1 ++#define TIMER_FTTMR010_PA_BASE 0x94900000 ++#define TIMER_FTTMR010_PA_LIMIT 0x94900FFF ++#define TIMER_FTTMR010_PA_SIZE 0x00001000 ++#define TIMER_FTTMR010_0_PA_BASE 0x94900000 ++#define TIMER_FTTMR010_0_PA_LIMIT 0x94900FFF ++#define TIMER_FTTMR010_0_PA_SIZE 0x00001000 
++#define TIMER_FTTMR010_VA_COUNT 1 ++#define TIMER_FTTMR010_VA_BASE 0xF9490000 ++#define TIMER_FTTMR010_VA_LIMIT 0xF9490FFF ++#define TIMER_FTTMR010_VA_SIZE 0x00001000 ++#define TIMER_FTTMR010_0_VA_BASE 0xF9490000 ++#define TIMER_FTTMR010_0_VA_LIMIT 0xF9490FFF ++#define TIMER_FTTMR010_0_VA_SIZE 0x00001000 ++ ++/* WDT */ ++#define WDT_FTWDT010_PA_COUNT 1 ++#define WDT_FTWDT010_PA_BASE 0x94A00000 ++#define WDT_FTWDT010_PA_LIMIT 0x94A00FFF ++#define WDT_FTWDT010_PA_SIZE 0x00001000 ++#define WDT_FTWDT010_0_PA_BASE 0x94A00000 ++#define WDT_FTWDT010_0_PA_LIMIT 0x94A00FFF ++#define WDT_FTWDT010_0_PA_SIZE 0x00001000 ++#define WDT_FTWDT010_VA_COUNT 1 ++#define WDT_FTWDT010_VA_BASE 0xF94A0000 ++#define WDT_FTWDT010_VA_LIMIT 0xF94A0FFF ++#define WDT_FTWDT010_VA_SIZE 0x00001000 ++#define WDT_FTWDT010_0_VA_BASE 0xF94A0000 ++#define WDT_FTWDT010_0_VA_LIMIT 0xF94A0FFF ++#define WDT_FTWDT010_0_VA_SIZE 0x00001000 ++ ++/* RTC */ ++#define RTC_FTRTC010_PA_COUNT 1 ++#define RTC_FTRTC010_PA_BASE 0x94B00000 ++#define RTC_FTRTC010_PA_LIMIT 0x94B00FFF ++#define RTC_FTRTC010_PA_SIZE 0x00001000 ++#define RTC_FTRTC010_0_PA_BASE 0x94B00000 ++#define RTC_FTRTC010_0_PA_LIMIT 0x94B00FFF ++#define RTC_FTRTC010_0_PA_SIZE 0x00001000 ++#define RTC_FTRTC010_VA_COUNT 1 ++#define RTC_FTRTC010_VA_BASE 0xF94B0000 ++#define RTC_FTRTC010_VA_LIMIT 0xF94B0FFF ++#define RTC_FTRTC010_VA_SIZE 0x00001000 ++#define RTC_FTRTC010_0_VA_BASE 0xF94B0000 ++#define RTC_FTRTC010_0_VA_LIMIT 0xF94B0FFF ++#define RTC_FTRTC010_0_VA_SIZE 0x00001000 ++ ++/* GPIO */ ++#define GPIO_FTGPIO010_PA_COUNT 1 ++#define GPIO_FTGPIO010_PA_BASE 0x94C00000 ++#define GPIO_FTGPIO010_PA_LIMIT 0x94C00FFF ++#define GPIO_FTGPIO010_PA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_0_PA_BASE 0x94C00000 ++#define GPIO_FTGPIO010_0_PA_LIMIT 0x94C00FFF ++#define GPIO_FTGPIO010_0_PA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_VA_COUNT 1 ++#define GPIO_FTGPIO010_VA_BASE 0xF94C0000 ++#define GPIO_FTGPIO010_VA_LIMIT 0xF94C0FFF ++#define GPIO_FTGPIO010_VA_SIZE 0x00001000 ++#define GPIO_FTGPIO010_0_VA_BASE 0xF94C0000 ++#define GPIO_FTGPIO010_0_VA_LIMIT 0xF94C0FFF ++#define GPIO_FTGPIO010_0_VA_SIZE 0x00001000 ++ ++/* SSP */ ++#define SSP_FTSSP010_PA_BASE 0x94500000 ++#define SSP_FTSSP010_0_PA_BASE 0x94500000 ++#define SSP_FTSSP010_VA_BASE 0xF9450000 ++#define SSP_FTSSP010_0_VA_BASE 0xF9450000 ++ ++/* SPI */ ++#define SPI_FTSSP010_PA_BASE 0x94100000 ++#define SPI_FTSSP010_0_PA_BASE 0x94100000 ++#define SPI_FTSSP010_VA_BASE 0xF9410000 ++#define SPI_FTSSP010_0_VA_BASE 0xF9410000 ++ ++/* AHB Controller */ ++#define AHB_ATFAHBC020S_0_PA_BASE 0x90C00000 ++#define AHB_ATFAHBC020S_0_VA_BASE 0xF90C0000 ++ ++#define I2C_FTI2C010_PA_COUNT 1 ++#define I2C_FTI2C010_PA_BASE 0x94E00000 ++#define I2C_FTI2C010_PA_LIMIT 0x94E00FFF ++#define I2C_FTI2C010_PA_SIZE 0x00001000 ++#define I2C_FTI2C010_0_PA_BASE 0x94E00000 ++#define I2C_FTI2C010_0_PA_LIMIT 0x94E00FFF ++#define I2C_FTI2C010_0_PA_SIZE 0x00001000 ++#define I2C_FTI2C010_VA_COUNT 1 ++#define I2C_FTI2C010_VA_BASE 0xF94E0000 ++#define I2C_FTI2C010_VA_LIMIT 0xF94E0FFF ++#define I2C_FTI2C010_VA_SIZE 0x00001000 ++#define I2C_FTI2C010_0_VA_BASE 0xF94E0000 ++#define I2C_FTI2C010_0_VA_LIMIT 0xF94E0FFF ++#define I2C_FTI2C010_0_VA_SIZE 0x00001000 ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/spec.h linux-3.4.110/arch/nds32/include/asm/spec.h +--- linux-3.4.110.orig/arch/nds32/include/asm/spec.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/spec.h 2016-04-07 10:20:50.914079941 +0200 +@@ -0,0 +1,510 @@ ++/* ++ * 
linux/arch/nds32/include/asm/spec.h ++ * ++ * AG101 Platform Independent Specification ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/14/2005 Created. ++ * 09/15/2005 Completed. ++ */ ++ ++#ifndef __FARADAY_PLATFORM_INDEPENDENT_SPECIFICATION__ ++#define __FARADAY_PLATFORM_INDEPENDENT_SPECIFICATION__ ++ ++#include ++#ifdef CONFIG_PLAT_AG102 ++#include ++#else ++#include ++#endif ++ ++/* ++ * Platform dependent specification ++ */ ++#define PLATFORM_NAME "Andes AG101" ++ ++/* ++ * Component counts ++ */ ++ ++/* INTC */ ++#define INTC_COUNT 2 ++#define INTC_FTINTC010_COUNT 2 ++/* TIMER */ ++#define TIMER_COUNT 1 ++#define TIMER_FTTMR010_COUNT 1 ++/* SSP */ ++#define SSP_COUNT 1 ++#define SSP_FTSSP010_COUNT 1 ++/* PMU */ ++#define PMU_COUNT 1 ++#define PMU_FTPMU010_COUNT 1 ++/* MAC */ ++#define MAC_COUNT 1 ++#define MAC_FTMAC100_COUNT 1 ++ ++/* SDC */ ++#define SDC_COUNT 1 ++#define SDC_FTSDC010_COUNT 1 ++/* AHBDMA */ ++#define AHBDMA_COUNT 1 ++/* APBDMA */ ++#define APBDMA_COUNT 1 ++/* RTC */ ++#define RTC_COUNT 1 ++#define RTC_FTRTC010_COUNT 1 ++/* WDT */ ++#define WDT_COUNT 1 ++#define WDT_FTWDT010_COUNT 1 ++/* GPIO */ ++#define GPIO_COUNT 1 ++#define GPIO_FTGPIO010_COUNT 1 ++/* CFC */ ++#define CFC_COUNT 1 ++#define CFC_FTCFC010_COUNT 1 ++/* LCD */ ++#define LCD_COUNT 1 ++#define LCD_FTLCDC100_COUNT 1 ++/* I2C */ ++#define I2C_COUNT 1 ++#define I2C_FTI2C010_COUNT 1 ++/* USB */ ++#define USB_COUNT 3 ++#define USB_FOTG2XX_COUNT 1 ++#define USB_FUSBH200_COUNT 1 ++#define USB_FUSB220_COUNT 1 ++/* DMAC */ ++#define DMAC_COUNT 1 ++#define DMAC_FTDMAC020_COUNT 1 ++/* KMI */ ++#define KMI_COUNT 2 ++#define KMI_FTKBC010_COUNT 2 ++/* PCI */ ++#define PCI_COUNT 1 ++/* PCIMEM */ ++#define PCIMEM_COUNT 1 ++/* PCIIO */ ++#define PCIIO_COUNT 1 ++/* PCIC */ ++#define PCIC_COUNT 1 ++#define PCIC_FTPCI100_COUNT 1 ++ ++/* ++ * Hierarchial Component IDs ++ */ ++ ++#define PLATFORM_AHBDMA_DMAC_FTDMAC020_ID 0 ++#define PLATFORM_APBDMA_APBBRG_FTAPBBRG020S_ID 0 ++ ++/* ++ * Number of interrupts ++ */ ++ ++#define PLATFORM_IRQ_TOTALCOUNT 32 ++#define PLATFORM_AHBDMA_IRQ_TOTALCOUNT 8 ++#define PLATFORM_APBDMA_IRQ_TOTALCOUNT 4 ++ ++#define PLATFORM_IRQ_BASE 0 ++#define PLATFORM_AHBDMA_IRQ_BASE 64 ++#define PLATFORM_APBDMA_IRQ_BASE 72 ++#define PLATFORM_PCI_IRQ_BASE 176 ++#define PLATFORM_INTERRUPTS 202 ++ ++/* ++ * IRQ trigger level and trigger mode ++ */ ++ ++#define PLATFORM_IRQ_TRIGGER_MODE 0x000EC880 ++#define PLATFORM_IRQ_TRIGGER_LEVEL 0x10000000 ++#define PLATFORM_AHBDMA_IRQ_TRIGGER_MODE 0x00000000 ++#define PLATFORM_AHBDMA_IRQ_TRIGGER_LEVEL 0xFFFFFFFF ++#define PLATFORM_APBDMA_IRQ_TRIGGER_MODE 0x00000000 ++#define PLATFORM_APBDMA_IRQ_TRIGGER_LEVEL 0xFFFFFFFF ++ 
++/* ++ * Interrupt numbers of Hierarchical Architecture ++ */ ++ ++/* AHBDMA */ ++#define PLATFORM_AHBDMA_IRQ 21 ++ ++/* APBDMA */ ++#define PLATFORM_APBDMA_IRQ 24 ++ ++/* PCI */ ++#ifdef CONFIG_XC5_PCI ++#define PLATFORM_PCI_IRQ 10 ++#else ++#define PLATFORM_PCI_IRQ 28 ++#endif ++ ++ ++/* ++ * Interrrupt numbers ++ */ ++ ++/* TIMER */ ++#define TIMER_FTTMR010_IRQ_COUNT 3 ++#define TIMER_FTTMR010_IRQ0 19 ++#define TIMER_FTTMR010_0_IRQ0 19 ++#define TIMER_FTTMR010_IRQ1 14 ++#define TIMER_FTTMR010_0_IRQ1 14 ++#define TIMER_FTTMR010_IRQ2 15 ++#define TIMER_FTTMR010_0_IRQ2 15 ++ ++/* SSP */ ++#define SSP_FTSSP010_IRQ_COUNT 1 ++#define SSP_FTSSP010_IRQ 6 ++#define SSP_FTSSP010_0_IRQ 6 ++ ++/* MAC */ ++#define MAC_FTMAC100_IRQ_COUNT 1 ++#define MAC_FTMAC100_IRQ 25 ++#define MAC_FTMAC100_0_IRQ 25 ++#define MAC_FTMAC100_1_IRQ 133 ++ ++/* SDC */ ++#define SDC_FTSDC010_IRQ_COUNT 1 ++#define SDC_FTSDC010_IRQ 5 ++#define SDC_FTSDC010_0_IRQ 5 ++ ++/* RTC */ ++#define RTC_FTRTC010_IRQ_COUNT 2 ++#define RTC_FTRTC010_IRQ0 17 ++#define RTC_FTRTC010_0_IRQ0 17 ++#define RTC_FTRTC010_IRQ1 18 ++#define RTC_FTRTC010_0_IRQ1 18 ++ ++/* WDT */ ++#define WDT_FTWDT010_IRQ_COUNT 1 ++#define WDT_FTWDT010_IRQ 16 ++#define WDT_FTWDT010_0_IRQ 16 ++ ++/* GPIO */ ++#define GPIO_FTGPIO010_IRQ_COUNT 1 ++#define GPIO_FTGPIO010_IRQ 13 ++#define GPIO_FTGPIO010_0_IRQ 13 ++ ++/* CFC */ ++#define CFC_FTCFC010_IRQ_COUNT 2 ++#define CFC_FTCFC010_IRQ0 0 ++#define CFC_FTCFC010_0_IRQ0 0 ++#define CFC_FTCFC010_IRQ1 1 ++#define CFC_FTCFC010_0_IRQ1 1 ++ ++/* LCD */ ++#define LCD_FTLCDC100_IRQ_COUNT 1 ++#define LCD_FTLCDC100_IRQ 20 ++#define LCD_FTLCDC100_0_IRQ 20 ++ ++/* I2C */ ++#define I2C_FTI2C010_IRQ_COUNT 1 ++#define I2C_FTI2C010_IRQ 3 ++#define I2C_FTI2C010_0_IRQ 3 ++ ++/* USB */ ++#define USB_FUSBH200_IRQ_COUNT 1 ++#define USB_FUSBH200_IRQ 29 ++#define USB_FUSBH200_0_IRQ 29 ++#define USB_FUSB220_IRQ_COUNT 1 ++#define USB_FUSB220_IRQ 26 ++#define USB_FUSB220_0_IRQ 26 ++ ++/* DMAC */ ++#define DMAC_FTDMAC020_IRQ_COUNT 8 ++#define DMAC_FTDMAC020_IRQ0 64 ++#define DMAC_FTDMAC020_0_IRQ0 64 ++#define DMAC_FTDMAC020_IRQ1 65 ++#define DMAC_FTDMAC020_0_IRQ1 65 ++#define DMAC_FTDMAC020_IRQ2 66 ++#define DMAC_FTDMAC020_0_IRQ2 66 ++#define DMAC_FTDMAC020_IRQ3 67 ++#define DMAC_FTDMAC020_0_IRQ3 67 ++#define DMAC_FTDMAC020_IRQ4 68 ++#define DMAC_FTDMAC020_0_IRQ4 68 ++#define DMAC_FTDMAC020_IRQ5 69 ++#define DMAC_FTDMAC020_0_IRQ5 69 ++#define DMAC_FTDMAC020_IRQ6 70 ++#define DMAC_FTDMAC020_0_IRQ6 70 ++#define DMAC_FTDMAC020_IRQ7 71 ++#define DMAC_FTDMAC020_0_IRQ7 71 ++ ++/* APBBRG */ ++#define APBBRG_FTAPBBRG020S_IRQ_COUNT 4 ++#define APBBRG_FTAPBBRG020S_IRQ0 72 ++#define APBBRG_FTAPBBRG020S_0_IRQ0 72 ++#define APBBRG_FTAPBBRG020S_IRQ1 73 ++#define APBBRG_FTAPBBRG020S_0_IRQ1 73 ++#define APBBRG_FTAPBBRG020S_IRQ2 74 ++#define APBBRG_FTAPBBRG020S_0_IRQ2 74 ++#define APBBRG_FTAPBBRG020S_IRQ3 75 ++#define APBBRG_FTAPBBRG020S_0_IRQ3 75 ++#define APBBRG_FTAPBBRG020S_1_IRQ0 172 ++#define APBBRG_FTAPBBRG020S_1_IRQ1 173 ++#define APBBRG_FTAPBBRG020S_1_IRQ2 174 ++#define APBBRG_FTAPBBRG020S_1_IRQ3 175 ++ ++/* KMI */ ++#define KMI_FTKBC010_IRQ_COUNT 1 ++#define KMI_FTKBC010_IRQ 30 ++#define KMI_FTKBC010_0_IRQ 30 ++#define KMI_FTKBC010_1_IRQ 31 ++ ++/* PCIC */ ++#define PCIC_FTPCI100_IRQ_COUNT 4 ++#define PCIC_FTPCI100_IRQ0 176 ++#define PCIC_FTPCI100_0_IRQ0 176 ++#define PCIC_FTPCI100_IRQ1 177 ++#define PCIC_FTPCI100_0_IRQ1 177 ++#define PCIC_FTPCI100_IRQ2 178 ++#define PCIC_FTPCI100_0_IRQ2 178 ++#define PCIC_FTPCI100_IRQ3 179 ++#define 
PCIC_FTPCI100_0_IRQ3 179 ++ ++/* ++ * Base addresses ++ */ ++ ++/* CPU */ ++#define CPU_MEM_PA_BASE CONFIG_MEMORY_START ++ ++ ++/* INTC */ ++#define INTC_FTINTC010_PA_COUNT 1 ++#define INTC_FTINTC010_PA_BASE 0x98800000 ++#define INTC_FTINTC010_PA_LIMIT 0x98800FFF ++#define INTC_FTINTC010_PA_SIZE 0x00001000 ++#define INTC_FTINTC010_0_PA_BASE 0x98800000 ++#define INTC_FTINTC010_0_PA_LIMIT 0x98800FFF ++#define INTC_FTINTC010_0_PA_SIZE 0x00001000 ++#define INTC_FTINTC010_1_PA_BASE 0xB0800000 ++#define INTC_FTINTC010_1_PA_LIMIT 0xB0800FFF ++#define INTC_FTINTC010_1_PA_SIZE 0x00001000 ++#define INTC_FTINTC010_VA_COUNT 1 ++#define INTC_FTINTC010_VA_BASE 0xF9880000 ++#define INTC_FTINTC010_VA_LIMIT 0xF9880FFF ++#define INTC_FTINTC010_VA_SIZE 0x00001000 ++#define INTC_FTINTC010_0_VA_BASE 0xF9880000 ++#define INTC_FTINTC010_0_VA_LIMIT 0xF9880FFF ++#define INTC_FTINTC010_0_VA_SIZE 0x00001000 ++#define INTC_FTINTC010_1_VA_BASE 0xFB080000 ++#define INTC_FTINTC010_1_VA_LIMIT 0xFB080FFF ++#define INTC_FTINTC010_1_VA_SIZE 0x00001000 ++ ++/* PMU */ ++#define PMU_FTPMU010_PA_COUNT 1 ++#define PMU_FTPMU010_PA_BASE 0x98100000 ++#define PMU_FTPMU010_PA_LIMIT 0x98100FFF ++#define PMU_FTPMU010_PA_SIZE 0x00001000 ++#define PMU_FTPMU010_0_PA_BASE 0x98100000 ++#define PMU_FTPMU010_0_PA_LIMIT 0x98100FFF ++#define PMU_FTPMU010_0_PA_SIZE 0x00001000 ++#define PMU_FTPMU010_VA_COUNT 1 ++#define PMU_FTPMU010_VA_BASE 0xF9810000 ++#define PMU_FTPMU010_VA_LIMIT 0xF9810FFF ++#define PMU_FTPMU010_VA_SIZE 0x00001000 ++#define PMU_FTPMU010_0_VA_BASE 0xF9810000 ++#define PMU_FTPMU010_0_VA_LIMIT 0xF9810FFF ++#define PMU_FTPMU010_0_VA_SIZE 0x00001000 ++ ++/* USB */ ++#define USB_FUSBH200_PA_COUNT 1 ++#define USB_FUSBH200_PA_BASE 0x92000000 ++#define USB_FUSBH200_PA_LIMIT 0x92000FFF ++#define USB_FUSBH200_PA_SIZE 0x00001000 ++#define USB_FUSBH200_0_PA_BASE 0x92000000 ++#define USB_FUSBH200_0_PA_LIMIT 0x92000FFF ++#define USB_FUSBH200_0_PA_SIZE 0x00001000 ++#define USB_FUSBH200_VA_COUNT 1 ++#define USB_FUSBH200_VA_BASE 0xF9200000 ++#define USB_FUSBH200_VA_LIMIT 0xF9200FFF ++#define USB_FUSBH200_VA_SIZE 0x00001000 ++#define USB_FUSBH200_0_VA_BASE 0xF9200000 ++#define USB_FUSBH200_0_VA_LIMIT 0xF9200FFF ++#define USB_FUSBH200_0_VA_SIZE 0x00001000 ++#define USB_FUSB220_PA_COUNT 1 ++#define USB_FUSB220_PA_BASE 0x90B00000 ++#define USB_FUSB220_PA_LIMIT 0x90B00FFF ++#define USB_FUSB220_PA_SIZE 0x00001000 ++#define USB_FUSB220_0_PA_BASE 0x90B00000 ++#define USB_FUSB220_0_PA_LIMIT 0x90B00FFF ++#define USB_FUSB220_0_PA_SIZE 0x00001000 ++#define USB_FUSB220_VA_COUNT 1 ++#define USB_FUSB220_VA_BASE 0xF90B0000 ++#define USB_FUSB220_VA_LIMIT 0xF90B0FFF ++#define USB_FUSB220_VA_SIZE 0x00001000 ++#define USB_FUSB220_0_VA_BASE 0xF90B0000 ++#define USB_FUSB220_0_VA_LIMIT 0xF90B0FFF ++#define USB_FUSB220_0_VA_SIZE 0x00001000 ++ ++/* KMI */ ++#define KMI_FTKBC010_PA_COUNT 1 ++#define KMI_FTKBC010_PA_BASE 0x97200000 ++#define KMI_FTKBC010_PA_LIMIT 0x97200FFF ++#define KMI_FTKBC010_PA_SIZE 0x00001000 ++#define KMI_FTKBC010_0_PA_BASE 0x97200000 ++#define KMI_FTKBC010_0_PA_LIMIT 0x97200FFF ++#define KMI_FTKBC010_0_PA_SIZE 0x00001000 ++#define KMI_FTKBC010_1_PA_BASE 0x97300000 ++#define KMI_FTKBC010_1_PA_LIMIT 0x97300FFF ++#define KMI_FTKBC010_1_PA_SIZE 0x00001000 ++#define KMI_FTKBC010_VA_COUNT 1 ++#define KMI_FTKBC010_VA_BASE 0xF9720000 ++#define KMI_FTKBC010_VA_LIMIT 0xF9720FFF ++#define KMI_FTKBC010_VA_SIZE 0x00001000 ++#define KMI_FTKBC010_0_VA_BASE 0xF9720000 ++#define KMI_FTKBC010_0_VA_LIMIT 0xF9720FFF ++#define KMI_FTKBC010_0_VA_SIZE 0x00001000 
++#define KMI_FTKBC010_1_VA_BASE 0xF9730000 ++#define KMI_FTKBC010_1_VA_LIMIT 0xF9730FFF ++#define KMI_FTKBC010_1_VA_SIZE 0x00001000 ++ ++/* PCIMEM */ ++#define PCIMEM_PA_COUNT 1 ++#define PCIMEM_PA_BASE 0xA0000000 ++#define PCIMEM_PA_LIMIT 0xAFFFFFFF ++#define PCIMEM_PA_SIZE 0x10000000 ++#define PCIMEM_0_PA_BASE 0xA0000000 ++#define PCIMEM_0_PA_LIMIT 0xAFFFFFFF ++#define PCIMEM_0_PA_SIZE 0x10000000 ++ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA_MODULE ++#define CONFIG_PLATFORM_AHBDMA ++#endif ++ ++#ifdef CONFIG_PLATFORM_APBDMA_MODULE ++#define CONFIG_PLATFORM_APBDMA ++#endif ++ ++#include /* Manual defined spec */ ++ ++/* ++ * Platform independent specification ++ */ ++ ++#define NR_IRQS PLATFORM_INTERRUPTS ++ ++#ifndef TIMER_CLK_IN ++#error Missing platform dependent symbol TIMER_CLK_IN in file. ++#endif ++ ++/* ++ * Macros for retrieving IP related information ++ */ ++#define IP_IDENTIFIER __glue(__glue(IPMODULE,_),__glue(IPNAME,_)) ++ ++#define IP_COUNT __glue(IP_IDENTIFIER,COUNT) ++ ++#define IP_IRQ_COUNT __glue(IP_IDENTIFIER,IRQ_COUNT) ++#define IP_IRQ(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ) ++#define IP_irq __glue(IP_IDENTIFIER,irq) ++ ++#define IP_PA_COUNT __glue(IP_IDENTIFIER,PA_COUNT) ++#define IP_PA_BASE(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE) ++#define IP_PA_LIMIT(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT) ++#define IP_PA_SIZE(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE) ++#define IP_pa_base __glue(IP_IDENTIFIER,pa_base) ++#define IP_pa_limit __glue(IP_IDENTIFIER,pa_limit) ++#define IP_pa_size __glue(IP_IDENTIFIER,pa_size) ++ ++#define IP_VA_COUNT __glue(IP_IDENTIFIER,VA_COUNT) ++#define IP_VA_BASE(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE) ++#define IP_VA_LIMIT(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT) ++#define IP_VA_SIZE(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE) ++#define IP_va_base __glue(IP_IDENTIFIER,va_base) ++#define IP_va_limit __glue(IP_IDENTIFIER,va_limit) ++#define IP_va_size __glue(IP_IDENTIFIER,va_size) ++ ++/* ++ * Facility macros ++ */ ++/* IRQ0~7 */ ++#define IP_IRQ0(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ0) ++#define IP_IRQ1(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ1) ++#define IP_IRQ2(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ2) ++#define IP_IRQ3(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ3) ++#define IP_IRQ4(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ4) ++#define IP_IRQ5(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ5) ++#define IP_IRQ6(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ6) ++#define IP_IRQ7(n) __glue(__glue(IP_IDENTIFIER,n),_IRQ7) ++ ++/* PA_BASE0~7 */ ++#define IP_PA_BASE0(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE0) ++#define IP_PA_BASE1(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE1) ++#define IP_PA_BASE2(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE2) ++#define IP_PA_BASE3(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE3) ++#define IP_PA_BASE4(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE4) ++#define IP_PA_BASE5(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE5) ++#define IP_PA_BASE6(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE6) ++#define IP_PA_BASE7(n) __glue(__glue(IP_IDENTIFIER,n),_PA_BASE7) ++ ++/* PA_LIMIT0~7 */ ++#define IP_PA_LIMIT0(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT0) ++#define IP_PA_LIMIT1(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT1) ++#define IP_PA_LIMIT2(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT2) ++#define IP_PA_LIMIT3(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT3) ++#define IP_PA_LIMIT4(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT4) ++#define IP_PA_LIMIT5(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT5) ++#define IP_PA_LIMIT6(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT6) 
++#define IP_PA_LIMIT7(n) __glue(__glue(IP_IDENTIFIER,n),_PA_LIMIT7) ++ ++/* PA_SIZE0~7 */ ++#define IP_PA_SIZE0(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE0) ++#define IP_PA_SIZE1(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE1) ++#define IP_PA_SIZE2(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE2) ++#define IP_PA_SIZE3(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE3) ++#define IP_PA_SIZE4(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE4) ++#define IP_PA_SIZE5(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE5) ++#define IP_PA_SIZE6(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE6) ++#define IP_PA_SIZE7(n) __glue(__glue(IP_IDENTIFIER,n),_PA_SIZE7) ++ ++/* VA_BASE0~7 */ ++#define IP_VA_BASE0(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE0) ++#define IP_VA_BASE1(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE1) ++#define IP_VA_BASE2(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE2) ++#define IP_VA_BASE3(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE3) ++#define IP_VA_BASE4(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE4) ++#define IP_VA_BASE5(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE5) ++#define IP_VA_BASE6(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE6) ++#define IP_VA_BASE7(n) __glue(__glue(IP_IDENTIFIER,n),_VA_BASE7) ++ ++/* VA_LIMIT0~7 */ ++#define IP_VA_LIMIT0(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT0) ++#define IP_VA_LIMIT1(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT1) ++#define IP_VA_LIMIT2(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT2) ++#define IP_VA_LIMIT3(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT3) ++#define IP_VA_LIMIT4(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT4) ++#define IP_VA_LIMIT5(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT5) ++#define IP_VA_LIMIT6(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT6) ++#define IP_VA_LIMIT7(n) __glue(__glue(IP_IDENTIFIER,n),_VA_LIMIT7) ++ ++/* VA_SIZE0~7 */ ++#define IP_VA_SIZE0(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE0) ++#define IP_VA_SIZE1(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE1) ++#define IP_VA_SIZE2(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE2) ++#define IP_VA_SIZE3(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE3) ++#define IP_VA_SIZE4(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE4) ++#define IP_VA_SIZE5(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE5) ++#define IP_VA_SIZE6(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE6) ++#define IP_VA_SIZE7(n) __glue(__glue(IP_IDENTIFIER,n),_VA_SIZE7) ++ ++#endif /* __FARADAY_PLATFORM_INDEPENDENT_SPECIFICATION__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/spinlock.h linux-3.4.110/arch/nds32/include/asm/spinlock.h +--- linux-3.4.110.orig/arch/nds32/include/asm/spinlock.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/spinlock.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,183 @@ ++/* ++ * linux/arch/nds32/include/asm/spinlock.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_SPINLOCK_H ++#define __ASM_SPINLOCK_H ++ ++#include ++ ++#define arch_spin_is_locked(x) ((x)->lock != 0) ++ ++#define arch_spin_unlock_wait(lock) \ ++ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) ++ ++#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) ++ ++static inline void arch_spin_lock(arch_spinlock_t *lock) ++{ ++ unsigned long tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tbnez\t%0, 1b\n" ++ "\tmovi\t%0, #0x1\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (&lock->lock) ++ : "memory"); ++} ++ ++static inline int arch_spin_trylock(arch_spinlock_t *lock) ++{ ++ unsigned long ret, tmp; ++ ++ 
__asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%2+$r15]\n" ++ "\tmovi\t%1, #0x1\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (&lock->lock) ++ : "memory"); ++ ++ return ret == 0; ++} ++ ++static inline void arch_spin_unlock(arch_spinlock_t *lock) ++{ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "\tswi\t$r15, [%0]\n" ++ : ++ : "r" (&lock->lock) ++ : "memory"); ++} ++ ++static inline void arch_write_lock(arch_rwlock_t *rw) ++{ ++ unsigned long tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tbnez\t%0, 1b\n" ++ "\tsethi\t%0, 0x80000\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (&rw->lock) ++ : "memory"); ++} ++ ++static inline void arch_write_unlock(arch_rwlock_t *rw) ++{ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "\tswi\t$r15, [%0]\n" ++ : ++ : "r" (&rw->lock) ++ : "memory"); ++} ++ ++#define arch_write_can_lock(x) ((x)->lock == 0) ++static inline void arch_read_lock(arch_rwlock_t *rw) ++{ ++ int tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\tbltz\t%0, 1b\n" ++ "\taddi\t%0, %0, #1\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (&rw->lock) ++ : "memory"); ++} ++ ++static inline void arch_read_unlock(arch_rwlock_t *rw) ++{ ++ unsigned long tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "1:\n" ++ "\tllw\t%0, [%1+$r15]\n" ++ "\taddi\t%0, %0, #-1\n" ++ "\tscw\t%0, [%1+$r15]\n" ++ "\tbeqz\t%0, 1b\n" ++ : "=&r" (tmp) ++ : "r" (&rw->lock) ++ : "memory"); ++} ++ ++static inline int arch_read_trylock(arch_rwlock_t *rw) ++{ ++ unsigned long ret, tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "\tmovi\t%0, #0x0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\tbltz\t%1, 2f\n" ++ "\taddi\t%1, %1, #1\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ "\tmovi\t%0, #0x1\n" ++ "\tj\t3f\n" ++ "2:\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "3:\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (&rw->lock) ++ : "memory"); ++ ++ return ret; ++} ++ ++static inline int arch_write_trylock(arch_rwlock_t *rw) ++{ ++ unsigned long ret, tmp; ++ ++ __asm__ __volatile__( ++ "xor\t$r15, $r15, $r15\n" ++ "\tmovi\t%0, #0x0\n" ++ "1:\n" ++ "\tllw\t%1, [%2+$r15]\n" ++ "\tbnez\t%1, 2f\n" ++ "\tsethi\t%1, 0x80000\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "\tbeqz\t%1, 1b\n" ++ "\tmovi\t%0, #0x1\n" ++ "\tj\t3f\n" ++ "2:\n" ++ "\tscw\t%1, [%2+$r15]\n" ++ "3:\n" ++ : "=&r" (ret), "=&r" (tmp) ++ : "r" (&rw->lock) ++ : "memory"); ++ ++ return ret; ++} ++ ++#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) ++#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) ++ ++#define arch_read_can_lock(x) ((x)->lock < 0x80000000) ++ ++#define arch_spin_relax(lock) cpu_relax() ++#define arch_read_relax(lock) cpu_relax() ++#define arch_write_relax(lock) cpu_relax() ++ ++#endif /* __ASM_SPINLOCK_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/spinlock_types.h linux-3.4.110/arch/nds32/include/asm/spinlock_types.h +--- linux-3.4.110.orig/arch/nds32/include/asm/spinlock_types.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/spinlock_types.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/include/asm/spinlock_types.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASM_SPINLOCK_TYPES_H ++#define _ASM_SPINLOCK_TYPES_H ++ ++#ifndef 
__LINUX_SPINLOCK_TYPES_H ++# error "please don't include this file directly" ++#endif ++ ++typedef struct { ++ volatile unsigned int lock; ++} arch_spinlock_t; ++ ++#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } ++ ++typedef struct { ++ volatile unsigned int lock; ++} arch_rwlock_t; ++ ++#define __ARCH_RW_LOCK_UNLOCKED { 0 } ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/statfs.h linux-3.4.110/arch/nds32/include/asm/statfs.h +--- linux-3.4.110.orig/arch/nds32/include/asm/statfs.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/statfs.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/statfs.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_STATFS_H ++#define _ASMNDS32_STATFS_H ++ ++#include ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/stat.h linux-3.4.110/arch/nds32/include/asm/stat.h +--- linux-3.4.110.orig/arch/nds32/include/asm/stat.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/stat.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,101 @@ ++/* ++ * linux/arch/nds32/include/asm/stat.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_STAT_H ++#define _ASMNDS32_STAT_H ++ ++struct __old_kernel_stat { ++ unsigned short st_dev; ++ unsigned short st_ino; ++ unsigned short st_mode; ++ unsigned short st_nlink; ++ unsigned short st_uid; ++ unsigned short st_gid; ++ unsigned short st_rdev; ++ unsigned long st_size; ++ unsigned long st_atime; ++ unsigned long st_mtime; ++ unsigned long st_ctime; ++}; ++ ++#define STAT_HAVE_NSEC ++ ++struct stat { ++#if defined(__NDS32_EB__) ++ unsigned short st_dev; ++ unsigned short __pad1; ++#else ++ unsigned long st_dev; ++#endif ++ unsigned long st_ino; ++ unsigned short st_mode; ++ unsigned short st_nlink; ++ unsigned short st_uid; ++ unsigned short st_gid; ++#if defined(__NDS32_EB__) ++ unsigned short st_rdev; ++ unsigned short __pad2; ++#else ++ unsigned long st_rdev; ++#endif ++ unsigned long st_size; ++ unsigned long st_blksize; ++ unsigned long st_blocks; ++ unsigned long st_atime; ++ unsigned long st_atime_nsec; ++ unsigned long st_mtime; ++ unsigned long st_mtime_nsec; ++ unsigned long st_ctime; ++ unsigned long st_ctime_nsec; ++ unsigned long __unused4; ++ unsigned long __unused5; ++}; ++ ++/* This matches struct stat64 in glibc2.1, hence the absolutely ++ * insane amounts of padding around dev_t's. ++ * Note: The kernel zero's the padded region because glibc might read them ++ * in the hope that the kernel has stretched to using larger sizes. ++ */ ++ ++struct stat64 { ++ unsigned long long st_dev; ++ unsigned long __pad0; ++ ++#define STAT64_HAS_BROKEN_ST_INO 1 ++ unsigned long __st_ino; ++ unsigned int st_mode; ++ unsigned int st_nlink; ++ ++ unsigned long st_uid; ++ unsigned long st_gid; ++ ++ unsigned long long st_rdev; ++ unsigned int __pad3; ++ ++ unsigned long long st_size; ++ unsigned long st_blksize; ++ ++//#if defined(__NDS32EB__) ++// unsigned long __pad4; // Future possible st_blocks hi bits ++ unsigned long long st_blocks; // Number 512-byte blocks allocated. ++//#else // Must be little ++// unsigned long st_blocks; // Number 512-byte blocks allocated. 
++// unsigned long __pad4; // Future possible st_blocks hi bits ++//#endif ++ ++ unsigned long st_atime; ++ unsigned long st_atime_nsec; ++ ++ unsigned long st_mtime; ++ unsigned long st_mtime_nsec; ++ ++ unsigned long st_ctime; ++ unsigned long st_ctime_nsec; ++ ++ unsigned long long st_ino; ++}; ++ ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/string.h linux-3.4.110/arch/nds32/include/asm/string.h +--- linux-3.4.110.orig/arch/nds32/include/asm/string.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/string.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * linux/arch/nds32/include/asm/string.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_STRING_H ++#define __ASM_NDS32_STRING_H ++ ++/* ++ * We don't do inline string functions, since the ++ * optimised inline asm versions are not small. ++ */ ++ ++#define __HAVE_ARCH_STRRCHR ++extern char * strrchr(const char * s, int c); ++ ++#define __HAVE_ARCH_STRCHR ++extern char * strchr(const char * s, int c); ++ ++#define __HAVE_ARCH_MEMCPY ++extern void * memcpy(void *, const void *, __kernel_size_t); ++ ++#define __HAVE_ARCH_MEMMOVE ++extern void * memmove(void *, const void *, __kernel_size_t); ++ ++#define __HAVE_ARCH_MEMZERO ++#define __HAVE_ARCH_MEMSET ++extern void * memset(void *, int, __kernel_size_t); ++ ++extern void __memzero(void *ptr, __kernel_size_t n); ++ ++#define memset(p,v,n) \ ++ ({ \ ++ if ((n) != 0) { \ ++ if (__builtin_constant_p((v)) && (v) == 0) \ ++ __memzero((p),(n)); \ ++ else \ ++ memset((p),(v),(n)); \ ++ } \ ++ (p); \ ++ }) ++ ++#define memzero(p,n) ({ if ((n) != 0) __memzero((p),(n)); (p); }) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/suspend.h linux-3.4.110/arch/nds32/include/asm/suspend.h +--- linux-3.4.110.orig/arch/nds32/include/asm/suspend.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/suspend.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,9 @@ ++/* ++ * linux/arch/nds32/include/asm/suspend.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_SUSPEND_H ++#define _ASMNDS32_SUSPEND_H ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/swab.h linux-3.4.110/arch/nds32/include/asm/swab.h +--- linux-3.4.110.orig/arch/nds32/include/asm/swab.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/swab.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,34 @@ ++/* ++ * include/asm/byteorder.h ++ * Copyright (C) 2008 Andes Technology, Inc. 
++ */ ++ ++#ifndef __NDS32_SWAB_H__ ++#define __NDS32_SWAB_H__ ++ ++#include ++#include ++ ++static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) ++{ ++ __asm__("wsbh %0, %0\n\t" /* word swap byte within halfword */ ++ "rotri %0, %0, #16\n" ++ : "=r" (x) : "0" (x)); ++ return x; ++} ++ ++static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) ++{ ++ __asm__("wsbh %0, %0\n" /* word swap byte within halfword */ ++ : "=r" (x) : "0" (x)); ++ return x; ++} ++#define __arch_swab32(x) ___arch__swab32(x) ++#define __arch_swab16(x) ___arch__swab16(x) ++ ++#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) ++# define __BYTEORDER_HAS_U64__ ++# define __SWAB_64_THRU_32__ ++#endif ++ ++#endif /* __NDS32_SWAB_H__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/switch_to.h linux-3.4.110/arch/nds32/include/asm/switch_to.h +--- linux-3.4.110.orig/arch/nds32/include/asm/switch_to.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/switch_to.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,18 @@ ++#ifndef __ASM_NDS32_SWITCH_TO_H ++#define __ASM_NDS32_SWITCH_TO_H ++ ++#include ++ ++/* ++ * switch_to(prev, next) should switch from task `prev' to `next' ++ * `prev' will never be the same as `next'. schedule() itself ++ * contains the memory barrier to tell GCC not to cache `current'. ++ */ ++extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); ++ ++#define switch_to( prev, next, last) \ ++do { \ ++ last = __switch_to( prev, task_thread_info( prev), task_thread_info( next)); \ ++} while (0) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/system.h linux-3.4.110/arch/nds32/include/asm/system.h +--- linux-3.4.110.orig/arch/nds32/include/asm/system.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/system.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,4 @@ ++/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! 
*/ ++#include ++#include ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/termbits.h linux-3.4.110/arch/nds32/include/asm/termbits.h +--- linux-3.4.110.orig/arch/nds32/include/asm/termbits.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/termbits.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,198 @@ ++#ifndef __ASM_NDS32_TERMBITS_H ++#define __ASM_NDS32_TERMBITS_H ++ ++typedef unsigned char cc_t; ++typedef unsigned int speed_t; ++typedef unsigned int tcflag_t; ++ ++#define NCCS 19 ++struct termios { ++ tcflag_t c_iflag; /* input mode flags */ ++ tcflag_t c_oflag; /* output mode flags */ ++ tcflag_t c_cflag; /* control mode flags */ ++ tcflag_t c_lflag; /* local mode flags */ ++ cc_t c_line; /* line discipline */ ++ cc_t c_cc[NCCS]; /* control characters */ ++}; ++ ++struct termios2 { ++ tcflag_t c_iflag; /* input mode flags */ ++ tcflag_t c_oflag; /* output mode flags */ ++ tcflag_t c_cflag; /* control mode flags */ ++ tcflag_t c_lflag; /* local mode flags */ ++ cc_t c_line; /* line discipline */ ++ cc_t c_cc[NCCS]; /* control characters */ ++ speed_t c_ispeed; /* input speed */ ++ speed_t c_ospeed; /* output speed */ ++}; ++ ++struct ktermios { ++ tcflag_t c_iflag; /* input mode flags */ ++ tcflag_t c_oflag; /* output mode flags */ ++ tcflag_t c_cflag; /* control mode flags */ ++ tcflag_t c_lflag; /* local mode flags */ ++ cc_t c_line; /* line discipline */ ++ cc_t c_cc[NCCS]; /* control characters */ ++ speed_t c_ispeed; /* input speed */ ++ speed_t c_ospeed; /* output speed */ ++}; ++ ++ ++/* c_cc characters */ ++#define VINTR 0 ++#define VQUIT 1 ++#define VERASE 2 ++#define VKILL 3 ++#define VEOF 4 ++#define VTIME 5 ++#define VMIN 6 ++#define VSWTC 7 ++#define VSTART 8 ++#define VSTOP 9 ++#define VSUSP 10 ++#define VEOL 11 ++#define VREPRINT 12 ++#define VDISCARD 13 ++#define VWERASE 14 ++#define VLNEXT 15 ++#define VEOL2 16 ++ ++/* c_iflag bits */ ++#define IGNBRK 0000001 ++#define BRKINT 0000002 ++#define IGNPAR 0000004 ++#define PARMRK 0000010 ++#define INPCK 0000020 ++#define ISTRIP 0000040 ++#define INLCR 0000100 ++#define IGNCR 0000200 ++#define ICRNL 0000400 ++#define IUCLC 0001000 ++#define IXON 0002000 ++#define IXANY 0004000 ++#define IXOFF 0010000 ++#define IMAXBEL 0020000 ++#define IUTF8 0040000 ++ ++/* c_oflag bits */ ++#define OPOST 0000001 ++#define OLCUC 0000002 ++#define ONLCR 0000004 ++#define OCRNL 0000010 ++#define ONOCR 0000020 ++#define ONLRET 0000040 ++#define OFILL 0000100 ++#define OFDEL 0000200 ++#define NLDLY 0000400 ++#define NL0 0000000 ++#define NL1 0000400 ++#define CRDLY 0003000 ++#define CR0 0000000 ++#define CR1 0001000 ++#define CR2 0002000 ++#define CR3 0003000 ++#define TABDLY 0014000 ++#define TAB0 0000000 ++#define TAB1 0004000 ++#define TAB2 0010000 ++#define TAB3 0014000 ++#define XTABS 0014000 ++#define BSDLY 0020000 ++#define BS0 0000000 ++#define BS1 0020000 ++#define VTDLY 0040000 ++#define VT0 0000000 ++#define VT1 0040000 ++#define FFDLY 0100000 ++#define FF0 0000000 ++#define FF1 0100000 ++ ++/* c_cflag bit meaning */ ++#define CBAUD 0010017 ++#define B0 0000000 /* hang up */ ++#define B50 0000001 ++#define B75 0000002 ++#define B110 0000003 ++#define B134 0000004 ++#define B150 0000005 ++#define B200 0000006 ++#define B300 0000007 ++#define B600 0000010 ++#define B1200 0000011 ++#define B1800 0000012 ++#define B2400 0000013 ++#define B4800 0000014 ++#define B9600 0000015 ++#define B19200 0000016 ++#define B38400 0000017 ++#define EXTA B19200 ++#define EXTB B38400 ++#define CSIZE 0000060 ++#define 
CS5 0000000 ++#define CS6 0000020 ++#define CS7 0000040 ++#define CS8 0000060 ++#define CSTOPB 0000100 ++#define CREAD 0000200 ++#define PARENB 0000400 ++#define PARODD 0001000 ++#define HUPCL 0002000 ++#define CLOCAL 0004000 ++#define CBAUDEX 0010000 ++#define BOTHER 0010000 ++#define B57600 0010001 ++#define B115200 0010002 ++#define B230400 0010003 ++#define B460800 0010004 ++#define B500000 0010005 ++#define B576000 0010006 ++#define B921600 0010007 ++#define B1000000 0010010 ++#define B1152000 0010011 ++#define B1500000 0010012 ++#define B2000000 0010013 ++#define B2500000 0010014 ++#define B3000000 0010015 ++#define B3500000 0010016 ++#define B4000000 0010017 ++#define CIBAUD 002003600000 /* input baud rate */ ++#define CMSPAR 010000000000 /* mark or space (stick) parity */ ++#define CRTSCTS 020000000000 /* flow control */ ++ ++#define IBSHIFT 16 ++ ++/* c_lflag bits */ ++#define ISIG 0000001 ++#define ICANON 0000002 ++#define XCASE 0000004 ++#define ECHO 0000010 ++#define ECHOE 0000020 ++#define ECHOK 0000040 ++#define ECHONL 0000100 ++#define NOFLSH 0000200 ++#define TOSTOP 0000400 ++#define ECHOCTL 0001000 ++#define ECHOPRT 0002000 ++#define ECHOKE 0004000 ++#define FLUSHO 0010000 ++#define PENDIN 0040000 ++#define IEXTEN 0100000 ++#define EXTPROC 0200000 ++ ++/* tcflow() and TCXONC use these */ ++#define TCOOFF 0 ++#define TCOON 1 ++#define TCIOFF 2 ++#define TCION 3 ++ ++/* tcflush() and TCFLSH use these */ ++#define TCIFLUSH 0 ++#define TCOFLUSH 1 ++#define TCIOFLUSH 2 ++ ++/* tcsetattr uses these */ ++#define TCSANOW 0 ++#define TCSADRAIN 1 ++#define TCSAFLUSH 2 ++ ++#endif /* __ASM_NDS32_TERMBITS_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/termios.h linux-3.4.110/arch/nds32/include/asm/termios.h +--- linux-3.4.110.orig/arch/nds32/include/asm/termios.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/termios.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,116 @@ ++/* ++ * linux/arch/nds32/include/asm/termios.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_TERMIOS_H ++#define __ASM_NDS32_TERMIOS_H ++ ++#include ++#include ++ ++struct winsize { ++ unsigned short ws_row; ++ unsigned short ws_col; ++ unsigned short ws_xpixel; ++ unsigned short ws_ypixel; ++}; ++ ++#define NCC 8 ++struct termio { ++ unsigned short c_iflag; /* input mode flags */ ++ unsigned short c_oflag; /* output mode flags */ ++ unsigned short c_cflag; /* control mode flags */ ++ unsigned short c_lflag; /* local mode flags */ ++ unsigned char c_line; /* line discipline */ ++ unsigned char c_cc[NCC]; /* control characters */ ++}; ++ ++#ifdef __KERNEL__ ++/* intr=^C quit=^| erase=del kill=^U ++ eof=^D vtime=\0 vmin=\1 sxtc=\0 ++ start=^Q stop=^S susp=^Z eol=\0 ++ reprint=^R discard=^U werase=^W lnext=^V ++ eol2=\0 ++*/ ++#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" ++#endif ++ ++/* modem lines */ ++#define TIOCM_LE 0x001 ++#define TIOCM_DTR 0x002 ++#define TIOCM_RTS 0x004 ++#define TIOCM_ST 0x008 ++#define TIOCM_SR 0x010 ++#define TIOCM_CTS 0x020 ++#define TIOCM_CAR 0x040 ++#define TIOCM_RNG 0x080 ++#define TIOCM_DSR 0x100 ++#define TIOCM_CD TIOCM_CAR ++#define TIOCM_RI TIOCM_RNG ++#define TIOCM_OUT1 0x2000 ++#define TIOCM_OUT2 0x4000 ++#define TIOCM_LOOP 0x8000 ++ ++/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ ++ ++/* line disciplines */ ++#define N_TTY 0 ++#define N_SLIP 1 ++#define N_MOUSE 2 ++#define N_PPP 3 ++#define N_STRIP 4 ++#define N_AX25 5 ++#define N_X25 6 /* 
X.25 async */ ++#define N_6PACK 7 ++#define N_MASC 8 /* Reserved for Mobitex module */ ++#define N_R3964 9 /* Reserved for Simatic R3964 module */ ++#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ ++#define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */ ++#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ ++#define N_HDLC 13 /* synchronous HDLC */ ++#define N_SYNC_PPP 14 ++#define N_HCI 15 /* Bluetooth HCI UART */ ++ ++#ifdef __KERNEL__ ++ ++/* ++ * Translate a "termio" structure into a "termios". Ugh. ++ */ ++#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ ++ unsigned short __tmp; \ ++ get_user(__tmp,&(termio)->x); \ ++ *(unsigned short *) &(termios)->x = __tmp; \ ++} ++ ++#define user_termio_to_kernel_termios(termios, termio) \ ++({ \ ++ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ ++ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ ++ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ ++ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ ++ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ ++}) ++ ++/* ++ * Translate a "termios" structure into a "termio". Ugh. ++ */ ++#define kernel_termios_to_user_termio(termio, termios) \ ++({ \ ++ put_user((termios)->c_iflag, &(termio)->c_iflag); \ ++ put_user((termios)->c_oflag, &(termio)->c_oflag); \ ++ put_user((termios)->c_cflag, &(termio)->c_cflag); \ ++ put_user((termios)->c_lflag, &(termio)->c_lflag); \ ++ put_user((termios)->c_line, &(termio)->c_line); \ ++ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ ++}) ++ ++#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) ++#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) ++ ++#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) ++#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) ++ ++#endif /* __KERNEL__ */ ++ ++#endif /* __ASM_NDS32_TERMIOS_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/thread_info.h linux-3.4.110/arch/nds32/include/asm/thread_info.h +--- linux-3.4.110.orig/arch/nds32/include/asm/thread_info.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/thread_info.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,145 @@ ++/* ++ * linux/arch/nds32/include/asm/thread_info.h ++ * ++ * Copyright (C) 2002 Russell King. ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef __ASM_NDS32_THREAD_INFO_H ++#define __ASM_NDS32_THREAD_INFO_H ++ ++#ifdef __KERNEL__ ++ ++#define THREAD_SHIFT (13) ++#define THREAD_SIZE (1 << THREAD_SHIFT) ++ ++#ifndef __ASSEMBLY__ ++ ++struct task_struct; ++struct exec_domain; ++ ++#include ++#include ++ ++ ++typedef struct { ++ unsigned long seg; ++} mm_segment_t; ++//typedef unsigned long mm_segment_t; ++ ++struct cpu_context_save { ++ unsigned long r6; ++ unsigned long r7; ++ unsigned long r8; ++ unsigned long r9; ++ unsigned long r10; ++ unsigned long r11; ++ unsigned long r12; ++ unsigned long r13; ++ unsigned long r14; ++ unsigned long fp; ++ unsigned long pc; ++}; ++ ++/* ++ * low level task data that entry.S needs immediate access to. ++ * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
++ */ ++struct thread_info { ++ unsigned long flags; /* low level flags */ ++ __s32 preempt_count; /* 0 => preemptable, <0 => bug */ ++ mm_segment_t addr_limit; /* address limit */ ++ struct task_struct *task; /* main task structure */ ++ struct exec_domain *exec_domain; /* execution domain */ ++ __u32 cpu; /* cpu */ ++ struct cpu_context_save* sp_save; /* cpu context */ ++ struct restart_block restart_block; ++}; ++ ++#define INIT_THREAD_INFO(tsk) \ ++{ \ ++ .task = &tsk, \ ++ .exec_domain = &default_exec_domain, \ ++ .flags = 0, \ ++ .cpu = 0, \ ++ .preempt_count = 1, \ ++ .addr_limit = KERNEL_DS, \ ++ .restart_block = { \ ++ .fn = do_no_restart_syscall, \ ++ }, \ ++} ++ ++#define init_thread_info (init_thread_union.thread_info) ++#define init_stack (init_thread_union.stack) ++ ++ ++/* ++ * how to get the thread information struct from C ++ */ ++static inline struct thread_info *current_thread_info(void) __attribute_const__; ++ ++static inline struct thread_info *current_thread_info(void) ++{ ++ register unsigned long sp asm ("$sp"); //M Tom asm -> __asm__ __volatile__ ++ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); ++} ++ ++#define get_thread_info(ti) get_task_struct((ti)->task) ++#define put_thread_info(ti) put_task_struct((ti)->task) ++ ++#define thread_saved_pc(tsk) \ ++ ((unsigned long)(task_thread_info(tsk)->sp_save->pc)) ++#define thread_saved_fp(tsk) \ ++ ((unsigned long)(task_thread_info(tsk)->sp_save->fp)) ++#endif ++ ++#define THREAD_SIZE_ORDER (1) ++/* ++ * We use bit 30 of the preempt_count to indicate that kernel ++ * preemption is occuring. See include/asm-arm/hardirq.h. ++ */ ++#define PREEMPT_ACTIVE 0x40000000 ++ ++/* ++ * thread information flags: ++ * TIF_SYSCALL_TRACE - syscall trace active ++ * TIF_NOTIFY_RESUME - resumption notification requested ++ * TIF_SIGPENDING - signal pending ++ * TIF_NEED_RESCHED - rescheduling necessary ++ * TIF_USEDFPU - FPU was used by this task this quantum (SMP) ++ * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED ++ * TIF_USEDAUDIO - Audio has been used by this task and no need to init the regs ++ */ ++#define TIF_SIGPENDING 1 ++#define TIF_NEED_RESCHED 2 ++#define TIF_SINGLESTEP 3 ++#define TIF_NOTIFY_RESUME 5 ++#define TIF_SYSCALL_TRACE 8 ++#define TIF_RESTORE_SIGMASK 9 ++#define TIF_USEDFPU 16 ++#define TIF_POLLING_NRFLAG 17 ++#define TIF_MEMDIE 18 ++#define TIF_FREEZE 19 ++#define TIF_USEDAUDIO 20 ++ ++#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) ++#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) ++#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) ++#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) ++#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) ++#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) ++#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) ++#define _TIF_FREEZE (1 << TIF_FREEZE) ++ ++/* ++ * Change these and you break ASM code in entry-common.S ++ */ ++#define _TIF_WORK_MASK 0x000000ff ++#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) ++#define _TIF_WORK_SYSCALL_LEAVE (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) ++ ++#endif /* __KERNEL__ */ ++#endif /* __ASM_NDS32_THREAD_INFO_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/timer.h linux-3.4.110/arch/nds32/include/asm/timer.h +--- linux-3.4.110.orig/arch/nds32/include/asm/timer.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/timer.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,94 @@ ++/* ++ * include/arch/nds32/include/asm/timer.h ++ * ++ * Faraday FTTMR010 Timer 
Device Driver Interface ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Note ++ * ++ * As IP_COUNT might be greater than one, timer ID is computed as follows: ++ * id=0~2 : Timer 1~3 of the first FTTMR010 IP ++ * id=3~5 : Timer 1~3 of the second FTTMR010 IP ++ * ... ++ * Therefore: ++ * (id / 3) : Compute which IP ++ * (id % 3) : Compute which timer in this IP ++ * Notice: ++ * For simplicity's sake, all code does not check for invalid timer id ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/14/2005 Created, heavily modified from Faraday CPE platform code. ++ */ ++ ++ ++#ifndef __FARADAY_TIMER_FTTMR010_HEADER__ ++#define __FARADAY_TIMER_FTTMR010_HEADER__ ++ ++/* ++ * Definition of register offsets ++ */ ++ ++#define TIMER1_COUNT 0x0 ++#define TIMER1_LOAD 0x4 ++#define TIMER1_MATCH1 0x8 ++#define TIMER1_MATCH2 0xC ++ ++#define TIMER2_COUNT 0x10 ++#define TIMER2_LOAD 0x14 ++#define TIMER2_MATCH1 0x18 ++#define TIMER2_MATCH2 0x1C ++ ++#define TIMER3_COUNT 0x20 ++#define TIMER3_LOAD 0x24 ++#define TIMER3_MATCH1 0x28 ++#define TIMER3_MATCH2 0x2C ++ ++#define TIMER_TMCR 0x30 ++#define TIMER_INTRSTATE 0x34 ++#define TIMER_INTRMASK 0x38 ++ ++/* Each timer's register address is offset by 0x10 */ ++#define TIMER_OFFSET 0x10 ++ ++/* ++ * Definition of TMCR bits ++ */ ++ ++#define TM1ENABLE 1 ++#define TM1CLOCK (1<<1) ++#define TM1OFENABLE (1<<2) ++ ++#define TM2ENABLE (1<<3) ++#define TM2CLOCK (1<<4) ++#define TM2OFENABLE (1<<5) ++ ++#define TM3ENABLE (1<<6) ++#define TM3CLOCK (1<<7) ++#define TM3OFENABLE (1<<8) ++ ++#define TM1UPDOWN (1<<9) ++#define TM2UPDOWN (1<<10) ++#define TM3UPDOWN (1<<11) ++ ++ ++#define TM1MATCH1 (1 << 0) ++#define TM1MATCH2 (1 << 1) ++#define TM1OVERFLOW (1 << 2) ++#define TM2MATCH1 (1 << 3) ++#define TM2MATCH2 (1 << 4) ++#define TM2OVERFLOW (1 << 5) ++#define TM3MATCH1 (1 << 6) ++#define TM3MATCH2 (1 << 7) ++#define TM3OVERFLOW (1 << 8) ++ ++struct sys_timer; ++extern struct sys_timer platform_timer; ++ ++#endif // __FARADAY_TIMER_FTTMR010_HEADER__ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/timex.h linux-3.4.110/arch/nds32/include/asm/timex.h +--- linux-3.4.110.orig/arch/nds32/include/asm/timex.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/timex.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/include/asm/timex.h ++ * ++ * Copyright (C) 1997,1998 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * Architecture Specific TIME specifications ++ */ ++#ifndef _ASMNDS32_TIMEX_H ++#define _ASMNDS32_TIMEX_H ++ ++#ifndef __FARADAY_PLATFORM_INDEPENDENT_TIMEX_HEADER__ ++#define __FARADAY_PLATFORM_INDEPENDENT_TIMEX_HEADER__ ++ ++#include ++ ++#ifndef CLOCK_TICK_RATE ++#define CLOCK_TICK_RATE (TIMER_CLK_IN) ++#endif ++ ++#endif /* __FARADAY_PLATFORM_INDEPENDENT_TIMEX_HEADER__ */ ++ ++typedef unsigned long cycles_t; ++ ++extern cycles_t cacheflush_time; ++ ++static inline cycles_t get_cycles (void) ++{ ++ return 0; ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/tlbflush.h linux-3.4.110/arch/nds32/include/asm/tlbflush.h +--- linux-3.4.110.orig/arch/nds32/include/asm/tlbflush.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/tlbflush.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,80 @@ ++/* ++ * linux/arch/nds32/include/asm/tlbflush.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_TLBFLUSH_H ++#define _ASMNDS32_TLBFLUSH_H ++ ++#include ++#include ++#include ++ ++static inline void local_flush_tlb_all(void) ++{ ++ asm("tlbop FLUA\n"); ++ __nds32__isb(); ++} ++static inline void local_flush_tlb_mm(struct mm_struct *mm) ++{ ++ asm("tlbop FLUA\n"); ++ __nds32__isb(); ++} ++static inline void local_flush_tlb_kernel_range(unsigned long start, ++ unsigned long end) ++{ ++ while(start < end) { ++ asm("tlbop %0, INV"::"r" (start)); ++ __nds32__isb(); ++ start += PAGE_SIZE; ++ } ++} ++ ++#ifndef CONFIG_CPU_NO_CONTEXT_ID ++void local_flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end); ++void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); ++#else ++static inline void local_flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ if ((end - start) > 0x400000) { ++ asm("tlbop FLUA"); ++ __nds32__isb(); ++ return ; ++ } ++ while(start < end) { ++ asm("tlbop %0, INV"::"r" (start)); ++ __nds32__isb(); ++ start += PAGE_SIZE; ++ } ++} ++ ++static inline void local_flush_tlb_page(struct vm_area_struct *vma, ++ unsigned long addr) ++{ ++ asm("tlbop %0, INV"::"r" (addr)); ++ __nds32__isb(); ++} ++#endif ++ ++#ifndef CONFIG_SMP ++#define flush_tlb_all local_flush_tlb_all ++#define flush_tlb_mm local_flush_tlb_mm ++#define flush_tlb_range local_flush_tlb_range ++#define flush_tlb_page local_flush_tlb_page ++#define flush_tlb_kernel_range local_flush_tlb_kernel_range ++#else ++void flush_tlb_all(void); ++void flush_tlb_mm(struct mm_struct *mm); ++void flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end); ++void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); ++void flush_tlb_kernel_range(unsigned long start, unsigned long end); ++#endif ++ ++void update_mmu_cache(struct vm_area_struct *vma, ++ unsigned long address, pte_t* pte); ++void tlb_migrate_finish(struct mm_struct *mm); ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/tlb.h linux-3.4.110/arch/nds32/include/asm/tlb.h +--- linux-3.4.110.orig/arch/nds32/include/asm/tlb.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/tlb.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,30 @@ ++/* ++ * linux/arch/nds32/include/asm/tlb.h ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++ ++#ifndef __ASMNDS32_TLB_H ++#define __ASMNDS32_TLB_H ++ ++#define tlb_start_vma(tlb,vma) \ ++ do { \ ++ if (!tlb->fullmm) \ ++ flush_cache_range(vma, vma->vm_start, vma->vm_end); \ ++ } while (0) ++ 
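/*
 * A minimal illustrative sketch (hypothetical caller, not taken from the
 * nds32 patch above): the tlbflush.h helpers just defined invalidate one
 * page at a time with "tlbop INV" followed by an isb, and the
 * CONFIG_CPU_NO_CONTEXT_ID variant of local_flush_tlb_range falls back to
 * a full "tlbop FLUA" flush once a range exceeds 0x400000 bytes.  Code that
 * rewrites a handful of kernel mappings would typically finish with the
 * ranged kernel helper, roughly like this (remap_demo, va and npages are
 * made-up names):
 */
static void remap_demo(unsigned long va, unsigned long npages)
{
	/* ... update the kernel page tables for [va, va + npages * PAGE_SIZE) ... */

	/* drop any stale translations for the touched pages */
	flush_tlb_kernel_range(va, va + npages * PAGE_SIZE);
}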
++#define tlb_end_vma(tlb,vma) \ ++ do { \ ++ if(!tlb->fullmm) \ ++ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ ++ } while (0) ++ ++#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) ++ ++#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) ++ ++#include ++ ++#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) ++#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tln)->mm, pmd) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/topology.h linux-3.4.110/arch/nds32/include/asm/topology.h +--- linux-3.4.110.orig/arch/nds32/include/asm/topology.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/topology.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,11 @@ ++/* ++ * linux/arch/nds32/include/asm/topology.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASM_NDS32_TOPOLOGY_H ++#define _ASM_NDS32_TOPOLOGY_H ++ ++#include ++ ++#endif /* _ASM_NDS32_TOPOLOGY_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/traps.h linux-3.4.110/arch/nds32/include/asm/traps.h +--- linux-3.4.110.orig/arch/nds32/include/asm/traps.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/traps.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/include/asm/traps.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_TRAP_H ++#define _ASMNDS32_TRAP_H ++ ++#include ++ ++struct undef_hook { ++ struct list_head node; ++ u32 instr_mask; ++ u32 instr_val; ++ u32 cpsr_mask; ++ u32 cpsr_val; ++ int (*fn)(struct pt_regs *regs, unsigned int instr); ++}; ++ ++void register_undef_hook(struct undef_hook *hook); ++void unregister_undef_hook(struct undef_hook *hook); ++ ++extern void __init early_trap_init(void); ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/types.h linux-3.4.110/arch/nds32/include/asm/types.h +--- linux-3.4.110.orig/arch/nds32/include/asm/types.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/types.h 2016-04-07 10:20:50.918080096 +0200 +@@ -0,0 +1,16 @@ ++#ifndef __ASM_NDS32_TYPES_H ++#define __ASM_NDS32_TYPES_H ++ ++#include ++ ++/* ++ * These aren't exported outside the kernel to avoid name space clashes ++ */ ++#ifdef __KERNEL__ ++ ++#define BITS_PER_LONG 32 ++ ++#endif /* __KERNEL__ */ ++ ++#endif ++ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/uaccess.h linux-3.4.110/arch/nds32/include/asm/uaccess.h +--- linux-3.4.110.orig/arch/nds32/include/asm/uaccess.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/uaccess.h 2016-04-07 10:20:50.934080715 +0200 +@@ -0,0 +1,428 @@ ++/* ++ * linux/arch/nds32/include/asm/uaccess.h ++ * ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _ASMANDES_UACCESS_H ++#define _ASMANDES_UACCESS_H ++ ++/* ++ * User space memory access functions ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define VERIFY_READ 0 ++#define VERIFY_WRITE 1 ++ ++/* ++ * The exception table consists of pairs of addresses: the first is the ++ * address of an instruction that is allowed to fault, and the second is ++ * the address at which the program should continue. No registers are ++ * modified, so it is entirely up to the continuation code to figure out ++ * what to do. 
++ * ++ * All the routines below use bits of fixup code that are out of line ++ * with the main instruction path. This means when everything is well, ++ * we don't even have to jump over them. Further, they do not intrude ++ * on our cache or tlb entries. ++ */ ++ ++struct exception_table_entry ++{ ++ unsigned long insn, fixup; ++}; ++ ++extern int fixup_exception(struct pt_regs *regs); ++ ++#define KERNEL_DS ((mm_segment_t) { ~0UL }) ++#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) ++ ++#define get_ds() (KERNEL_DS) ++#define get_fs() (current_thread_info()->addr_limit) ++ ++static inline void set_fs (mm_segment_t fs) ++{ ++ current_thread_info()->addr_limit = fs; ++} ++ ++#define segment_eq(a, b) ((a.seg) == (b.seg)) ++ ++#define __range_ok(addr, size) (size <= get_fs().seg && addr <= (get_fs().seg -size)) ++ ++#define access_ok(type, addr, size) \ ++ __range_ok((unsigned long)addr, (unsigned long)size) ++/* ++ * Single-value transfer routines. They automatically use the right ++ * size if we just have the right pointer type. Note that the functions ++ * which read from user space (*get_*) need to take care not to leak ++ * kernel data even if the calling code is buggy and fails to check ++ * the return value. This means zeroing out the destination variable ++ * or buffer on error. Normally this is done out of line by the ++ * fixup code, but there are a few places where it intrudes on the ++ * main code path. When we only write to user space, there is no ++ * problem. ++ * ++ * The "__xxx" versions of the user access functions do not verify the ++ * address space - it must have been done previously with a separate ++ * "access_ok()" call. ++ * ++ * The "xxx_error" versions set the third argument to EFAULT if an ++ * error occurs, and leave it unchanged on success. Note that these ++ * versions are void (ie, don't return a value as such). ++ */ ++ ++extern int __get_user_1(void *); ++extern int __get_user_2(void *); ++extern int __get_user_4(void *); ++extern int __get_user_8(void *); ++extern int __get_user_bad(void); ++ ++#define __get_user_x(__r2,__p,__e,__s,__i...) 
\ ++ __asm__ __volatile__ ( \ ++ __asmeq("%0", "$r0") __asmeq("%1", "$r2") \ ++ "bal __get_user_" #__s \ ++ : "=&r" (__e), "=r" (__r2) \ ++ : "0" (__p) \ ++ : __i, "cc") ++ ++#define get_user(x,p) \ ++ ({ \ ++ const register typeof(*(p)) __user *__p asm("$r0") = (p);\ ++ register unsigned long __r2 asm("$r2"); \ ++ register int __e asm("$r0"); \ ++ switch (sizeof(*(__p))) { \ ++ case 1: \ ++ __get_user_x(__r2, __p, __e, 1, "$lp"); \ ++ break; \ ++ case 2: \ ++ __get_user_x(__r2, __p, __e, 2, "$r3", "$lp"); \ ++ break; \ ++ case 4: \ ++ __get_user_x(__r2, __p, __e, 4, "$lp"); \ ++ break; \ ++ case 8: \ ++ __get_user_x(__r2, __p, __e, 8, "$lp"); \ ++ break; \ ++ default: __e = __get_user_bad(); break; \ ++ } \ ++ x = (typeof(*(p))) __r2; \ ++ __e; \ ++ }) ++ ++#define __get_user(x,ptr) \ ++({ \ ++ long __gu_err = 0; \ ++ __get_user_err((x),(ptr),__gu_err); \ ++ __gu_err; \ ++}) ++ ++#define __get_user_error(x,ptr,err) \ ++({ \ ++ __get_user_err((x),(ptr),err); \ ++ (void) 0; \ ++}) ++ ++#define __get_user_err(x,ptr,err) \ ++do { \ ++ unsigned long __gu_addr = (unsigned long)(ptr); \ ++ unsigned long __gu_val; \ ++ __chk_user_ptr(ptr); \ ++ switch (sizeof(*(ptr))) { \ ++ case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ ++ case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ ++ case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ ++ default: (__gu_val) = __get_user_bad(); \ ++ } \ ++ (x) = (__typeof__(*(ptr)))__gu_val; \ ++} while (0) ++ ++#define __get_user_asm_byte(x,addr,err) \ ++ __asm__ __volatile__( \ ++ "1: lbi %1,[%2]\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .align 2\n" \ ++ "3: move %0, %3\n" \ ++ " move %1, #0\n" \ ++ " b 2b\n" \ ++ " .previous\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 3\n" \ ++ " .long 1b, 3b\n" \ ++ " .previous" \ ++ : "+r" (err), "=&r" (x) \ ++ : "r" (addr), "i" (-EFAULT) \ ++ : "cc") ++ ++#ifndef __NDS32_EB__ ++#define __get_user_asm_half(x,__gu_addr,err) \ ++({ \ ++ unsigned long __b1, __b2; \ ++ __get_user_asm_byte(__b1, __gu_addr, err); \ ++ __get_user_asm_byte(__b2, __gu_addr + 1, err); \ ++ (x) = __b1 | (__b2 << 8); \ ++}) ++#else ++#define __get_user_asm_half(x,__gu_addr,err) \ ++({ \ ++ unsigned long __b1, __b2; \ ++ __get_user_asm_byte(__b1, __gu_addr, err); \ ++ __get_user_asm_byte(__b2, __gu_addr + 1, err); \ ++ (x) = (__b1 << 8) | __b2; \ ++}) ++#endif ++ ++#define __get_user_asm_word(x,addr,err) \ ++ __asm__ __volatile__( \ ++ "1: lwi %1,[%2]\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .align 2\n" \ ++ "3: move %0, %3\n" \ ++ " move %1, #0\n" \ ++ " b 2b\n" \ ++ " .previous\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 3\n" \ ++ " .long 1b, 3b\n" \ ++ " .previous" \ ++ : "+r" (err), "=&r" (x) \ ++ : "r" (addr), "i" (-EFAULT) \ ++ : "cc") ++ ++extern int __put_user_1(void *, unsigned int); ++extern int __put_user_2(void *, unsigned int); ++extern int __put_user_4(void *, unsigned int); ++extern int __put_user_8(void *, unsigned long long); ++extern int __put_user_bad(void); ++ ++#ifdef _GCC444 ++#define __put_user_x(__r2,__p,__e,__s) \ ++ __asm__ __volatile__ ( \ ++ __asmeq("%0", "$r0") __asmeq("%2", "$r2") \ ++ "bal __put_user_" #__s \ ++ : "=&r" (__e) \ ++ : "0" (__p), "r" (__r2) \ ++ : "$p0", "$lp", "cc") ++#else ++#define __put_user_x(__r2,__p,__e,__s) \ ++ __asm__ __volatile__ ( \ ++ __asmeq("%0", "$r0") __asmeq("%2", "$r2") \ ++ "bal __put_user_" #__s \ ++ : "=&r" (__e) \ ++ : "0" (__p), "r" (__r2) \ ++ : "$r26", "$lp", "cc") ++#endif ++ ++#define put_user(x,p) \ ++ ({ \ ++ 
const register typeof(*(p)) __r2 asm("$r2") = (x); \ ++ const register typeof(*(p)) __user *__p asm("$r0") = (p);\ ++ register int __e asm("$r0"); \ ++ switch (sizeof(*(__p))) { \ ++ case 1: \ ++ __put_user_x(__r2, __p, __e, 1); \ ++ break; \ ++ case 2: \ ++ __put_user_x(__r2, __p, __e, 2); \ ++ break; \ ++ case 4: \ ++ __put_user_x(__r2, __p, __e, 4); \ ++ break; \ ++ case 8: \ ++ __put_user_x(__r2, __p, __e, 8); \ ++ break; \ ++ default: __e = __put_user_bad(); break; \ ++ } \ ++ __e; \ ++ }) ++ ++#define __put_user(x,ptr) \ ++({ \ ++ long __pu_err = 0; \ ++ __put_user_err((x),(ptr),__pu_err); \ ++ __pu_err; \ ++}) ++ ++#define __put_user_error(x,ptr,err) \ ++({ \ ++ __put_user_err((x),(ptr),err); \ ++ (void) 0; \ ++}) ++ ++#define __put_user_err(x,ptr,err) \ ++do { \ ++ unsigned long __pu_addr = (unsigned long)(ptr); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __chk_user_ptr(ptr); \ ++ switch (sizeof(*(ptr))) { \ ++ case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ ++ case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ ++ case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ ++ case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ ++ default: __put_user_bad(); \ ++ } \ ++} while (0) ++ ++#define __put_user_asm_byte(x,__pu_addr,err) \ ++ __asm__ __volatile__( \ ++ "1: sbi %1,[%2]\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .align 2\n" \ ++ "3: move %0, %3\n" \ ++ " b 2b\n" \ ++ " .previous\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 3\n" \ ++ " .long 1b, 3b\n" \ ++ " .previous" \ ++ : "+r" (err) \ ++ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ ++ : "cc") ++ ++#ifndef __NDS32_EB__ ++#define __put_user_asm_half(x,__pu_addr,err) \ ++({ \ ++ unsigned long __temp = (unsigned long)(x); \ ++ __put_user_asm_byte(__temp, __pu_addr, err); \ ++ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ ++}) ++#else ++#define __put_user_asm_half(x,__pu_addr,err) \ ++({ \ ++ unsigned long __temp = (unsigned long)(x); \ ++ __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ ++ __put_user_asm_byte(__temp, __pu_addr + 1, err); \ ++}) ++#endif ++ ++#define __put_user_asm_word(x,__pu_addr,err) \ ++ __asm__ __volatile__( \ ++ "1: swi %1,[%2]\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .align 2\n" \ ++ "3: move %0, %3\n" \ ++ " b 2b\n" \ ++ " .previous\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 3\n" \ ++ " .long 1b, 3b\n" \ ++ " .previous" \ ++ : "+r" (err) \ ++ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ ++ : "cc") ++ ++#ifdef __NDS32_EB__ ++#define __reg_oper0 "%H2" ++#define __reg_oper1 "%L2" ++#else ++#define __reg_oper0 "%L2" ++#define __reg_oper1 "%H2" ++#endif ++ ++#define __put_user_asm_dword(x, __pu_addr, __pu_err) \ ++ __asm__ __volatile__ ( \ ++ "\n1:\tswi " __reg_oper0 ",[%1]\n" \ ++ "\n2:\tswi " __reg_oper1 ",[%1+4]\n" \ ++ "3:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .align 2\n" \ ++ "4: move %0, %3\n" \ ++ " b 3b\n" \ ++ " .previous\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 3\n" \ ++ " .long 1b, 4b\n" \ ++ " .long 2b, 4b\n" \ ++ " .previous" \ ++ : "+r"(__pu_err) \ ++ : "r"(__pu_addr), "r"(x), "i"(-EFAULT) \ ++ : "cc") ++extern unsigned long __arch_copy_from_user(void *to, const void __user *from, unsigned long n); ++extern unsigned long __arch_copy_to_user(void __user *to, const void *from, unsigned long n); ++extern unsigned long __arch_clear_user(void __user *addr, unsigned long n); ++extern unsigned long __arch_strncpy_from_user(char *to, const char __user *from, unsigned long count); ++extern 
unsigned long __arch_strnlen_user(const char __user *s, long n); ++ ++static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if (access_ok(VERIFY_READ, from, n)) ++ n = __arch_copy_from_user(to, from, n); ++ else /* security hole - plug it */ ++ memzero(to, n); ++ return n; ++} ++ ++static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ return __arch_copy_from_user(to, from, n); ++} ++ ++static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if (access_ok(VERIFY_WRITE, to, n)) ++ n = __arch_copy_to_user(to, from, n); ++ return n; ++} ++ ++static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ return __arch_copy_to_user(to, from, n); ++} ++ ++#define __copy_to_user_inatomic __copy_to_user ++#define __copy_from_user_inatomic __copy_from_user ++ ++static inline unsigned long clear_user (void __user *to, unsigned long n) ++{ ++ if (access_ok(VERIFY_WRITE, to, n)) ++ n = __arch_clear_user(to, n); ++ return n; ++} ++ ++static inline unsigned long __clear_user (void __user *to, unsigned long n) ++{ ++ return __arch_clear_user(to, n); ++} ++ ++/* ++ * We check the flags of vma here before __arch_strncpy_from_user(). ++ * An alternative way to do it is using lwup instruction in __arch_strncpy_from_user(). ++ * TODO: Should perform performance evaluation of the two. ++ */ ++static inline long strncpy_from_user (char *dst, const char __user *src, long count) ++{ ++ long res = -EFAULT; ++ if (access_ok(VERIFY_READ, src, 1)) ++ res = __arch_strncpy_from_user(dst, src, count); ++ return res; ++} ++ ++static inline long __strncpy_from_user (char *dst, const char __user *src, long count) ++{ ++ return __arch_strncpy_from_user(dst, src, count); ++} ++ ++#define strlen_user(s) strnlen_user(s, ~0UL >> 1) ++ ++static inline long strnlen_user(const char __user *s, long n) ++{ ++ unsigned long res = 0; ++ ++ if (segment_eq(get_fs(),KERNEL_DS) || ((unsigned long)s < get_fs().seg)) ++ res = __arch_strnlen_user(s, n); ++ ++ return res; ++} ++#endif /* _ASMNDS32_UACCESS_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/ucontext.h linux-3.4.110/arch/nds32/include/asm/ucontext.h +--- linux-3.4.110.orig/arch/nds32/include/asm/ucontext.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/ucontext.h 2016-04-07 10:20:50.934080715 +0200 +@@ -0,0 +1,17 @@ ++/* ++ * linux/arch/nds32/include/asm/ucontext.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _ASMNDS32_UCONTEXT_H ++#define _ASMNDS32_UCONTEXT_H ++ ++struct ucontext { ++ unsigned long uc_flags; ++ struct ucontext *uc_link; ++ stack_t uc_stack; ++ struct sigcontext uc_mcontext; ++ sigset_t uc_sigmask; /* mask last for extensibility */ ++}; ++ ++#endif /* !_ASMNDS32_UCONTEXT_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/unaligned.h linux-3.4.110/arch/nds32/include/asm/unaligned.h +--- linux-3.4.110.orig/arch/nds32/include/asm/unaligned.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/unaligned.h 2016-04-07 10:20:50.934080715 +0200 +@@ -0,0 +1,24 @@ ++/* ++ * linux/arch/nds32/include/asm/unaligned.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef __ASM_NDS32_UNALIGNED_H ++#define __ASM_NDS32_UNALIGNED_H ++ ++#include ++#include ++#include ++ ++/* ++ * Select endianness ++ */ ++#if defined(__NDS32_EB__) ++#define get_unaligned __get_unaligned_be ++#define 
put_unaligned __put_unaligned_be ++#else ++#define get_unaligned __get_unaligned_le ++#define put_unaligned __put_unaligned_le ++#endif ++ ++#endif /* __ASM_NDS32_UNALIGNED_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/uncompress.h linux-3.4.110/arch/nds32/include/asm/uncompress.h +--- linux-3.4.110.orig/arch/nds32/include/asm/uncompress.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/uncompress.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,65 @@ ++/* ++ * linux/arch/nds32/include/asm/uncompress.h ++ * ++ * Faraday Linux Boot Loader UART (FTUART010) Routines ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * Note ++ * ++ * The first UART (FTUART010) in the system is used for dumping debug messages. ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/21/2005 Created. Heavily modified from Faraday CPE port. ++ */ ++ ++#include ++ ++#ifdef CONFIG_PLAT_AG102 ++#define UART_PA_BASE 0x94200000 ++#else ++#define UART_PA_BASE 0x99600000 ++#endif ++ ++#define arch_decomp_setup() ++#define arch_decomp_wdog() ++ ++#ifndef STANDALONE_DEBUG ++#define putstr debug_puts ++#endif ++ ++#define SERIAL_THR 0x00 ++#define SERIAL_LSR 0x14 ++#define SERIAL_LSR_THRE 0x20 ++ ++static inline void uncompress_puts(const char *s) ++{ ++ volatile unsigned *status = (volatile unsigned *)(UART_PA_BASE+SERIAL_LSR); ++ while (*s) { ++ while ((*status & SERIAL_LSR_THRE)==0); ++ ++ *(volatile unsigned*)(UART_PA_BASE+SERIAL_THR) = (unsigned)*s; ++ ++ if (*s == '\n') { ++ while ((*status & SERIAL_LSR_THRE)==0); ++ *(volatile unsigned*)(UART_PA_BASE+SERIAL_THR) = '\r'; ++ } ++ s++; ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/unistd.h linux-3.4.110/arch/nds32/include/asm/unistd.h +--- linux-3.4.110.orig/arch/nds32/include/asm/unistd.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/unistd.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,468 @@ ++/* ++ * linux/arch/nds32/include/asm/unistd.h ++ * ++ * Copyright (C) 2001-2003 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Please forward _all_ changes to this file to rmk@arm.linux.org.uk, ++ * no matter what the change is. Thanks! ++ */ ++/* ============================================================================ ++ * ++ * linux/arch/nds32/include/asm/unistd.h ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. 
++ * ++ * Abstract: ++ * ++ * This program is syscall scheme for Andes NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Jul.07.2007 Original from Shawn and Tom, refined by Harry. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++ ++#ifndef __ASM_NDS32_UNISTD_H ++#define __ASM_NDS32_UNISTD_H ++ ++#define __NR_SYSCALL_BASE 0x5000 ++#define __NR_NDS32_BASE 0x7000 ++/* ++ * This file contains the system call numbers. ++ */ ++ ++#define __NR_restart_syscall (__NR_SYSCALL_BASE+ 0) ++#define __NR_exit (__NR_SYSCALL_BASE+ 1) ++#define __NR_fork (__NR_SYSCALL_BASE+ 2) ++#define __NR_read (__NR_SYSCALL_BASE+ 3) ++#define __NR_write (__NR_SYSCALL_BASE+ 4) ++#define __NR_open (__NR_SYSCALL_BASE+ 5) ++#define __NR_close (__NR_SYSCALL_BASE+ 6) ++#define __NR_waitpid (__NR_SYSCALL_BASE+ 7) ++ /* 7 was sys_waitpid */ ++#define __NR_creat (__NR_SYSCALL_BASE+ 8) ++#define __NR_link (__NR_SYSCALL_BASE+ 9) ++#define __NR_unlink (__NR_SYSCALL_BASE+ 10) ++#define __NR_execve (__NR_SYSCALL_BASE+ 11) ++#define __NR_chdir (__NR_SYSCALL_BASE+ 12) ++#define __NR_time (__NR_SYSCALL_BASE+ 13) ++#define __NR_mknod (__NR_SYSCALL_BASE+ 14) ++#define __NR_chmod (__NR_SYSCALL_BASE+ 15) ++#define __NR_lchown (__NR_SYSCALL_BASE+ 16) ++ /* 17 was sys_break */ ++ /* 18 was sys_oldstat */ ++#define __NR_lseek (__NR_SYSCALL_BASE+ 19) ++#define __NR_getpid (__NR_SYSCALL_BASE+ 20) ++#define __NR_mount (__NR_SYSCALL_BASE+ 21) ++#define __NR_umount (__NR_SYSCALL_BASE+ 22) ++#define __NR_setuid (__NR_SYSCALL_BASE+ 23) ++#define __NR_getuid (__NR_SYSCALL_BASE+ 24) ++#define __NR_stime (__NR_SYSCALL_BASE+ 25) ++#define __NR_ptrace (__NR_SYSCALL_BASE+ 26) ++#define __NR_alarm (__NR_SYSCALL_BASE+ 27) ++ /* 28 was sys_oldfstat */ ++#define __NR_pause (__NR_SYSCALL_BASE+ 29) ++#define __NR_utime (__NR_SYSCALL_BASE+ 30) ++ /* 31 was sys_stty */ ++ /* 32 was sys_gtty */ ++#define __NR_access (__NR_SYSCALL_BASE+ 33) ++#define __NR_nice (__NR_SYSCALL_BASE+ 34) ++ /* 35 was sys_ftime */ ++#define __NR_sync (__NR_SYSCALL_BASE+ 36) ++#define __NR_kill (__NR_SYSCALL_BASE+ 37) ++#define __NR_rename (__NR_SYSCALL_BASE+ 38) ++#define __NR_mkdir (__NR_SYSCALL_BASE+ 39) ++#define __NR_rmdir (__NR_SYSCALL_BASE+ 40) ++#define __NR_dup (__NR_SYSCALL_BASE+ 41) ++#define __NR_pipe (__NR_SYSCALL_BASE+ 42) ++#define __NR_times (__NR_SYSCALL_BASE+ 43) ++ /* 44 was sys_prof */ ++#define __NR_brk (__NR_SYSCALL_BASE+ 45) ++#define __NR_setgid (__NR_SYSCALL_BASE+ 46) ++#define __NR_getgid (__NR_SYSCALL_BASE+ 47) ++ /* 48 was sys_signal */ ++#define __NR_geteuid (__NR_SYSCALL_BASE+ 49) ++#define __NR_getegid (__NR_SYSCALL_BASE+ 50) ++#define __NR_acct (__NR_SYSCALL_BASE+ 51) ++#define __NR_umount2 (__NR_SYSCALL_BASE+ 52) ++ /* 53 was sys_lock */ ++#define __NR_ioctl (__NR_SYSCALL_BASE+ 54) ++#define __NR_fcntl (__NR_SYSCALL_BASE+ 55) ++ /* 56 was sys_mpx */ ++#define __NR_setpgid (__NR_SYSCALL_BASE+ 57) ++ /* 58 was sys_ulimit */ ++ /* 59 was sys_olduname */ ++#define __NR_umask (__NR_SYSCALL_BASE+ 60) ++#define __NR_chroot (__NR_SYSCALL_BASE+ 61) ++#define __NR_ustat (__NR_SYSCALL_BASE+ 62) ++#define __NR_dup2 (__NR_SYSCALL_BASE+ 63) ++#define __NR_getppid (__NR_SYSCALL_BASE+ 64) ++#define __NR_getpgrp (__NR_SYSCALL_BASE+ 65) ++#define __NR_setsid (__NR_SYSCALL_BASE+ 66) ++#define __NR_sigaction (__NR_SYSCALL_BASE+ 67) ++ /* 68 was sys_sgetmask */ ++ /* 69 was sys_ssetmask */ ++#define __NR_setreuid (__NR_SYSCALL_BASE+ 70) ++#define __NR_setregid (__NR_SYSCALL_BASE+ 71) ++#define __NR_sigsuspend 
(__NR_SYSCALL_BASE+ 72) ++#define __NR_sigpending (__NR_SYSCALL_BASE+ 73) ++#define __NR_sethostname (__NR_SYSCALL_BASE+ 74) ++#define __NR_setrlimit (__NR_SYSCALL_BASE+ 75) ++#define __NR_getrlimit (__NR_SYSCALL_BASE+ 76) /* Back compat 2GB limited rlimit */ ++#define __NR_getrusage (__NR_SYSCALL_BASE+ 77) ++#define __NR_gettimeofday (__NR_SYSCALL_BASE+ 78) ++#define __NR_settimeofday (__NR_SYSCALL_BASE+ 79) ++#define __NR_getgroups (__NR_SYSCALL_BASE+ 80) ++#define __NR_setgroups (__NR_SYSCALL_BASE+ 81) ++#define __NR_select (__NR_SYSCALL_BASE+ 82) ++#define __NR_symlink (__NR_SYSCALL_BASE+ 83) ++ /* 84 was sys_lstat */ ++#define __NR_readlink (__NR_SYSCALL_BASE+ 85) ++#define __NR_uselib (__NR_SYSCALL_BASE+ 86) ++#define __NR_swapon (__NR_SYSCALL_BASE+ 87) ++#define __NR_reboot (__NR_SYSCALL_BASE+ 88) ++#define __NR_readdir (__NR_SYSCALL_BASE+ 89) ++#define __NR_mmap (__NR_SYSCALL_BASE+ 90) ++#define __NR_munmap (__NR_SYSCALL_BASE+ 91) ++#define __NR_truncate (__NR_SYSCALL_BASE+ 92) ++#define __NR_ftruncate (__NR_SYSCALL_BASE+ 93) ++#define __NR_fchmod (__NR_SYSCALL_BASE+ 94) ++#define __NR_fchown (__NR_SYSCALL_BASE+ 95) ++#define __NR_getpriority (__NR_SYSCALL_BASE+ 96) ++#define __NR_setpriority (__NR_SYSCALL_BASE+ 97) ++ /* 98 was sys_profil */ ++#define __NR_statfs (__NR_SYSCALL_BASE+ 99) ++#define __NR_fstatfs (__NR_SYSCALL_BASE+100) ++ /* 101 was sys_ioperm */ ++#define __NR_socketcall (__NR_SYSCALL_BASE+102) ++#define __NR_syslog (__NR_SYSCALL_BASE+103) ++#define __NR_setitimer (__NR_SYSCALL_BASE+104) ++#define __NR_getitimer (__NR_SYSCALL_BASE+105) ++#define __NR_stat (__NR_SYSCALL_BASE+106) ++#define __NR_lstat (__NR_SYSCALL_BASE+107) ++#define __NR_fstat (__NR_SYSCALL_BASE+108) ++ /* 109 was sys_uname */ ++ /* 110 was sys_iopl */ ++#define __NR_vhangup (__NR_SYSCALL_BASE+111) ++ /* 112 was sys_idle */ ++#define __NR_syscall (__NR_SYSCALL_BASE+113) /* syscall to call a syscall! 
*/ ++#define __NR_wait4 (__NR_SYSCALL_BASE+114) ++#define __NR_swapoff (__NR_SYSCALL_BASE+115) ++#define __NR_sysinfo (__NR_SYSCALL_BASE+116) ++#define __NR_ipc (__NR_SYSCALL_BASE+117) ++#define __NR_fsync (__NR_SYSCALL_BASE+118) ++#define __NR_sigreturn (__NR_SYSCALL_BASE+119) ++#define __NR_clone (__NR_SYSCALL_BASE+120) ++#define __NR_setdomainname (__NR_SYSCALL_BASE+121) ++#define __NR_uname (__NR_SYSCALL_BASE+122) ++ /* 123 was sys_modify_ldt */ ++#define __NR_adjtimex (__NR_SYSCALL_BASE+124) ++#define __NR_mprotect (__NR_SYSCALL_BASE+125) ++#define __NR_sigprocmask (__NR_SYSCALL_BASE+126) ++ /* 127 was sys_create_module */ ++#define __NR_init_module (__NR_SYSCALL_BASE+128) ++#define __NR_delete_module (__NR_SYSCALL_BASE+129) ++ /* 130 was sys_get_kernel_syms */ ++#define __NR_quotactl (__NR_SYSCALL_BASE+131) ++#define __NR_getpgid (__NR_SYSCALL_BASE+132) ++#define __NR_fchdir (__NR_SYSCALL_BASE+133) ++#define __NR_bdflush (__NR_SYSCALL_BASE+134) ++#define __NR_sysfs (__NR_SYSCALL_BASE+135) ++#define __NR_personality (__NR_SYSCALL_BASE+136) ++ /* 137 was sys_afs_syscall */ ++#define __NR_setfsuid (__NR_SYSCALL_BASE+138) ++#define __NR_setfsgid (__NR_SYSCALL_BASE+139) ++#define __NR__llseek (__NR_SYSCALL_BASE+140) ++#define __NR_getdents (__NR_SYSCALL_BASE+141) ++#define __NR__newselect (__NR_SYSCALL_BASE+142) ++#define __NR_flock (__NR_SYSCALL_BASE+143) ++#define __NR_msync (__NR_SYSCALL_BASE+144) ++#define __NR_readv (__NR_SYSCALL_BASE+145) ++#define __NR_writev (__NR_SYSCALL_BASE+146) ++#define __NR_getsid (__NR_SYSCALL_BASE+147) ++#define __NR_fdatasync (__NR_SYSCALL_BASE+148) ++#define __NR__sysctl (__NR_SYSCALL_BASE+149) ++#define __NR_mlock (__NR_SYSCALL_BASE+150) ++#define __NR_munlock (__NR_SYSCALL_BASE+151) ++#define __NR_mlockall (__NR_SYSCALL_BASE+152) ++#define __NR_munlockall (__NR_SYSCALL_BASE+153) ++#define __NR_sched_setparam (__NR_SYSCALL_BASE+154) ++#define __NR_sched_getparam (__NR_SYSCALL_BASE+155) ++#define __NR_sched_setscheduler (__NR_SYSCALL_BASE+156) ++#define __NR_sched_getscheduler (__NR_SYSCALL_BASE+157) ++#define __NR_sched_yield (__NR_SYSCALL_BASE+158) ++#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE+159) ++#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE+160) ++#define __NR_sched_rr_get_interval (__NR_SYSCALL_BASE+161) ++#define __NR_nanosleep (__NR_SYSCALL_BASE+162) ++#define __NR_mremap (__NR_SYSCALL_BASE+163) ++#define __NR_setresuid (__NR_SYSCALL_BASE+164) ++#define __NR_getresuid (__NR_SYSCALL_BASE+165) ++#define __NR_getpagesize (__NR_SYSCALL_BASE+166) ++ /* 167 was sys_query_module */ ++#define __NR_poll (__NR_SYSCALL_BASE+168) ++#define __NR_nfsservctl (__NR_SYSCALL_BASE+169) ++#define __NR_setresgid (__NR_SYSCALL_BASE+170) ++#define __NR_getresgid (__NR_SYSCALL_BASE+171) ++#define __NR_prctl (__NR_SYSCALL_BASE+172) ++#define __NR_rt_sigreturn (__NR_SYSCALL_BASE+173) ++#define __NR_rt_sigaction (__NR_SYSCALL_BASE+174) ++#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE+175) ++#define __NR_rt_sigpending (__NR_SYSCALL_BASE+176) ++#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE+177) ++#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE+178) ++#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE+179) ++#define __NR_pread64 (__NR_SYSCALL_BASE+180) ++#define __NR_pwrite64 (__NR_SYSCALL_BASE+181) ++#define __NR_chown (__NR_SYSCALL_BASE+182) ++#define __NR_getcwd (__NR_SYSCALL_BASE+183) ++#define __NR_capget (__NR_SYSCALL_BASE+184) ++#define __NR_capset (__NR_SYSCALL_BASE+185) ++#define __NR_sigaltstack (__NR_SYSCALL_BASE+186) ++#define __NR_sendfile 
(__NR_SYSCALL_BASE+187) ++ /* 188 reserved */ ++ /* 189 reserved */ ++#define __NR_vfork (__NR_SYSCALL_BASE+190) ++#define __NR_ugetrlimit (__NR_SYSCALL_BASE+191) /* SuS compliant getrlimit */ ++#define __NR_mmap2 (__NR_SYSCALL_BASE+192) ++#define __NR_truncate64 (__NR_SYSCALL_BASE+193) ++#define __NR_ftruncate64 (__NR_SYSCALL_BASE+194) ++#define __NR_stat64 (__NR_SYSCALL_BASE+195) ++#define __NR_lstat64 (__NR_SYSCALL_BASE+196) ++#define __NR_fstat64 (__NR_SYSCALL_BASE+197) ++#define __NR_lchown32 (__NR_SYSCALL_BASE+198) ++#define __NR_getuid32 (__NR_SYSCALL_BASE+199) ++#define __NR_getgid32 (__NR_SYSCALL_BASE+200) ++#define __NR_geteuid32 (__NR_SYSCALL_BASE+201) ++#define __NR_getegid32 (__NR_SYSCALL_BASE+202) ++#define __NR_setreuid32 (__NR_SYSCALL_BASE+203) ++#define __NR_setregid32 (__NR_SYSCALL_BASE+204) ++#define __NR_getgroups32 (__NR_SYSCALL_BASE+205) ++#define __NR_setgroups32 (__NR_SYSCALL_BASE+206) ++#define __NR_fchown32 (__NR_SYSCALL_BASE+207) ++#define __NR_setresuid32 (__NR_SYSCALL_BASE+208) ++#define __NR_getresuid32 (__NR_SYSCALL_BASE+209) ++#define __NR_setresgid32 (__NR_SYSCALL_BASE+210) ++#define __NR_getresgid32 (__NR_SYSCALL_BASE+211) ++#define __NR_chown32 (__NR_SYSCALL_BASE+212) ++#define __NR_setuid32 (__NR_SYSCALL_BASE+213) ++#define __NR_setgid32 (__NR_SYSCALL_BASE+214) ++#define __NR_setfsuid32 (__NR_SYSCALL_BASE+215) ++#define __NR_setfsgid32 (__NR_SYSCALL_BASE+216) ++#define __NR_getdents64 (__NR_SYSCALL_BASE+217) ++#define __NR_pivot_root (__NR_SYSCALL_BASE+218) ++#define __NR_mincore (__NR_SYSCALL_BASE+219) ++#define __NR_madvise (__NR_SYSCALL_BASE+220) ++#define __NR_fcntl64 (__NR_SYSCALL_BASE+221) ++ /* 222 for tux */ ++ /* 223 is unused */ ++#define __NR_gettid (__NR_SYSCALL_BASE+224) ++#define __NR_readahead (__NR_SYSCALL_BASE+225) ++#define __NR_setxattr (__NR_SYSCALL_BASE+226) ++#define __NR_lsetxattr (__NR_SYSCALL_BASE+227) ++#define __NR_fsetxattr (__NR_SYSCALL_BASE+228) ++#define __NR_getxattr (__NR_SYSCALL_BASE+229) ++#define __NR_lgetxattr (__NR_SYSCALL_BASE+230) ++#define __NR_fgetxattr (__NR_SYSCALL_BASE+231) ++#define __NR_listxattr (__NR_SYSCALL_BASE+232) ++#define __NR_llistxattr (__NR_SYSCALL_BASE+233) ++#define __NR_flistxattr (__NR_SYSCALL_BASE+234) ++#define __NR_removexattr (__NR_SYSCALL_BASE+235) ++#define __NR_lremovexattr (__NR_SYSCALL_BASE+236) ++#define __NR_fremovexattr (__NR_SYSCALL_BASE+237) ++#define __NR_tkill (__NR_SYSCALL_BASE+238) ++#define __NR_sendfile64 (__NR_SYSCALL_BASE+239) ++#define __NR_futex (__NR_SYSCALL_BASE+240) ++#define __NR_sched_setaffinity (__NR_SYSCALL_BASE+241) ++#define __NR_sched_getaffinity (__NR_SYSCALL_BASE+242) ++#define __NR_io_setup (__NR_SYSCALL_BASE+243) ++#define __NR_io_destroy (__NR_SYSCALL_BASE+244) ++#define __NR_io_getevents (__NR_SYSCALL_BASE+245) ++#define __NR_io_submit (__NR_SYSCALL_BASE+246) ++#define __NR_io_cancel (__NR_SYSCALL_BASE+247) ++#define __NR_exit_group (__NR_SYSCALL_BASE+248) ++#define __NR_lookup_dcookie (__NR_SYSCALL_BASE+249) ++#define __NR_epoll_create (__NR_SYSCALL_BASE+250) ++#define __NR_epoll_ctl (__NR_SYSCALL_BASE+251) ++#define __NR_epoll_wait (__NR_SYSCALL_BASE+252) ++#define __NR_remap_file_pages (__NR_SYSCALL_BASE+253) ++ /* 254 for set_thread_area */ ++ /* 255 for get_thread_area */ ++#define __NR_set_tid_address (__NR_SYSCALL_BASE+256) ++#define __NR_timer_create (__NR_SYSCALL_BASE+257) ++#define __NR_timer_settime (__NR_SYSCALL_BASE+258) ++#define __NR_timer_gettime (__NR_SYSCALL_BASE+259) ++#define __NR_timer_getoverrun (__NR_SYSCALL_BASE+260) ++#define 
__NR_timer_delete (__NR_SYSCALL_BASE+261) ++#define __NR_clock_settime (__NR_SYSCALL_BASE+262) ++#define __NR_clock_gettime (__NR_SYSCALL_BASE+263) ++#define __NR_clock_getres (__NR_SYSCALL_BASE+264) ++#define __NR_clock_nanosleep (__NR_SYSCALL_BASE+265) ++#define __NR_statfs64 (__NR_SYSCALL_BASE+266) ++#define __NR_fstatfs64 (__NR_SYSCALL_BASE+267) ++#define __NR_tgkill (__NR_SYSCALL_BASE+268) ++#define __NR_utimes (__NR_SYSCALL_BASE+269) ++#define __NR_fadvise64_64 (__NR_SYSCALL_BASE+270) ++#define __NR_pciconfig_iobase (__NR_SYSCALL_BASE+271) ++#define __NR_pciconfig_read (__NR_SYSCALL_BASE+272) ++#define __NR_pciconfig_write (__NR_SYSCALL_BASE+273) ++#define __NR_mq_open (__NR_SYSCALL_BASE+274) ++#define __NR_mq_unlink (__NR_SYSCALL_BASE+275) ++#define __NR_mq_timedsend (__NR_SYSCALL_BASE+276) ++#define __NR_mq_timedreceive (__NR_SYSCALL_BASE+277) ++#define __NR_mq_notify (__NR_SYSCALL_BASE+278) ++#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279) ++#define __NR_waitid (__NR_SYSCALL_BASE+280) ++#define __NR_add_key (__NR_SYSCALL_BASE+281) ++#define __NR_request_key (__NR_SYSCALL_BASE+282) ++#define __NR_keyctl (__NR_SYSCALL_BASE+283) ++#define __NR_ioprio_set (__NR_SYSCALL_BASE+284) ++#define __NR_ioprio_get (__NR_SYSCALL_BASE+285) ++#define __NR_inotify_init (__NR_SYSCALL_BASE+286) ++#define __NR_inotify_add_watch (__NR_SYSCALL_BASE+287) ++#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+288) ++#define __NR_migrate_pages (__NR_SYSCALL_BASE+289) ++#define __NR_openat (__NR_SYSCALL_BASE+290) ++#define __NR_mkdirat (__NR_SYSCALL_BASE+291) ++#define __NR_mknodat (__NR_SYSCALL_BASE+292) ++#define __NR_fchownat (__NR_SYSCALL_BASE+293) ++#define __NR_futimesat (__NR_SYSCALL_BASE+294) ++#define __NR_fstatat64 (__NR_SYSCALL_BASE+295) ++#define __NR_unlinkat (__NR_SYSCALL_BASE+296) ++#define __NR_renameat (__NR_SYSCALL_BASE+297) ++#define __NR_linkat (__NR_SYSCALL_BASE+298) ++#define __NR_symlinkat (__NR_SYSCALL_BASE+299) ++#define __NR_readlinkat (__NR_SYSCALL_BASE+300) ++#define __NR_fchmodat (__NR_SYSCALL_BASE+301) ++#define __NR_faccessat (__NR_SYSCALL_BASE+302) ++#define __NR_pselect6 (__NR_SYSCALL_BASE+303) ++#define __NR_ppoll (__NR_SYSCALL_BASE+304) ++#define __NR_unshare (__NR_SYSCALL_BASE+305) ++#define __NR_set_robust_list (__NR_SYSCALL_BASE+306) ++#define __NR_get_robust_list (__NR_SYSCALL_BASE+307) ++#define __NR_splice (__NR_SYSCALL_BASE+308) ++#define __NR_sync_file_range2 (__NR_SYSCALL_BASE+309) ++#define __NR_tee (__NR_SYSCALL_BASE+310) ++#define __NR_vmsplice (__NR_SYSCALL_BASE+311) ++#define __NR_move_pages (__NR_SYSCALL_BASE+312) ++#define __NR_fadvise64 (__NR_SYSCALL_BASE+313) ++#define __NR_utimensat (__NR_SYSCALL_BASE+314) ++#define __NR_signalfd (__NR_SYSCALL_BASE+315) ++#define __NR_timerfd_create (__NR_SYSCALL_BASE+316) ++#define __NR_eventfd (__NR_SYSCALL_BASE+317) ++#define __NR_fallocate (__NR_SYSCALL_BASE+318) ++#define __NR_timerfd_settime (__NR_SYSCALL_BASE+319) ++#define __NR_timerfd_gettime (__NR_SYSCALL_BASE+320) ++#define __NR_getcpu (__NR_SYSCALL_BASE+321) ++#define __NR_signalfd4 (__NR_SYSCALL_BASE+322) ++#define __NR_eventfd2 (__NR_SYSCALL_BASE+323) ++#define __NR_epoll_create1 (__NR_SYSCALL_BASE+324) ++#define __NR_dup3 (__NR_SYSCALL_BASE+325) ++#define __NR_pipe2 (__NR_SYSCALL_BASE+326) ++#define __NR_inotify_init1 (__NR_SYSCALL_BASE+327) ++#define __NR_kexec_load (__NR_SYSCALL_BASE+328) ++#define __NR_accept (__NR_SYSCALL_BASE+329) ++#define __NR_bind (__NR_SYSCALL_BASE+330) ++#define __NR_connect (__NR_SYSCALL_BASE+331) ++#define __NR_getpeername 
(__NR_SYSCALL_BASE+332) ++#define __NR_getsockname (__NR_SYSCALL_BASE+333) ++#define __NR_getsockopt (__NR_SYSCALL_BASE+334) ++#define __NR_listen (__NR_SYSCALL_BASE+335) ++#define __NR_recv (__NR_SYSCALL_BASE+336) ++#define __NR_recvfrom (__NR_SYSCALL_BASE+337) ++#define __NR_recvmsg (__NR_SYSCALL_BASE+338) ++#define __NR_send (__NR_SYSCALL_BASE+339) ++#define __NR_sendmsg (__NR_SYSCALL_BASE+340) ++#define __NR_sendto (__NR_SYSCALL_BASE+341) ++#define __NR_setsockopt (__NR_SYSCALL_BASE+342) ++#define __NR_shutdown (__NR_SYSCALL_BASE+343) ++#define __NR_socket (__NR_SYSCALL_BASE+344) ++#define __NR_socketpair (__NR_SYSCALL_BASE+345) ++#define __NR_prlimit64 (__NR_SYSCALL_BASE+346) ++#define __NR_accept4 (__NR_SYSCALL_BASE+347) ++#define __NR_recvmmsg (__NR_SYSCALL_BASE+348) ++#define __NR_sendmmsg (__NR_SYSCALL_BASE+349) ++#define __NR_fanotify_init (__NR_SYSCALL_BASE+350) ++#define __NR_fanotify_mark (__NR_SYSCALL_BASE+351) ++#define __NR_msgget (__NR_SYSCALL_BASE+352) ++#define __NR_msgctl (__NR_SYSCALL_BASE+353) ++#define __NR_msgrcv (__NR_SYSCALL_BASE+354) ++#define __NR_msgsnd (__NR_SYSCALL_BASE+355) ++#define __NR_semget (__NR_SYSCALL_BASE+356) ++#define __NR_semctl (__NR_SYSCALL_BASE+357) ++#define __NR_semtimedop (__NR_SYSCALL_BASE+358) ++#define __NR_semop (__NR_SYSCALL_BASE+359) ++#define __NR_shmget (__NR_SYSCALL_BASE+360) ++#define __NR_shmctl (__NR_SYSCALL_BASE+361) ++#define __NR_shmat (__NR_SYSCALL_BASE+362) ++#define __NR_shmdt (__NR_SYSCALL_BASE+363) ++#define __NR_syncfs (__NR_SYSCALL_BASE+364) ++#define __NR_setns (__NR_SYSCALL_BASE+365) ++#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+366) ++#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+367) ++#define __NR_process_vm_readv (__NR_SYSCALL_BASE+368) ++#define __NR_process_vm_writev (__NR_SYSCALL_BASE+369) ++#define __NR_clock_adjtime (__NR_SYSCALL_BASE+370) ++#define __NR_get_mempolicy (__NR_SYSCALL_BASE+371) ++#define __NR_mbind (__NR_SYSCALL_BASE+372) ++#define __NR_perf_event_open (__NR_SYSCALL_BASE+373) ++#define __NR_preadv (__NR_SYSCALL_BASE+374) ++#define __NR_pwritev (__NR_SYSCALL_BASE+375) ++#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+376) ++#define __NR_set_mempolicy (__NR_SYSCALL_BASE+377) ++#define __NR_epoll_pwait (__NR_SYSCALL_BASE+378) ++ ++ ++ ++ ++#define __NR_lmmap (__NR_NDS32_BASE+ 1) ++#define __NR_lmunmap (__NR_NDS32_BASE+ 2) ++#define __NR_lmdma (__NR_NDS32_BASE+ 3) ++#define __NR_pfmctl (__NR_NDS32_BASE+ 4) ++#define __NR_getpfm (__NR_NDS32_BASE+ 5) ++#define __NR_setpfm (__NR_NDS32_BASE+ 6) ++#define __NR_wbna (__NR_NDS32_BASE+ 7) ++ ++ ++ ++#ifdef __KERNEL__ ++ ++#define __ARCH_WANT_IPC_PARSE_VERSION ++#define __ARCH_WANT_OLD_READDIR ++#define __ARCH_WANT_STAT64 ++#define __ARCH_WANT_SYS_ALARM ++#define __ARCH_WANT_SYS_GETHOSTNAME ++#define __ARCH_WANT_SYS_PAUSE ++#define __ARCH_WANT_SYS_TIME ++#define __ARCH_WANT_SYS_UTIME ++#define __ARCH_WANT_SYS_SOCKETCALL ++#define __ARCH_WANT_SYS_FADVISE64 ++#define __ARCH_WANT_SYS_GETPGRP ++#define __ARCH_WANT_SYS_LLSEEK ++#define __ARCH_WANT_SYS_NICE ++#define __ARCH_WANT_SYS_OLD_GETRLIMIT ++#define __ARCH_WANT_SYS_OLD_MMAP ++#define __ARCH_WANT_SYS_OLDUMOUNT ++#define __ARCH_WANT_SYS_SIGPENDING ++#define __ARCH_WANT_SYS_SIGPROCMASK ++#define __ARCH_WANT_SYS_RT_SIGACTION ++ ++/* ++ * "Conditional" syscalls ++ * ++ * What we want is __attribute__((weak,alias("sys_ni_syscall"))), ++ * but it doesn't work on all toolchains, so we just do it by hand ++ */ ++#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); ++ ++#endif 
++#endif /* __ASM_NDS32_UNISTD_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/user.h linux-3.4.110/arch/nds32/include/asm/user.h +--- linux-3.4.110.orig/arch/nds32/include/asm/user.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/user.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * linux/arch/nds32/include/asm/user.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef _NDS32_USER_H ++#define _NDS32_USER_H ++ ++#include ++#include ++/* Core file format: The core file is written in such a way that gdb ++ can understand it and provide useful information to the user (under ++ linux we use the 'trad-core' bfd). There are quite a number of ++ obstacles to being able to view the contents of the floating point ++ registers, and until these are solved you will not be able to view the ++ contents of them. Actually, you can read in the core file and look at ++ the contents of the user struct to find out what the floating point ++ registers contain. ++ The actual file contents are as follows: ++ UPAGE: 1 page consisting of a user struct that tells gdb what is present ++ in the file. Directly after this is a copy of the task_struct, which ++ is currently not used by gdb, but it may come in useful at some point. ++ All of the registers are stored as part of the upage. The upage should ++ always be only one page. ++ DATA: The data area is stored. We use current->end_text to ++ current->brk to pick up all of the user variables, plus any memory ++ that may have been malloced. No attempt is made to determine if a page ++ is demand-zero or if a page is totally unused, we just cover the entire ++ range. All of the addresses are rounded in such a way that an integral ++ number of pages is written. ++ STACK: We need the stack information in order to get a meaningful ++ backtrace. We need to write the data from (esp) to ++ current->start_stack, so we round each of these off in order to be able ++ to write an integer number of pages. ++ The minimum core file size is 3 pages, or 12288 bytes. ++*/ ++ ++struct user_fp { ++ struct fp_reg { ++ unsigned int sign1:1; ++ unsigned int unused:15; ++ unsigned int sign2:1; ++ unsigned int exponent:14; ++ unsigned int j:1; ++ unsigned int mantissa1:31; ++ unsigned int mantissa0:32; ++ } fpregs[8]; ++ unsigned int fpsr:32; ++ unsigned int fpcr:32; ++ unsigned char ftype[8]; ++ unsigned int init_flag; ++}; ++ ++/* When the kernel dumps core, it starts by dumping the user struct - ++ this will be used by gdb to figure out where the data and stack segments ++ are within the file, and what virtual addresses to use. */ ++struct user{ ++/* We start with the registers, to mimic the way that "memory" is returned ++ from the ptrace(3,...) function. */ ++ struct pt_regs regs; /* Where the registers are actually stored */ ++/* ptrace does not yet supply these. Someday.... */ ++ int u_fpvalid; /* True if math co-processor being used. */ ++ /* for this mess. Not yet used. */ ++/* The rest of this junk is to help gdb figure out what goes where */ ++ unsigned long int u_tsize; /* Text segment size (pages). */ ++ unsigned long int u_dsize; /* Data segment size (pages). */ ++ unsigned long int u_ssize; /* Stack segment size (pages). */ ++ unsigned long start_code; /* Starting virtual address of text. */ ++ unsigned long start_stack; /* Starting virtual address of stack area. ++ This is actually the bottom of the stack, ++ the top of the stack is always found in the ++ esp register. 
*/ ++ long int signal; /* Signal that caused the core dump. */ ++ int reserved; /* No longer used */ ++ struct pt_regs * u_ar0; /* Used by gdb to help find the values for */ ++ /* the registers. */ ++ unsigned long magic; /* To uniquely identify a core file */ ++ char u_comm[32]; /* User command that was responsible */ ++ int u_debugreg[8]; ++ struct user_fp u_fp; /* FP state */ ++ struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ ++ /* the FP registers. */ ++}; ++#define NBPG PAGE_SIZE ++#define UPAGES 1 ++#define HOST_TEXT_START_ADDR (u.start_code) ++#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) ++ ++#endif /* _NDS32_USER_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/vga.h linux-3.4.110/arch/nds32/include/asm/vga.h +--- linux-3.4.110.orig/arch/nds32/include/asm/vga.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/vga.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,17 @@ ++/* ++ * linux/arch/nds32/include/asm/vga.h ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#ifndef ASMNDS32_VGA_H ++#define ASMNDS32_VGA_H ++ ++#include ++#include ++ ++#define VGA_MAP_MEM(x) (PCIMEM_BASE + (x)) ++ ++#define vga_readb(x) (*((volatile unsigned char *)x)) ++#define vga_writeb(x,y) (*((volatile unsigned char *)y) = (x)) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/vmalloc.h linux-3.4.110/arch/nds32/include/asm/vmalloc.h +--- linux-3.4.110.orig/arch/nds32/include/asm/vmalloc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/vmalloc.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,31 @@ ++/* ++ * linux/arch/nds32/include/asm/vmalloc.h ++ * ++ * Faraday Platform Independent Virtual Memory Configuration ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/16/2005 Copy from Faraday CPE codes. ++ */ ++ ++#ifndef __FARADAY_PLATFORM_INDEPENDENT_VMALLOC_HEADER__ ++#define __FARADAY_PLATFORM_INDEPENDENT_VMALLOC_HEADER__ ++ ++#endif /* __FARADAY_PLATFORM_INDEPENDENT_VMALLOC_HEADER__ */ +diff -Nur linux-3.4.110.orig/arch/nds32/include/asm/xor.h linux-3.4.110/arch/nds32/include/asm/xor.h +--- linux-3.4.110.orig/arch/nds32/include/asm/xor.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/include/asm/xor.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,157 @@ ++/* ++ * linux/arch/nds32/include/asm/xor.h ++ * ++ * Copyright (C) 2001 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#include ++ ++#define __XOR(a1, a2) a1 ^= a2 ++ ++ ++#define GET_BLOCK_2(dst) \ ++ __asm__( \ ++ " lmw.bim %1, [%0], %2 \n"\ ++ : "=r" (dst), "=r" (a1), "=r" (a2) \ ++ : "0" (dst)) ++ ++#define GET_BLOCK_4(dst) \ ++ __asm__( \ ++ " lmw.bim %1, [%0], %1 \n" \ //ldmia %0, {%1, %2, %3, %4} ++ " lmw.bim %2, [%0], %2 \n" \ ++ " lmw.bim %3, [%0], %3 \n" \ ++ " lmw.bim %4, [%0], %4 \n" \ ++ : "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \ ++ : "0" (dst)) ++ ++#define XOR_BLOCK_2(src) \ ++ __asm__(\ ++ " lmw.bim %1, [%0], %2 \n " \ ++ " lmw.bim %2, [%0], %2 \n" \ ++ : "=r" (src), "=r" (b1), "=r" (b2) \ ++ : "0" (src)); \ ++ __XOR(a1, b1); __XOR(a2, b2); ++ ++ ++#define XOR_BLOCK_4(src) \ ++ __asm__(\ ++ "lmw.bim %1, [%0], %1 \n " \ // ldmia %0!, {%1, %2, %3, %4} ++ "lmw.bim %2, [%0], %2 \n " \ ++ "lmw.bim %3, [%0], %3 \n " \ ++ "lmw.bim %4, [%0], %4 \n " \ ++ : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \ ++ : "0" (src)); \ ++ __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4) ++ ++#define PUT_BLOCK_2(dst) \ ++ __asm__ __volatile__( \ ++ " smw.bim %2, [%0], %3" \ ++ : "=r" (dst) \ ++ : "0" (dst), "r" (a1), "r" (a2)) ++ ++#define PUT_BLOCK_4(dst) \ ++ __asm__ __volatile__( \ ++ " smw.bim %2, [%0], %5 \n" \ ++ : "=r" (dst) \ ++ : "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4)) ++ ++static void ++xor_nds32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) ++{ ++ unsigned int lines = bytes / sizeof(unsigned long) / 4; ++ register unsigned int a1 __asm__("r4"); ++ register unsigned int a2 __asm__("r5"); ++ register unsigned int a3 __asm__("r6"); ++ register unsigned int a4 __asm__("r7"); ++ register unsigned int b1 __asm__("r8"); ++ register unsigned int b2 __asm__("r9"); ++ register unsigned int b3 __asm__("p0"); ++ register unsigned int b4 __asm__("ra"); ++ ++ do { ++ GET_BLOCK_4(p1); ++ XOR_BLOCK_4(p2); ++ PUT_BLOCK_4(p1); ++ } while (--lines); ++} ++ ++static void ++xor_nds32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ++ unsigned long *p3) ++{ ++ unsigned int lines = bytes / sizeof(unsigned long) / 4; ++ register unsigned int a1 __asm__("r4"); ++ register unsigned int a2 __asm__("r5"); ++ register unsigned int a3 __asm__("r6"); ++ register unsigned int a4 __asm__("r7"); ++ register unsigned int b1 __asm__("r8"); ++ register unsigned int b2 __asm__("r9"); ++ register unsigned int b3 __asm__("p0"); ++ register unsigned int b4 __asm__("ra"); ++ ++ do { ++ GET_BLOCK_4(p1); ++ XOR_BLOCK_4(p2); ++ XOR_BLOCK_4(p3); ++ PUT_BLOCK_4(p1); ++ } while (--lines); ++} ++ ++static void ++xor_nds32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ++ unsigned long *p3, unsigned long *p4) ++{ ++ unsigned int lines = bytes / sizeof(unsigned long) / 2; ++ register unsigned int a1 __asm__("r8"); ++ register unsigned int a2 __asm__("r9"); ++ register unsigned int b1 __asm__("p0"); ++ register unsigned int b2 __asm__("ra"); ++ ++ do { ++ GET_BLOCK_2(p1); ++ XOR_BLOCK_2(p2); ++ XOR_BLOCK_2(p3); ++ XOR_BLOCK_2(p4); ++ PUT_BLOCK_2(p1); ++ } while (--lines); ++} ++ ++static void ++xor_nds32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ++ unsigned long *p3, unsigned long *p4, unsigned long *p5) ++{ ++ unsigned int lines = bytes / sizeof(unsigned long) / 2; ++ register unsigned int a1 __asm__("r8"); ++ register unsigned int a2 __asm__("r9"); ++ register unsigned int b1 __asm__("p0"); ++ register unsigned int b2 __asm__("ra"); ++ ++ do { ++ GET_BLOCK_2(p1); ++ XOR_BLOCK_2(p2); ++ XOR_BLOCK_2(p3); ++ XOR_BLOCK_2(p4); 
++ XOR_BLOCK_2(p5); ++ PUT_BLOCK_2(p1); ++ } while (--lines); ++} ++ ++static struct xor_block_template xor_block_nds32regs = { ++ .name = "nds32regs", ++ .do_2 = xor_nds32regs_2, ++ .do_3 = xor_nds32regs_3, ++ .do_4 = xor_nds32regs_4, ++ .do_5 = xor_nds32regs_5, ++}; ++ ++#undef XOR_TRY_TEMPLATES ++#define XOR_TRY_TEMPLATES \ ++ do { \ ++ xor_speed(&xor_block_nds32regs); \ ++ xor_speed(&xor_block_8regs); \ ++ xor_speed(&xor_block_32regs); \ ++ } while (0) +diff -Nur linux-3.4.110.orig/arch/nds32/Kconfig linux-3.4.110/arch/nds32/Kconfig +--- linux-3.4.110.orig/arch/nds32/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/Kconfig 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,249 @@ ++# ++# For a description of the syntax of this configuration file, ++# see Documentation/kbuild/kconfig-language.txt. ++# ++ ++config NDS32 ++ bool ++ default y ++ select RTC_LIB ++ select HAVE_OPROFILE ++ select HAVE_ARCH_KGDB ++ select HAVE_KPROBES ++ select HAVE_KRETPROBES ++ select HAVE_FUNCTION_TRACER ++ select HAVE_FUNCTION_GRAPH_TRACER ++ select HAVE_FUNCTION_TRACE_MCOUNT_TEST ++ select SYS_SUPPORTS_APM_EMULATION ++ select HAVE_IDE ++ select HAVE_MEMBLOCK ++ select HAVE_MEMBLOCK_NODE_MAP ++ ++config GENERIC_GPIO ++ bool ++ default n ++ ++config GENERIC_TIME ++ bool ++ default y ++ ++config GENERIC_CLOCKEVENTS ++ bool ++ default y ++ ++config NO_IOPORT ++ bool ++ default y ++ ++config GENERIC_IOMAP ++ def_bool y ++ ++config GENERIC_LOCKBREAK ++ bool ++ default y ++ depends on SMP && PREEMPT ++ ++config RWSEM_GENERIC_SPINLOCK ++ bool ++ default y ++ ++config RWSEM_XCHGADD_ALGORITHM ++ bool ++ ++config GENERIC_HWEIGHT ++ bool ++ default y ++ ++config GENERIC_FIND_NEXT_BIT ++ bool ++ default y ++ ++config GENERIC_CALIBRATE_DELAY ++ bool ++ default y ++ ++config GENERIC_BUST_SPINLOCK ++ bool ++ ++config GENERIC_HARDIRQS ++ bool ++ default y ++ ++config GENERIC_HARDIRQS_NO__DO_IRQ ++ def_bool y ++ ++config LOCKDEP_SUPPORT ++ bool ++ default y ++ ++config STACKTRACE_SUPPORT ++ bool ++ default y ++ ++config HAVE_LATENCYTOP_SUPPORT ++ def_bool y ++ ++source "init/Kconfig" ++source "kernel/Kconfig.freezer" ++ ++menu "System Type" ++source "arch/nds32/platforms/Kconfig" ++source "arch/nds32/Kconfig.cpu" ++ ++config VECTORS_BASE ++ hex ++ default 0xfeff0000 if MMU || CPU_HIGH_VECTOR ++ default DRAM_BASE if REMAP_VECTORS_TO_RAM ++ default 0x00000000 ++ help ++ The base address of exception vectors. ++ ++config MMU ++ bool ++ default y ++ ++endmenu ++ ++menu "Kernel Features" ++source "kernel/time/Kconfig" ++ ++config SMP ++ bool "Symmetric Multi-Processing" ++ depends on PLATFORM_AMIC ++ select USE_GENERIC_SMP_HELPERS ++ help ++ This enables support for systems with more than one CPU. If you have ++ a system with only one CPU, like most personal computers, say N. If ++ you have a system with more than one CPU, say Y. ++ ++ If you say N here, the kernel will run on single and multiprocessor ++ machines, but will use only one CPU of a multiprocessor machine. If ++ you say Y here, the kernel will run on many, but not all, single ++ processor machines. On a single processor machine, the kernel will ++ run faster if you say N here. ++ ++ See also the , ++ , , ++ and the SMP-HOWTO available at ++ . ++ ++ If you don't know what to do here, say N. 
++ ++config NR_CPUS ++ int "Maximum number of CPUs (2-32)" ++ depends on SMP ++ default "4" ++ ++source "kernel/Kconfig.preempt" ++source "mm/Kconfig" ++ ++config FORCE_MAX_ZONEORDER ++ int "MAX_ORDER for the Page Allocator" ++ default "11" ++ ++source "kernel/Kconfig.hz" ++ ++config CMDLINE ++ string "Default kernel command string" ++ default "mem=64M@0x0 initrd=0x800000,8M root=/dev/ram0 rw console=ttyS0,38400n8 rootfstype=ext2 init=/sbin/init -s" ++ ++endmenu ++ ++menu "Power management options" ++ ++config SYS_SUPPORTS_APM_EMULATION ++ bool ++ ++config ARCH_SUSPEND_POSSIBLE ++ def_bool y ++ ++source "kernel/power/Kconfig" ++ ++if PLAT_AG101 || PLAT_AG102 ++source "drivers/cpufreq/Kconfig" ++ ++if PLAT_AG101 ++choice ++ prompt "Default CPUFreq Implementation" ++ depends on CPU_FREQ ++ default AG101_CPU_FREQ_SCALING_MODE ++ help ++ This option sets which CPUFreq governor shall be loaded at ++ startup. If in doubt, select 'performance'. ++config AG101_CPU_FREQ_SCALING_MODE ++ bool "AG101 Frequency Scaling Mode" ++ help ++ Rescale CPU frequency and Bus clock without changing PLL. ++ ++config AG101_CPU_FREQ_FCS ++ bool "AG101 Frequency Change Sequence (FCS)" ++ help ++ The Frequency Change Sequence (FCS) is used to change the system ++ clock frequency. While in the FCS, the system clocks stop. This ++ mode is intended for setting a different frequency to overwrite ++ the default value at initial boot-up. This can be used as a power- ++ saving feature that allows the AG101 to run at the minimum required ++ frequency. ++endchoice ++endif ++ ++if PLAT_AG102 ++choice ++ prompt "Default CPUFreq Implementation" ++ depends on CPU_FREQ ++ default AG102_CPU_FREQ_SCALING_MODE ++ help ++ This option sets which CPUFreq governor shall be loaded at ++ startup. If in doubt, select 'performance'. ++config AG102_CPU_FREQ_SCALING_MODE ++ bool "AG102 Frequency Scaling Mode" ++ help ++ Rescale CPU frequency and Bus clock without changing PLL. ++ ++config AG102_CPU_FREQ_FCS ++ bool "AG102 Frequency Change Sequence (FCS)" ++ help ++ The Frequency Change Sequence (FCS) is used to change the system ++ clock frequency. While in the FCS, the system clocks stop. This ++ mode is intended for setting a different frequency to overwrite ++ the default value at initial boot-up. This can be used as a power- ++ saving feature that allows the AG102 to run at the minimum required ++ frequency. ++endchoice ++endif ++ ++ ++endif ++ ++endmenu ++ ++menu "Bus options" ++ ++config PCI ++ bool "PCI support" ++ help ++ Find out whether you have a PCI motherboard. PCI is the name of a ++ bus system, i.e. the way the CPU talks to the other stuff inside ++ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or ++ VESA. If you have PCI, say Y, otherwise N. ++ ++ The PCI-HOWTO, available from ++ , contains valuable ++ information about which PCI hardware does work under Linux and which ++ doesn't.
++ ++source "drivers/pci/Kconfig" ++ ++endmenu ++ ++menu "Executable file formats" ++source "fs/Kconfig.binfmt" ++endmenu ++ ++source "net/Kconfig" ++source "drivers/Kconfig" ++source "fs/Kconfig" ++source "arch/nds32/Kconfig.debug" ++source "security/Kconfig" ++source "crypto/Kconfig" ++source "lib/Kconfig" +diff -Nur linux-3.4.110.orig/arch/nds32/Kconfig.cpu linux-3.4.110/arch/nds32/Kconfig.cpu +--- linux-3.4.110.orig/arch/nds32/Kconfig.cpu 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/Kconfig.cpu 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,148 @@ ++comment "Processor Features" ++config CPU_N1213 ++ bool ++ ++config CPU_N1213_43U1HA0 ++ bool ++ ++config CPU_N1233F ++ bool ++ ++config CPU_CUSTOM ++ bool ++ ++config FPU ++ bool "fpu support" ++ ++config UNLAZY_FPU ++ bool "Unlazy FPU support" ++ depends on FPU ++ help ++ Say Y here to enable unlazy FPU and disable lazy FPU. ++ ++ ++config AUDIO ++ bool "audio support" if CPU_CUSTOM ++ ++config HWZOL ++ bool "hardware zero overhead loop support" ++ default y ++ ++config UNLAZY_AUDIO ++ bool "Unlazy audio support" ++ depends on AUDIO ++ help ++ Say Y here to enable unlazy audio and disable lazy audio. ++choice ++ prompt "Vector Interrupt Controller mode" ++ default IVIC_INTC ++config IVIC_INTC ++ bool "IVIC mode with Interrupt Controller" ++config IVIC ++ bool "IVIC mode" ++config EVIC ++ bool "EVIC mode" ++endchoice ++ ++config CPU_NO_CONTEXT_ID ++ def_bool CPU_N1213_43U1HA0 || SMP ++ ++config CPU_CACHE_NONALIASING ++ bool "Non-aliasing cache" ++ ++choice ++ prompt "Paging -- page size " ++ default ANDES_PAGE_SIZE_4KB ++config ANDES_PAGE_SIZE_4KB ++ bool "use 4KB page size" ++config ANDES_PAGE_SIZE_8KB ++ bool "use 8KB page size" ++endchoice ++ ++config NO_KERNEL_LARGE_PAGE ++ def_bool CPU_N1213_43U1HA0 || SMP ++ ++config CPU_ICACHE_DISABLE ++ bool "Disable I-Cache" ++ help ++ Say Y here to disable the processor instruction cache. Unless ++ you have a reason not to or are unsure, say N. ++ ++config CPU_DCACHE_DISABLE ++ bool "Disable D-Cache" ++ help ++ Say Y here to disable the processor data cache. Unless ++ you have a reason not to or are unsure, say N. ++ ++config CPU_DCACHE_WRITETHROUGH ++ bool "Force write through D-cache" ++ depends on !CPU_DCACHE_DISABLE ++ help ++ Say Y here to use the data cache in writethrough mode. Unless you ++ specifically require this or are unsure, say N. ++ ++config KEXEC ++ bool "Kexec system call (EXPERIMENTAL)" ++ depends on EXPERIMENTAL ++ help ++ kexec is a system call that implements the ability to shutdown your ++ current kernel, and to start another kernel. It is like a reboot ++ but it is independent of the system firmware. And like a reboot ++ you can start any kernel with it, not just Linux. ++ ++ It is an ongoing process to be certain the hardware in a machine ++ is properly shutdown, so do not be surprised if this code does not ++ initially work for you. It may help to enable device hotplugging ++ support. ++ ++config ABI1 ++ bool "Allow ABI 1 binaries to run with this kernel (EXPERIMENTAL)" ++ depends on EXPERIMENTAL ++ default n ++ help ++ This option preserves the old syscall interface of ABI 1. If ++ you know you'll be using ABI 2 or 2fp then you can say N here. ++ If this option is not selected and you attempt to execute a ++ legacy ABI binary then the result will be UNPREDICTABLE ++ (in fact it can be predicted that it won't work at all). If ++ in doubt say Y. 
++ ++config WBNA ++ bool "WBNA" ++ default n ++ help ++ Say Y here to enable write-back memory with no-write-allocation policy. ++ ++config HSS ++ bool "Using Hardware Single-Step (HSS) instead of software breakpoint" ++ default y ++ ++config ALIGNMENT_TRAP ++ tristate "Kernel support unaligned access handling" ++ depends on EXPERIMENTAL ++ default y ++ help ++ Andes processors cannot fetch/store information which is not ++ naturally aligned on the bus, i.e., a 4 byte fetch must start at an ++ address divisible by 4. On 32-bit Andes processors, these non-aligned ++ fetch/store instructions will be emulated in software if you say Y ++ here, which has a severe performance impact. This is necessary for ++ correct operation of some network protocols. With an IP-only ++ configuration it is safe to say N, otherwise say Y. ++ ++config HIGHMEM ++ bool "High Memory Support" ++ depends on MMU ++ help ++ The address space of Andes processors is only 4 Gigabytes large ++ and it has to accommodate user address space, kernel address ++ space as well as some memory mapped IO. That means that, if you ++ have a large amount of physical memory and/or IO, not all of the ++ memory can be "permanently mapped" by the kernel. The physical ++ memory that is not permanently mapped is called "high memory". ++ ++ Depending on the selected kernel/user memory split, minimum ++ vmalloc space and actual amount of RAM, you may not need this ++ option which should result in a slightly faster kernel. ++ ++ If unsure, say n. +diff -Nur linux-3.4.110.orig/arch/nds32/Kconfig.debug linux-3.4.110/arch/nds32/Kconfig.debug +--- linux-3.4.110.orig/arch/nds32/Kconfig.debug 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/Kconfig.debug 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,82 @@ ++menu "Kernel hacking" ++ ++config TRACE_IRQFLAGS_SUPPORT ++ bool ++ default y ++ ++source "lib/Kconfig.debug" ++ ++config FRAME_POINTER ++ bool ++ default y ++ help ++ If you say N here, the resulting kernel will be slightly smaller and ++ faster. However, when a problem occurs with the kernel, the ++ information that is reported is severely limited. Most people ++ should say Y here. ++ ++config DEBUG_USER ++ bool "Verbose user fault messages" ++ help ++ When a user program crashes due to an exception, the kernel can ++ print a brief message explaining what the problem was. This is ++ sometimes helpful for debugging but serves no purpose on a ++ production system. Most people should say N here. ++ ++ In addition, you need to pass user_debug=N on the kernel command ++ line to enable this feature. N consists of the sum of: ++ ++ 1 - undefined instruction events ++ 2 - system calls ++ 4 - invalid data aborts ++ 8 - SIGSEGV faults ++ 16 - SIGBUS faults ++ ++config DEBUG_ERRORS ++ bool "Verbose kernel error messages" ++ depends on DEBUG_KERNEL ++ help ++ This option controls verbose debugging information which can be ++ printed when the kernel detects an internal error. This debugging ++ information is useful to kernel hackers when tracking down problems, ++ but mostly meaningless to other people. It's safe to say Y unless ++ you are concerned with the code size or don't want to see these ++ messages. ++ ++config DEBUG_LL ++ bool "Kernel low-level debugging functions" ++ depends on DEBUG_KERNEL ++ help ++ Say Y here to include definitions of printascii, printchar, printhex ++ in the kernel. This is helpful if you are debugging code that ++ executes before the console is initialized.
++ ++config CCTL ++ tristate "User space cache control support (EXPERIMENTAL)" ++ depends on EXPERIMENTAL ++ help ++ export cache control to user space via /proc ++ ++ If unsure, say N. ++ ++config ELFCHK_DEFAULT_ENABLE ++ bool "Enable ELF-Core Checking by default" ++ default n ++ select PROC_FS ++ help ++ ELF-Core Checking is a mechanism which prevents ELF binary from ++ being loaded if it requires any feature that the underlying platform ++ doesn't support. ++ ++ If you say Y here, the resulting kernel enables ELF-Core Checking ++ mechanism by default. ++config EARLY_PRINTK ++ bool "Early printk support" ++ default y ++ help ++ Say Y here if you want to have an early console using the ++ earlyprintk=[,][,] kernel parameter. It ++ is assumed that the early console device has been initialised ++ by the boot loader prior to starting the Linux kernel. ++ ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/asm-offsets.c linux-3.4.110/arch/nds32/kernel/asm-offsets.c +--- linux-3.4.110.orig/arch/nds32/kernel/asm-offsets.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/asm-offsets.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (C) 1995-2003 Russell King ++ * 2001-2002 Keith Owens ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * Generate definitions needed by assembly language modules. ++ * This code generates raw asm output which is post-processed to extract ++ * and format the required data. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Make sure that the compiler and target are compatible. ++ */ ++ ++#if __GNUC__ < 3 || \ ++ (__GNUC__ == 3 && __GNUC_MINOR__ < 4) || \ ++ (__GNUC__ == 3 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ != 0 && \ ++ __GNUC_PATCHLEVEL__ < 4) || \ ++ (__GNUC__ == 4 && __GNUC_MINOR__ < 2) ++#error Your compiler is too buggy; it is known to miscompile kernels. 
++#error Known good compilers: 3.4.4, 4.2 ++#endif ++ ++/* Use marker if you need to separate the values later */ ++ ++#define DEFINE(sym, val) \ ++ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) ++ ++#define BLANK() asm volatile("\n->" : : ) ++ ++int main(void) ++{ ++ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); ++ BLANK(); ++ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); ++ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); ++ DEFINE(TI_TASK, offsetof(struct thread_info, task)); ++ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); ++ DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); ++// DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); ++ DEFINE(TI_SP_SAVE, offsetof(struct thread_info, sp_save)); ++ BLANK(); ++ DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); ++ BLANK(); ++ DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); ++ BLANK(); ++ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); ++ DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); ++ BLANK(); ++ DEFINE(VM_EXEC, VM_EXEC); ++ BLANK(); ++ DEFINE(VIRT_OFFSET, PAGE_OFFSET); ++ BLANK(); ++ DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc)); ++ DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr)); ++ DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); ++ BLANK(); ++ DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); ++ ++ return 0; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/audio.c linux-3.4.110/arch/nds32/kernel/audio.c +--- linux-3.4.110.orig/arch/nds32/kernel/audio.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/audio.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,218 @@ ++/* ++ * arch/nds32/kernel/audio.c ++ * ++ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli ++ * Copyright (C) 2002 STMicroelectronics Limited ++ * Author : Stuart Menefy ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * Started from SH4 version: ++ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "audio.h" ++/* ++ * Initially load the audio with signalling NANS. This bit pattern ++ * has the property that no matter whether considered as single or as ++ * double precision, it still represents a signalling NAN. 
++ */ ++ ++static struct audio_struct init_audioregs = { ++ .auregs = {[0...31] = NAN32} ++}; ++ ++void save_audio(struct task_struct *tsk) ++{ ++ unsigned int tmp; ++ enable_audio(); ++ asm volatile ("mfsr %0, $MSC_CFG\n\t" ++ "andi %0, %0, %2\n\t" ++ "srli %0, %0, %3\n\t" ++ "slti %0, %0, 2\n\t" ++ "bnez %0, 99f\n\t" ++ "amfar %0, $D0.L24\n\t" ++ "swi %0, [%1+0x0]\n\t" ++ "amfar %0, $D1.L24\n\t" ++ "swi %0, [%1+0x4]\n\t" ++ "99:\n\t" ++ "amfar %0, $I0\n\t" ++ "swi %0, [%1+0x8]\n\t" ++ "amfar %0, $I1\n\t" ++ "swi %0, [%1+0xc]\n\t" ++ "amfar %0, $I2\n\t" ++ "swi %0, [%1+0x10]\n\t" ++ "amfar %0, $I3\n\t" ++ "swi %0, [%1+0x14]\n\t" ++ "amfar %0, $I4\n\t" ++ "swi %0, [%1+0x18]\n\t" ++ "amfar %0, $I5\n\t" ++ "swi %0, [%1+0x1c]\n\t" ++ "amfar %0, $I6\n\t" ++ "swi %0, [%1+0x20]\n\t" ++ "amfar %0, $I7\n\t" ++ "swi %0, [%1+0x24]\n\t" ++ "amfar %0, $M1\n\t" ++ "swi %0, [%1+0x28]\n\t" ++ "amfar %0, $M2\n\t" ++ "swi %0, [%1+0x2c]\n\t" ++ "amfar %0, $M3\n\t" ++ "swi %0, [%1+0x30]\n\t" ++ "amfar %0, $M5\n\t" ++ "swi %0, [%1+0x34]\n\t" ++ "amfar %0, $M6\n\t" ++ "swi %0, [%1+0x38]\n\t" ++ "amfar %0, $M7\n\t" ++ "swi %0, [%1+0x3c]\n\t" ++ "amfar %0, $MOD\n\t" ++ "swi %0, [%1+0x40]\n\t" ++ "amfar %0, $LB\n\t" ++ "swi %0, [%1+0x44]\n\t" ++ "amfar %0, $LE\n\t" ++ "swi %0, [%1+0x48]\n\t" ++ "amfar %0, $LC\n\t" ++ "swi %0, [%1+0x4c]\n\t" ++ "amfar %0, $ADM_VBASE\n\t" ++ "swi %0, [%1+0x50]\n\t" ++ "amfar %0, $SHFT_CTL0\n\t" ++ "swi %0, [%1+0x54]\n\t" ++ "amfar %0, $SHFT_CTL1\n\t" ++ "swi %0, [%1+0x58]\n\t" ++ "amfar2 %0, $CB_CTL\n\t" ++ "swi %0, [%1+0x5c]\n\t" ++ "amfar2 %0, $CBB0\n\t" ++ "swi %0, [%1+0x60]\n\t" ++ "amfar2 %0, $CBB1\n\t" ++ "swi %0, [%1+0x64]\n\t" ++ "amfar2 %0, $CBB2\n\t" ++ "swi %0, [%1+0x68]\n\t" ++ "amfar2 %0, $CBB3\n\t" ++ "swi %0, [%1+0x6c]\n\t" ++ "amfar2 %0, $CBE0\n\t" ++ "swi %0, [%1+0x70]\n\t" ++ "amfar2 %0, $CBE1\n\t" ++ "swi %0, [%1+0x74]\n\t" ++ "amfar2 %0, $CBE2\n\t" ++ "swi %0, [%1+0x78]\n\t" ++ "amfar2 %0, $CBE3\n\t" ++ "swi %0, [%1+0x7c]\n\t":"=&r" (tmp) ++ :"r"(&tsk->thread.audio), "i"(MSC_CFG_mskAUDIO), ++ "i"(MSC_CFG_offAUDIO) ++ :"memory"); ++ disable_audio(); ++} ++ ++void audioload(struct audio_struct *audioregs) ++{ ++ unsigned int tmp; ++ enable_audio(); ++ asm volatile ("mfsr %0, $MSC_CFG\n\t" ++ "andi %0, %0, %2\n\t" ++ "srli %0, %0, %3\n\t" ++ "slti %0, %0, 2\n\t" ++ "bnez %0, 98f\n\t" ++ "lwi %0, [%1+0x0]\n\t" ++ "amtar %0, $D0.L24\n\t" ++ "lwi %0, [%1+0x4]\n\t" ++ "amtar %0, $D1.L24\n\t" ++ "98:\n\t" ++ "lwi %0, [%1+0x8]\n\t" ++ "amtar %0, $I0\n\t" ++ "lwi %0, [%1+0xc]\n\t" ++ "amtar %0, $I1\n\t" ++ "lwi %0, [%1+0x10]\n\t" ++ "amtar %0, $I2\n\t" ++ "lwi %0, [%1+0x14]\n\t" ++ "amtar %0, $I3\n\t" ++ "lwi %0, [%1+0x18]\n\t" ++ "amtar %0, $I4\n\t" ++ "lwi %0, [%1+0x1c]\n\t" ++ "amtar %0, $I5\n\t" ++ "lwi %0, [%1+0x20]\n\t" ++ "amtar %0, $I6\n\t" ++ "lwi %0, [%1+0x24]\n\t" ++ "amtar %0, $I7\n\t" ++ "lwi %0, [%1+0x28]\n\t" ++ "amtar %0, $M1\n\t" ++ "lwi %0, [%1+0x2c]\n\t" ++ "amtar %0, $M2\n\t" ++ "lwi %0, [%1+0x30]\n\t" ++ "amtar %0, $M3\n\t" ++ "lwi %0, [%1+0x34]\n\t" ++ "amtar %0, $M5\n\t" ++ "lwi %0, [%1+0x38]\n\t" ++ "amtar %0, $M6\n\t" ++ "lwi %0, [%1+0x3c]\n\t" ++ "amtar %0, $M7\n\t" ++ "lwi %0, [%1+0x40]\n\t" ++ "amtar %0, $MOD\n\t" ++ "lwi %0, [%1+0x44]\n\t" ++ "amtar %0, $LB\n\t" ++ "lwi %0, [%1+0x48]\n\t" ++ "amtar %0, $LE\n\t" ++ "lwi %0, [%1+0x4c]\n\t" ++ "amtar %0, $LC\n\t" ++ "lwi %0, [%1+0x50]\n\t" ++ "amtar %0, $ADM_VBASE\n\t" ++ "lwi %0, [%1+0x54]\n\t" ++ "amtar %0, $SHFT_CTL0\n\t" ++ "lwi %0, [%1+0x58]\n\t" ++ "amtar %0, $SHFT_CTL1\n\t" ++ "lwi %0, 
[%1+0x5c]\n\t" ++ "amtar2 %0, $CB_CTL\n\t" ++ "lwi %0, [%1+0x60]\n\t" ++ "amtar2 %0, $CBB0\n\t" ++ "lwi %0, [%1+0x64]\n\t" ++ "amtar2 %0, $CBB1\n\t" ++ "lwi %0, [%1+0x68]\n\t" ++ "amtar2 %0, $CBB2\n\t" ++ "lwi %0, [%1+0x6c]\n\t" ++ "amtar2 %0, $CBB3\n\t" ++ "lwi %0, [%1+0x70]\n\t" ++ "amtar2 %0, $CBE0\n\t" ++ "lwi %0, [%1+0x74]\n\t" ++ "amtar2 %0, $CBE1\n\t" ++ "lwi %0, [%1+0x78]\n\t" ++ "amtar2 %0, $CBE2\n\t" ++ "lwi %0, [%1+0x7c]\n\t" ++ "amtar2 %0, $CBE3\n\t":"=&r" (tmp) ++ :"r"(audioregs), "i"(MSC_CFG_mskAUDIO), ++ "i"(MSC_CFG_offAUDIO)); ++ disable_audio(); ++} ++ ++void do_audio_context_switch(unsigned long error_code, struct pt_regs *regs) ++{ ++ struct task_struct *tsk = current; ++ ++ if (!user_mode(regs)) ++ die("Audio used in kernel", regs, error_code); ++ ++ /* Enable to use audio. */ ++ grab_audio(regs); ++#ifndef CONFIG_UNLAZY_AUDIO //Lazy audio is used ++ if (last_task_used_audio == current) ++ return; ++ ++ if (last_task_used_audio != NULL) ++ /* Other processes audio state, save away */ ++ save_audio(last_task_used_audio); ++ last_task_used_audio = current; ++#endif ++ if (test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) { ++ audioload(¤t->thread.audio); ++ } else { ++ /* First time audio user. */ ++ audioload(&init_audioregs); ++ set_tsk_thread_flag(tsk, TIF_USEDAUDIO); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/audio.h linux-3.4.110/arch/nds32/kernel/audio.h +--- linux-3.4.110.orig/arch/nds32/kernel/audio.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/audio.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,2 @@ ++ ++#define NAN32 0x0UL +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/bios32.c linux-3.4.110/arch/nds32/kernel/bios32.c +--- linux-3.4.110.orig/arch/nds32/kernel/bios32.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/bios32.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,711 @@ ++/* ++ * linux/arch/nds32/kernel/bios32.c ++ * ++ * PCI bios-type initialisation for PCI machines ++ * ++ * Bits taken from various places. ++ * ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++static int debug_pci; ++static int use_firmware; ++ ++/* ++ * We can't use pci_find_device() here since we are ++ * called from interrupt context. ++ */ ++static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, ++ int warn) ++{ ++ struct pci_dev *dev; ++ ++ list_for_each_entry(dev, &bus->devices, bus_list) { ++ u16 status; ++ ++ /* ++ * ignore host bridge - we handle ++ * that separately ++ */ ++ if (dev->bus->number == 0 && dev->devfn == 0) ++ continue; ++ ++ pci_read_config_word(dev, PCI_STATUS, &status); ++ if (status == 0xffff) ++ continue; ++ ++ if ((status & status_mask) == 0) ++ continue; ++ ++ /* clear the status errors */ ++ pci_write_config_word(dev, PCI_STATUS, status & status_mask); ++ ++ if (warn) ++ printk("(%s: %04X) ", pci_name(dev), status); ++ } ++ ++ list_for_each_entry(dev, &bus->devices, bus_list) ++ if (dev->subordinate) ++ pcibios_bus_report_status(dev->subordinate, status_mask, warn); ++} ++ ++void pcibios_report_status(u_int status_mask, int warn) ++{ ++ struct list_head *l; ++ ++ list_for_each(l, &pci_root_buses) { ++ struct pci_bus *bus = pci_bus_b(l); ++ ++ pcibios_bus_report_status(bus, status_mask, warn); ++ } ++} ++ ++/* ++ * We don't use this to fix the device, but initialisation of it. ++ * It's not the correct use for this, but it works. 
++ * Note that the arbiter/ISA bridge appears to be buggy, specifically in ++ * the following area: ++ * 1. park on CPU ++ * 2. ISA bridge ping-pong ++ * 3. ISA bridge master handling of target RETRY ++ * ++ * Bug 3 is responsible for the sound DMA grinding to a halt. We now ++ * live with bug 2. ++ */ ++static void __devinit pci_fixup_83c553(struct pci_dev *dev) ++{ ++ /* ++ * Set memory region to start at address 0, and enable IO ++ */ ++ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ++ PCI_BASE_ADDRESS_SPACE_MEMORY); ++ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO); ++ ++ dev->resource[0].end -= dev->resource[0].start; ++ dev->resource[0].start = 0; ++ ++ /* ++ * All memory requests from ISA to be channelled to PCI ++ */ ++ pci_write_config_byte(dev, 0x48, 0xff); ++ ++ /* ++ * Enable ping-pong on bus master to ISA bridge transactions. ++ * This improves the sound DMA substantially. The fixed ++ * priority arbiter also helps (see below). ++ */ ++ pci_write_config_byte(dev, 0x42, 0x01); ++ ++ /* ++ * Enable PCI retry ++ */ ++ pci_write_config_byte(dev, 0x40, 0x22); ++ ++ /* ++ * We used to set the arbiter to "park on last master" (bit ++ * 1 set), but unfortunately the CyberPro does not park the ++ * bus. We must therefore park on CPU. Unfortunately, this ++ * may trigger yet another bug in the 553. ++ */ ++ pci_write_config_byte(dev, 0x83, 0x02); ++ ++ /* ++ * Make the ISA DMA request lowest priority, and disable ++ * rotating priorities completely. ++ */ ++ pci_write_config_byte(dev, 0x80, 0x11); ++ pci_write_config_byte(dev, 0x81, 0x00); ++ ++ /* ++ * Route INTA input to IRQ 11, and set IRQ11 to be level ++ * sensitive. ++ */ ++ pci_write_config_word(dev, 0x44, 0xb000); ++ outb(0x08, 0x4d1); ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, ++ pci_fixup_83c553); ++ ++static void __devinit pci_fixup_unassign(struct pci_dev *dev) ++{ ++ dev->resource[0].end -= dev->resource[0].start; ++ dev->resource[0].start = 0; ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, ++ pci_fixup_unassign); ++ ++/* ++ * Prevent the PCI layer from seeing the resources allocated to this device ++ * if it is the host bridge by marking it as such. These resources are of ++ * no consequence to the PCI layer (they are handled elsewhere). ++ */ ++static void __devinit pci_fixup_dec21285(struct pci_dev *dev) ++{ ++ int i; ++ ++ if (dev->devfn == 0) { ++ dev->class &= 0xff; ++ dev->class |= PCI_CLASS_BRIDGE_HOST << 8; ++ for (i = 0; i < PCI_NUM_RESOURCES; i++) { ++ dev->resource[i].start = 0; ++ dev->resource[i].end = 0; ++ dev->resource[i].flags = 0; ++ } ++ } ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, ++ pci_fixup_dec21285); ++ ++/* ++ * PCI IDE controllers use non-standard I/O port decoding, respect it. 
++ */ ++static void __devinit pci_fixup_ide_bases(struct pci_dev *dev) ++{ ++ struct resource *r; ++ int i; ++ ++ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) ++ return; ++ ++ for (i = 0; i < PCI_NUM_RESOURCES; i++) { ++ r = dev->resource + i; ++ if ((r->start & ~0x80) == 0x374) { ++ r->start |= 2; ++ r->end = r->start; ++ } ++ } ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); ++ ++/* ++ * Put the DEC21142 to sleep ++ */ ++static void __devinit pci_fixup_dec21142(struct pci_dev *dev) ++{ ++ pci_write_config_dword(dev, 0x40, 0x80000000); ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, ++ pci_fixup_dec21142); ++ ++/* ++ * The CY82C693 needs some rather major fixups to ensure that it does ++ * the right thing. Idea from the Alpha people, with a few additions. ++ * ++ * We ensure that the IDE base registers are set to 1f0/3f4 for the ++ * primary bus, and 170/374 for the secondary bus. Also, hide them ++ * from the PCI subsystem view as well so we won't try to perform ++ * our own auto-configuration on them. ++ * ++ * In addition, we ensure that the PCI IDE interrupts are routed to ++ * IRQ 14 and IRQ 15 respectively. ++ * ++ * The above gets us to a point where the IDE on this device is ++ * functional. However, The CY82C693U _does not work_ in bus ++ * master mode without locking the PCI bus solid. ++ */ ++static void __devinit pci_fixup_cy82c693(struct pci_dev *dev) ++{ ++ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) { ++ u32 base0, base1; ++ ++ if (dev->class & 0x80) { /* primary */ ++ base0 = 0x1f0; ++ base1 = 0x3f4; ++ } else { /* secondary */ ++ base0 = 0x170; ++ base1 = 0x374; ++ } ++ ++ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ++ base0 | PCI_BASE_ADDRESS_SPACE_IO); ++ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, ++ base1 | PCI_BASE_ADDRESS_SPACE_IO); ++ ++ dev->resource[0].start = 0; ++ dev->resource[0].end = 0; ++ dev->resource[0].flags = 0; ++ ++ dev->resource[1].start = 0; ++ dev->resource[1].end = 0; ++ dev->resource[1].flags = 0; ++ } else if (PCI_FUNC(dev->devfn) == 0) { ++ /* ++ * Setup IDE IRQ routing. ++ */ ++ pci_write_config_byte(dev, 0x4b, 14); ++ pci_write_config_byte(dev, 0x4c, 15); ++ ++ /* ++ * Disable FREQACK handshake, enable USB. ++ */ ++ pci_write_config_byte(dev, 0x4d, 0x41); ++ ++ /* ++ * Enable PCI retry, and PCI post-write buffer. ++ */ ++ pci_write_config_byte(dev, 0x44, 0x17); ++ ++ /* ++ * Enable ISA master and DMA post write buffering. ++ */ ++ pci_write_config_byte(dev, 0x45, 0x03); ++ } ++} ++ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, ++ pci_fixup_cy82c693); ++ ++void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) ++{ ++ if (debug_pci) ++ printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev)); ++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); ++} ++ ++/* ++ * If the bus contains any of these devices, then we must not turn on ++ * parity checking of any kind. Currently this is CyberPro 20x0 only. ++ */ ++static inline int pdev_bad_for_parity(struct pci_dev *dev) ++{ ++ return (dev->vendor == PCI_VENDOR_ID_INTERG && ++ (dev->device == PCI_DEVICE_ID_INTERG_2000 || ++ dev->device == PCI_DEVICE_ID_INTERG_2010)); ++} ++ ++/* ++ * Adjust the device resources from bus-centric to Linux-centric. 
++ */ ++static void __devinit ++pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev) ++{ ++ resource_size_t offset; ++ int i; ++ ++ for (i = 0; i < PCI_NUM_RESOURCES; i++) { ++ if (dev->resource[i].start == 0) ++ continue; ++ if (dev->resource[i].flags & IORESOURCE_MEM) ++ offset = root->mem_offset; ++ else ++ offset = root->io_offset; ++ ++ dev->resource[i].start += offset; ++ dev->resource[i].end += offset; ++ } ++} ++ ++static void __devinit ++pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root) ++{ ++ struct pci_dev *dev = bus->self; ++ int i; ++ ++ if (!dev) { ++ /* ++ * Assign root bus resources. ++ */ ++ for (i = 0; i < 3; i++) ++ bus->resource[i] = root->resource[i]; ++ } ++} ++ ++/* ++ * pcibios_fixup_bus - Called after each bus is probed, ++ * but before its children are examined. ++ */ ++void __devinit pcibios_fixup_bus(struct pci_bus *bus) ++{ ++ struct pci_sys_data *root = bus->sysdata; ++ struct pci_dev *dev; ++ u16 features = ++ PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK; ++ ++ pbus_assign_bus_resources(bus, root); ++ ++ /* ++ * Walk the devices on this bus, working out what we can ++ * and can't support. ++ */ ++ list_for_each_entry(dev, &bus->devices, bus_list) { ++ u16 status; ++ ++ pdev_fixup_device_resources(root, dev); ++ ++ pci_read_config_word(dev, PCI_STATUS, &status); ++ ++ /* ++ * If any device on this bus does not support fast back ++ * to back transfers, then the bus as a whole is not able ++ * to support them. Having fast back to back transfers ++ * on saves us one PCI cycle per transaction. ++ */ ++ if (!(status & PCI_STATUS_FAST_BACK)) ++ features &= ~PCI_COMMAND_FAST_BACK; ++ ++ if (pdev_bad_for_parity(dev)) ++ features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); ++ ++ switch (dev->class >> 8) { ++#if defined(CONFIG_ISA) || defined(CONFIG_EISA) ++ case PCI_CLASS_BRIDGE_ISA: ++ case PCI_CLASS_BRIDGE_EISA: ++ /* ++ * If this device is an ISA bridge, set isa_bridge ++ * to point at this device. We will then go looking ++ * for things like keyboard, etc. ++ */ ++ isa_bridge = dev; ++ break; ++#endif ++ case PCI_CLASS_BRIDGE_PCI: ++ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status); ++ status |= ++ PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_MASTER_ABORT; ++ status &= ++ ~(PCI_BRIDGE_CTL_BUS_RESET | ++ PCI_BRIDGE_CTL_FAST_BACK); ++ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status); ++ break; ++ ++ case PCI_CLASS_BRIDGE_CARDBUS: ++ pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, ++ &status); ++ status |= ++ PCI_CB_BRIDGE_CTL_PARITY | ++ PCI_CB_BRIDGE_CTL_MASTER_ABORT; ++ pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, ++ status); ++ break; ++ } ++ } ++ ++ /* ++ * Now walk the devices again, this time setting them up. ++ */ ++ list_for_each_entry(dev, &bus->devices, bus_list) { ++ u16 cmd; ++ ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ cmd |= features; ++ pci_write_config_word(dev, PCI_COMMAND, cmd); ++ ++ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, ++ L1_CACHE_BYTES >> 2); ++ } ++ ++ /* ++ * Propagate the flags to the PCI bridge. ++ */ ++ if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) { ++ if (features & PCI_COMMAND_FAST_BACK) ++ bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK; ++ if (features & PCI_COMMAND_PARITY) ++ bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY; ++ } ++ ++ /* ++ * Report what we did for this bus ++ */ ++ printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n", ++ bus->number, (features & PCI_COMMAND_FAST_BACK) ? 
"en" : "dis"); ++} ++ ++/* ++ * Convert from Linux-centric to bus-centric addresses for bridge devices. ++ */ ++void ++pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, ++ struct resource *res) ++{ ++ struct pci_sys_data *root = dev->sysdata; ++ unsigned long offset = 0; ++ ++ if (res->flags & IORESOURCE_IO) ++ offset = root->io_offset; ++ if (res->flags & IORESOURCE_MEM) ++ offset = root->mem_offset; ++ ++ region->start = res->start - offset; ++ region->end = res->end - offset; ++} ++ ++void __devinit ++pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, ++ struct pci_bus_region *region) ++{ ++ struct pci_sys_data *root = dev->sysdata; ++ unsigned long offset = 0; ++ ++ if (res->flags & IORESOURCE_IO) ++ offset = root->io_offset; ++ if (res->flags & IORESOURCE_MEM) ++ offset = root->mem_offset; ++ ++ res->start = region->start + offset; ++ res->end = region->end + offset; ++} ++ ++#ifdef CONFIG_HOTPLUG ++EXPORT_SYMBOL(pcibios_fixup_bus); ++EXPORT_SYMBOL(pcibios_resource_to_bus); ++EXPORT_SYMBOL(pcibios_bus_to_resource); ++#endif ++ ++/* ++ * This is the standard PCI-PCI bridge swizzling algorithm: ++ * ++ * Dev: 0 1 2 3 ++ * A A B C D ++ * B B C D A ++ * C C D A B ++ * D D A B C ++ * ^^^^^^^^^^ irq pin on bridge ++ */ ++u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 * pinp) ++{ ++ int pin = *pinp - 1; ++ ++ while (dev->bus->self) { ++ pin = (pin + PCI_SLOT(dev->devfn)) & 3; ++ /* ++ * move up the chain of bridges, ++ * swizzling as we go. ++ */ ++ dev = dev->bus->self; ++ } ++ *pinp = pin + 1; ++ ++ return PCI_SLOT(dev->devfn); ++} ++ ++/* ++ * Swizzle the device pin each time we cross a bridge. ++ * This might update pin and returns the slot number. ++ */ ++static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 * pin) ++{ ++ struct pci_sys_data *sys = dev->sysdata; ++ int slot = 0, oldpin = *pin; ++ ++ if (sys->swizzle) ++ slot = sys->swizzle(dev, pin); ++ ++ if (debug_pci) ++ printk("PCI: %s swizzling pin %d => pin %d slot %d\n", ++ pci_name(dev), oldpin, *pin, slot); ++ ++ return slot; ++} ++ ++/* ++ * Map a slot/pin to an IRQ. 
++ */ ++static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ struct pci_sys_data *sys = dev->sysdata; ++ int irq = -1; ++ ++ if (sys->map_irq) ++ irq = sys->map_irq(dev, slot, pin); ++ ++ if (debug_pci) ++ printk("PCI: %s mapping slot %d pin %d => irq %d\n", ++ pci_name(dev), slot, pin, irq); ++ ++ return irq; ++} ++ ++static void __init pcibios_init_hw(struct hw_pci *hw) ++{ ++ struct pci_sys_data *sys = NULL; ++ int ret; ++ int nr, busnr; ++ ++ for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { ++ sys = kmalloc(sizeof(struct pci_sys_data), GFP_KERNEL); ++ if (!sys) ++ panic("PCI: unable to allocate sys data!"); ++ ++ memset(sys, 0, sizeof(struct pci_sys_data)); ++ ++ sys->hw = hw; ++ sys->busnr = busnr; ++ sys->swizzle = hw->swizzle; ++ sys->map_irq = hw->map_irq; ++ sys->resource[0] = &ioport_resource; ++ sys->resource[1] = &iomem_resource; ++ ++ ret = hw->setup(nr, sys); ++ ++ if (ret > 0) { ++ sys->bus = hw->scan(nr, sys); ++ ++ if (!sys->bus) ++ panic("PCI: unable to scan bus!"); ++ ++ busnr = sys->bus->subordinate + 1; ++ ++ list_add(&sys->node, &hw->buses); ++ } else { ++ kfree(sys); ++ if (ret < 0) ++ break; ++ } ++ } ++} ++ ++void __init pci_common_init(struct hw_pci *hw) ++{ ++ struct pci_sys_data *sys; ++ ++ INIT_LIST_HEAD(&hw->buses); ++ ++ if (hw->preinit) ++ hw->preinit(); ++ pcibios_init_hw(hw); ++ if (hw->postinit) ++ hw->postinit(); ++ ++ pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq); ++ ++ list_for_each_entry(sys, &hw->buses, node) { ++ struct pci_bus *bus = sys->bus; ++ ++ if (!use_firmware) { ++ /* ++ * Size the bridge windows. ++ */ ++ pci_bus_size_bridges(bus); ++ ++ /* ++ * Assign resources. ++ */ ++ pci_bus_assign_resources(bus); ++ } ++ ++ /* ++ * Tell drivers about devices found. ++ */ ++ pci_bus_add_devices(bus); ++ } ++} ++ ++char *__devinit pcibios_setup(char *str) ++{ ++ if (!strcmp(str, "debug")) { ++ debug_pci = 1; ++ return NULL; ++ } else if (!strcmp(str, "firmware")) { ++ use_firmware = 1; ++ return NULL; ++ } ++ return str; ++} ++ ++/* ++ * From arch/i386/kernel/pci-i386.c: ++ * ++ * We need to avoid collisions with `mirrored' VGA ports ++ * and other strange ISA hardware, so we always want the ++ * addresses to be allocated in the 0x000-0x0ff region ++ * modulo 0x400. ++ * ++ * Why? Because some silly external IO cards only decode ++ * the low 10 bits of the IO address. The 0x00-0xff region ++ * is reserved for motherboard devices that decode all 16 ++ * bits, so it's ok to allocate at, say, 0x2800-0x28ff, ++ * but we want to try to avoid allocating at 0x2900-0x2bff ++ * which might be mirrored at 0x0100-0x03ff.. ++ */ ++void pcibios_align_resource(void *data, struct resource *res, ++ resource_size_t size, resource_size_t align) ++{ ++ resource_size_t start = res->start; ++ ++ if (res->flags & IORESOURCE_IO && start & 0x300) ++ start = (start + 0x3ff) & ~0x3ff; ++ ++ res->start = (start + align - 1) & ~(align - 1); ++} ++ ++/** ++ * pcibios_enable_device - Enable I/O and memory. 
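The bridge swizzling table shown a little earlier can be exercised with a small stand-alone C sketch. It is not part of the patch; it only repeats the (pin + slot) & 3 folding that pci_std_swizzle() performs at each bridge crossing, using a made-up slot chain.

#include <stdio.h>

/*
 * Stand-alone sketch (not part of the patch): fold an interrupt pin
 * through a chain of bridges the way pci_std_swizzle() does, i.e.
 * pin = (pin + slot) & 3 for every bridge that is crossed.
 * Pins are 1-based (INTA = 1 .. INTD = 4); slots[] lists the slot
 * number added at each crossing, starting with the device's own slot.
 */
static int swizzle_pin(int pin, const int *slots, int depth)
{
    int p = pin - 1;                 /* work 0-based internally */
    int i;

    for (i = 0; i < depth; i++)
        p = (p + slots[i]) & 3;      /* same "& 3" folding as the patch */

    return p + 1;                    /* back to 1-based INTA..INTD */
}

int main(void)
{
    int slots[] = { 2, 1 };          /* hypothetical path: slot 2, then slot 1 */

    printf("INTA at the device shows up as INT%c upstream\n",
           'A' + swizzle_pin(1, slots, 2) - 1);
    return 0;
}

With the example chain above, INTA on the device arrives as INTD, which matches the table row for pin A at device 2 followed by one more crossing at slot 1.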
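The 10-bit ISA decode rule described before pcibios_align_resource() can likewise be checked with a short, self-contained C example. The addresses and the 0x100 alignment used below are illustrative values, not something the patch prescribes.

#include <stdio.h>

/*
 * Illustration (not from the patch) of the I/O alignment rule in
 * pcibios_align_resource(): an I/O address whose offset inside its 1K
 * block is 0x100 or higher gets bumped to the next 0x400 boundary so
 * that it cannot alias a card that only decodes 10 address bits.
 */
static unsigned long align_io(unsigned long start, unsigned long align)
{
    if (start & 0x300)                      /* offset 0x100-0x3ff inside the 1K block */
        start = (start + 0x3ff) & ~0x3ffUL; /* round up to the next 0x400 */
    return (start + align - 1) & ~(align - 1);
}

int main(void)
{
    /* example values only: 0x2900 mirrors 0x0100 on 10-bit decoders */
    printf("0x2900 -> 0x%lx\n", align_io(0x2900, 0x100));
    /* 0x2800 already sits in the safe 0x?00-0x?ff window */
    printf("0x2800 -> 0x%lx\n", align_io(0x2800, 0x100));
    return 0;
}

The first address is pushed up to 0x2c00 while the second is left alone, which is exactly the behaviour the comment in the patch describes.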
++ * @dev: PCI device to be enabled ++ */ ++int pcibios_enable_device(struct pci_dev *dev, int mask) ++{ ++ u16 cmd, old_cmd; ++ int idx; ++ struct resource *r; ++ ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ old_cmd = cmd; ++ for (idx = 0; idx < 6; idx++) { ++ /* Only set up the requested stuff */ ++ if (!(mask & (1 << idx))) ++ continue; ++ ++ r = dev->resource + idx; ++ if (!r->start && r->end) { ++ printk(KERN_ERR "PCI: Device %s not available because" ++ " of resource collisions\n", pci_name(dev)); ++ return -EINVAL; ++ } ++ if (r->flags & IORESOURCE_IO) ++ cmd |= PCI_COMMAND_IO; ++ if (r->flags & IORESOURCE_MEM) ++ cmd |= PCI_COMMAND_MEMORY; ++ } ++ ++ /* ++ * Bridges (eg, cardbus bridges) need to be fully enabled ++ */ ++ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) ++ cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY; ++ ++ if (cmd != old_cmd) { ++ printk("PCI: enabling device %s (%04x -> %04x)\n", ++ pci_name(dev), old_cmd, cmd); ++ pci_write_config_word(dev, PCI_COMMAND, cmd); ++ } ++ return 0; ++} ++ ++int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ enum pci_mmap_state mmap_state, int write_combine) ++{ ++ struct pci_sys_data *root = dev->sysdata; ++ unsigned long phys; ++ ++ if (mmap_state == pci_mmap_io) { ++ return -EINVAL; ++ } else { ++ phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT); ++ } ++ ++ /* ++ * Mark this as IO ++ */ ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, phys, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ return -EAGAIN; ++ ++ return 0; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/calls.S linux-3.4.110/arch/nds32/kernel/calls.S +--- linux-3.4.110.orig/arch/nds32/kernel/calls.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/calls.S 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,400 @@ ++/* ++ * linux/arch/nds32/kernel/calls.S ++ * ++ * Copyright (C) 1995-2004 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * This file is included twice in entry-common.S ++ */ ++ ++/* 0 */ CALL(sys_restart_syscall) ++ CALL(sys_exit) ++ CALL(sys_fork_wrapper) ++ CALL(sys_read) ++ CALL(sys_write) ++/* 5 */ CALL(sys_open) ++ CALL(sys_close) ++ CALL(sys_waitpid) ++ CALL(sys_creat) ++ CALL(sys_link) ++/* 10 */ CALL(sys_unlink) ++ CALL(sys_execve_wrapper) ++ CALL(sys_chdir) ++ CALL(sys_time) /* used by libc4 */ ++ CALL(sys_mknod) ++/* 15 */ CALL(sys_chmod) ++ CALL(sys_lchown16) ++ CALL(sys_ni_syscall) /* was sys_break */ ++ CALL(sys_ni_syscall) /* was sys_stat */ ++ CALL(sys_lseek) ++/* 20 */ CALL(sys_getpid) ++ CALL(sys_mount) ++ CALL(sys_oldumount) /* used by libc4 */ ++ CALL(sys_setuid16) ++ CALL(sys_getuid16) ++/* 25 */ CALL(sys_stime) ++ CALL(sys_ptrace) ++ CALL(sys_alarm) /* used by libc4 */ ++ CALL(sys_ni_syscall) /* was sys_fstat */ ++ CALL(sys_pause) ++/* 30 */ CALL(sys_utime) /* used by libc4 */ ++ CALL(sys_ni_syscall) /* was sys_stty */ ++ CALL(sys_ni_syscall) /* was sys_getty */ ++ CALL(sys_access) ++ CALL(sys_nice) ++/* 35 */ CALL(sys_ni_syscall) /* was sys_ftime */ ++ CALL(sys_sync) ++ CALL(sys_kill) ++ CALL(sys_rename) ++ CALL(sys_mkdir) ++/* 40 */ CALL(sys_rmdir) ++ CALL(sys_dup) ++ CALL(sys_pipe) ++ CALL(sys_times) ++ CALL(sys_ni_syscall) /* was sys_prof */ ++/* 45 */ CALL(sys_brk) ++ CALL(sys_setgid16) ++ CALL(sys_getgid16) ++ CALL(sys_ni_syscall) /* was sys_signal */ ++ CALL(sys_geteuid16) ++/* 50 */ CALL(sys_getegid16) ++ CALL(sys_acct) ++ CALL(sys_umount) ++ CALL(sys_ni_syscall) /* was sys_lock */ ++ CALL(sys_ioctl) ++/* 55 */ CALL(sys_fcntl) ++ CALL(sys_ni_syscall) /* was sys_mpx */ ++ CALL(sys_setpgid) ++ CALL(sys_ni_syscall) /* was sys_ulimit */ ++ CALL(sys_cacheflush) /* was sys_olduname */ ++/* 60 */ CALL(sys_umask) ++ CALL(sys_chroot) ++ CALL(sys_ustat) ++ CALL(sys_dup2) ++ CALL(sys_getppid) ++/* 65 */ CALL(sys_getpgrp) ++ CALL(sys_setsid) ++ CALL(sys_sigaction) ++ CALL(sys_ni_syscall) /* was sys_sgetmask */ ++ CALL(sys_ni_syscall) /* was sys_ssetmask */ ++/* 70 */ CALL(sys_setreuid16) ++ CALL(sys_setregid16) ++ CALL(sys_sigsuspend_wrapper) ++ CALL(sys_sigpending) ++ CALL(sys_sethostname) ++/* 75 */ CALL(sys_setrlimit) ++ CALL(sys_old_getrlimit) /* used by libc4 */ ++ CALL(sys_getrusage) ++ CALL(sys_gettimeofday) ++ CALL(sys_settimeofday) ++/* 80 */ CALL(sys_getgroups16) ++ CALL(sys_setgroups16) ++ CALL(old_select) /* used by libc4 */ ++ CALL(sys_symlink) ++ CALL(sys_ni_syscall) /* was sys_lstat */ ++/* 85 */ CALL(sys_readlink) ++ CALL(sys_uselib) ++ CALL(sys_swapon) ++ CALL(sys_reboot) ++ CALL(sys_old_readdir) /* used by libc4 */ ++/* 90 */ CALL(sys_old_mmap) /* used by libc4 */ ++ CALL(sys_munmap) ++ CALL(sys_truncate) ++ CALL(sys_ftruncate) ++ CALL(sys_fchmod) ++/* 95 */ CALL(sys_fchown16) ++ CALL(sys_getpriority) ++ CALL(sys_setpriority) ++ CALL(sys_ni_syscall) /* was sys_profil */ ++ CALL(sys_statfs) ++/* 100 */ CALL(sys_fstatfs) ++ CALL(sys_ni_syscall) ++ CALL(sys_socketcall) ++ CALL(sys_syslog) ++ CALL(sys_setitimer) ++/* 105 */ CALL(sys_getitimer) ++ CALL(sys_newstat) ++ CALL(sys_newlstat) ++ CALL(sys_newfstat) ++ CALL(sys_ni_syscall) /* was sys_uname */ ++/* 110 */ CALL(sys_ni_syscall) /* was sys_iopl */ ++ CALL(sys_vhangup) ++ CALL(sys_ni_syscall) ++ CALL(sys_syscall) /* call a syscall */ ++ CALL(sys_wait4) ++/* 115 */ CALL(sys_swapoff) ++ CALL(sys_sysinfo) ++ CALL(sys_ipc) ++ CALL(sys_fsync) ++ CALL(sys_sigreturn_wrapper) ++/* 120 */ CALL(sys_clone_wrapper) ++ CALL(sys_setdomainname) ++ CALL(sys_newuname) ++ CALL(sys_ni_syscall) ++ CALL(sys_adjtimex) ++/* 125 */ 
CALL(sys_mprotect) ++ CALL(sys_sigprocmask) ++ CALL(sys_ni_syscall) /* was sys_create_module */ ++ CALL(sys_init_module) ++ CALL(sys_delete_module) ++/* 130 */ CALL(sys_ni_syscall) /* was sys_get_kernel_syms */ ++ CALL(sys_quotactl) ++ CALL(sys_getpgid) ++ CALL(sys_fchdir) ++ CALL(sys_bdflush) ++/* 135 */ CALL(sys_sysfs) ++ CALL(sys_personality) ++ CALL(sys_ni_syscall) /* was _sys_afs_syscall */ ++ CALL(sys_setfsuid16) ++ CALL(sys_setfsgid16) ++/* 140 */ CALL(sys_llseek) ++ CALL(sys_getdents) ++ CALL(sys_select) ++ CALL(sys_flock) ++ CALL(sys_msync) ++/* 145 */ CALL(sys_readv) ++ CALL(sys_writev) ++ CALL(sys_getsid) ++ CALL(sys_fdatasync) ++ CALL(sys_sysctl) ++/* 150 */ CALL(sys_mlock) ++ CALL(sys_munlock) ++ CALL(sys_mlockall) ++ CALL(sys_munlockall) ++ CALL(sys_sched_setparam) ++/* 155 */ CALL(sys_sched_getparam) ++ CALL(sys_sched_setscheduler) ++ CALL(sys_sched_getscheduler) ++ CALL(sys_sched_yield) ++ CALL(sys_sched_get_priority_max) ++/* 160 */ CALL(sys_sched_get_priority_min) ++ CALL(sys_sched_rr_get_interval) ++ CALL(sys_nanosleep) ++ CALL(sys_nds32_mremap) ++ CALL(sys_setresuid16) ++/* 165 */ CALL(sys_getresuid16) ++ CALL(sys_getpagesize) ++ CALL(sys_ni_syscall) /* was sys_query_module */ ++ CALL(sys_poll) ++ CALL(sys_ni_syscall) /* was nfsservctl */ ++/* 170 */ CALL(sys_setresgid16) ++ CALL(sys_getresgid16) ++ CALL(sys_prctl) ++ CALL(sys_rt_sigreturn_wrapper) ++ CALL(sys_rt_sigaction) ++/* 175 */ CALL(sys_rt_sigprocmask) ++ CALL(sys_rt_sigpending) ++ CALL(sys_rt_sigtimedwait) ++ CALL(sys_rt_sigqueueinfo) ++ CALL(sys_rt_sigsuspend_wrapper) ++/* 180 */ CALL(sys_pread64) ++ CALL(sys_pwrite64) ++ CALL(sys_chown16) ++ CALL(sys_getcwd) ++ CALL(sys_capget) ++/* 185 */ CALL(sys_capset) ++ CALL(sys_sigaltstack_wrapper) ++ CALL(sys_sendfile) ++ CALL(sys_ni_syscall) ++ CALL(sys_ni_syscall) ++/* 190 */ CALL(sys_vfork_wrapper) ++ CALL(sys_getrlimit) ++ CALL(sys_mmap2) ++ CALL(sys_truncate64) ++ CALL(sys_ftruncate64) ++/* 195 */ CALL(sys_stat64) ++ CALL(sys_lstat64) ++ CALL(sys_fstat64) ++ CALL(sys_lchown) ++ CALL(sys_getuid) ++/* 200 */ CALL(sys_getgid) ++ CALL(sys_geteuid) ++ CALL(sys_getegid) ++ CALL(sys_setreuid) ++ CALL(sys_setregid) ++/* 205 */ CALL(sys_getgroups) ++ CALL(sys_setgroups) ++ CALL(sys_fchown) ++ CALL(sys_setresuid) ++ CALL(sys_getresuid) ++/* 210 */ CALL(sys_setresgid) ++ CALL(sys_getresgid) ++ CALL(sys_chown) ++ CALL(sys_setuid) ++ CALL(sys_setgid) ++/* 215 */ CALL(sys_setfsuid) ++ CALL(sys_setfsgid) ++ CALL(sys_getdents64) ++ CALL(sys_pivot_root) ++ CALL(sys_mincore) ++/* 220 */ CALL(sys_madvise) ++ CALL(sys_fcntl64) ++ CALL(sys_ni_syscall) /* TUX */ ++ CALL(sys_ni_syscall) ++ CALL(sys_gettid) ++/* 225 */ CALL(sys_readahead) ++ CALL(sys_setxattr) ++ CALL(sys_lsetxattr) ++ CALL(sys_fsetxattr) ++ CALL(sys_getxattr) ++/* 230 */ CALL(sys_lgetxattr) ++ CALL(sys_fgetxattr) ++ CALL(sys_listxattr) ++ CALL(sys_llistxattr) ++ CALL(sys_flistxattr) ++/* 235 */ CALL(sys_removexattr) ++ CALL(sys_lremovexattr) ++ CALL(sys_fremovexattr) ++ CALL(sys_tkill) ++ CALL(sys_sendfile64) ++/* 240 */ CALL(sys_futex_wrapper) ++ CALL(sys_sched_setaffinity) ++ CALL(sys_sched_getaffinity) ++ CALL(sys_io_setup) ++ CALL(sys_io_destroy) ++/* 245 */ CALL(sys_io_getevents) ++ CALL(sys_io_submit) ++ CALL(sys_io_cancel) ++ CALL(sys_exit_group) ++ CALL(sys_lookup_dcookie) ++/* 250 */ CALL(sys_epoll_create) ++ CALL(sys_epoll_ctl) ++ CALL(sys_epoll_wait) ++ CALL(sys_remap_file_pages) ++ CALL(sys_ni_syscall) /* sys_set_thread_area */ ++/* 255 */ CALL(sys_ni_syscall) /* sys_get_thread_area */ ++ 
CALL(sys_set_tid_address) ++ CALL(sys_timer_create) ++ CALL(sys_timer_settime) ++ CALL(sys_timer_gettime) ++/* 260 */ CALL(sys_timer_getoverrun) ++ CALL(sys_timer_delete) ++ CALL(sys_clock_settime) ++ CALL(sys_clock_gettime) ++ CALL(sys_clock_getres) ++/* 265 */ CALL(sys_clock_nanosleep) ++ CALL(sys_statfs64) ++ CALL(sys_fstatfs64) ++ CALL(sys_tgkill) ++ CALL(sys_utimes) ++/* 270 */ CALL(sys_fadvise64_64_wrapper) ++ CALL(sys_pciconfig_iobase) ++ CALL(sys_pciconfig_read) ++ CALL(sys_pciconfig_write) ++ CALL(sys_mq_open) ++/* 275 */ CALL(sys_mq_unlink) ++ CALL(sys_mq_timedsend) ++ CALL(sys_mq_timedreceive) ++ CALL(sys_mq_notify) ++ CALL(sys_mq_getsetattr) ++/* 280 */ CALL(sys_waitid) ++ CALL(sys_add_key) ++ CALL(sys_request_key) ++ CALL(sys_keyctl) ++ CALL(sys_ioprio_set) ++/* 285 */ CALL(sys_ioprio_get) ++ CALL(sys_inotify_init) ++ CALL(sys_inotify_add_watch) ++ CALL(sys_inotify_rm_watch) ++ CALL(sys_migrate_pages) ++/* 290 */ CALL(sys_openat) ++ CALL(sys_mkdirat) ++ CALL(sys_mknodat) ++ CALL(sys_fchownat) ++ CALL(sys_futimesat) ++/* 295 */ CALL(sys_fstatat64) ++ CALL(sys_unlinkat) ++ CALL(sys_renameat) ++ CALL(sys_linkat) ++ CALL(sys_symlinkat) ++/* 300 */ CALL(sys_readlinkat) ++ CALL(sys_fchmodat) ++ CALL(sys_faccessat) ++ CALL(sys_pselect6) /* sys_pselect6 */ ++ CALL(sys_ppoll) /* sys_ppoll */ ++/* 305 */ CALL(sys_unshare) ++ CALL(sys_set_robust_list) ++ CALL(sys_get_robust_list) ++ CALL(sys_splice) ++ CALL(sys_sync_file_range2) ++/* 310 */ CALL(sys_tee) ++ CALL(sys_vmsplice) ++ CALL(sys_move_pages) ++ CALL(sys_fadvise64) ++ CALL(sys_utimensat) ++/* 315 */ CALL(sys_signalfd) ++ CALL(sys_timerfd_create) ++ CALL(sys_eventfd) ++ CALL(sys_fallocate) ++ CALL(sys_timerfd_settime) ++/* 320 */ CALL(sys_timerfd_gettime) ++ CALL(sys_getcpu) ++ CALL(sys_signalfd4) ++ CALL(sys_eventfd2) ++ CALL(sys_epoll_create1) ++/* 325 */ CALL(sys_dup3) ++ CALL(sys_pipe2) ++ CALL(sys_inotify_init1) ++ CALL(sys_kexec_load) ++ CALL(sys_accept) ++/* 330 */ CALL(sys_bind) ++ CALL(sys_connect) ++ CALL(sys_getpeername) ++ CALL(sys_getsockname) ++ CALL(sys_getsockopt) ++/* 335 */ CALL(sys_listen) ++ CALL(sys_recv) ++ CALL(sys_recvfrom) ++ CALL(sys_recvmsg) ++ CALL(sys_send) ++/* 340 */ CALL(sys_sendmsg) ++ CALL(sys_sendto) ++ CALL(sys_setsockopt) ++ CALL(sys_shutdown) ++ CALL(sys_socket) ++/* 345 */ CALL(sys_socketpair) ++ CALL(sys_prlimit64) ++ CALL(sys_accept4) ++ CALL(sys_recvmmsg) ++ CALL(sys_sendmmsg) ++/* 350 */ CALL(sys_fanotify_init) ++ CALL(sys_fanotify_mark) ++ CALL(sys_msgget) ++ CALL(sys_msgctl) ++ CALL(sys_msgrcv) ++/* 355 */ CALL(sys_msgsnd) ++ CALL(sys_semget) ++ CALL(sys_semctl) ++ CALL(sys_semtimedop) ++ CALL(sys_semop) ++/* 360 */ CALL(sys_shmget) ++ CALL(sys_shmctl) ++ CALL(sys_shmat) ++ CALL(sys_shmdt) ++ CALL(sys_syncfs) ++/* 365 */ CALL(sys_setns) ++ CALL(sys_name_to_handle_at) ++ CALL(sys_open_by_handle_at) ++ CALL(sys_process_vm_readv) ++ CALL(sys_process_vm_writev) ++/* 370 */ CALL(sys_clock_adjtime) ++ CALL(sys_get_mempolicy) ++ CALL(sys_mbind) ++ CALL(sys_perf_event_open) ++ CALL(sys_preadv) ++/* 375 */ CALL(sys_pwritev) ++ CALL(sys_rt_tgsigqueueinfo) ++ CALL(sys_set_mempolicy) ++ CALL(sys_epoll_pwait) ++ ++#ifndef syscalls_counted ++.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls ++#define syscalls_counted ++#endif ++ .rept syscalls_padding ++ CALL(sys_ni_syscall) ++ .endr +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/early_printk.c linux-3.4.110/arch/nds32/kernel/early_printk.c +--- linux-3.4.110.orig/arch/nds32/kernel/early_printk.c 1970-01-01 01:00:00.000000000 +0100 ++++ 
linux-3.4.110/arch/nds32/kernel/early_printk.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,126 @@ ++/* ++ * Earlyprintk support. ++ * ++ * Copyright (C) 2012 ARM Ltd. ++ * Author: Catalin Marinas ++ * ++ * This program is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++extern void __iomem *early_io_map(phys_addr_t phys); ++static void __iomem *early_base; ++static void (*printch) (char ch); ++ ++/* ++ * 8250/16550 (8-bit aligned registers) single character TX. ++ */ ++static void uart8250_8bit_printch(char ch) ++{ ++ while (!(readb(early_base + UART_LSR) & UART_LSR_THRE)) ; ++ writeb(ch, early_base + UART_TX); ++} ++ ++/* ++ * 8250/16550 (32-bit aligned registers) single character TX. ++ */ ++static void uart8250_32bit_printch(char ch) ++{ ++ while (!(readl(early_base + (UART_LSR << 2)) & UART_LSR_THRE)) ; ++ writel(ch, early_base + (UART_TX << 2)); ++} ++ ++struct earlycon_match { ++ const char *name; ++ void (*printch) (char ch); ++}; ++ ++static const struct earlycon_match earlycon_match[] __initconst = { ++ {.name = "uart8250-8bit",.printch = uart8250_8bit_printch,}, ++ {.name = "uart8250-32bit",.printch = uart8250_32bit_printch,}, ++ {} ++}; ++ ++static void early_write(struct console *con, const char *s, unsigned n) ++{ ++ while (n-- > 0) { ++ if (*s == '\n') ++ printch('\r'); ++ printch(*s); ++ s++; ++ } ++} ++ ++static struct console early_console_dev = { ++ .name = "earlycon", ++ .write = early_write, ++ .flags = CON_PRINTBUFFER | CON_BOOT, ++ .index = -1, ++}; ++ ++/* ++ * Parse earlyprintk=... parameter in the format: ++ * ++ * [,][,] ++ * ++ * and register the early console. It is assumed that the UART has been ++ * initialised by the bootloader already. 
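The option format described in the comment above, a console name optionally followed by a hex MMIO address, can be sketched in ordinary user-space C. This is only an illustration of the parsing that setup_early_printk() below performs; the 0x90000000 address is a made-up example, and only the two console names registered by the patch are real.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * User-space sketch (not kernel code) of the earlyprintk= format:
 * a driver name, optionally followed by ",0x<hex mmio address>".
 * The names below are the two the patch registers; everything else
 * is illustrative.
 */
static const char *known[] = { "uart8250-8bit", "uart8250-32bit", NULL };

static int parse_earlyprintk(const char *arg, const char **name,
                             unsigned long *addr)
{
    int i;

    for (i = 0; known[i]; i++) {
        size_t len = strlen(known[i]);

        if (!strncmp(arg, known[i], len)) {
            *name = known[i];
            arg += len;
            /* optional ",0x..." physical address, as in the patch */
            if (!strncmp(arg, ",0x", 3))
                *addr = strtoul(arg + 1, NULL, 16);
            return 0;
        }
    }
    return -1;    /* unknown console name */
}

int main(void)
{
    const char *name = NULL;
    unsigned long addr = 0;

    if (!parse_earlyprintk("uart8250-32bit,0x90000000", &name, &addr))
        printf("console %s at 0x%lx\n", name, addr);
    return 0;
}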
++ */ ++static int __init setup_early_printk(char *buf) ++{ ++ const struct earlycon_match *match = earlycon_match; ++ phys_addr_t paddr = 0; ++ ++ if (!buf) { ++ pr_warning("No earlyprintk arguments passed.\n"); ++ return 0; ++ } ++ ++ while (match->name) { ++ size_t len = strlen(match->name); ++ if (!strncmp(buf, match->name, len)) { ++ buf += len; ++ break; ++ } ++ match++; ++ } ++ if (!match->name) { ++ pr_warning("Unknown earlyprintk arguments: %s\n", buf); ++ return 0; ++ } ++ ++ /* I/O address */ ++ if (!strncmp(buf, ",0x", 3)) { ++ char *e; ++ paddr = simple_strtoul(buf + 1, &e, 16); ++ buf = e; ++ } ++ ++ if (paddr) ++ early_base = early_io_map(paddr); ++ printch = match->printch; ++ //early_console = &early_console_dev; ++ register_console(&early_console_dev); ++ ++ return 0; ++} ++ ++early_param("earlyprintk", setup_early_printk); +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/elfchk.c linux-3.4.110/arch/nds32/kernel/elfchk.c +--- linux-3.4.110.orig/arch/nds32/kernel/elfchk.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/elfchk.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,190 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ELF_CHECKING_OS ++#include "nds32-elf.h" ++ ++extern struct proc_dir_entry *proc_dir_cpu; ++ ++#ifdef CONFIG_ELFCHK_DEFAULT_ENABLE ++static int elf_check_en = 1; ++#else ++static int elf_check_en = 0; ++#endif ++ ++struct reg_struct { ++ ++ const char *name; ++ int idx; ++}; ++ ++static struct reg_struct regs[] = { ++ ++ {"cpu_ver", CPU_SR_INDEX(0, 0, 0)}, ++ {"icm_cfg", CPU_SR_INDEX(0, 1, 0)}, ++ {"dcm_cfg", CPU_SR_INDEX(0, 2, 0)}, ++ {"mmu_cfg", CPU_SR_INDEX(0, 3, 0)}, ++ {"msc_cfg", CPU_SR_INDEX(0, 4, 0)}, ++#ifdef CONFIG_FPU ++ {"fucop_exist", CPU_SR_INDEX(0, 5, 0)} ++#endif ++}; ++ ++#ifdef CONFIG_FPU ++static unsigned int read_fpu_fpcfg(void) ++{ ++ unsigned int fpcfg = 0; ++ ++ enable_fpu(); ++ asm volatile ("fmfcfg %0\n\t":"=&r" (fpcfg)); ++ disable_fpu(); ++ ++ return fpcfg; ++} ++#endif ++ ++static unsigned int read_cpu_sr(unsigned int idx) ++{ ++ switch (idx) { ++ ++ case CPU_SR_INDEX(0, 0, 0): ++ return GET_CPU_VER(); ++ case CPU_SR_INDEX(0, 1, 0): ++ return GET_ICM_CFG(); ++ case CPU_SR_INDEX(0, 2, 0): ++ return GET_DCM_CFG(); ++ case CPU_SR_INDEX(0, 3, 0): ++ return GET_MMU_CFG(); ++ case CPU_SR_INDEX(0, 4, 0): ++ return GET_MSC_CFG(); ++ case CPU_SR_INDEX(0, 5, 0): ++ return GET_FUCOP_EXIST(); ++ ++ default: ++ printk(KERN_ERR ++ "%s: invalid system register index (%d, %d, %d)\n", ++ __func__, (idx >> 7) & 0x7, (idx >> 3) & 0xf, idx & 0x7); ++ ++ return SR_NOT_EXIST; ++ } ++} ++ ++static unsigned int reg_read_callback(unsigned int idx) ++{ ++ if (HW_IS_CPU(idx)) { ++ ++ return read_cpu_sr(SR_INDEX(idx)); ++ } else if (HW_IS_FPU(idx)) { ++ ++#ifdef CONFIG_FPU ++ if (SR_FPU_FPCFG == SR_INDEX(idx)) ++ return read_fpu_fpcfg(); ++ else ++ return SR_NOT_EXIST; ++#endif ++ } else if (HW_IS_AUDIO(idx)) { ++ ++ return SR_NOT_EXIST; ++ } ++ ++ return SR_NOT_EXIST; ++} ++ ++#define BUFLEN 1024 ++ ++int do_elf_check_arch(const struct elf32_hdr *hdr) ++{ ++ static char msg_buf[BUFLEN]; ++ unsigned int err; ++ int buf_status = 0; ++ ++ if (!elf_check_en) ++ return 1; ++ ++ err = ++ elf_check((void *)hdr, reg_read_callback, msg_buf, BUFLEN, ++ &buf_status); ++ ++ if (err) ++ printk(KERN_WARNING "%s", msg_buf); ++ ++ return !err; ++} ++ ++static int proc_elf_check_read(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ return 
sprintf(page, "%d\n", elf_check_en); ++} ++ ++#define INPUTLEN 10 ++static int proc_elf_check_write(struct file *file, const char __user * buffer, ++ unsigned long count, void *data) ++{ ++ unsigned long en; ++ char inbuf[INPUTLEN]; ++ ++ if (count > INPUTLEN - 1) ++ count = INPUTLEN - 1; ++ ++ if (copy_from_user(inbuf, buffer, count)) ++ return -EFAULT; ++ ++ inbuf[count] = '\0'; ++ ++ if (!sscanf(inbuf, "%lu", &en) || en > 3) ++ return -EFAULT; ++ ++ elf_check_en = en & 0x01; ++ ++ return count; ++} ++ ++static int proc_elf_check_read_reg(char *page, char **start, ++ off_t off, int count, int *eof, void *data) ++{ ++ unsigned long val = read_cpu_sr(*(int *)data); ++ ++ return sprintf(page, "0x%08lx\n", val); ++} ++ ++int __init elf_check_init(void) ++{ ++ static struct proc_dir_entry *res_elf_check; ++ int i; ++ ++ if (!proc_dir_cpu) { ++ if (!(proc_dir_cpu = proc_mkdir("cpu", NULL))) ++ return -ENOMEM; ++ } ++ ++ res_elf_check = ++ create_proc_entry("elf_core_checking", S_IWUSR | S_IRUGO, ++ proc_dir_cpu); ++ if (!res_elf_check) ++ return -ENOMEM; ++ ++ res_elf_check->read_proc = proc_elf_check_read; ++ res_elf_check->write_proc = proc_elf_check_write; ++ ++ for (i = 0; i < ARRAY_SIZE(regs); i++) { ++ ++ if (!create_proc_read_entry(regs[i].name, S_IWUSR | S_IRUGO, ++ proc_dir_cpu, ++ proc_elf_check_read_reg, ++ ®s[i].idx)) ++ ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++module_init(elf_check_init); +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ex-entry.S linux-3.4.110/arch/nds32/kernel/ex-entry.S +--- linux-3.4.110.orig/arch/nds32/kernel/ex-entry.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ex-entry.S 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,294 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ .macro push_zol ++#ifdef CONFIG_HWZOL ++ mfusr $r10, $LB ++ mfusr $r11, $LE ++ mfusr $r12, $LC ++#endif ++ .endm ++ .macro save_user_regs ++ /* $p1 is VA of percpu data */ ++ sethi $p0, hi20(PAGE_OFFSET - PHYS_OFFSET) ++ add $p1, $p1, $p0 ++ ++ /* change to kernel stack */ ++ mfsr $p0, $IPSW ++ andi $p0, $p0, #PSW_mskPOM ++ cmovz $sp, $r25, $p0 ++ ++ /* 8 byte aligned */ ++ movi $r25, #~7 ++ and $r25, $sp, $r25 ++ smw.adm $sp, [$r25], $sp, #0x1 ++ move $sp, $r25 ++ ++#if defined(CONFIG_FPU) || defined(CONFIG_AUDIO) ++ mfsr $r25, $FUCOP_CTL ++ push $r25 ++ bclr $r25, $r25, #FUCOP_CTL_offCP0EN ++ mtsr $r25, $FUCOP_CTL ++#else ++ addi $sp, $sp, -4 ++#endif ++ pushm $r28, $r30 ++ ++ /* zero fp if user mode*/ ++ movi $r25, #0x0 ++ cmovz $fp, $r25, $p0 ++ ++ lwi $r25, [$p1+#0x0] ++ pushm $r0, $r25 ++#ifdef CONFIG_HWZOL ++ push_zol ++#endif ++ mfsr $r9, $P_P1 ++ mfsr $r8, $P_P0 ++ mfsr $r7, $P_IPC ++ mfsr $r6, $P_IPSW ++ mfsr $r18, $IPC ++ mfsr $r17, $IPSW ++ mfsr $r16, $PSW ++ pushm $r6, $r13 ++ push $r0 ++ lwi $p0, [$p1+#0x8] ++ push $p0 ++ pushm $r16, $r18 ++ ++ andi $r19, $r16, #PSW_mskINTL ++ slti $r20, $r19, #4 ++ bnez $r20, 1f ++ addi $r21, $r16, #-2 ++ mtsr $r21, $PSW ++ isb ++1: ++ addi $sp, $sp, -S_OFF ++ .endm ++ ++ .text ++ ++/* ++ * Exception Vector ++ */ ++exception_handlers: ++ .long unhandled_exceptions !Reset/NMI ++ .long unhandled_exceptions !TLB fill ++ .long do_page_fault !PTE not present ++ .long do_dispatch_tlb_misc !TLB misc ++ .long unhandled_exceptions !TLB VLPT ++ .long unhandled_exceptions !Machine Error ++ .long do_debug_trap !Debug related ++ .long do_dispatch_general !General exception ++ .long eh_syscall !Syscall ++ .long asm_do_IRQ !IRQ ++ ++common_exception_handler: ++ save_user_regs ++ lwi 
$p0, [$p1+#0x4] ++ andi $p1, $p0, #0x78 ++ bnez $p1, 1f ++ sethi $lp, hi20(ret_from_exception) ++ ori $lp, $lp, lo12(ret_from_exception) ++ sethi $p1, hi20(exception_handlers) ++ ori $p1, $p1, lo12(exception_handlers) ++ lw $p1, [$p1+$p0<<2] ++ move $r0, $p0 ++ mfsr $r1, $EVA ++ mfsr $r2, $ITYPE ++ move $r3, $sp ++ mfsr $r4, $OIPC ++ /* enable gie if it is enabled in IPSW. */ ++ mfsr $r21, $PSW ++ andi $r17, $r17, #0x1 ++ or $r21, $r21, $r17 ++ mtsr $r21, $PSW ++ dsb ++ jr $p1 ++ ++ /* syscall */ ++1: ++ addi $p1, $p0, #-8 ++ bnez $p1, 2f ++ sethi $lp, hi20(ret_from_exception) ++ ori $lp, $lp, lo12(ret_from_exception) ++ sethi $p1, hi20(exception_handlers) ++ ori $p1, $p1, lo12(exception_handlers) ++ lwi $p1, [$p1+#0x8<<2] ++ jr $p1 ++ ++ /* interrupt */ ++2: ++#ifdef CONFIG_TRACE_IRQFLAGS ++ jal arch_trace_hardirqs_off ++#endif ++#if defined(CONFIG_EVIC) || defined(CONFIG_IVIC) ++ addi $r0, $p0, #-9 ++#else ++# ifdef CONFIG_IVIC_INTC ++ jal get_IntSrc ++# else ++# error "Not configure Vector Interrupt Controller mode" ++# endif ++#endif ++ move $r1, $sp ++ sethi $lp, hi20(ret_from_intr) ++ ori $lp, $lp, lo12(ret_from_intr) ++ sethi $p0, hi20(exception_handlers) ++ ori $p0, $p0, lo12(exception_handlers) ++ lwi $p0, [$p0+#0x9<<2] ++ jr $p0 ++ ++ .macro EXCEPTION_VECTOR, num ++ .align 6 ++ .ifc \num, 6 ++ mfsr $p0, $EDM_CTL ++ andi $p0, $p0, EDM_CTL_mskV3_EDM_MODE ++ tnez $p0, 0x1a ++ .endif ++ /* $p1 is PA of percpu data */ ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ movi $p1, #0x0 ++#else ++ mfsr $p1, $CORE_ID ++#endif ++ slli $p1, $p1, #0x9 ++ li $p0, #(PHYS_OFFSET+0x6000) ++ or $p1, $p1, $p0 ++ ++ /* ++ * 0x0 saved $r25 ++ * 0x4 vector number ++ * 0x8 user stack pointer ++ * 0xc kernel stack pointer ++ */ ++ swi $r25, [$p1+0x0] ++ movi $p0, \num ++ swi $p0, [$p1+#0x4] ++ swi $sp, [$p1+#0x8] ++ lwi $r25, [$p1+#0xc] ++ sethi $p0, hi20(common_exception_handler) ++ ori $p0, $p0, lo12(common_exception_handler) ++ jral.ton $p0, $p0 ++ .endm ++ ++ .macro IPI_VECTOR, num ++ .align 6 ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ movi $p1, #0x0 ++#else ++ mfsr $p1, $CORE_ID ++#endif ++ slli $p1, $p1, #0x9 ++ li $p0, #(PHYS_OFFSET+0x6000) ++ or $p1, $p1, $p0 ++ ++ /* ++ * 0x10 indicator of CPU is initialized ++ * 0x14 CPU init function ++ */ ++ swi $r25, [$p1+0x0] ++ movi $p0, \num ++ swi $p0, [$p1+#0x4] ++ swi $sp, [$p1+#0x8] ++ lwi $r25, [$p1+#0xc] ++ lwi $p0, [$p1+#0x10] ++ beqz $p0, 1f ++ sethi $p0, hi20(common_exception_handler) ++ ori $p0, $p0, lo12(common_exception_handler) ++ jral.ton $p0, $p0 ++1: ++ lwi $p0, [$p1+#0x14] ++ jr $p0 ++ ++ .endm ++ ++ .section ".text.init", #alloc, #execinstr ++ .global exception_vector ++exception_vector: ++ EXCEPTION_VECTOR 0 ++ EXCEPTION_VECTOR 1 ++ EXCEPTION_VECTOR 2 ++ EXCEPTION_VECTOR 3 ++ EXCEPTION_VECTOR 4 ++ EXCEPTION_VECTOR 5 ++ EXCEPTION_VECTOR 6 ++ EXCEPTION_VECTOR 7 ++ EXCEPTION_VECTOR 8 ++#ifdef CONFIG_EVIC ++ EXCEPTION_VECTOR 9 ++#else ++ IPI_VECTOR 9 ++#endif ++ EXCEPTION_VECTOR 10 ++ EXCEPTION_VECTOR 11 ++ EXCEPTION_VECTOR 12 ++ EXCEPTION_VECTOR 13 ++ EXCEPTION_VECTOR 14 ++ EXCEPTION_VECTOR 15 ++ EXCEPTION_VECTOR 16 ++ EXCEPTION_VECTOR 17 ++ EXCEPTION_VECTOR 18 ++ EXCEPTION_VECTOR 19 ++ EXCEPTION_VECTOR 20 ++ EXCEPTION_VECTOR 21 ++ EXCEPTION_VECTOR 22 ++ EXCEPTION_VECTOR 23 ++ EXCEPTION_VECTOR 24 ++ EXCEPTION_VECTOR 25 ++ EXCEPTION_VECTOR 26 ++ EXCEPTION_VECTOR 27 ++ EXCEPTION_VECTOR 28 ++ EXCEPTION_VECTOR 29 ++ EXCEPTION_VECTOR 30 ++ EXCEPTION_VECTOR 31 ++ EXCEPTION_VECTOR 32 ++ EXCEPTION_VECTOR 33 ++ EXCEPTION_VECTOR 34 ++ EXCEPTION_VECTOR 35 ++ 
EXCEPTION_VECTOR 36 ++ EXCEPTION_VECTOR 37 ++ EXCEPTION_VECTOR 38 ++ EXCEPTION_VECTOR 39 ++ EXCEPTION_VECTOR 40 ++#ifdef CONFIG_EVIC ++ IPI_VECTOR 41 ++#else ++ EXCEPTION_VECTOR 41 ++#endif ++ EXCEPTION_VECTOR 42 ++ EXCEPTION_VECTOR 43 ++ EXCEPTION_VECTOR 44 ++ EXCEPTION_VECTOR 45 ++ EXCEPTION_VECTOR 46 ++ EXCEPTION_VECTOR 47 ++ EXCEPTION_VECTOR 48 ++ EXCEPTION_VECTOR 49 ++ EXCEPTION_VECTOR 50 ++ EXCEPTION_VECTOR 51 ++ EXCEPTION_VECTOR 52 ++ EXCEPTION_VECTOR 53 ++ EXCEPTION_VECTOR 54 ++ EXCEPTION_VECTOR 55 ++ EXCEPTION_VECTOR 56 ++ EXCEPTION_VECTOR 57 ++ EXCEPTION_VECTOR 58 ++ EXCEPTION_VECTOR 59 ++ EXCEPTION_VECTOR 60 ++ EXCEPTION_VECTOR 61 ++ EXCEPTION_VECTOR 62 ++ EXCEPTION_VECTOR 63 ++ EXCEPTION_VECTOR 64 ++ EXCEPTION_VECTOR 65 ++ EXCEPTION_VECTOR 66 ++ EXCEPTION_VECTOR 67 ++ EXCEPTION_VECTOR 68 ++ EXCEPTION_VECTOR 69 ++ EXCEPTION_VECTOR 70 ++ EXCEPTION_VECTOR 71 ++ EXCEPTION_VECTOR 72 +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ex-exit.S linux-3.4.110/arch/nds32/kernel/ex-exit.S +--- linux-3.4.110.orig/arch/nds32/kernel/ex-exit.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ex-exit.S 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,220 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define why $r8 // Linux syscall (!= 0) ++#define tsk $r9 // current thread_info ++ ++ ++ .macro pop_zol ++#ifdef CONFIG_HWZOL ++ mtusr $r10, $LB ++ mtusr $r11, $LE ++ mtusr $r12, $LC ++#endif ++ .endm ++ ++ .macro restore_user_regs_first ++ setgie.d ++ isb ++ /* $p1 is VA of percpu data */ ++#ifdef CONFIG_CPU_N1213_43U1HA0 ++ movi $p1, #0x0 ++#else ++ mfsr $p1, $CORE_ID ++#endif ++ andi $p1, $p1, #0x3 ++ slli $p1, $p1, #0x9 ++ sethi $p0, #0xc0006 ++ or $p1, $p1, $p0 ++ ++ addi $sp, $sp, S_OFF ++ popm $r16, $r18 ++ mtsr $r16, $PSW ++ pop $r16 ++ swi $r16, [$p1+#0x8] ++ addi $sp, $sp, #4 !pop $r0 ++ popm $r6, $r13 ++ mtsr $r17, $IPSW ++ mtsr $r18, $IPC ++ mtsr $r6, $P_IPSW ++ mtsr $r7, $P_IPC ++ mtsr $r8, $P_P0 ++ mtsr $r9, $P_P1 ++#ifdef CONFIG_HWZOL ++ pop_zol ++#endif ++ .endm ++ ++ .macro restore_user_regs_last ++ popm $r28, $r30 ++#if defined(CONFIG_FPU) || defined(CONFIG_AUDIO) ++ pop $p0 ++ mtsr $p0, $FUCOP_CTL ++#else ++ addi $sp, $sp, 4 ++#endif ++ ++ pop $p0 ++ cmovn $sp, $p0, $p0 ++ ++ mfsr $p0, $IPSW ++ andi $p0, $p0, #PSW_mskPOM ++ bnez $p0, 1f ++ swi $sp, [$p1+#0xc] ++ lwi $sp, [$p1+#0x8] ++1: ++ // This is SW workaround for Bug #6294 ++ li $p0, 0xc0000000 ++ cctl $p0, L1D_VA_INVAL ++ lwi $p1, [$p0] ++ ++ iret ++ nop ++ ++ .endm ++ ++ .macro restore_user_regs ++ restore_user_regs_first ++ popm $r0, $r25 ++ restore_user_regs_last ++ .endm ++ ++ .macro fast_restore_user_regs ++ restore_user_regs_first ++ addi $sp, $sp, #4 ++ popm $r1, $r25 ++ restore_user_regs_last ++ .endm ++ ++#ifdef CONFIG_PREEMPT ++ .macro preempt_stop ++ .endm ++#else ++ .macro preempt_stop ++ setgie.d ++ isb ++ .endm ++#define resume_kernel no_work_pending ++#endif ++ ++ENTRY(ret_from_exception) ++ preempt_stop ++ENTRY(ret_from_intr) ++ get_thread_info tsk ++ move why, #0 ! not system call ++ ++/* ++ * judge Kernel or user mode ++ * ++ */ ++ lwi $p0, [$sp+(#S_IPSW+#S_OFF)] ++! mfsr $p0, $IPSW ! Check if in nested interrupt ++ andi $p0, $p0, #PSW_mskINTL ++ bnez $p0, resume_kernel ! done with iret ++ j resume_userspace ++ ++ ++/* ++ * This is the fast syscall return path. We do as little as ++ * possible here, and this includes saving $r0 back into the SVC ++ * stack. 
++ * fixed: tsk - $r9, why - $r8, $r7 - syscall #, $r8 - syscall table pointer ++ */ ++ENTRY(ret_fast_syscall) ++ gie_disable ++ lwi $r1, [tsk+#TI_FLAGS] ++ andi $p1, $r1, #_TIF_WORK_MASK ++ bnez $p1, fast_work_pending ++ fast_restore_user_regs ! iret ++ ++/* ++ * Ok, we need to do extra processing, ++ * enter the slow path returning from syscall, while pending work. ++ */ ++fast_work_pending: ++ swi $r0, [$sp+(#S_R0+#S_OFF)] ! what is different from ret_from_exception ++ ! addi $sp, $sp, S_OFF ++ move why, #1 ! come from a syscall ++work_pending: ++ andi $p1, $r1, #_TIF_NEED_RESCHED ++ bnez $p1, work_resched ++ ++ andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME ++ beqz $p1, no_work_pending ++ ++ move $r0, $sp ! 'regs' ++ move $r2, why ++ gie_enable ++ bal do_notify_resume ++ ++ b ret_slow_syscall ! return from slow_restore_user_regs ++ ++work_resched: ++ bal schedule ! path, return to user mode ++ ++/* ++ * "slow" syscall return path. ++ * "why" tells us if this was a real syscall. ++ */ ++ENTRY(resume_userspace) ++ENTRY(ret_slow_syscall) ++ gie_disable ++ lwi $p0, [$sp+(#S_IPSW+#S_OFF)] ++! mfsr $p0, $IPSW ! Check if in nested interrupt ++ andi $p0, $p0, #PSW_mskINTL ++ bnez $p0, no_work_pending ! done with iret ++ lwi $r1, [tsk+#TI_FLAGS] ++ andi $p1, $r1, #_TIF_WORK_MASK ++ bnez $p1, work_pending ! handle work_resched, sig_pend ++ ++no_work_pending: ++#ifdef CONFIG_TRACE_IRQFLAGS ++ lwi $p0, [$sp+(#S_IPSW+#S_OFF)] ++ andi $p0, $p0, #0x1 ++ la $r10, arch_trace_hardirqs_off ++ la $r9, arch_trace_hardirqs_on ++ cmovz $r9, $p0, $r10 ++ jral $r9 ++#endif ++ restore_user_regs ! return from iret ++ ++ ++/* ++ * preemptive kernel ++ */ ++#ifdef CONFIG_PREEMPT ++resume_kernel: ++ gie_disable ++ lwi $t0, [tsk+#TI_PREEMPT] ++ bnez $t0, no_work_pending ++need_resched: ++ lwi $t0, [tsk+#TI_FLAGS] ++ andi $p1, $t0, #_TIF_NEED_RESCHED ++ beqz $p1, no_work_pending ++ ++ lwi $t0, [$sp+(#S_IPSW+#S_OFF)] ! Interrupts off? ++ andi $t0, $t0, #1 ++ beqz $t0, no_work_pending ++ ++ jal preempt_schedule_irq ++ b need_resched ++#endif ++ ++/* ++ * This is how we return from a fork. ++ */ ++ENTRY(ret_from_fork) ++ bal schedule_tail ++ get_thread_info tsk ++ lwi $r1, [tsk+#TI_FLAGS] ! check for syscall tracing ++ move why, #1 ++ andi $p1, $r1, #_TIF_WORK_SYSCALL_LEAVE ! are we tracing syscalls? ++ beqz $p1, ret_slow_syscall ++ move $r0, $sp ++ bal syscall_trace_leave ++ b ret_slow_syscall ++ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ex-scall.S linux-3.4.110/arch/nds32/kernel/ex-scall.S +--- linux-3.4.110.orig/arch/nds32/kernel/ex-scall.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ex-scall.S 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,288 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * $r0 = previous task_struct, ++ * $r1 = previous thread_info, ++ * $r2 = next thread_info ++ * previous and next are guaranteed not to be the same. ++ */ ++ ++ENTRY(_switch) ++ smw.adm $r6, [$sp], $r14, #0xa ++ swi $sp, [$r1+ TI_SP_SAVE] /* Save $sp to task structure */ ++ lwi $sp, [$r2+ TI_SP_SAVE] /* Get $sp from task structure */ ++ lmw.bim $r6, [$sp], $r14, #0xa ++ ret ++ ++/* ++ * These are the registers used in the syscall handler, and allow us ++ * to have in theory up to 6 arguments to a function - $r0 to $r5. ++ * $r7 is reserved for the system call number for Andes architecture. ++ * ++ * Note that tbl == why is intentional. ++ * We must set at least "tsk" and "why" when calling ret_with_reschedule. 
++ */ ++#define tbl $r8 // syscall table pointer ++#define why $r8 // Linux syscall (!= 0) ++#define tsk $r9 // current thread_info ++ ++/* ++ * Get the system call number. let $r7 take a system call nr. ++ * ++ * if it is called from library function, $r7 is filled by library call ++ * when user program make a system call. and swid bitfield of $ir6 will ++ * always be encoded as 0x7fff. ++ * ++ * if it is called from kernel code, $r7 will be writen as syscall nr ++ * by retrieving from $ir6 'swid' bitfiled ++ */ ++ .macro get_scno ++ mfsr $r7, $ITYPE ++#ifdef CONFIG_PLAT_QEMU ++ slli $r7, $r7, #1 ++ srli $r7, $r7, #ITYPE_offSWID+1 ++#else ++ srli $r7, $r7, #ITYPE_offSWID ++#endif ++ .endm ++ ++ .macro updateipc ++ addi $r17, $r18, #4 ++ swi $r17, [$sp + S_OFF + S_IPC] ++ .endm ++ ++ .equ NR_syscalls,0 ++#define CALL(x) .equ NR_syscalls,NR_syscalls+1 ++#include "calls.S" ++#undef CALL ++#define CALL(x) .long x ++ ++ENTRY(eh_syscall) ++ updateipc ++ ++ get_scno ++ gie_enable ++ get_thread_info tsk ++ lwi $p0, [tsk+#TI_FLAGS] ! check for syscall tracing ++ ++ andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY ! are we tracing syscalls? ++ bnez $p1, __sys_trace ++ ++ la $lp, ret_fast_syscall ! return address ++ ++ li $p0, __NR_NDS32_BASE ++ addi $p0, $p0, -1 ++ slt $p0, $p0, $r7 ++ beqz $p0, 1f ++ addi $r1, $SP, #0 ++ move $r0, $r7 ++ b nds32_syscall ++1: ++ ! check upper syscall limit, ++ andi $r7, $r7, #0xfff ++ addi $p1, $r7, #-NR_syscalls ! syscall number of syscall instruction is guarded by addembler ++ bgez $p1, _SCNO_EXCEED ! call sys_* routine ++ la tbl, sys_call_table ! load syscall table pointer ++ slli $p1, $r7, #2 ++ add $p1, tbl, $p1 ++ lwi $p1, [$p1] ++ jr $p1 ! no return ++ ++_SCNO_EXCEED: ++ ori $r0, $r7, #0 ++ ori $r1, $sp, #0 ++ b bad_syscall ++ ++/* ++ * This is the really slow path. We're going to be doing ++ * context switches, and waiting for our parent to respond. ++ */ ++__sys_trace: ++ move $r1, $sp ++ move $r0, $r7 ! trace entry [IP = 0] ++ bal syscall_trace_enter ++ move $r7, $r0 ++ la $lp, __sys_trace_return ! return address ++ ++ li $p0, __NR_NDS32_BASE ++ addi $p0, $p0, -1 ++ slt $p0, $p0, $r7 ++ beqz $p0, 1f ++ addi $r1, $SP, #0 ++ b nds32_syscall ++1: ++ andi $r7, $r7, #0xfff ++ addi $p1, $sp, #S_R0+S_OFF ! pointer to regs ++ addi $p0, $r7, -#NR_syscalls ! check upper syscall limit ++ bgez $p0, _SCNO_EXCEED ++ lmw.bi $r0, [$p1], $r5 ! have to reload $r0 - $r5 ++ ++ slli $p0, $r7, #2 ! call sys_* routine ++ la tbl, sys_call_table ! load syscall table pointer ++ add $p0, tbl, $p0 ++ lwi $p0, [$p0] ++ jr $p0 ++ ++__sys_trace_return: ++ swi $r0, [$sp+(#S_R0+S_OFF)] ! T: save returned $r0 ++ bal syscall_trace_leave ++ b ret_slow_syscall ++ ++ .type sys_call_table, #object ++ENTRY(sys_call_table) ++#include "calls.S" ++ ++/* ++ * Special system call wrappers ++ * ++ * $r0 = syscall number ++ * $r8 = syscall table ++ */ ++ .type sys_syscall, #function ++sys_syscall: ++ ++ li $p0, __NR_NDS32_BASE ++ addi $p0, $p0, -1 ++ slt $p0, $p0, $r0 ++ beqz $p0, 1f ++ addi $r1, $SP, #0 ++ b nds32_syscall ++1: ++ bltz $r0, 3f ! Guard whether syscall number is between 0~0x7fff. ++ andi $r0, $r0, #0xfff ++ addi $p1, $r0, #-NR_syscalls ++ bgtz $p1, 3f ++ move $p1, $r0 ++ move $r0, $r1 ++ move $r1, $r2 ++ move $r2, $r3 ++ move $r3, $r4 ++ move $r4, $r5 ++! add for syscall 6 args ++ lwi $r5, [$sp + (#S_SP + #S_OFF) ] ++ lwi $r5, [$r5 + #S_OFF] ++! 
~add for syscall 6 args ++ ++ lw $p1, [tbl+$p1<<2] ++ jr $p1 ++3: b sys_ni_syscall ++ ++sys_fork_wrapper: ++ addi $r0, $sp, #0 ++ b sys_fork ++ ++sys_vfork_wrapper: ++ addi $r0, $sp, #0 ++ b sys_vfork ++ ++sys_execve_wrapper: ++ addi $r3, $sp, #0 ++ b sys_execve ++ ++sys_clone_wrapper: ++ addi $r5, $SP, #0 ++ b sys_clone ++ ++sys_sigsuspend_wrapper: ++ addi $r3, $sp, #0 ++ b sys_sigsuspend ++ ++sys_rt_sigsuspend_wrapper: ++ addi $r2, $sp, #0 ++ b sys_rt_sigsuspend ++ ++sys_sigreturn_wrapper: ++ addi $r0, $sp, #0 ++ b sys_sigreturn ++ ++sys_rt_sigreturn_wrapper: ++ addi $r0, $sp, #0 ++ b sys_rt_sigreturn ++ ++sys_sigaltstack_wrapper: ++ lwi $r2, [$sp+(#S_OFF + S_SP)] ++ b do_sigaltstack ++ ++sys_futex_wrapper: ++ b sys_futex ++ ++#ifdef CONFIG_FUNCTION_TRACER ++ .global _mcount ++ .global ftrace_stub ++_mcount: ++ sethi $r15, hi20(function_trace_stop) ++ lwi $r15, [$r15 + lo12(function_trace_stop)] ++ bnez $r15, _ftrace_stub ++ ++ sethi $r15, hi20(ftrace_trace_function) ++ lwi $p0, [$r15 + lo12(ftrace_trace_function)] ++ sethi $r15, hi20(ftrace_stub) ++ ori $r15, $r15, lo12(ftrace_stub) ++ bne $r15, $p0, trace ++ ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER ++ sethi $p0, hi20(ftrace_graph_return) ++ lwi $p0, [$p0 + lo12(ftrace_graph_return)] ++ bne $r15, $p0, _ftrace_graph_caller ++ ++ sethi $r15, hi20(ftrace_graph_entry) ++ lwi $r15, [$r15 + lo12(ftrace_graph_entry)] ++ sethi $p0, hi20(ftrace_graph_entry_stub) ++ ori $p0, $p0, lo12(ftrace_graph_entry_stub) ++ bne $r15, $p0, _ftrace_graph_caller ++#endif ++ ++_ftrace_stub: ++ftrace_stub: ++ ret ++ ++trace: ++ smw.adm $r0, [$sp], $r5, #10 ++ move $r0, $lp ++ sethi $r15, hi20(get_selfpc) ++ ori $r15, $r15, lo12(get_selfpc) ++ jral $r15 ++ move $r1, $r0 ++ lwi $r0, [$sp+36] ++ jral $p0 ++ lmw.bim $r0, [$sp], $r5, #10 ++ ret ++#endif ++ ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER ++ .global ftrace_graph_caller ++ .global return_to_handler ++_ftrace_graph_caller: ++ftrace_graph_caller: ++ sethi $p1, hi20(function_trace_stop) ++ lwi $p1, [$p1 + lo12(function_trace_stop)] ++ bnez $p1, ftrace_stub ++ ++ sethi $p1, hi20(prepare_ftrace_return) ++ ori $p1, $p1, lo12(prepare_ftrace_return) ++ ++ smw.adm $r0, [$sp], $r5, #10 ++ move $r0, $lp ++ sethi $p0, hi20(get_selfpc) ++ ori $p0, $p0, lo12(get_selfpc) ++ jral $p0 ++ move $r1, $r0 ++ addi $r0, $sp, #36 ++ jral $p1 ++ lmw.bim $r0, [$sp], $r5, #10 ++ ret ++ ++return_to_handler: ++ smw.adm $r0, [$sp], $r5, #10 ++ sethi $r15, hi20(ftrace_return_to_handler) ++ ori $r15, $r15, lo12(ftrace_return_to_handler) ++ jral $r15 ++ move $r15, $r0 ++ lmw.bim $r0, [$sp], $r5, #10 ++ jr $r15 ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/fpu.c linux-3.4.110/arch/nds32/kernel/fpu.c +--- linux-3.4.110.orig/arch/nds32/kernel/fpu.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/fpu.c 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,306 @@ ++/* ++ * arch/nds32/kernel/fpu.c ++ * ++ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli ++ * Copyright (C) 2002 STMicroelectronics Limited ++ * Author : Stuart Menefy ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * Started from SH4 version: ++ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. 
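The sys_syscall wrapper earlier in ex-scall.S re-dispatches through the call table with every argument shifted down by one register. A minimal C model of that indirection, with invented handlers and table entries, looks like this:

#include <stdio.h>

/*
 * Illustration only: the argument shuffling done by sys_syscall,
 * expressed in C.  The first argument selects the entry in the call
 * table and the remaining arguments slide down by one position, which
 * is what the "move $r0, $r1 / move $r1, $r2 ..." sequence implements.
 * The table and handlers here are made up.
 */
typedef long (*syscall_fn)(long, long, long);

static long demo_getpid(long a, long b, long c)
{
    (void)a; (void)b; (void)c;
    return 1234;                       /* pretend PID */
}

static long demo_write(long fd, long buf, long len)
{
    printf("write(%ld, %#lx, %ld)\n", fd, buf, len);
    return len;
}

static syscall_fn table[] = { demo_getpid, demo_write };

static long sys_syscall(long nr, long a1, long a2, long a3)
{
    if (nr < 0 || nr >= (long)(sizeof(table) / sizeof(table[0])))
        return -1;                     /* plays the role of sys_ni_syscall */
    return table[nr](a1, a2, a3);      /* arguments shifted down by one */
}

int main(void)
{
    sys_syscall(1, 3, 0x1000, 42);     /* behaves like write(3, 0x1000, 42) */
    return 0;
}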
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "fpu.h" ++ ++extern void do_revinsn(struct pt_regs *regs); ++extern int do_fpu_denorm(struct pt_regs *regs, struct fpu_struct *fpu); ++ ++static struct fpu_struct init_fpuregs = { ++ .fs_regs = {[0 ... 31] = sNAN32}, ++ .fd_regs = {[0 ... 15] = sNAN64}, ++ .fpcsr = FPCSR_INIT ++}; ++ ++void save_fpu(struct task_struct *tsk) ++{ ++ unsigned int fpcfg, fpcsr; ++ enable_fpu(); ++ asm volatile ("fmfcfg %0\n\t":"=&r" (fpcfg)); ++ fpcfg = ((fpcfg & FPCFG_mskFREG) >> FPCFG_offFREG); ++ ++ switch (fpcfg) { ++ case 3: ++ asm volatile ("fsdi $fd31, [%0+0xf8]\n\t" ++ "fsdi $fd30, [%0+0xf0]\n\t" ++ "fsdi $fd29, [%0+0xe8]\n\t" ++ "fsdi $fd28, [%0+0xe0]\n\t" ++ "fsdi $fd27, [%0+0xd8]\n\t" ++ "fsdi $fd26, [%0+0xd0]\n\t" ++ "fsdi $fd25, [%0+0xc8]\n\t" ++ "fsdi $fd24, [%0+0xc0]\n\t" ++ "fsdi $fd23, [%0+0xb8]\n\t" ++ "fsdi $fd22, [%0+0xb0]\n\t" ++ "fsdi $fd21, [%0+0xa8]\n\t" ++ "fsdi $fd20, [%0+0xa0]\n\t" ++ "fsdi $fd19, [%0+0x98]\n\t" ++ "fsdi $fd18, [%0+0x90]\n\t" ++ "fsdi $fd17, [%0+0x88]\n\t" ++ "fsdi $fd16, [%0+0x80]\n\t" ++ : /* no output */ ++ :"r" (&tsk->thread.fpu) ++ :"memory"); ++ /* fall through */ ++ case 2: ++ asm volatile ("fssi $fs31, [%0+0x7c]\n\t" ++ "fssi $fs30, [%0+0x78]\n\t" ++ "fssi $fs29, [%0+0x74]\n\t" ++ "fssi $fs28, [%0+0x70]\n\t" ++ "fssi $fs27, [%0+0x6c]\n\t" ++ "fssi $fs26, [%0+0x68]\n\t" ++ "fssi $fs25, [%0+0x64]\n\t" ++ "fssi $fs24, [%0+0x60]\n\t" ++ "fssi $fs23, [%0+0x5c]\n\t" ++ "fssi $fs22, [%0+0x58]\n\t" ++ "fssi $fs21, [%0+0x54]\n\t" ++ "fssi $fs20, [%0+0x50]\n\t" ++ "fssi $fs19, [%0+0x4c]\n\t" ++ "fssi $fs18, [%0+0x48]\n\t" ++ "fssi $fs17, [%0+0x44]\n\t" ++ "fssi $fs16, [%0+0x40]\n\t" ++ : /* no output */ ++ :"r" (&tsk->thread.fpu) ++ :"memory"); ++ /* fall through */ ++ case 1: ++ asm volatile ("fssi $fs15, [%0+0x3c]\n\t" ++ "fssi $fs14, [%0+0x38]\n\t" ++ "fssi $fs13, [%0+0x34]\n\t" ++ "fssi $fs12, [%0+0x30]\n\t" ++ "fssi $fs11, [%0+0x2c]\n\t" ++ "fssi $fs10, [%0+0x28]\n\t" ++ "fssi $fs9, [%0+0x24]\n\t" ++ "fssi $fs8, [%0+0x20]\n\t" ++ : /* no output */ ++ :"r" (&tsk->thread.fpu) ++ :"memory"); ++ /* fall through */ ++ case 0: ++ asm volatile ("fssi $fs7, [%1+0x1c]\n\t" ++ "fssi $fs6, [%1+0x18]\n\t" ++ "fssi $fs5, [%1+0x14]\n\t" ++ "fssi $fs4, [%1+0x10]\n\t" ++ "fssi $fs3, [%1+0xc]\n\t" ++ "fssi $fs2, [%1+0x8]\n\t" ++ "fssi $fs1, [%1+0x4]\n\t" ++ "fssi $fs0, [%1+0x0]\n\t" ++ "fmfcsr %0\n\t" ++ "swi %0, [%1+0x100]\n\t" ++ :"=&r" (fpcsr) ++ :"r"(&tsk->thread.fpu) ++ :"memory"); ++ } ++ disable_fpu(); ++} ++ ++void fpload(struct fpu_struct *fpregs) ++{ ++ unsigned int fpcfg, fpcsr; ++ enable_fpu(); ++ ++ asm volatile ("fmfcfg %0\n\t":"=&r" (fpcfg)); ++ fpcfg = ((fpcfg & FPCFG_mskFREG) >> FPCFG_offFREG); ++ ++ switch (fpcfg) { ++ case 3: ++ asm volatile ("fldi $fd31, [%0+0xf8]\n\t" ++ "fldi $fd30, [%0+0xf0]\n\t" ++ "fldi $fd29, [%0+0xe8]\n\t" ++ "fldi $fd28, [%0+0xe0]\n\t" ++ "fldi $fd27, [%0+0xd8]\n\t" ++ "fldi $fd26, [%0+0xd0]\n\t" ++ "fldi $fd25, [%0+0xc8]\n\t" ++ "fldi $fd24, [%0+0xc0]\n\t" ++ "fldi $fd23, [%0+0xb8]\n\t" ++ "fldi $fd22, [%0+0xb0]\n\t" ++ "fldi $fd21, [%0+0xa8]\n\t" ++ "fldi $fd20, [%0+0xa0]\n\t" ++ "fldi $fd19, [%0+0x98]\n\t" ++ "fldi $fd18, [%0+0x90]\n\t" ++ "fldi $fd17, [%0+0x88]\n\t" ++ "fldi $fd16, [%0+0x80]\n\t" ++ : /* no output */ ++ :"r" (fpregs)); ++ /* fall through */ ++ case 2: ++ asm volatile ("flsi $fs31, [%0+0x7c]\n\t" ++ "flsi $fs30, [%0+0x78]\n\t" ++ "flsi $fs29, [%0+0x74]\n\t" ++ "flsi $fs28, [%0+0x70]\n\t" ++ "flsi $fs27, [%0+0x6c]\n\t" ++ 
"flsi $fs26, [%0+0x68]\n\t" ++ "flsi $fs25, [%0+0x64]\n\t" ++ "flsi $fs24, [%0+0x60]\n\t" ++ "flsi $fs23, [%0+0x5c]\n\t" ++ "flsi $fs22, [%0+0x58]\n\t" ++ "flsi $fs21, [%0+0x54]\n\t" ++ "flsi $fs20, [%0+0x50]\n\t" ++ "flsi $fs19, [%0+0x4c]\n\t" ++ "flsi $fs18, [%0+0x48]\n\t" ++ "flsi $fs17, [%0+0x44]\n\t" ++ "flsi $fs16, [%0+0x40]\n\t" ++ : /* no output */ ++ :"r" (fpregs)); ++ /* fall through */ ++ case 1: ++ asm volatile ("flsi $fs15, [%0+0x3c]\n\t" ++ "flsi $fs14, [%0+0x38]\n\t" ++ "flsi $fs13, [%0+0x34]\n\t" ++ "flsi $fs12, [%0+0x30]\n\t" ++ "flsi $fs11, [%0+0x2c]\n\t" ++ "flsi $fs10, [%0+0x28]\n\t" ++ "flsi $fs9, [%0+0x24]\n\t" ++ "flsi $fs8, [%0+0x20]\n\t" ++ : /* no output */ ++ :"r" (fpregs)); ++ /* fall through */ ++ case 0: ++ asm volatile ("flsi $fs7, [%1+0x1c]\n\t" ++ "flsi $fs6, [%1+0x18]\n\t" ++ "flsi $fs5, [%1+0x14]\n\t" ++ "flsi $fs4, [%1+0x10]\n\t" ++ "flsi $fs3, [%1+0xc]\n\t" ++ "flsi $fs2, [%1+0x8]\n\t" ++ "flsi $fs1, [%1+0x4]\n\t" ++ "flsi $fs0, [%1+0x0]\n\t" ++ "lwi %0, [%1+0x100]\n\t" ++ "fmtcsr %0\n\t":"=&r" (fpcsr) ++ :"r"(fpregs)); ++ } ++ disable_fpu(); ++} ++ ++void do_fpu_context_switch(unsigned long error_code, struct pt_regs *regs) ++{ ++ /* Enable to use FPU. */ ++ ++ if (!user_mode(regs)) { ++ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); ++ BUG(); ++ return; ++ } ++ ++ grab_fpu(regs); ++#ifndef CONFIG_UNLAZY_FPU //Lazy FPU is used ++ if (last_task_used_math == current) ++ return; ++ if (last_task_used_math != NULL) ++ /* Other processes fpu state, save away */ ++ save_fpu(last_task_used_math); ++ last_task_used_math = current; ++#endif ++ if (used_math()) { ++ fpload(¤t->thread.fpu); ++ } else { ++ /* First time FPU user. */ ++ fpload(&init_fpuregs); ++ set_used_math(); ++ } ++ ++} ++ ++void do_fpu_exception(unsigned long error_code, struct pt_regs *regs) ++{ ++ unsigned int subtype = ++ ((GET_ITYPE() & ITYPE_mskSTYPE) >> ITYPE_offSTYPE); ++ unsigned int cpid = ((GET_ITYPE() & ITYPE_mskCPID) >> ITYPE_offCPID); ++ ++ /* FPU */ ++ if ((cpid == 0) && (GET_FUCOP_EXIST() & FUCOP_EXIST_mskCP0ISFPU)) { ++ /* Coprocessor disabled exception */ ++ if (subtype == 1) { ++ preempt_disable(); ++ do_fpu_context_switch(error_code, regs); ++ preempt_enable(); ++ } ++ /* Coprocessor exception */ ++ else if (subtype == 2) { ++ siginfo_t si = { 0 }; ++ unsigned int fpcsr; ++ enable_fpu(); ++ asm volatile ("fmfcsr %0\n\t":"=&r" (fpcsr)); ++ disable_fpu(); ++ ++ if (fpcsr & FPCSR_mskALLT) { ++ si.si_signo = SIGFPE; ++ /* Exception handling, denorm input, UDF and OVF */ ++ if (fpcsr & FPCSR_mskDNIT) { ++ unsigned int rfpcsr; ++ lose_fpu(1); ++ si.si_signo = ++ do_fpu_denorm(regs, ++ ¤t->thread.fpu); ++ own_fpu(1); ++ ++ if (si.si_signo == SIGFPE) { ++ rfpcsr = ++ current->thread.fpu.fpcsr; ++ ++ if (rfpcsr & FPCSR_mskIVO) ++ si.si_code = FPE_FLTINV; ++ if (rfpcsr & FPCSR_mskDBZ) ++ si.si_code = FPE_FLTDIV; ++ if (rfpcsr & FPCSR_mskOVF) ++ si.si_code = FPE_FLTOVF; ++ if (rfpcsr & FPCSR_mskUDF) ++ si.si_code = FPE_FLTUND; ++ if (rfpcsr & FPCSR_mskIEX) ++ si.si_code = FPE_FLTRES; ++ } else if (si.si_code == SIGILL) ++ show_regs(regs); ++ else if (si.si_code == SIGBUS) ++ si.si_code = BUS_ADRERR; ++ } else if (fpcsr & FPCSR_mskRIT) { ++ printk("Reserved Instruction\n"); ++ show_regs(regs); ++ if (!user_mode(regs)) ++ do_exit(SIGILL); ++ si.si_signo = SIGILL; ++ } else if (fpcsr & FPCSR_mskUDFT) ++ si.si_code = FPE_FLTUND; ++ else if (fpcsr & FPCSR_mskOVFT) ++ si.si_code = FPE_FLTOVF; ++ else if (fpcsr & FPCSR_mskIVOT) ++ si.si_code = FPE_FLTINV; ++ else if (fpcsr & 
FPCSR_mskDBZT) ++ si.si_code = FPE_FLTDIV; ++ else if (fpcsr & FPCSR_mskIEXT) ++ si.si_code = FPE_FLTRES; ++ /* If something went wrong, signal */ ++ if (si.si_signo) { ++ if (si.si_code == SIGILL) { ++ force_sig(si.si_signo, current); ++ } else { ++ si.si_addr = ++ (void __user *) ++ instruction_pointer(regs); ++ force_sig_info(si.si_signo, &si, ++ current); ++ } ++ } ++ } else { ++ printk("Bad FPU exception\n"); ++ BUG(); ++ } ++ } ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/fpu.h linux-3.4.110/arch/nds32/kernel/fpu.h +--- linux-3.4.110.orig/arch/nds32/kernel/fpu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/fpu.h 2016-04-07 10:20:50.938080870 +0200 +@@ -0,0 +1,13 @@ ++ ++ ++/* ++ * Initially load the FPU with signalling NANS. This bit pattern ++ * has the property that no matter whether considered as single or as ++ * double precision, it still represents a signalling NAN. ++ */ ++ ++ #define sNAN64 0xFFFFFFFFFFFFFFFFULL ++ #define sNAN32 0xFFFFFFFFUL ++ ++ #define FPCSR_INIT 0x0 /* Hardware reset value */ ++ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ftrace.c linux-3.4.110/arch/nds32/kernel/ftrace.c +--- linux-3.4.110.orig/arch/nds32/kernel/ftrace.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ftrace.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,219 @@ ++/* ++ * Code for replacing ftrace calls with jumps. ++ * ++ * Copyright (C) 2007-2008 Steven Rostedt ++ * ++ * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. ++ * ++ * Added function graph tracer code, taken from x86 that was written ++ * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++# define GET_ADDR(addr) (*(unsigned long *)addr) ++ ++#ifdef CONFIG_DYNAMIC_FTRACE ++static unsigned int ftrace_nop_replace(void) ++{ ++// return PPC_INST_NOP; ++ return 0; ++} ++ ++static unsigned int ++ftrace_call_replace(unsigned long ip, unsigned long addr, int link) ++{ ++ unsigned int op; ++ ++ addr = GET_ADDR(addr); ++ ++ /* if (link) set op to 'bl' else 'b' */ ++// op = create_branch((unsigned int *)ip, addr, link ? 1 : 0); ++ ++ return op; ++} ++ ++static int ++ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) ++{ ++ unsigned int replaced; ++ ++ /* ++ * Note: Due to modules and __init, code can ++ * disappear and change, we need to protect against faulting ++ * as well as code changing. We do this by using the ++ * probe_kernel_* functions. ++ * ++ * No real locking needed, this code is run through ++ * kstop_machine, or before SMP starts. 
++ */ ++ ++ /* read the text we want to modify */ ++ if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE)) ++ return -EFAULT; ++ ++ /* Make sure it is what we expect it to be */ ++ if (replaced != old) ++ return -EINVAL; ++ ++ /* replace the text with the new text */ ++ if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE)) ++ return -EPERM; ++ ++ flush_icache_range(ip, ip + 8); ++ ++ return 0; ++} ++ ++int ftrace_make_nop(struct module *mod, ++ struct dyn_ftrace *rec, unsigned long addr) ++{ ++ unsigned long ip = rec->ip; ++ unsigned int old, new; ++ ++ old = ftrace_call_replace(ip, addr, 1); ++ new = ftrace_nop_replace(); ++ return ftrace_modify_code(ip, old, new); ++} ++ ++int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) ++{ ++ unsigned long ip = rec->ip; ++ unsigned int old, new; ++ ++ old = ftrace_nop_replace(); ++ new = ftrace_call_replace(ip, addr, 1); ++ return ftrace_modify_code(ip, old, new); ++} ++ ++int ftrace_update_ftrace_func(ftrace_func_t func) ++{ ++ unsigned long ip = (unsigned long)(&ftrace_call); ++ unsigned int old, new; ++ int ret; ++ ++ old = *(unsigned int *)&ftrace_call; ++ new = ftrace_call_replace(ip, (unsigned long)func, 1); ++ ret = ftrace_modify_code(ip, old, new); ++ ++ return ret; ++} ++ ++int __init ftrace_dyn_arch_init(void *data) ++{ ++ /* caller expects data to be zero */ ++ unsigned long *p = data; ++ ++ *p = 0; ++ ++ return 0; ++} ++#endif /* CONFIG_DYNAMIC_FTRACE */ ++ ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER ++ ++#ifdef CONFIG_DYNAMIC_FTRACE ++extern void ftrace_graph_call(void); ++extern void ftrace_graph_stub(void); ++ ++int ftrace_enable_ftrace_graph_caller(void) ++{ ++ unsigned long ip = (unsigned long)(&ftrace_graph_call); ++ unsigned long addr = (unsigned long)(&ftrace_graph_caller); ++ unsigned long stub = (unsigned long)(&ftrace_graph_stub); ++ unsigned int old, new; ++ ++ old = ftrace_call_replace(ip, stub, 0); ++ new = ftrace_call_replace(ip, addr, 0); ++ ++ return ftrace_modify_code(ip, old, new); ++} ++ ++int ftrace_disable_ftrace_graph_caller(void) ++{ ++ unsigned long ip = (unsigned long)(&ftrace_graph_call); ++ unsigned long addr = (unsigned long)(&ftrace_graph_caller); ++ unsigned long stub = (unsigned long)(&ftrace_graph_stub); ++ unsigned int old, new; ++ ++ old = ftrace_call_replace(ip, addr, 0); ++ new = ftrace_call_replace(ip, stub, 0); ++ ++ return ftrace_modify_code(ip, old, new); ++} ++#endif /* CONFIG_DYNAMIC_FTRACE */ ++int get_selfpc(unsigned long mcount_lp) ++{ ++ unsigned long symbol_size, offset; ++ kallsyms_lookup_size_offset(mcount_lp, &symbol_size, &offset); ++ return mcount_lp - offset; ++} ++ ++/* ++ * Hook the return address and push it in the stack of return addrs ++ * in current thread info. ++ */ ++void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) ++{ ++ unsigned long old; ++ int faulted; ++ struct ftrace_graph_ent trace; ++ unsigned long return_hooker = (unsigned long)&return_to_handler; ++ ++ if (unlikely(atomic_read(¤t->tracing_graph_pause))) ++ return; ++ ++ /* ++ * Protect against fault, even if it shouldn't ++ * happen. This tool is too much intrusive to ++ * ignore such a protection. 
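ftrace_modify_code() above follows a read, verify, then write sequence so that a call site is only patched while it still holds the expected instruction word. A user-space analogue of that pattern is sketched below; memcpy()/memcmp-style checks stand in for the probe_kernel_* helpers, there is no fault handling or icache flush, and the instruction words are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * User-space analogue (illustration only) of the read/verify/write
 * pattern in ftrace_modify_code(): read the current word, refuse to
 * patch if it is not what we expect, otherwise install the new word.
 */
static int modify_code(uint32_t *site, uint32_t expected, uint32_t replacement)
{
    uint32_t cur;

    memcpy(&cur, site, sizeof(cur));            /* read the text to modify */
    if (cur != expected)
        return -1;                              /* site changed, refuse to patch */
    memcpy(site, &replacement, sizeof(replacement));
    return 0;
}

int main(void)
{
    uint32_t text = 0x40000000u;                /* pretend instruction word */

    if (modify_code(&text, 0x40000000u, 0x40005a00u) == 0)
        printf("patched to 0x%08x\n", (unsigned)text);
    return 0;
}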
++ */ ++ asm volatile ("1: lwi %[old], [%[parent]]\n" ++ "2: swi %[return_hooker], [%[parent]]\n" ++ " movi %[faulted], 0\n" ++ "3:\n" ++ ".section .fixup, \"ax\"\n" ++ ".align 2 \n" ++ "4: movi %[faulted], 1\n" ++ " b 3b\n" ++ ".previous\n" ++ ".section __ex_table,\"a\"\n" ++ ".align 3 \n" ++ ".long 1b,4b\n" ++ ".long 2b,4b\n" ++ ".previous":[old] "=&r"(old),[faulted] "=r"(faulted) ++ :[parent] "r"(parent),[return_hooker] "r"(return_hooker) ++ :"memory"); ++ ++ if (unlikely(faulted)) { ++ ftrace_graph_stop(); ++ WARN_ON(1); ++ return; ++ } ++ ++ trace.func = self_addr; ++ trace.depth = current->curr_ret_stack + 1; ++ ++ /* Only trace if the calling function expects to */ ++ if (!ftrace_graph_entry(&trace)) { ++ *parent = old; ++ return; ++ } ++ ++ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) { ++ *parent = old; ++ return; ++ } ++} ++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/head.S linux-3.4.110/arch/nds32/kernel/head.S +--- linux-3.4.110.orig/arch/nds32/kernel/head.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/head.S 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,350 @@ ++/* ++ * arch/nds32/kernel/head.S ++ * ++ * NDS32 Kernel startup code ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++//#include ++#include ++#include ++#include ++#include ++ ++/* ++ * We place the page tables 16K below TEXTADDR. Therefore, we must make sure ++ * that TEXTADDR is correctly set. Currently, we expect the least significant ++ * 16 bits to be 0x8000, but we could probably relax this restriction to ++ * TEXTADDR >= PAGE_OFFSET + 0x4000 ++ * ++ * Note that swapper_pg_dir is the virtual address of the page tables, and ++ * pgtbl gives us a position-independent reference to these tables. We can ++ * do this because _stext == TEXTADDR ++ */ ++ ++ .globl swapper_pg_dir ++ .equ swapper_pg_dir, TEXTADDR - 0x4000 ++ ++/* ++ * Kernel startup entry point. ++ * --------------------------- ++ * ++ * This is normally called from the decompressor code. The requirements ++ * are: MMU = off, D-cache = off, I-cache = dont care, $r0 = 0, ++ * $r1 = machine nr. ++ * ++ * This code is mostly position independent, so if you link the kernel at ++ * 0xc0008000, you call this at __pa(0xc0008000). ++ * ++ * See linux/arch/nds32/tools/mach-types for the complete list of machine ++ * numbers for $r1. ++ * ++ * We're trying to keep crap to a minimum; DO NOT add any machine specific ++ * crap here - that's what the boot loader (or in extreme, well justified ++ * circumstances, zImage) is for. ++ */ ++ .section ".head.text", "ax" ++ .type _stext, %function ++ENTRY(_stext) ++ setgie.d ! Disable interrupt ++ isb ++ move $r1, #MACH_TYPE_FARADAY ! Note: as far, we are in the Superuser mode ++ jal __lookup_processor_type ! get processor id, $r5=procinfo, $r9=cpuid, invalid processor $r5=0 ++ li $r2, 'p' ++ beqz $r5, __error ! yes, error 'p' ++ jal __lookup_machine_type ! $r5=machinfo ++ li $r2, 'a' ++ beqz $r5, __error ++ ++/* ++ * Create a temporary mapping area for booting, before start_kernel ++ */ ++ sethi $r4, hi20(swapper_pg_dir) ++ li $p0, (PAGE_OFFSET - PHYS_OFFSET) ++ sub $r4, $r4, $p0 ++ tlbop FlushAll ! 
invalidate TLB\n" ++ isb ++ mtsr $r4, $L1_PPTB ! load page table pointer\n" ++ ++/* set NTC0 cacheable/writeback, mutliple page size in use */ ++ mfsr $r3, $MMU_CTL ++ li $r0, ~0x6 ++ and $r3, $r3, $r0 ++ ori $r3, $r3, 0x404 ++ mtsr $r3, $MMU_CTL ++ isb ++ ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++ li $r2, #(PHYS_OFFSET + 0x7bf) ++#else ++ li $r2, #(PHYS_OFFSET + 0x6bf) ! to remember here ++#endif ++ movi $r3, #0x5 ++ mtsr $r3, $TLB_MISC ++ ++ sethi $r3, hi20(PAGE_OFFSET) ++ li $r0, #PHYS_OFFSET ++ sethi $r5, hi20(SZ_1M) ! Use 1MB pages ++ sethi $r6, hi20(PHYS_OFFSET+SZ_32M) ! Create 32MB first, leave the rest in paging_init() ++_tlb: ++ mtsr $r3, $TLB_VPN ++ dsb ++ tlbop $r2, RWR ++ isb ++ add $r0, $r0, $r5 ++ add $r3, $r3, $r5 ++ add $r2, $r2, $r5 ++ bne $r0, $r6, _tlb ++ ++ mtsr $r3, $TLB_MISC ! setup access page size ++ li $r2, #~0xf ++ and $r3, $r3, $r2 ++#ifdef CONFIG_ANDES_PAGE_SIZE_8KB ++ ori $r3, $r3, #0x1 ++#endif ++ mtsr $r3, $TLB_MISC ++ ++ mfsr $r0, $MISC_CTL ! Enable BTB and RTP ++ li $r1, #~0x3 ++ and $r0, $r0, $r1 ++ mtsr $r0, $MISC_CTL ++ ++/* ++ * Disable L2CC and wait until L2CC registers are mapped into memory to use L2$. ++ */ ++#ifdef CONFIG_CACHE_L2 ++ li $p0, L2CC_PA_BASE ++ li $p1, 0 ++ swi $p1, [$p0 + L2CC_CTRL_OFF] ++#endif ++ ++#ifdef CONFIG_PLAT_AG102 ++/* ++ * Set GPUBA to 0x0c000006. GPUB 0x1c000000, FB size 64MB. ++ */ ++ li $p0, DDR2C_PA_BASE + 0x02a4 ++ li $p1, 0x0c000006 ++ swi $p1, [$p0] ++#endif ++ ++ mfsr $p1, $PSW ++ li $r15, #~0x43df ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE ++ and $p1, $p1, $r15 ++#ifdef __NDS32_EB__ ++ #ifdef CONFIG_WBNA ++ ori $p1, $p1, #0x40ea ! set WBNA|DT|IT|BE|POM:super|INTL:1 ++ #else ++ ori $p1, $p1, #0xea ! set ----|DT|IT|BE|POM:super|INTL:1 ++ #endif ++#else ++ #ifdef CONFIG_WBNA ++ ori $p1, $p1, #0x40ca ! set WBNA|DT|IT|--|POM:super|INTL:1 ++ #else ++ ori $p1, $p1, #0xca ! set ----|DT|IT|--|POM:super|INTL:1 ++ #endif ++#endif ++ ++ mtsr $p1, $IPSW ! when iret, it will automatically enable MMU ++ la $lp, __mmap_switched ++ mtsr $lp, $IPC ++ iret ++ nop ++ ++ .type __switch_data, %object ++__switch_data: ++ .long __mmap_switched ++ .long _sdata ! $r5 ++ .long __bss_start ! $r6 ++ .long _end ! $r7 ++ .long __machine_arch_type ! $r5 ++ .long init_thread_union + 8192 ! $sp ++ ++ ++/* ++ * The following fragment of code is executed with the MMU on in MMU mode, ++ * and uses absolute addresses; this is not position independent. ++ * ++ * $r0 ++ * $r10 = points to proc info ++ * $r8 = points to machine info, ++ * $r1 = machine ID, 0x2c8 for Andes AG101 board ++ * $r9 = processor ID of value in version register ++ */ ++ .align ++ .type __mmap_switched, %function ++__mmap_switched: ++ la $r3, __switch_data + 4 ++ lmw.bim $r5, [$r3], $r7 ++ ++ move $fp, #0 ! Clear BSS (and zero $fp) ++ beq $r7, $r6, _RRT ++1: swi.bi $fp, [$r6], #4 ++ bne $r7, $r6, 1b ++ ++_RRT: ++ lmw.bim $r4, [$r3], $r4, #0b0001 ++ sw $r1, [$r4] ! Save machine type to memory ++ b start_kernel ++ ++ ++/* ++ * Read processor ID register (CP#15, $CR0), and look up in the linker-built ++ * supported processor list. Note that we can't use the absolute addresses ++ * for the __proc_info lists since we aren't running with the MMU on ++ * (and therefore, we are not in the correct address space). We have to ++ * calculate the offset. 
++ * ++ * $r9 = cpuid, get from $CPU_VER ++ * Returns: ++ * $r3, $r4, $r6 corrupted ++ * $r5 = proc_info pointer in physical address space ++ * $r9 = cpuid ++ */ ++ ++ .type __lookup_processor_type, %function ++__lookup_processor_type: ++ la $r5, __proc_info_begin ++ la $r6, __proc_info_end ++ mfsr $r9, $CPU_VER ! get cpu version ++ li $p0, (PAGE_OFFSET - PHYS_OFFSET) ++1: ++ sub $p1, $r5, $p0 ++ lmw.bi $r3, [$p1], $r4 ! value, mask ++ and $r4, $r4, $r9 ! mask wanted bits ++ xor $p1, $r3, $r4 ++ beqz $p1, 2f ++ addi $r5, $r5, #PROC_INFO_SZ ! sizeof(proc_info_list) ++ bne $r5, $r6, 1b ++ ++ move $r5, #0 ! unknown processor -> exit ++2: ret ++ ++ ++ ++/* ++ * Lookup machine architecture in the linker-build list of architectures. ++ * Note that we can't use the absolute addresses for the __arch_info ++ * lists since we aren't running with the MMU on (and therefore, we are ++ * not in the correct address space). We have to calculate the offset. ++ * ++ * $r1 = machine architecture number ++ * Returns: ++ * $r3, $r4, $r6 corrupted ++ * $r5 = mach_info pointer in physical address space ++ */ ++ .type __lookup_machine_type, %function ++__lookup_machine_type: ++ la $r5, __arch_info_begin ++ la $r6, __arch_info_end ++1: ++ li $p0, (PAGE_OFFSET - PHYS_OFFSET) ++ sub $p1, $r5, $p0 ++ lwi $r3, [$p1] ! use PA to get machine type ++ xor $p1, $r3, $r1 ! matches loader number? ++ beqz $p1, 2f ! found ++ addi $r5, $r5, #SIZEOF_MACHINE_DESC ! next machine_desc ++ bne $r5, $r6, 1b ++ move $r5, #0 ! unknown machine ++2: ret ++ ++ ++/* ++ * Exception handling. Something went wrong and we can't proceed. We ++ * ought to tell the user, but since we don't have any guarantee that ++ * we're even running on the right architecture, we do virtually nothing. ++ * ++ * a = invalid architecture ++ * p = invalid processor ++ * ++ * Generally, only serious errors cause this. ++ */ ++__error: ++ li $r1, UART0_PA_BASE ++ sw $r2, [$r1] ++die: b die ++ ++ ++ ++#ifdef CONFIG_SMP ++ ++ .type secondary_startup, %function ++ENTRY(secondary_startup) ++ /* ++ * Common entry point for secondary CPUs. ++ * ++ * Lookup the processor type - there is no need to check the ++ * machine type as it has already been validated by the ++ * primary processor. ++ */ ++ mfsr $r0, $MMU_CTL ++ ori $r0, $r0, #4 ++#ifndef CONFIG_NO_KERNEL_LARGE_PAGE ++ ori $r0, $r0, #0x400 ++#endif ++ mtsr $r0, $MMU_CTL ++ ++ movi $r15, #0x01 ++ swi $r15, [$p1+#0x10] ++ lwi $sp, [$p1+#0x18] ++ ++ /* ++ * Set stack, L1_PPTB, and enable mmu ++ */ ++ sethi $r4, hi20(swapper_pg_dir) ++ li $p0, (PAGE_OFFSET - PHYS_OFFSET) ++ sub $r4, $r4, $p0 ++ ++ tlbop FlushAll ++ isb ++ mtsr $r4, $L1_PPTB ++ ++#ifdef CONFIG_CACHE_L2 ++ li $r0, #0x1801 ++ mtsr $r0, $HSMP_SADDR ++ isb ++ li $r0, #(L2CC_PA_BASE+0x10) ! L2CC Control ++ lwi $r1, [$r0] ++ li $r2, #~(0xf << 28) ++ and $r1, $r1, $r2 ++ bset $r1, $r1, #29 ++ bset $r1, $r1, #31 ++ swi $r1, [$r0] ++#endif ++ ++ move $fp, #0 ++ ++ mfsr $p1, $PSW ++ li $r15, #~0x43df ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE ++ and $p1, $p1, $r15 ++#ifdef __NDS32_EB__ ++ #ifdef CONFIG_WBNA ++ ori $p1, $p1, #0x40ea ! set WBNA|DT|IT|BE|POM:super|INTL:1 ++ #else ++ ori $p1, $p1, #0xea ! set ----|DT|IT|BE|POM:super|INTL:1 ++ #endif ++#else ++ #ifdef CONFIG_WBNA ++ ori $p1, $p1, #0x40ca ! set WBNA|DT|IT|--|POM:super|INTL:1 ++ #else ++ ori $p1, $p1, #0xca ! set ----|DT|IT|--|POM:super|INTL:1 ++ #endif ++#endif ++ ++ mtsr $p1, $IPSW ! 
when iret, it will automatically enable MMU ++ la $lp, secondary_start_kernel ++ mtsr $lp, $IPC ++ iret ++ nop ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/init_task.c linux-3.4.110/arch/nds32/kernel/init_task.c +--- linux-3.4.110.orig/arch/nds32/kernel/init_task.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/init_task.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * linux/arch/nds32/kernel/init_task.c ++ * ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++#include ++#include ++#include ++#include ++ ++#include ++ ++static struct signal_struct init_signals = INIT_SIGNALS(init_signals); ++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); ++ ++/* Initial task structure */ ++struct task_struct init_task = INIT_TASK(init_task); ++EXPORT_SYMBOL(init_task); ++ ++/* ++ * Initial thread structure. Alignment of this is handled by a special ++ * linker map entry. ++ */ ++union thread_union init_thread_union __init_task_data = ++ { INIT_THREAD_INFO(init_task) }; +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/io.c linux-3.4.110/arch/nds32/kernel/io.c +--- linux-3.4.110.orig/arch/nds32/kernel/io.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/io.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,51 @@ ++#include ++#include ++ ++#include ++ ++/* ++ * Copy data from IO memory space to "real" memory space. ++ * This needs to be optimized. ++ */ ++void _memcpy_fromio(void *to, const volatile void __iomem * from, size_t count) ++{ ++ unsigned char *t = to; ++ while (count) { ++ count--; ++ *t = readb(from); ++ t++; ++ from++; ++ } ++} ++ ++/* ++ * Copy data from "real" memory space to IO memory space. ++ * This needs to be optimized. ++ */ ++void _memcpy_toio(volatile void __iomem * to, const void *from, size_t count) ++{ ++ const unsigned char *f = from; ++ while (count) { ++ count--; ++ writeb(*f, to); ++ f++; ++ to++; ++ } ++} ++ ++/* ++ * "memset" on IO memory space. ++ * This needs to be optimized. ++ */ ++void _memset_io(volatile void __iomem * dst, int c, size_t count) ++{ ++ while (count) { ++ count--; ++ writeb(c, dst); ++ dst++; ++ } ++} ++ ++EXPORT_SYMBOL(_memcpy_fromio); ++EXPORT_SYMBOL(_memcpy_toio); ++EXPORT_SYMBOL(_memset_io); +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/irq.c linux-3.4.110/arch/nds32/kernel/irq.c +--- linux-3.4.110.orig/arch/nds32/kernel/irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/irq.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,122 @@ ++/* ++ * linux/arch/nds32/kernel/irq.c ++ * ++ * Copyright (C) 1992 Linus Torvalds ++ * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This file contains the code used by various IRQ handling routines: ++ * asking for different IRQ's should be done through these routines ++ * instead of just grabbing them. Thus setups with different IRQ numbers ++ * shouldn't result in any weird surprises, and installing new handlers ++ * should be easier. ++ * ++ * IRQ's are in fact implemented a bit like signal handlers for the kernel. ++ * Naturally it's not a 1:1 relation, but there are similarities. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++void (*init_arch_irq) (void)__initdata = NULL; ++unsigned long irq_err_count; ++ ++void ack_bad_irq(unsigned int irq) ++{ ++ printk("bad IRQ %d\n", irq); ++} ++ ++int show_interrupts(struct seq_file *p, void *v) ++{ ++ int i = *(loff_t *) v, cpu; ++ struct irqaction *action; ++ unsigned long flags; ++ ++ if (i == 0) { ++ char cpuname[12]; ++ ++ seq_printf(p, " "); ++ for_each_present_cpu(cpu) { ++ sprintf(cpuname, "CPU%d", cpu); ++ seq_printf(p, " %10s", cpuname); ++ } ++ seq_putc(p, '\n'); ++ } ++ ++ if (i < NR_IRQS) { ++ raw_spin_lock_irqsave(&irq_desc[i].lock, flags); ++ action = irq_desc[i].action; ++ if (!action) ++ goto unlock; ++ ++ seq_printf(p, "%3d: ", i); ++ for_each_present_cpu(cpu) ++ seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); ++ seq_printf(p, " %s", action->name); ++ for (action = action->next; action; action = action->next) ++ seq_printf(p, ", %s", action->name); ++ ++ seq_putc(p, '\n'); ++unlock: ++ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); ++ } else if (i == NR_IRQS) { ++ seq_printf(p, "Err: %10lu\n", irq_err_count); ++ } ++ return 0; ++} ++ ++/* ++ * do_IRQ handles all hardware IRQ's. Decoded IRQs should not ++ * come via this function. Instead, they should provide their ++ * own 'handler' ++ */ ++asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) ++{ ++ struct pt_regs *old_regs = set_irq_regs(regs); ++ ++ /* ++ * Some hardware gives randomly wrong interrupts. Rather ++ * than crashing, do something sensible. ++ */ ++ if (unlikely(irq >= NR_IRQS)) { ++ printk(KERN_EMERG "IRQ exceeds NR_IRQS\n"); ++ BUG(); ++ } ++ ++ irq_enter(); ++ generic_handle_irq(irq); ++ irq_exit(); ++ set_irq_regs(old_regs); ++ ++} ++ ++void __init init_IRQ(void) ++{ ++ int irq; ++ ++ for (irq = 0; irq < NR_IRQS; irq++) ++ irq_set_noprobe(irq); ++ ++ init_arch_irq(); ++} ++ ++#ifdef CONFIG_TRACE_IRQFLAGS ++void notrace arch_trace_hardirqs_on(void) ++{ ++ trace_hardirqs_on(); ++} ++ ++void notrace arch_trace_hardirqs_off(void) ++{ ++ trace_hardirqs_off(); ++} ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/kgdb.c linux-3.4.110/arch/nds32/kernel/kgdb.c +--- linux-3.4.110.orig/arch/nds32/kernel/kgdb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/kgdb.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,291 @@ ++/* ============================================================================ ++ * ++ * arch/nds32/kernel/kgdb.c ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for NDS32 KGDB support. ++ * ++ * Author: Harry Pan ++ * ++ * Revision History: ++ * ++ * Nov.23.2007 Initial ported by Harry, ++ * inherited from the KGDB in 2.6.11 and 2.4.35. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++// ============================================================================ ++// regs_to_gdb_regs() ++// ++// Make a local copy of the registers passed into the handler (bletch). 
++// ============================================================================ ++void pt_regs_to_gdb_regs(unsigned long *gregs, struct pt_regs *kregs) ++{ ++ int regno; ++ ++ /* Initialize all to zero (??) */ ++ for (regno = 0; regno < NDS32_NUM_REGS; regno++) ++ gregs[regno] = 0; ++ ++ gregs[0] = kregs->NDS32_r0; ++ gregs[1] = kregs->NDS32_r1; ++ gregs[2] = kregs->NDS32_r2; ++ gregs[3] = kregs->NDS32_r3; ++ gregs[4] = kregs->NDS32_r4; ++ gregs[5] = kregs->NDS32_r5; ++ gregs[6] = kregs->NDS32_r6; ++ gregs[7] = kregs->NDS32_r7; ++ gregs[8] = kregs->NDS32_r8; ++ gregs[9] = kregs->NDS32_r9; ++ gregs[10] = kregs->NDS32_r10; ++ gregs[11] = kregs->NDS32_r11; ++ gregs[12] = kregs->NDS32_r12; ++ gregs[13] = kregs->NDS32_r13; ++ gregs[14] = kregs->NDS32_r14; ++ gregs[15] = kregs->NDS32_r15; ++ gregs[16] = kregs->NDS32_r16; ++ gregs[17] = kregs->NDS32_r17; ++ gregs[18] = kregs->NDS32_r18; ++ gregs[19] = kregs->NDS32_r19; ++ gregs[20] = kregs->NDS32_r20; ++ gregs[21] = kregs->NDS32_r21; ++ gregs[22] = kregs->NDS32_r22; ++ gregs[23] = kregs->NDS32_r23; ++ gregs[24] = kregs->NDS32_r24; ++ gregs[25] = kregs->NDS32_r25; ++ gregs[26] = kregs->NDS32_pp0; ++ gregs[27] = kregs->NDS32_pp1; ++ gregs[28] = kregs->NDS32_fp; ++ gregs[29] = kregs->NDS32_gp; ++ gregs[30] = kregs->NDS32_lp; ++ gregs[31] = kregs->NDS32_sp; ++ gregs[32] = kregs->NDS32_ipc; ++ gregs[33] = kregs->NDS32_d0lo; ++ gregs[34] = kregs->NDS32_d0hi; ++ gregs[35] = kregs->NDS32_d1lo; ++ gregs[36] = kregs->NDS32_d1hi; ++ gregs[NDS32_IR0_REGNUM] = kregs->NDS32_ipsw; ++} ++ ++// ============================================================================ ++// gdb_regs_to_regs() ++// ++// Copy local gdb registers back to kgdb regs, for later copy to kernel. ++// ============================================================================ ++void gdb_regs_to_pt_regs(unsigned long *gregs, struct pt_regs *kregs) ++{ ++ kregs->NDS32_r0 = gregs[0]; ++ kregs->NDS32_r1 = gregs[1]; ++ kregs->NDS32_r2 = gregs[2]; ++ kregs->NDS32_r3 = gregs[3]; ++ kregs->NDS32_r4 = gregs[4]; ++ kregs->NDS32_r5 = gregs[5]; ++ kregs->NDS32_r6 = gregs[6]; ++ kregs->NDS32_r7 = gregs[7]; ++ kregs->NDS32_r8 = gregs[8]; ++ kregs->NDS32_r9 = gregs[9]; ++ kregs->NDS32_r10 = gregs[10]; ++ kregs->NDS32_r11 = gregs[11]; ++ kregs->NDS32_r12 = gregs[12]; ++ kregs->NDS32_r13 = gregs[13]; ++ kregs->NDS32_r14 = gregs[14]; ++ kregs->NDS32_r15 = gregs[15]; ++ kregs->NDS32_r16 = gregs[16]; ++ kregs->NDS32_r17 = gregs[17]; ++ kregs->NDS32_r18 = gregs[18]; ++ kregs->NDS32_r19 = gregs[19]; ++ kregs->NDS32_r20 = gregs[20]; ++ kregs->NDS32_r21 = gregs[21]; ++ kregs->NDS32_r22 = gregs[22]; ++ kregs->NDS32_r23 = gregs[23]; ++ kregs->NDS32_r24 = gregs[24]; ++ kregs->NDS32_r25 = gregs[25]; ++ kregs->NDS32_pp0 = gregs[26]; ++ kregs->NDS32_pp1 = gregs[27]; ++ kregs->NDS32_fp = gregs[28]; ++ kregs->NDS32_gp = gregs[29]; ++ kregs->NDS32_lp = gregs[30]; ++ kregs->NDS32_sp = gregs[31]; ++ kregs->NDS32_ipc = gregs[32]; ++ kregs->NDS32_d0lo = gregs[33]; ++ kregs->NDS32_d0hi = gregs[34]; ++ kregs->NDS32_d1lo = gregs[35]; ++ kregs->NDS32_d1hi = gregs[36]; ++ kregs->NDS32_ipsw = gregs[NDS32_IR0_REGNUM]; ++} ++ ++// ---------------------------------------------------------------------------- ++// kgdb_get_user_regs() ++// ++// Get user process registers. 
++// ---------------------------------------------------------------------------- ++static inline struct pt_regs *kgdb_get_user_regs(struct task_struct *task) ++{ ++ return (struct pt_regs *) ++ ((unsigned long)task_thread_info(task) + THREAD_SIZE - ++ 8 - sizeof(struct pt_regs)); ++} ++ ++// ============================================================================ ++// sleeping_thread_to_gdb_regs() ++// ++// ============================================================================ ++void sleeping_thread_to_gdb_regs(unsigned long *gregs, struct task_struct *task) ++{ ++ int regno; ++ struct pt_regs *tregs; ++ ++ /* Just making sure... */ ++ if (task == NULL) ++ return; ++ ++ /* Initialize to zero */ ++ for (regno = 0; regno < NDS32_NUM_REGS; regno++) ++ gregs[regno] = 0; ++ ++ /* Otherwise, we have only some registers from switch_to() */ ++ tregs = kgdb_get_user_regs(task); ++ ++ gregs[0] = tregs->NDS32_r0; ++ gregs[1] = tregs->NDS32_r1; ++ gregs[2] = tregs->NDS32_r2; ++ gregs[3] = tregs->NDS32_r3; ++ gregs[4] = tregs->NDS32_r4; ++ gregs[5] = tregs->NDS32_r5; ++ gregs[6] = tregs->NDS32_r6; ++ gregs[7] = tregs->NDS32_r7; ++ gregs[8] = tregs->NDS32_r8; ++ gregs[9] = tregs->NDS32_r9; ++ gregs[10] = tregs->NDS32_r10; ++ gregs[11] = tregs->NDS32_r11; ++ gregs[12] = tregs->NDS32_r12; ++ gregs[13] = tregs->NDS32_r13; ++ gregs[14] = tregs->NDS32_r14; ++ gregs[15] = tregs->NDS32_r15; ++ gregs[16] = tregs->NDS32_r16; ++ gregs[17] = tregs->NDS32_r17; ++ gregs[18] = tregs->NDS32_r18; ++ gregs[19] = tregs->NDS32_r19; ++ gregs[20] = tregs->NDS32_r20; ++ gregs[21] = tregs->NDS32_r21; ++ gregs[22] = tregs->NDS32_r22; ++ gregs[23] = tregs->NDS32_r23; ++ gregs[24] = tregs->NDS32_r24; ++ gregs[25] = tregs->NDS32_r25; ++ gregs[26] = tregs->NDS32_pp0; ++ gregs[27] = tregs->NDS32_pp1; ++ gregs[28] = tregs->NDS32_fp; ++ gregs[29] = tregs->NDS32_gp; ++ gregs[30] = tregs->NDS32_lp; ++ gregs[31] = tregs->NDS32_sp; ++ gregs[32] = tregs->NDS32_ipc; ++ gregs[33] = tregs->NDS32_d0lo; ++ gregs[34] = tregs->NDS32_d0hi; ++ gregs[35] = tregs->NDS32_d1lo; ++ gregs[36] = tregs->NDS32_d1hi; ++ gregs[NDS32_IR0_REGNUM] = tregs->NDS32_ipsw; ++} ++ ++int kgdb_arch_handle_exception(int exception_vector, int signo, ++ int err_code, char *remcom_in_buffer, ++ char *remcom_out_buffer, ++ struct pt_regs *linux_regs) ++{ ++ long addr; ++ char *ptr; ++ ++ if (0 == atomic_dec_if_positive(&kgdb_setting_breakpoint)) ++ linux_regs->NDS32_ipc += 2; ++ ++ switch (remcom_in_buffer[0]) { ++ case 'k': ++ case 'D': ++ case 'c': ++ case 's': ++ kgdb_contthread = NULL; ++ ++ /* ++ * Try to read optional parameter, pc unchanged if no parm. ++ * If this was a compiled breakpoint, we need to move ++ * to the next instruction or we will just breakpoint ++ * over and over again. 
++ */ ++ ptr = &remcom_in_buffer[1]; ++ if (kgdb_hex2long(&ptr, &addr)) { ++ linux_regs->NDS32_ipc = addr; ++ } ++ linux_regs->NDS32_ipsw &= ~0x800; ++ if (remcom_in_buffer[0] == 's') { ++ linux_regs->NDS32_ipsw |= 0x800; ++ } ++ ++ return 0; ++ } ++ ++ return -1; ++} ++ ++static int kgdb_notify(struct notifier_block *self, ++ unsigned long cmd, void *ptr) ++{ ++ struct die_args *args = ptr; ++ unsigned long addr = args->err; ++ if (addr > TASK_SIZE) { ++ kgdb_handle_exception(args->trapnr, args->signr, ++ args->err, args->regs); ++ return NOTIFY_STOP; ++ } ++ return NOTIFY_DONE; ++} ++ ++static struct notifier_block kgdb_notifier = { ++ .notifier_call = kgdb_notify, ++}; ++ ++int kgdb_arch_init(void) ++{ ++ register_die_notifier(&kgdb_notifier); ++} ++ ++void kgdb_arch_exit(void) ++{ ++ unregister_die_notifier(&kgdb_notifier); ++} ++ ++struct kgdb_arch arch_kgdb_ops = { ++#ifdef __NDS32_EL__ ++ .gdb_bpt_instr = {0xeb, 0xff} ++#else ++ .gdb_bpt_instr = {0xff, 0xeb} ++#endif ++}; +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/kprobes.c linux-3.4.110/arch/nds32/kernel/kprobes.c +--- linux-3.4.110.orig/arch/nds32/kernel/kprobes.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/kprobes.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,869 @@ ++/* ++ * Kernel Probes (KProbes) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) IBM Corporation, 2002, 2004 ++ * ++ * 2002-Oct Created by Vamsi Krishna S Kernel ++ * Probes initial implementation ( includes contributions from ++ * Rusty Russell). ++ * 2004-July Suparna Bhattacharya added jumper probes ++ * interface to access function arguments. ++ * 2004-Oct Jim Keniston and Prasanna S Panchamukhi ++ * adapted for x86_64 from i386. ++ * 2005-Mar Roland McGrath ++ * Fixed to handle %rip-relative addressing mode correctly. ++ * 2005-May Hien Nguyen , Jim Keniston ++ * and Prasanna S Panchamukhi ++ * added function-return probes. ++ * 2005-May Rusty Lynch ++ * Added function return probes functionality ++ * 2006-Feb Masami Hiramatsu added ++ * kprobe-booster and kretprobe-booster for i386. ++ * 2007-Dec Masami Hiramatsu added kprobe-booster ++ * and kretprobe-booster for x86-64 ++ * 2007-Dec Masami Hiramatsu , Arjan van de Ven ++ * and Jim Keniston ++ * unified x86 kprobes code. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#ifdef __NDS32_EL__ ++#define SZINSN(insn) (((insn & 0x00000080) == 0) ? 4 : 2) ++#define BREAK16_1FE 0xFEEB ++#else ++#define SZINSN(insn) (((insn & 0x80000000) == 0) ? 
4 : 2) ++#define BREAK16_1FE 0xEBFE ++#endif ++ ++void jprobe_return_point(void); ++ ++DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; ++DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); ++ ++#if 0 ++/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ ++static void __kprobes set_jmp_op(void *from, void *to) ++{ ++ struct __arch_jmp_op { ++ char op; ++ s32 raddr; ++ } __attribute__ ((packed)) * jop; ++ jop = (struct __arch_jmp_op *)from; ++ jop->raddr = (s32) ((long)(to) - ((long)(from) + 5)); ++ jop->op = RELATIVEJUMP_INSTRUCTION; ++} ++ ++/* ++ * Check for the REX prefix which can only exist on X86_64 ++ * X86_32 always returns 0 ++ */ ++static int __kprobes is_REX_prefix(kprobe_opcode_t * insn) ++{ ++#ifdef CONFIG_X86_64 ++ if ((*insn & 0xf0) == 0x40) ++ return 1; ++#endif ++ return 0; ++} ++ ++/* ++ * Returns non-zero if opcode is boostable. ++ * RIP relative instructions are adjusted at copying time in 64 bits mode ++ */ ++static int __kprobes can_boost(kprobe_opcode_t * opcodes) ++{ ++ kprobe_opcode_t opcode; ++ kprobe_opcode_t *orig_opcodes = opcodes; ++ ++ if (search_exception_tables(opcodes)) ++ return 0; /* Page fault may occur on this address. */ ++ ++retry: ++ if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) ++ return 0; ++ opcode = *(opcodes++); ++ ++ /* 2nd-byte opcode */ ++ if (opcode == 0x0f) { ++ if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) ++ return 0; ++ return test_bit(*opcodes, ++ (unsigned long *)twobyte_is_boostable); ++ } ++ ++ switch (opcode & 0xf0) { ++#ifdef CONFIG_X86_64 ++ case 0x40: ++ goto retry; /* REX prefix is boostable */ ++#endif ++ case 0x60: ++ if (0x63 < opcode && opcode < 0x67) ++ goto retry; /* prefixes */ ++ /* can't boost Address-size override and bound */ ++ return (opcode != 0x62 && opcode != 0x67); ++ case 0x70: ++ return 0; /* can't boost conditional jump */ ++ case 0xc0: ++ /* can't boost software-interruptions */ ++ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; ++ case 0xd0: ++ /* can boost AA* and XLAT */ ++ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); ++ case 0xe0: ++ /* can boost in/out and absolute jmps */ ++ return ((opcode & 0x04) || opcode == 0xea); ++ case 0xf0: ++ if ((opcode & 0x0c) == 0 && opcode != 0xf1) ++ goto retry; /* lock/rep(ne) prefix */ ++ /* clear and set flags are boostable */ ++ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); ++ default: ++ /* segment override prefixes are boostable */ ++ if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e) ++ goto retry; /* prefixes */ ++ /* CS override prefix and call are not boostable */ ++ return (opcode != 0x2e && opcode != 0x9a); ++ } ++} ++ ++/* ++ * Returns non-zero if opcode modifies the interrupt flag. ++ */ ++static int __kprobes is_IF_modifier(kprobe_opcode_t * insn) ++{ ++ switch (*insn) { ++ case 0xfa: /* cli */ ++ case 0xfb: /* sti */ ++ case 0xcf: /* iret/iretd */ ++ case 0x9d: /* popf/popfd */ ++ return 1; ++ } ++ ++ /* ++ * on X86_64, 0x40-0x4f are REX prefixes so we need to look ++ * at the next byte instead.. but of course not recurse infinitely ++ */ ++ if (is_REX_prefix(insn)) ++ return is_IF_modifier(++insn); ++ ++ return 0; ++} ++ ++/* ++ * Adjust the displacement if the instruction uses the %rip-relative ++ * addressing mode. ++ * If it does, Return the address of the 32-bit displacement word. ++ * If not, return null. ++ * Only applicable to 64-bit x86. 
++ */ ++static void __kprobes fix_riprel(struct kprobe *p) ++{ ++#ifdef CONFIG_X86_64 ++ u8 *insn = p->ainsn.insn; ++ s64 disp; ++ int need_modrm; ++ ++ /* Skip legacy instruction prefixes. */ ++ while (1) { ++ switch (*insn) { ++ case 0x66: ++ case 0x67: ++ case 0x2e: ++ case 0x3e: ++ case 0x26: ++ case 0x64: ++ case 0x65: ++ case 0x36: ++ case 0xf0: ++ case 0xf3: ++ case 0xf2: ++ ++insn; ++ continue; ++ } ++ break; ++ } ++ ++ /* Skip REX instruction prefix. */ ++ if (is_REX_prefix(insn)) ++ ++insn; ++ ++ if (*insn == 0x0f) { ++ /* Two-byte opcode. */ ++ ++insn; ++ need_modrm = test_bit(*insn, ++ (unsigned long *)twobyte_has_modrm); ++ } else ++ /* One-byte opcode. */ ++ need_modrm = test_bit(*insn, ++ (unsigned long *)onebyte_has_modrm); ++ ++ if (need_modrm) { ++ u8 modrm = *++insn; ++ if ((modrm & 0xc7) == 0x05) { ++ /* %rip+disp32 addressing mode */ ++ /* Displacement follows ModRM byte. */ ++ ++insn; ++ /* ++ * The copied instruction uses the %rip-relative ++ * addressing mode. Adjust the displacement for the ++ * difference between the original location of this ++ * instruction and the location of the copy that will ++ * actually be run. The tricky bit here is making sure ++ * that the sign extension happens correctly in this ++ * calculation, since we need a signed 32-bit result to ++ * be sign-extended to 64 bits when it's added to the ++ * %rip value and yield the same 64-bit result that the ++ * sign-extension of the original signed 32-bit ++ * displacement would have given. ++ */ ++ disp = (u8 *) p->addr + *((s32 *) insn) - ++ (u8 *) p->ainsn.insn; ++ BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ ++ *(s32 *) insn = (s32) disp; ++ } ++ } ++#endif ++} ++#endif ++ ++static void __kprobes arch_copy_kprobe(struct kprobe *p) ++{ ++ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); ++ flush_icache_range((unsigned long)p->ainsn.insn, ++ (unsigned long)p->ainsn.insn + ++ MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); ++ ++// fix_riprel(p); ++ ++// if (can_boost(p->addr)) ++// p->ainsn.boostable = 0; ++// else ++ p->ainsn.boostable = -1; ++ ++ p->opcode = *p->addr; ++} ++ ++int __kprobes arch_prepare_kprobe(struct kprobe *p) ++{ ++ /* insn: must be on special executable page on x86. 
*/ ++ p->ainsn.insn = get_insn_slot(); ++ if (!p->ainsn.insn) ++ return -ENOMEM; ++ arch_copy_kprobe(p); ++ return 0; ++} ++ ++void __kprobes arch_arm_kprobe(struct kprobe *p) ++{ ++ *p->addr = BREAK16_1FE; ++ flush_icache_range((unsigned long)p->addr, ++ (unsigned long)p->addr + sizeof(kprobe_opcode_t)); ++} ++ ++void __kprobes arch_disarm_kprobe(struct kprobe *p) ++{ ++ *p->addr = p->opcode; ++ flush_icache_range((unsigned long)p->addr, ++ (unsigned long)p->addr + sizeof(kprobe_opcode_t)); ++} ++ ++void __kprobes arch_remove_kprobe(struct kprobe *p) ++{ ++ if (p->ainsn.insn) { ++ free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); ++ p->ainsn.insn = NULL; ++ } ++} ++ ++static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) ++{ ++ kcb->prev_kprobe.kp = kprobe_running(); ++ kcb->prev_kprobe.status = kcb->kprobe_status; ++ kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags; ++ kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; ++} ++ ++static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) ++{ ++ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; ++ kcb->kprobe_status = kcb->prev_kprobe.status; ++ kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; ++ kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; ++} ++ ++static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, ++ struct kprobe_ctlblk *kcb) ++{ ++ __get_cpu_var(current_kprobe) = p; ++// kcb->kprobe_saved_flags = kcb->kprobe_old_flags ++// = regs->NDS32_ipsw & PSW_mskHSS; ++} ++ ++static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) ++{ ++ regs->NDS32_ipsw |= PSW_mskHSS; ++ /* single step inline if the instruction is an int3 */ ++// if (p->opcode == BREAK16_1FE) ++// regs->NDS32_ipc = (unsigned long)p->addr; ++// else ++ regs->NDS32_ipc = (unsigned long)p->ainsn.insn; ++} ++ ++void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ++ struct pt_regs *regs) ++{ ++ ri->ret_addr = (kprobe_opcode_t *) regs->NDS32_lp; ++ regs->NDS32_lp = (unsigned long)&kretprobe_trampoline; ++} ++ ++static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, ++ struct kprobe_ctlblk *kcb) ++{ ++#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) ++ if (p->ainsn.boostable == 1 && !p->post_handler) { ++ /* Boost up -- we can execute copied instructions directly */ ++ reset_current_kprobe(); ++ regs->NDS32_ipc = (unsigned long)p->ainsn.insn; ++ preempt_enable_no_resched(); ++ return; ++ } ++#endif ++ prepare_singlestep(p, regs); ++ kcb->kprobe_status = KPROBE_HIT_SS; ++} ++ ++/* ++ * We have reentered the kprobe_handler(), since another probe was hit while ++ * within the handler. We save the original kprobes variables and just single ++ * step on the instruction of the new probe without calling any user handlers. ++ */ ++static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, ++ struct kprobe_ctlblk *kcb) ++{ ++ switch (kcb->kprobe_status) { ++#if 0 ++ case KPROBE_HIT_SSDONE: ++#ifdef CONFIG_X86_64 ++ /* TODO: Provide re-entrancy from post_kprobes_handler() and ++ * avoid exception stack corruption while single-stepping on ++ * the instruction of the new probe. 
++ */ ++ arch_disarm_kprobe(p); ++ regs->ip = (unsigned long)p->addr; ++ reset_current_kprobe(); ++ preempt_enable_no_resched(); ++ break; ++#endif ++#endif ++ case KPROBE_HIT_ACTIVE: ++ save_previous_kprobe(kcb); ++ set_current_kprobe(p, regs, kcb); ++ kprobes_inc_nmissed_count(p); ++ prepare_singlestep(p, regs); ++ kcb->kprobe_status = KPROBE_REENTER; ++ break; ++ case KPROBE_HIT_SS: ++ if (p == kprobe_running()) { ++ regs->NDS32_ipc &= ~PSW_mskHSS; ++// regs->NDS32_ipc |= kcb->kprobe_saved_flags; ++ return 0; ++ } else { ++ /* A probe has been hit in the codepath leading up ++ * to, or just after, single-stepping of a probed ++ * instruction. This entire codepath should strictly ++ * reside in .kprobes.text section. Raise a warning ++ * to highlight this peculiar case. ++ */ ++ } ++ default: ++ /* impossible cases */ ++ WARN_ON(1); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++/* ++ * Interrupts are disabled on entry as trap3 is an interrupt gate and they ++ * remain disabled thorough out this function. ++ */ ++static int __kprobes kprobe_handler(struct pt_regs *regs) ++{ ++ kprobe_opcode_t *addr; ++ struct kprobe *p; ++ struct kprobe_ctlblk *kcb; ++ ++ addr = (kprobe_opcode_t *) regs->NDS32_ipc; ++ if (*addr != BREAK16_1FE) { ++ /* ++ * The breakpoint instruction was removed right ++ * after we hit it. Another cpu has removed ++ * either a probepoint or a debugger breakpoint ++ * at this address. In either case, no further ++ * handling of this interrupt is appropriate. ++ * Back up over the (now missing) int3 and run ++ * the original instruction. ++ */ ++ return 1; ++ } ++ ++ /* ++ * We don't want to be preempted for the entire ++ * duration of kprobe processing. We conditionally ++ * re-enable preemption at the end of this function, ++ * and also in reenter_kprobe() and setup_singlestep(). ++ */ ++ preempt_disable(); ++ ++ kcb = get_kprobe_ctlblk(); ++ p = get_kprobe(addr); ++ ++ if (p) { ++ if (kprobe_running()) { ++ if (reenter_kprobe(p, regs, kcb)) ++ return 1; ++ } else { ++ set_current_kprobe(p, regs, kcb); ++ kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++ ++ /* ++ * If we have no pre-handler or it returned 0, we ++ * continue with normal processing. If we have a ++ * pre-handler and it returned non-zero, it prepped ++ * for calling the break_handler below on re-entry ++ * for jprobe processing, so get out doing nothing ++ * more here. ++ */ ++ if (!p->pre_handler || !p->pre_handler(p, regs)) ++ setup_singlestep(p, regs, kcb); ++ return 1; ++ } ++ } else if (kprobe_running()) { ++ p = __get_cpu_var(current_kprobe); ++ if (p->break_handler && p->break_handler(p, regs)) { ++ setup_singlestep(p, regs, kcb); ++ return 1; ++ } ++ } ++ /* else: not a kprobe fault; let the kernel handle it */ ++ preempt_enable_no_resched(); ++ return 0; ++} ++ ++/* ++ * When a retprobed function returns, this code saves registers and ++ * calls trampoline_handler() runs, which calls the kretprobe's handler. 
++ */ ++static void __used __kprobes kretprobe_trampoline_holder(void) ++{ ++ asm volatile (".global kretprobe_trampoline \n" ++ "kretprobe_trampoline: \n" ++ "smw.adm $r15, [$sp], $r15, #0x0\n" ++ "smw.adm $r0, [$sp], $r5, #0x1 \n" ++ "addi $r0, $sp, #-76 \n" ++ "bal trampoline_handler \n" ++ "move $lp, $r0 \n" ++ "lmw.bim $r0, [$sp], $r5, #0x1 \n" ++ "lmw.bim $r15, [$sp], $r15, #0x0\n" ++ "ret \n"); ++} ++ ++/* ++ * Called from kretprobe_trampoline ++ */ ++static __used __kprobes void *trampoline_handler(struct pt_regs *regs) ++{ ++ struct kretprobe_instance *ri = NULL; ++ struct hlist_head *head, empty_rp; ++ struct hlist_node *node, *tmp; ++ unsigned long flags, orig_ret_address = 0; ++ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; ++ ++ INIT_HLIST_HEAD(&empty_rp); ++ kretprobe_hash_lock(current, &head, &flags); ++ ++ /* ++ * It is possible to have multiple instances associated with a given ++ * task either because multiple functions in the call path have ++ * return probes installed on them, and/or more than one ++ * return probe was registered for a target function. ++ * ++ * We can handle this because: ++ * - instances are always pushed into the head of the list ++ * - when multiple return probes are registered for the same ++ * function, the (chronologically) first instance's ret_addr ++ * will be the real return address, and all the rest will ++ * point to kretprobe_trampoline. ++ */ ++ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { ++ if (ri->task != current) ++ /* another task is sharing our hash bucket */ ++ continue; ++ ++ if (ri->rp && ri->rp->handler) { ++ __get_cpu_var(current_kprobe) = &ri->rp->kp; ++ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; ++ ri->rp->handler(ri, regs); ++ __get_cpu_var(current_kprobe) = NULL; ++ } ++ ++ orig_ret_address = (unsigned long)ri->ret_addr; ++ recycle_rp_inst(ri, &empty_rp); ++ ++ if (orig_ret_address != trampoline_address) ++ /* ++ * This is the real return address. Any other ++ * instances associated with this task are for ++ * other calls deeper on the call stack ++ */ ++ break; ++ } ++ ++ kretprobe_assert(ri, orig_ret_address, trampoline_address); ++ ++ kretprobe_hash_unlock(current, &flags); ++ ++ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { ++ hlist_del(&ri->hlist); ++ kfree(ri); ++ } ++ return (void *)orig_ret_address; ++} ++ ++/* ++ * Called after single-stepping. p->addr is the address of the ++ * instruction whose first byte has been replaced by the "int 3" ++ * instruction. To avoid the SMP problems that can occur when we ++ * temporarily put back the original opcode to single-step, we ++ * single-stepped a copy of the instruction. The address of this ++ * copy is p->ainsn.insn. ++ * ++ * This function prepares to return from the post-single-step ++ * interrupt. We have to fix up the stack as follows: ++ * ++ * 0) Except in the case of absolute or indirect jump or call instructions, ++ * the new ip is relative to the copied instruction. We need to make ++ * it relative to the original instruction. ++ * ++ * 1) If the single-stepped instruction was pushfl, then the TF and IF ++ * flags are set in the just-pushed flags, and may need to be cleared. ++ * ++ * 2) If the single-stepped instruction was a call, the return address ++ * that is atop the stack is the address following the copied instruction. ++ * We need to make it the address following the original instruction. 
++ * ++ * If this is the first time we've single-stepped the instruction at ++ * this probepoint, and the instruction is boostable, boost it: add a ++ * jump instruction after the copied instruction, that jumps to the next ++ * instruction after the probepoint. ++ */ ++static void __kprobes resume_execution(struct kprobe *p, ++ struct pt_regs *regs, ++ struct kprobe_ctlblk *kcb) ++{ ++// unsigned long *tos = stack_addr(regs); ++// unsigned long copy_ip = (unsigned long)p->ainsn.insn; ++// unsigned long orig_ip = (unsigned long)p->addr; ++ kprobe_opcode_t *insn = p->ainsn.insn; ++ unsigned long rawinsn = *(unsigned long *)insn; ++ int size = SZINSN(rawinsn); ++ regs->NDS32_ipc = (unsigned long)p->addr + size; ++ regs->NDS32_ipsw &= ~PSW_mskHSS; ++#if 0 ++ ++ /*skip the REX prefix */ ++ if (is_REX_prefix(insn)) ++ insn++; ++ ++ regs->flags &= ~X86_EFLAGS_TF; ++ switch (*insn) { ++ case 0x9c: /* pushfl */ ++ *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); ++ *tos |= kcb->kprobe_old_flags; ++ break; ++ case 0xc2: /* iret/ret/lret */ ++ case 0xc3: ++ case 0xca: ++ case 0xcb: ++ case 0xcf: ++ case 0xea: /* jmp absolute -- ip is correct */ ++ /* ip is already adjusted, no more changes required */ ++ p->ainsn.boostable = 1; ++ goto no_change; ++ case 0xe8: /* call relative - Fix return addr */ ++ *tos = orig_ip + (*tos - copy_ip); ++ break; ++#ifdef CONFIG_X86_32 ++ case 0x9a: /* call absolute -- same as call absolute, indirect */ ++ *tos = orig_ip + (*tos - copy_ip); ++ goto no_change; ++#endif ++ case 0xff: ++ if ((insn[1] & 0x30) == 0x10) { ++ /* ++ * call absolute, indirect ++ * Fix return addr; ip is correct. ++ * But this is not boostable ++ */ ++ *tos = orig_ip + (*tos - copy_ip); ++ goto no_change; ++ } else if (((insn[1] & 0x31) == 0x20) || ++ ((insn[1] & 0x31) == 0x21)) { ++ /* ++ * jmp near and far, absolute indirect ++ * ip is correct. And this is boostable ++ */ ++ p->ainsn.boostable = 1; ++ goto no_change; ++ } ++ default: ++ break; ++ } ++ ++ if (p->ainsn.boostable == 0) { ++ if ((regs->ip > copy_ip) && ++ (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) { ++ /* ++ * These instructions can be executed directly if it ++ * jumps back to correct address. ++ */ ++ set_jmp_op((void *)regs->ip, ++ (void *)orig_ip + (regs->ip - copy_ip)); ++ p->ainsn.boostable = 1; ++ } else { ++ p->ainsn.boostable = -1; ++ } ++ } ++ ++ regs->ip += orig_ip - copy_ip; ++ ++no_change: ++ restore_btf(); ++#endif ++} ++ ++/* ++ * Interrupts are disabled on entry as trap1 is an interrupt gate and they ++ * remain disabled thoroughout this function. ++ */ ++static int __kprobes post_kprobe_handler(struct pt_regs *regs) ++{ ++ struct kprobe *cur = kprobe_running(); ++ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); ++ ++ if (!cur) ++ return 0; ++ ++ resume_execution(cur, regs, kcb); ++ regs->NDS32_ipsw |= kcb->kprobe_saved_flags; ++ ++ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { ++ kcb->kprobe_status = KPROBE_HIT_SSDONE; ++ cur->post_handler(cur, regs, 0); ++ } ++ ++ /* Restore back the original saved kprobes variables and continue. */ ++ if (kcb->kprobe_status == KPROBE_REENTER) { ++ restore_previous_kprobe(kcb); ++ goto out; ++ } ++ reset_current_kprobe(); ++out: ++ preempt_enable_no_resched(); ++ ++ /* ++ * if somebody else is singlestepping across a probe point, flags ++ * will have TF set, in which case, continue the remaining processing ++ * of do_debug, as if this is not a probe hit. 
++ */ ++ if (regs->NDS32_ipsw & PSW_mskHSS) ++ return 0; ++ ++ return 1; ++} ++ ++int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) ++{ ++ struct kprobe *cur = kprobe_running(); ++ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); ++ ++ switch (kcb->kprobe_status) { ++ case KPROBE_HIT_SS: ++ case KPROBE_REENTER: ++ /* ++ * We are here because the instruction being single ++ * stepped caused a page fault. We reset the current ++ * kprobe and the ip points back to the probe address ++ * and allow the page fault handler to continue as a ++ * normal page fault. ++ */ ++ regs->NDS32_ipc = (unsigned long)cur->addr; ++ regs->NDS32_ipsw |= kcb->kprobe_old_flags; ++ if (kcb->kprobe_status == KPROBE_REENTER) ++ restore_previous_kprobe(kcb); ++ else ++ reset_current_kprobe(); ++ preempt_enable_no_resched(); ++ break; ++ case KPROBE_HIT_ACTIVE: ++ case KPROBE_HIT_SSDONE: ++ /* ++ * We increment the nmissed count for accounting, ++ * we can also use npre/npostfault count for accounting ++ * these specific fault cases. ++ */ ++ kprobes_inc_nmissed_count(cur); ++ ++ /* ++ * We come here because instructions in the pre/post ++ * handler caused the page_fault, this could happen ++ * if handler tries to access user space by ++ * copy_from_user(), get_user() etc. Let the ++ * user-specified handler try to fix it first. ++ */ ++ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) ++ return 1; ++ ++ /* ++ * In case the user-specified fault handler returned ++ * zero, try to fix up. ++ */ ++ if (fixup_exception(regs)) ++ return 1; ++ ++ /* ++ * fixup routine could not handle it, ++ * Let do_page_fault() fix it. ++ */ ++ break; ++ default: ++ break; ++ } ++ return 0; ++} ++ ++/* ++ * Wrapper routine for handling exceptions. ++ */ ++int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ++ unsigned long val, void *data) ++{ ++ struct die_args *args = data; ++ int ret = NOTIFY_DONE; ++ int why = args->trapnr & 0xf; ++ ++ if (args->regs && user_mode(args->regs)) ++ return ret; ++ ++ switch (why) { ++ case 1: ++ if (kprobe_handler(args->regs)) ++ ret = NOTIFY_STOP; ++ break; ++ case 7: ++ if (post_kprobe_handler(args->regs)) ++ ret = NOTIFY_STOP; ++ break; ++ default: ++ break; ++ } ++ return ret; ++} ++ ++int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) ++{ ++ struct jprobe *jp = container_of(p, struct jprobe, kp); ++ unsigned long addr; ++ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); ++ ++ kcb->jprobe_saved_regs = *regs; ++ kcb->jprobe_saved_sp = regs->NDS32_sp; ++ addr = (unsigned long)(kcb->jprobe_saved_sp); ++ ++ /* ++ * As Linus pointed out, gcc assumes that the callee ++ * owns the argument space and could overwrite it, e.g. ++ * tailcall optimization. So, to be absolutely safe ++ * we also save and restore enough stack bytes to cover ++ * the argument area. 
++ */ ++ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, ++ MIN_STACK_SIZE(addr)); ++ regs->NDS32_ipsw &= ~PSW_mskGIE; ++ trace_hardirqs_off(); ++ regs->NDS32_ipc = (unsigned long)(jp->entry); ++ return 1; ++} ++ ++void __kprobes jprobe_return(void) ++{ ++ struct kprobe_ctlblk *kcd = get_kprobe_ctlblk(); ++ asm volatile (" move $sp, %0\n" ++ " .globl jprobe_return_point\n" ++ " jprobe_return_point: \n" ++ " break #0x1fe \n"::"r" (kcd-> ++ jprobe_saved_sp)); ++} ++ ++int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) ++{ ++ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); ++ struct jprobe *jp = container_of(p, struct jprobe, kp); ++ ++ if (regs->NDS32_ipc == jprobe_return_point) { ++ if (regs->NDS32_sp != kcb->jprobe_saved_sp) { ++ struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; ++ printk(KERN_ERR ++ "current sp %p does not match saved sp %p\n", ++ regs->NDS32_sp, kcb->jprobe_saved_sp); ++ printk(KERN_ERR "Saved registers for jprobe %p\n", jp); ++ show_regs(saved_regs); ++ printk(KERN_ERR "Current registers\n"); ++ show_regs(regs); ++ BUG(); ++ } ++ *regs = kcb->jprobe_saved_regs; ++ memcpy((kprobe_opcode_t *) (kcb->jprobe_saved_sp), ++ kcb->jprobes_stack, ++ MIN_STACK_SIZE(kcb->jprobe_saved_sp)); ++ preempt_enable_no_resched(); ++ return 1; ++ } ++ return 0; ++} ++ ++int __init arch_init_kprobes(void) ++{ ++ return 0; ++} ++ ++int __kprobes arch_trampoline_kprobe(struct kprobe *p) ++{ ++ return 0; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/machine_kexec.c linux-3.4.110/arch/nds32/kernel/machine_kexec.c +--- linux-3.4.110.orig/arch/nds32/kernel/machine_kexec.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/machine_kexec.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,80 @@ ++/* ++ * machine_kexec.c - handle transition of Linux booting another kernel ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern const unsigned char relocate_new_kernel[]; ++extern const unsigned int relocate_new_kernel_size; ++ ++extern void setup_mm_for_reboot(char mode); ++ ++extern unsigned long kexec_start_address; ++extern unsigned long kexec_indirection_page; ++extern unsigned long kexec_mach_type; ++extern unsigned long kexec_boot_atags; ++ ++/* ++ * Provide a dummy crash_notes definition while crash dump arrives to nds32. ++ * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 
++ */ ++ ++int machine_kexec_prepare(struct kimage *image) ++{ ++ return 0; ++} ++ ++void machine_kexec_cleanup(struct kimage *image) ++{ ++} ++ ++void machine_shutdown(void) ++{ ++} ++ ++void machine_crash_shutdown(struct pt_regs *regs) ++{ ++} ++ ++void machine_kexec(struct kimage *image) ++{ ++ unsigned long page_list; ++ unsigned long reboot_code_buffer_phys; ++ void *reboot_code_buffer; ++ ++ page_list = image->head & PAGE_MASK; ++ ++ /* we need both effective and real address here */ ++ reboot_code_buffer_phys = ++ page_to_pfn(image->control_code_page) << PAGE_SHIFT; ++ reboot_code_buffer = page_address(image->control_code_page); ++ ++ /* Prepare parameters for reboot_code_buffer */ ++ kexec_start_address = image->start; ++ kexec_indirection_page = page_list; ++ kexec_mach_type = machine_arch_type; ++ kexec_boot_atags = ++ image->start - KEXEC_NDS32_ZIMAGE_OFFSET + KEXEC_NDS32_ATAGS_OFFSET; ++ ++ /* copy our kernel relocation code to the control code page */ ++ memcpy(reboot_code_buffer, ++ relocate_new_kernel, relocate_new_kernel_size); ++ ++ flush_icache_range((unsigned long)reboot_code_buffer, ++ (unsigned long)reboot_code_buffer + ++ KEXEC_CONTROL_PAGE_SIZE); ++ printk(KERN_INFO "Bye!\n"); ++ ++ cpu_proc_fin(); ++ setup_mm_for_reboot(0); /* mode is not used, so just pass 0 */ ++ cpu_reset(reboot_code_buffer_phys); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/Makefile linux-3.4.110/arch/nds32/kernel/Makefile +--- linux-3.4.110.orig/arch/nds32/kernel/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/Makefile 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,45 @@ ++# ++# Makefile for the linux kernel. ++# ++ ++CPPFLAGS_vmlinux.lds +=-DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR) ++AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR) ++ ++# Object file lists. 
++ ++obj-y := ex-entry.o ex-exit.o ex-scall.o irq.o \ ++ process.o ptrace.o setup.o signal.o \ ++ sys_nds32.o time.o traps.o io.o proc.o \ ++ elfchk.o ++ifdef CONFIG_FUNCTION_TRACER ++CFLAGS_REMOVE_ftrace.o = -pg ++CFLAGS_REMOVE_ex-entry.o = -pg ++CFLAGS_REMOVE_ex-exit.o = -pg ++CFLAGS_REMOVE_ex-scall.o = -pg ++CFLAGS_REMOVE_stacktrace.o = -pg ++CFLAGS_REMOVE_traps.o = -pg ++endif ++ ++obj-$(CONFIG_MODULES) += nds32_ksyms.o module.o ++obj-$(CONFIG_ISA_DMA) += dma-isa.o ++obj-$(CONFIG_PCI) += bios32.o ++obj-$(CONFIG_SMP) += smp.o ++obj-$(CONFIG_KGDB) += kgdb.o ++obj-$(CONFIG_STACKTRACE) += stacktrace.o ++obj-$(CONFIG_KPROBES) += kprobes.o ++obj-$(CONFIG_FPU) += fpu.o ++obj-$(CONFIG_AUDIO) += audio.o ++obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o ++obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o ++obj-$(CONFIG_EARLY_PRINTK) += early_printk.o ++ ++extra-y := head.o init_task.o vmlinux.lds ++ ++CFLAGS_fpu.o += \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_SP | sed -e 's/NDS32_EXT_FPU_SP/-mext-fpu-sp/') \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_DP | sed -e 's/NDS32_EXT_FPU_DP/-mext-fpu-dp/') ++ifdef CONFIG_FPU ++CFLAGS_elfchk.o += \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_SP | sed -e 's/NDS32_EXT_FPU_SP/-mext-fpu-sp/') \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_DP | sed -e 's/NDS32_EXT_FPU_DP/-mext-fpu-dp/') ++endif +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/module.c linux-3.4.110/arch/nds32/kernel/module.c +--- linux-3.4.110.orig/arch/nds32/kernel/module.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/module.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,314 @@ ++/* ++ * linux/arch/nds32/kernel/module.c ++ * ++ * Copyright (C) 2002 Russell King. ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Module allocation method suggested by Andi Kleen. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define DEBUG 0 ++#if DEBUG ++#define PRINTK printk ++#else ++#define PRINTK(x...) 
++#endif ++ ++void *module_alloc(unsigned long size) ++{ ++#ifdef CONFIG_KPROBES ++ if (size == 0) ++ return NULL; ++ return vmalloc_exec(size); ++#else ++ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, ++ GFP_KERNEL, PAGE_KERNEL, -1, ++ __builtin_return_address(0)); ++#endif ++} ++ ++void module_free(struct module *module, void *region) ++{ ++ vfree(region); ++} ++ ++int module_frob_arch_sections(Elf_Ehdr * hdr, ++ Elf_Shdr * sechdrs, ++ char *secstrings, struct module *mod) ++{ ++ return 0; ++} ++ ++void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask, ++ unsigned int val_shift, unsigned int loc_mask, ++ unsigned int partial_in_place, unsigned int swap) ++{ ++ unsigned int tmp = 0, tmp2 = 0; ++ ++ __asm__ __volatile__("\tlhi.bi\t%0, [%2], 0\n" ++ "\tbeqz\t%3, 1f\n" ++ "\twsbh\t%0, %1\n" ++ "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap) ++ ); ++ ++ tmp2 = tmp & loc_mask; ++ if (partial_in_place) { ++ tmp &= (!loc_mask); ++ tmp = ++ tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); ++ } else { ++ tmp = tmp2 | ((val & val_mask) >> val_shift); ++ } ++ ++ __asm__ __volatile__("\tbeqz\t%3, 2f\n" ++ "\twsbh\t%0, %1\n" ++ "2:\n" ++ "\tshi.bi\t%0, [%2], 0\n":"=r"(tmp):"0"(tmp), ++ "r"(loc), "r"(swap) ++ ); ++} ++ ++void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask, ++ unsigned int val_shift, unsigned int loc_mask, ++ unsigned int partial_in_place, unsigned int swap) ++{ ++ unsigned int tmp = 0, tmp2 = 0; ++ ++ __asm__ __volatile__("\tlmw.bi\t%0, [%2], %0, 0\n" ++ "\tbeqz\t%3, 1f\n" ++ "\twsbh\t%0, %1\n" ++ "\trotri\t%0, %1, 16\n" ++ "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap) ++ ); ++ ++ tmp2 = tmp & loc_mask; ++ if (partial_in_place) { ++ tmp &= (!loc_mask); ++ tmp = ++ tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); ++ } else { ++ tmp = tmp2 | ((val & val_mask) >> val_shift); ++ } ++ ++ __asm__ __volatile__("\tbeqz\t%3, 2f\n" ++ "\twsbh\t%0, %1\n" ++ "\trotri\t%0, %1, 16\n" ++ "2:\n" ++ "\tsmw.bi\t%0, [%2], %0, 0\n":"=r"(tmp):"0"(tmp), ++ "r"(loc), "r"(swap) ++ ); ++} ++ ++static inline int exceed_limit(int offset, unsigned int val_mask, ++ struct module *module, Elf32_Rela * rel, ++ unsigned int relindex, unsigned int reloc_order) ++{ ++ int abs_off = offset < 0 ? 
~offset : offset; ++ ++ if (abs_off & (~val_mask)) { ++ printk(KERN_ERR "\n%s: relocation type %d out of range.\n" ++ "please rebuild the kernel module with gcc option \"-Wa,-mno-small-text\".\n", ++ module->name, ELF32_R_TYPE(rel->r_info)); ++ PRINTK("section %d reloc %d offset 0x%x relative 0x%x.\n" ++ relindex, reloc_order, rel->r_offset, offset); ++ return true; ++ } ++ return false; ++} ++ ++#ifdef __NDS32_EL__ ++#define NEED_SWAP 1 ++#else ++#define NEED_SWAP 0 ++#endif ++ ++int ++apply_relocate_add(Elf32_Shdr * sechdrs, const char *strtab, ++ unsigned int symindex, unsigned int relindex, ++ struct module *module) ++{ ++ Elf32_Shdr *symsec = sechdrs + symindex; ++ Elf32_Shdr *relsec = sechdrs + relindex; ++ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; ++ Elf32_Rela *rel = (void *)relsec->sh_addr; ++ unsigned int i; ++ ++ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) { ++ Elf32_Addr *loc; ++ Elf32_Sym *sym; ++ Elf32_Addr v; ++ s32 offset; ++ ++ offset = ELF32_R_SYM(rel->r_info); ++ if (offset < 0 ++ || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { ++ printk(KERN_ERR "%s: bad relocation\n", module->name); ++ PRINTK("section %d reloc %d\n", module->name, relindex, ++ i); ++ return -ENOEXEC; ++ } ++ ++ sym = ((Elf32_Sym *) symsec->sh_addr) + offset; ++ ++ if (rel->r_offset < 0 ++ || rel->r_offset > dstsec->sh_size - sizeof(u16)) { ++ printk(KERN_ERR "%s: out of bounds relocation\n", ++ module->name); ++ PRINTK("section %d reloc %d offset 0x%0x size %d\n", ++ relindex, i, rel->r_offset, dstsec->sh_size); ++ return -ENOEXEC; ++ } ++ ++ loc = (Elf32_Addr *) (dstsec->sh_addr + rel->r_offset); ++ v = sym->st_value + rel->r_addend; ++ ++ switch (ELF32_R_TYPE(rel->r_info)) { ++ case R_NDS32_NONE: ++ case R_NDS32_INSN16: ++ case R_NDS32_LABEL: ++ case R_NDS32_LONGCALL1: ++ case R_NDS32_LONGCALL2: ++ case R_NDS32_LONGCALL3: ++ case R_NDS32_LONGCALL4: ++ case R_NDS32_LONGJUMP1: ++ case R_NDS32_LONGJUMP2: ++ case R_NDS32_LONGJUMP3: ++ case R_NDS32_9_FIXED_RELA: ++ case R_NDS32_15_FIXED_RELA: ++ case R_NDS32_17_FIXED_RELA: ++ case R_NDS32_25_FIXED_RELA: ++ case R_NDS32_LOADSTORE: ++ case R_NDS32_DWARF2_OP1_RELA: ++ case R_NDS32_DWARF2_OP2_RELA: ++ case R_NDS32_DWARF2_LEB_RELA: ++ case R_NDS32_RELA_NOP_MIX...R_NDS32_RELA_NOP_MAX: ++ break; ++ ++ case R_NDS32_32_RELA: ++ do_reloc32(v, loc, 0xffffffff, 0, 0, 0, 0); ++ break; ++ ++ case R_NDS32_HI20_RELA: ++ do_reloc32(v, loc, 0xfffff000, 12, 0xfff00000, 0, ++ NEED_SWAP); ++ break; ++ ++ case R_NDS32_LO12S3_RELA: ++ do_reloc32(v, loc, 0x00000fff, 3, 0xfffff000, 0, ++ NEED_SWAP); ++ break; ++ ++ case R_NDS32_LO12S2_RELA: ++ do_reloc32(v, loc, 0x00000fff, 2, 0xfffff000, 0, ++ NEED_SWAP); ++ break; ++ ++ case R_NDS32_LO12S1_RELA: ++ do_reloc32(v, loc, 0x00000fff, 1, 0xfffff000, 0, ++ NEED_SWAP); ++ break; ++ ++ case R_NDS32_LO12S0_RELA: ++ case R_NDS32_LO12S0_ORI_RELA: ++ do_reloc32(v, loc, 0x00000fff, 0, 0xfffff000, 0, ++ NEED_SWAP); ++ break; ++ ++ case R_NDS32_9_PCREL_RELA: ++ if (exceed_limit ++ ((v - (Elf32_Addr) loc), 0x000000ff, module, rel, ++ relindex, i)) ++ return -ENOEXEC; ++ do_reloc16(v - (Elf32_Addr) loc, loc, 0x000001ff, 1, ++ 0xffffff00, 0, NEED_SWAP); ++ break; ++ ++ case R_NDS32_15_PCREL_RELA: ++ if (exceed_limit ++ ((v - (Elf32_Addr) loc), 0x00003fff, module, rel, ++ relindex, i)) ++ return -ENOEXEC; ++ do_reloc32(v - (Elf32_Addr) loc, loc, 0x00007fff, 1, ++ 0xffffc000, 0, NEED_SWAP); ++ break; ++ ++ case R_NDS32_17_PCREL_RELA: ++ if (exceed_limit ++ ((v - (Elf32_Addr) loc), 0x0000ffff, module, rel, ++ 
relindex, i)) ++ return -ENOEXEC; ++ do_reloc32(v - (Elf32_Addr) loc, loc, 0x0001ffff, 1, ++ 0xffff0000, 0, NEED_SWAP); ++ break; ++ ++ case R_NDS32_25_PCREL_RELA: ++ if (exceed_limit ++ ((v - (Elf32_Addr) loc), 0x00ffffff, module, rel, ++ relindex, i)) ++ return -ENOEXEC; ++ do_reloc32(v - (Elf32_Addr) loc, loc, 0x01ffffff, 1, ++ 0xff000000, 0, NEED_SWAP); ++ break; ++ case R_NDS32_WORD_9_PCREL_RELA: ++ if (exceed_limit ++ ((v - (Elf32_Addr) loc), 0x000000ff, module, rel, ++ relindex, i)) ++ return -ENOEXEC; ++ do_reloc32(v - (Elf32_Addr) loc, loc, 0x000001ff, 1, ++ 0xffffff00, 0, NEED_SWAP); ++ break; ++ ++ case R_NDS32_SDA15S3_RELA: ++ case R_NDS32_SDA15S2_RELA: ++ case R_NDS32_SDA15S1_RELA: ++ case R_NDS32_SDA15S0_RELA: ++ printk(KERN_ERR "%s: unsupported relocation type %d.\n", ++ module->name, ELF32_R_TYPE(rel->r_info)); ++ printk(KERN_ERR ++ "Small data section access doesn't work in the kernel space; " ++ "please rebuild the kernel module with gcc option -G0.\n"); ++ PRINTK("section %d reloc %d offset 0x%x size %d\n", ++ relindex, i, rel->r_offset, dstsec->sh_size); ++ break; ++ ++ default: ++ printk(KERN_ERR "%s: unsupported relocation type %d.\n", ++ module->name, ELF32_R_TYPE(rel->r_info)); ++ PRINTK("section %d reloc %d offset 0x%x size %d\n", ++ relindex, i, rel->r_offset, dstsec->sh_size); ++ } ++ } ++ return 0; ++} ++ ++int ++apply_relocate(Elf32_Shdr * sechdrs, const char *strtab, ++ unsigned int symindex, unsigned int relsec, ++ struct module *module) ++{ ++// printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n", module->name); ++// return -ENOEXEC; ++ return 0; ++} ++ ++int ++module_finalize(const Elf32_Ehdr * hdr, const Elf_Shdr * sechdrs, ++ struct module *module) ++{ ++ return 0; ++} ++ ++void module_arch_cleanup(struct module *mod) ++{ ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/nds32-elf.h linux-3.4.110/arch/nds32/kernel/nds32-elf.h +--- linux-3.4.110.orig/arch/nds32/kernel/nds32-elf.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/nds32-elf.h 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,855 @@ ++#ifndef _NDS32_ELF_CHECK ++#define _NDS32_ELF_CHECK ++ ++//#define TEST_ELF_CHECK_FUNC ++#ifdef TEST_ELF_CHECK_FUNC ++#include ++#include ++#endif ++ ++//#include ++//#include ++ ++#ifdef __cplusplus ++extern "C" ++{ ++#else ++#include ++#endif //#ifdef __cplusplus ++ ++ ++ enum ELF_HEADER_FLAG_FIELD ++ { ++ EHFF_ARCH_VER = 0xF0000000, EHFF_ARCH_VER_SHIFT = 28, ++ //EHFF_RESERVED = 0x08000000, ++ EHFF_HAS_ZOL = 0x04000000, ++ EHFF_ISA_DSP = 0x02000000, ++ EHFF_ISA_FPU_MAC = 0x01000000, ++ EHFF_FPU_REG = 0x00C00000, EHFF_FPU_REG_SHIFT = 22, ++ EHFF_ISA_L2C = 0x00200000, ++ EHFF_ISA_NO_MAC = 0x00100000, ++ EHFF_ISA_MAC_DX = 0x00100000, ++ EHFF_ISA_FPU_DP = 0x00080000, ++ //EHFF_RESERVED = 0x00040000, ++ EHFF_ISA_SATURATION = 0x00020000, ++ EHFF_REDUCED_REGS = 0x00010000, ++ EHFF_ISA_STRING = 0x00008000, ++ EHFF_ISA_16BIT = 0x00004000, ++ EHFF_ISA_IFC = 0x00004000, ++ EHFF_ISA_DIV = 0x00002000, ++ EHFF_ISA_DIV_DX = 0x00002000, ++ EHFF_ISA_AUDIO = 0x00001000, ++ EHFF_ISA_FPU_SP = 0x00000800, ++ EHFF_ISA_EXT2 = 0x00000400, ++ EHFF_ISA_EXT = 0x00000200, ++ EHFF_ISA_EIT = 0x00000100, ++ EHFF_ISA_MFUSR_PC = 0x00000100, ++ EHFF_ABI_VER = 0x000000F0, ++ EHFF_ELF_VER = 0x0000000F, EHFF_ELF_VER_SHIFT = 0, ++ }; ++ ++ ++ enum ELF_HEADER_FLAG_FIELD_ARCH_VER ++ { ++ EHFF_ARCH_VER_RESERVED = 0x0, ++ EHFF_ARCH_VER_V1 = 0x1, ++ EHFF_ARCH_VER_V2 = 0x2, ++ EHFF_ARCH_VER_V3 = 0x3, ++ EHFF_ARCH_VER_V3M = 0x4, ++ }; ++ ++ static const char 
*EHFF_ARCH_VER_MSG[] = ++ { ++ "RESERVED", ++ "BASE V1", ++ "BASE V2", ++ "BASE V3", ++ "BASE V3M", ++ }; ++ ++ ++ ++ ++ /* ----------------------------------------------------------- */ ++ /* 4-bit for ABI signature, allow up to 16 ABIs */ ++ /* 0 : for OLD ABI V0, phase out ++ * 1 : for V1 , starting with V0 toolchain ++ * 2 : for V2 ++ * 3 : for V2FP (fs0, fs1 as function parameter) ++ * 4 : for AABI */ ++ /* only old N1213HC use V0 */ ++ /* New ABI is used due to return register is changed to r0 from r5 */ ++ /* ----------------------------------------------------------- */ ++#define E_NDS_ABI_V0 0x00000000 ++#define E_NDS_ABI_V1 0x00000010 ++#define E_NDS_ABI_V2 0x00000020 ++#define E_NDS_ABI_V2FP 0x00000030 ++#define E_NDS_ABI_AABI 0x00000040 ++#define E_NDS_ABI_V2FP_PLUS 0x00000050 ++ ++ /* ---------------------------------------------------------------------------- */ ++ /* This flag signifies the version of Andes ELF */ ++ /* note : */ ++ /* 1. v1.3.1 and beyond is accompanying with Baseline ISA 1.0b/2.0/... in ELF. */ ++ /* 2. v1.3.1 is accompanying with Baseline ISA 1.0b in ELF. */ ++ /* ... | MAC | ... | DIV | ... */ ++ /* 3. v1.3.1 is accompanying with Baseline ISA 2.0 and beyond in ELF. */ ++ /* ... | MAC_DX | ... | DIV_DX | ... */ ++ /* ---------------------------------------------------------------------------- */ ++ ++ enum ELF_HEADER_FLAG_FIELD_ELF_VER ++ { ++ EHFF_ELF_VER_1_3_0 = 0x0, ++ EHFF_ELF_VER_1_3_1 = 0x1, ++ EHFF_ELF_VER_1_4_0 = 0x2, ++ }; ++ ++ static const char *EHFF_ELF_VER_MSG[] = ++ { ++ "1.3.0", ++ "1.3.1", ++ "1.4.0", ++ }; ++ /* */ ++ /* sr layout : */ ++ /* sr[14..10] : hardware components */ ++ /* cpu / fpu / audio / ... */ ++ /* sr[9..0] : sr index number dedicated for sr[14..10] */ ++ /* */ ++ ++ // ++ // sr[14..10] definition: ++ // 0 : cpu ++ // 1 : fpu ++ // 2 : audio ++#define INDEX_HW_MASK 0x00007c00 ++#define INDEX_HW_CPU 0x00000000 ++#define INDEX_HW_FPU 0x00000400 ++#define INDEX_HW_AUDIO 0x00000800 ++#define HW_IS_CPU(sr) ((sr & INDEX_HW_MASK) == INDEX_HW_CPU) ++#define HW_IS_FPU(sr) ((sr & INDEX_HW_MASK) == INDEX_HW_FPU) ++#define HW_IS_AUDIO(sr) ((sr & INDEX_HW_MASK) == INDEX_HW_AUDIO) ++ ++ // ++ // sr[9..0] definition: ++ // if (HW_IS_CPU(sr)) // cpu score ++ // sr[9..0] defined in chap 9 of Andes-Privilege-Architecture spec. ++ // else if (HW_IS_FPU(sr)) // fpu score ++ // sr[9..0] == SR_FPU_FPCFG, FPCFG defined in FPU_ISA_EXT spec. 
++ // else if (HW_IS_AUDIO(sr)) // audio score ++ // //none register is used in loader checking mechanism ++ // ++#define SR_INDEX_MASK 0x000003ff ++#define SR_INDEX(sr) (sr & SR_INDEX_MASK) ++#define CPU_SR_INDEX(x,y,z) ((x << 7) + (y << 3) + z) ++#define FPU_SR_FPCFG() (INDEX_HW_FPU) ++ ++ //FPU-belonged system registers ++#define SR_FPU_FPCFG 0x00 ++ ++#define SR_NOT_EXIST 0xffffffff ++ ++ typedef unsigned int (* CALLBACK_FUNC) (unsigned int index); ++ ++ ++ static const char *NEC_MSG_FPU_reg[5] = ++ { ++ "N/A", ++ " 8SP/ 4DP", ++ "16SP/ 8DP", ++ "32SP/16DP", ++ "32SP/32DP" ++ }; ++ static const char *NEC_MSG_endian[2] = ++ { ++ "little", ++ "big" ++ }; ++ ++#define EM_NDS32 167 ++#if defined elf_check_swap_2 || defined elf_check_swap_4 ++#error "ERROR : elf_check_swap_2 and elf_check_swap_4 are multiple defined" ++#endif ++#define elf_check_swap_2(data) (((data&0x0000ff00)>>8) | ((data&0x000000ff)<<8)) ++#define elf_check_swap_4(data) (((data&0xff000000)>>24) | \ ++ ((data&0x00ff0000)>>8) | \ ++ ((data&0x0000ff00)<<8) | \ ++ ((data&0x000000ff)<<24)) ++ ++#define MSC_CFG_BASEV 0x0000e000 ++ ++#define CPU_VER_EXT 0x00000001 ++#define CPU_VER_A16 0x00000002 ++#define CPU_VER_EXT2 0x00000004 ++#define CPU_VER_FPU 0x00000008 ++#define CPU_VER_STRING 0x00000010 ++#define CPU_VER_SATURATION 0x00000020 ++ ++#define MSC_CFG_DIV 0x00000020 ++#define MSC_CFG_MAC 0x00000040 ++#define MSC_CFG_L2C 0x00000200 ++#define MSC_CFG_REDUCED_REG 0x00000400 ++#define MSC_CFG_NOD 0x00010000 ++#define MSC_CFG_AUDIO 0x00000180 ++#define MSC_CFG_AUDIO_NONE 0x00000000 ++#define MSC_CFG_IFC 0x00080000 ++#define MSC_CFG_MCU 0x00100000 ++#define MSC_CFG_EX9IT 0x01000000 ++#define MSC_CFG_MSC_EXT 0xc0000000 ++ ++#define MSC_CFG2_DSPPF 0x00000018 ++#define MSC_CFG2_ZOL 0x00000020 ++ ++#define MMU_CFG_DE 0x04000000 ++ ++ typedef struct nds32_elfinfo_s ++ { ++ unsigned int endian; // 1.local-used constant definition ++ // 0 : little , 1 : big ++ // 2.system-used constant definition ++ // little / big depends on system definition ++ unsigned int machine; //magic number (167 for nds32 machine) ++ unsigned int mfusr_pc; //reclaim in baseline v2 ++ unsigned int abi; // abi version ++ unsigned int base16; // 0 : not support , 1 : support ++ unsigned int pex1; ++ unsigned int div; //reclaim in baseline v2 ++ unsigned int pex2; ++ unsigned int fpu; //fpu single precision ++ unsigned int audio; ++ unsigned int string; ++ unsigned int reduced_regs; ++ unsigned int saturation; ++ unsigned int ifc; ++ unsigned int elf_ver; //elf version number, 0 for v1.3.0, 1 for v1.3.1 ++ unsigned int l2c; ++ unsigned int mac; //reclaim in baseline v2 ++ //unsigned int isa_ver;//0x0, baseline = baseline V1 - 16 bit ISA ++ //0x1, baseline = baseline V1 ++ //0x2, baseline = baseline V1 + V2 extension ISA ++ //unsigned int fpu_sp; //fpu double precision ++ //unsigned int fpu_reg; //fpu registers capacity ++ } nds32_elfinfo_t; ++ ++ typedef enum nds32_elfchk_e ++ { ++ endian_chk = 0, ++ machine_chk, ++ isa_chk, ++ abi_chk ++ } nds32_elfchk_t; ++ ++ typedef enum ELF_Fail_Type ++ { ++ EFT_NONE, ++ EFT_WARNING, ++ EFT_ERROR ++ }ELF_Fail_Type; ++ ++ static inline void NEC_itoa(unsigned int value, char *buf, const unsigned int base) ++ { ++ char temp[10] = "\0", ch; ++ int len = 1, index; ++ ++ while(value > 0) ++ { ++ ch = value%base; ++ value = value/base; ++ if(ch >= 10) ++ ch = ch+'a'-10; ++ else ++ ch = ch+'0'; ++ temp[len++] = ch; ++ } ++ len--; ++ ++ index = len; ++ while(index >= 0) ++ { ++ buf[index] = temp[len-index]; ++ index--; ++ } ++ } ++ 
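++ // Descriptive note (added for clarity): NEC_format right-justifies buf in a
++ // field of 'width' characters -- the string is shifted to the end of the
++ // field and the leading bytes are filled with spaces. It assumes
++ // width >= strlen(buf) and that buf can hold at least width + 1 bytes.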
++ static inline void NEC_format(char *buf, unsigned int width) ++ { ++ unsigned int len = strlen(buf); ++ memmove(buf+(width-len), buf, len+1); ++ memset(buf, ' ', (width-len)); ++ } ++ ++ static void NEC_sprintf(char *buf, const char *str, ...) ++ { ++ int width, len = 0; ++ va_list ap; ++ char token, temp[100]; ++ buf[0] = '\0'; ++ ++ ++ va_start(ap, str); ++ while(*str != '\0') ++ { ++ if(*str != '%') ++ buf[len++] = *str; ++ else //*str == '%' ++ { ++ token = *(++str); ++ ++ width = 0; ++ while(token >= '0' && token <= '9') ++ { ++ width *= 10; ++ width += token-'0'; ++ token = *(++str); ++ } ++ ++ switch(token) ++ { ++ case 'd': ++ NEC_itoa(va_arg(ap, unsigned int), temp, 10); ++ break; ++ case 'x': ++ NEC_itoa(va_arg(ap, unsigned int), temp, 16); ++ break; ++ case 's': ++ strcpy(temp, va_arg(ap, char *)); ++ break; ++ } ++ ++ if(width != 0) ++ NEC_format(temp, width); ++ ++ buf[len++] = '\0'; ++ strcat(buf, temp); ++ len = strlen(buf); ++ } ++ ++ str++; ++ } ++ buf[len] = '\0'; ++ } ++ ++ //NDS32 strcat for avoiding buf overflow ++ static inline void NEC_strcat_safety(char *destination, unsigned int destination_size, char *source) ++ { ++ strncat(destination, source, destination_size - strlen(destination) - 1); ++ } ++ ++ //NDS32 Elf Check print ++ static inline void NEC_print(char *buf, unsigned int len, ELF_Fail_Type type, const char *name, const char *cpu, const char *elf, const char *error_message) ++ { ++ char temp[100]; ++ switch(type) ++ { ++ case EFT_NONE: ++ NEC_sprintf(temp, "\t | %9s | %9s | %14s\n", cpu, elf, name); ++ break; ++ case EFT_WARNING: ++ NEC_sprintf(temp, "\t?| %9s | %9s | %14s Warning: %s\n", cpu, elf, name, error_message); ++ break; ++ case EFT_ERROR: ++ NEC_sprintf(temp, "\t!| %9s | %9s | %14s Error: %s\n", cpu, elf, name, error_message); ++ break; ++ } ++ NEC_strcat_safety(buf, len, temp); ++ } ++ ++ static inline bool NEC_check_bool(char *buf, unsigned int len, ELF_Fail_Type type, const char *isa, bool cpu, bool elf) ++ { ++ bool code; ++ const char *NEC_MSG_ISA[2] = { "OFF", "ON" }; ++ if(!cpu && elf) ++ code = 1; ++ else ++ { ++ code = 0; ++ type = EFT_NONE; ++ } ++ NEC_print(buf, len, type, isa, NEC_MSG_ISA[cpu], NEC_MSG_ISA[elf], "Not supported by CPU"); ++ return code; ++ } ++ ++ static inline ELF_Fail_Type elf_ver_and_arch_ver_compatibility_check(unsigned int elf_ver, unsigned int arch_ver) ++ { ++ switch(elf_ver) ++ { ++ case EHFF_ELF_VER_1_3_0: ++ switch(arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ case EHFF_ELF_VER_1_3_1: ++ switch(arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3M: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ case EHFF_ELF_VER_1_4_0: ++ switch(arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ } ++ return EFT_ERROR; ++ } ++ ++ ++ static inline ELF_Fail_Type arch_ver_check(unsigned int CPU, unsigned int ELF) ++ { ++ switch(CPU) ++ { ++ case EHFF_ARCH_VER_V1: ++ switch(ELF) ++ { ++ case EHFF_ARCH_VER_V1: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ case EHFF_ARCH_VER_V2: ++ switch(ELF) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ case EHFF_ARCH_VER_V3: ++ switch(ELF) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ return EFT_NONE; ++ 
default: ++ return EFT_ERROR; ++ } ++ case EHFF_ARCH_VER_V3M: ++ switch(ELF) ++ { ++ case EHFF_ARCH_VER_V3M: ++ return EFT_NONE; ++ default: ++ return EFT_ERROR; ++ } ++ } ++ return EFT_ERROR; ++ } ++ ++ // buf : buffer of char*, put Target Isa Info into *buf ++ // len : length of buffer (at least 300 chars in length) ++ // buf_status : status of buffer ++ // 0 : ok ++ // 1 : overflow ++#define TARGET_ISA_INFO_LEN 2000 ++ static inline unsigned int elf_check (unsigned char *ehdr, CALLBACK_FUNC reg_read_callback, char *buf, unsigned int len, unsigned int *buf_status) ++ { ++ unsigned int SR_msc_cfg, SR_msc_cfg2 = 0, SR_cpu_ver, SR_mmu_cfg, fpcfg, fucop_exist, fpu_mount; ++ unsigned int CPU_DIV_DX_ISA, CPU_MAC_DX_ISA; ++ unsigned int eflag, ELF_arch_ver, ELF_elf_ver, CPU_arch_ver; ++ unsigned short machine; ++ unsigned char big_endian_elf = 0, big_endian_cpu; ++ ++ char temp[100]; ++ char temp_cpu[10]; ++ char temp_elf[10]; ++ int n_error, n_warning; ++ int CPU_support; ++ unsigned char FPU_reg_elf, FPU_reg_cpu; ++ ELF_Fail_Type error_type; ++ ++ ++ ++ n_error = 0; ++ n_warning = 0; ++ ++ buf[0] = '\0'; ++ *buf_status = 0; ++ ++ SR_cpu_ver = reg_read_callback(CPU_SR_INDEX(0,0,0)); ++ SR_msc_cfg = reg_read_callback(CPU_SR_INDEX(0,4,0)); ++ SR_mmu_cfg = reg_read_callback(CPU_SR_INDEX(0,3,0)); ++ ++ if (SR_msc_cfg & MSC_CFG_MSC_EXT) ++ SR_msc_cfg2 = reg_read_callback(CPU_SR_INDEX(0,4,1)); ++ ++ switch(*((char*)(ehdr+5))) ++ { ++ case 1: ++ big_endian_elf = 0; ++ break; ++ case 2: ++ big_endian_elf = 1; ++ break; ++ } ++ ++ if(SR_mmu_cfg & MMU_CFG_DE) ++ big_endian_cpu = 1; ++ else ++ big_endian_cpu = 0; ++ ++ ++ /* 20091106 note : ++ * 1. In term of OS, elf_check() would be used in OS kernel and ld.so ++ * 2. Since OS is running on SID, eflag/machine did not need endian conversion for big endian format. ++ * 3. Later, elf_check interface is going to cover "OS" case by adding a new parameter. ++ * ++ */ ++#ifdef ELF_CHECKING_OS ++ eflag = *((unsigned int *)(ehdr+36)); ++ machine = *((unsigned short*)(ehdr+18)); ++#else // GDB loader / SID loader ++ eflag = (big_endian_elf == 0)? *((unsigned int *)(ehdr+36)) : elf_check_swap_4(*((unsigned int *)(ehdr+36))); ++ machine = (big_endian_elf == 0)? 
*((unsigned short*)(ehdr+18)) : elf_check_swap_2(*((unsigned short*)(ehdr+18))); ++#endif ++ ++ ELF_arch_ver = (eflag & EHFF_ARCH_VER) >> EHFF_ARCH_VER_SHIFT; ++ ELF_elf_ver = (eflag & EHFF_ELF_VER) >> EHFF_ELF_VER_SHIFT; ++ ++ CPU_arch_ver = ((SR_msc_cfg & MSC_CFG_BASEV) >> 13) + 1; ++ if(CPU_arch_ver == 3) ++ if(SR_msc_cfg & MSC_CFG_MCU) ++ CPU_arch_ver = 4; ++ ++ /*Basic version check ++ ++ 1.ELF version check ++ 2.Architecture version check ++ 3.Machine check ++ */ ++ if(ELF_elf_ver > EHFF_ELF_VER_1_4_0) ++ { ++ NEC_sprintf(temp, "Error: unsupport ELF version: 0x%x\n", ELF_elf_ver); ++ NEC_strcat_safety(buf, len, temp); ++ return 1; ++ } ++ NEC_sprintf(temp, "ELF version: %s\n", EHFF_ELF_VER_MSG[ELF_elf_ver]); ++ NEC_strcat_safety(buf, len, temp); ++ ++ ++ if(elf_ver_and_arch_ver_compatibility_check(ELF_elf_ver, ELF_arch_ver) == EFT_ERROR) ++ { ++ NEC_sprintf(temp, "Error: architecture version is not supported in this ELF version: %s\n", EHFF_ARCH_VER_MSG[ELF_arch_ver]); ++ NEC_strcat_safety(buf, len, temp); ++ return 1; ++ } ++ ++ NEC_sprintf(temp, "\t %9s %9s \n", "CPU", "ELF"); ++ NEC_strcat_safety(buf, len, temp); ++ if(big_endian_cpu != big_endian_elf) ++ { ++ error_type = EFT_ERROR; ++ n_error++; ++ } ++ else ++ error_type = EFT_NONE; ++ NEC_print(buf, len, error_type, "endianess", NEC_MSG_endian[big_endian_cpu], NEC_MSG_endian[big_endian_elf], "endianess mismatch"); ++ ++ ++ ++ ++ if (EM_NDS32 != machine) ++ { ++ error_type = EFT_ERROR; ++ n_error++; ++ } ++ else ++ error_type = EFT_NONE; ++ NEC_sprintf(temp_cpu, "%d", EM_NDS32); ++ NEC_sprintf(temp_elf, "%d", machine); ++ NEC_print(buf, len, error_type, "machine", temp_cpu, temp_elf, "wrong machine"); ++ ++ ++ error_type = arch_ver_check(CPU_arch_ver, ELF_arch_ver); ++ if(error_type == EFT_ERROR) ++ n_error++; ++ NEC_print(buf, len, error_type, "BASELINE ISA", EHFF_ARCH_VER_MSG[CPU_arch_ver], EHFF_ARCH_VER_MSG[ELF_arch_ver], "BASELINE ISA mismatch"); ++ ++ /*Prepare reference variables ++ ++ 1.DIV, MAC, DX ++ 2.FPU ++ */ ++ ++ CPU_MAC_DX_ISA = 0; ++ CPU_DIV_DX_ISA = 0; ++ switch(CPU_arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ if (SR_msc_cfg & MSC_CFG_MAC) ++ CPU_MAC_DX_ISA = 1; ++ if (SR_msc_cfg & MSC_CFG_DIV) ++ CPU_DIV_DX_ISA = 1; ++ break; ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ if (!(SR_msc_cfg & MSC_CFG_NOD)) ++ { ++ CPU_MAC_DX_ISA = 1; ++ CPU_DIV_DX_ISA = 1; ++ } ++ break; ++ } ++ fpu_mount = 0; ++ if (SR_cpu_ver & CPU_VER_FPU) ++ { ++ fucop_exist = reg_read_callback(CPU_SR_INDEX(0,5,0)); ++ if (fucop_exist & 0x80000000) ++ { ++ fpu_mount = 1; ++ fpcfg = reg_read_callback(FPU_SR_FPCFG()); ++ } ++ else ++ fpu_mount = 0; ++ } ++ ++ //Parse Configuration field (bit 27~8) ++ ++ //bit 27 Reserved ++ //bit 26 ZOL ++ CPU_support = 0; ++ if ((SR_msc_cfg & MSC_CFG_MSC_EXT) && (SR_msc_cfg2 & MSC_CFG2_ZOL)) ++ CPU_support = 1; ++ if (ELF_elf_ver == EHFF_ELF_VER_1_4_0) ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "ZOL", CPU_support, eflag & EHFF_HAS_ZOL); ++ ++ //bit 25 DSP ++ CPU_support = 0; ++ if ((SR_msc_cfg & MSC_CFG_MSC_EXT) && (SR_msc_cfg2 & MSC_CFG2_DSPPF)) ++ CPU_support = 1; ++ if (ELF_elf_ver == EHFF_ELF_VER_1_4_0) ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "DSP ISA", CPU_support, eflag & EHFF_ISA_DSP); ++ ++ //bit 24 ++ CPU_support = 0; ++ if(fpu_mount) ++ if(fpcfg & 0x00000010) ++ CPU_support = 1; ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "FPU MAC ISA", CPU_support, eflag & EHFF_ISA_FPU_MAC); ++ ++ //bit 23~22 ++ if(fpu_mount) ++ FPU_reg_cpu = ((fpcfg >> 2) & 
0x3) + 1; ++ else ++ FPU_reg_cpu = 0; ++ ++ if(eflag & (EHFF_ISA_FPU_SP | EHFF_ISA_FPU_DP | EHFF_ISA_FPU_MAC)) ++ FPU_reg_elf = ((eflag & EHFF_FPU_REG) >> EHFF_FPU_REG_SHIFT) + 1; ++ else ++ FPU_reg_elf = 0; ++ if(FPU_reg_elf > FPU_reg_cpu) ++ { ++ error_type = EFT_ERROR; ++ n_error++; ++ } ++ else ++ error_type = EFT_NONE; ++ NEC_print(buf, len, error_type, "FPU REGISTER", NEC_MSG_FPU_reg[FPU_reg_cpu], NEC_MSG_FPU_reg[FPU_reg_elf], ++ "FPU REGISTERS not supported by CPU"); ++ //bit 21 ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "L2C ISA", SR_msc_cfg & MSC_CFG_L2C, eflag & EHFF_ISA_L2C); ++ ++ //bit 20 ++ //MAC_DX check ++ // Target Machine certainly has MAC_DX under the following conditions: ++ // 1. Baseline V1 ISA && MSC_CFG.MAC (softcore version) ++ // 2. Baseline V2 ISA && D0/D1 support ++ // 3. Baseline V3 ISA && D0/D1 support ++ ++ switch(ELF_arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "MAC/MAC DX ISA", CPU_MAC_DX_ISA, !(eflag & EHFF_ISA_NO_MAC)); ++ break; ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "MAC DX ISA", CPU_MAC_DX_ISA, eflag & EHFF_ISA_MAC_DX); ++ break; ++ } ++ //bit 19 ++ CPU_support = 0; ++ if(fpu_mount) ++ if(fpcfg & 0x00000002) ++ CPU_support = 1; ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "FPU DP ISA", CPU_support, eflag & EHFF_ISA_FPU_DP); ++ ++ ++ //bit 18 Reserved ++ //bit 17 ++ switch(ELF_elf_ver) ++ { ++ case EHFF_ELF_VER_1_3_0: ++ case EHFF_ELF_VER_1_3_1: ++ break; ++ case EHFF_ELF_VER_1_4_0: ++ switch(ELF_arch_ver) ++ { ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "SATURATION ISA", SR_cpu_ver & CPU_VER_SATURATION, eflag & EHFF_ISA_SATURATION); ++ break; ++ } ++ break; ++ } ++ //bit 16 ++ if(SR_msc_cfg & MSC_CFG_REDUCED_REG) ++ CPU_support = 0; ++ else ++ CPU_support = 1; ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "32 GPR", CPU_support, (eflag & EHFF_REDUCED_REGS) == 0); ++ ++ //bit 15 ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "STRING ISA", SR_cpu_ver & CPU_VER_STRING, eflag & EHFF_ISA_STRING); ++ ++ //bit 14 ++ switch(ELF_elf_ver) ++ { ++ case EHFF_ELF_VER_1_3_0: ++ case EHFF_ELF_VER_1_3_1: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "16-BIT ISA", SR_cpu_ver & CPU_VER_A16, eflag & EHFF_ISA_16BIT); ++ break; ++ case EHFF_ELF_VER_1_4_0: ++ switch(ELF_arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "16-BIT ISA", SR_cpu_ver & CPU_VER_A16, eflag & EHFF_ISA_16BIT); ++ break; ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "IFC ISA", SR_msc_cfg & MSC_CFG_IFC, eflag & EHFF_ISA_IFC); ++ break; ++ } ++ break; ++ } ++ ++ //bit 13 ++ switch(ELF_arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "DIV DX ISA", CPU_DIV_DX_ISA, eflag & EHFF_ISA_DIV); ++ break; ++ case EHFF_ARCH_VER_V2: ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "DIV DX ISA", CPU_DIV_DX_ISA, eflag & EHFF_ISA_DIV_DX); ++ break; ++ } ++ //bit 12 ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "AUDIO/DSP ISA", SR_msc_cfg & MSC_CFG_AUDIO, eflag & EHFF_ISA_AUDIO); ++ //bit 11 ++ CPU_support = 0; ++ if(fpu_mount) ++ if(fpcfg & 0x00000001) ++ CPU_support = 1; ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "FPU SP ISA", CPU_support, eflag & EHFF_ISA_FPU_SP); ++ ++ //bit 10 
++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "PEX2 ISA", SR_cpu_ver & CPU_VER_EXT2, eflag & EHFF_ISA_EXT2); ++ ++ //bit 9 ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "PEX1 ISA", SR_cpu_ver & CPU_VER_EXT, eflag & EHFF_ISA_EXT); ++ ++ //bit 8 ++ CPU_support = 0; ++ if(CPU_arch_ver == EHFF_ARCH_VER_V3M) ++ CPU_support = 1; ++ switch(ELF_elf_ver) ++ { ++ case EHFF_ELF_VER_1_3_0: ++ case EHFF_ELF_VER_1_3_1: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "MFUSR_PC ISA", CPU_support, eflag & EHFF_ISA_MFUSR_PC); ++ break; ++ case EHFF_ELF_VER_1_4_0: ++ switch(ELF_arch_ver) ++ { ++ case EHFF_ARCH_VER_V1: ++ case EHFF_ARCH_VER_V2: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "MFUSR_PC ISA", CPU_support, eflag & EHFF_ISA_MFUSR_PC); ++ break; ++ case EHFF_ARCH_VER_V3: ++ case EHFF_ARCH_VER_V3M: ++ n_error += NEC_check_bool(buf, len, EFT_ERROR, "EIT ISA", SR_msc_cfg & MSC_CFG_EX9IT, eflag & EHFF_ISA_EIT); ++ break; ++ } ++ break; ++ } ++ ++ if(n_error) ++ { ++ NEC_strcat_safety(buf, len, (char*)"Error: ELF and CPU mismatch\n"); ++ NEC_sprintf(temp, "Total Error: %d\n", n_error); ++ NEC_strcat_safety(buf, len, temp); ++ ++ NEC_strcat_safety(buf, len, (char*)"Usage error, Consult Andes Toolchains and their compatible Andes cores for the Toolchain-CPU compatibility.\n"); ++ NEC_strcat_safety(buf, len, (char*)"The Loader Checking can be disabled under Debug Configuration.\n"); ++ } ++ else ++ NEC_strcat_safety(buf, len, (char*)"NDS32 ELF checking pass\n"); ++ ++ if(n_warning) ++ { ++ NEC_sprintf(temp, "Total Warning: %d\n", n_warning); ++ NEC_strcat_safety(buf, len, temp); ++ } ++ ++ ++ // checking buf overflow ++ if(strlen(buf) >= len) ++ *buf_status = 1; ++ ++ return n_error; ++ } //end of elf_check ++ ++#undef elf_check_swap_2 ++#undef elf_check_swap_4 ++ ++#undef MSC_CFG_BASEV ++ ++#undef CPU_VER_STRING ++#undef CPU_VER_EXT ++#undef CPU_VER_A16 ++#undef CPU_VER_EXT2 ++#undef CPU_VER_FPU ++#undef CPU_VER_SATURATION ++ ++#undef MSC_CFG_DIV ++#undef MSC_CFG_MAC ++#undef MSC_CFG_L2C ++#undef MSC_CFG_REDUCED_REG ++#undef MSC_CFG_NOD ++#undef MSC_CFG_AUDIO ++#undef MSC_CFG_AUDIO_NONE ++#undef MSC_CFG_IFC ++#undef MSC_CFG_MCU ++#undef MSC_CFG_EX9IT ++#undef MSC_CFG_MSC_EXT ++ ++#undef MSC_CFG2_DSPPF ++#undef MSC_CFG2_ZOL ++ ++#undef MMU_CFG_DE ++ ++#ifdef __cplusplus ++} ++#endif //#ifdef __cplusplus ++ ++#endif //end of _NDS32_ELF_CHECK ++ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/nds32_ksyms.c linux-3.4.110/arch/nds32/kernel/nds32_ksyms.c +--- linux-3.4.110.orig/arch/nds32/kernel/nds32_ksyms.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/nds32_ksyms.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,103 @@ ++/* ++ * arch/nds32/kernel/nds32_ksyms.c ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++/* ++ * libgcc functions - functions that are used internally by the ++ * compiler... (prototypes are not correct though, but that ++ * doesn't really matter since they're not versioned). 
++ */ ++extern void __ashldi3(void); ++extern void __ashrdi3(void); ++extern void __divsi3(void); ++extern void __lshrdi3(void); ++extern void __modsi3(void); ++extern void __muldi3(void); ++extern void __ucmpdi2(void); ++extern void __udivdi3(void); ++extern void __umoddi3(void); ++extern void __udivmoddi4(void); ++extern void __udivsi3(void); ++extern void __umodsi3(void); ++ ++/* ++ * This has a special calling convention; it doesn't ++ * modify any of the usual registers, except for LR. ++ */ ++#define EXPORT_SYMBOL_ALIAS(sym,orig) \ ++ const struct kernel_symbol __ksymtab_##sym \ ++ __attribute__((section("__ksymtab"))) = \ ++ { (unsigned long)&orig, #sym }; ++ ++/* ++ * floating point math emulator support. ++ * These symbols will never change their calling convention... ++ */ ++ ++/* networking */ ++EXPORT_SYMBOL(csum_partial); ++EXPORT_SYMBOL(csum_partial_copy_nocheck); ++ ++/* string / mem functions */ ++EXPORT_SYMBOL(strchr); ++EXPORT_SYMBOL(strrchr); ++EXPORT_SYMBOL(memset); ++EXPORT_SYMBOL(memcpy); ++EXPORT_SYMBOL(memmove); ++EXPORT_SYMBOL(__memzero); ++ ++/* user mem (segment) */ ++EXPORT_SYMBOL(__arch_copy_from_user); ++EXPORT_SYMBOL(__arch_copy_to_user); ++EXPORT_SYMBOL(__arch_clear_user); ++EXPORT_SYMBOL(__arch_strnlen_user); ++EXPORT_SYMBOL(__arch_strncpy_from_user); ++ ++EXPORT_SYMBOL(__get_user_1); ++EXPORT_SYMBOL(__get_user_2); ++EXPORT_SYMBOL(__get_user_4); ++EXPORT_SYMBOL(__get_user_8); ++ ++EXPORT_SYMBOL(__put_user_1); ++EXPORT_SYMBOL(__put_user_2); ++EXPORT_SYMBOL(__put_user_4); ++EXPORT_SYMBOL(__put_user_8); ++ ++/* gcc lib functions */ ++EXPORT_SYMBOL(__ashldi3); ++EXPORT_SYMBOL(__ashrdi3); ++EXPORT_SYMBOL(__divsi3); ++EXPORT_SYMBOL(__lshrdi3); ++EXPORT_SYMBOL(__modsi3); ++EXPORT_SYMBOL(__muldi3); ++EXPORT_SYMBOL(__ucmpdi2); ++EXPORT_SYMBOL(__udivdi3); ++EXPORT_SYMBOL(__umoddi3); ++EXPORT_SYMBOL(__udivmoddi4); ++EXPORT_SYMBOL(__udivsi3); ++EXPORT_SYMBOL(__umodsi3); ++ ++/* syscalls */ ++EXPORT_SYMBOL(sys_write); ++EXPORT_SYMBOL(sys_lseek); ++EXPORT_SYMBOL(sys_exit); ++EXPORT_SYMBOL(sys_wait4); ++ ++/* cache handling */ ++ ++EXPORT_SYMBOL(cpu_icache_inval_all); ++EXPORT_SYMBOL(cpu_dcache_wbinval_all); ++EXPORT_SYMBOL(cpu_dma_inval_range); ++EXPORT_SYMBOL(cpu_dma_wb_range); +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/proc.c linux-3.4.110/arch/nds32/kernel/proc.c +--- linux-3.4.110.orig/arch/nds32/kernel/proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/proc.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,81 @@ ++/* ++ * linux/arch/nds32/kernel/setup.c ++ * ++ * Copyright (C) 1995-2001 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for Andes NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Jul.05.2007 Initial ported by Tom, revised and patched for KGDB ++ * by Harry. 
++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++ ++#include ++ ++struct proc_info_item info_item = { ++ "AndesCore", ++#ifndef CONFIG_CPU_ICACHE_DISABLE ++ "I" ++#endif ++#ifndef CONFIG_CPU_DCACHE_DISABLE ++ "D" ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++ "(wt)" ++#else ++ "(wb)" ++#endif ++#endif ++}; ++ ++struct proc_info_list n10_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x0a000000,.cpu_mask = 0xff000000,.arch_name = ++ "NDS32 N10",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; ++ ++struct proc_info_list n12_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x0c000000,.cpu_mask = 0xff000000,.arch_name = ++ "NDS32 N12",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; ++ ++struct proc_info_list n13_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x0d000000,.cpu_mask = 0xff000000,.arch_name = ++ "NDS32 N13",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; ++ ++struct proc_info_list n968_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x19000000,.cpu_mask = 0xff000000,.arch_name = ++ "NDS32 N968",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; ++ ++struct proc_info_list n1068_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x1a000000,.cpu_mask = 0xff000000,.arch_name = ++ "NDS32 N1068",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; ++ ++// the last one (a roust way to do so) ++struct proc_info_list nXX_proc_info ++ __attribute__ ((section(".proc.info.init"))) = { ++.cpu_val = 0x00000000,.cpu_mask = 0x00000000,.arch_name = ++ "NDS32 N??",.elf_name = "NDS32 ELF",.elf_hwcap = 0,.info = ++ &info_item,}; +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/process.c linux-3.4.110/arch/nds32/kernel/process.c +--- linux-3.4.110.orig/arch/nds32/kernel/process.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/process.c 2016-04-07 10:20:50.942081024 +0200 +@@ -0,0 +1,630 @@ ++/* ++ * linux/arch/nds32/kernel/process.c ++ */ ++/* Copyright (C) 1996-2000 Russell King - Converted to ARM. ++ * Original Copyright (C) 1995 Linus Torvalds ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is process implementation for NDS32 architecture, and it ++ * is original referred from ARM. ++ * ++ * Revision History: ++ * ++ * Oct.02.2007 Initial ported by Tom, Shawn, Steven, and Harry. ++ * Oct.03.2007 Updated get_wchan() for info under /proc. 
++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern const char *processor_modes[]; ++extern void setup_mm_for_reboot(char mode); ++extern struct task_struct *_switch(struct task_struct *last, ++ struct thread_info *prev, ++ struct thread_info *next); ++ ++#ifndef CONFIG_UNLAZY_FPU ++struct task_struct *last_task_used_math = NULL; ++#endif ++#ifndef CONFIG_UNLAZY_AUDIO ++struct task_struct *last_task_used_audio = NULL; ++#endif ++static volatile int hlt_counter; ++ ++#ifdef CONFIG_PROC_FS ++struct proc_dir_entry *proc_dir_cpu; ++EXPORT_SYMBOL(proc_dir_cpu); ++#endif ++ ++extern inline void arch_reset(char mode) ++{ ++ if (mode == 's') { ++ /* Use cpu handler, jump to 0 */ ++ cpu_reset(0); ++ } else { ++ /* ++ * Suppose there should be HW function. ++ * Here we used one tick watchdog as a trick. ++ * It is not very good idea in the view of SoC ++ * design, but smart in firmware functionality. ++ * Harry@Jan,08.2008 ++ */ ++ REG32(WDT_FTWDT010_0_VA_BASE + 0x0C) = 0; // WdCR ++ REG32(WDT_FTWDT010_0_VA_BASE + 0x04) = 1; // WdLoad ++ REG32(WDT_FTWDT010_0_VA_BASE + 0x08) = 0x5AB9; // WdRestart ++ REG32(WDT_FTWDT010_0_VA_BASE + 0x0C) = 0x13; // Go... ++ } ++} ++ ++#ifndef arch_idle ++static inline void arch_idle(void) ++{ ++ cpu_do_idle(); ++} ++#endif ++ ++void disable_hlt(void) ++{ ++ hlt_counter++; ++} ++ ++EXPORT_SYMBOL(disable_hlt); ++ ++void enable_hlt(void) ++{ ++ hlt_counter--; ++} ++ ++EXPORT_SYMBOL(enable_hlt); ++ ++static int __init nohlt_setup(char *__unused) ++{ ++ hlt_counter = 1; ++ return 1; ++} ++ ++static int __init hlt_setup(char *__unused) ++{ ++ hlt_counter = 0; ++ return 1; ++} ++ ++__setup("nohlt", nohlt_setup); ++__setup("hlt", hlt_setup); ++ ++/* ++ * The following aren't currently used. ++ */ ++ ++void (*pm_idle) (void) = NULL; ++EXPORT_SYMBOL(pm_idle); ++ ++void (*pm_power_off) (void); ++EXPORT_SYMBOL(pm_power_off); ++ ++/* ++ * This is our default idle handler. We need to disable ++ * interrupts here to ensure we don't miss a wakeup call. ++ */ ++void default_idle(void) ++{ ++ local_irq_disable(); ++ if (!need_resched() && !hlt_counter) ++ arch_idle(); ++ local_irq_enable(); ++} ++ ++/* ++ * The idle thread. We try to conserve power, while trying to keep ++ * overall latency low. The architecture specific idle is passed ++ * a value to indicate the level of "idleness" of the system. 
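++ * A platform may install its own low-power handler through the pm_idle
++ * hook; when pm_idle is NULL this loop falls back to default_idle().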
++ */ ++void cpu_idle(void) ++{ ++ ++ /* endless idle loop with no priority at all */ ++ while (1) { ++ void (*idle) (void) = pm_idle; ++ ++ if (!idle) ++ idle = default_idle; ++ tick_nohz_idle_enter(); ++ leds_event(led_idle_start); ++ while (!need_resched()) ++ idle(); ++ leds_event(led_idle_end); ++ tick_nohz_idle_exit(); ++ preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++ } ++} ++ ++static char reboot_mode = 'h'; ++ ++int __init reboot_setup(char *str) ++{ ++ reboot_mode = str[0]; ++ return 1; ++} ++ ++#ifdef CONFIG_PLAT_AG102 ++static int cpub_pwroff(void) ++{ ++ //PCU_SET_REG(PCS9_PARA, ++ // PCU_PREPARE(PCS9_PARA, IE, 0x0) | ++ // PCU_PREPARE(PCS9_PARA, CMD, PCS_CMD_SCALING) | ++ // PCU_PREPARE(PCS9_PARA, SYNC, PCS_SYNC_SRC) | ++ // PCU_PREPARE(PCS9_PARA, NXTPAR, 0x1b) // turn off cpub power ++ //); ++ //printk("cpub Power off: ok!!!OKOKOK\n"); ++ //REG32(PCU_VA_BASE + 0x1A4) = (0x0000001B & 0x00FFFFFF)| ((0x2 << 24) & 0x0F000000) | ((0x1 << 28) & 0x70000000) | 0x00000000; ++ //__asm__ volatile ("standby wait_done\n"); ++ //PCU_SET_REG(PCS9_ST1, 0x0); // clear status ++ //REG32(PCU_VA_BASE + 0x1A8) = 0x0; ++ ++ //par = PCS_POWER_CPUB; ++ //PCU_SET_REG(PCS9_PARA, ++ //PCU_PREPARE(PCS9_PARA, IE, 0x1) | ++ //PCU_PREPARE(PCS9_PARA, CMD, PCS_CMD_PW_DOWN) | ++ //PCU_PREPARE(PCS9_PARA, SYNC, PCS_SYNC_SRC) | ++ //PCU_PREPARE(PCS9_PARA, NXTPAR, 0x6) // turn off all power ++ ++ //printk("cpua Power off: ok!!!YAYAYA\n"); ++ //printk("The value = 0x%08x\n", (0x00000006 & 0x00FFFFFF)| ((0x1 << 24) & 0x0F000000) | ((0x2 << 28) & 0x70000000) | 0x80000000); ++ REG32(PCU_VA_BASE + 0x1A4) = ++ (0x00000006 & 0x00FFFFFF) | ((0x1 << 24) & 0x0F000000) | ++ ((0x2 << 28) & 0x70000000) | 0x80000000; ++ __asm__ volatile ("standby wake_grant\n"); ++ return (0); ++} ++#else ++static int cpub_pwroff(void) ++{ ++ return 0; ++} ++ ++#endif ++ ++__setup("reboot=", reboot_setup); ++ ++void machine_halt(void) ++{ ++ //ADD by river 2011.04.14 ++ cpub_pwroff(); ++} ++ ++EXPORT_SYMBOL(machine_halt); ++ ++void machine_power_off(void) ++{ ++ if (pm_power_off) ++ pm_power_off(); ++} ++ ++EXPORT_SYMBOL(machine_power_off); ++ ++void machine_restart(char *__unused) ++{ ++ /* ++ * Clean and disable cache, and turn off interrupts ++ */ ++ cpu_proc_fin(); ++ ++ /* ++ * Tell the mm system that we are going to reboot - ++ * we may need it to insert some 1:1 mappings so that ++ * soft boot works. ++ */ ++ setup_mm_for_reboot(reboot_mode); ++ ++ /* ++ * Now call the architecture specific reboot code. ++ */ ++ arch_reset(reboot_mode); ++ ++ /* ++ * Whoops - the architecture was unable to reboot. ++ * Tell the user! 
++ */ ++ mdelay(1000); ++ printk("Reboot failed -- System halted\n"); ++ while (1) ; ++} ++ ++EXPORT_SYMBOL(machine_restart); ++ ++void show_regs(struct pt_regs *regs) ++{ ++ print_symbol("PC is at %s\n", instruction_pointer(regs)); ++ print_symbol("LR is at %s\n", regs->NDS32_lp); ++ printk("pc : [<%08lx>] lp : [<%08lx>] %s\n" ++ "sp : %08lx fp : %08lx gp : %08lx\n", ++ instruction_pointer(regs), ++ regs->NDS32_lp, print_tainted(), regs->NDS32_sp, ++ regs->NDS32_fp, regs->NDS32_gp); ++ printk("r25: %08lx r24 : %08lx\n", regs->NDS32_r25, regs->NDS32_r24); ++ ++ printk("r23: %08lx r22: %08lx r21: %08lx r20: %08lx\n", ++ regs->NDS32_r23, regs->NDS32_r22, ++ regs->NDS32_r21, regs->NDS32_r20); ++ printk("r19: %08lx r18: %08lx r17: %08lx r16: %08lx\n", ++ regs->NDS32_r19, regs->NDS32_r18, ++ regs->NDS32_r17, regs->NDS32_r16); ++ printk("r15: %08lx r14: %08lx r13: %08lx r12: %08lx\n", ++ regs->NDS32_r15, regs->NDS32_r14, ++ regs->NDS32_r13, regs->NDS32_r12); ++ printk("r11: %08lx r10: %08lx r9 : %08lx r8 : %08lx\n", ++ regs->NDS32_r11, regs->NDS32_r10, ++ regs->NDS32_r9, regs->NDS32_r8); ++ printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", ++ regs->NDS32_r7, regs->NDS32_r6, regs->NDS32_r5, regs->NDS32_r4); ++ printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", ++ regs->NDS32_r3, regs->NDS32_r2, regs->NDS32_r1, regs->NDS32_r0); ++ printk(" IRQs o%s Segment %s\n", ++ interrupts_enabled(regs) ? "n" : "ff", ++ segment_eq(get_fs(), get_ds())? "kernel" : "user"); ++} ++ ++void show_fpregs(struct user_fp *regs) ++{ ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ unsigned long *p; ++ char type; ++ ++ p = (unsigned long *)(regs->fpregs + i); ++ ++ switch (regs->ftype[i]) { ++ case 1: ++ type = 'f'; ++ break; ++ case 2: ++ type = 'd'; ++ break; ++ case 3: ++ type = 'e'; ++ break; ++ default: ++ type = '?'; ++ break; ++ } ++ if (regs->init_flag) ++ type = '?'; ++ ++ printk(" f%d(%c): %08lx %08lx %08lx%c", ++ i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' '); ++ } ++ ++ printk("FPSR: %08lx FPCR: %08lx\n", ++ (unsigned long)regs->fpsr, (unsigned long)regs->fpcr); ++} ++ ++/* ++ * Task structure and kernel stack allocation. ++ */ ++static unsigned long *thread_info_head; ++static unsigned int nr_thread_info; ++ ++#define EXTRA_TASK_STRUCT 4 ++#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) ++#define ll_free_task_struct(p) free_pages((unsigned long)(p),1) ++ ++struct thread_info *alloc_thread_info(struct task_struct *task) ++{ ++ struct thread_info *thread = NULL; ++ ++ if (EXTRA_TASK_STRUCT) { ++ unsigned long *p = thread_info_head; ++ ++ if (p) { ++ thread_info_head = (unsigned long *)p[0]; ++ nr_thread_info -= 1; ++ } ++ thread = (struct thread_info *)p; ++ } ++ ++ if (!thread) ++ thread = ll_alloc_task_struct(); ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++ /* ++ * The stack must be cleared if you want SYSRQ-T to ++ * give sensible stack usage information ++ */ ++ if (thread) { ++ char *p = (char *)thread; ++ memzero(p + KERNEL_STACK_SIZE, KERNEL_STACK_SIZE); ++ } ++#endif ++ return thread; ++} ++ ++void free_thread_info(struct thread_info *thread) ++{ ++ if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) { ++ unsigned long *p = (unsigned long *)thread; ++ p[0] = (unsigned long)thread_info_head; ++ thread_info_head = p; ++ nr_thread_info += 1; ++ } else ++ ll_free_task_struct(thread); ++} ++ ++/* ++ * Free current thread data structures etc.. 
++ */ ++void exit_thread(void) ++{ ++#if defined(CONFIG_FPU) ++# ifndef CONFIG_UNLAZY_FPU ++ if (last_task_used_math == current) { ++ last_task_used_math = NULL; ++ } ++# endif ++#endif ++#if defined(CONFIG_AUDIO) ++# ifndef CONFIG_UNLAZY_AUDIO ++ if (last_task_used_audio == current) { ++ last_task_used_audio = NULL; ++ } ++# endif ++#endif ++} ++ ++void flush_thread(void) ++{ ++ struct task_struct *tsk = current; ++ ++ memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); ++#if defined(CONFIG_FPU) ++ clear_fpu(task_pt_regs(tsk)); ++ clear_used_math(); ++# ifndef CONFIG_UNLAZY_FPU ++ if (last_task_used_math == current) { ++ last_task_used_math = NULL; ++ } ++# endif ++#endif ++ ++#if defined(CONFIG_AUDIO) ++ clear_audio(task_pt_regs(tsk)); ++ clear_tsk_thread_flag(tsk, TIF_USEDAUDIO); ++# ifndef CONFIG_UNLAZY_AUDIO ++ if (last_task_used_audio == current) { ++ last_task_used_audio = NULL; ++ } ++# endif ++#endif ++ ++} ++ ++void release_thread(struct task_struct *dead_task) ++{ ++} ++ ++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); ++ ++/* ++ * Shuffle the argument into the correct register before calling the ++ * thread function. $r1 is the thread argument, $r2 is the pointer to ++ * the thread function, and $r3 points to the exit function. ++ */ ++extern void kernel_thread_helper(void); ++asm(".section .text\n" ++ " .align\n" ++ " .type kernel_thread_helper, #function\n" ++ "kernel_thread_helper:\n" ++ " move $r2, $lp\n" ++ " move $r0, $r1\n" ++ " move $lp, $r3\n" ++ " jr $r2 \n" ++ " .size kernel_thread_helper, . - kernel_thread_helper\n" ++ " .previous"); ++ ++pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags) ++{ ++ struct pt_regs regs; ++ ++ memset(®s, 0, sizeof(regs)); ++ ++ regs.NDS32_r1 = (unsigned long)arg; ++ regs.NDS32_r2 = (unsigned long)fn; ++ /* to apply right path */ ++ regs.NDS32_lp = regs.NDS32_r2; ++ regs.NDS32_r3 = (unsigned long)do_exit; ++ regs.NDS32_ipc = (unsigned long)kernel_thread_helper; ++ ++#ifdef __NDS32_EB__ ++#define PSW_DE PSW_mskBE ++#else ++#define PSW_DE 0x0 ++#endif ++ ++#ifdef CONFIG_WBNA ++#define PSW_valWBNA PSW_mskWBNA ++#else ++#define PSW_valWBNA 0x0 ++#endif ++ ++ regs.NDS32_ipsw = ++ (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | ++ PSW_SYSTEM | PSW_INTL_1 | PSW_mskGIE); ++ regs.NDS32_ir0 = ++ (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | ++ PSW_SYSTEM | PSW_INTL_1); ++ ++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, ++ NULL); ++} ++ ++EXPORT_SYMBOL(kernel_thread); ++ ++int ++copy_thread(unsigned long clone_flags, unsigned long stack_start, ++ unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs) ++{ ++ struct thread_info *thread = task_thread_info(p); ++ struct pt_regs *childregs; ++ ++ childregs = ++ ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1; ++ ++ *childregs = *regs; ++ childregs->NDS32_r0 = 0; /* child get zero as ret. 
*/ ++ childregs->NDS32_sp = stack_start; ++ childregs->NDS32_osp = 0; ++ ++ thread->sp_save = ((struct cpu_context_save *)(childregs)) - 1; ++ /* cpu context switching */ ++ thread->sp_save->pc = (unsigned long)ret_from_fork; ++ if (clone_flags & CLONE_SETTLS) ++ childregs->NDS32_r25 = regs->NDS32_r3; ++ ++#ifdef CONFIG_FPU ++ if (used_math()) { ++# ifdef CONFIG_UNLAZY_FPU ++ unlazy_fpu(current); ++# else ++ preempt_disable(); ++ if (last_task_used_math == current) ++ save_fpu(current); ++ preempt_enable(); ++# endif ++ p->thread.fpu = current->thread.fpu; ++ clear_fpu(task_pt_regs(p)); ++ set_stopped_child_used_math(p); ++ } ++#endif ++ ++#ifdef CONFIG_AUDIO ++ if (test_tsk_thread_flag(current, TIF_USEDAUDIO)) { ++# ifdef CONFIG_UNLAZY_AUDIO ++ unlazy_audio(current); ++# else ++ preempt_disable(); ++ if (last_task_used_audio == current) ++ save_audio(current); ++ preempt_enable(); ++# endif ++ p->thread.audio = current->thread.audio; ++ clear_audio(childregs); ++ set_tsk_thread_flag(p, TIF_USEDAUDIO); ++ } ++#endif ++ ++#ifdef CONFIG_HWZOL ++ childregs->NDS32_lb = 0; ++ childregs->NDS32_le = 0; ++ childregs->NDS32_lc = 0; ++#endif ++ ++ return 0; ++} ++ ++struct task_struct *__switch_to(struct task_struct *last, ++ struct thread_info *prev, ++ struct thread_info *next) ++{ ++#if defined(CONFIG_FPU) || defined(CONFIG_AUDIO) ++# ifdef CONFIG_UNLAZY_FPU ++ unlazy_fpu(prev->task); ++# endif ++# ifdef CONFIG_UNLAZY_AUDIO ++ unlazy_audio(prev->task); ++# endif ++ if (!(next->task->flags & PF_KTHREAD)) { ++#ifdef CONFIG_FPU ++ clear_fpu(task_pt_regs(next->task)); ++#endif ++#ifdef CONFIG_AUDIO ++ clear_audio(task_pt_regs(next->task)); ++#endif ++ } ++#endif ++ return _switch(last, prev, next); ++} ++ ++/* ++ * fill in the fpe structure for a core dump... 
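++ * Note: the FPU path below is currently compiled out (#if 0), so this
++ * always reports that no FPU state is available.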
++ */ ++int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) ++{ ++ int fpvalid = 0; ++ ++#if 0 // XXX defined(CONFIG_FPU) ++ struct task_struct *tsk = current; ++ ++ fpvalid = !!tsk_used_math(tsk); ++ if (fpvalid) { ++ unlazy_fpu(tsk, regs); ++ memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu)); ++ } ++#endif ++ ++ return fpvalid; ++} ++ ++EXPORT_SYMBOL(dump_fpu); ++ ++unsigned long get_wchan(struct task_struct *p) ++{ ++ unsigned long fp, lr; ++ unsigned long stack_start, stack_end; ++ int count = 0; ++ ++ if (!p || p == current || p->state == TASK_RUNNING) ++ return 0; ++ ++#ifdef CONFIG_FRAME_POINTER ++ stack_start = (unsigned long)end_of_stack(p); ++ stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE; ++ ++ fp = thread_saved_fp(p); ++ do { ++ if (fp < stack_start || fp > stack_end) ++ return 0; ++ lr = ((unsigned long *)fp)[0]; ++ if (!in_sched_functions(lr)) ++ return lr; ++ fp = *(unsigned long *)(fp + 4); ++ } while (count++ < 16); ++ return 0; ++#else ++ return 0; ++#endif ++} ++ ++EXPORT_SYMBOL(get_wchan); ++ ++extern int do_elf_check_arch(const struct elf32_hdr *hdr); ++ ++int elf_check_arch(const struct elf32_hdr *hdr) ++{ ++ return do_elf_check_arch(hdr); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ptrace.c linux-3.4.110/arch/nds32/kernel/ptrace.c +--- linux-3.4.110.orig/arch/nds32/kernel/ptrace.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ptrace.c 2016-04-07 10:22:02.056831821 +0200 +@@ -0,0 +1,919 @@ ++/* ++ * linux/arch/nds32/kernel/ptrace.c ++ */ ++/* By Ross Biro 1/23/92 ++ * edited by Linus Torvalds ++ * ARM modifications Copyright (C) 2000 Russell King ++ * NDS32 modifications Copyright (C) 2007 Harry Pan ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for Andes NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Jul.31.2007 Initial ported by Tom, Shawn, and Steven, ++ * revised by Harry. ++ * Current implmentation is based on Andes Instruction ++ * Set Architecture Specification (AS-0001-0001) ++ * version:3.7 date:7-20-2007. ++ * It is original taken from ARM, then fit to NDS32. ++ * Aug.15.2007 Mainly updated breakpoint handling base on NDS32 ISA. ++ * I also did code revise. ++ * Nov.14.2007 Fixed up ptrace_set_bpt() while handling 16-bit insn. ++ * Nov.27.2007 Added checking duplicate breakpoints in ++ * add_breakpoint(), based on Shawn's idea. ++ * Dec.06.2007 Added get_user_gpr(). ++ * Apr.17.2009 Added support for FPU and Audio regs. ++ * Added support for PTRACE_GETFPREGS, PTRACE_SETFPREGS, ++ * PTRACE_GETAUREGS, PTRACE_SETAUREGS ++ * ++ * Note: ++ * ++ * Current layout: 0-31 GR, 32-34 SPR, 35-... SR, index start from zero. 
++ * ++ * +----------+-----+--------------+---+--------+ ++ * | GR | SPR | SR |...| Audio | ++ * +----------+-----+--------------+---+--------+ ++ * 0 32 35 ...500 531 ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ptrace.h" ++ ++#ifdef __NDS32_EL__ ++#define BREAKINST 0x01EA ++#else ++#define BREAKINST 0xEA01 ++#endif ++ ++/* get_user_reg() ++ * ++ * This routine will get a word off of the processes privileged stack. ++ * the offset is how far from the base addr as stored in the THREAD. ++ * this routine assumes that all the privileged stacks are in our ++ * data space. ++ */ ++static inline unsigned int get_user_reg(struct task_struct *task, int offset) ++{ ++ return task_pt_regs(task)->uregs[offset]; ++} ++ ++#if !defined(CONFIG_HSS) ++/* get_user_gpr() ++ * ++ */ ++static unsigned int get_user_gpr(struct task_struct *task, int idx) ++{ ++ unsigned int ret; ++ ++ if (idx < 26) // r0 to r25 ++ ret = get_user_reg(task, idx + 13); ++ else if (idx == 26 || idx == 27) // p0, p1 ++ ret = get_user_reg(task, idx - 26 + 7); ++ else if (idx > 27 && idx < 31) // fp, gp, lp ++ ret = get_user_reg(task, idx + 11); ++ else if (idx == 31) // sp ++ ret = get_user_reg(task, 3); ++ else ++ ret = 0; ++ ++ return ret; ++} ++#endif ++ ++/* put_user_reg() ++ * ++ * this routine will put a word on the processes privileged stack. ++ * the offset is how far from the base addr as stored in the THREAD. ++ * this routine assumes that all the privileged stacks are in our ++ * data space. ++ */ ++static inline int put_user_reg(struct task_struct *task, int offset, long data) ++{ ++ struct pt_regs newregs, *regs = task_pt_regs(task); ++ int ret = -EINVAL; ++ ++ newregs = *regs; ++ newregs.uregs[offset] = data; ++ ++ if (valid_user_regs(&newregs)) { ++ regs->uregs[offset] = data; ++ ret = 0; ++ } ++ ++ return ret; ++} ++ ++#if !defined(CONFIG_HSS) ++/* ++ * Read instruction. ++ */ ++static inline int ++read_insn(struct task_struct *task, unsigned long addr, u32 * res) ++{ ++ int ret; ++ *res = 0; ++ ret = access_process_vm(task, addr, res, 2, 0); ++ if (ret != 2) ++ return 0; ++#ifdef __NDS32_EL__ ++ *res = swab16(*res); ++#endif ++ if ((*res) & 0x8000) ++ return 2; ++ ++ ret = access_process_vm(task, addr, res, 4, 0); ++ if (ret != 4) ++ return 0; ++#ifdef __NDS32_EL__ ++ *res = swab32(*res); ++#endif ++ return 4; ++} ++ ++/* get_branch_address() ++ * ++ * Decode branch instructions and destination. 
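++ * Returns the predicted target address when the instruction at pc is a
++ * branch or jump that will be taken, or 0 when execution simply falls
++ * through (the caller then plants the single-step breakpoint at pc + size).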
++ */ ++static unsigned long ++get_branch_address(struct task_struct *child, ++ unsigned long pc, unsigned long insn, unsigned int size) ++{ ++ unsigned int tpc = 0; ++ ++ /* ++ * TODO: COLE ++ * would it be simpler if we use two BREAKs, ++ * one for take, one for not ++ */ ++ ++ if (size == 4) { ++ /* 32-bit instruction */ ++ if ((insn & 0x7e000000) == 0x4c000000) // BR1 ++ { ++ int cond, imm14s; ++ unsigned int rt, ra; ++ ++ rt = (insn >> 20) & 0x1f; ++ ra = (insn >> 15) & 0x1f; ++ cond = (insn >> 14) & 0x01; ++ imm14s = insn & 0x00003fff; ++ ++ if (imm14s & 0x00002000) // sign extend ++ imm14s -= (0x00002000 << 1); ++ ++ rt = get_user_gpr(child, rt); ++ ra = get_user_gpr(child, ra); ++ ++ if (((cond == 1) && (rt != ra)) || // bne ++ ((cond == 0) && (rt == ra))) // beq ++ tpc = pc + (imm14s << 1); ++ } else if ((insn & 0x7e000000) == 0x4e000000) // BR2 ++ { ++ int cond, imm16s, taken = 0; ++ int rt; ++ ++ rt = (insn >> 20) & 0x1f; ++ cond = (insn >> 16) & 0x0f; ++ imm16s = insn & 0x0000ffff; ++ ++ if (imm16s & 0x00008000) // sign extend ++ imm16s -= (0x00008000 << 1); ++ ++ rt = get_user_gpr(child, rt); ++ ++ switch (cond) { ++ case 0x02: // beqz ++ if (rt == 0) ++ taken = 1; ++ break; ++ case 0x03: // bnez ++ if (rt != 0) ++ taken = 1; ++ break; ++ case 0x06: // bgtz ++ if (rt > 0) ++ taken = 1; ++ break; ++ case 0x07: // blez ++ if (rt <= 0) ++ taken = 1; ++ break; ++ case 0x04: // bgez ++ case 0x0c: // bgezal ++ if (rt >= 0) ++ taken = 1; ++ break; ++ case 0x05: // bltz ++ case 0x0d: // bltzal ++ if (rt < 0) ++ taken = 1; ++ break; ++ default: ++ printk(KERN_WARNING ++ "ptrace: unknown conditional branch.\n"); ++ break; ++ } ++ ++ if (taken) ++ tpc = pc + (imm16s << 1); ++ } else if ((insn & 0x7e000000) == 0x48000000) // JI ++ { ++ int imm24s; ++ ++ imm24s = insn & 0x00ffffff; ++ ++ if (imm24s & 0x00800000) // sign extend ++ imm24s -= (0x00800000 << 1); ++ ++ tpc = pc + (imm24s << 1); ++ } else if ((insn & 0x7e000000) == 0x4a000000) // JREG ++ { ++ unsigned int rb = (insn >> 10) & 0x1f; ++ ++ tpc = get_user_gpr(child, rb); ++ } ++ ++ } else { ++ /* 16-bit instruction */ ++ ++ if ((insn & 0xf800) == 0xc000) // beqz38 ++ { ++ unsigned int rt3; ++ ++ rt3 = (insn >> 8) & 0x07; ++ ++ rt3 = get_user_gpr(child, rt3); ++ ++ if (rt3 == 0) { ++ int imm8s; ++ ++ imm8s = insn & 0x00ff; ++ ++ if (imm8s & 0x0080) // sign extend ++ imm8s -= (0x0080 << 1); ++ ++ tpc = pc + (imm8s << 1); ++ } ++ } ++ ++ if ((insn & 0xf800) == 0xc800) // bnez38 ++ { ++ unsigned int rt3; ++ ++ rt3 = (insn >> 8) & 0x07; ++ ++ rt3 = get_user_gpr(child, rt3); ++ ++ if (rt3 != 0) { ++ int imm8s; ++ ++ imm8s = insn & 0x00ff; ++ ++ if (imm8s & 0x0080) // sign extend ++ imm8s -= (0x0080 << 1); ++ ++ tpc = pc + (imm8s << 1); ++ } ++ } ++ ++ if ((insn & 0xf800) == 0xd000) // beqs38, j8 ++ { ++ unsigned int rt3, r5; ++ ++ rt3 = (insn >> 8) & 0x07; ++ ++ rt3 = get_user_gpr(child, rt3); ++ r5 = get_user_gpr(child, 5); ++ ++ if (r5 == rt3) { ++ int imm8s; ++ imm8s = insn & 0x00ff; ++ ++ if (imm8s & 0x0080) // sign extend ++ imm8s -= (0x0080 << 1); ++ ++ tpc = pc + (imm8s << 1); ++ } ++ } ++ ++ if ((insn & 0xf800) == 0xd800) // bnes38 ++ { ++ unsigned int rt3, r5; ++ ++ rt3 = (insn >> 8) & 0x07; ++ ++ r5 = get_user_gpr(child, 5); ++ rt3 = get_user_gpr(child, rt3); ++ ++ if (r5 != rt3) { ++ int imm8s; ++ ++ imm8s = insn & 0x00ff; ++ ++ if (imm8s & 0x0080) // sign extend ++ imm8s -= (0x0080 << 1); ++ ++ tpc = pc + (imm8s << 1); ++ } ++ } ++ ++ if ((insn & 0xffe0) == 0xdd00 || // jr5 ++ (insn & 0xffe0) == 0xdd80 || // ret5 ++ (insn & 
0xffe0) == 0xdd20) // jral5 ++ { ++ unsigned int rb5; ++ ++ rb5 = insn & 0x1f; ++ ++ tpc = get_user_gpr(child, rb5); ++ } ++ ++ if ((insn & 0xfe00) == 0xe800) { ++ int taken = 0; ++ unsigned int r15; ++ ++ r15 = get_user_gpr(child, 15); ++ ++ if (insn & 0x0100) // bnezs8 ++ { ++ if (r15 != 0) ++ taken = 1; ++ } else // beqzs8 ++ { ++ if (r15 == 0) ++ taken = 1; ++ } ++ ++ if (taken) { ++ int imm8s; ++ ++ imm8s = insn & 0x00ff; ++ ++ if (imm8s & 0x0080) // sign extend ++ imm8s -= (0x0080 << 1); ++ ++ tpc = pc + (imm8s << 1); ++ } ++ } ++ } ++ ++ return tpc; ++} ++ ++/* ++ * Swap instructions in the user program. ++ * swap in new_insn. save orignal value in old_insn ++ * assume new_insn is 2-bytes long ++ */ ++static int ++swap_insn(struct task_struct *task, unsigned long addr, ++ u16 * old_insn, u16 * new_insn) ++{ ++ int ret; ++ ++ ret = access_process_vm(task, addr, old_insn, 2, 0); ++ if (ret == 2) ++ ret = access_process_vm(task, addr, new_insn, 2, 1); ++ ++ return ret; ++} ++ ++/* ++ * Add one breakpoint in the user program. ++ */ ++static void ++add_breakpoint(struct task_struct *task, struct debug_info *dbg, ++ unsigned long addr) ++{ ++ u16 new_insn = BREAKINST; ++ int res; ++ ++ if (dbg->valid) { ++ printk(KERN_ERR "ptrace: too many breakpoints\n"); ++ return; ++ } ++ ++ dbg->address = addr; ++ res = swap_insn(task, addr, &dbg->insn, &new_insn); ++ if (res == 2) ++ dbg->valid = 1; ++ if (!dbg->valid) ++ printk(KERN_ERR "ptrace: fail to add breakpoint\n"); ++} ++ ++/* ++ * Clear one software breakpoint in the user program. ++ */ ++static void clear_breakpoint(struct task_struct *task, struct debug_info *dbg) ++{ ++ int ret; ++ unsigned int addr = dbg->address; ++ u16 old_insn; ++ ++ if (!dbg->valid) { ++ return; ++ } ++ dbg->valid = 0; ++ ++ ret = swap_insn(task, addr, &old_insn, &dbg->insn); ++ ++ if (ret != 2 || old_insn != BREAKINST) { ++ printk(KERN_ERR "ptrace: %s:%d: corrupted NDS16 breakpoint at " ++ "0x%08x (0x%04x)\n", task->comm, task->pid, ++ addr, old_insn); ++ } ++} ++ ++/* ++ * ptrace_set_swbk ++ * Set breakpoint in user program. ++ */ ++void ptrace_set_swbk(struct task_struct *child) ++{ ++ struct pt_regs *regs; ++ unsigned long pc; ++ unsigned int size; ++ u32 insn; ++ ++ /* ++ * always clear before set, ++ * since in some sepcial case, it may fail to hit ++ */ ++ ptrace_cancel_swbk(child); ++ regs = task_pt_regs(child); ++ pc = instruction_pointer(regs); ++ size = read_insn(child, pc, &insn); ++ ++ printk(KERN_DEBUG " STEP.size=%d\n", size); ++ ++ if (size > 0) { ++ struct debug_info *dbg = &child->thread.debug; ++ unsigned int tpc; ++ ++ /* Predict next PC. */ ++ tpc = get_branch_address(child, pc, insn, size); ++ ++ if (tpc) { ++ printk(KERN_DEBUG " STEP.addr=0x%x\n", tpc); ++ add_breakpoint(child, dbg, tpc); ++ } else { ++ if (size == 4) { ++ printk(KERN_DEBUG " STEP.addr=0x%x\n", ++ (unsigned int)(pc + 4)); ++ add_breakpoint(child, dbg, pc + 4); ++ } else if (size == 2) { ++ printk(KERN_DEBUG " STEP.addr=0x%x\n", ++ (unsigned int)(pc + 2)); ++ add_breakpoint(child, dbg, pc + 2); ++ } else { ++ printk(KERN_ERR ++ "ptrace: bad step address, pc + %d\n", ++ size); ++ } ++ } ++ } ++} ++ ++/* ++ * Ensure no single-step breakpoint is pending. Returns non-zero ++ * value if child was being single-stepped. 
++ */ ++void ptrace_cancel_swbk(struct task_struct *child) ++{ ++ if (!child->thread.debug.valid) { ++ return; ++ } ++ ++ clear_breakpoint(child, &child->thread.debug); ++} ++#endif /* end of !defined (CONFIG_HSS) */ ++ ++/* ++ * Called by kernel/ptrace.c when detaching.. ++ * ++ * Make sure the single step bit is not set. ++ */ ++void ptrace_disable(struct task_struct *child) ++{ ++ user_disable_single_step(child); ++} ++ ++/* ++ * Handle hitting a breakpoint. ++ */ ++void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, ++ int error_code, int si_code) ++{ ++ struct siginfo info; ++ ++#if !defined (CONFIG_HSS) ++ /* clear the swbk; otherwise the user will see it */ ++ ptrace_cancel_swbk(tsk); ++#endif ++ ++ tsk->thread.trap_no = 1; ++ tsk->thread.error_code = error_code; ++ ++ memset(&info, 0, sizeof(info)); ++ info.si_signo = SIGTRAP; ++ info.si_code = si_code; ++ ++ info.si_addr = (void __user *)instruction_pointer(regs); ++ ++ /* Send us the fake SIGTRAP */ ++ force_sig_info(SIGTRAP, &info, tsk); ++} ++ ++/* ptrace_read_user() ++ * ++ * Read the word at offset "off" into the "struct user". We ++ * actually access the pt_regs stored on the kernel stack. ++ */ ++static int ++ptrace_read_user(struct task_struct *tsk, unsigned long off, ++ unsigned long __user * ret) ++{ ++ unsigned long tmp = 0; ++ ++ if (off < 500) { ++ if (off & 3 || off >= sizeof(struct user)) ++ return -EIO; ++ ++ if (off < sizeof(struct pt_regs)) ++ tmp = get_user_reg(tsk, off >> 2); ++ ++ return put_user(tmp, ret); ++ } else if (off < 532) { ++#ifdef CONFIG_AUDIO ++ off -= 500; ++ if (test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) { ++#ifdef CONFIG_UNLAZY_AUDIO ++ unlazy_audio(tsk); ++#else ++ preempt_disable(); ++ if (last_task_used_audio == tsk) ++ save_audio(tsk); ++ preempt_enable(); ++#endif ++ tmp = tsk->thread.audio.auregs[off]; ++ } ++#endif ++ return put_user(tmp, ret); ++ } else ++ return -EIO; ++} ++ ++/* ptrace_write_user() ++ * ++ * Write the word at offset "off" into "struct user". We ++ * actually access the pt_regs stored on the kernel stack. ++ */ ++static int ++ptrace_write_user(struct task_struct *tsk, unsigned long off, unsigned long val) ++{ ++ if (off < 500) { ++ if (off & 3 || off >= sizeof(struct user)) ++ return -EIO; ++ ++ if (off >= sizeof(struct pt_regs)) ++ return 0; ++ ++ return put_user_reg(tsk, off >> 2, val); ++ } else if (off < 532) { ++#ifdef CONFIG_AUDIO ++ off -= 500; ++ if (!test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) { ++ /* First time Audio user. */ ++ memset(&tsk->thread.audio, 0, ++ sizeof(struct audio_struct)); ++ set_tsk_thread_flag(tsk, TIF_USEDAUDIO); ++ } else { ++#ifdef CONFIG_UNLAZY_AUDIO ++ unlazy_audio(tsk); ++#else ++ if (last_task_used_audio == tsk) { ++ preempt_disable(); ++ save_audio(tsk); ++ preempt_enable(); ++ } ++#endif ++ /* Let the lazy mechanism do the restore. */ ++ clear_audio(task_pt_regs(tsk)); ++ } ++ tsk->thread.audio.auregs[off] = val; ++#endif ++ return 0; ++ } else ++ return -EIO; ++} ++ ++/* ptrace_getregs() ++ * ++ * Get all user integer registers. ++ */ ++static int ptrace_getregs(struct task_struct *tsk, void __user * uregs) ++{ ++ struct pt_regs *regs = task_pt_regs(tsk); ++ ++ return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; ++} ++ ++/* ptrace_setregs() ++ * ++ * Set all user integer registers. 
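
From the tracer's side, the PEEKUSR path above is reached through the plain ptrace(2) interface; the offset is a byte offset into the user area, and the "off >> 2" above means each register occupies one 4-byte slot starting at offset 0 on this target. A minimal, hypothetical tracer that reads slot 0 of a stopped child could look like the following; register numbering and slot width differ on other architectures.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);          /* stop so the parent can inspect us */
                _exit(0);
        }

        waitpid(child, NULL, 0);         /* wait for the SIGSTOP */

        errno = 0;
        long val = ptrace(PTRACE_PEEKUSER, child, (void *)0, NULL);
        if (errno)
                perror("PTRACE_PEEKUSER");
        else
                printf("user-area word 0 = 0x%lx\n", val);

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
}
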
++ */ ++static int ptrace_setregs(struct task_struct *tsk, void __user * uregs) ++{ ++ struct pt_regs newregs; ++ int ret; ++ ++ ret = -EFAULT; ++ if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { ++ struct pt_regs *regs = task_pt_regs(tsk); ++ ++ ret = -EINVAL; ++ if (valid_user_regs(&newregs)) { ++ *regs = newregs; ++ ret = 0; ++ } ++ } ++ ++ return ret; ++} ++ ++/* ptrace_getfpregs() ++ * ++ * Get the child FPU state. ++ */ ++static int ptrace_getfpregs(struct task_struct *tsk, void __user * ufpregs) ++{ ++#ifdef CONFIG_FPU ++ if (used_math()) { ++# ifdef CONFIG_UNLAZY_FPU ++ unlazy_fpu(tsk); ++# else ++ preempt_disable(); ++ if (last_task_used_math == tsk) ++ save_fpu(tsk); ++ preempt_enable(); ++# endif ++ return copy_to_user(ufpregs, &tsk->thread.fpu, ++ sizeof(struct fpu_struct)) ? -EFAULT : 0; ++ } else { ++ /* First time FPU user. */ ++ memset(ufpregs, -1, sizeof(struct fpu_struct)); ++ return 0; ++ } ++ ++#else ++ return -EFAULT; ++#endif ++} ++ ++/* ++ * Set the child FPU state. ++ */ ++static int ptrace_setfpregs(struct task_struct *tsk, void __user * ufpregs) ++{ ++#ifdef CONFIG_FPU ++ int ret; ++ ++# ifndef CONFIG_UNLAZY_FPU ++ if (last_task_used_math == tsk) ++# endif ++ clear_fpu(task_pt_regs(tsk)); ++ ++ ret = ++ copy_from_user(&tsk->thread.fpu, ufpregs, ++ sizeof(struct fpu_struct)) ? -EFAULT : 0; ++ ++ if (!ret && !used_math()) { ++ /* First time Audio user. */ ++ set_used_math(); ++ } ++ ++ return ret; ++#else ++ return -EFAULT; ++#endif ++} ++ ++/* ptrace_getauregs() ++ * ++ * Get the child Audio state. ++ */ ++static int ptrace_getauregs(struct task_struct *tsk, void __user * uauregs) ++{ ++#ifdef CONFIG_AUDIO ++ if (test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) { ++#ifdef CONFIG_UNLAZY_AUDIO ++ unlazy_audio(tsk); ++#else ++ preempt_disable(); ++ if (last_task_used_audio == tsk) ++ save_audio(tsk); ++ preempt_enable(); ++#endif ++ return copy_to_user(uauregs, &tsk->thread.audio, ++ sizeof(struct audio_struct)) ? -EFAULT : 0; ++ } else { ++ /* First time Audio user. */ ++ memset(uauregs, 0, sizeof(struct audio_struct)); ++ return 0; ++ } ++ ++#else ++ return -EFAULT; ++#endif ++} ++ ++/* ++ * Set the child Audio state. ++ */ ++static int ptrace_setauregs(struct task_struct *tsk, void __user * uauregs) ++{ ++#ifdef CONFIG_AUDIO ++ int ret; ++ ++#ifdef CONFIG_UNLAZY_AUDIO ++ clear_audio(task_pt_regs(tsk)); ++#else ++ if (last_task_used_audio == tsk) ++ clear_audio(task_pt_regs(tsk)); ++#endif ++ ret = ++ copy_from_user(&tsk->thread.audio, uauregs, ++ sizeof(struct audio_struct)) ? -EFAULT : 0; ++ ++ if (!ret && !test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) { ++ /* First time Audio user. */ ++ set_tsk_thread_flag(tsk, TIF_USEDAUDIO); ++ } ++ ++ return ret; ++#else ++ return -EFAULT; ++#endif ++} ++ ++/* do_ptrace() ++ * ++ * Provide ptrace defined service. 
++ */ ++long arch_ptrace(struct task_struct *child, long request, unsigned long addr, ++ unsigned long data) ++{ ++ int ret; ++ ++ switch (request) { ++ case PTRACE_PEEKUSR: ++ ret = ++ ptrace_read_user(child, addr, (unsigned long __user *)data); ++ break; ++ ++ case PTRACE_POKEUSR: ++ ret = ptrace_write_user(child, addr, data); ++ break; ++ ++ case PTRACE_GETREGS: ++ ret = ptrace_getregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_SETREGS: ++ ret = ptrace_setregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_GETFPREGS: ++ ret = ptrace_getfpregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_SETAUREGS: ++ ret = ptrace_setauregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_GETAUREGS: ++ ret = ptrace_getauregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_SETFPREGS: ++ ret = ptrace_setfpregs(child, (void __user *)data); ++ break; ++/* ++ case PTRACE_GET_THREAD_AREA: ++ ret = put_user(task_thread_info(child)->tp_value, ++ (unsigned long __user *) data); ++ break; ++*/ ++ ++ default: ++ ret = ptrace_request(child, request, addr, data); ++ break; ++ } ++ ++ return ret; ++} ++ ++void user_enable_single_step(struct task_struct *child) ++{ ++ struct pt_regs *regs; ++ regs = task_pt_regs(child); ++#ifdef CONFIG_HSS ++ regs->NDS32_ipsw |= PSW_mskHSS; ++#else ++ ptrace_set_swbk(child); ++#endif ++ set_tsk_thread_flag(child, TIF_SINGLESTEP); ++} ++ ++void user_disable_single_step(struct task_struct *child) ++{ ++ struct pt_regs *regs; ++ regs = task_pt_regs(child); ++#ifdef CONFIG_HSS ++ regs->NDS32_ipsw &= ~PSW_mskHSS; ++#else ++ ptrace_cancel_swbk(child); ++#endif ++ clear_tsk_thread_flag(child, TIF_SINGLESTEP); ++} ++ ++/* sys_trace() ++ * ++ * syscall trace handler. ++ */ ++static inline void do_syscall_trace(void) ++{ ++ if (!test_thread_flag(TIF_SYSCALL_TRACE)) ++ return; ++ if (!(current->ptrace & PT_PTRACED)) ++ return; ++ ++ /* the 0x80 provides a way for the tracing parent to distinguish ++ between a syscall stop and SIGTRAP delivery */ ++ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ++ ? 0x80 : 0)); ++ /* ++ * this isn't the same as continuing with a signal, but it will do ++ * for normal use. strace only continues with a signal if the ++ * stopping signal is not SIGTRAP. 
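
do_syscall_trace() above is what produces the syscall-stop notifications a debugger sees: with PT_TRACESYSGOOD set, the stop signal is reported as SIGTRAP with bit 0x80 ORed in, so the tracer can tell syscall stops from a real SIGTRAP. A small host-side loop that consumes those stops (standard ptrace usage, not code from this patch) looks like this:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        int status;

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                (void)write(1, "hello\n", 6);   /* a few syscalls to observe */
                _exit(0);
        }

        waitpid(child, &status, 0);
        /* Ask for the 0x80 marker on syscall stops (PT_TRACESYSGOOD above). */
        ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACESYSGOOD);

        for (;;) {
                ptrace(PTRACE_SYSCALL, child, NULL, NULL);
                waitpid(child, &status, 0);
                if (WIFEXITED(status))
                        break;
                if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
                        printf("syscall stop\n");   /* entry or exit */
                else if (WIFSTOPPED(status))
                        printf("stopped by signal %d\n", WSTOPSIG(status));
        }
        return 0;
}
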
-brl ++ */ ++ if (current->exit_code) { ++ send_sig(current->exit_code, current, 1); ++ current->exit_code = 0; ++ } ++} ++ ++asmlinkage int syscall_trace_enter(int syscall, struct pt_regs *regs) ++{ ++ int orig_r0; ++ ++ orig_r0 = regs->NDS32_ORIG_r0; ++ regs->NDS32_ORIG_r0 = syscall; ++ do_syscall_trace(); ++ syscall = regs->NDS32_ORIG_r0; ++ regs->NDS32_ORIG_r0 = orig_r0; ++ return syscall & 0xfff; ++} ++ ++asmlinkage void syscall_trace_leave(struct pt_regs *regs) ++{ ++ do_syscall_trace(); ++ ++ /* synthsize a single-step */ ++#if !defined(CONFIG_HSS) ++ /* for SWBK, break should be remove */ ++ ptrace_cancel_swbk(current); ++#endif ++ if (test_thread_flag(TIF_SINGLESTEP)) { ++ printk(KERN_INFO "synthsize trap (tf=0x%0x\n", ++ (unsigned int)current_thread_info()->flags); ++ send_sigtrap(current, regs, 0, TRAP_BRKPT); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/ptrace.h linux-3.4.110/arch/nds32/kernel/ptrace.h +--- linux-3.4.110.orig/arch/nds32/kernel/ptrace.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/ptrace.h 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,48 @@ ++/* ++ * linux/arch/nds32/kernel/ptrace.h ++ */ ++/* Copyright (C) 2000-2003 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for Andes NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Jul.31.2007 Initial ported by Tom, Shawn, and Steven, ++ * revised by Harry. ++ * Current implmentation is based on Andes Instruction ++ * Set Architecture Specification (AS-0001-0001) ++ * version:3.7 date:7-20-2007. ++ * It is original taken from ARM, then fit to NDS32. ++ * ++ * Note: ++ * ++ * Current layout: 0-31 GR, 32-34 SPR, 35-... SR, index start from zero. ++ * ++ * +----------+-----+--------------+ ++ * | GR | SPR | SR | ++ * +-------------------------------+ ++ * 0 32 35 ... ++ * ++ * ============================================================================ ++ */ ++#ifndef __KERNEL_PTRACE_H__ ++#define __KERNEL_PTRACE_H__ ++ ++extern void ptrace_cancel_swbk(struct task_struct *); ++extern void ptrace_set_swbk(struct task_struct *); ++extern void ptrace_break(struct task_struct *, struct pt_regs *); ++extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, ++ int error_code, int si_code); ++ ++#endif // __KERNEL_PTRACE_H__ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/relocate_kernel.S linux-3.4.110/arch/nds32/kernel/relocate_kernel.S +--- linux-3.4.110.orig/arch/nds32/kernel/relocate_kernel.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/relocate_kernel.S 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * relocate_kernel.S - put the kernel image in place to boot ++ */ ++ ++#include ++ ++ .globl relocate_new_kernel ++relocate_new_kernel: ++ la $r0, kexec_indirection_page ++ slli $r0, $r0, 2 ++ srli $r0, $r0, 2 ++ lwi $r0, [$r0] ++ la $r1, kexec_start_address ++ slli $r1, $r1, 2 ++ srli $r1, $r1, 2 ++ lwi $r1, [$r1] ++ ++0: /* top, read another word for the indirection page */ ++ lwi.bi $r3, [$r0], #4 ++ ++ /* Is it a destination page. 
Put destination address to r4 */ ++ andi $p0, $r3, #1 ++ beqz $p0, 1f ++ li $p0, ~#1 ++ and $r4, $r3, $p0 ++ b 0b ++1: ++ /* Is it an indirection page */ ++ andi $p0, $r3, #2 ++ beqz $p0, 1f ++ li $p0, ~#2 ++ and $r0, $r3, $p0 ++ b 0b ++1: ++ /* are we done ? */ ++ andi $p0, $r3, #4 ++ beqz $p0, 1f ++ b 2f ++1: ++ /* is it source ? */ ++ andi $p0, $r3, #8 ++ beqz $p0, 0b ++ li $p0, ~#8 ++ and $r3, $r3, $p0 ++ li $r6, #(1024/16) /* 16 words per loop */ ++9: ++ lmw.bim $r7, [$r3], $r22 ++ smw.bim $r7, [$r4], $r22 ++ addi $r6, $r6, -1 ++ bnez $r6, 9b ++ b 0b ++2: ++ /* Jump to relocated kernel */ ++ move $lp, $r1 ++ li $r0, 0 ++ la $r1, kexec_mach_type ++ slli $r1, $r1, 2 ++ srli $r1, $r1, 2 ++ lwi $r1, [$r1] ++ la $r2, kexec_boot_atags ++ slli $r2, $r2, 2 ++ srli $r2, $r2, 2 ++ lwi $r2, [$r2] ++ ret ++ ++ .globl kexec_start_address ++kexec_start_address: ++ .long 0x0 ++ ++ .globl kexec_indirection_page ++kexec_indirection_page: ++ .long 0x0 ++ ++ .globl kexec_mach_type ++kexec_mach_type: ++ .long 0x0 ++ ++ /* phy addr of the atags for the new kernel */ ++ .globl kexec_boot_atags ++kexec_boot_atags: ++ .long 0x0 ++ ++relocate_new_kernel_end: ++ ++ .globl relocate_new_kernel_size ++relocate_new_kernel_size: ++ .long relocate_new_kernel_end - relocate_new_kernel ++ ++ +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/setup.c linux-3.4.110/arch/nds32/kernel/setup.c +--- linux-3.4.110.orig/arch/nds32/kernel/setup.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/setup.c 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,1049 @@ ++/* ++ * linux/arch/nds32/kernel/setup.c ++ * ++ * Copyright (C) 1995-2001 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for Andes NDS32 architecture. ++ * ++ * Revision History: ++ * ++ * Jul.05.2007 Initial ported by Tom, revised and patched for KGDB ++ * by Harry. ++ * Aug.26.2008 Some reworks on CPU info output for SMP. 
++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifndef MEM_SIZE ++#define MEM_SIZE CONFIG_SDRAM_SIZE ++#endif ++ ++#ifndef RAMDISK_SIZE ++#define RAMDISK_SIZE CONFIG_BLK_DEV_RAM_SIZE ++#endif ++ ++extern void (*init_arch_irq) (void); ++ ++extern void paging_init(struct machine_desc *desc); ++extern void reboot_setup(char *str); ++extern int root_mountflags; ++extern unsigned long _stext, _text, _etext, _sdata, _edata, _end; ++extern unsigned int ag101_cpufreq_get(unsigned int dummy); ++ ++unsigned long cpu_id, cpu_rev, cpu_cfgid; ++char *endianness = NULL; ++ ++unsigned int __machine_arch_type; ++EXPORT_SYMBOL(__machine_arch_type); ++ ++unsigned int elf_hwcap; ++EXPORT_SYMBOL(elf_hwcap); ++ ++unsigned char aux_device_present; ++ ++char elf_platform[ELF_PLATFORM_SIZE]; ++EXPORT_SYMBOL(elf_platform); ++ ++unsigned long phys_initrd_start __initdata = 0; ++unsigned long phys_initrd_size __initdata = 0; ++ ++static struct meminfo meminfo __initdata = { 0, }; ++ ++static const char *machine_name; ++static struct proc_info_item proc_info; ++static char command_line[COMMAND_LINE_SIZE]; ++ ++struct machine_desc *machine_desc __initdata; ++ ++static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; ++ ++DEFINE_PER_CPU(struct cpuinfo_nds32, cpu_data); ++ ++/* ++ * Standard memory resources ++ */ ++static struct resource mem_res[] = { ++ { ++ .name = "Video RAM", ++ .start = 0, ++ .end = 0, ++ .flags = IORESOURCE_MEM}, ++ { ++ .name = "Kernel text", ++ .start = 0, ++ .end = 0, ++ .flags = IORESOURCE_MEM}, ++ { ++ .name = "Kernel data", ++ .start = 0, ++ .end = 0, ++ .flags = IORESOURCE_MEM} ++}; ++ ++#define video_ram mem_res[0] ++#define kernel_code mem_res[1] ++#define kernel_data mem_res[2] ++ ++static struct resource io_res[] = { ++ { ++ .name = "reserved", ++ .start = 0x3bc, ++ .end = 0x3be, ++ .flags = IORESOURCE_IO | IORESOURCE_BUSY}, ++ { ++ .name = "reserved", ++ .start = 0x378, ++ .end = 0x37f, ++ .flags = IORESOURCE_IO | IORESOURCE_BUSY}, ++ { ++ .name = "reserved", ++ .start = 0x278, ++ .end = 0x27f, ++ .flags = IORESOURCE_IO | IORESOURCE_BUSY} ++}; ++ ++#define lp0 io_res[0] ++#define lp1 io_res[1] ++#define lp2 io_res[2] ++ ++/* ++ * The following string table, must sync with HWCAP_xx bitmask, ++ * which is defined in ++ */ ++static const char *hwcap_str[] = { ++ "mfusr_pc", ++ "perf1", ++ "perf2", ++ "fpu", ++ "audio", ++ "16b", ++ "string", ++ "reduced_regs", ++ "video", ++ "encrypt", ++ "edm", ++ "lmdma", ++ "pfm", ++ "hsmp", ++ "trace", ++ "div", ++ "mac", ++ "l2c", ++ "dx_regs", ++ "v2", ++ NULL, ++}; ++ ++static void __init squash_mem_tags(struct tag *tag) ++{ ++ for (; tag->hdr.size; tag = tag_next(tag)) ++ if (tag->hdr.tag == ATAG_MEM) ++ tag->hdr.tag = ATAG_NONE; ++} ++ ++struct cache_info L1_cache_info[2]; ++ ++static void __init dump_cpu_info(int cpu) ++{ ++ int i = 0, aliasing_num; ++#ifdef CONFIG_CACHE_L2 ++ unsigned long l2set, l2way, l2clsz; ++#endif ++ printk("CPU%d Features: ", cpu); ++ ++ for (i = 0; hwcap_str[i]; i++) { ++ if (elf_hwcap & (1 << i)) ++ printk("%s ", hwcap_str[i]); ++ } ++ ++ printk("\n"); ++ ++ L1_cache_info[ICACHE].cache_type = ICACHE; ++ L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE); ++ 
L1_cache_info[ICACHE].way_bits = ilog2(CACHE_WAY(ICACHE)); ++ L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE); ++ L1_cache_info[ICACHE].line_bits = ilog2(CACHE_LINE_SIZE(ICACHE)); ++ L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE); ++ L1_cache_info[ICACHE].set_bits = ilog2(CACHE_SET(ICACHE)); ++ L1_cache_info[ICACHE].size = ++ CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) * CACHE_LINE_SIZE(ICACHE) / ++ 1024; ++ printk("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size, ++ L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways, ++ L1_cache_info[ICACHE].line_size); ++ aliasing_num = ++ L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE / ++ L1_cache_info[ICACHE].ways; ++ if (aliasing_num & 1 && aliasing_num != 1) ++ printk ++ ("%s: not alising:%d, it should be multiple of 2 if it ia aliasing cache.\n", ++ __func__, aliasing_num); ++ L1_cache_info[ICACHE].aliasing_num = aliasing_num; ++ L1_cache_info[ICACHE].aliasing_mask = (aliasing_num - 1) << PAGE_SHIFT; ++ L1_cache_info[ICACHE].not_aliasing_mask = ++ ~L1_cache_info[ICACHE].aliasing_mask; ++ L1_cache_info[DCACHE].cache_type = DCACHE; ++ L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE); ++ L1_cache_info[DCACHE].way_bits = ilog2(CACHE_WAY(DCACHE)); ++ L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE); ++ L1_cache_info[DCACHE].line_bits = ilog2(CACHE_LINE_SIZE(DCACHE)); ++ L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE); ++ L1_cache_info[DCACHE].set_bits = ilog2(CACHE_SET(DCACHE)); ++ L1_cache_info[DCACHE].size = ++ CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) * CACHE_LINE_SIZE(DCACHE) / ++ 1024; ++ printk("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size, ++ L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways, ++ L1_cache_info[DCACHE].line_size); ++ aliasing_num = ++ L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE / ++ L1_cache_info[DCACHE].ways; ++#ifdef CONFIG_HIGHMEM ++ if (aliasing_num > 1 && CONFIG_HIGHMEM) ++ WARN(1, ++ "%s: HIGHMEM is not supported for alising VIPT cache. aliasing_num:%d\n", ++ __func__, aliasing_num); ++#else ++ if (aliasing_num & 1 && aliasing_num != 1) ++ printk ++ ("%s: not alising:%d, it should be multiple of 2 if it ia aliasing cache.\n", ++ __func__, aliasing_num); ++#endif ++ L1_cache_info[DCACHE].aliasing_num = aliasing_num; ++ L1_cache_info[DCACHE].aliasing_mask = (aliasing_num - 1) << PAGE_SHIFT; ++ L1_cache_info[DCACHE].not_aliasing_mask = ++ ~L1_cache_info[DCACHE].aliasing_mask; ++#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ++ printk("L1 D-Cache is WRITE-THROUGH\n"); ++#else ++ printk("L1 D-Cache is WRITE-BACK\n"); ++#endif ++ ++ ++#ifdef CONFIG_CACHE_L2 ++ /* Here translation is on but I/O address is not mapped yet. */ ++ SET_PSW(GET_PSW() & ~PSW_mskDT); ++ DSB(); ++ l2set = ++ 64 << ((inl(L2CC_PA_BASE + L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >> ++ L2_CA_CONF_offL2SET); ++ l2way = ++ 1 + ++ ((inl(L2CC_PA_BASE + L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >> ++ L2_CA_CONF_offL2WAY); ++ l2clsz = ++ 4 << ((inl(L2CC_PA_BASE + L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >> ++ L2_CA_CONF_offL2CLSZ); ++ SET_PSW(GET_PSW() | PSW_mskDT); ++ DSB(); ++ ++ printk("L2:%luKB/%luS/%luW/%luB\n", ++ l2set * l2way * l2clsz / 1024, l2set, l2way, l2clsz); ++#endif ++#ifdef CONFIG_FPU ++ /* Disable fpu and enable when it is used. 
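
The aliasing computation above boils down to: the number of cache colours is the size of one way divided by the page size, and the colour of an address comes from the bits just above PAGE_SHIFT. A stand-alone version of the same arithmetic, with a made-up 32 KB / 2-way / 32-byte-line geometry, is:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        /* Example geometry only, not taken from any particular NDS32 part:
         * 2-way, 512 sets, 32-byte lines => 32 KB data cache. */
        unsigned long ways = 2, sets = 512, line = 32;
        unsigned long size = sets * ways * line;

        /* One way spans sets * line bytes of virtual index; if that is
         * larger than a page, virtual aliases of the same physical page
         * can land in different cache colours. */
        unsigned long aliasing_num  = size / PAGE_SIZE / ways;
        unsigned long aliasing_mask = (aliasing_num - 1) << PAGE_SHIFT;

        printf("cache %lu KB, %lu colours, mask 0x%lx\n",
               size / 1024, aliasing_num, aliasing_mask);
        return 0;
}
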
*/ ++ disable_fpu(); ++ printk("FPU is able to use.\n"); ++#endif ++} ++ ++static void __init setup_processor(void) ++{ ++ unsigned long tmp = 0; ++ struct proc_info_list *list; ++ extern struct proc_info_list __proc_info_begin, __proc_info_end; ++ ++ register unsigned coreid = GET_CPU_VER(); ++ for (list = &__proc_info_begin; list < &__proc_info_end; list++) ++// if (list->cpu_val == (GET_CPU_VER() & CPU_VER_mskCPUID)) ++ if (list->cpu_val == (coreid & list->cpu_mask)) ++ break; ++ /* ++ * If the architecture type is not recognised, then we ++ * can co nothing... ++ */ ++ if (list >= &__proc_info_end) { ++ printk ++ ("Processor configuration botched (CPU_VER 0x%lx), unable to continue.\n", ++ GET_CPU_VER()); ++ while (1) ; ++ } ++ ++ proc_info = *list->info; ++ ++ cpu_dcache_inval_all(); // XXX head.S turn $$ on, need change. ++ cpu_icache_inval_all(); ++ DSB(); ++ ISB(); ++ ++ cpu_id = GET_CPU_ID(); ++ cpu_rev = GET_CPU_REV(); ++ cpu_cfgid = GET_CPU_CFGID(); ++ ++ printk("CPU: %s, %s %s, CPU_VER 0x%08lx(id %lu, rev %lu, cfg %lu)\n", ++ list->arch_name, ++ proc_info.manufacturer, proc_info.cpu_name, ++ GET_CPU_VER(), cpu_id, cpu_rev, cpu_cfgid); ++ ++ elf_hwcap |= HWCAP_MFUSR_PC; ++ ++ if (((GET_MSC_CFG() & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) { ++ if (CPU_IS_N1213_43U1HA0() || CPU_IS_N1213_43U1HB0()) ++ elf_hwcap &= ~HWCAP_MFUSR_PC; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskDIV) ++ elf_hwcap |= HWCAP_DIV; ++ ++ if ((GET_MSC_CFG() & MSC_CFG_mskMAC) ++ || (cpu_id == 12 && cpu_rev < 4)) ++ elf_hwcap |= HWCAP_MAC; ++ } else { ++ elf_hwcap |= HWCAP_V2; ++ elf_hwcap |= HWCAP_DIV; ++ elf_hwcap |= HWCAP_MAC; ++ } ++ ++ if (cpu_cfgid & 0x0001) ++ elf_hwcap |= HWCAP_EXT; ++ ++ if (cpu_cfgid & 0x0002) ++ elf_hwcap |= HWCAP_BASE16; ++ ++ if (cpu_cfgid & 0x0004) ++ elf_hwcap |= HWCAP_EXT2; ++ ++ if (cpu_cfgid & 0x0008) ++ elf_hwcap |= HWCAP_FPU; ++ ++ if (cpu_cfgid & 0x0010) ++ elf_hwcap |= HWCAP_STRING; ++ ++ if (GET_MMU_CFG() & MMU_CFG_mskDE) ++ endianness = "MSB"; ++ else ++ endianness = "LSB"; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskEDM) ++ elf_hwcap |= HWCAP_EDM; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskLMDMA) ++ elf_hwcap |= HWCAP_LMDMA; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskPFM) ++ elf_hwcap |= HWCAP_PFM; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskHSMP) ++ elf_hwcap |= HWCAP_HSMP; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskTRACE) ++ elf_hwcap |= HWCAP_TRACE; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskAUDIO) ++ elf_hwcap |= HWCAP_AUDIO; ++ ++ if (GET_MSC_CFG() & MSC_CFG_mskL2C) ++ elf_hwcap |= HWCAP_L2C; ++ ++#ifdef CONFIG_ANDES_PAGE_SIZE_4KB ++ if (CPU_IS_N1213_43U1HA0()) { ++ /* ++ * Downsize dcache to bypass N1213-43u1h inconsistent ++ * use of PA and VA in fill-buffer logic issue. ++ */ ++ ++ if ((CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE)) > 4096) ++ tmp |= 0x02 << SDZ_CTL_offDCDZ; ++ ++ if ((CACHE_SET(ICACHE) * CACHE_LINE_SIZE(ICACHE)) > 4096) ++ tmp |= 0x02 << SDZ_CTL_offICDZ; ++ ++ SET_SDZ_CTL(tmp); ++ ISB(); ++ printk("CPU%i enabled dcache downsizing to half set/4-way.\n", ++ smp_processor_id()); ++ } ++#endif /* CONFIG_ANDES_PAGE_SIZE_4KB */ ++ ++#ifdef CONFIG_CACHE_L2 ++#ifdef CONFIG_PLAT_AG102 ++ SET_HSMP_SADDR((CPU_MEM_PA_BASE & HSMP_SADDR_mskSADDR) | ++ (0xC00 << HSMP_SADDR_offRANGE) | HSMP_SADDR_mskEN); ++#endif ++ ++ /* Here translation is on but I/O address is not mapped yet. */ ++ SET_PSW(GET_PSW() & ~PSW_mskDT); ++ DSB(); ++ ++ /* This is the time when we enable L2$. 
*/ ++ /* All masters can't write another master register */ ++ tmp = inl(L2CC_PA_BASE + L2CC_PROT_OFF); ++ tmp &= ~L2CC_PROT_mskMRWEN; ++ outl(tmp, L2CC_PA_BASE + L2CC_PROT_OFF); ++ ++ /* All masters share the whole cache memory space */ ++ tmp = inl(L2CC_PA_BASE + L2CC_SETUP_OFF); ++ tmp &= ~L2CC_SETUP_mskPART; ++ outl(tmp, L2CC_PA_BASE + L2CC_SETUP_OFF); ++ ++ /* each master access self master, does not add master base */ ++ tmp = inl(L2CC_PA_BASE + L2CC_CTRL_OFF); ++ tmp |= L2CC_CTRL_mskEN; ++ outl(tmp, L2CC_PA_BASE + L2CC_CTRL_OFF); ++ ++ SET_PSW(GET_PSW() | PSW_mskDT); ++ ISB(); ++#endif ++ tmp = GET_CACHE_CTL(); ++#ifndef CONFIG_CPU_DCACHE_DISABLE ++ tmp |= CACHE_CTL_mskDC_EN; ++#endif ++ ++#ifndef CONFIG_CPU_ICACHE_DISABLE ++ tmp |= CACHE_CTL_mskIC_EN; ++#endif ++ SET_CACHE_CTL(tmp); ++ DSB(); ++ ISB(); ++ ++ sprintf(elf_platform, "%s %s", list->elf_name, endianness); ++ ++ dump_cpu_info(smp_processor_id()); ++} ++ ++static struct machine_desc *__init setup_machine(unsigned int nr) ++{ ++ struct machine_desc *list; ++ ++ extern struct machine_desc __arch_info_begin, __arch_info_end; ++ /* ++ * locate machine in the list of supported machines. ++ */ ++ for (list = &__arch_info_begin; list < &__arch_info_end; list++) ++ if (list->nr == nr) ++ break; ++ /* ++ * If the architecture type is not recognised, then we ++ * can co nothing... ++ */ ++ if (list >= &__arch_info_end) { ++ printk("Architecture configuration botched (nr 0x%x), unable " ++ "to continue.\n", nr); ++ while (1) ; ++ } ++ ++ printk(KERN_INFO "Machine: %s\n", list->name); ++ ++ return list; ++} ++ ++static void __init early_initrd(char **p) ++{ ++ unsigned long start, size; ++ ++ start = memparse(*p, p); ++ if (**p == ',') { ++ size = memparse((*p) + 1, p); ++ ++ phys_initrd_start = start; //pa ++ phys_initrd_size = size; ++ } ++ printk(KERN_INFO ++ "phys_initrd_start at 0x%08lx, phys_initrd_size:0x%08lx\n", ++ phys_initrd_start, phys_initrd_size); ++ //memblock_reserve(phys_initrd_start, phys_initrd_size); ++} ++ ++__early_param("initrd=", early_initrd); ++ ++/* ++ * Pick out the memory size. We look for mem=size@start, ++ * where start and size are "size[KkMm]" ++ */ ++static void __init early_mem(char **p) ++{ ++ static int usermem __initdata = 0; ++ unsigned long size, start; ++ ++ /* ++ * If the user specifies memory size, we ++ * blow away any automatically generated ++ * size. ++ */ ++ if (usermem == 0) { ++ usermem = 1; ++ meminfo.nr_banks = 0; ++ } ++ ++ start = PHYS_OFFSET; //Tom 0x0 ++ size = memparse(*p, p); ++ if (**p == '@') ++ start = memparse(*p + 1, p); ++ ++ meminfo.bank[meminfo.nr_banks].start = start; ++ meminfo.bank[meminfo.nr_banks].size = size; ++ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start); ++ memblock_add_node(meminfo.bank[meminfo.nr_banks].start, ++ meminfo.bank[meminfo.nr_banks].size, 0); ++ meminfo.nr_banks += 1; ++ ++} ++ ++__early_param("mem=", early_mem); ++ ++/* ++ * Initial parsing of the command line. 
++ */ ++void __init parse_cmdline(char **cmdline_p, char *from) ++{ ++ char c = ' ', *to = command_line; ++ int len = 0; ++ ++ for (;;) { ++ if (c == ' ') { ++ extern struct early_params __early_begin, __early_end; ++ struct early_params *p; ++ ++ for (p = &__early_begin; p < &__early_end; p++) { ++ int len = strlen(p->arg); ++ ++ if (memcmp(from, p->arg, len) == 0) { ++ if (to != command_line) ++ to -= 1; ++ from += len; ++ p->fn(&from); ++ ++ while (*from != ' ' && *from != '\0') ++ from++; ++ break; ++ } ++ } ++ } ++ c = *from++; ++ if (!c) ++ break; ++ if (COMMAND_LINE_SIZE <= ++len) ++ break; ++ *to++ = c; ++ } ++ *to = '\0'; ++ *cmdline_p = command_line; ++} ++ ++static void __init ++setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) ++{ ++#ifdef CONFIG_BLK_DEV_RAM ++ extern int rd_size, rd_image_start, rd_prompt, rd_doload; ++ ++ rd_image_start = image_start; ++ rd_prompt = prompt; ++ rd_doload = doload; ++ ++ if (rd_sz) ++ rd_size = rd_sz; ++#endif ++} ++ ++static void __init ++request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) ++{ ++ struct resource *res; ++ int i; ++ ++ kernel_code.start = virt_to_phys(&_text); ++ kernel_code.end = virt_to_phys(&_etext - 1); ++ kernel_data.start = virt_to_phys(&_sdata); ++ kernel_data.end = virt_to_phys(&_end - 1); ++ ++ for (i = 0; i < mi->nr_banks; i++) { ++ unsigned long virt_start, virt_end; ++ ++ if (mi->bank[i].size == 0) ++ continue; ++ ++ virt_start = __phys_to_virt(mi->bank[i].start); ++ virt_end = virt_start + mi->bank[i].size - 1; ++ ++ res = alloc_bootmem_low(sizeof(*res)); ++ res->name = "System RAM"; ++ res->start = __virt_to_phys(virt_start); ++ res->end = __virt_to_phys(virt_end); ++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; ++ ++ request_resource(&iomem_resource, res); ++ ++ if (kernel_code.start >= res->start && ++ kernel_code.end <= res->end) ++ request_resource(res, &kernel_code); ++ if (kernel_data.start >= res->start && ++ kernel_data.end <= res->end) ++ request_resource(res, &kernel_data); ++ } ++ ++ if (mdesc->video_start) { ++ video_ram.start = mdesc->video_start; ++ video_ram.end = mdesc->video_end; ++ request_resource(&iomem_resource, &video_ram); ++ } ++ ++ /* ++ * Some machines don't have the possibility of ever ++ * possessing lp0, lp1 or lp2 ++ */ ++ if (mdesc->reserve_lp0) ++ request_resource(&ioport_resource, &lp0); ++ if (mdesc->reserve_lp1) ++ request_resource(&ioport_resource, &lp1); ++ if (mdesc->reserve_lp2) ++ request_resource(&ioport_resource, &lp2); ++} ++ ++/* ++ * Tag parsing. ++ * ++ * This is the new way of passing data to the kernel at boot time. Rather ++ * than passing a fixed inflexible structure to the kernel, we pass a list ++ * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE ++ * tag for the list to be recognised (to distinguish the tagged list from ++ * a param_struct). The list is terminated with a zero-length tag (this tag ++ * is not parsed in any way). 
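
The tag list described above is just a chain of (size, tag) headers measured in 32-bit words, walked until a zero-sized terminator. A minimal user-space model of that walk is shown below; the tag numbers are the conventional ATAG values and the tag bodies are left empty purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define ATAG_NONE 0x00000000
#define ATAG_CORE 0x54410001
#define ATAG_MEM  0x54410002

struct tag_header {
        uint32_t size;   /* size of the whole tag in 32-bit words */
        uint32_t tag;
};

#define tag_next(t) ((struct tag_header *)((uint32_t *)(t) + (t)->size))

int main(void)
{
        /* A tiny hand-built list: empty core tag, empty mem tag, terminator. */
        uint32_t list[] = {
                2, ATAG_CORE,
                2, ATAG_MEM,
                0, ATAG_NONE            /* size 0 ends the list */
        };

        for (struct tag_header *t = (struct tag_header *)list;
             t->size; t = tag_next(t))
                printf("tag 0x%08x, %u words\n",
                       (unsigned)t->tag, (unsigned)t->size);

        return 0;
}
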
++ */ ++//flag bit 0 = read-only ++static int __init parse_tag_core(const struct tag *tag) ++{ ++ if (tag->hdr.size > 2) { ++ if ((tag->u.core.flags & 1) == 0) ++ root_mountflags &= ~MS_RDONLY; ++ ROOT_DEV = old_decode_dev(tag->u.core.rootdev); ++ } ++ return 0; ++} ++ ++__tagtable(ATAG_CORE, parse_tag_core); ++ ++static int __init parse_tag_mem32(const struct tag *tag) ++{ ++ if (meminfo.nr_banks >= NR_BANKS) { ++ printk(KERN_WARNING ++ "Ignoring memory bank 0x%08x size %dKB\n", ++ tag->u.mem.start, tag->u.mem.size / 1024); ++ return -EINVAL; ++ } ++ meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start; ++ meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size; ++ memblock_add_node(meminfo.bank[meminfo.nr_banks].start, ++ meminfo.bank[meminfo.nr_banks].size, 0); ++ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start); ++ meminfo.nr_banks += 1; ++ ++ return 0; ++} ++ ++__tagtable(ATAG_MEM, parse_tag_mem32); ++ ++#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) ++struct screen_info screen_info = { ++ .orig_video_lines = 30, ++ .orig_video_cols = 80, ++ .orig_video_mode = 0, ++ .orig_video_ega_bx = 0, ++ .orig_video_isVGA = 1, ++ .orig_video_points = 8 ++}; ++ ++static int __init parse_tag_videotext(const struct tag *tag) ++{ ++ screen_info.orig_x = tag->u.videotext.x; ++ screen_info.orig_y = tag->u.videotext.y; ++ screen_info.orig_video_page = tag->u.videotext.video_page; ++ screen_info.orig_video_mode = tag->u.videotext.video_mode; ++ screen_info.orig_video_cols = tag->u.videotext.video_cols; ++ screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; ++ screen_info.orig_video_lines = tag->u.videotext.video_lines; ++ screen_info.orig_video_isVGA = tag->u.videotext.video_isvga; ++ screen_info.orig_video_points = tag->u.videotext.video_points; ++ return 0; ++} ++ ++__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); ++#endif ++ ++static int __init parse_tag_ramdisk(const struct tag *tag) ++{ ++ setup_ramdisk((tag->u.ramdisk.flags & 1) == 0, ++ (tag->u.ramdisk.flags & 2) == 0, ++ tag->u.ramdisk.start, tag->u.ramdisk.size); ++ return 0; ++} ++ ++__tagtable(ATAG_RAMDISK, parse_tag_ramdisk); ++ ++static int __init parse_tag_initrd(const struct tag *tag) ++{ ++ printk(KERN_WARNING "ATAG_INITRD is deprecated; " ++ "please update your bootloader.\n"); ++ phys_initrd_start = __virt_to_phys(tag->u.initrd.start); ++ phys_initrd_size = tag->u.initrd.size; ++ return 0; ++} ++ ++__tagtable(ATAG_INITRD, parse_tag_initrd); ++ ++static int __init parse_tag_initrd2(const struct tag *tag) ++{ ++ phys_initrd_start = tag->u.initrd.start; ++ phys_initrd_size = tag->u.initrd.size; ++ return 0; ++} ++ ++__tagtable(ATAG_INITRD2, parse_tag_initrd2); ++ ++static int __init parse_tag_revision(const struct tag *tag) ++{ ++ return 0; ++} ++ ++__tagtable(ATAG_REVISION, parse_tag_revision); ++ ++static int __init parse_tag_cmdline(const struct tag *tag) ++{ ++ strlcpy(default_command_line, tag->u.cmdline.cmdline, ++ COMMAND_LINE_SIZE); ++ return 0; ++} ++ ++__tagtable(ATAG_CMDLINE, parse_tag_cmdline); ++ ++/* ++ * Scan the tag table for this tag, and call its parse function. ++ * The tag table is built by the linker from all the __tagtable ++ * declarations. 
++ */ ++static int __init parse_tag(const struct tag *tag) ++{ ++ extern struct tagtable __tagtable_begin, __tagtable_end; ++ struct tagtable *t; ++ ++ for (t = &__tagtable_begin; t < &__tagtable_end; t++) ++ if (tag->hdr.tag == t->tag) { ++ t->parse(tag); ++ break; ++ } ++ ++ return t < &__tagtable_end; ++} ++ ++/* ++ * Parse all tags in the list, checking both the global and architecture ++ * specific tag tables. ++ */ ++static void __init parse_tags(const struct tag *t) ++{ ++ for (; t->hdr.size; t = tag_next(t)) ++ if (!parse_tag(t)) ++ printk(KERN_WARNING ++ "Ignoring unrecognised tag 0x%08x\n", ++ t->hdr.tag); ++} ++ ++/* ++ * This holds our defaults. ++ */ ++static struct init_tags { ++ struct tag_header hdr1; ++ struct tag_core core; ++ struct tag_header hdr2; ++ struct tag_mem32 mem; ++ struct tag_header hdr3; ++} init_tags __initdata = { ++ {tag_size(tag_core), ATAG_CORE}, //hdr1 ++ {0, PAGE_SIZE, 0xff}, ++ {tag_size(tag_mem32), ATAG_MEM}, //hdr2 ++ {MEM_SIZE, PHYS_OFFSET}, ++ {0, ATAG_NONE} ++}; ++ ++static unsigned long __init setup_memory(void) ++{ ++ unsigned long bootmap_size; ++ unsigned long ram_start_pfn; ++ unsigned long free_ram_start_pfn; ++ phys_addr_t memory_start, memory_end; ++ struct memblock_region *region; ++ ++ memory_end = memory_start = 0; ++ ++ /* Find main memory where is the kernel */ ++ for_each_memblock(memory, region) { ++ memory_start = region->base; ++ memory_end = region->base + region->size; ++ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, ++ memory_start, memory_end); ++ } ++ ++ if (!memory_end) { ++ panic("No memory!"); ++ } ++ ++ ram_start_pfn = PFN_UP(memblock_start_of_DRAM()); ++ /* free_ram_start_pfn is first page after kernel */ ++ free_ram_start_pfn = PFN_UP(__pa(&_end)); ++ max_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ ++ /* it could update max_pfn */ ++ if (max_pfn - ram_start_pfn <= MAXMEM_PFN) ++ max_low_pfn = max_pfn; ++ else { ++ max_low_pfn = MAXMEM_PFN + ram_start_pfn; ++#ifndef CONFIG_HIGHMEM ++ max_pfn = MAXMEM_PFN + ram_start_pfn; ++#endif ++ } ++ /* high_memory is related with VMALLOC */ ++ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); ++ min_low_pfn = free_ram_start_pfn; ++ ++ /* ++ * initialize the boot-time allocator (with low memory only). ++ * ++ * This makes the memory from the end of the kernel to the end of ++ * RAM usable. ++ * init_bootmem sets the global values min_low_pfn, max_low_pfn. 
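
setup_memory() above leans on the usual page-frame rounding helpers: PFN_UP() rounds an address up to the next page frame, PFN_DOWN() truncates, and PFN_PHYS() converts a frame number back to a byte address. A tiny worked example with a purely hypothetical memory layout:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)    (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)  ((x) << PAGE_SHIFT)

int main(void)
{
        /* Hypothetical layout: RAM at 0, kernel image ends at 0x00402345,
         * 64 MB of RAM in total. */
        unsigned long end_of_kernel = 0x00402345UL;
        unsigned long end_of_ram    = 64UL << 20;

        unsigned long free_ram_start_pfn = PFN_UP(end_of_kernel);
        unsigned long max_pfn            = PFN_DOWN(end_of_ram);

        printf("first free pfn %lu (phys 0x%lx), last pfn %lu\n",
               free_ram_start_pfn, PFN_PHYS(free_ram_start_pfn), max_pfn);
        return 0;
}
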
++ */ ++ bootmap_size = init_bootmem_node(NODE_DATA(0), free_ram_start_pfn, ++ ram_start_pfn, max_low_pfn); ++ free_bootmem(PFN_PHYS(free_ram_start_pfn), ++ (max_low_pfn - free_ram_start_pfn) << PAGE_SHIFT); ++ reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size, ++ BOOTMEM_DEFAULT); ++ ++ for_each_memblock(reserved, region) { ++ if (region->size != 0) { ++ printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n", ++ (u32) region->base, (u32) region->size); ++ reserve_bootmem(region->base, region->size, ++ BOOTMEM_DEFAULT); ++ } ++ } ++ return max_low_pfn; ++} ++ ++void __init setup_arch(char **cmdline_p) ++{ ++ struct tag *tags = (struct tag *)&init_tags; ++ struct machine_desc *mdesc; ++ char *from = default_command_line; ++ ++ setup_processor(); ++ mdesc = setup_machine(machine_arch_type); ++ machine_desc = mdesc; ++ machine_name = mdesc->name; ++ ++ if (mdesc->soft_reboot) ++ reboot_setup("s"); ++ ++ if (mdesc->param_offset) ++ tags = phys_to_virt(mdesc->param_offset); ++ ++ if (tags->hdr.tag != ATAG_CORE) ++ tags = (struct tag *)&init_tags; ++ ++ if (tags->hdr.tag == ATAG_CORE) { ++ if (meminfo.nr_banks != 0) ++ squash_mem_tags(tags); ++ parse_tags(tags); ++ } ++ ++ init_mm.start_code = (unsigned long)&_text; ++ init_mm.end_code = (unsigned long)&_etext; ++ init_mm.end_data = (unsigned long)&_edata; ++ init_mm.brk = (unsigned long)&_end; ++ ++ memcpy(boot_command_line, from, COMMAND_LINE_SIZE); ++ boot_command_line[COMMAND_LINE_SIZE - 1] = '\0'; ++ parse_cmdline(cmdline_p, from); ++ ++ /* use generic way to parse */ ++ parse_early_param(); ++ ++ /* setup bootmem allocator */ ++ setup_memory(); ++ ++ strlcpy(command_line, from, COMMAND_LINE_SIZE); ++ *cmdline_p = command_line; ++ ++ paging_init(mdesc); ++ request_standard_resources(&meminfo, mdesc); //- for test only ++ ++#ifdef CONFIG_SMP ++ smp_init_cpus(); ++#endif ++ ++ /* ++ * Set up various architecture-specific pointers ++ */ ++ init_arch_irq = mdesc->init_irq; ++ system_timer = mdesc->timer; ++ if (mdesc->init_machine) ++ mdesc->init_machine(); ++ ++#if defined(CONFIG_VT) ++#if defined(CONFIG_VGA_CONSOLE) ++ conswitchp = &vga_con; ++#elif defined(CONFIG_DUMMY_CONSOLE) ++ conswitchp = &dummy_con; //+ Tom: we will reach here ++#endif ++#endif ++ ++ early_trap_init(); ++} ++ ++/* ++ * cpu_init - initialise one CPU. ++ * ++ * cpu_init dumps the cache information, initialises SMP specific ++ * information. 
++ */ ++ ++void __init cpu_init(void) ++{ ++ unsigned int cpu = smp_processor_id(), tmp = 0; ++ ++ if (cpu >= NR_CPUS) { ++ printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu); ++ BUG(); ++ } ++ ++ if (system_state == SYSTEM_BOOTING) ++ dump_cpu_info(cpu); ++ ++ tmp = 1 << IVB_offESZ; ++#ifdef CONFIG_EVIC ++ tmp |= 1 << IVB_offEVIC; ++#endif ++ SET_IVB(tmp | IVB_BASE); ++ tmp = 0x10003; ++ SET_INT_MASK(tmp); ++ ISB(); ++} ++ ++static int __init topology_init(void) ++{ ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); ++ ++ return 0; ++} ++ ++subsys_initcall(topology_init); ++ ++static int c_show(struct seq_file *m, void *v) ++{ ++ int i; ++ ++ seq_printf(m, "Processor\t: %s %s (id %lu, rev %lu, cfg %lu)\n", ++ proc_info.manufacturer, proc_info.cpu_name, ++ cpu_id, cpu_rev, cpu_cfgid); ++#if defined(CONFIG_AG101_CPU_FREQ_SCALING_MODE) || defined(CONFIG_AG101_CPU_FREQ_FCS) ++ seq_printf(m, "MHz\t\t: %u\n", ag101_cpufreq_get(1) / 1000); ++#endif ++ ++ seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n", ++ CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) * ++ CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE), ++ CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE)); ++ ++ seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n", ++ CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) * ++ CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE), ++ CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE)); ++ ++#if defined(CONFIG_SMP) ++ for_each_online_cpu(i) { ++ seq_printf(m, "Processor\t: %d\n", i); ++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", ++ per_cpu(cpu_data, ++ i).loops_per_jiffy / (500000UL / HZ), ++ (per_cpu(cpu_data, i).loops_per_jiffy / ++ (5000UL / HZ)) % 100); ++ } ++#else /* CONFIG_SMP */ ++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", ++ loops_per_jiffy / (500000 / HZ), ++ (loops_per_jiffy / (5000 / HZ)) % 100); ++#endif ++ ++ /* dump out the processor features */ ++ seq_puts(m, "Features\t: "); ++ ++ for (i = 0; hwcap_str[i]; i++) ++ if (elf_hwcap & (1 << i)) ++ seq_printf(m, "%s ", hwcap_str[i]); ++ ++ seq_puts(m, "\n\n"); ++ seq_printf(m, "Hardware\t: %s\n", elf_platform); ++ ++ return 0; ++} ++ ++static void *c_start(struct seq_file *m, loff_t * pos) ++{ ++ return *pos < 1 ? (void *)1 : NULL; ++} ++ ++static void *c_next(struct seq_file *m, void *v, loff_t * pos) ++{ ++ ++*pos; ++ return NULL; ++} ++ ++static void c_stop(struct seq_file *m, void *v) ++{ ++} ++ ++struct seq_operations cpuinfo_op = { ++ .start = c_start, ++ .next = c_next, ++ .stop = c_stop, ++ .show = c_show ++}; +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/signal.c linux-3.4.110/arch/nds32/kernel/signal.c +--- linux-3.4.110.orig/arch/nds32/kernel/signal.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/signal.c 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,850 @@ ++/* ++ * linux/arch/nds32/kernel/signal.c ++ * ++ * Copyright (C) 1995-2002 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ptrace.h" ++#include "signal.h" ++#include "fpu.h" ++#include "audio.h" ++ ++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) ++ ++/* ++ * For NDS32 syscalls, we encode the syscall number into the instruction. 
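
The encoding referenced in the comment above can be seen in the big-endian constants just below: a fixed syscall opcode with the system call number ORed in after a left shift by 5. The helper below only illustrates that shift-and-OR pattern; the opcode value is copied from the constants in this file, while the call number used here is purely hypothetical.

#include <stdint.h>
#include <stdio.h>

#define SYSCALL_OPCODE  0x6400000bu   /* base pattern from the constants below */
#define SWID_SHIFT      5             /* shift applied to __NR_* below          */

static uint32_t encode_syscall(uint32_t nr)
{
        return SYSCALL_OPCODE | (nr << SWID_SHIFT);
}

int main(void)
{
        uint32_t fake_nr = 119;       /* hypothetical syscall number */

        printf("syscall #%u -> 0x%08x\n",
               (unsigned)fake_nr, (unsigned)encode_syscall(fake_nr));
        return 0;
}
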
++ */ ++#if defined( __NDS32_EL__) ++#define SWI_SYS_SIGRETURN (0xeb0e0a64) ++#define SWI_SYS_RT_SIGRETURN (0xab150a64) ++#define SWI_SYS_RESTART (0x0b000a64) /* syscall __NR_restart_syscall */ ++#define SWI_SYS_RESTART_LWIBI (0x0180af0d) /* lwi.bi $p0, [$sp], 4 */ ++#define SWI_SYS_RESTART_JRP0 (0x0068004a) /* jr $p0 */ ++#elif defined(__NDS32_EB__) ++#define SWI_SYS_SIGRETURN (0x6400000b|(__NR_sigreturn<<5)) ++#define SWI_SYS_RT_SIGRETURN (0x6400000b|(__NR_rt_sigreturn<<5)) ++#define SWI_SYS_RESTART (0x640a000b) /* syscall __NR_restart_syscall */ ++#define SWI_SYS_RESTART_LWIBI (0x0daf8001) /* lwi.bi $p0, [$sp], 4 */ ++#define SWI_SYS_RESTART_JRP0 (0x4a006800) /* jr $p0 */ ++#else ++#error "NDS32, but neither __NDS32_EB__, nor __NDS32_EL__???" ++#endif ++ ++#ifdef CONFIG_FPU ++static struct fpu_struct init_fpuregs = { ++ .fs_regs = {[0 ... 31] = sNAN32}, ++ .fd_regs = {[0 ... 15] = sNAN64}, ++ .fpcsr = FPCSR_INIT ++}; ++#endif ++#ifdef CONFIG_AUDIO ++static struct audio_struct init_audioregs = { ++ .auregs = {[0...31] = NAN32} ++}; ++#endif ++const unsigned long retcodes[2] = { ++ SWI_SYS_SIGRETURN, ++ SWI_SYS_RT_SIGRETURN ++}; ++ ++const unsigned long syscall_restart_code[3] = { ++ SWI_SYS_RESTART, /* syscall __NR_restart_syscall */ ++ SWI_SYS_RESTART_LWIBI, /* lwi.bi $p0, [$sp], 4 */ ++ SWI_SYS_RESTART_JRP0 /* jr $p0 */ ++}; ++ ++static int do_signal(sigset_t * oldset, struct pt_regs *regs, int syscall); ++ ++/* ++ * atomically swap in the new signal mask, and wait for a signal. ++ */ ++asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, ++ old_sigset_t mask, struct pt_regs *regs) ++{ ++ sigset_t blocked; ++ ++ mask &= _BLOCKABLE; ++ current->saved_sigmask = current->blocked; ++ siginitset(&blocked, mask); ++ set_current_blocked(&blocked); ++ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule(); ++ set_thread_flag(TIF_RESTORE_SIGMASK); ++ return -ERESTARTNOHAND; ++} ++ ++asmlinkage int sys_rt_sigsuspend(sigset_t __user * unewset, size_t sigsetsize, ++ struct pt_regs *regs) ++{ ++ sigset_t newset; ++ ++ /* XXX: Don't preclude handling different sized sigset_t's. */ ++ if (sigsetsize != sizeof(sigset_t)) ++ return -EINVAL; ++ ++ if (copy_from_user(&newset, unewset, sizeof(newset))) ++ return -EFAULT; ++ sigdelsetmask(&newset, ~_BLOCKABLE); ++ current->saved_sigmask = current->blocked; ++ set_current_blocked(&newset); ++ current->state = TASK_INTERRUPTIBLE; ++ schedule(); ++ set_thread_flag(TIF_RESTORE_SIGMASK); ++ return -ERESTARTNOHAND; ++} ++ ++asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user * act, ++ struct old_sigaction __user * oact) ++{ ++ struct k_sigaction new_ka, old_ka; ++ int ret; ++ ++ if (act) { ++ old_sigset_t mask; ++ if (!access_ok(VERIFY_READ, act, sizeof(*act)) || ++ __get_user(new_ka.sa.sa_handler, &act->sa_handler) || ++ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) { ++ ++ return -EFAULT; ++ } ++ __get_user(new_ka.sa.sa_flags, &act->sa_flags); ++ __get_user(mask, &act->sa_mask); ++ ++ siginitset(&new_ka.sa.sa_mask, mask); ++ } ++ ++ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); ++ ++ if (!ret && oact) { ++ ++ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || ++ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || ++ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) { ++ ++ return -EFAULT; ++ } ++ __put_user(old_ka.sa.sa_flags, &oact->sa_flags); ++ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); ++ } ++ ++ return ret; ++} ++ ++/* ++ * Auxiliary signal frame. 
This saves stuff like FP state. ++ * The layout of this structure is not part of the user ABI. ++ */ ++struct aux_sigframe { ++}; ++ ++/* ++ * Do a signal return; undo the signal stack. These are aligned to 64-bit. ++ */ ++struct sigframe { ++ struct sigcontext sc; ++ unsigned long extramask[_NSIG_WORDS - 1]; ++ unsigned long retcode; ++ struct aux_sigframe aux __attribute__ ((aligned(8))); ++}; ++ ++struct rt_sigframe { ++ struct siginfo __user *pinfo; ++ void __user *puc; ++ struct siginfo info; ++ struct ucontext uc; ++ unsigned long retcode; ++ struct aux_sigframe aux __attribute__ ((aligned(8))); ++}; ++ ++#ifdef CONFIG_FPU ++static inline int restore_sigcontext_fpu(struct pt_regs *regs, ++ struct sigcontext __user * sc) ++{ ++ struct task_struct *tsk = current; ++ unsigned long used_math_flag; ++ int ret = 0; ++ ++ if (!(GET_FUCOP_EXIST() & FUCOP_EXIST_mskCP0ISFPU)) ++ return 0; ++ ++ __get_user_error(used_math_flag, &sc->used_math_flag, ret); ++ ++ if (!used_math_flag) ++ return 0; ++ ++ set_used_math(); ++ ++#ifdef CONFIG_UNLAZY_FPU ++ clear_fpu(regs); ++#else ++ preempt_disable(); ++ if (current == last_task_used_math) { ++ last_task_used_math = NULL; ++ release_fpu(regs); ++ } ++ preempt_enable(); ++#endif ++ ++ return __copy_from_user(&tsk->thread.fpu, &sc->fpu, ++ sizeof(struct fpu_struct)); ++} ++ ++static inline int setup_sigcontext_fpu(struct pt_regs *regs, ++ struct sigcontext __user * sc) ++{ ++ struct task_struct *tsk = current; ++ int ret = 0; ++ ++ if (!(GET_FUCOP_EXIST() & FUCOP_EXIST_mskCP0ISFPU)) ++ return 0; ++ ++ __put_user_error(used_math(), &sc->used_math_flag, ret); ++ ++ if (!used_math()) ++ return ret; ++ ++ preempt_disable(); ++#ifdef CONFIG_UNLAZY_FPU ++ unlazy_fpu(tsk); ++#else ++ if (last_task_used_math != NULL) ++ save_fpu(last_task_used_math); ++#endif ++ ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu, ++ sizeof(struct fpu_struct)); ++ ++ grab_fpu(task_pt_regs(tsk)); ++ fpload(&init_fpuregs); ++#ifndef CONFIG_UNLAZY_FPU //Lazy FPU ++ last_task_used_math = current; ++#endif ++ preempt_enable(); ++ return ret; ++} ++#else ++static inline int ++restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user * sc) ++{ ++ return 0; ++} ++ ++static inline int ++setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user * sc) ++{ ++ return 0; ++} ++#endif /* CONFIG_FPU */ ++ ++#ifdef CONFIG_AUDIO ++static inline int restore_sigcontext_audio(struct pt_regs *regs, ++ struct sigcontext __user * sc) ++{ ++ struct task_struct *tsk = current; ++ unsigned long used_audio_flag; ++ int ret = 0; ++ ++ if (!(GET_MSC_CFG() & MSC_CFG_mskAUDIO)) ++ return 0; ++ ++ __get_user_error(used_audio_flag, &sc->used_audio_flag, ret); ++ ++ if (!used_audio_flag) ++ return 0; ++ ++ set_tsk_thread_flag(tsk, TIF_USEDAUDIO); ++#ifdef CONFIG_UNLAZY_AUDIO ++ clear_audio(regs); ++#else ++ preempt_disable(); ++ if (current == last_task_used_audio) { ++ last_task_used_audio = NULL; ++ clear_audio(regs); ++ } ++ preempt_enable(); ++#endif ++ ++ return __copy_from_user(&tsk->thread.audio, &sc->audio, ++ sizeof(struct audio_struct)); ++} ++ ++static inline int setup_sigcontext_audio(struct pt_regs *regs, ++ struct sigcontext __user * sc) ++{ ++ struct task_struct *tsk = current; ++ int ret = 0; ++ ++ if (!(GET_MSC_CFG() & MSC_CFG_mskAUDIO)) ++ return 0; ++ ++ __put_user_error(test_tsk_thread_flag(tsk, TIF_USEDAUDIO), ++ &sc->used_audio_flag, ret); ++ ++ if (!test_tsk_thread_flag(tsk, TIF_USEDAUDIO)) ++ return ret; ++ ++ preempt_disable(); ++#ifdef CONFIG_UNLAZY_AUDIO ++ 
unlazy_audio(tsk); ++#else ++ if (NULL != last_task_used_audio) { ++ save_audio(tsk); ++ } ++#endif ++ ret = __copy_to_user(&sc->audio, &tsk->thread.audio, ++ sizeof(struct audio_struct)); ++ ++ grab_audio(task_pt_regs(tsk)); ++ audioload(&init_audioregs); ++#ifndef CONFIG_UNLAZY_AUDIO //Lazy audio ++ last_task_used_audio = current; ++#endif ++ preempt_enable(); ++ return ret; ++} ++#else /*CONFIG_AUDIO */ ++static inline int ++restore_sigcontext_audio(struct pt_regs *regs, struct sigcontext __user * sc) ++{ ++ return 0; ++} ++ ++static inline int ++setup_sigcontext_audio(struct pt_regs *regs, struct sigcontext __user * sc) ++{ ++ return 0; ++} ++#endif ++ ++static int restore_sigcontext(struct pt_regs *regs, ++ struct sigcontext __user * sc, ++ struct aux_sigframe __user * aux) ++{ ++ int err = 0; ++ ++ __get_user_error(regs->NDS32_r0, &sc->nds32_r0, err); ++ __get_user_error(regs->NDS32_r1, &sc->nds32_r1, err); ++ __get_user_error(regs->NDS32_r2, &sc->nds32_r2, err); ++ __get_user_error(regs->NDS32_r3, &sc->nds32_r3, err); ++ __get_user_error(regs->NDS32_r4, &sc->nds32_r4, err); ++ __get_user_error(regs->NDS32_r5, &sc->nds32_r5, err); ++ __get_user_error(regs->NDS32_r6, &sc->nds32_r6, err); ++ __get_user_error(regs->NDS32_r7, &sc->nds32_r7, err); ++ __get_user_error(regs->NDS32_r8, &sc->nds32_r8, err); ++ __get_user_error(regs->NDS32_r9, &sc->nds32_r9, err); ++ __get_user_error(regs->NDS32_r10, &sc->nds32_r10, err); ++ __get_user_error(regs->NDS32_r11, &sc->nds32_r11, err); ++ __get_user_error(regs->NDS32_r12, &sc->nds32_r12, err); ++ __get_user_error(regs->NDS32_r13, &sc->nds32_r13, err); ++ __get_user_error(regs->NDS32_r14, &sc->nds32_r14, err); ++ __get_user_error(regs->NDS32_r15, &sc->nds32_r15, err); ++ __get_user_error(regs->NDS32_r16, &sc->nds32_r16, err); ++ __get_user_error(regs->NDS32_r17, &sc->nds32_r17, err); ++ __get_user_error(regs->NDS32_r18, &sc->nds32_r18, err); ++ __get_user_error(regs->NDS32_r19, &sc->nds32_r19, err); ++ __get_user_error(regs->NDS32_r20, &sc->nds32_r20, err); ++ ++ __get_user_error(regs->NDS32_r21, &sc->nds32_r21, err); ++ __get_user_error(regs->NDS32_r22, &sc->nds32_r22, err); ++ __get_user_error(regs->NDS32_r23, &sc->nds32_r23, err); ++ __get_user_error(regs->NDS32_r24, &sc->nds32_r24, err); ++ __get_user_error(regs->NDS32_r25, &sc->nds32_r25, err); ++ __get_user_error(regs->NDS32_fp, &sc->nds32_fp, err); ++ __get_user_error(regs->NDS32_gp, &sc->nds32_gp, err); ++ __get_user_error(regs->NDS32_lp, &sc->nds32_lr, err); ++ __get_user_error(regs->NDS32_sp, &sc->nds32_sp, err); ++ ++ __get_user_error(regs->NDS32_ipc, &sc->nds32_ipc, err); ++#if defined(CONFIG_HWZOL) ++ __get_user_error(regs->NDS32_lc, &sc->zol.nds32_lc, err); ++ __get_user_error(regs->NDS32_le, &sc->zol.nds32_le, err); ++ __get_user_error(regs->NDS32_lb, &sc->zol.nds32_lb, err); ++#endif ++ ++ err |= !valid_user_regs(regs); ++ err |= restore_sigcontext_audio(regs, sc); ++ err |= restore_sigcontext_fpu(regs, sc); ++ ++ return err; ++} ++ ++asmlinkage int sys_sigreturn(struct pt_regs *regs) ++{ ++ struct sigframe __user *frame; ++ sigset_t set; ++ ++ /* Always make any pending restarted system calls return -EINTR */ ++ current_thread_info()->restart_block.fn = do_no_restart_syscall; ++ ++ /* ++ * Since we stacked the signal on a 64-bit boundary, ++ * then 'sp' should be word aligned here. If it's ++ * not, then the user is trying to mess with us. 
++ */ ++ if (regs->NDS32_sp & 7) ++ goto badframe; ++ ++ frame = (struct sigframe __user *)regs->NDS32_sp; ++ ++ if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) ++ goto badframe; ++ ++ if (__get_user(set.sig[0], &frame->sc.oldmask) ++ || (_NSIG_WORDS > 1 ++ && __copy_from_user(&set.sig[1], &frame->extramask, ++ sizeof(frame->extramask)))) ++ goto badframe; ++ ++ sigdelsetmask(&set, ~_BLOCKABLE); ++ spin_lock_irq(¤t->sighand->siglock); ++ current->blocked = set; ++ recalc_sigpending(); ++ spin_unlock_irq(¤t->sighand->siglock); ++ ++ if (restore_sigcontext(regs, &frame->sc, &frame->aux)) ++ goto badframe; ++ ++ return regs->NDS32_r0; ++ ++badframe: ++ force_sig(SIGSEGV, current); ++ return 0; ++} ++ ++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) ++{ ++ struct rt_sigframe __user *frame; ++ sigset_t set; ++ ++ /* Always make any pending restarted system calls return -EINTR */ ++ current_thread_info()->restart_block.fn = do_no_restart_syscall; ++ ++ /* ++ * Since we stacked the signal on a 64-bit boundary, ++ * then 'sp' should be word aligned here. If it's ++ * not, then the user is trying to mess with us. ++ */ ++ if (regs->NDS32_sp & 7) ++ goto badframe; ++ ++ frame = (struct rt_sigframe __user *)regs->NDS32_sp; ++ ++ if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) ++ goto badframe; ++ ++ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ++ goto badframe; ++ ++ sigdelsetmask(&set, ~_BLOCKABLE); ++ spin_lock_irq(¤t->sighand->siglock); ++ current->blocked = set; ++ recalc_sigpending(); ++ spin_unlock_irq(¤t->sighand->siglock); ++ ++ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux)) ++ goto badframe; ++ ++ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->NDS32_sp) == ++ -EFAULT) ++ goto badframe; ++ ++ return regs->NDS32_r0; ++ ++badframe: ++ force_sig(SIGSEGV, current); ++ return 0; ++} ++ ++static int setup_sigcontext(struct sigcontext __user * sc, ++ struct aux_sigframe __user * aux, ++ struct pt_regs *regs, unsigned long mask) ++{ ++ int err = 0; ++ ++ err |= setup_sigcontext_fpu(regs, sc); ++ err |= setup_sigcontext_audio(regs, sc); ++ ++ __put_user_error(regs->NDS32_r0, &sc->nds32_r0, err); ++ __put_user_error(regs->NDS32_r1, &sc->nds32_r1, err); ++ __put_user_error(regs->NDS32_r2, &sc->nds32_r2, err); ++ __put_user_error(regs->NDS32_r3, &sc->nds32_r3, err); ++ __put_user_error(regs->NDS32_r4, &sc->nds32_r4, err); ++ __put_user_error(regs->NDS32_r5, &sc->nds32_r5, err); ++ __put_user_error(regs->NDS32_r6, &sc->nds32_r6, err); ++ __put_user_error(regs->NDS32_r7, &sc->nds32_r7, err); ++ __put_user_error(regs->NDS32_r8, &sc->nds32_r8, err); ++ __put_user_error(regs->NDS32_r9, &sc->nds32_r9, err); ++ __put_user_error(regs->NDS32_r10, &sc->nds32_r10, err); ++ __put_user_error(regs->NDS32_r11, &sc->nds32_r11, err); ++ __put_user_error(regs->NDS32_r12, &sc->nds32_r12, err); ++ __put_user_error(regs->NDS32_r13, &sc->nds32_r13, err); ++ __put_user_error(regs->NDS32_r14, &sc->nds32_r14, err); ++ __put_user_error(regs->NDS32_r15, &sc->nds32_r15, err); ++ __put_user_error(regs->NDS32_r16, &sc->nds32_r16, err); ++ __put_user_error(regs->NDS32_r17, &sc->nds32_r17, err); ++ __put_user_error(regs->NDS32_r18, &sc->nds32_r18, err); ++ __put_user_error(regs->NDS32_r19, &sc->nds32_r19, err); ++ __put_user_error(regs->NDS32_r20, &sc->nds32_r20, err); ++ ++ __put_user_error(regs->NDS32_r21, &sc->nds32_r21, err); ++ __put_user_error(regs->NDS32_r22, &sc->nds32_r22, err); ++ __put_user_error(regs->NDS32_r23, &sc->nds32_r23, err); ++ 
__put_user_error(regs->NDS32_r24, &sc->nds32_r24, err); ++ __put_user_error(regs->NDS32_r25, &sc->nds32_r25, err); ++ __put_user_error(regs->NDS32_fp, &sc->nds32_fp, err); ++ __put_user_error(regs->NDS32_gp, &sc->nds32_gp, err); ++ __put_user_error(regs->NDS32_lp, &sc->nds32_lr, err); ++ __put_user_error(regs->NDS32_sp, &sc->nds32_sp, err); ++ __put_user_error(regs->NDS32_ipc, &sc->nds32_ipc, err); ++#if defined(CONFIG_HWZOL) ++ __get_user_error(regs->NDS32_lc, &sc->zol.nds32_lc, err); ++ __get_user_error(regs->NDS32_le, &sc->zol.nds32_le, err); ++ __get_user_error(regs->NDS32_lb, &sc->zol.nds32_lb, err); ++#endif ++ ++ __put_user_error(current->thread.trap_no, &sc->trap_no, err); ++ __put_user_error(current->thread.error_code, &sc->error_code, err); ++ __put_user_error(current->thread.address, &sc->fault_address, err); ++ __put_user_error(mask, &sc->oldmask, err); ++ ++ return err; ++} ++ ++static inline void __user *get_sigframe(struct k_sigaction *ka, ++ struct pt_regs *regs, int framesize) ++{ ++ unsigned long sp = regs->NDS32_sp; ++ void __user *frame; ++ ++ /* ++ * This is the X/Open sanctioned signal stack switching. ++ */ ++ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) ++ sp = current->sas_ss_sp + current->sas_ss_size; ++ ++ /* ++ * ATPCS B01 mandates 8-byte alignment ++ */ ++ frame = (void __user *)((sp - framesize) & ~7); ++ ++ /* ++ * Check that we can actually write to the signal frame. ++ */ ++ if (!access_ok(VERIFY_WRITE, frame, framesize)) ++ frame = NULL; ++ ++ return frame; ++} ++ ++static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, ++ unsigned long __user * rc, void __user * frame, ++ int usig) ++{ ++ unsigned long handler = (unsigned long)ka->sa.sa_handler; ++ unsigned long retcode; ++ struct sigframe *sf = (struct sigframe *)frame; ++ ++ /* ++ * Maybe we need to deliver a 32-bit signal to a 26-bit task. ++ */ ++ if (ka->sa.sa_flags & SA_RESTORER) { ++ retcode = (unsigned long)ka->sa.sa_restorer; ++ } else { ++ ++ unsigned int idx = 0; //thumb; ++ unsigned long line_size = CACHE_LINE_SIZE(ICACHE); ++ unsigned long start, end; ++ ++ if (ka->sa.sa_flags & SA_SIGINFO) ++ idx++; ++ ++ if (__put_user(retcodes[idx], rc)) ++ return 1; ++ ++ /* ++ * Ensure that the instruction cache sees ++ * the return code written onto the stack. ++ */ ++ start = (unsigned long)rc & ~(line_size - 1); ++ end = start + line_size; ++ flush_icache_range(start, end); ++ ++ retcode = KERN_SIGRETURN_CODE + (idx << 2); ++ } ++ ++ regs->NDS32_r0 = usig; ++ regs->NDS32_r1 = 0; ++ regs->NDS32_r2 = (unsigned long)&sf->sc; ++ regs->NDS32_sp = (unsigned long)frame; ++ regs->NDS32_lp = retcode; ++ regs->NDS32_ipc = handler; ++ /* Also store handler address in r15 for updating GP in the handler. 
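
Two small address calculations carry most of the weight in get_sigframe() and setup_return() above: the frame is pushed below the stack pointer and rounded down to an 8-byte boundary, and the freshly written return code is flushed from the instruction cache one line at a time. The numbers below are invented; only the arithmetic mirrors the code above.

#include <stdio.h>

int main(void)
{
        /* Hypothetical user stack pointer, frame size and I-cache line. */
        unsigned long sp        = 0xbfffe9acUL;
        unsigned long framesize = 648;
        unsigned long line_size = 32;

        /* get_sigframe(): place the frame below sp, 8-byte aligned. */
        unsigned long frame = (sp - framesize) & ~7UL;

        /* setup_return(): flush the I-cache line that now holds the
         * retcode written onto the stack (say it sits at the frame base). */
        unsigned long rc    = frame;
        unsigned long start = rc & ~(line_size - 1);
        unsigned long end   = start + line_size;

        printf("frame at 0x%08lx, flush 0x%08lx-0x%08lx\n", frame, start, end);
        return 0;
}
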
*/ ++ regs->NDS32_r15 = handler; ++ ++ return 0; ++} ++ ++static int setup_frame(int usig, struct k_sigaction *ka, sigset_t * set, ++ struct pt_regs *regs) ++{ ++ struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); ++ int err = 0; ++ ++ if (!frame) ++ return 1; ++ ++ err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]); ++ ++ if (_NSIG_WORDS > 1) { ++ err |= __copy_to_user(frame->extramask, &set->sig[1], ++ sizeof(frame->extramask)); ++ } ++ ++ if (err == 0) ++ err = setup_return(regs, ka, &frame->retcode, frame, usig); ++ ++ return err; ++} ++ ++static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t * info, ++ sigset_t * set, struct pt_regs *regs) ++{ ++ struct rt_sigframe __user *frame = ++ get_sigframe(ka, regs, sizeof(*frame)); ++ stack_t stack; ++ int err = 0; ++ ++ if (!frame) ++ return 1; ++ ++ __put_user_error(&frame->info, &frame->pinfo, err); ++ __put_user_error(&frame->uc, &frame->puc, err); ++ err |= copy_siginfo_to_user(&frame->info, info); ++ ++ __put_user_error(0, &frame->uc.uc_flags, err); ++ __put_user_error(NULL, &frame->uc.uc_link, err); ++ ++ memset(&stack, 0, sizeof(stack)); ++ stack.ss_sp = (void __user *)current->sas_ss_sp; ++ stack.ss_flags = sas_ss_flags(regs->NDS32_sp); //NDS32_sp ++ stack.ss_size = current->sas_ss_size; ++ err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack)); ++ ++ err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux, ++ regs, set->sig[0]); ++ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); ++ ++ if (err == 0) ++ err = setup_return(regs, ka, &frame->retcode, frame, usig); ++ ++ if (err == 0) { ++ /* ++ * For realtime signals we must also set the second and third ++ * arguments for the signal handler. ++ * -- Peter Maydell 2000-12-06 ++ */ ++ regs->NDS32_r1 = (unsigned long)&frame->info; ++ regs->NDS32_r2 = (unsigned long)&frame->uc; ++ } ++ ++ return err; ++} ++ ++static inline void setup_restart_syscall(struct pt_regs *regs) ++{ ++ regs->NDS32_r0 = regs->NDS32_ORIG_r0; ++ regs->NDS32_ipc -= 4; ++} ++ ++/* ++ * OK, we're invoking a handler ++ */ ++static void handle_signal(unsigned long sig, struct k_sigaction *ka, ++ siginfo_t * info, sigset_t * oldset, ++ struct pt_regs *regs, int syscall) ++{ ++ struct thread_info *thread = current_thread_info(); ++ struct task_struct *tsk = current; ++ int usig = sig; ++ int ret; ++ ++ /* ++ * If we were from a system call, check for system call restarting... ++ */ ++ if (syscall) { ++ switch (regs->NDS32_r0) { ++ case -ERESTART_RESTARTBLOCK: ++ case -ERESTARTNOHAND: ++ regs->NDS32_r0 = -EINTR; ++ break; ++ case -ERESTARTSYS: ++ if (!(ka->sa.sa_flags & SA_RESTART)) { ++ regs->NDS32_r0 = -EINTR; ++ break; ++ } ++ /* fallthrough */ ++ case -ERESTARTNOINTR: ++ setup_restart_syscall(regs); ++ } ++ } ++ ++ /* ++ * translate the signal ++ */ ++ if (usig < 32 && thread->exec_domain ++ && thread->exec_domain->signal_invmap) ++ usig = thread->exec_domain->signal_invmap[usig]; ++ ++ /* ++ * Set up the stack frame ++ */ ++ if (ka->sa.sa_flags & SA_SIGINFO) ++ ret = setup_rt_frame(usig, ka, info, oldset, regs); ++ else ++ ret = setup_frame(usig, ka, oldset, regs); ++ ++ /* ++ * Check that the resulting registers are actually sane. ++ */ ++ ret |= !valid_user_regs(regs); ++ ++ /* ++ * Block the signal if we were unsuccessful. 
++ */ ++ if (ret == 0) { ++ spin_lock_irq(&tsk->sighand->siglock); ++ sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask); ++ if (!(ka->sa.sa_flags & SA_NODEFER)) ++ sigaddset(&tsk->blocked, sig); ++ recalc_sigpending(); ++ spin_unlock_irq(&tsk->sighand->siglock); ++#if 0 & defined(CONFIG_HSS) ++ /* COLE: i marked this tempory. ++ It doesn't behave the same as x86 */ ++ /* Clear HSS when entering the signal handler, ++ User should be step into signal handler */ ++ if (regs->NDS32_ipsw & PSW_mskHSS) { ++ regs->NDS32_ipsw &= ~PSW_mskHSS; ++ printk(KERN_INFO "clear for sig %d. pc=0x%08x\n", sig, ++ regs->NDS32_ipc); ++ } ++#endif ++ return; ++ } ++ ++ force_sigsegv(sig, tsk); ++} ++ ++/* ++ * Note that 'init' is a special process: it doesn't get signals it doesn't ++ * want to handle. Thus you cannot kill init even with a SIGKILL even by ++ * mistake. ++ * ++ * Note that we go through the signals twice: once to check the signals that ++ * the kernel can handle, and then we build all the user-level signal handling ++ * stack-frames in one go after that. ++ */ ++asmlinkage int do_signal(sigset_t * oldset, struct pt_regs *regs, int syscall) ++{ ++ struct k_sigaction ka; ++ siginfo_t info; ++ int signr; ++ ++ /* ++ * We want the common case to go fast, which ++ * is why we may in certain cases get here from ++ * kernel mode. Just return without doing anything ++ * if so. ++ */ ++ ++ if (!user_mode(regs)) ++ return 0; ++ ++ if (try_to_freeze()) ++ goto no_signal; ++ ++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) ++ oldset = ¤t->saved_sigmask; ++ ++ signr = get_signal_to_deliver(&info, &ka, regs, NULL); ++ ++ if (signr > 0) { ++ handle_signal(signr, &ka, &info, oldset, regs, syscall); ++ /* ++ * A signal was successfully delivered; the saved ++ * sigmask will have been stored in the signal frame, ++ * and will be restored by sigreturn, so we can simply ++ * clear the TIF_RESTORE_SIGMASK flag. ++ */ ++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) ++ clear_thread_flag(TIF_RESTORE_SIGMASK); ++ return 1; ++ } ++ ++no_signal: ++ /* ++ * No signal to deliver to the process - restart the syscall. 
++ */ ++ if (syscall) { ++ switch (regs->NDS32_r0) { ++ u32 __user *usp; ++ ++ case -ERESTART_RESTARTBLOCK: ++ regs->NDS32_sp -= 4; ++ usp = (u32 __user *) regs->NDS32_sp; ++ ++ if (put_user(regs->NDS32_ipc, usp) == 0) ++ regs->NDS32_ipc = KERN_RESTART_CODE; ++ else { ++ regs->NDS32_sp += 4; ++ force_sigsegv(0, current); ++ } ++ regs->NDS32_r0 = regs->NDS32_ORIG_r0; ++ break; ++ ++ case -ERESTARTNOHAND: ++ case -ERESTARTSYS: ++ case -ERESTARTNOINTR: ++ ++ setup_restart_syscall(regs); ++ break; ++ } ++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { ++ clear_thread_flag(TIF_RESTORE_SIGMASK); ++ sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); ++ } ++ } ++ ++ return 0; ++} ++ ++asmlinkage void do_notify_resume(struct pt_regs *regs, ++ unsigned int thread_flags, int syscall) ++{ ++ if (thread_flags & _TIF_SIGPENDING) ++ do_signal(¤t->blocked, regs, syscall); ++ if (thread_flags & _TIF_NOTIFY_RESUME) { ++ clear_thread_flag(TIF_NOTIFY_RESUME); ++ tracehook_notify_resume(regs); ++ if (current->replacement_session_keyring) ++ key_replace_session_keyring(); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/signal.h linux-3.4.110/arch/nds32/kernel/signal.h +--- linux-3.4.110.orig/arch/nds32/kernel/signal.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/signal.h 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,20 @@ ++/* ++ * linux/arch/arm/kernel/signal.h ++ * ++ * Copyright (C) 2005-2009 Russell King. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++ ++#define RETURN_SYSCALL_BASE (0x2000) ++#define RETURN_SYSCALL_PA_BASE (PHYS_OFFSET | RETURN_SYSCALL_BASE) ++ ++#define KERN_SIGRETURN_CODE (fix_to_virt(FIX_RETURN_SYSCALL)) ++#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(retcodes)) ++ ++extern const unsigned long retcodes[2]; ++extern const unsigned long syscall_restart_code[3]; +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/smp.c linux-3.4.110/arch/nds32/kernel/smp.c +--- linux-3.4.110.orig/arch/nds32/kernel/smp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/smp.c 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,335 @@ ++/* ++ * linux/arch/nds32/kernel/smp.c ++ * ++ * Copyright (C) 2002 ARM Limited, All Rights Reserved. ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++void smp_init_cpus(void) ++{ ++ int i; ++ for (i = 0; i < NR_CPUS; i++) ++ cpu_set(i, cpu_possible_map); ++} ++ ++int setup_profiling_timer(unsigned int multiplier) ++{ ++ return 0; ++} ++ ++struct tlb_data { ++ struct vm_area_struct *vma; ++ unsigned long start; ++ unsigned long end; ++}; ++ ++static void ipi_flush_tlb_all(void *data) ++{ ++ local_flush_tlb_all(); ++} ++ ++void flush_tlb_all(void) ++{ ++ on_each_cpu(ipi_flush_tlb_all, NULL, 1); ++} ++ ++static void ipi_flush_tlb_mm(void *data) ++{ ++ struct mm_struct *mm = (struct mm_struct *)data; ++ local_flush_tlb_mm(mm); ++} ++ ++void flush_tlb_mm(struct mm_struct *mm) ++{ ++ on_each_cpu(ipi_flush_tlb_mm, mm, 1); ++} ++ ++static void ipi_flush_tlb_page(void *data) ++{ ++ struct tlb_data *t = (struct tlb_data *)data; ++ local_flush_tlb_page(t->vma, t->start); ++} ++ ++void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) ++{ ++ struct tlb_data data; ++ ++ data.vma = vma; ++ data.start = addr; ++ on_each_cpu(ipi_flush_tlb_page, &data, 1); ++} ++ ++static void ipi_flush_tlb_range(void *data) ++{ ++ struct tlb_data *t = (struct tlb_data *)data; ++ local_flush_tlb_range(t->vma, t->start, t->end); ++} ++ ++void flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ struct tlb_data data; ++ ++ data.vma = vma; ++ data.start = start; ++ data.end = end; ++ on_each_cpu(ipi_flush_tlb_range, &data, 1); ++} ++ ++static void ipi_flush_tlb_kernel_range(void *data) ++{ ++ struct tlb_data *t = (struct tlb_data *)data; ++ local_flush_tlb_kernel_range(t->start, t->end); ++} ++ ++void flush_tlb_kernel_range(unsigned long start, unsigned long end) ++{ ++ struct tlb_data data; ++ ++ data.start = start; ++ data.end = end; ++ on_each_cpu(ipi_flush_tlb_kernel_range, &data, 1); ++} ++ ++/* IPI implementation */ ++static inline unsigned long read_ipi_trigger(void) ++{ ++ /* AMIC IPI trigger register */ ++ return *(volatile unsigned long *)(AMIC_VA_BASE + 0x40); ++} ++ ++static inline void write_ipi_trigger(const struct cpumask *mask) ++{ ++ unsigned long data = *cpus_addr(*mask); ++ *(volatile unsigned long *)(AMIC_VA_BASE + 0x40) = data; ++} ++ ++static inline void clear_ipi_status(void) ++{ ++ *(volatile unsigned long *)(AMIC_VA_BASE + 0x44) = 0xf; ++ asm("msync store\nisb"); ++} ++ ++static void __init send_IPI_boot(int cpu, struct task_struct *tsk) ++{ ++ extern void secondary_startup(void); ++ unsigned long *ptr = (unsigned long *)(0xc0006000 + (cpu << 9)); ++ ptr[5] = virt_to_phys(secondary_startup); ++ ptr[6] = (unsigned long)task_stack_page(tsk) + THREAD_SIZE - 8; ++ asm("cctl %0, L1D_VA_WB, alevel\n"::"r"(ptr)); ++ asm("cctl %0, L1D_VA_INVAL, alevel\nmsync\ndsb\n"::"r"(ptr)); ++ write_ipi_trigger(get_cpu_mask(cpu)); ++} ++ ++static int __init wait_cpu_boot_done(int cpu) ++{ ++ int i, state; ++ for (i = 0; i < 10000; i++) { ++ state = read_ipi_trigger(); ++ if ((state & (1 << cpu)) == 0) ++ break; ++ udelay(100); ++ } ++ return (i == 10000) ? -1 : 0; ++} ++ ++void __init secondary_start_kernel(void) ++{ ++ unsigned long tmp; ++ unsigned int cpu = smp_processor_id(); ++ ++ atomic_inc(&init_mm.mm_count); ++ current->active_mm = &init_mm; ++ ++ /* ++ * enable cache. 
++ */ ++ ++#ifdef CONFIG_ANDES_PAGE_SIZE_4KB ++ if (CPU_IS_N1213_43U1HA0()) { ++ /* Downsize cache to bypass cache aliasing issue */ ++ ++ if ((CACHE_SET(ICACHE) * CACHE_LINE_SIZE(ICACHE)) > 4096) ++ tmp = 0x02 << SDZ_CTL_offICDZ; ++ ++ if ((CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE)) > 4096) ++ tmp |= 0x02 << SDZ_CTL_offDCDZ; ++ ++ SET_SDZ_CTL(tmp); ++ ISB(); ++// printk("CPU%d enabled cache downsizing.\n", cpu); ++ } ++#endif /* CONFIG_ANDES_PAGE_SIZE_4KB */ ++ ++ tmp = GET_CACHE_CTL(); ++#ifndef CONFIG_CPU_DCACHE_DISABLE ++ tmp |= CACHE_CTL_mskDC_EN; ++#endif ++ ++#ifndef CONFIG_CPU_ICACHE_DISABLE ++ tmp |= CACHE_CTL_mskIC_EN; ++#endif ++ SET_CACHE_CTL(tmp); ++ DSB(); ++ ISB(); ++ ++ preempt_disable(); ++ local_irq_enable(); ++ ++ //assume all of cores runs the same speed ++ //calibrate_delay(); ++ /* store cpu info ot the second cpu */ ++ smp_store_cpu_info(cpu); ++ ++ clear_ipi_status(); ++ cpu_idle(); ++} ++ ++/* ++ * Called by both boot and secondaries to move global data into ++ * per-processor storage. ++ */ ++void __init smp_store_cpu_info(unsigned int cpuid) ++{ ++ struct cpuinfo_nds32 *cpu_info = &per_cpu(cpu_data, cpuid); ++ ++ cpu_info->loops_per_jiffy = loops_per_jiffy; ++} ++ ++/* functions be used by generic layer */ ++void arch_send_call_function_single_ipi(int cpu) ++{ ++ write_ipi_trigger(get_cpu_mask(cpu)); ++} ++ ++void arch_send_call_function_ipi_mask(const struct cpumask *mask) ++{ ++ write_ipi_trigger(mask); ++} ++ ++static void __init smp_boot_one_cpu(int cpu) ++{ ++ struct task_struct *idle; ++ int ret; ++ ++ idle = fork_idle(cpu); ++ if (IS_ERR(idle)) ++ panic(KERN_ERR "Fork failed for CPU %d", cpu); ++ ++ send_IPI_boot(cpu, idle); ++ ret = wait_cpu_boot_done(cpu); ++ if (ret == 0) ++ cpu_set(cpu, cpu_online_map); ++ else ++ put_task_struct(idle); ++} ++ ++static void ipi_timer(void *data) ++{ ++ profile_tick(CPU_PROFILING); ++ update_process_times(user_mode(get_irq_regs())); ++} ++ ++void smp_send_timer(void) ++{ ++ smp_call_function(ipi_timer, NULL, 0); ++} ++ ++static void ipi_stop(void *data) ++{ ++ cpu_clear(smp_processor_id(), cpu_online_map); ++ local_irq_enable(); ++ for (;;) ; ++} ++ ++void smp_send_stop(void) ++{ ++ smp_call_function(ipi_stop, NULL, 0); ++} ++ ++void smp_send_reschedule(int cpu) ++{ ++ write_ipi_trigger(get_cpu_mask(cpu)); ++} ++ ++static void ipi_handler(unsigned int irq, struct irq_desc *desc) ++{ ++ clear_ipi_status(); ++ generic_smp_call_function_single_interrupt(); ++ generic_smp_call_function_interrupt(); ++} ++ ++void __init smp_prepare_cpus(unsigned int max_cpus) ++{ ++ /* ++ * XXX detect how many cores exist ++ */ ++ int i; ++ unsigned int cpu = smp_processor_id(); ++ ++ /* store master core cpu info */ ++ smp_store_cpu_info(cpu); ++ for (i = 0; i < max_cpus; i++) ++ cpu_set(i, cpu_present_map); ++ ++ /* setup IPI handler */ ++ set_irq_chip(32, &dummy_irq_chip); ++ set_irq_chained_handler(32, ipi_handler); ++} ++ ++int __cpuinit __cpu_up(unsigned int cpu) ++{ ++ smp_boot_one_cpu(cpu); ++ return cpu_online(cpu) ? 
0 : -ENOSYS; ++} ++ ++void __init smp_cpus_done(unsigned int max_cpus) ++{ ++ int cpu; ++ unsigned long bogosum = 0; ++ ++ for_each_online_cpu(cpu) ++ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; ++ ++ printk(KERN_INFO "SMP: Total of %d processors activated " ++ "(%lu.%02lu BogoMIPS).\n", ++ num_online_cpus(), ++ bogosum / (500000 / HZ), (bogosum / (5000 / HZ)) % 100); ++ ++ /* now we can set all interruption don't cared */ ++ *(volatile unsigned long *)(AMIC_VA_BASE + 0x4) = 0xffffffff; ++} ++ ++void __init smp_prepare_boot_cpu(void) ++{ ++ unsigned int cpu = smp_processor_id(); ++ unsigned long *ptr = (unsigned long *)(0xc0006000 + (cpu << 9)); ++ ptr[4] = 1; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/stacktrace.c linux-3.4.110/arch/nds32/kernel/stacktrace.c +--- linux-3.4.110.orig/arch/nds32/kernel/stacktrace.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/stacktrace.c 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,41 @@ ++#include ++#include ++void save_stack_trace(struct stack_trace *trace) ++{ ++ save_stack_trace_tsk(current, trace); ++} ++ ++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) ++{ ++ unsigned long *fpn; ++ int skip = trace->skip; ++ int savesched; ++ ++ if (tsk == current) { ++ __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); ++ savesched = 1; ++ } else { ++ fpn = (unsigned long *)thread_saved_fp(tsk); ++ savesched = 0; ++ } ++ ++ while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3) ++ && (fpn >= (unsigned long *)TASK_SIZE)) { ++ unsigned long lpp, fpp; ++ lpp = fpn[0]; ++ fpp = fpn[-1]; ++ if (!__kernel_text_address(lpp)) ++ break; ++ ++ if (savesched || !in_sched_functions(lpp)) { ++ if (skip) { ++ skip--; ++ } else { ++ trace->entries[trace->nr_entries++] = lpp; ++ if (trace->nr_entries >= trace->max_entries) ++ break; ++ } ++ } ++ fpn = (unsigned long *)fpp; ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/sys_nds32.c linux-3.4.110/arch/nds32/kernel/sys_nds32.c +--- linux-3.4.110.orig/arch/nds32/kernel/sys_nds32.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/sys_nds32.c 2016-04-07 10:20:50.946081179 +0200 +@@ -0,0 +1,331 @@ ++/* ++ * linux/arch/nds32/kernel/sys_nds32.c ++ * ++ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c ++ * Copyright (C) 2007 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This file contains various random system calls that ++ * have a non-standard calling sequence on the Linux/nds32 ++ * platform. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, ++ unsigned long new_len, unsigned long flags, ++ unsigned long new_addr); ++ ++struct mmap_arg_struct { ++ unsigned long addr; ++ unsigned long len; ++ unsigned long prot; ++ unsigned long flags; ++ unsigned long fd; ++ unsigned long offset; ++}; ++ ++asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, ++ unsigned long prot, unsigned long flags, ++ unsigned long fd, unsigned long pgoff) ++{ ++ if (pgoff & (~PAGE_MASK >> 12)) ++ return -EINVAL; ++ ++ return sys_mmap_pgoff(addr, len, prot, flags, fd, ++ pgoff >> (PAGE_SHIFT - 12)); ++} ++ ++asmlinkage unsigned long ++sys_nds32_mremap(unsigned long addr, unsigned long old_len, ++ unsigned long new_len, unsigned long flags, ++ unsigned long new_addr) ++{ ++ unsigned long ret = -EINVAL; ++ ++ if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS) ++ goto out; ++ ++ down_write(¤t->mm->mmap_sem); ++ ret = do_mremap(addr, old_len, new_len, flags, new_addr); ++ up_write(¤t->mm->mmap_sem); ++ ++out: ++ return ret; ++} ++ ++/* ++ * Perform the select(nd, in, out, ex, tv) and mmap() system ++ * calls. ++ */ ++ ++struct sel_arg_struct { ++ unsigned long n; ++ fd_set __user *inp, *outp, *exp; ++ struct timeval __user *tvp; ++}; ++ ++asmlinkage int old_select(struct sel_arg_struct __user * arg) ++{ ++ struct sel_arg_struct a; ++ ++ if (copy_from_user(&a, arg, sizeof(a))) ++ return -EFAULT; ++ /* sys_select() does the appropriate kernel locking */ ++ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); ++} ++ ++/* ++ * sys_ipc() is the de-multiplexer for the SysV IPC calls.. ++ * ++ * This is really horribly ugly. ++ */ ++asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, ++ unsigned long third, void __user * ptr, long fifth) ++{ ++ int version, ret; ++ ++ version = call >> 16; /* hack for backward compatibility */ ++ call &= 0xffff; ++ ++ switch (call) { ++ case SEMOP: ++ return sys_semtimedop(first, (struct sembuf __user *)ptr, ++ second, NULL); ++ case SEMTIMEDOP: ++ return sys_semtimedop(first, (struct sembuf __user *)ptr, ++ second, ++ (const struct timespec __user *)fifth); ++ ++ case SEMGET: ++ return sys_semget(first, second, third); ++ case SEMCTL:{ ++ union semun fourth; ++ if (!ptr) ++ return -EINVAL; ++ if (get_user(fourth.__pad, (void __user * __user *)ptr)) ++ return -EFAULT; ++ return sys_semctl(first, second, third, fourth); ++ } ++ ++ case MSGSND: ++ return sys_msgsnd(first, (struct msgbuf __user *)ptr, ++ second, third); ++ case MSGRCV: ++ switch (version) { ++ case 0:{ ++ struct ipc_kludge tmp; ++ if (!ptr) ++ return -EINVAL; ++ if (copy_from_user ++ (&tmp, (struct ipc_kludge __user *)ptr, ++ sizeof(tmp))) ++ return -EFAULT; ++ return sys_msgrcv(first, tmp.msgp, second, ++ tmp.msgtyp, third); ++ } ++ default: ++ return sys_msgrcv(first, ++ (struct msgbuf __user *)ptr, ++ second, fifth, third); ++ } ++ case MSGGET: ++ return sys_msgget((key_t) first, second); ++ case MSGCTL: ++ return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); ++ ++ case SHMAT: ++ switch (version) { ++ default:{ ++ ulong raddr; ++ ret = ++ do_shmat(first, (char __user *)ptr, second, ++ &raddr); ++ if (ret) ++ return ret; ++ return put_user(raddr, (ulong __user *) third); ++ } ++ case 1: /* Of course, we don't support iBCS2! 
*/ ++ return -EINVAL; ++ } ++ case SHMDT: ++ return sys_shmdt((char __user *)ptr); ++ case SHMGET: ++ return sys_shmget(first, second, third); ++ case SHMCTL: ++ return sys_shmctl(first, second, (struct shmid_ds __user *)ptr); ++ default: ++ return -ENOSYS; ++ } ++} ++ ++/* Fork a new task - this creates a new program thread. ++ * This is called indirectly via a small wrapper ++ */ ++asmlinkage int sys_fork(struct pt_regs *regs) ++{ ++ return do_fork(SIGCHLD, regs->NDS32_sp, regs, 0, NULL, NULL); ++} ++ ++/* Clone a task - this clones the calling program thread. ++ * This is called indirectly via a small wrapper ++ */ ++asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, ++ int __user * parent_tidptr, int tls_val, ++ int __user * child_tidptr, struct pt_regs *regs) ++{ ++ if (!newsp) ++ newsp = regs->NDS32_sp; ++ ++ return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, ++ child_tidptr); ++} ++ ++asmlinkage int sys_vfork(struct pt_regs *regs) ++{ ++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->NDS32_sp, regs, ++ 0, NULL, NULL); ++} ++ ++/* sys_execve() executes a new program. ++ * This is called indirectly via a small wrapper ++ */ ++asmlinkage int sys_execve(const char __user * filenamei, ++ const char __user * const __user * argv, ++ const char __user * const __user * envp, ++ struct pt_regs *regs) ++{ ++ int error; ++ char *filename; ++ ++ filename = getname(filenamei); ++ error = PTR_ERR(filename); ++ if (IS_ERR(filename)) ++ goto out; ++ ++ error = do_execve(filename, argv, envp, regs); ++ putname(filename); ++ ++out: ++ return error; ++} ++ ++asmlinkage unsigned long sys_getpagesize(void) ++{ ++ return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */ ++} ++ ++int kernel_execve(const char *filename, const char *const argv[], ++ const char *const envp[]) ++{ ++ struct pt_regs regs; ++ int ret; ++ ++ memset(®s, 0, sizeof(struct pt_regs)); ++ ret = do_execve(filename, argv, envp, ®s); ++ ++ if (ret < 0) { ++ goto out; ++ } ++ /* ++ * Save argc to the register structure for userspace. ++ */ ++ regs.NDS32_r0 = ret; ++ ++ /* ++ * We were successful. We won't be returning to our caller, but ++ * instead to user space by manipulating the kernel stack. 
++ */ ++ asm("addi $r0, %0, %1\n\t" ++ "move $r1, %2\n\t" ++ "move $r2, %3\n\t" ++ "bal memmove\n\t" /* copy regs to top of stack */ ++ "move $r8, #0\n\t" /* not a syscall */ ++ "move $r9, %0\n\t" /* thread structure */ ++ "move $sp, $r0\n\t" /* reposition stack pointer */ ++"b resume_userspace": ++: "r"(current_thread_info()), ++ "ir"(THREAD_SIZE - 8 - sizeof(regs)), ++ "r"(®s), "ir"(sizeof(regs)) ++#ifdef _GCC444 ++: "$r0", "$r1", "$r2", "$r4", "$r5", "$r8", "$r9", "$p0", "$p1", ++ "memory"); ++#else ++: "$r0", "$r1", "$r2", "$r4", "$r5", "$r8", "$r9", "$r26", "$r27", ++ "memory"); ++#endif ++out: ++ return ret; ++} ++ ++EXPORT_SYMBOL(kernel_execve); ++ ++int sys_cacheflush(unsigned int start, unsigned int end) ++{ ++ struct vm_area_struct *vma; ++ ++ vma = find_vma(current->mm, start); ++ if (!vma) ++ return 0; ++ cpu_cache_wbinval_range_check(vma, start, end); ++ return 0; ++} ++ ++asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, ++ loff_t len) ++{ ++ return sys_fadvise64_64(fd, offset, len, advice); ++} ++ ++asmlinkage int nds32_syscall(int number, struct pt_regs *regs) ++{ ++ switch (number) { ++ case 0x7000: /* cacheflush */ ++ return sys_cacheflush(regs->NDS32_r1, regs->NDS32_r2); ++ ++#ifdef CONFIG_OPROFILE ++ case __NR_pfmctl: ++ sys_pfmctl(regs->NDS32_r1, regs->NDS32_r2, regs->NDS32_r3, ++ regs->NDS32_r4); ++ return 0; ++ ++ case __NR_getpfm: ++ return sys_getpfm((struct pcounter __user *)regs->NDS32_r1); ++ ++ case __NR_setpfm: ++ return sys_setpfm(regs->NDS32_r1, regs->NDS32_r2, ++ regs->NDS32_r3, ++ (struct pcounter __user *)regs->NDS32_r4); ++#endif ++ case __NR_wbna: ++ if (regs->NDS32_r1) ++ regs->NDS32_ipsw |= PSW_mskWBNA; ++ else ++ regs->NDS32_ipsw &= ~PSW_mskWBNA; ++ return 0; ++ ++ default: ++ return -ENOSYS; ++ } ++ return -ENOSYS; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/time.c linux-3.4.110/arch/nds32/kernel/time.c +--- linux-3.4.110.orig/arch/nds32/kernel/time.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/time.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,94 @@ ++/* ++ * linux/arch/nds32/kernel/time.c ++ * ++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds ++ * Modifications for ARM (C) 1994-2001 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This file contains the ARM-specific time handling details: ++ * reading the RTC at bootup, etc... ++ * ++ * 1994-07-02 Alan Modra ++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime ++ * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 ++ * "A Kernel Model for Precision Timekeeping" by Dave Mills ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* ++ * Our system timer. 
++ */ ++struct sys_timer *system_timer; ++ ++#ifdef CONFIG_SMP ++unsigned long profile_pc(struct pt_regs *regs) ++{ ++ unsigned long fp, pc = instruction_pointer(regs); ++ ++ if (in_lock_functions(pc)) { ++ fp = regs->NDS32_fp; ++ pc = ((unsigned long *)fp)[-1]; ++ } ++ ++ return pc; ++} ++ ++EXPORT_SYMBOL(profile_pc); ++#endif ++#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) ++static int timer_suspend(void) ++{ ++ if (system_timer->suspend) ++ system_timer->suspend(); ++ ++ return 0; ++} ++ ++static void timer_resume(void) ++{ ++ if (system_timer->resume) ++ system_timer->resume(); ++} ++#else ++#define timer_suspend NULL ++#define timer_resume NULL ++#endif ++#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET ++u32 arch_gettimeoffset(void) ++{ ++ if (system_timer->offset != NULL) ++ return system_timer->offset() * 1000; ++ ++ return 0; ++} ++#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ ++static struct syscore_ops timer_syscore_ops = { ++ .suspend = timer_suspend, ++ .resume = timer_resume, ++}; ++ ++static int __init timer_init_syscore_ops(void) ++{ ++ register_syscore_ops(&timer_syscore_ops); ++ ++ return 0; ++} ++ ++device_initcall(timer_init_syscore_ops); ++ ++void __init time_init(void) ++{ ++ system_timer = machine_desc->timer; ++ system_timer->init(); // link to cpe_timer_init ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/traps.c linux-3.4.110/arch/nds32/kernel/traps.c +--- linux-3.4.110.orig/arch/nds32/kernel/traps.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/traps.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,733 @@ ++/* ++ * linux/arch/nds32/kernel/traps.c ++ * ++ * Copyright (C) 1995-2002 Russell King ++ * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * 'traps.c' handles hardware exceptions after we have saved some state in ++ * 'linux/arch/nds32/lib/traps.S'. Mostly a debugging aid, but will probably ++ * kill the offending process. ++ */ ++/* ============================================================================ ++ * ++ * linux/arch/nds32/kernel/traps.c ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is trap handling for NDS32 core, initial refer from ARM. ++ * ++ * Revision History: ++ * ++ * Jul.16.2007 Initial ported by Tom, revised for KGDB by Harry. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include "ptrace.h" ++#include "signal.h" ++ ++extern void show_pte(struct mm_struct *mm, unsigned long addr); ++ ++#ifdef CONFIG_DEBUG_USER ++unsigned int user_debug; ++ ++static int __init user_debug_setup(char *str) ++{ ++ get_option(&str, &user_debug); ++ return 1; ++} ++ ++__setup("user_debug=", user_debug_setup); ++#endif ++ ++/* ++ * Dump out the contents of some memory nicely... 
++ */ ++void dump_mem(const char *str, unsigned long bottom, unsigned long top) ++{ ++ unsigned long p = bottom & ~31; ++ mm_segment_t fs; ++ int i; ++ ++ /* ++ * We need to switch to kernel mode so that we can use __get_user ++ * to safely read from kernel space. Note that we now dump the ++ * code first, just in case the backtrace kills us. ++ */ ++ fs = get_fs(); ++ set_fs(KERNEL_DS); ++ ++ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); ++ ++ for (p = bottom & ~31; p < top;) { ++ printk("%04lx: ", p & 0xffff); ++ ++ for (i = 0; i < 8; i++, p += 4) { ++ unsigned int val; ++ ++ if (p < bottom || p >= top) ++ printk(" "); ++ else { ++ __get_user(val, (unsigned long *)p); ++ printk("%08x ", val); ++ } ++ } ++ printk("\n"); ++ } ++ ++ set_fs(fs); ++} ++ ++EXPORT_SYMBOL(dump_mem); ++ ++/* These intrinsic functions are not supported in V2 toolchains of BSP321. */ ++#ifndef __NDS32_BASELINE_V2__ ++#define DEBUG_TLB_CACHE ++#endif ++#ifdef DEBUG_TLB_CACHE ++/* This is the number of TLB entries. User should change it if necessary. */ ++#define TLB_NUM 128 ++unsigned int tlb_misc_new[TLB_NUM], tlb_vpn_new[TLB_NUM], tlb_data_new[TLB_NUM]; ++/* To get the whole TLB contents once it uses va=0x0 */ ++void dump_tlb(unsigned long va) ++{ ++ unsigned int rd_num, tlb_vpn, tlb_misc, tlb_data, mmu_cfg, tlb_num; ++ ++ /* save tlb system registers */ ++ tlb_vpn = __nds32__mfsr(NDS32_SR_TLB_VPN); ++ tlb_misc = __nds32__mfsr(NDS32_SR_TLB_MISC); ++ tlb_data = __nds32__mfsr(NDS32_SR_TLB_DATA); ++ mmu_cfg = __nds32__mfsr(NDS32_SR_MMU_CFG); ++ tlb_num = ++ (((mmu_cfg & MMU_CFG_mskTBW) >> MMU_CFG_offTBW) + ++ 1) * (1 << (((mmu_cfg & MMU_CFG_mskTBS) >> MMU_CFG_offTBS) + 2)); ++ ++ for (rd_num = 0; rd_num < tlb_num; rd_num++) { ++ /* read tlb entry with index */ ++ __nds32__tlbop_trd(rd_num); ++ __nds32__dsb(); ++ tlb_vpn_new[rd_num] = __nds32__mfsr(NDS32_SR_TLB_VPN); ++ tlb_misc_new[rd_num] = __nds32__mfsr(NDS32_SR_TLB_MISC); ++ tlb_data_new[rd_num] = __nds32__mfsr(NDS32_SR_TLB_DATA); ++ } ++ ++ /* restore tlb system registers */ ++ __nds32__mtsr(tlb_vpn, NDS32_SR_TLB_VPN); ++ __nds32__mtsr(tlb_misc, NDS32_SR_TLB_MISC); ++ __nds32__dsb(); ++ ++ printk("cur VPN:%08x, MISC:%08x, DATA:%08x\n", tlb_vpn, tlb_misc, ++ tlb_data); ++ /* to read out all the data */ ++ for (rd_num = 0; rd_num < tlb_num; rd_num++) { ++ /*unsigned int vpn = tlb_vpn_new[rd_num] & PAGE_MASK; */ ++ if (tlb_data_new[rd_num] & 0x1) ++ if (va == 0x0 ++ || (va != 0x0 ++ && (tlb_vpn_new[rd_num] == (va & PAGE_MASK)))) ++ printk ++ ("idx:0x%08x, VPN:%08x, MISC:%08x, DATA:%08x\n", ++ rd_num, tlb_vpn_new[rd_num], ++ tlb_misc_new[rd_num], ++ tlb_data_new[rd_num]); ++ } ++} ++ ++EXPORT_SYMBOL(dump_tlb); ++ ++struct cache_element { ++ unsigned int pa; ++ unsigned int wd; ++ unsigned int cacheline[8]; ++ unsigned char dirty; ++ unsigned char valid; ++ unsigned char lock; ++}; ++ ++/* This is the number of cache entries. User should change it if necessary. 
*/ ++#define CACHE_SET_NUM 0x100 ++#define CACHE_WAY_NUM 0x4 ++volatile struct cache_element ce[CACHE_SET_NUM][CACHE_WAY_NUM]; ++#define CCTL_mskDIRTY 0x400000 ++#define CCTL_offDIRTY 22 ++#define CCTL_mskVALID 0x2 ++#define CCTL_offVALID 1 ++#define CCTL_mskLOCK 0x1 ++#define CCTL_offLOCK 0 ++#define CCTL_mskTAG 0x3ffffc ++#define CCTL_offTAG 0x2 ++extern unsigned long va2idx(unsigned long va, unsigned int cache_type, ++ unsigned long *way_offset); ++#include ++extern struct cache_info L1_cache_info[2]; ++void dump_cache(unsigned int cache_type) ++{ ++ volatile unsigned long idx, way, ra, tag, i; ++ unsigned long sets, ways, line_size, set_bits, way_bits, line_bits, ++ way_offset; ++ ++ ways = L1_cache_info[DCACHE].ways; ++ sets = L1_cache_info[DCACHE].sets; ++ set_bits = L1_cache_info[cache_type].set_bits; ++ way_bits = L1_cache_info[cache_type].way_bits; ++ line_bits = L1_cache_info[cache_type].line_bits; ++ line_size = L1_cache_info[cache_type].line_size; ++ way_offset = set_bits + line_bits; ++ ++ if (cache_type != ICACHE && cache_type != DCACHE) { ++ printk("%s not supported cache_type:%x\n", __func__, ++ cache_type); ++ return; ++ } ++ ++ /* NDS32_CCTL_L1I_IX_RTAG Read tag L1I cache */ ++ /* NDS32_CCTL_L1I_IX_RWD Read word data L1I cache */ ++ ++ for (idx = 0; idx < sets; idx++) { ++ for (way = 0; way < ways; way++) { ++ ra = (way << way_offset) | (idx << line_bits); ++ if (cache_type == ICACHE) ++ tag = ++ __nds32__cctlidx_read ++ (NDS32_CCTL_L1I_IX_RTAG, ra); ++ else ++ tag = ++ __nds32__cctlidx_read ++ (NDS32_CCTL_L1D_IX_RTAG, ra); ++ ce[idx][way].dirty = ++ (unsigned char)(tag & CCTL_mskDIRTY) >> ++ CCTL_offDIRTY; ++ ce[idx][way].valid = ++ (unsigned char)(tag & CCTL_mskVALID) >> ++ CCTL_offVALID; ++ ce[idx][way].lock = ++ (unsigned char)(tag & CCTL_mskLOCK) >> CCTL_offLOCK; ++ ce[idx][way].pa = ++ (tag & CCTL_mskTAG) >> CCTL_offTAG << PAGE_SHIFT; ++ for (i = 0; i < line_size / 4; i++) { ++ if (cache_type == ICACHE) ++ ce[idx][way].cacheline[i] = ++ __nds32__cctlidx_read ++ (NDS32_CCTL_L1I_IX_RWD, ++ (ra | i << 2)); ++ else ++ ce[idx][way].cacheline[i] = ++ __nds32__cctlidx_read ++ (NDS32_CCTL_L1D_IX_RWD, ++ (ra | i << 2)); ++ } ++ } ++ } ++ printk("dump %s\n", cache_type ? 
"DCACHE" : "ICACHE"); ++ printk("%8s %4s %4s %1s %1s %1s %8s %8s %8s %8s %8s %8s %8s %8s\n", ++ "ADDRESS", "SET", "WAY", "V", "D", "L", "00", "04", "08", "0C", ++ "10", "14", "18", "1C"); ++ for (idx = 0; idx < sets; idx++) { ++ for (way = 0; way < ways; way++) { ++ printk("%08lx %04lx %04lx %1u %1u %1u ", ++ ce[idx][way].pa + ++ ((idx * line_size) % PAGE_SIZE), idx, way, ++ ce[idx][way].valid, ce[idx][way].dirty, ++ ce[idx][way].lock); ++ for (i = 0; i < line_size / 4; i++) { ++ printk("%08x ", ce[idx][way].cacheline[i]); ++ } ++ printk("\n"); ++ } ++ } ++} ++ ++EXPORT_SYMBOL(dump_cache); ++ ++void dump_cache_va(unsigned int cache_type, unsigned int va) ++{ ++ volatile struct cache_element cache_entry[4]; ++ volatile unsigned long idx, way, tag, ra, i; ++ unsigned long ways, line_size, set_bits, line_bits, way_offset; ++ ++ ways = L1_cache_info[cache_type].ways; ++ line_size = CACHE_LINE_SIZE(cache_type); ++ set_bits = L1_cache_info[cache_type].set_bits; ++ line_bits = L1_cache_info[cache_type].line_bits; ++ ++ if (cache_type != ICACHE && cache_type != DCACHE) { ++ printk("%s not supported cache_type:%x\n", __func__, ++ cache_type); ++ return; ++ } ++ idx = va2idx(va, cache_type, &way_offset); ++ //idx = (va & (((1 << set_bits) - 1) << line_bits)) >> line_bits; ++ for (way = 0; way < ways; way++) { ++ ra = (way << way_offset) | idx; ++ if (cache_type == ICACHE) ++ tag = __nds32__cctlidx_read(NDS32_CCTL_L1I_IX_RTAG, ra); ++ else ++ tag = __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RTAG, ra); ++ cache_entry[way].dirty = ++ (unsigned char)(tag & CCTL_mskDIRTY) >> CCTL_offDIRTY; ++ cache_entry[way].valid = ++ (unsigned char)(tag & CCTL_mskVALID) >> CCTL_offVALID; ++ cache_entry[way].lock = ++ (unsigned char)(tag & CCTL_mskLOCK) >> CCTL_offLOCK; ++ cache_entry[way].pa = ++ (tag & CCTL_mskTAG) >> CCTL_offTAG << PAGE_SHIFT; ++ for (i = 0; i < line_size / 4; i++) { ++ if (cache_type == ICACHE) ++ cache_entry[way].cacheline[i] = ++ __nds32__cctlidx_read(NDS32_CCTL_L1I_IX_RWD, ++ (ra | i << 2)); ++ else ++ cache_entry[way].cacheline[i] = ++ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, ++ (ra | i << 2)); ++ } ++ } ++ printk("dump %s va:%x\n", cache_type ? "DCACHE" : "ICACHE", va); ++ ++ printk("%8s %4s %4s %1s %1s %1s %8s %8s %8s %8s %8s %8s %8s %8s\n", ++ "ADDRESS", "SET", "WAY", "V", "D", "L", "00", "04", "08", "0C", ++ "10", "14", "18", "1C"); ++ for (way = 0; way < ways; way++) { ++ printk("%08lx %04lx %04lx %1u %1u %1u ", ++ cache_entry[way].pa + ++ (((idx >> line_bits) * line_size) % PAGE_SIZE), ++ (idx >> line_bits), way, cache_entry[way].valid, ++ cache_entry[way].dirty, cache_entry[way].lock); ++ for (i = 0; i < 8; i++) { ++ printk("%08x ", cache_entry[way].cacheline[i]); ++ } ++ printk("\n"); ++ } ++} ++ ++EXPORT_SYMBOL(dump_cache_va); ++#endif ++ ++static void dump_instr(struct pt_regs *regs) ++{ ++ unsigned long addr = instruction_pointer(regs); ++ const int width = 8; ++ mm_segment_t fs; ++ int i; ++ ++ return; ++ /* ++ * We need to switch to kernel mode so that we can use __get_user ++ * to safely read from kernel space. Note that we now dump the ++ * code first, just in case the backtrace kills us. ++ */ ++ fs = get_fs(); ++ set_fs(KERNEL_DS); ++ ++ printk("Code: "); ++ for (i = -4; i < 1; i++) { ++ unsigned int val, bad; ++ ++ bad = __get_user(val, &((u32 *) addr)[i]); ++ ++ if (!bad) ++ printk(i == 0 ? 
"(%0*x) " : "%0*x ", width, val); ++ else { ++ printk("bad PC value."); ++ break; ++ } ++ } ++ printk("\n"); ++ ++ set_fs(fs); ++} ++ ++#define LOOP_TIMES (100) ++void dump_stack(void) ++{ ++ int cnt = LOOP_TIMES; ++#ifndef CONFIG_FRAME_POINTER ++ unsigned long *stack; ++ unsigned long addr; ++ ++ __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(stack)); ++ printk("Call Trace:\n"); ++ while (!kstack_end(stack)) { ++ addr = *stack++; ++ if (__kernel_text_address(addr)) { ++ printk("[<%08lx>] ", addr); ++ print_symbol("%s\n", addr); ++ } ++ cnt--; ++ if (cnt < 0) ++ break; ++ } ++ printk("\n"); ++#else ++ unsigned long *fpn; ++ __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); ++ printk("Call Trace:\n"); ++ while (!kstack_end((void *)fpn) && !((unsigned long)fpn & 0x3) ++ && ((unsigned long)fpn >= TASK_SIZE)) { ++ unsigned long lpp, fpp; ++#if !defined(NDS32_ABI_2) ++ lpp = fpn[0]; ++ fpp = fpn[1]; ++#else ++ lpp = fpn[0]; ++ fpp = fpn[-1]; ++#endif ++ if (__kernel_text_address(lpp)) { ++ printk("[<%08lx>] ", lpp); ++ print_symbol("%s\n", lpp); ++ fpn = (unsigned long *)fpp; ++ } ++ cnt--; ++ if (cnt < 0) ++ break; ++ } ++ printk("\n"); ++#endif ++} ++ ++EXPORT_SYMBOL(dump_stack); ++ ++void show_stack(struct task_struct *tsk, unsigned long *sp) ++{ ++ unsigned long fp; ++ ++ if (!tsk) ++ tsk = current; ++ ++ if (tsk != current) ++ fp = thread_saved_fp(tsk); ++ else ++ asm("move %0, $fp":"=r"(fp)); ++ ++ dump_stack(); ++ barrier(); ++} ++ ++DEFINE_SPINLOCK(die_lock); ++ ++/* ++ * This function is protected against re-entrancy. ++ */ ++void die(const char *str, struct pt_regs *regs, int err) ++{ ++ struct task_struct *tsk = current; ++ static int die_counter; ++ ++ console_verbose(); ++ spin_lock_irq(&die_lock); ++ bust_spinlocks(1); ++ ++ printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); ++ print_modules(); ++ printk("CPU: %i\n", smp_processor_id()); ++ show_regs(regs); ++ printk("Process %s (pid: %d, stack limit = 0x%p)\n", ++ tsk->comm, tsk->pid, task_thread_info(tsk) + 1); ++ ++ if (!user_mode(regs) || in_interrupt()) { ++ dump_mem("Stack: ", regs->NDS32_sp, ++ 8192 + (unsigned long)task_thread_info(tsk)); ++ dump_instr(regs); ++ dump_stack(); ++ } ++ ++ bust_spinlocks(0); ++ spin_unlock_irq(&die_lock); ++ do_exit(SIGSEGV); ++} ++ ++void die_if_kernel(const char *str, struct pt_regs *regs, int err) ++{ ++ if (user_mode(regs)) ++ return; ++ ++ die(str, regs, err); ++} ++ ++int bad_syscall(int n, struct pt_regs *regs) ++{ ++ struct thread_info *thread = current_thread_info(); ++ siginfo_t info; ++ ++ if (current->personality != PER_LINUX && thread->exec_domain->handler) { ++ thread->exec_domain->handler(n, regs); ++ return regs->NDS32_r0; ++ } ++#ifdef CONFIG_DEBUG_USER ++ if (user_debug & UDBG_SYSCALL) { ++ printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", ++ current->pid, current->comm, n); ++ dump_instr(regs); ++ } ++#endif ++ ++ info.si_signo = SIGILL; ++ info.si_errno = 0; ++ info.si_code = ILL_ILLTRP; ++ info.si_addr = (void __user *)instruction_pointer(regs) - 4; ++ ++ force_sig_info(SIGILL, &info, current); ++ die_if_kernel("Oops - bad syscall", regs, n); ++ return regs->NDS32_r0; ++} ++ ++void __pte_error(const char *file, int line, unsigned long val) ++{ ++ printk("%s:%d: bad pte %08lx.\n", file, line, val); ++} ++ ++void __pmd_error(const char *file, int line, unsigned long val) ++{ ++ printk("%s:%d: bad pmd %08lx.\n", file, line, val); ++} ++ ++void __pgd_error(const char *file, int line, unsigned long val) ++{ ++ printk("%s:%d: bad pgd %08lx.\n", 
file, line, val); ++} ++ ++extern char exception_vector[73][64]; ++void __init trap_init(void) ++{ ++ return; ++} ++ ++void __init early_trap_init(void) ++{ ++ unsigned long ivb = 0; ++ unsigned long base = 0xc0000000; ++ ++ memcpy((unsigned long *)base, (unsigned long *)exception_vector, ++ sizeof(exception_vector)); ++ ivb = __nds32__mfsr(NDS32_SR_IVB); ++#ifdef CONFIG_EVIC ++ __nds32__mtsr((ivb & ~IVB_mskESZ) | (2 << IVB_offESZ) | ++ (1 << IVB_offEVIC) | IVB_BASE, NDS32_SR_IVB); ++#else ++ /* Check platform support. */ ++# if defined (CONFIG_IVIC_INTC) ++ if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) >= 2) ++ panic ++ ("IVIC mode is not allowed on the platform with interrupt controller\n"); ++# elif defined(CONFIG_IVIC) ++ if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2) ++ panic ++ ("IVIC mode is not allowed on the platform without interrupt controller\n"); ++# endif ++ __nds32__mtsr((ivb & ~IVB_mskESZ) | (2 << IVB_offESZ) | IVB_BASE, ++ NDS32_SR_IVB); ++#endif ++ __nds32__mtsr(0x10003, NDS32_SR_INT_MASK); ++ /* ++ * Copy signal return handlers into the vector page, and ++ * set sigreturn to be a pointer to these. ++ */ ++ memcpy((void *)KERN_SIGRETURN_CODE, retcodes, sizeof(retcodes)); ++ memcpy((void *)KERN_RESTART_CODE, syscall_restart_code, ++ sizeof(syscall_restart_code)); ++ ++ /* ++ * 0x2000 is 8K-aligned of 0x1240 = 73 vectors * 64byte ++ * 0x1000 is page saving sigreturn & restart code ++ */ ++ flush_icache_range(base, base + 0x3000); ++} ++ ++#if 0 ++COLE:use send_sigtrap instread ++ static __inline__ void do_trap(int trapnr, int signr, const char *str, ++ struct pt_regs *regs, ++ unsigned long error_code, siginfo_t * info) ++{ ++ if (user_mode(regs)) { ++ /* trap_signal */ ++ struct task_struct *tsk = current; ++ tsk->thread.error_code = error_code; ++ tsk->thread.trap_no = trapnr; ++ if (info) ++ force_sig_info(signr, info, tsk); ++ else ++ force_sig(signr, tsk); ++ return; ++ } else { ++ /* kernel_trap */ ++ if (!fixup_exception(regs)) ++ die(str, regs, error_code); ++ return; ++ } ++} ++#endif ++ ++/* ++ * I modified original debug_trap to apply KGDB stuff, ++ * Harry@Jul.18.2007 ++ */ ++void do_debug_trap(unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs *regs) ++{ ++ if (notify_die(DIE_DEBUG, "debug", regs, addr, type, SIGTRAP) ++ == NOTIFY_STOP) ++ return; ++ ++#if !defined(CONFIG_HSS) ++ /* clear the swbk; otherwise the user will see it */ ++ if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) ++ ptrace_cancel_swbk(current); ++#endif ++ ++ /* do_trap(1, SIGTRAP, 0, regs, 0, NULL); */ ++ if (user_mode(regs)) { ++ /* trap_signal */ ++ send_sigtrap(current, regs, 0, TRAP_BRKPT); ++ } else { ++ /* kernel_trap */ ++ if (!fixup_exception(regs)) ++ die("unexpected kernel_trap", regs, 0); ++ } ++} ++ ++void unhandled_interruption(struct pt_regs *regs) ++{ ++ siginfo_t si; ++ printk("unhandled_interruption\n"); ++ show_regs(regs); ++ if (!user_mode(regs)) ++ do_exit(SIGKILL); ++ si.si_signo = SIGKILL; ++ si.si_errno = 0; ++ force_sig_info(SIGKILL, &si, current); ++} ++ ++void unhandled_exceptions(unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs *regs) ++{ ++ siginfo_t si; ++ printk("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry, ++ addr, type); ++ show_regs(regs); ++ if (!user_mode(regs)) ++ do_exit(SIGKILL); ++ si.si_signo = SIGKILL; ++ si.si_errno = 0; ++ si.si_addr = (void *)addr; ++ force_sig_info(SIGKILL, &si, current); ++} ++ ++extern int do_page_fault(unsigned long entry, unsigned long addr, ++ 
unsigned int error_code, struct pt_regs *regs); ++ ++/* ++ * 2:DEF dispatch for TLB MISC exception handler ++*/ ++ ++void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs *regs) ++{ ++ type = type & (ITYPE_mskINST | ITYPE_mskETYPE); ++ if ((type & 0xf) < 5) ++ do_page_fault(entry, addr, type, regs); ++ else ++ unhandled_exceptions(entry, addr, type, regs); ++} ++ ++int (*do_unaligned_access) (unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs * regs) = NULL; ++ ++EXPORT_SYMBOL(do_unaligned_access); ++ ++void do_revinsn(struct pt_regs *regs) ++{ ++ siginfo_t si; ++ printk("Reserved Instruction\n"); ++ show_regs(regs); ++ if (!user_mode(regs)) ++ do_exit(SIGILL); ++ si.si_signo = SIGILL; ++ si.si_errno = 0; ++ force_sig_info(SIGILL, &si, current); ++} ++ ++/* ++ * 7:DEF dispatch for General exception handler ++ */ ++void do_dispatch_general(unsigned long entry, unsigned long addr, ++ unsigned long itype, struct pt_regs *regs, ++ unsigned long oipc) ++{ ++ unsigned int swid = itype >> ITYPE_offSWID; ++ unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE); ++ if (type == 0) { /* Alignment check */ ++ ++ if (do_unaligned_access) { ++ ++ int ret = do_unaligned_access(entry, addr, type, regs); ++ ++ if (ret == 0) ++ return; ++ ++ if (ret == -EFAULT) ++ printk ++ ("Unhandled unaligned access exception\n"); ++ } ++ do_page_fault(entry, addr, type, regs); ++ } else if (type == 1) /* Reserved instruction */ ++ do_revinsn(regs); ++ else if (type == 6) { /* Coprocessor */ ++ if (((GET_ITYPE() & ITYPE_mskSTYPE) >> ITYPE_offSTYPE) == 3) { ++#ifdef CONFIG_AUDIO ++ preempt_disable(); ++ do_audio_context_switch(type, regs); ++ preempt_enable(); ++#else ++ unhandled_exceptions(entry, addr, type, regs); ++#endif ++ } else { ++#ifdef CONFIG_FPU ++ do_fpu_exception(type, regs); ++#else ++ unhandled_exceptions(entry, addr, type, regs); ++#endif ++ } ++ } else if (type == 2 && swid == 0x1a) { ++ /* trap, used on v3 EDM target debugging workaround */ ++ /* ++ * DIPC(OIPC) is passed as parameter before ++ * interrupt is enabled, so the DIPC will not be corrupted ++ * even though interrupts are coming in ++ */ ++ /* ++ * 1. update ipc ++ * 2. update pt_regs ipc with oipc ++ * 3. update pt_regs ipsw (clear DEX) ++ */ ++ __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc)); ++ regs->NDS32_ipc = oipc; ++ regs->NDS32_ipsw &= ~0x400; ++ do_debug_trap(entry, addr, itype, regs); ++ } else ++ unhandled_exceptions(entry, addr, type, regs); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/kernel/vmlinux.lds.S linux-3.4.110/arch/nds32/kernel/vmlinux.lds.S +--- linux-3.4.110.orig/arch/nds32/kernel/vmlinux.lds.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/kernel/vmlinux.lds.S 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,74 @@ ++#include ++#include ++#include ++#include ++ ++#define LOAD_OFFSET (PAGE_OFFSET - PHYS_OFFSET) ++#include ++ ++OUTPUT_ARCH(nds32) ++ENTRY(_stext_lma) ++jiffies = jiffies_64; ++ ++SECTIONS ++{ ++ _stext_lma = TEXTADDR - LOAD_OFFSET; ++ . = TEXTADDR; ++ __init_begin = .; ++ HEAD_TEXT_SECTION ++ INIT_TEXT_SECTION(PAGE_SIZE) ++ /* These sections are arch specific. */ ++ .arch_info : AT(ADDR(.arch_info) - LOAD_OFFSET) { ++ . = ALIGN(4); ++ VMLINUX_SYMBOL(__proc_info_begin) = .; ++ *(.proc.info.init) ++ VMLINUX_SYMBOL(__proc_info_end) = .; ++ __arch_info_begin = .; ++ *(.arch.info) ++ __arch_info_end = .; ++ __tagtable_begin = .; ++ *(.taglist) ++ __tagtable_end = .; ++ . 
= ALIGN(16); ++ __pv_table_begin = .; ++ *(.pv_table) ++ __pv_table_end = .; ++ __early_begin = .; ++ *(__early_param) ++ __early_end = .; ++ } ++ ++ INIT_DATA_SECTION(16) ++ PERCPU_SECTION(L1_CACHE_BYTES) ++ __init_end = .; ++ ++ . = ALIGN(PAGE_SIZE); ++ _stext = .; ++ /* Real text segment */ ++ .text : AT(ADDR(.text) - LOAD_OFFSET) { ++ _text = .; /* Text and read-only data */ ++ TEXT_TEXT ++ SCHED_TEXT ++ LOCK_TEXT ++ KPROBES_TEXT ++ IRQENTRY_TEXT ++ *(.fixup) ++ } ++ ++ _etext = .; /* End of text and rodata section */ ++ ++ _sdata = .; ++ RO_DATA_SECTION(PAGE_SIZE) ++ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) ++ _edata = .; ++ ++ EXCEPTION_TABLE(16) ++ NOTES ++ BSS_SECTION(4, 4, 4) ++ _end = .; ++ ++ STABS_DEBUG ++ DWARF_DEBUG ++ ++ DISCARDS ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/copy_page.S linux-3.4.110/arch/nds32/lib/copy_page.S +--- linux-3.4.110.orig/arch/nds32/lib/copy_page.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/copy_page.S 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/lib/copypage.S ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++#include ++#include ++ ++ .text ++ENTRY(copy_page) ++ pushm $r2, $r10 ++ movi $r2, PAGE_SIZE >> 5 ++.Lcopy_loop: ++ lmw.bim $r3, [$r1], $r10 ++ smw.bim $r3, [$r0], $r10 ++ subi45 $r2, #1 ++ bnez38 $r2, .Lcopy_loop ++ popm $r2, $r10 ++ ret ++ ++ENTRY(clear_page) ++ pushm $r1, $r9 ++ movi $r1, PAGE_SIZE >> 5 ++ movi55 $r2, #0 ++ movi55 $r3, #0 ++ movi55 $r4, #0 ++ movi55 $r5, #0 ++ movi55 $r6, #0 ++ movi55 $r7, #0 ++ movi55 $r8, #0 ++ movi55 $r9, #0 ++.Lclear_loop: ++ smw.bim $r2, [$r0], $r9 ++ subi45 $r1, #1 ++ bnez38 $r1, .Lclear_loop ++ popm $r1, $r9 ++ ret +diff -Nur linux-3.4.110.orig/arch/nds32/lib/csum_partial.c linux-3.4.110/arch/nds32/lib/csum_partial.c +--- linux-3.4.110.orig/arch/nds32/lib/csum_partial.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/csum_partial.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,135 @@ ++/* ++ * INET An implementation of the TCP/IP protocol suite for the LINUX ++ * operating system. INET is implemented using the BSD Socket ++ * interface as the means of communication with the user level. ++ * ++ * MIPS specific IP/TCP/UDP checksumming routines ++ * ++ * Authors: Ralf Baechle, ++ * Lots of code moved from tcp.c and ip.c; see those files ++ * for more names. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ */ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#define addc(_t,_r) \ ++ __asm__ __volatile__ ( \ ++ "add\t%0, %0, %1\n\t" \ ++ "slt\t$p1, %0, %1\n\t" \ ++ "add\r%0, %0, $p1\n\t" \ ++ : "=r"(_t) \ ++ : "r"(_r), "0"(_t)); ++ ++static inline unsigned short from32to16(unsigned int x) ++{ ++ /* 32 bits --> 16 bits + carry */ ++ x = (x & 0xffff) + (x >> 16); ++ /* 16 bits + carry --> 16 bits including carry */ ++ x = (x & 0xffff) + (x >> 16); ++ return (unsigned short)x; ++} ++ ++static inline unsigned int do_csum(const unsigned char *buff, int len) ++{ ++ int odd; ++ register unsigned int result = 0; ++ register unsigned int count; ++ if (len <= 0) ++ goto out; ++ odd = 1 & (unsigned long)buff; ++ if (odd) { ++ result = be16_to_cpu(*buff); ++ len--; ++ buff++; ++ } ++ count = len >> 1; /* nr of 16-bit words.. 
*/ ++ if (count) { ++ if (2 & (unsigned long)buff) { ++ result += *(unsigned short *)buff; ++ count--; ++ len -= 2; ++ buff += 2; ++ } ++ count >>= 1; /* nr of 32-bit words.. */ ++ if (count) { ++ while (count >= 8) { ++ __asm__ ++ __volatile__("lmw.bi $r17, [%1], $r24\n\t" ++ "add\t%0, %0, $r17\n\t" ++ "slt\t$p1, %0, $r17\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r18\n\t" ++ "slt\t$p1, %0, $r18\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r19\n\t" ++ "slt\t$p1, %0, $r19\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r20\n\t" ++ "slt\t$p1, %0, $r20\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r21\n\t" ++ "slt\t$p1, %0, $r21\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r22\n\t" ++ "slt\t$p1, %0, $r22\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r23\n\t" ++ "slt\t$p1, %0, $r23\n\t" ++ "add\r%0, %0, $p1\n\t" ++ "add\t%0, %0, $r24\n\t" ++ "slt\t$p1, %0, $r24\n\t" ++ "add\r%0, %0, $p1\n\t":"=r" ++ (result) ++ :"r"(buff), "0"(result) ++ :"$r17", "$r18", "$r19", ++ "$r20", "$r21", "$r22", "$r23", ++ "$r24"); ++ count -= 8; ++ buff += 32; ++ } ++ while (count) { ++ unsigned int w = *(unsigned int *)buff; ++ count--; ++ buff += 4; ++ addc(result, w); ++ } ++ result = (result & 0xffff) + (result >> 16); ++ } ++ if (len & 2) { ++ result += *(unsigned short *)buff; ++ buff += 2; ++ } ++ } ++ if (len & 1) ++ result += le16_to_cpu(*buff); ++ result = from32to16(result); ++ if (odd) ++ result = swab16(result); ++out: ++ return result; ++} ++ ++/* ++ * computes a partial checksum, e.g. for TCP/UDP fragments ++ */ ++/* ++ * why bother folding? ++ */ ++unsigned int csum_partial(const void *buff, int len, unsigned int sum) ++{ ++ unsigned int result = 0; ++// printk("csum_partial %x %x %x\n", buff, len, sum); ++ result = do_csum(buff, len); ++ addc(result, sum); ++ return (unsigned short)from32to16(result); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/csum_partial_copy.c linux-3.4.110/arch/nds32/lib/csum_partial_copy.c +--- linux-3.4.110.orig/arch/nds32/lib/csum_partial_copy.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/csum_partial_copy.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,58 @@ ++/* ++ * INET An implementation of the TCP/IP protocol suite for the LINUX ++ * operating system. INET is implemented using the BSD Socket ++ * interface as the means of communication with the user level. ++ * ++ * MIPS specific IP/TCP/UDP checksumming routines ++ * ++ * Authors: Ralf Baechle, ++ * Lots of code moved from tcp.c and ip.c; see those files ++ * for more names. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * copy while checksumming, otherwise like csum_partial ++ */ ++unsigned int csum_partial_copy_nocheck(const unsigned char *src, ++ unsigned char *dst, int len, ++ unsigned int sum) ++{ ++ /* ++ * It's 2:30 am and I don't feel like doing it real ... ++ * This is lots slower than the real thing (tm) ++ */ ++ sum = csum_partial(src, len, sum); ++ memcpy(dst, src, len); ++ ++ return sum; ++} ++ ++/* ++ * Copy from userspace and compute checksum. If we catch an exception ++ * then zero the rest of the buffer. 
++ */ ++unsigned int csum_partial_copy_from_user(const unsigned char *src, ++ unsigned char *dst, int len, ++ unsigned int sum, int *err_ptr) ++{ ++ int missing; ++ ++ missing = copy_from_user(dst, src, len); ++ if (missing) { ++ memset(dst + len - missing, 0, missing); ++ *err_ptr = -EFAULT; ++ } ++ ++ return csum_partial(dst, len, sum); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/divmod.c linux-3.4.110/arch/nds32/lib/divmod.c +--- linux-3.4.110.orig/arch/nds32/lib/divmod.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/divmod.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,46 @@ ++extern unsigned long udivmodsi4(unsigned long num, unsigned long den, ++ int modwanted); ++ ++long __divsi3(long a, long b) ++{ ++ int neg = 0; ++ long res; ++ ++ if (a < 0) { ++ a = -a; ++ neg = !neg; ++ } ++ ++ if (b < 0) { ++ b = -b; ++ neg = !neg; ++ } ++ ++ res = udivmodsi4(a, b, 0); ++ ++ if (neg) ++ res = -res; ++ ++ return res; ++} ++ ++long __modsi3(long a, long b) ++{ ++ int neg = 0; ++ long res; ++ ++ if (a < 0) { ++ a = -a; ++ neg = 1; ++ } ++ ++ if (b < 0) ++ b = -b; ++ ++ res = udivmodsi4(a, b, 1); ++ ++ if (neg) ++ res = -res; ++ ++ return res; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/findbit.S linux-3.4.110/arch/nds32/lib/findbit.S +--- linux-3.4.110.orig/arch/nds32/lib/findbit.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/findbit.S 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,108 @@ ++/* ++ * linux/arch/nds32/lib/findbit.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2006 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++ ++ .text ++ ++/* ++ * Purpose : Find a 'zero' bit ++ * Prototype: int find_first_zero_bit(void *addr, int maxbit); ++ */ ++ENTRY(_find_first_zero_bit) ++ move $r2, #0 ++ move $p0, #0 ! Reset bit count ++next_word: ++ move $r4, #4 ++ lmw.bim $p1, [$r0], $p1 ++ li $r3, #0xffffffff ! Inversion mask ++ xor $p1, $p1, $r3 ! Inverted the word ++ addi $p0, $p0, #32 ++ beqz $p1, next_word ++ addi $p0, $p0, #-32 ++next_byte: ++ beqz $r4, next_word ++ andi $r3, $p1, #0xff ! Get the next byte ++ bnez $r3, found ! Found zero bit in this byte ++ addi $p0, $p0, #8 ! Update bit count ++ addi $r4, $r4, #-1 ++ srli $p1, $p1, #8 ++ bgt $r1, $p0, next_byte ++ addi $r0, $r1, #1 ! Return not found ++ ret ++ ++/* ++ * Purpose : Find next 'zero' bit ++ * Prototype: int find_next_zero_bit(void *addr, int maxbit, int offset) ++ */ ++ENTRY(_find_next_zero_bit) ++ beqz $r2, _find_first_zero_bit ! If offset=0, goto find_first_zero_bit ++ srli $p0, $r2, #5 ! Get offset byte count ++ slli $p0, $p0, #2 ++ add $r0, $r0, $p0 ! $r0 is the first word to load ++ lmw.bim $p1, [$r0], $p1 ++ li $p0, #0xffffffff ! Inversion mask ++ xor $p1, $p1, $p0 ! Inverted the word ++ andi $r4, $r2, #31 ! Left bits in offset ++ srl $p1, $p1, $r4 ! Shift out the left bits ++ xor $p0, $p0, $p0 ++ subri $r4, $r4, #31 ++loop: ++ andi $r3, $p1, #0xff ! The first byte to check ++ bnez $r3, found ! Found zero bit in this byte ++ addi $p0, $p0, #8 ++ addi $r4, $r4, #-8 ++ srli $p1, $p1, #8 ! Move on to the next byte ++ bgtz $r4, loop ++ b next_word ++ ++/* ++ * One or more bits in the LSB of $p1 are assumed to be set. ++ */ ++ ++found: ++ move $p1, $r3 ++ xor $r4, $r4, $r4 ++ andi $r5, $p1, #0x0f ! Get bits 0-3 ++ move $r3, #4 ! 
For 0 case (no set bit found) ++ cmovn $r3, $r4, $r5 ! Not 0 case (There's set bit in these 4 bits) ++ add $p0, $p0, $r3 ! Update bit count ++ slli $r3, $p1, #4 ! Not 0 case (Find set bit in these 4 bits) ++ cmovn $p1, $r3, $r5 ! For 0 case (Find set bit in the rest 4 bits) ++ andi $r5, $p1, #0x30 ! Get 4-5 ++ move $r3, #2 ! For 0 case (no set bit found) ++ cmovn $r3, $r4, $r5 ! Not 0 case (There's set bit in these 2 bits) ++ add $p0, $p0, $r3 ! Update bit count ++ slli $r3, $p1, #2 ! Not 0 case (Find set bit in these 2 bits) ++ cmovn $p1, $r3, $r5 ! For 0 case (Find set bit in the rest 2 bits) ++ andi $r5, $p1, #0x40 ! Get bit 6 ++ move $r3, #1 ! For 0 case (bit 6 is not set) ++ cmovn $r3, $r4, $r5 ! Not 0 case (bit 6 is set bit) ++ add $r5, $p0, $r3 ++ add $r0, $r5, $r2 ++ ret ++ ++ENTRY(_ext2_find_first_zero_bit) ++ pushm $r2, $r4 ++ move $r2, #0 ++ move $p0, #0 ! Reset bit count ++1: ++ lbi.bi $r3, [$r0], #1 ++ xori $r3, $r3, #0xff ++ addi $p0, $p0, #8 ++ beqz $r3, 1b ++ addi $p0, $p0, #-8 ++ ++2: ++ b found ! No return ++ popm $r2, $r4 ++ ret ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/getuser.S linux-3.4.110/arch/nds32/lib/getuser.S +--- linux-3.4.110.orig/arch/nds32/lib/getuser.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/getuser.S 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,79 @@ ++/* ++ * linux/arch/nds32/lib/getuser.S ++ * ++ * Copyright (C) 2001 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Idea from x86 version, (C) Copyright 1998 Linus Torvalds ++ * ++ * These functions have a non-standard call interface to make them more ++ * efficient, especially as they return an error value in addition to ++ * the "real" return value. ++ * ++ * __get_user_X ++ * ++ * Inputs: $r0 contains the address ++ * Outputs: $r0 is the error code ++ * $r2, $r3 contains the zero-extended value ++ * lr corrupted ++ * ++ * No other registers must be altered. (see include/asm-nds32/uaccess.h ++ * for specific ASM register usage). ++ * ++ * Note that ADDR_LIMIT is either 0 or 0xc0000000. ++ * Note also that it is intended that __get_user_bad is not global. 
++ */ ++#include ++#include ++#include ++#include ++ ++ ++ENTRY(__get_user_1) ++1: lbi $r2, [$r0] ++ move $r0, #0 ++ ret ++ ++ENTRY(__get_user_2) ++2: lbi.bi $r2, [$r0], #1 ++3: lbi $r3, [$r0] ++#ifndef __NDS32_EB__ ++ slli $p1, $r3, #8 ++ or $r2, $r2, $p1 ++#else ++ slli $p1, $r2, #8 ++ or $r2, $r3, $p1 ++#endif ++ move $r0, #0 ++ ret ++ ++ENTRY(__get_user_4) ++4: lwi $r2, [$r0] ++ move $r0, #0 ++ ret ++ ++ENTRY(__get_user_8) ++5: lwi.bi $r2, [$r0], #4 ++6: lwi $r3, [$r0] ++ move $r0, #0 ++ ret ++ ++__get_user_bad_8: ++ move $r3, #0 ++__get_user_bad: ++ move $r2, #0 ++ move $r0, #-EFAULT ++ ret ++ ++.section __ex_table, "a" ++ .long 1b, __get_user_bad ++ .long 2b, __get_user_bad ++ .long 3b, __get_user_bad ++ .long 4b, __get_user_bad ++ .long 5b, __get_user_bad_8 ++ .long 6b, __get_user_bad_8 ++.previous +diff -Nur linux-3.4.110.orig/arch/nds32/lib/libgcc2.c linux-3.4.110/arch/nds32/lib/libgcc2.c +--- linux-3.4.110.orig/arch/nds32/lib/libgcc2.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/libgcc2.c 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,351 @@ ++#define BITS_PER_UNIT 8 ++#include "longlong.h" ++typedef unsigned int UQItype __attribute__ ((mode(QI))); ++typedef int SItype __attribute__ ((mode(SI))); ++typedef unsigned int USItype __attribute__ ((mode(SI))); ++typedef int DItype __attribute__ ((mode(DI))); ++typedef unsigned int UDItype __attribute__ ((mode(DI))); ++ ++typedef int word_type __attribute__ ((mode(__word__))); ++ ++#define Wtype SItype ++#define UWtype USItype ++#define DWtype DItype ++#define UDWtype UDItype ++ ++#ifdef __NDS32_EB__ ++struct DWstruct { ++ Wtype high, low; ++}; ++#else ++struct DWstruct { ++ Wtype low, high; ++}; ++#endif ++ ++typedef union { ++ struct DWstruct s; ++ DWtype ll; ++} DWunion; ++DWtype __negdi2(DWtype u) ++{ ++ const DWunion uu = {.ll = u }; ++ const DWunion w = { {.low = -uu.s.low, ++ .high = -uu.s.high - ((UWtype) - uu.s.low > 0)} ++ }; ++ ++ return w.ll; ++} ++ ++DWtype __lshrdi3(DWtype u, word_type b) ++{ ++ const DWunion uu = {.ll = u }; ++ const word_type bm = (sizeof(Wtype) * BITS_PER_UNIT) - b; ++ DWunion w; ++ ++ if (b == 0) ++ return u; ++ ++ if (bm <= 0) { ++ w.s.high = 0; ++ w.s.low = (UWtype) uu.s.high >> -bm; ++ } else { ++ const UWtype carries = (UWtype) uu.s.high << bm; ++ ++ w.s.high = (UWtype) uu.s.high >> b; ++ w.s.low = ((UWtype) uu.s.low >> b) | carries; ++ } ++ ++ return w.ll; ++} ++ ++DWtype __ashldi3(DWtype u, word_type b) ++{ ++ const DWunion uu = {.ll = u }; ++ const word_type bm = (sizeof(Wtype) * BITS_PER_UNIT) - b; ++ DWunion w; ++ ++ if (b == 0) ++ return u; ++ ++ if (bm <= 0) { ++ w.s.low = 0; ++ w.s.high = (UWtype) uu.s.low << -bm; ++ } else { ++ const UWtype carries = (UWtype) uu.s.low >> bm; ++ ++ w.s.low = (UWtype) uu.s.low << b; ++ w.s.high = ((UWtype) uu.s.high << b) | carries; ++ } ++ ++ return w.ll; ++} ++ ++DWtype __ashrdi3(DWtype u, word_type b) ++{ ++ const DWunion uu = {.ll = u }; ++ const word_type bm = (sizeof(Wtype) * BITS_PER_UNIT) - b; ++ DWunion w; ++ ++ if (b == 0) ++ return u; ++ ++ if (bm <= 0) { ++ /* w.s.high = 1..1 or 0..0 */ ++ w.s.high = uu.s.high >> (sizeof(Wtype) * BITS_PER_UNIT - 1); ++ w.s.low = uu.s.high >> -bm; ++ } else { ++ const UWtype carries = (UWtype) uu.s.high << bm; ++ ++ w.s.high = uu.s.high >> b; ++ w.s.low = ((UWtype) uu.s.low >> b) | carries; ++ } ++ ++ return w.ll; ++} ++ ++DWtype __muldi3(DWtype u, DWtype v) ++{ ++ const DWunion uu = {.ll = u }; ++ const DWunion vv = {.ll = v }; ++ DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low) }; ++ 
++ w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high ++ + (UWtype) uu.s.high * (UWtype) vv.s.low); ++ ++ return w.ll; ++} ++ ++const UQItype __clz_tab[] = { ++ 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, ++ 5, 5, 5, 5, 5, 5, 5, 5, ++ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, ++ 6, 6, 6, 6, 6, 6, 6, 6, ++ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, ++ 7, 7, 7, 7, 7, 7, 7, 7, ++ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, ++ 7, 7, 7, 7, 7, 7, 7, 7, ++ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, ++ 8, 8, 8, 8, 8, 8, 8, 8, ++}; ++ ++UDWtype __udivmoddi4(UDWtype n, UDWtype d, UDWtype * rp) ++{ ++ const DWunion nn = {.ll = n }; ++ const DWunion dd = {.ll = d }; ++ DWunion rr; ++ UWtype d0, d1, n0, n1, n2; ++ UWtype q0, q1; ++ UWtype b, bm; ++ DWunion ww; ++ ++ d0 = dd.s.low; ++ d1 = dd.s.high; ++ n0 = nn.s.low; ++ n1 = nn.s.high; ++ ++#if !UDIV_NEEDS_NORMALIZATION ++ if (d1 == 0) { ++ if (d0 > n1) { ++ /* 0q = nn / 0D */ ++ ++ udiv_qrnnd(q0, n0, n1, n0, d0); ++ q1 = 0; ++ ++ /* Remainder in n0. */ ++ } else { ++ /* qq = NN / 0d */ ++ ++ if (d0 == 0) ++ d0 = 1 / d0; /* Divide intentionally by zero. */ ++ ++ udiv_qrnnd(q1, n1, 0, n1, d0); ++ udiv_qrnnd(q0, n0, n1, n0, d0); ++ ++ /* Remainder in n0. */ ++ } ++ ++ if (rp != 0) { ++ rr.s.low = n0; ++ rr.s.high = 0; ++ *rp = rr.ll; ++ } ++ } ++#else /* UDIV_NEEDS_NORMALIZATION */ ++ ++ if (d1 == 0) { ++ if (d0 > n1) { ++ /* 0q = nn / 0D */ ++ ++ count_leading_zeros(bm, d0); ++ ++ if (bm != 0) { ++ /* Normalize, i.e. make the most significant bit of the ++ denominator set. */ ++ ++ d0 = d0 << bm; ++ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm)); ++ n0 = n0 << bm; ++ } ++ ++ udiv_qrnnd(q0, n0, n1, n0, d0); ++ q1 = 0; ++ ++ /* Remainder in n0 >> bm. */ ++ } else { ++ /* qq = NN / 0d */ ++ ++ if (d0 == 0) ++ d0 = 1 / d0; /* Divide intentionally by zero. */ ++ ++ count_leading_zeros(bm, d0); ++ ++ if (bm == 0) { ++ /* From (n1 >= d0) /\ (the most significant bit of d0 is set), ++ conclude (the most significant bit of n1 is set) /\ (the ++ leading quotient digit q1 = 1). ++ ++ This special case is necessary, not an optimization. ++ (Shifts counts of W_TYPE_SIZE are undefined.) */ ++ ++ n1 -= d0; ++ q1 = 1; ++ } else { ++ /* Normalize. */ ++ ++ b = W_TYPE_SIZE - bm; ++ ++ d0 = d0 << bm; ++ n2 = n1 >> b; ++ n1 = (n1 << bm) | (n0 >> b); ++ n0 = n0 << bm; ++ ++ udiv_qrnnd(q1, n1, n2, n1, d0); ++ } ++ ++ /* n1 != d0... */ ++ ++ udiv_qrnnd(q0, n0, n1, n0, d0); ++ ++ /* Remainder in n0 >> bm. */ ++ } ++ ++ if (rp != 0) { ++ rr.s.low = n0 >> bm; ++ rr.s.high = 0; ++ *rp = rr.ll; ++ } ++ } ++#endif /* UDIV_NEEDS_NORMALIZATION */ ++ ++ else { ++ if (d1 > n1) { ++ /* 00 = nn / DD */ ++ ++ q0 = 0; ++ q1 = 0; ++ ++ /* Remainder in n1n0. */ ++ if (rp != 0) { ++ rr.s.low = n0; ++ rr.s.high = n1; ++ *rp = rr.ll; ++ } ++ } else { ++ /* 0q = NN / dd */ ++ ++ count_leading_zeros(bm, d1); ++ if (bm == 0) { ++ /* From (n1 >= d1) /\ (the most significant bit of d1 is set), ++ conclude (the most significant bit of n1 is set) /\ (the ++ quotient digit q0 = 0 or 1). ++ ++ This special case is necessary, not an optimization. 
*/ ++ ++ /* The condition on the next line takes advantage of that ++ n1 >= d1 (true due to program flow). */ ++ if (n1 > d1 || n0 >= d0) { ++ q0 = 1; ++ sub_ddmmss(n1, n0, n1, n0, d1, d0); ++ } else ++ q0 = 0; ++ ++ q1 = 0; ++ ++ if (rp != 0) { ++ rr.s.low = n0; ++ rr.s.high = n1; ++ *rp = rr.ll; ++ } ++ } else { ++ UWtype m1, m0; ++ /* Normalize. */ ++ ++ b = W_TYPE_SIZE - bm; ++ ++ d1 = (d1 << bm) | (d0 >> b); ++ d0 = d0 << bm; ++ n2 = n1 >> b; ++ n1 = (n1 << bm) | (n0 >> b); ++ n0 = n0 << bm; ++ ++ udiv_qrnnd(q0, n1, n2, n1, d1); ++ umul_ppmm(m1, m0, q0, d0); ++ ++ if (m1 > n1 || (m1 == n1 && m0 > n0)) { ++ q0--; ++ sub_ddmmss(m1, m0, m1, m0, d1, d0); ++ } ++ ++ q1 = 0; ++ ++ /* Remainder in (n1n0 - m1m0) >> bm. */ ++ if (rp != 0) { ++ sub_ddmmss(n1, n0, n1, n0, m1, m0); ++ rr.s.low = (n1 << b) | (n0 >> bm); ++ rr.s.high = n1 >> bm; ++ *rp = rr.ll; ++ } ++ } ++ } ++ } ++ ++ ww.s.low = q0, ww.s.high = q1; ++ return ww.ll; ++} ++ ++UDWtype __umoddi3(UDWtype u, UDWtype v) ++{ ++ UDWtype w; ++ ++ (void)__udivmoddi4(u, v, &w); ++ ++ return w; ++} ++ ++UDWtype __udivdi3(UDWtype n, UDWtype d) ++{ ++ return __udivmoddi4(n, d, (UDWtype *) 0); ++} ++ ++word_type __ucmpdi2(DWtype a, DWtype b) ++{ ++ const DWunion au = {.ll = a }; ++ const DWunion bu = {.ll = b }; ++ ++ if ((UWtype) au.s.high < (UWtype) bu.s.high) ++ return 0; ++ else if ((UWtype) au.s.high > (UWtype) bu.s.high) ++ return 2; ++ if ((UWtype) au.s.low < (UWtype) bu.s.low) ++ return 0; ++ else if ((UWtype) au.s.low > (UWtype) bu.s.low) ++ return 2; ++ return 1; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/longlong.h linux-3.4.110/arch/nds32/lib/longlong.h +--- linux-3.4.110.orig/arch/nds32/lib/longlong.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/longlong.h 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,105 @@ ++#define __BITS4 (W_TYPE_SIZE / 4) ++#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) ++#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) ++#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) ++ ++#define W_TYPE_SIZE (4 * 8) ++#define UHWtype USItype ++ ++#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ ++ do { \ ++ UWtype __x; \ ++ __x = (al) - (bl); \ ++ (sh) = (ah) - (bh) - (__x > (al)); \ ++ (sl) = __x; \ ++ } while (0) ++ ++#define umul_ppmm(w1, w0, u, v) \ ++ do { \ ++ UWtype __x0, __x1, __x2, __x3; \ ++ UHWtype __ul, __vl, __uh, __vh; \ ++ \ ++ __ul = __ll_lowpart (u); \ ++ __uh = __ll_highpart (u); \ ++ __vl = __ll_lowpart (v); \ ++ __vh = __ll_highpart (v); \ ++ \ ++ __x0 = (UWtype) __ul * __vl; \ ++ __x1 = (UWtype) __ul * __vh; \ ++ __x2 = (UWtype) __uh * __vl; \ ++ __x3 = (UWtype) __uh * __vh; \ ++ \ ++ __x1 += __ll_highpart (__x0);/* this can't give carry */ \ ++ __x1 += __x2; /* but this indeed can */ \ ++ if (__x1 < __x2) /* did we get it? */ \ ++ __x3 += __ll_B; /* yes, add it in the proper pos. */ \ ++ \ ++ (w1) = __x3 + __ll_highpart (__x1); \ ++ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ ++ } while (0) ++ ++#define __umulsidi3(u, v) \ ++ ({DWunion __w; \ ++ umul_ppmm (__w.s.high, __w.s.low, u, v); \ ++ __w.ll; }) ++ ++#define __udiv_qrnnd_c(q, r, n1, n0, d) \ ++ do { \ ++ UWtype __d1, __d0, __q1, __q0; \ ++ UWtype __r1, __r0, __m; \ ++ __d1 = __ll_highpart (d); \ ++ __d0 = __ll_lowpart (d); \ ++ \ ++ __r1 = (n1) % __d1; \ ++ __q1 = (n1) / __d1; \ ++ __m = (UWtype) __q1 * __d0; \ ++ __r1 = __r1 * __ll_B | __ll_highpart (n0); \ ++ if (__r1 < __m) \ ++ { \ ++ __q1--, __r1 += (d); \ ++ if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */\ ++ if (__r1 < __m) \ ++ __q1--, __r1 += (d); \ ++ } \ ++ __r1 -= __m; \ ++ \ ++ __r0 = __r1 % __d1; \ ++ __q0 = __r1 / __d1; \ ++ __m = (UWtype) __q0 * __d0; \ ++ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \ ++ if (__r0 < __m) \ ++ { \ ++ __q0--, __r0 += (d); \ ++ if (__r0 >= (d)) \ ++ if (__r0 < __m) \ ++ __q0--, __r0 += (d); \ ++ } \ ++ __r0 -= __m; \ ++ \ ++ (q) = (UWtype) __q1 * __ll_B | __q0; \ ++ (r) = __r0; \ ++ } while (0) ++ ++#define UDIV_NEEDS_NORMALIZATION 1 ++#define udiv_qrnnd __udiv_qrnnd_c ++ ++#define count_leading_zeros(count, x) \ ++ do { \ ++ UWtype __xr = (x); \ ++ UWtype __a; \ ++ \ ++ if (W_TYPE_SIZE <= 32) \ ++ { \ ++ __a = __xr < ((UWtype)1<<2*__BITS4) \ ++ ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \ ++ : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \ ++ } \ ++ else \ ++ { \ ++ for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \ ++ if (((__xr >> __a) & 0xff) != 0) \ ++ break; \ ++ } \ ++ \ ++ (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \ ++ } while (0) +diff -Nur linux-3.4.110.orig/arch/nds32/lib/Makefile linux-3.4.110/arch/nds32/lib/Makefile +--- linux-3.4.110.orig/arch/nds32/lib/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/Makefile 2016-04-07 10:20:50.950081334 +0200 +@@ -0,0 +1,19 @@ ++# ++# linux/arch/nds32/lib/Makefile ++# ++# Copyright (C) 2006 Andes Technology Corporation ++# ++ ++lib-y := csum_partial_copy.o csum_partial.o \ ++ copy_page.o memcpy.o memmove.o \ ++ memset.o memzero.o strncpy_from_user.o \ ++ strnlen_user.o strchr.o strrchr.o \ ++ uaccess.o getuser.o \ ++ putuser.o libgcc2.o divmod.o udivmod.o udivmodsi4.o ++ ++ifdef CONFIG_FUNCTION_TRACER ++CFLAGS_REMOVE_libgcc2.o = -pg ++CFLAGS_REMOVE_divmod.o = -pg ++CFLAGS_REMOVE_udivmod.o = -pg ++CFLAGS_REMOVE_udivmodsi4.o = -pg ++endif +diff -Nur linux-3.4.110.orig/arch/nds32/lib/memcpy.S linux-3.4.110/arch/nds32/lib/memcpy.S +--- linux-3.4.110.orig/arch/nds32/lib/memcpy.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/memcpy.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,101 @@ ++/* ++ * linux/arch/nds32/lib/memcpy.S -- Memory copy function. ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 2001 Hiroyuki Kondo, and Hirokazu Takata ++ * Copyright (C) 2004 Hirokazu Takata ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ */ ++ ++#include ++ ++/* ++ void *memcpy(void *dst, const void *src, int n); ++ ++ dst: $r0 ++ src: $r1 ++ n : $r2 ++ ret: $r0 - pointer to the memory area dst. ++*/ ++ ++#include ++ ++ .text ++ ++ENTRY(memcpy) ++ move $r5, $r0 ++ beq $r0, $r1, quit_memcpy ++ beqz $r2, quit_memcpy ++ srli $r3, $r2, #5 ! check if len < cache-line size 32 ++ beqz $r3, word_copy_entry ++ andi $r4, $r0, #0x3 ! check byte-align ++ beqz $r4, unalign_word_copy_entry ++ ++ addi $r4, $r4,#-4 ++ abs $r4, $r4 ! check how many un-align byte to copy ++ sub $r2, $r2, $r4 ! update $R2 ++ ++unalign_byte_copy: ++ lbi.bi $r3, [$r1], #1 ++ addi $r4, $r4, #-1 ++ sbi.bi $r3, [$r0], #1 ++ bnez $r4, unalign_byte_copy ++ beqz $r2, quit_memcpy ++ ++unalign_word_copy_entry: ++ andi $r3, $r0, 0x1f ! check cache-line unaligncount ++ beqz $r3, cache_copy ++ ++ addi $r3, $r3, #-32 ++ abs $r3, $r3 ++ sub $r2, $r2, $r3 ! 
update $R2 ++ ++unalign_word_copy: ++ lmw.bim $r4, [$r1], $r4 ++ addi $r3, $r3, #-4 ++ smw.bim $r4, [$r0], $r4 ++ bnez $r3, unalign_word_copy ++ beqz $r2, quit_memcpy ++ ++ addi $r3, $r2, #-32 ! to check $r2< cache_line , than go to word_copy ++ bltz $r3, word_copy_entry ++cache_copy: ++ srli $r3, $r2, #5 ++ beqz $r3, word_copy_entry ++ pushm $r6, $r13 ++3: ++ lmw.bim $r6, [$r1], $r13 ++ addi $r3, $r3, #-1 ++ smw.bim $r6, [$r0], $r13 ++ bnez $r3, 3b ++ popm $r6, $r13 ++ ++word_copy_entry: ++ andi $r2, $r2, #31 ++ ++ beqz $r2, quit_memcpy ++5: ++ srli $r3, $r2, #2 ++ beqz $r3, byte_copy ++word_copy: ++ lmw.bim $r4, [$r1], $r4 ++ addi $r3, $r3, #-1 ++ smw.bim $r4, [$r0], $r4 ++ bnez $r3, word_copy ++ andi $r2, $r2, #3 ++ beqz $r2, quit_memcpy ++byte_copy: ++ lbi.bi $r3, [$r1], #1 ++ addi $r2, $r2, #-1 ++ ++ sbi.bi $r3, [$r0], #1 ++ bnez $r2, byte_copy ++quit_memcpy: ++ move $r0, $r5 ++ ret ++ ++ .end +diff -Nur linux-3.4.110.orig/arch/nds32/lib/memmove.S linux-3.4.110/arch/nds32/lib/memmove.S +--- linux-3.4.110.orig/arch/nds32/lib/memmove.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/memmove.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,80 @@ ++/* ++ * linux/arch/nds32/lib/memmove.S -- Memory move function. ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 2001 Hiroyuki Kondo, and Hirokazu Takata ++ * Copyright (C) 2004 Hirokazu Takata ++ * Copyright (C) 2006 Andes Technology Corporation ++ * ++ */ ++ ++#include ++ ++/* ++ void *memmove(void *dst, const void *src, int n); ++ ++ dst: $r0 ++ src: $r1 ++ n : $r2 ++ ret: $r0 - pointer to the memory area dst. ++*/ ++ .text ++ ++ENTRY(memmove) ++ move $r5, $r0 ! Set return value = det ++ beq $r0, $r1, exit_memcpy ! Exit when det = src ++ beqz $r2, exit_memcpy ! Exit when n = 0 ++ pushm $t0, $t1 ! Save reg ++ srli $p1, $r2, #2 ! $p1 is how many words to copy ++ ++ ! Avoid data lost when memory overlap ++ ! Copy data reversely when src < dst ++ slt $p0, $r0, $r1 ! check if $r0 < $r1 ++ beqz $p0, do_reverse ! branch if dst > src ++ ++ ! No reverse, dst < src ++ andi $r2, $r2, #3 ! How many bytes are less than a word ++ li $t0, #1 ! Determining copy direction in byte_cpy ++ beqz $p1, byte_cpy ! When n is less than a word ++ ++word_cpy: ++ lmw.bim $p0, [$r1], $p0 ! Read a word from src ++ addi $p1, $p1, #-1 ! How many words left to copy ++ smw.bim $p0, [$r0], $p0 ! Copy the word to det ++ bnez $p1, word_cpy ! If remained words > 0 ++ beqz $r2, end_memcpy ! No left bytes to copy ++ b byte_cpy ++ ++do_reverse: ++ add $r0, $r0, $r2 ! Start with the end of $r0 ++ add $r1, $r1, $r2 ! Start with the end of $r1 ++ andi $r2, $r2, #3 ! How many bytes are less than a word ++ li $t0, #-1 ! Determining copy direction in byte_cpy ++ beqz $p1, reverse_byte_cpy ! When n is less than a word ++ ++reverse_word_cpy: ++ lmw.adm $p0, [$r1], $p0 ! Read a word from src ++ addi $p1, $p1, #-1 ! How many words left to copy ++ smw.adm $p0, [$r0], $p0 ! Copy the word to det ++ bnez $p1, reverse_word_cpy ! If remained words > 0 ++ beqz $r2, end_memcpy ! No left bytes to copy ++ ++reverse_byte_cpy: ++ addi $r0, $r0, #-1 ++ addi $r1, $r1, #-1 ++byte_cpy: ! Less than 4 bytes to copy now ++ lb.bi $p0, [$r1], $t0 ! Read a byte from src ++ addi $r2, $r2, #-1 ! How many bytes left to copy ++ sb.bi $p0, [$r0], $t0 ! copy the byte to det ++ bnez $r2, byte_cpy ! 
If remained bytes > 0 ++ ++end_memcpy: ++ popm $t0, $t1 ++exit_memcpy: ++ move $r0, $r5 ++ ret ++ ++ .end +diff -Nur linux-3.4.110.orig/arch/nds32/lib/memset.S linux-3.4.110/arch/nds32/lib/memset.S +--- linux-3.4.110.orig/arch/nds32/lib/memset.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/memset.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,51 @@ ++ ++/* ++ * linux/arch/nds32/lib/memset.S -- memset function. ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 2001,2002 Hiroyuki Kondo, and Hirokazu Takata ++ * Copyright (C) 2004 Hirokazu Takata ++ * Copyright (C) 2006 Andes Technology Corporation ++ * ++ */ ++#include ++#include ++ ++/* ++ void *memset(void *dst, int val, int len); ++ ++ dst: $r0 ++ val: $r1 ++ len: $r2 ++ ret: $r0 - pointer to the memory area dst. ++*/ ++ .text ++ENTRY(memset) ++ move $r5, $r0 ! Return value ++ beqz $r2, end_memset ! Exit when len = 0 ++ srli $p1, $r2, 2 ! $p1 is how many words to copy ++ andi $r2, $r2, 3 ! How many bytes are less than a word ++ beqz $p1, byte_set ! When n is less than a word ++ ++ ! set $r1 from ??????ab to abababab ++ andi $r1, $r1, #0x00ff ! $r1 = 000000ab ++ slli $p0, $r1, #8 ! $p0 = 0000ab00 ++ or $r1, $r1, $p0 ! $r1 = 0000abab ++ slli $p0, $r1, #16 ! $p0 = abab0000 ++ or $r1, $r1, $p0 ! $r1 = abababab ++word_set: ++ addi $p1, $p1, #-1 ! How many words left to copy ++ smw.bim $r1, [$r0], $r1 ! Copy the word to det ++ bnez $p1, word_set ! Still words to set, continue looping ++ beqz $r2, end_memset ! No left byte to set ++byte_set: ! Less than 4 bytes left to set ++ addi $r2, $r2, #-1 ! Decrease len by 1 ++ sbi.bi $r1, [$r0], #1 ! Set data of the next byte to $r1 ++ bnez $r2, byte_set ! Still bytes left to set ++end_memset: ++ move $r0, $r5 ++ ret ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/memzero.S linux-3.4.110/arch/nds32/lib/memzero.S +--- linux-3.4.110.orig/arch/nds32/lib/memzero.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/memzero.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * linux/arch/nds32/lib/memzero.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++ ++ .text ++/* ++ * void *__memzero(void *dst, int len); ++ * ++ * dst: $r0 ++ * len: $r1 ++ * ret: $r0 - pointer to the memory area dst. ++ * ++ * Call memset(dst, 0, len) to perform memzero. 
++ * Currently no optimization because only being referenced in 31 files ++ * (For comparison, memset being referenced in 2527 files) ++ */ ++ENTRY(__memzero) ++ beqz $r1, 1f ++ push $lp ++ move $r2, $r1 ++ move $r1, #0 ++ push $r0 ++ bal memset ++ pop $r0 ++ pop $lp ++1: ++ ret +diff -Nur linux-3.4.110.orig/arch/nds32/lib/putuser.S linux-3.4.110/arch/nds32/lib/putuser.S +--- linux-3.4.110.orig/arch/nds32/lib/putuser.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/putuser.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,68 @@ ++/* ++ * linux/arch/nds32/lib/putuser.S ++ * ++ * Copyright (C) 2001 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Idea from x86 version, (C) Copyright 1998 Linus Torvalds ++ * ++ * These functions have a non-standard call interface to make ++ * them more efficient, especially as they return an error ++ * value in addition to the "real" return value. ++ * ++ * __put_user_X ++ * ++ * Inputs: $r0 contains the address ++ * $r2, $r3 contains the value ++ * Outputs: $r0 is the error code ++ * lr corrupted ++ * ++ * No other registers must be altered. (see include/asm-arm/uaccess.h ++ * for specific ASM register usage). ++ * ++ * Note that ADDR_LIMIT is either 0 or 0xc0000000 ++ * Note also that it is intended that __put_user_bad is not global. ++ */ ++#include ++#include ++#include ++#include ++ ++ .text ++ ++ENTRY(__put_user_1) ++1: sb $r2, [$r0] ++ move $r0, #0 ++ ret ++ ++ENTRY(__put_user_2) ++2: shi $r2, [$r0] ! Store input halfword ++ move $r0, #0 ++ ret ++ ++ENTRY(__put_user_4) ++3: sw $r2, [$r0] ++ move $r0, #0 ++ ret ++ ++ENTRY(__put_user_8) ++5: swi.bi $r2, [$r0], #4 ++6: sw $r3, [$r0] ++ move $r0, #0 ++ ret ++ ++__put_user_bad: ++ move $r0, #-EFAULT ++ ret ++ ++.section __ex_table, "a" ++ .long 1b, __put_user_bad ++ .long 2b, __put_user_bad ++ .long 3b, __put_user_bad ++ .long 5b, __put_user_bad ++ .long 6b, __put_user_bad ++.previous +diff -Nur linux-3.4.110.orig/arch/nds32/lib/strchr.S linux-3.4.110/arch/nds32/lib/strchr.S +--- linux-3.4.110.orig/arch/nds32/lib/strchr.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/strchr.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * linux/arch/nds32/lib/strchr.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * ASM optimised string functions ++ */ ++#include ++#include ++ ++/* ++ * Prototype: char *strrchr(const char *s, int c); ++ * Purpose : Returns a pointer to the first occurrence of the character c ++ * in the string s. Here "character" means "byte" - these functions ++ * do not work with wide or multi-byte characters. ++ */ ++ ++ .text ++ ++ENTRY(strchr) ++ move $r5, $r0 ! Setup return value ++ andi $r1, $r1, #0xff ! Wipe out useless bits ++loop: ++ lbi.bi $p0, [$r5], #1 ! Load the next byte ++ beqz $p0, exit ! Reach EOS (NULL), return NULL ++ bne $p0, $r1, loop ! Continue if != c ++ addi $r5, $r5, #-1 ! Found ++exit: ++ cmovz $r5, $p0, $p0 ! 
Return NULL if EOS (NULL) ++ move $r0, $r5 ++ ret ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/strncpy_from_user.S linux-3.4.110/arch/nds32/lib/strncpy_from_user.S +--- linux-3.4.110.orig/arch/nds32/lib/strncpy_from_user.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/strncpy_from_user.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * linux/arch/nds32/lib/strncpy_from_user.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++ ++ ++/* ++ * Copy a string from user space to kernel space. ++ * $r0 = dst, $r1 = src, $r2 = n (byte length) ++ * returns the number of characters copied (strlen of copied string), ++ * -EFAULT on exception, or "len" if we fill the whole buffer ++ */ ++ ++ .text ++ .align 4 ++ENTRY(__arch_strncpy_from_user) ++ move $p1, $r1 ! Record the start src addr ++loop: ++ addi $r2, $r2, #-1 ! Decrease n by 1 ++ bltz $r2, exit ! Exit if n < 0 ++USER( lbi.bi, $p0, [$r1], #1) ! Load the byte from src ++ sbi.bi $p0, [$r0], #1 ! Store the byte to dst ++ bnez $p0, loop ! Continue looping if terminator is not reached ++ addi $r1, $r1, #-1 ! Don't count the terminator ++exit: ++ sub $r0, $r1, $p1 ! Get copied count ++ ret ++ ++ .section .fixup,"ax" ++ .align 0 ++9001: xor $p0, $p0, $p0 ++ sb $p0, [$r0] ! Zero the buffer ++ move $r0, -EFAULT ! Return -EFAULT ++ ret ++ .previous ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/strnlen_user.S linux-3.4.110/arch/nds32/lib/strnlen_user.S +--- linux-3.4.110.orig/arch/nds32/lib/strnlen_user.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/strnlen_user.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,43 @@ ++/* ++ * linux/arch/nds32/lib/strnlen_user.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++ ++ .text ++ ++/* Prototype: unsigned long ___arch_strnlen_user(const char *str, long n) ++ * Purpose : get length of a string in user memory ++ * Params : str - address of string in user memory ++ * Returns : length of string *including terminator* ++ * or zero on exception, or n + 1 if too long ++ */ ++ .align 4 ++ENTRY(__arch_strnlen_user) ++ move $p0, $r0 ! Record the start addr ++ ! beqz $r0, exit ! Exit when Null address ++ beqz $r1, exit ! Exit when n = 0 ++loop: ++USER( lbi.bi, $p1, [$r0], #1) ! Load the next byte ++ beqz $p1, exit ! Exit when terminator is reached ++ addi $r1, $r1, #-1 ! Decrease n by 1 ++ bnez $r1, loop ! Continue looping if n != 0 ++ addi $r0, $r0, #1 ! Return n+1 if too long ++exit: ++ sub $r0, $r0, $p0 ! Get the counted length ++ ret ++ ++ .section .fixup,"ax" ++ .align 0 ++9001: move $r0, #0 ! 
Return 0 on exception ++ ret ++ .previous ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/strrchr.S linux-3.4.110/arch/nds32/lib/strrchr.S +--- linux-3.4.110.orig/arch/nds32/lib/strrchr.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/strrchr.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * linux/arch/nds32/lib/strrchr.S ++ * ++ * Copyright (C) 1995-2000 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * ASM optimised string functions ++ */ ++#include ++#include ++ ++ .text ++/* ++ * Prototype: char *strrchr(const char *s, int c); ++ * Purpose : Returns a pointer to the last occurrence of the character c ++ * in the string s. Here "character" means "byte" - these functions ++ * do not work with wide or multi-byte characters. ++ */ ++ .align 4 ++ENTRY(strrchr) ++ move $r5, #0 ++ beqz $r0, exit ++ andi $r1, $r1, #0xff ! Wipe out useless bits ++loop: ++ lbi $p0, [$r0] ! Load the next byte ++ xor $p1, $p0, $r1 ! Test if the byte == c ++ cmovz $r5, $r0, $p1 ! Save the current position ++ addi $r0, $r0, #1 ! Move on to the next byte ++ bnez $p0, loop ! Continue if not NULL (EOS) ++exit: ++ move $r0, $r5 ++ ret ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/uaccess.S linux-3.4.110/arch/nds32/lib/uaccess.S +--- linux-3.4.110.orig/arch/nds32/lib/uaccess.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/uaccess.S 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,159 @@ ++/* ++ * linux/arch/nds32/lib/uaccess.S ++ * ++ * Copyright (C) 1995, 1996,1997,1998 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Routines to block copy data to/from user memory ++ * These are highly optimised both for the 4k page size ++ * and for various alignments. ++ */ ++#include ++#include ++#include ++ ++ .text ++ ++//#define PAGE_SHIFT 12 ++ ++/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n) ++ * Purpose : copy a block to user memory from kernel memory ++ * Params : to - user memory ++ * : from - kernel memory ++ * : n - number of bytes to copy ++ * Returns : Number of bytes NOT copied. ++ */ ++ ++ENTRY(__arch_copy_to_user) ++ push $r0 ++ push $r2 ++ beqz $r2, ctu_exit ++ srli $p0, $r2, #2 ! $p0 = number of word to clear ++ andi $r2, $r2, #3 ! Bytes less than a word to copy ++ beqz $p0, byte_ctu ! Only less than a word to copy ++word_ctu: ++ lmw.bim $p1, [$r1], $p1 ! Load the next word ++USER( smw.bim,$p1, [$r0], $p1) ! Store the next word ++ addi $p0, $p0, #-1 ! Decrease word count ++ bnez $p0, word_ctu ! Continue looping to copy all words ++ beqz $r2, ctu_exit ! No left bytes to copy ++byte_ctu: ++ lbi.bi $p1, [$r1], #1 ! Load the next byte ++USER( sbi.bi, $p1, [$r0], #1) ! Store the next byte ++ addi $r2, $r2, #-1 ! Decrease byte count ++ bnez $r2, byte_ctu ! Continue looping to clear all left bytes ++ctu_exit: ++ move $r0, $r2 ! Set return value ++ pop $r2 ++ pop $r2 ! Pop saved $r0 to $r2 to not corrupt return value ++ ret ++ ++ .section .fixup,"ax" ++ .align 0 ++9001: ++ pop $p1 ! Original $r2, n ++ pop $p0 ! Original $r0, void *to ++ sub $r1, $r0, $p0 ! Bytes copied ++ sub $r2, $p1, $r1 ! 
Bytes left to copy ++ push $lp ++ move $r0, $p0 ++ bal __memzero ! Clean up the memory ++ pop $lp ++ move $r0, $r2 ++ ret ++ ++ .previous ++ ++/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n); ++ * Purpose : copy a block from user memory to kernel memory ++ * Params : to - kernel memory ++ * : from - user memory ++ * : n - number of bytes to copy ++ * Returns : Number of bytes NOT copied. ++ */ ++ ++ ++ENTRY(__arch_copy_from_user) ++ push $r1 ++ push $r2 ++ beqz $r2, cfu_exit ++ srli $p0, $r2, #2 ! $p0 = number of word to clear ++ andi $r2, $r2, #3 ! Bytes less than a word to copy ++ beqz $p0, byte_cfu ! Only less than a word to copy ++word_cfu: ++USER( lmw.bim,$p1, [$r1], $p1) ! Load the next word ++ smw.bim $p1, [$r0], $p1 ! Store the next word ++ addi $p0, $p0, #-1 ! Decrease word count ++ bnez $p0, word_cfu ! Continue looping to copy all words ++ beqz $r2, cfu_exit ! No left bytes to copy ++byte_cfu: ++USER( lbi.bi, $p1, [$r1], #1) ! Load the next byte ++ sbi.bi $p1, [$r0], #1 ! Store the next byte ++ addi $r2, $r2, #-1 ! Decrease byte count ++ bnez $r2, byte_cfu ! Continue looping to clear all left bytes ++cfu_exit: ++ move $r0, $r2 ! Set return value ++ pop $r2 ++ pop $r1 ++ ret ++ ++ .section .fixup,"ax" ++ .align 0 ++ /* ++ * We took an exception. $r0 contains a pointer to ++ * the byte not copied. ++ */ ++9001: ++ pop $p1 ! Original $r2, n ++ pop $p0 ! Original $r0, void *to ++ sub $r1, $r1, $p0 ! Bytes copied ++ sub $r2, $p1, $r1 ! Bytes left to copy ++ push $lp ++ bal __memzero ! Clean up the memory ++ pop $lp ++ move $r0, $r2 ++ ret ++ .previous ++ ++/* Prototype: int __arch_clear_user(void *addr, size_t sz) ++ * Purpose : clear some user memory ++ * Params : addr - user memory address to clear ++ * : sz - number of bytes to clear ++ * Returns : number of bytes NOT cleared ++ */ ++ .align 5 ++ENTRY(__arch_clear_user) ++ pushm $r0, $r1 ++ beqz $r1, clear_exit ++ xor $p1, $p1, $p1 ! Use $p1=0 to clear mem ++ srli $p0, $r1, #2 ! $p0 = number of word to clear ++ andi $r1, $r1, #3 ! Bytes less than a word to copy ++ beqz $p0, byte_clear ! Only less than a word to clear ++word_clear: ++USER( smw.bim,$p1, [$r0], $p1) ! Clear the word ++ addi $p0, $p0, #-1 ! Decrease word count ++ bnez $p0, word_clear ! Continue looping to clear all words ++ beqz $r1, clear_exit ! No left bytes to copy ++byte_clear: ++USER( sbi.bi, $p1, [$r0], #1) ! Clear the byte ++ addi $r1, $r1, #-1 ! Decrease byte count ++ bnez $r1, byte_clear ! Continue looping to clear all left bytes ++clear_exit: ++ move $r0, $r1 ! Set return value ++ pop $r1 ++ pop $r1 ! Pop saved $r0 to $r1 to not corrupt return value ++ ret ++ ++ .section .fixup,"ax" ++ .align 0 ++9001: ++ popm $p0, $p1 ! $p0 = original $r0, *addr, $p1 = original $r1, n ++ sub $p0, $r0, $p0 ! Bytes copied ++ sub $r0, $p1, $p0 ! 
Bytes left to copy ++ ret ++ .previous ++ +diff -Nur linux-3.4.110.orig/arch/nds32/lib/udivmod.c linux-3.4.110/arch/nds32/lib/udivmod.c +--- linux-3.4.110.orig/arch/nds32/lib/udivmod.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/udivmod.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,12 @@ ++extern unsigned long udivmodsi4(unsigned long num, unsigned long den, ++ int modwanted); ++ ++long __udivsi3(long a, long b) ++{ ++ return udivmodsi4(a, b, 0); ++} ++ ++long __umodsi3(long a, long b) ++{ ++ return udivmodsi4(a, b, 1); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/lib/udivmodsi4.c linux-3.4.110/arch/nds32/lib/udivmodsi4.c +--- linux-3.4.110.orig/arch/nds32/lib/udivmodsi4.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/lib/udivmodsi4.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,21 @@ ++unsigned long udivmodsi4(unsigned long num, unsigned long den, int modwanted) ++{ ++ unsigned long bit = 1; ++ unsigned long res = 0; ++ ++ while (den < num && bit && !(den & (1L << 31))) { ++ den <<= 1; ++ bit <<= 1; ++ } ++ while (bit) { ++ if (num >= den) { ++ num -= den; ++ res |= bit; ++ } ++ bit >>= 1; ++ den >>= 1; ++ } ++ if (modwanted) ++ return num; ++ return res; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/Makefile linux-3.4.110/arch/nds32/Makefile +--- linux-3.4.110.orig/arch/nds32/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/Makefile 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,100 @@ ++# ++# arch/nds32/Makefile ++# ++# This file is subject to the terms and conditions of the GNU General Public ++# License. See the file "COPYING" in the main directory of this archive ++# for more details. ++# ++# Copyright (C) 1995-2001 by Russell King ++ ++LDFLAGS_vmlinux :=-nostdlib --no-undefined -X ++OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S ++GZFLAGS :=-9 ++KBUILD_CFLAGS +=-pipe -mno-sched-prolog-epilog ++ ++# Do not use arch/nds32/defconfig - it's always outdated. ++# Select a platform tht is kept up-to-date ++KBUILD_DEFCONFIG := orca_defconfig ++ ++ifeq ($(CONFIG_FRAME_POINTER),y) ++KBUILD_CFLAGS +=-fno-omit-frame-pointer ++endif ++ ++comma = , ++ ++# This selects which instruction set is used. ++# Note that GCC does not numerically define an architecture version ++# macro, but instead defines a whole series of macros which makes ++# testing for a specific architecture or later rather impossible. ++arch-y +=-D__nds32__ ++gcc_ver :=$(shell $(CC) -E -dM -xc /dev/null | grep __VERSION__ | sed 's/\#define __VERSION__ //') ++ifeq ($(shell expr `echo $(gcc_ver)` \>= 4.9.2 ), 1) ++arch-y += \ ++ $(shell $(CC) -E -dM -xc /dev/null | \ ++ grep -o -m1 NDS32_EXT_FPU_SP | \ ++ sed -e 's/NDS32_EXT_FPU_SP/-mno-ext-fpu-sp -mfloat-abi=soft/') \ ++ $(shell $(CC) -E -dM -xc /dev/null | \ ++ grep -o -m1 NDS32_EXT_FPU_DP | \ ++ sed -e 's/NDS32_EXT_FPU_DP/-mno-ext-fpu-dp -mfloat-abi=soft/') ++tune-y =-D__OPTIMIZE__ -mcmodel=large -D__ARCH_WANT_SYS_WAITPID ++else ++$(shell echo $(__VERSION__)) ++arch-y += $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_SP | \ ++ sed -e 's/NDS32_EXT_FPU_SP/-mno-ext-fpu-sp/') \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_EXT_FPU_DP | \ ++ sed -e 's/NDS32_EXT_FPU_DP/-mno-ext-fpu-dp/') \ ++ $(shell $(CC) -E -dM -xc /dev/null | grep -o -m1 NDS32_ABI | \ ++ sed -e 's/NDS32_ABI/-mabi=2/') ++tune-y =-D__OPTIMIZE__ -G0 -D__ARCH_WANT_SYS_WAITPID -D_GCC444 ++endif ++ ++# This is a workaround for FUNCTION_TRACER because v3push will push $fp, $gp and $lp. 
++ifdef CONFIG_FUNCTION_TRACER ++arch-y += -mno-v3push ++endif ++# This selects how we optimise for the processor. ++# Need -Unds32 for gcc < 3.x ++CHECKFLAGS += -D__nds32__ ++ ++KBUILD_CFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) -Unds32 -DSTRICT_MM_TYPECHECKS # for c-style checking for page.h ++KBUILD_AFLAGS +=$(AFLAGS_ABI) $(arch-y) $(tune-y) ++ ++#Default value ++head-y := arch/nds32/kernel/head.o arch/nds32/kernel/init_task.o ++textaddr-y := 0xC000C000 ++ ++TEXTADDR := $(textaddr-y) ++ ++export TEXTADDR DATAADDR GZFLAGS ++ ++ ++# If we have a machine-specific directory, then include it in the build. ++core-y += arch/nds32/kernel/ arch/nds32/mm/ ++core-y += arch/nds32/platforms/ ++core-$(CONFIG_FPU) += arch/nds32/math-emu/ ++ ++drivers-$(CONFIG_OPROFILE) += arch/nds32/oprofile/ ++ ++libs-y += arch/nds32/lib/ ++ ++boot := arch/nds32/boot ++ ++.PHONY: FORCE ++ ++Image: vmlinux ++ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ ++ ++CLEAN_FILES += include/asm-nds32/constants.h* ++ ++# We use MRPROPER_FILES and CLEAN_FILES now ++archclean: ++ $(Q)$(MAKE) $(clean)=$(boot) ++ ++.PHONY: arch/nds32/kernel/asm-offsets.s ++arch/nds32/kernel/asm-offsets.s: arch/nds32/kernel/asm-offsets.c ++ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) ++ ++ ++define archhelp ++ echo ' Image - kernel image (arch/$(ARCH)/boot/Image)' ++endef +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_add.c linux-3.4.110/arch/nds32/math-emu/dp_add.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_add.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_add.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,179 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ * ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y) ++{ ++ COMPXDP; ++ COMPYDP; ++ ++ EXPLODEXDP; ++ EXPLODEYDP; ++ ++ CLEARCX; ++ ++ FLUSHXDP; ++ FLUSHYDP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ if (xs == ys) ++ return x; ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return x; ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ if (xs == ys) ++ return x; ++ else ++ return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return x; ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ DPDNORMX; ++ ++ /* FALL THROUGH */ ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ DPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ DPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ assert(xm & DP_HIDDEN_BIT); ++ assert(ym & DP_HIDDEN_BIT); ++ ++ /* provide guard,round and stick bit space */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ if (xe > ye) { ++ /* have to shift y fraction right to align ++ */ ++ int s = xe - ye; ++ ym = XDPSRS(ym, s); ++ ye += s; ++ } else if (ye > xe) { ++ /* have to shift x fraction right to align ++ */ ++ int s = ye - xe; ++ xm = XDPSRS(xm, s); ++ xe += s; ++ } ++ assert(xe == ye); ++ assert(xe <= DP_EMAX); ++ ++ if (xs == ys) { ++ /* generate 28 bit result of adding two 27 bit numbers ++ * leaving result in xm,xs,xe ++ */ ++ xm = xm + ym; ++ xe = xe; ++ xs = xs; ++ ++ if (xm >> 
(DP_MBITS + 1 + 3)) { /* carry out */ ++ xm = XDPSRS1(xm); ++ xe++; ++ } ++ } else { ++ if (xm >= ym) { ++ xm = xm - ym; ++ xe = xe; ++ xs = xs; ++ } else { ++ xm = ym - xm; ++ xe = xe; ++ xs = ys; ++ } ++ if (xm == 0) ++ return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ /* normalize to rounding precision */ ++ while ((xm >> (DP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ ++ } ++ DPNORMRET2(xs, xe, xm, "add", x, y); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_cmp.c linux-3.4.110/arch/nds32/math-emu/dp_cmp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_cmp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_cmp.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,66 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cmp, int sig) ++{ ++ COMPXDP; ++ COMPYDP; ++ ++ EXPLODEXDP; ++ EXPLODEYDP; ++ FLUSHXDP; ++ FLUSHYDP; ++ CLEARCX; /* Even clear inexact flag here */ ++ ++ if (ieee754dp_isnan(x) || ieee754dp_isnan(y)) { ++ if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) ++ SETCX(IEEE754_INVALID_OPERATION); ++ if (cmp & IEEE754_CUN) ++ return 1; ++ if (cmp & (IEEE754_CLT | IEEE754_CGT)) { ++ if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) ++ return ieee754si_xcpt(0, "fcmpf", x); ++ } ++ return 0; ++ } else { ++ s64 vx = x.bits; ++ s64 vy = y.bits; ++ ++ if (vx < 0) ++ vx = -vx ^ DP_SIGN_BIT; ++ if (vy < 0) ++ vy = -vy ^ DP_SIGN_BIT; ++ ++ if (vx < vy) ++ return (cmp & IEEE754_CLT) != 0; ++ else if (vx == vy) ++ return (cmp & IEEE754_CEQ) != 0; ++ else ++ return (cmp & IEEE754_CGT) != 0; ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_div.c linux-3.4.110/arch/nds32/math-emu/dp_div.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_div.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_div.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,155 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y) ++{ ++ COMPXDP; ++ COMPYDP; ++ ++ EXPLODEXDP; ++ EXPLODEYDP; ++ ++ CLEARCX; ++ ++ FLUSHXDP; ++ FLUSHYDP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ return ieee754dp_zero(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return ieee754dp_inf(xs ^ ys); ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ SETCX(IEEE754_ZERO_DIVIDE); ++ return ieee754dp_xcpt(ieee754dp_inf(xs ^ ys), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ return ieee754dp_zero(xs == ys ? 
0 : 1); ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ DPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ DPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ DPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ assert(xm & DP_HIDDEN_BIT); ++ assert(ym & DP_HIDDEN_BIT); ++ ++ /* provide rounding space */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ { ++ /* now the dirty work */ ++ ++ u64 rm = 0; ++ int re = xe - ye; ++ u64 bm; ++ ++ for (bm = DP_MBIT(DP_MBITS + 2); bm; bm >>= 1) { ++ if (xm >= ym) { ++ xm -= ym; ++ rm |= bm; ++ if (xm == 0) ++ break; ++ } ++ xm <<= 1; ++ } ++ rm <<= 1; ++ if (xm) ++ rm |= 1; /* have remainder, set sticky */ ++ ++ assert(rm); ++ ++ /* normalise rm to rounding precision ? ++ */ ++ while ((rm >> (DP_MBITS + 3)) == 0) { ++ rm <<= 1; ++ re--; ++ } ++ ++ DPNORMRET2(xs == ys ? 0 : 1, re, rm, "div", x, y); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_fint.c linux-3.4.110/arch/nds32/math-emu/dp_fint.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_fint.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_fint.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,79 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_fint(int x) ++{ ++ u64 xm; ++ int xe; ++ int xs; ++ ++ CLEARCX; ++ ++ if (x == 0) ++ return ieee754dp_zero(0); ++ if (x == 1 || x == -1) ++ return ieee754dp_one(x < 0); ++ if (x == 10 || x == -10) ++ return ieee754dp_ten(x < 0); ++ ++ xs = (x < 0); ++ if (xs) { ++ if (x == (1 << 31)) ++ xm = ((unsigned)1 << 31); /* max neg can't be safely negated */ ++ else ++ xm = -x; ++ } else { ++ xm = x; ++ } ++ ++#if 1 ++ /* normalize - result can never be inexact or overflow */ ++ xe = DP_MBITS; ++ while ((xm >> DP_MBITS) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); ++#else ++ /* normalize */ ++ xe = DP_MBITS + 3; ++ while ((xm >> (DP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ DPNORMRET1(xs, xe, xm, "fint", x); ++#endif ++} ++ ++ieee754dp ieee754dp_funs(unsigned int u) ++{ ++ if ((int)u < 0) ++ return ieee754dp_add(ieee754dp_1e31(), ++ ieee754dp_fint(u & ~(1 << 31))); ++ return ieee754dp_fint(u); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_flong.c linux-3.4.110/arch/nds32/math-emu/dp_flong.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_flong.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_flong.c 2016-04-07 10:20:50.958081643 +0200 +@@ -0,0 +1,77 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_flong(s64 x) ++{ ++ u64 xm; ++ int xe; ++ int xs; ++ ++ CLEARCX; ++ ++ if (x == 0) ++ return ieee754dp_zero(0); ++ if (x == 1 || x == -1) ++ return ieee754dp_one(x < 0); ++ if (x == 10 || x == -10) ++ return ieee754dp_ten(x < 0); ++ ++ xs = (x < 0); ++ if (xs) { ++ if (x == (1ULL << 63)) ++ xm = (1ULL << 63); /* max neg can't be safely negated */ ++ else ++ xm = -x; ++ } else { ++ xm = x; ++ } ++ ++ /* normalize */ ++ xe = DP_MBITS + 3; ++ if (xm >> (DP_MBITS + 1 + 3)) { ++ /* shunt out overflow bits */ ++ while (xm >> (DP_MBITS + 1 + 3)) { ++ XDPSRSX1(); ++ } ++ } else { ++ /* normalize in grs extended double precision */ ++ while ((xm >> (DP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ } ++ DPNORMRET1(xs, xe, xm, "dp_flong", x); ++} ++ ++ieee754dp ieee754dp_fulong(u64 u) ++{ ++ if ((s64) u < 0) ++ return ieee754dp_add(ieee754dp_1e63(), ++ ieee754dp_flong(u & ~(1ULL << 63))); ++ return ieee754dp_flong(u); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_frexp.c linux-3.4.110/arch/nds32/math-emu/dp_frexp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_frexp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_frexp.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,52 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++/* close to ieeep754dp_logb ++*/ ++ieee754dp ieee754dp_frexp(ieee754dp x, int *eptr) ++{ ++ COMPXDP; ++ CLEARCX; ++ EXPLODEXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ *eptr = 0; ++ return x; ++ case IEEE754_CLASS_DNORM: ++ DPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ *eptr = xe + 1; ++ return builddp(xs, -1 + DP_EBIAS, xm & ~DP_HIDDEN_BIT); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_fsp.c linux-3.4.110/arch/nds32/math-emu/dp_fsp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_fsp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_fsp.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,71 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. 
++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_fsp(ieee754sp x) ++{ ++ COMPXSP; ++ ++ EXPLODEXSP; ++ ++ CLEARCX; ++ ++ FLUSHXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "fsp"); ++ case IEEE754_CLASS_QNAN: ++ return ieee754dp_nanxcpt(builddp(xs, ++ DP_EMAX + 1 + DP_EBIAS, ++ ((u64) xm ++ << (DP_MBITS - ++ SP_MBITS))), "fsp", x); ++ case IEEE754_CLASS_INF: ++ return ieee754dp_inf(xs); ++ case IEEE754_CLASS_ZERO: ++ return ieee754dp_zero(xs); ++ case IEEE754_CLASS_DNORM: ++ /* normalize */ ++ while ((xm >> SP_MBITS) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ ++ /* CANT possibly overflow,underflow, or need rounding ++ */ ++ ++ /* drop the hidden bit */ ++ xm &= ~SP_HIDDEN_BIT; ++ ++ return builddp(xs, xe + DP_EBIAS, (u64) xm << (DP_MBITS - SP_MBITS)); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_logb.c linux-3.4.110/arch/nds32/math-emu/dp_logb.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_logb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_logb.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,53 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_logb(ieee754dp x) ++{ ++ COMPXDP; ++ ++ CLEARCX; ++ ++ EXPLODEXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ return ieee754dp_nanxcpt(x, "logb", x); ++ case IEEE754_CLASS_QNAN: ++ return x; ++ case IEEE754_CLASS_INF: ++ return ieee754dp_inf(0); ++ case IEEE754_CLASS_ZERO: ++ return ieee754dp_inf(1); ++ case IEEE754_CLASS_DNORM: ++ DPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ return ieee754dp_fint(xe); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_modf.c linux-3.4.110/arch/nds32/math-emu/dp_modf.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_modf.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_modf.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,79 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++/* modf function is always exact for a finite number ++*/ ++ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp * ip) ++{ ++ COMPXDP; ++ ++ CLEARCX; ++ ++ EXPLODEXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ *ip = x; ++ return x; ++ case IEEE754_CLASS_DNORM: ++ /* far to small */ ++ *ip = ieee754dp_zero(xs); ++ return x; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe < 0) { ++ *ip = ieee754dp_zero(xs); ++ return x; ++ } ++ if (xe >= DP_MBITS) { ++ *ip = x; ++ return ieee754dp_zero(xs); ++ } ++ /* generate ipart mantissa by clearing bottom bits ++ */ ++ *ip = builddp(xs, xe + DP_EBIAS, ++ ((xm >> (DP_MBITS - xe)) << (DP_MBITS - xe)) & ++ ~DP_HIDDEN_BIT); ++ ++ /* generate fpart mantissa by clearing top bits ++ * and normalizing (must be able to normalize) ++ */ ++ xm = (xm << (64 - (DP_MBITS - xe))) >> (64 - (DP_MBITS - xe)); ++ if (xm == 0) ++ return ieee754dp_zero(xs); ++ ++ while ((xm >> DP_MBITS) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_mul.c linux-3.4.110/arch/nds32/math-emu/dp_mul.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_mul.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_mul.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,170 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. 
++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y) ++{ ++ COMPXDP; ++ COMPYDP; ++ ++ EXPLODEXDP; ++ EXPLODEYDP; ++ ++ CLEARCX; ++ ++ FLUSHXDP; ++ FLUSHYDP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "mul", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ return ieee754dp_inf(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return ieee754dp_zero(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ DPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ DPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ DPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ /* rm = xm * ym, re = 
xe+ye basicly */ ++ assert(xm & DP_HIDDEN_BIT); ++ assert(ym & DP_HIDDEN_BIT); ++ { ++ int re = xe + ye; ++ int rs = xs ^ ys; ++ u64 rm; ++ ++ /* shunt to top of word */ ++ xm <<= 64 - (DP_MBITS + 1); ++ ym <<= 64 - (DP_MBITS + 1); ++ ++ /* multiply 32bits xm,ym to give high 32bits rm with stickness ++ */ ++ ++ /* 32 * 32 => 64 */ ++#define DPXMULT(x, y) ((u64)(x) * (u64)y) ++ ++ { ++ unsigned lxm = xm; ++ unsigned hxm = xm >> 32; ++ unsigned lym = ym; ++ unsigned hym = ym >> 32; ++ u64 lrm; ++ u64 hrm; ++ ++ lrm = DPXMULT(lxm, lym); ++ hrm = DPXMULT(hxm, hym); ++ ++ { ++ u64 t = DPXMULT(lxm, hym); ++ { ++ u64 at = lrm + (t << 32); ++ hrm += at < lrm; ++ lrm = at; ++ } ++ hrm = hrm + (t >> 32); ++ } ++ ++ { ++ u64 t = DPXMULT(hxm, lym); ++ { ++ u64 at = lrm + (t << 32); ++ hrm += at < lrm; ++ lrm = at; ++ } ++ hrm = hrm + (t >> 32); ++ } ++ rm = hrm | (lrm != 0); ++ } ++ ++ /* ++ * sticky shift down to normal rounding precision ++ */ ++ if ((s64) rm < 0) { ++ rm = (rm >> (64 - (DP_MBITS + 1 + 3))) | ++ ((rm << (DP_MBITS + 1 + 3)) != 0); ++ re++; ++ } else { ++ rm = (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) | ++ ((rm << (DP_MBITS + 1 + 3 + 1)) != 0); ++ } ++ assert(rm & (DP_HIDDEN_BIT << 3)); ++ DPNORMRET2(rs, re, rm, "mul", x, y); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_scalb.c linux-3.4.110/arch/nds32/math-emu/dp_scalb.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_scalb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_scalb.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,56 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
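/*
 * A standalone sketch of the multiplication technique used in
 * ieee754dp_mul() above: a 64x64 -> 128 bit product assembled from four
 * 32x32 -> 64 partial products, where only the high 64 bits are kept and
 * any non-zero low bits are folded into a single "sticky" bit for later
 * rounding.  In the emulator both mantissas are first shifted to the top
 * of the 64-bit word so the high half alone carries the significant bits.
 * This is an illustration, not part of the patch; mul_high_sticky is an
 * invented name.
 */
#include <stdint.h>

static uint64_t mul_high_sticky(uint64_t a, uint64_t b)
{
	uint64_t al = (uint32_t)a, ah = a >> 32;
	uint64_t bl = (uint32_t)b, bh = b >> 32;
	uint64_t lo = al * bl;
	uint64_t hi = ah * bh;
	uint64_t mid1 = al * bh;
	uint64_t mid2 = ah * bl;
	uint64_t t;

	t = lo + (mid1 << 32);
	hi += (t < lo);			/* carry out of the low word */
	lo = t;
	hi += mid1 >> 32;

	t = lo + (mid2 << 32);
	hi += (t < lo);
	lo = t;
	hi += mid2 >> 32;

	return hi | (lo != 0);		/* sticky: remember any discarded low bits */
}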
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_scalb(ieee754dp x, int n) ++{ ++ COMPXDP; ++ ++ CLEARCX; ++ ++ EXPLODEXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ return ieee754dp_nanxcpt(x, "scalb", x, n); ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ return x; ++ case IEEE754_CLASS_DNORM: ++ DPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ DPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); ++} ++ ++ieee754dp ieee754dp_ldexp(ieee754dp x, int n) ++{ ++ return ieee754dp_scalb(x, n); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_simple.c linux-3.4.110/arch/nds32/math-emu/dp_simple.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_simple.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_simple.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,87 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++int ieee754dp_finite(ieee754dp x) ++{ ++ return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; ++} ++ ++ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y) ++{ ++ CLEARCX; ++ DPSIGN(x) = DPSIGN(y); ++ return x; ++} ++ ++ieee754dp ieee754dp_neg(ieee754dp x) ++{ ++ COMPXDP; ++ ++ EXPLODEXDP; ++ CLEARCX; ++ FLUSHXDP; ++ ++ /* ++ * Invert the sign ALWAYS to prevent an endless recursion on ++ * pow() in libc. 
++ */ ++ /* quick fix up */ ++ DPSIGN(x) ^= 1; ++ ++ if (xc == IEEE754_CLASS_SNAN) { ++ ieee754dp y = ieee754dp_indef(); ++ SETCX(IEEE754_INVALID_OPERATION); ++ DPSIGN(y) = DPSIGN(x); ++ return ieee754dp_nanxcpt(y, "neg"); ++ } ++ ++ if (ieee754dp_isnan(x)) /* but not infinity */ ++ return ieee754dp_nanxcpt(x, "neg", x); ++ return x; ++} ++ ++ieee754dp ieee754dp_abs(ieee754dp x) ++{ ++ COMPXDP; ++ ++ EXPLODEXDP; ++ CLEARCX; ++ FLUSHXDP; ++ ++ if (xc == IEEE754_CLASS_SNAN) { ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "neg"); ++ } ++ ++ if (ieee754dp_isnan(x)) /* but not infinity */ ++ return ieee754dp_nanxcpt(x, "abs", x); ++ ++ /* quick fix up */ ++ DPSIGN(x) = 0; ++ return x; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_sqrt.c linux-3.4.110/arch/nds32/math-emu/dp_sqrt.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_sqrt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_sqrt.c 2016-04-07 10:20:50.974082262 +0200 +@@ -0,0 +1,164 @@ ++/* IEEE754 floating point arithmetic ++ * double precision square root ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++static const unsigned table[] = { ++ 0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, ++ 29598, 36145, 43202, 50740, 58733, 67158, 75992, ++ 85215, 83599, 71378, 60428, 50647, 41945, 34246, ++ 27478, 21581, 16499, 12183, 8588, 5674, 3403, ++ 1742, 661, 130 ++}; ++ ++ieee754dp ieee754dp_sqrt(ieee754dp x) ++{ ++ struct _ieee754_csr oldcsr; ++ ieee754dp y, z, t; ++ unsigned scalx, yh; ++ COMPXDP; ++ ++ EXPLODEXDP; ++ CLEARCX; ++ FLUSHXDP; ++ ++ /* x == INF or NAN? 
*/ ++ switch (xc) { ++ case IEEE754_CLASS_QNAN: ++ /* sqrt(Nan) = Nan */ ++ return ieee754dp_nanxcpt(x, "sqrt"); ++ case IEEE754_CLASS_SNAN: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); ++ case IEEE754_CLASS_ZERO: ++ /* sqrt(0) = 0 */ ++ return x; ++ case IEEE754_CLASS_INF: ++ if (xs) { ++ /* sqrt(-Inf) = Nan */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); ++ } ++ /* sqrt(+Inf) = Inf */ ++ return x; ++ case IEEE754_CLASS_DNORM: ++ DPDNORMX; ++ /* fall through */ ++ case IEEE754_CLASS_NORM: ++ if (xs) { ++ /* sqrt(-x) = Nan */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); ++ } ++ break; ++ } ++ ++ /* save old csr; switch off INX enable & flag; set RN rounding */ ++ oldcsr = ieee754_csr; ++ ieee754_csr.mx &= ~IEEE754_INEXACT; ++ ieee754_csr.sx &= ~IEEE754_INEXACT; ++ ieee754_csr.rm = IEEE754_RN; ++ ++ /* adjust exponent to prevent overflow */ ++ scalx = 0; ++ if (xe > 512) { /* x > 2**-512? */ ++ xe -= 512; /* x = x / 2**512 */ ++ scalx += 256; ++ } else if (xe < -512) { /* x < 2**-512? */ ++ xe += 512; /* x = x * 2**512 */ ++ scalx -= 256; ++ } ++ ++ y = x = builddp(0, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); ++ ++ /* magic initial approximation to almost 8 sig. bits */ ++ yh = y.bits >> 32; ++ yh = (yh >> 1) + 0x1ff80000; ++ yh = yh - table[(yh >> 15) & 31]; ++ y.bits = ((u64) yh << 32) | (y.bits & 0xffffffff); ++ ++ /* Heron's rule once with correction to improve to ~18 sig. bits */ ++ /* t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0; */ ++ t = ieee754dp_div(x, y); ++ y = ieee754dp_add(y, t); ++ y.bits -= 0x0010000600000000LL; ++ y.bits &= 0xffffffff00000000LL; ++ ++ /* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */ ++ /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */ ++ z = t = ieee754dp_mul(y, y); ++ t.parts.bexp += 0x001; ++ t = ieee754dp_add(t, z); ++ z = ieee754dp_mul(ieee754dp_sub(x, z), y); ++ ++ /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */ ++ t = ieee754dp_div(z, ieee754dp_add(t, x)); ++ t.parts.bexp += 0x001; ++ y = ieee754dp_add(y, t); ++ ++ /* twiddle last bit to force y correctly rounded */ ++ ++ /* set RZ, clear INEX flag */ ++ ieee754_csr.rm = IEEE754_RZ; ++ ieee754_csr.sx &= ~IEEE754_INEXACT; ++ ++ /* t=x/y; ...chopped quotient, possibly inexact */ ++ t = ieee754dp_div(x, y); ++ ++ if (ieee754_csr.sx & IEEE754_INEXACT || t.bits != y.bits) { ++ ++ if (!(ieee754_csr.sx & IEEE754_INEXACT)) ++ /* t = t-ulp */ ++ t.bits -= 1; ++ ++ /* add inexact to result status */ ++ oldcsr.cx |= IEEE754_INEXACT; ++ oldcsr.sx |= IEEE754_INEXACT; ++ ++ switch (oldcsr.rm) { ++ case IEEE754_RP: ++ y.bits += 1; ++ /* drop through */ ++ case IEEE754_RN: ++ t.bits += 1; ++ break; ++ } ++ ++ /* y=y+t; ...chopped sum */ ++ y = ieee754dp_add(y, t); ++ ++ /* adjust scalx for correctly rounded sqrt(x) */ ++ scalx -= 1; ++ } ++ ++ /* py[n0]=py[n0]+scalx; ...scale back y */ ++ y.parts.bexp += scalx; ++ ++ /* restore rounding mode, possibly set inexact */ ++ ieee754_csr = oldcsr; ++ ++ return y; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_sub.c linux-3.4.110/arch/nds32/math-emu/dp_sub.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_sub.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_sub.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,187 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 
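/*
 * A standalone sketch of the refinement loop behind ieee754dp_sqrt()
 * above: Heron's (Newton's) iteration y' = (y + x/y) / 2, which roughly
 * doubles the number of correct bits per step.  The emulator seeds y with
 * a table-corrected bit pattern so that a couple of steps reach double
 * precision and then fixes up the last bit by hand; this sketch only
 * shows the iteration itself, using hardware doubles rather than the
 * emulator's bit-level types, and heron_sqrt is an invented name.
 */
static double heron_sqrt(double x, double y0, int steps)
{
	double y = y0;			/* y0: initial approximation, e.g. from a table */

	while (steps--)
		y = 0.5 * (y + x / y);
	return y;
}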
Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y) ++{ ++ COMPXDP; ++ COMPYDP; ++ ++ EXPLODEXDP; ++ EXPLODEYDP; ++ ++ CLEARCX; ++ ++ FLUSHXDP; ++ FLUSHYDP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ if (xs != ys) ++ return x; ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ return ieee754dp_inf(ys ^ 1); ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return x; ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ if (xs != ys) ++ return x; ++ else ++ return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return x; ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ /* quick fix up */ ++ DPSIGN(y) ^= 1; ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ DPDNORMX; ++ /* FAAL THOROUGH */ ++ ++ case 
CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ /* normalize ym,ye */ ++ DPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ /* normalize xm,xe */ ++ DPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ /* flip sign of y and handle as add */ ++ ys ^= 1; ++ ++ assert(xm & DP_HIDDEN_BIT); ++ assert(ym & DP_HIDDEN_BIT); ++ ++ /* provide guard,round and stick bit dpace */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ if (xe > ye) { ++ /* have to shift y fraction right to align ++ */ ++ int s = xe - ye; ++ ym = XDPSRS(ym, s); ++ ye += s; ++ } else if (ye > xe) { ++ /* have to shift x fraction right to align ++ */ ++ int s = ye - xe; ++ xm = XDPSRS(xm, s); ++ xe += s; ++ } ++ assert(xe == ye); ++ assert(xe <= DP_EMAX); ++ ++ if (xs == ys) { ++ /* generate 28 bit result of adding two 27 bit numbers ++ */ ++ xm = xm + ym; ++ xe = xe; ++ xs = xs; ++ ++ if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ ++ xm = XDPSRS1(xm); /* shift preserving sticky */ ++ xe++; ++ } ++ } else { ++ if (xm >= ym) { ++ xm = xm - ym; ++ xe = xe; ++ xs = xs; ++ } else { ++ xm = ym - xm; ++ xe = xe; ++ xs = ys; ++ } ++ if (xm == 0) { ++ if (ieee754_csr.rm == IEEE754_RD) ++ return ieee754dp_zero(1); /* round negative inf. => sign = -1 */ ++ else ++ return ieee754dp_zero(0); /* other round modes => sign = 1 */ ++ } ++ ++ /* normalize to rounding precision ++ */ ++ while ((xm >> (DP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ } ++ DPNORMRET2(xs, xe, xm, "sub", x, y); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_tint.c linux-3.4.110/arch/nds32/math-emu/dp_tint.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_tint.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_tint.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,121 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include ++#include "ieee754dp.h" ++ ++int ieee754dp_tint(ieee754dp x) ++{ ++ COMPXDP; ++ ++ CLEARCX; ++ ++ EXPLODEXDP; ++ FLUSHXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); ++ case IEEE754_CLASS_ZERO: ++ return 0; ++ case IEEE754_CLASS_DNORM: ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe > 31) { ++ /* Set invalid. 
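/*
 * A small sketch of the "sticky" right shift (XDPSRS in the sources
 * above) used when aligning operands in ieee754dp_sub()/add: bits shifted
 * out are not discarded outright, their OR is folded into the least
 * significant bit so that later rounding still sees that the value was
 * inexact.  Illustration only, not part of the patch; the function name
 * is invented and the exact macro in ieee754int.h may differ in detail.
 */
#include <stdint.h>

static uint64_t sticky_shift_right(uint64_t m, int s)
{
	if (s <= 0)
		return m;
	if (s >= 64)
		return m != 0;			/* everything shifted out */
	return (m >> s) | ((m << (64 - s)) != 0);
}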
We will only use overflow for floating ++ point overflow */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); ++ } ++ /* oh gawd */ ++ if (xe > DP_MBITS) { ++ xm <<= xe - DP_MBITS; ++ } else if (xe < DP_MBITS) { ++ u64 residue; ++ int round; ++ int sticky; ++ int odd; ++ ++ if (xe < -1) { ++ residue = xm; ++ round = 0; ++ sticky = residue != 0; ++ xm = 0; ++ } else { ++ residue = xm << (64 - DP_MBITS + xe); ++ round = (residue >> 63) != 0; ++ sticky = (residue << 1) != 0; ++ xm >>= DP_MBITS - xe; ++ } ++ /* Note: At this point upper 32 bits of xm are guaranteed ++ to be zero */ ++ odd = (xm & 0x1) != 0x0; ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ if (round && (sticky || odd)) ++ xm++; ++ break; ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if ((round || sticky) && !xs) ++ xm++; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if ((round || sticky) && xs) ++ xm++; ++ break; ++ } ++ /* look for valid corner case 0x80000000 */ ++ if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) { ++ /* This can happen after rounding */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); ++ } ++ if (round || sticky) ++ SETCX(IEEE754_INEXACT); ++ } ++ if (xs) ++ return -xm; ++ else ++ return xm; ++} ++ ++unsigned int ieee754dp_tuns(ieee754dp x) ++{ ++ ieee754dp hb = ieee754dp_1e31(); ++ ++ /* what if x < 0 ?? */ ++ if (ieee754dp_lt(x, hb)) ++ return (unsigned)ieee754dp_tint(x); ++ ++ return (unsigned)ieee754dp_tint(ieee754dp_sub(x, hb)) | ++ ((unsigned)1 << 31); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/dp_tlong.c linux-3.4.110/arch/nds32/math-emu/dp_tlong.c +--- linux-3.4.110.orig/arch/nds32/math-emu/dp_tlong.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/dp_tlong.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,123 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++s64 ieee754dp_tlong(ieee754dp x) ++{ ++ COMPXDP; ++ ++ CLEARCX; ++ ++ EXPLODEXDP; ++ FLUSHXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); ++ case IEEE754_CLASS_ZERO: ++ return 0; ++ case IEEE754_CLASS_DNORM: ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe >= 63) { ++ /* look for valid corner case */ ++ if (xe == 63 && xs && xm == DP_HIDDEN_BIT) ++ return -0x8000000000000000LL; ++ /* Set invalid. 
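/*
 * A standalone sketch of the rounding decision used by ieee754dp_tint()
 * and ieee754dp_tlong() above, reduced to its core: "round" is the first
 * discarded bit, "sticky" is the OR of all lower discarded bits, and the
 * magnitude is bumped according to the rounding mode.  The enum names
 * mirror the IEEE754_R* constants but are invented for this illustration,
 * which is not part of the patch.
 */
enum rmode { RND_NEAREST, RND_ZERO, RND_UP, RND_DOWN };

static unsigned long long round_mag(unsigned long long mag, int round,
				    int sticky, int negative, enum rmode rm)
{
	int odd = mag & 1;

	switch (rm) {
	case RND_NEAREST:		/* nearest, ties to even */
		if (round && (sticky || odd))
			mag++;
		break;
	case RND_ZERO:			/* truncate */
		break;
	case RND_UP:			/* toward +infinity */
		if ((round || sticky) && !negative)
			mag++;
		break;
	case RND_DOWN:			/* toward -infinity */
		if ((round || sticky) && negative)
			mag++;
		break;
	}
	return mag;
}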
We will only use overflow for floating ++ point overflow */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); ++ } ++ /* oh gawd */ ++ if (xe > DP_MBITS) { ++ xm <<= xe - DP_MBITS; ++ } else if (xe < DP_MBITS) { ++ u64 residue; ++ int round; ++ int sticky; ++ int odd; ++ ++ if (xe < -1) { ++ residue = xm; ++ round = 0; ++ sticky = residue != 0; ++ xm = 0; ++ } else { ++ /* Shifting a u64 64 times does not work, ++ * so we do it in two steps. Be aware that xe ++ * may be -1 */ ++ residue = xm << (xe + 1); ++ residue <<= 63 - DP_MBITS; ++ round = (residue >> 63) != 0; ++ sticky = (residue << 1) != 0; ++ xm >>= DP_MBITS - xe; ++ } ++ odd = (xm & 0x1) != 0x0; ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ if (round && (sticky || odd)) ++ xm++; ++ break; ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if ((round || sticky) && !xs) ++ xm++; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if ((round || sticky) && xs) ++ xm++; ++ break; ++ } ++ if ((xm >> 63) != 0) { ++ /* This can happen after rounding */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); ++ } ++ if (round || sticky) ++ SETCX(IEEE754_INEXACT); ++ } ++ if (xs) ++ return -xm; ++ else ++ return xm; ++} ++ ++u64 ieee754dp_tulong(ieee754dp x) ++{ ++ ieee754dp hb = ieee754dp_1e63(); ++ ++ /* what if x < 0 ?? */ ++ if (ieee754dp_lt(x, hb)) ++ return (u64) ieee754dp_tlong(x); ++ ++ return (u64) ieee754dp_tlong(ieee754dp_sub(x, hb)) | (1ULL << 63); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/fpuemu.c linux-3.4.110/arch/nds32/math-emu/fpuemu.c +--- linux-3.4.110.orig/arch/nds32/math-emu/fpuemu.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/fpuemu.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,565 @@ ++/* ++ * linux/arch/nds32/math-emu/fpuemu.c: a nds32 coprocessor (fpu) instruction emulator ++ * ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * A complete emulator for MIPS coprocessor 1 instructions. This is ++ * required for #float(switch) or #float(trap), where it catches all ++ * COP1 instructions via the "CoProcessor Unusable" exception. ++ * ++ * More surprisingly it is also required for #float(ieee), to help out ++ * the hardware fpu at the boundaries of the IEEE-754 representation ++ * (denormalised values, infinities, underflow, etc). It is made ++ * quite nasty because emulation of some non-COP1 instructions is ++ * required, e.g. in branch delay slots. 
++ * ++ * Note if you know that you won't have an fpu, then you'll get much ++ * better performance by compiling with -msoft-float! ++ */ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include "ieee754.h" ++#include "insn.h" ++#include "fpu_emulator.h" ++ ++/* Function which emulates a floating point instruction. */ ++ ++static int fpu_emu(struct pt_regs *, struct fpu_struct *, unsigned long); ++ ++/* Further private data for which no space exists in fpu_struct */ ++ ++struct fpu_emulator_stats fpuemustats; ++ ++/* rounding mode */ ++#define FPU_FPCSR_RN 0x0 /* nearest */ ++#define FPU_FPCSR_RU 0x1 /* towards +Infinity */ ++#define FPU_FPCSR_RD 0x2 /* towards -Infinity */ ++#define FPU_FPCSR_RZ 0x3 /* towards zero */ ++ ++/* Convert Mips rounding mode (0..3) to IEEE library modes. */ ++static const unsigned char ieee_rm[4] = { ++ [FPU_FPCSR_RN] = IEEE754_RN, ++ [FPU_FPCSR_RU] = IEEE754_RU, ++ [FPU_FPCSR_RD] = IEEE754_RD, ++ [FPU_FPCSR_RZ] = IEEE754_RZ, ++}; ++ ++/* Convert IEEE library modes to NDS32 rounding mode (0..3). */ ++static const unsigned char nds32_rm[4] = { ++ [IEEE754_RN] = FPU_FPCSR_RN, ++ [IEEE754_RZ] = FPU_FPCSR_RZ, ++ [IEEE754_RD] = FPU_FPCSR_RD, ++ [IEEE754_RU] = FPU_FPCSR_RU, ++}; ++ ++/* ++ * In the Linux kernel, we support selection of FPR format on the ++ * basis of the Status.FR bit. This does imply that, if a full 32 ++ * FPRs are desired, there needs to be a flip-flop that can be written ++ * to one at that bit position. In any case, O32 MIPS ABI uses ++ * only the even FPRs (Status.FR = 0). ++ */ ++ ++#define CP0_STATUS_FR_SUPPORT ++ ++#ifdef CP0_STATUS_FR_SUPPORT ++#define FR_BIT ST0_FR ++#else ++#define FR_BIT 0 ++#endif ++ ++#define SIFROMREG(si, x) ((si) = *((unsigned long *)ctx + x)) ++#define SITOREG(si, x) (*((unsigned long *)ctx + x)= (si)) ++ ++#ifdef __NDS32_EL__ ++#define DIFROMREG(di, x) ((di) = (unsigned long long)ptr[2*x] << 32 | (unsigned long long)ptr[2*x+1]) ++#define DITOREG(di, x) ptr[2*x] = (di) >> 32; ptr[2*x+1] = (unsigned long)(di) ++#else ++#define DIFROMREG(di, x) ((di) = (unsigned long long)ptr[2*x+1] << 32 | (unsigned long long)ptr[2*x]) ++#define DITOREG(di, x) ptr[2*x+1] = (di) >> 32; ptr[2*x] = (unsigned long)(di) ++#endif ++ ++#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x) ++#define SPTOREG(sp, x) SITOREG((sp).bits, x) ++#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x) ++#define DPTOREG(dp, x) DITOREG((dp).bits, x) ++ ++#define DEF3OP(name, p, f1, f2, f3) \ ++static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \ ++ ieee754##p t) \ ++{ \ ++ struct _ieee754_csr ieee754_csr_save; \ ++ s = f1(s, t); \ ++ ieee754_csr_save = ieee754_csr; \ ++ s = f2(s, r); \ ++ ieee754_csr_save.cx |= ieee754_csr.cx; \ ++ ieee754_csr_save.sx |= ieee754_csr.sx; \ ++ s = f3(s); \ ++ ieee754_csr.cx |= ieee754_csr_save.cx; \ ++ ieee754_csr.sx |= ieee754_csr_save.sx; \ ++ return s; \ ++} ++ ++DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add,); ++DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub,); ++DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg); ++DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg); ++DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add,); ++DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub,); ++DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg); ++DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg); ++ ++/* ++ * Emulate the floating point instruction w/ denorm input pointed at by IPC. 
++ * According to spec, only FPU arithmetic, data format conversion, and compare instructions. ++ */ ++ ++static int fpuEmulate(struct pt_regs *regs, struct fpu_struct *fpu) ++{ ++ unsigned long insn = 0, addr = regs->NDS32_ipc; ++ unsigned long emulpc, contpc; ++ unsigned char *pc = (void *)&insn; ++ char c; ++ int i = 0; ++ ++ for (i = 0; i < 4; i++) { ++ if (__get_user(c, (unsigned char *)addr++)) ++ return SIGBUS; ++ *pc++ = c; ++ } ++ ++ insn = be32_to_cpu(insn); ++ ++ emulpc = regs->NDS32_ipc; ++ contpc = regs->NDS32_ipc + 4; ++ ++ fpuemustats.emulated++; ++ switch (NDS32Insn_OPCODE(insn)) { ++ case cop0_op: ++ switch (NDS32Insn_OPCODE_COP0(insn)) { ++ ++ case fs1_op: ++ case fs2_op: ++ case fd1_op: ++ case fd2_op: ++ { ++ int sig; ++ ++ /* a real fpu computation instruction */ ++ if ((sig = fpu_emu(regs, fpu, insn))) ++ return sig; ++ } ++ break; ++ ++ default: ++ return SIGILL; ++ } ++ break; ++ default: ++ return SIGILL; ++ } ++ ++ /* we did it !! */ ++ regs->NDS32_ipc = contpc; ++ ++ return 0; ++} ++ ++/* ++ * Conversion table from NDS32 fcmp TYP ++ * cond = ieee754dp_cmp(x,y,IEEE754_UN,sig); ++ */ ++static const unsigned char cmptab[8] = { ++ IEEE754_CEQ, ++ IEEE754_CLT, ++ IEEE754_CLT | IEEE754_CEQ, ++ IEEE754_CUN, ++ 0, ++ 0, ++ 0, ++ 0, ++}; ++ ++/* ++ * Emulate a single FPU arithmetic instruction. ++ */ ++static int fpu_emu(struct pt_regs *xcp, struct fpu_struct *ctx, ++ unsigned long insn) ++{ ++ int rfmt; /* resulting format */ ++ unsigned rfpcsr = 0; /* resulting csr */ ++ unsigned long *ptr = (unsigned long *)ctx; ++ union { ++ ieee754dp d; ++ ieee754sp s; ++ int w; ++ } rv; /* resulting value */ ++ ++ fpuemustats.fpuops++; ++ switch (rfmt = NDS32Insn_OPCODE_COP0(insn)) { ++ case fs1_op:{ ++ union { ++ ieee754sp(*t) (ieee754sp, ieee754sp, ieee754sp); ++ ieee754sp(*b) (ieee754sp, ieee754sp); ++ ieee754sp(*u) (ieee754sp); ++ } handler; ++ ++ switch (NDS32Insn_OPCODE_BIT69(insn)) { ++ case fadds_op: ++ handler.b = ieee754sp_add; ++ goto scopbop; ++ case fsubs_op: ++ handler.b = ieee754sp_sub; ++ goto scopbop; ++ case fmadds_op: ++ handler.t = fpemu_sp_madd; ++ goto scoptop; ++ case fmsubs_op: ++ handler.t = fpemu_sp_msub; ++ goto scoptop; ++ case fnmadds_op: ++ handler.t = fpemu_sp_nmadd; ++ goto scoptop; ++ case fnmsubs_op: ++ handler.t = fpemu_sp_nmsub; ++ goto scoptop; ++ case fmuls_op: ++ handler.b = ieee754sp_mul; ++ goto scopbop; ++ case fdivs_op: ++ handler.b = ieee754sp_div; ++ goto scopbop; ++ ++ /* binary op on handler */ ++scoptop: ++ { ++ ieee754sp fd, fr, fs, ft; ++ ++ SPFROMREG(fr, ++ NDS32Insn_OPCODE_Rt(insn)); ++ SPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ SPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ fd = (*handler.t) (fr, fs, ft); ++ SPTOREG(fd, NDS32Insn_OPCODE_Rt(insn)); ++ } ++scopbop: ++ { ++ ieee754sp fs, ft; ++ ++ SPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ SPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ ++ rv.s = (*handler.b) (fs, ft); ++ goto copcsr; ++ } ++scopuop: ++ { ++ ieee754sp fs; ++ ++ SPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ rv.s = (*handler.u) (fs); ++ goto copcsr; ++ } ++copcsr: ++ if (ieee754_cxtest(IEEE754_INEXACT)) ++ rfpcsr |= FPCSR_mskIEX; ++ if (ieee754_cxtest(IEEE754_UNDERFLOW)) ++ rfpcsr |= FPCSR_mskUDF; ++ if (ieee754_cxtest(IEEE754_OVERFLOW)) ++ rfpcsr |= FPCSR_mskOVF; ++ if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) ++ rfpcsr |= FPCSR_mskDBZ; ++ if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) ++ rfpcsr |= FPCSR_mskIVO; ++ break; ++ ++ case fs1_f2op_op: ++ switch (NDS32Insn_OPCODE_BIT1014(insn)) { ++ case fs2d_op:{ 
++ ieee754sp fs; ++ SPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra ++ (insn)); ++ rv.d = ieee754dp_fsp(fs); ++ rfmt = fd1_op; ++ goto copcsr; ++ } ++ case fsqrts_op: ++ handler.u = ieee754sp_sqrt; ++ goto scopuop; ++ case fabss_op: ++ handler.u = ieee754sp_abs; ++ goto scopuop; ++ default: ++ return SIGILL; ++ } ++ default: ++ return SIGILL; ++ } ++ break; ++ } ++ ++ case fs2_op: ++ switch (NDS32Insn_OPCODE_BIT69(insn)) { ++ case fcmpeqs_op: ++ case fcmpeqs_e_op: ++ case fcmplts_op: ++ case fcmplts_e_op: ++ case fcmples_op: ++ case fcmples_e_op: ++ case fcmpuns_op: ++ case fcmpuns_e_op: ++ { ++ unsigned int cmpop = ++ NDS32Insn_OPCODE_BIT69(insn); ++ if (cmpop < 0x8) { ++ ieee754sp fs, ft; ++ ++ SPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ SPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ rv.w = ++ ieee754sp_cmp(fs, ft, ++ cmptab[(cmpop >> 1) & ++ 0x7], ++ cmpop & 0x1); ++ if ((cmpop & 0x1) ++ && ++ ieee754_cxtest ++ (IEEE754_INVALID_OPERATION)) ++ rfpcsr = FPCSR_mskIVO; ++ else ++ goto copcsr; ++ } else ++ return SIGILL; ++ } ++ break; ++ ++ default: ++ return SIGILL; ++ } ++ break; ++ ++ case fd1_op: ++ { ++ union { ++ ieee754dp(*t) (ieee754dp, ieee754dp, ieee754dp); ++ ieee754dp(*b) (ieee754dp, ieee754dp); ++ ieee754dp(*u) (ieee754dp); ++ } handler; ++ ++ switch (NDS32Insn_OPCODE_BIT69(insn)) { ++ case faddd_op: ++ handler.b = ieee754dp_add; ++ goto dcopbop; ++ case fsubd_op: ++ handler.b = ieee754dp_sub; ++ goto dcopbop; ++ case fmaddd_op: ++ handler.t = fpemu_dp_madd; ++ goto tdcoptop; ++ case fmsubd_op: ++ handler.t = fpemu_dp_msub; ++ goto tdcoptop; ++ case fnmaddd_op: ++ handler.t = fpemu_dp_nmadd; ++ goto tdcoptop; ++ case fnmsubd_op: ++ handler.t = fpemu_dp_nmsub; ++ goto tdcoptop; ++ ++tdcoptop: ++ { ++ ieee754dp fd, fr, fs, ft; ++ ++ DPFROMREG(fr, ++ NDS32Insn_OPCODE_Rt(insn)); ++ DPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ DPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ fd = (*handler.t) (fr, fs, ft); ++ DPTOREG(fd, NDS32Insn_OPCODE_Rt(insn)); ++ goto copcsr; ++ } ++ ++ case fmuld_op: ++ handler.b = ieee754dp_mul; ++ goto dcopbop; ++ case fdivd_op: ++ handler.b = ieee754dp_div; ++ goto dcopbop; ++ ++ /* binary op on handler */ ++dcopbop: { ++ ieee754dp fs, ft; ++ ++ DPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ DPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ ++ rv.d = (*handler.b) (fs, ft); ++ goto copcsr; ++ } ++dcopuop: { ++ ieee754dp fs; ++ ++ DPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ rv.d = (*handler.u) (fs); ++ goto copcsr; ++ } ++ ++ case fd1_f2op_op: ++ switch (NDS32Insn_OPCODE_BIT1014(insn)) { ++ case fd2s_op:{ ++ ieee754dp fs; ++ ++ DPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra ++ (insn)); ++ rv.s = ieee754sp_fdp(fs); ++ rfmt = fd1_op; ++ goto copcsr; ++ } ++ case fsqrtd_op: ++ handler.u = ieee754dp_sqrt; ++ goto dcopuop; ++ case fabsd_op: ++ handler.u = ieee754dp_abs; ++ goto dcopuop; ++ default: ++ return SIGILL; ++ } ++ default: ++ return SIGILL; ++ } ++ break; ++ } ++ ++ case fd2_op: ++ switch (NDS32Insn_OPCODE_BIT69(insn)) { ++ case fcmpeqd_op: ++ case fcmpeqd_e_op: ++ case fcmpltd_op: ++ case fcmpltd_e_op: ++ case fcmpled_op: ++ case fcmpled_e_op: ++ case fcmpund_op: ++ case fcmpund_e_op: ++ { ++ unsigned cmpop = NDS32Insn_OPCODE_BIT69(insn); ++ if (cmpop < 0x8) { ++ ieee754dp fs, ft; ++ ++ DPFROMREG(fs, ++ NDS32Insn_OPCODE_Ra(insn)); ++ DPFROMREG(ft, ++ NDS32Insn_OPCODE_Rb(insn)); ++ rv.w = ++ ieee754dp_cmp(fs, ft, ++ cmptab[(cmpop >> 1) & ++ 0x7], ++ cmpop & 0x1); ++ rfmt = fs2_op; ++ if ((cmpop & 0x1) ++ && ++ ieee754_cxtest ++ (IEEE754_INVALID_OPERATION)) ++ 
rfpcsr = FPCSR_mskIVO; ++ else ++ goto copcsr; ++ } else ++ return SIGILL; ++ } ++ break; ++ default: ++ return SIGILL; ++ } ++ break; ++ ++ default: ++ return SIGILL; ++ } ++ ++ /* ++ * Now we can safely write the result back to the register file. ++ */ ++ switch (rfmt) { ++ case fd1_op: ++ case fd2_op: ++ DPTOREG(rv.d, NDS32Insn_OPCODE_Rt(insn)); ++ break; ++ case fs1_op: ++ case fs2_op: ++ SPTOREG(rv.s, NDS32Insn_OPCODE_Rt(insn)); ++ break; ++ default: ++ return SIGILL; ++ } ++ ++ /* ++ * Update the fpu CSR register for this operation. ++ * If an exception is required, generate a tidy SIGFPE exception, ++ * without updating the result register. ++ * Note: cause exception bits do not accumulate, they are rewritten ++ * for each op; only the flag/sticky bits accumulate. ++ */ ++ ctx->fpcsr = (ctx->fpcsr & ~FPCSR_mskALL) | rfpcsr; ++ if ((ctx->fpcsr << 5) & ctx->fpcsr & FPCSR_mskALLE) { ++ return SIGFPE; ++ } ++ ++ return 0; ++} ++ ++int do_fpu_denorm(struct pt_regs *regs, struct fpu_struct *fpu) ++{ ++ int sig = 0; ++ ++ /* ++ * The 'ieee754_csr' is an alias of ++ * fpcsr->RM. No need to copy fpcsr->RM to ++ * ieee754_csr. But ieee754_csr.rm is ieee ++ * library modes. (not NDS32 rounding mode) ++ */ ++ /* convert to ieee library modes */ ++ ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; ++ sig = fpuEmulate(regs, fpu); ++ /* revert to NDS32 rounding mode */ ++ ieee754_csr.rm = nds32_rm[ieee754_csr.rm]; ++ ++ return sig; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/fpu_emulator.h linux-3.4.110/arch/nds32/math-emu/fpu_emulator.h +--- linux-3.4.110.orig/arch/nds32/math-emu/fpu_emulator.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/fpu_emulator.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * Further private data for which no space exists in mips_fpu_struct. ++ * This should be subsumed into the mips_fpu_struct structure as ++ * defined in processor.h as soon as the absurd wired absolute assembler ++ * offsets become dynamic at compile time. ++ * ++ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
++ */ ++#ifndef _ASM_FPU_EMULATOR_H ++#define _ASM_FPU_EMULATOR_H ++ ++struct fpu_emulator_stats { ++ unsigned int emulated; ++ unsigned int loads; ++ unsigned int stores; ++ unsigned int fpuops; ++ unsigned int errors; ++}; ++ ++extern struct fpu_emulator_stats fpuemustats; ++ ++#endif /* _ASM_FPU_EMULATOR_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754.c linux-3.4.110/arch/nds32/math-emu/ieee754.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,125 @@ ++/* ieee754 floating point arithmetic ++ * single and double precision ++ * ++ * BUGS ++ * not much dp done ++ * doesn't generate IEEE754_INEXACT ++ * ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754int.h" ++#include "ieee754sp.h" ++#include "ieee754dp.h" ++ ++#define DP_EBIAS 1023 ++#define DP_EMIN (-1022) ++#define DP_EMAX 1023 ++ ++#define SP_EBIAS 127 ++#define SP_EMIN (-126) ++#define SP_EMAX 127 ++ ++/* special constants ++*/ ++ ++#if (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN) || defined(__NDS32_EL__) ++#define SPSTR(s, b, m) {m, b, s} ++#define DPSTR(s, b, mh, ml) {ml, mh, b, s} ++#endif ++ ++#ifdef __NDS32_EB__ ++#define SPSTR(s, b, m) {s, b, m} ++#define DPSTR(s, b, mh, ml) {s, b, mh, ml} ++#endif ++ ++const struct ieee754dp_konst __ieee754dp_spcvals[] = { ++ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */ ++ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */ ++ DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */ ++ DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */ ++ DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */ ++ DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */ ++ DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */ ++ DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */ ++ DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */ ++ DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */ ++ DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */ ++ DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */ ++ DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */ ++ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */ ++ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */ ++ DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */ ++ DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */ ++}; ++ ++const struct ieee754sp_konst __ieee754sp_spcvals[] = { ++ SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 0), /* + zero */ ++ SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 0), /* - zero */ ++ SPSTR(0, SP_EBIAS, 0), /* + 1.0 */ ++ SPSTR(1, SP_EBIAS, 0), /* - 1.0 
*/ ++ SPSTR(0, 3 + SP_EBIAS, 0x200000), /* + 10.0 */ ++ SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */ ++ SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */ ++ SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */ ++ SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */ ++ SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */ ++ SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */ ++ SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */ ++ SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */ ++ SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */ ++ SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 1), /* - min denormal */ ++ SPSTR(0, 31 + SP_EBIAS, 0), /* + 1.0e31 */ ++ SPSTR(0, 63 + SP_EBIAS, 0), /* + 1.0e63 */ ++}; ++ ++int ieee754si_xcpt(int r, const char *op, ...) ++{ ++ struct ieee754xctx ax; ++ ++ if (!TSTX()) ++ return r; ++ ax.op = op; ++ ax.rt = IEEE754_RT_SI; ++ ax.rv.si = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.si; ++} ++ ++s64 ieee754di_xcpt(s64 r, const char *op, ...) ++{ ++ struct ieee754xctx ax; ++ ++ if (!TSTX()) ++ return r; ++ ax.op = op; ++ ax.rt = IEEE754_RT_DI; ++ ax.rv.di = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.di; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754d.c linux-3.4.110/arch/nds32/math-emu/ieee754d.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754d.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754d.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,134 @@ ++/* ++ * Some debug functions ++ * ++ * MIPS floating point support ++ * ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * Nov 7, 2000 ++ * Modified to build and operate in Linux kernel environment. ++ * ++ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
++ */ ++ ++#include ++#include "ieee754.h" ++ ++#define DP_EBIAS 1023 ++#define DP_EMIN (-1022) ++#define DP_EMAX 1023 ++#define DP_FBITS 52 ++ ++#define SP_EBIAS 127 ++#define SP_EMIN (-126) ++#define SP_EMAX 127 ++#define SP_FBITS 23 ++ ++#define DP_MBIT(x) ((u64)1 << (x)) ++#define DP_HIDDEN_BIT DP_MBIT(DP_FBITS) ++#define DP_SIGN_BIT DP_MBIT(63) ++ ++#define SP_MBIT(x) ((u32)1 << (x)) ++#define SP_HIDDEN_BIT SP_MBIT(SP_FBITS) ++#define SP_SIGN_BIT SP_MBIT(31) ++ ++#define SPSIGN(sp) (sp.parts.sign) ++#define SPBEXP(sp) (sp.parts.bexp) ++#define SPMANT(sp) (sp.parts.mant) ++ ++#define DPSIGN(dp) (dp.parts.sign) ++#define DPBEXP(dp) (dp.parts.bexp) ++#define DPMANT(dp) (dp.parts.mant) ++ ++ieee754dp ieee754dp_dump(char *m, ieee754dp x) ++{ ++ int i; ++ ++ printk("%s", m); ++ printk("<%08x,%08x>\n", (unsigned)(x.bits >> 32), (unsigned)x.bits); ++ printk("\t="); ++ switch (ieee754dp_class(x)) { ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_SNAN: ++ printk("Nan %c", DPSIGN(x) ? '-' : '+'); ++ for (i = DP_FBITS - 1; i >= 0; i--) ++ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); ++ break; ++ case IEEE754_CLASS_INF: ++ printk("%cInfinity", DPSIGN(x) ? '-' : '+'); ++ break; ++ case IEEE754_CLASS_ZERO: ++ printk("%cZero", DPSIGN(x) ? '-' : '+'); ++ break; ++ case IEEE754_CLASS_DNORM: ++ printk("%c0.", DPSIGN(x) ? '-' : '+'); ++ for (i = DP_FBITS - 1; i >= 0; i--) ++ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); ++ printk("e%d", DPBEXP(x) - DP_EBIAS); ++ break; ++ case IEEE754_CLASS_NORM: ++ printk("%c1.", DPSIGN(x) ? '-' : '+'); ++ for (i = DP_FBITS - 1; i >= 0; i--) ++ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); ++ printk("e%d", DPBEXP(x) - DP_EBIAS); ++ break; ++ default: ++ printk("Illegal/Unknown IEEE754 value class"); ++ } ++ printk("\n"); ++ return x; ++} ++ ++ieee754sp ieee754sp_dump(char *m, ieee754sp x) ++{ ++ int i; ++ ++ printk("%s=", m); ++ printk("<%08x>\n", (unsigned)x.bits); ++ printk("\t="); ++ switch (ieee754sp_class(x)) { ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_SNAN: ++ printk("Nan %c", SPSIGN(x) ? '-' : '+'); ++ for (i = SP_FBITS - 1; i >= 0; i--) ++ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0'); ++ break; ++ case IEEE754_CLASS_INF: ++ printk("%cInfinity", SPSIGN(x) ? '-' : '+'); ++ break; ++ case IEEE754_CLASS_ZERO: ++ printk("%cZero", SPSIGN(x) ? '-' : '+'); ++ break; ++ case IEEE754_CLASS_DNORM: ++ printk("%c0.", SPSIGN(x) ? '-' : '+'); ++ for (i = SP_FBITS - 1; i >= 0; i--) ++ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0'); ++ printk("e%d", SPBEXP(x) - SP_EBIAS); ++ break; ++ case IEEE754_CLASS_NORM: ++ printk("%c1.", SPSIGN(x) ? '-' : '+'); ++ for (i = SP_FBITS - 1; i >= 0; i--) ++ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0'); ++ printk("e%d", SPBEXP(x) - SP_EBIAS); ++ break; ++ default: ++ printk("Illegal/Unknown IEEE754 value class"); ++ } ++ printk("\n"); ++ return x; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754dp.c linux-3.4.110/arch/nds32/math-emu/ieee754dp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754dp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754dp.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,239 @@ ++/* IEEE754 floating point arithmetic ++ * double precision: common utilities ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. 
++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754dp.h" ++ ++int ieee754dp_class(ieee754dp x) ++{ ++ COMPXDP; ++ EXPLODEXDP; ++ return xc; ++} ++ ++int ieee754dp_isnan(ieee754dp x) ++{ ++ return ieee754dp_class(x) >= IEEE754_CLASS_SNAN; ++} ++ ++int ieee754dp_issnan(ieee754dp x) ++{ ++ assert(ieee754dp_isnan(x)); ++ return ((DPMANT(x) & DP_MBIT(DP_MBITS - 1)) == DP_MBIT(DP_MBITS - 1)); ++} ++ ++ieee754dp ieee754dp_xcpt(ieee754dp r, const char *op, ...) ++{ ++ struct ieee754xctx ax; ++ if (!TSTX()) ++ return r; ++ ++ ax.op = op; ++ ax.rt = IEEE754_RT_DP; ++ ax.rv.dp = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.dp; ++} ++ ++ieee754dp ieee754dp_nanxcpt(ieee754dp r, const char *op, ...) ++{ ++ struct ieee754xctx ax; ++ ++ assert(ieee754dp_isnan(r)); ++ ++ if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! */ ++ return r; ++ ++ if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { ++ /* not enabled convert to a quiet NaN */ ++ DPMANT(r) &= (~DP_MBIT(DP_MBITS - 1)); ++ if (ieee754dp_isnan(r)) ++ return r; ++ else ++ return ieee754dp_indef(); ++ } ++ ++ ax.op = op; ++ ax.rt = 0; ++ ax.rv.dp = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.dp; ++} ++ ++ieee754dp ieee754dp_bestnan(ieee754dp x, ieee754dp y) ++{ ++ assert(ieee754dp_isnan(x)); ++ assert(ieee754dp_isnan(y)); ++ ++ if (DPMANT(x) > DPMANT(y)) ++ return x; ++ else ++ return y; ++} ++ ++static u64 get_rounding(int sn, u64 xm) ++{ ++ /* inexact must round of 3 bits ++ */ ++ if (xm & (DP_MBIT(3) - 1)) { ++ switch (ieee754_csr.rm) { ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RN: ++ xm += 0x3 + ((xm >> 3) & 1); ++ /* xm += (xm&0x8)?0x4:0x3 */ ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if (!sn) /* ?? */ ++ xm += 0x8; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn) /* ?? */ ++ xm += 0x8; ++ break; ++ } ++ } ++ return xm; ++} ++ ++/* generate a normal/denormal number with over,under handling ++ * sn is sign ++ * xe is an unbiased exponent ++ * xm is 3bit extended precision value. 
++ */ ++ieee754dp ieee754dp_format(int sn, int xe, u64 xm) ++{ ++ assert(xm); /* we don't gen exact zeros (probably should) */ ++ ++ assert((xm >> (DP_MBITS + 1 + 3)) == 0); /* no execess */ ++ assert(xm & (DP_HIDDEN_BIT << 3)); ++ ++ if (xe < DP_EMIN) { ++ /* strip lower bits */ ++ int es = DP_EMIN - xe; ++ ++ if (ieee754_csr.nod) { ++ SETCX(IEEE754_UNDERFLOW); ++ SETCX(IEEE754_INEXACT); ++ ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ return ieee754dp_zero(sn); ++ case IEEE754_RZ: ++ return ieee754dp_zero(sn); ++ case IEEE754_RU: /* toward +Infinity */ ++ if (sn == 0) ++ return ieee754dp_min(0); ++ else ++ return ieee754dp_zero(1); ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn == 0) ++ return ieee754dp_zero(0); ++ else ++ return ieee754dp_min(1); ++ } ++ } ++ ++ if (xe == DP_EMIN - 1 ++ && get_rounding(sn, xm) >> (DP_MBITS + 1 + 3)) { ++ /* Not tiny after rounding */ ++ SETCX(IEEE754_INEXACT); ++ xm = get_rounding(sn, xm); ++ xm >>= 1; ++ /* Clear grs bits */ ++ xm &= ~(DP_MBIT(3) - 1); ++ xe++; ++ } else { ++ /* sticky right shift es bits ++ */ ++ xm = XDPSRS(xm, es); ++ xe += es; ++ assert((xm & (DP_HIDDEN_BIT << 3)) == 0); ++ assert(xe == DP_EMIN); ++ } ++ } ++ if (xm & (DP_MBIT(3) - 1)) { ++ SETCX(IEEE754_INEXACT); ++ if ((xm & (DP_HIDDEN_BIT << 3)) == 0) { ++ SETCX(IEEE754_UNDERFLOW); ++ } ++ ++ /* inexact must round of 3 bits ++ */ ++ xm = get_rounding(sn, xm); ++ /* adjust exponent for rounding add overflowing ++ */ ++ if (xm >> (DP_MBITS + 3 + 1)) { ++ /* add causes mantissa overflow */ ++ xm >>= 1; ++ xe++; ++ } ++ } ++ /* strip grs bits */ ++ xm >>= 3; ++ ++ assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ ++ assert(xe >= DP_EMIN); ++ ++ if (xe > DP_EMAX) { ++ SETCX(IEEE754_OVERFLOW); ++ SETCX(IEEE754_INEXACT); ++ /* -O can be table indexed by (rm,sn) */ ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ return ieee754dp_inf(sn); ++ case IEEE754_RZ: ++ return ieee754dp_max(sn); ++ case IEEE754_RU: /* toward +Infinity */ ++ if (sn == 0) ++ return ieee754dp_inf(0); ++ else ++ return ieee754dp_max(1); ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn == 0) ++ return ieee754dp_max(0); ++ else ++ return ieee754dp_inf(1); ++ } ++ } ++ /* gen norm/denorm/zero */ ++ ++ if ((xm & DP_HIDDEN_BIT) == 0) { ++ /* we underflow (tiny/zero) */ ++ assert(xe == DP_EMIN); ++ if (ieee754_csr.mx & IEEE754_UNDERFLOW) ++ SETCX(IEEE754_UNDERFLOW); ++ return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); ++ } else { ++ assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ ++ assert(xm & DP_HIDDEN_BIT); ++ ++ return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754dp.h linux-3.4.110/arch/nds32/math-emu/ieee754dp.h +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754dp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754dp.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,83 @@ ++/* ++ * IEEE754 floating point ++ * double precision internal header file ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++ ++#include "ieee754int.h" ++ ++#define assert(expr) ((void)0) ++ ++/* 3bit extended double precision sticky right shift */ ++#define XDPSRS(v,rs) \ ++ ((rs > (DP_MBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0)) ++ ++#define XDPSRSX1() \ ++ (xe++, (xm = (xm >> 1) | (xm & 1))) ++ ++#define XDPSRS1(v) \ ++ (((v) >> 1) | ((v) & 1)) ++ ++/* convert denormal to normalized with extended exponent */ ++#define DPDNORMx(m,e) \ ++ while( (m >> DP_MBITS) == 0) { m <<= 1; e--; } ++#define DPDNORMX DPDNORMx(xm, xe) ++#define DPDNORMY DPDNORMx(ym, ye) ++ ++static inline ieee754dp builddp(int s, int bx, u64 m) ++{ ++ ieee754dp r; ++ ++ assert((s) == 0 || (s) == 1); ++ assert((bx) >= DP_EMIN - 1 + DP_EBIAS ++ && (bx) <= DP_EMAX + 1 + DP_EBIAS); ++ assert(((m) >> DP_MBITS) == 0); ++ ++ r.parts.sign = s; ++ r.parts.bexp = bx; ++ r.parts.mant = m; ++ return r; ++} ++ ++extern int ieee754dp_isnan(ieee754dp); ++extern int ieee754dp_issnan(ieee754dp); ++extern int ieee754si_xcpt(int, const char *, ...); ++extern s64 ieee754di_xcpt(s64, const char *, ...); ++extern ieee754dp ieee754dp_xcpt(ieee754dp, const char *, ...); ++extern ieee754dp ieee754dp_nanxcpt(ieee754dp, const char *, ...); ++extern ieee754dp ieee754dp_bestnan(ieee754dp, ieee754dp); ++extern ieee754dp ieee754dp_format(int, int, u64); ++ ++ ++#define DPNORMRET2(s, e, m, name, a0, a1) \ ++{ \ ++ ieee754dp V = ieee754dp_format(s, e, m); \ ++ if(TSTX()) \ ++ return ieee754dp_xcpt(V, name, a0, a1); \ ++ else \ ++ return V; \ ++} ++ ++#define DPNORMRET1(s, e, m, name, a0) DPNORMRET2(s, e, m, name, a0, a0) +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754.h linux-3.4.110/arch/nds32/math-emu/ieee754.h +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,467 @@ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * Nov 7, 2000 ++ * Modification to allow integration with Linux kernel ++ * ++ * Kevin D. Kissell, kevink@mips.com and Carsten Langgard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
++ */ ++#ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H ++#define __ARCH_MIPS_MATH_EMU_IEEE754_H ++ ++#include ++#include ++#include ++ ++/* ++ * Not very pretty, but the Linux kernel's normal va_list definition ++ * does not allow it to be used as a structure element, as it is here. ++ */ ++#ifndef _STDARG_H ++#include ++#endif ++ ++#ifdef __LITTLE_ENDIAN ++struct ieee754dp_konst { ++ unsigned mantlo:32; ++ unsigned manthi:20; ++ unsigned bexp:11; ++ unsigned sign:1; ++}; ++struct ieee754sp_konst { ++ unsigned mant:23; ++ unsigned bexp:8; ++ unsigned sign:1; ++}; ++ ++typedef union _ieee754dp { ++ struct ieee754dp_konst oparts; ++ struct { ++ u64 mant:52; ++ unsigned int bexp:11; ++ unsigned int sign:1; ++ } parts; ++ u64 bits; ++ double d; ++} ieee754dp; ++ ++typedef union _ieee754sp { ++ struct ieee754sp_konst parts; ++ float f; ++ u32 bits; ++} ieee754sp; ++#endif ++ ++#ifdef __BIG_ENDIAN ++struct ieee754dp_konst { ++ unsigned sign:1; ++ unsigned bexp:11; ++ unsigned manthi:20; ++ unsigned mantlo:32; ++}; ++ ++typedef union _ieee754dp { ++ struct ieee754dp_konst oparts; ++ struct { ++ unsigned int sign:1; ++ unsigned int bexp:11; ++ u64 mant:52; ++ } parts; ++ double d; ++ u64 bits; ++} ieee754dp; ++ ++struct ieee754sp_konst { ++ unsigned sign:1; ++ unsigned bexp:8; ++ unsigned mant:23; ++}; ++ ++typedef union _ieee754sp { ++ struct ieee754sp_konst parts; ++ float f; ++ u32 bits; ++} ieee754sp; ++#endif ++ ++/* ++ * single precision (often aka float) ++*/ ++int ieee754sp_finite(ieee754sp x); ++int ieee754sp_class(ieee754sp x); ++ ++ieee754sp ieee754sp_abs(ieee754sp x); ++ieee754sp ieee754sp_neg(ieee754sp x); ++ieee754sp ieee754sp_scalb(ieee754sp x, int); ++ieee754sp ieee754sp_logb(ieee754sp x); ++ ++/* x with sign of y */ ++ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y); ++ ++ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y); ++ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y); ++ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y); ++ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y); ++ ++ieee754sp ieee754sp_fint(int x); ++ieee754sp ieee754sp_funs(unsigned x); ++ieee754sp ieee754sp_flong(s64 x); ++ieee754sp ieee754sp_fulong(u64 x); ++ieee754sp ieee754sp_fdp(ieee754dp x); ++ ++int ieee754sp_tint(ieee754sp x); ++unsigned int ieee754sp_tuns(ieee754sp x); ++s64 ieee754sp_tlong(ieee754sp x); ++u64 ieee754sp_tulong(ieee754sp x); ++ ++int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cop, int sig); ++/* ++ * basic sp math ++ */ ++ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp * ip); ++ieee754sp ieee754sp_frexp(ieee754sp x, int *exp); ++ieee754sp ieee754sp_ldexp(ieee754sp x, int exp); ++ ++ieee754sp ieee754sp_ceil(ieee754sp x); ++ieee754sp ieee754sp_floor(ieee754sp x); ++ieee754sp ieee754sp_trunc(ieee754sp x); ++ ++ieee754sp ieee754sp_sqrt(ieee754sp x); ++ ++/* ++ * double precision (often aka double) ++*/ ++int ieee754dp_finite(ieee754dp x); ++int ieee754dp_class(ieee754dp x); ++ ++/* x with sign of y */ ++ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y); ++ ++ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y); ++ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y); ++ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y); ++ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y); ++ ++ieee754dp ieee754dp_abs(ieee754dp x); ++ieee754dp ieee754dp_neg(ieee754dp x); ++ieee754dp ieee754dp_scalb(ieee754dp x, int); ++ ++/* return exponent as integer in floating point format ++ */ ++ieee754dp ieee754dp_logb(ieee754dp x); ++ ++ieee754dp ieee754dp_fint(int x); ++ieee754dp ieee754dp_funs(unsigned x); ++ieee754dp 
ieee754dp_flong(s64 x); ++ieee754dp ieee754dp_fulong(u64 x); ++ieee754dp ieee754dp_fsp(ieee754sp x); ++ ++ieee754dp ieee754dp_ceil(ieee754dp x); ++ieee754dp ieee754dp_floor(ieee754dp x); ++ieee754dp ieee754dp_trunc(ieee754dp x); ++ ++int ieee754dp_tint(ieee754dp x); ++unsigned int ieee754dp_tuns(ieee754dp x); ++s64 ieee754dp_tlong(ieee754dp x); ++u64 ieee754dp_tulong(ieee754dp x); ++ ++int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cop, int sig); ++/* ++ * basic sp math ++ */ ++ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp * ip); ++ieee754dp ieee754dp_frexp(ieee754dp x, int *exp); ++ieee754dp ieee754dp_ldexp(ieee754dp x, int exp); ++ ++ieee754dp ieee754dp_ceil(ieee754dp x); ++ieee754dp ieee754dp_floor(ieee754dp x); ++ieee754dp ieee754dp_trunc(ieee754dp x); ++ ++ieee754dp ieee754dp_sqrt(ieee754dp x); ++ ++ ++ ++/* 5 types of floating point number ++*/ ++#define IEEE754_CLASS_NORM 0x00 ++#define IEEE754_CLASS_ZERO 0x01 ++#define IEEE754_CLASS_DNORM 0x02 ++#define IEEE754_CLASS_INF 0x03 ++#define IEEE754_CLASS_SNAN 0x04 ++#define IEEE754_CLASS_QNAN 0x05 ++ ++/* exception numbers */ ++#define IEEE754_INVALID_OPERATION 0x01 ++#define IEEE754_ZERO_DIVIDE 0x02 ++#define IEEE754_OVERFLOW 0x04 ++#define IEEE754_UNDERFLOW 0x08 ++#define IEEE754_INEXACT 0x10 ++ ++/* cmp operators ++*/ ++#define IEEE754_CLT 0x01 ++#define IEEE754_CEQ 0x02 ++#define IEEE754_CGT 0x04 ++#define IEEE754_CUN 0x08 ++ ++/* rounding mode ++*/ ++#define IEEE754_RN 0 /* round to nearest */ ++#define IEEE754_RZ 1 /* round toward zero */ ++#define IEEE754_RD 2 /* round toward -Infinity */ ++#define IEEE754_RU 3 /* round toward +Infinity */ ++ ++/* other naming */ ++#define IEEE754_RM IEEE754_RD ++#define IEEE754_RP IEEE754_RU ++ ++/* "normal" comparisons ++*/ ++static inline int ieee754sp_eq(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, IEEE754_CEQ, 0); ++} ++ ++static inline int ieee754sp_ne(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, ++ IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); ++} ++ ++static inline int ieee754sp_lt(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, IEEE754_CLT, 0); ++} ++ ++static inline int ieee754sp_le(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); ++} ++ ++static inline int ieee754sp_gt(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, IEEE754_CGT, 0); ++} ++ ++ ++static inline int ieee754sp_ge(ieee754sp x, ieee754sp y) ++{ ++ return ieee754sp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); ++} ++ ++static inline int ieee754dp_eq(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, IEEE754_CEQ, 0); ++} ++ ++static inline int ieee754dp_ne(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, ++ IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); ++} ++ ++static inline int ieee754dp_lt(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, IEEE754_CLT, 0); ++} ++ ++static inline int ieee754dp_le(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); ++} ++ ++static inline int ieee754dp_gt(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, IEEE754_CGT, 0); ++} ++ ++static inline int ieee754dp_ge(ieee754dp x, ieee754dp y) ++{ ++ return ieee754dp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); ++} ++ ++ ++/* ++ * Like strtod ++ */ ++ieee754dp ieee754dp_fstr(const char *s, char **endp); ++char *ieee754dp_tstr(ieee754dp x, int prec, int fmt, int af); ++ ++ ++/* ++ * The control status register ++ */ ++struct _ieee754_csr { ++#ifdef __BIG_ENDIAN ++ unsigned pad0:12; ++ 
unsigned cx:7;	/* exceptions this operation */
++	unsigned nod:1;	/* set 1 for no denormalised numbers */
++	unsigned mx:5;	/* exception enable mask */
++	unsigned sx:5;	/* exceptions total */
++	unsigned rm:2;	/* current rounding mode */
++#endif
++#ifdef __LITTLE_ENDIAN
++	unsigned rm:2;	/* current rounding mode */
++	unsigned sx:5;	/* exceptions total */
++	unsigned mx:5;	/* exception enable mask */
++	unsigned nod:1;	/* set 1 for no denormalised numbers */
++	unsigned cx:7;	/* exceptions this operation */
++	unsigned pad0:12;
++#endif
++};
++#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fpcsr))
++
++static inline unsigned ieee754_getrm(void)
++{
++	return (ieee754_csr.rm);
++}
++static inline unsigned ieee754_setrm(unsigned rm)
++{
++	return (ieee754_csr.rm = rm);
++}
++
++/*
++ * get current exceptions
++ */
++static inline unsigned ieee754_getcx(void)
++{
++	return (ieee754_csr.cx);
++}
++
++/* test for current exception condition
++ */
++static inline int ieee754_cxtest(unsigned n)
++{
++	return (ieee754_csr.cx & n);
++}
++
++/*
++ * get sticky exceptions
++ */
++static inline unsigned ieee754_getsx(void)
++{
++	return (ieee754_csr.sx);
++}
++
++/* clear sticky conditions
++*/
++static inline unsigned ieee754_clrsx(void)
++{
++	return (ieee754_csr.sx = 0);
++}
++
++/* test for sticky exception condition
++ */
++static inline int ieee754_sxtest(unsigned n)
++{
++	return (ieee754_csr.sx & n);
++}
++
++/* debugging */
++ieee754sp ieee754sp_dump(char *s, ieee754sp x);
++ieee754dp ieee754dp_dump(char *s, ieee754dp x);
++
++#define IEEE754_SPCVAL_PZERO 0
++#define IEEE754_SPCVAL_NZERO 1
++#define IEEE754_SPCVAL_PONE 2
++#define IEEE754_SPCVAL_NONE 3
++#define IEEE754_SPCVAL_PTEN 4
++#define IEEE754_SPCVAL_NTEN 5
++#define IEEE754_SPCVAL_PINFINITY 6
++#define IEEE754_SPCVAL_NINFINITY 7
++#define IEEE754_SPCVAL_INDEF 8
++#define IEEE754_SPCVAL_PMAX 9 /* +max norm */
++#define IEEE754_SPCVAL_NMAX 10 /* -max norm */
++#define IEEE754_SPCVAL_PMIN 11 /* +min norm */
++#define IEEE754_SPCVAL_NMIN 12 /* +min norm */
++#define IEEE754_SPCVAL_PMIND 13 /* +min denorm */
++#define IEEE754_SPCVAL_NMIND 14 /* +min denorm */
++#define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */
++#define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */
++
++extern const struct ieee754dp_konst __ieee754dp_spcvals[];
++extern const struct ieee754sp_konst __ieee754sp_spcvals[];
++#define ieee754dp_spcvals ((const ieee754dp *)__ieee754dp_spcvals)
++#define ieee754sp_spcvals ((const ieee754sp *)__ieee754sp_spcvals)
++
++/*
++ * Return infinity with given sign
++ */
++#define ieee754dp_inf(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)])
++#define ieee754dp_zero(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
++#define ieee754dp_one(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
++#define ieee754dp_ten(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PTEN+(sn)])
++#define ieee754dp_indef() (ieee754dp_spcvals[IEEE754_SPCVAL_INDEF])
++#define ieee754dp_max(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMAX+(sn)])
++#define ieee754dp_min(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIN+(sn)])
++#define ieee754dp_mind(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIND+(sn)])
++#define ieee754dp_1e31() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E31])
++#define ieee754dp_1e63() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E63])
++
++#define ieee754sp_inf(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)])
++#define ieee754sp_zero(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
++#define ieee754sp_one(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
++#define 
ieee754sp_ten(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PTEN+(sn)]) ++#define ieee754sp_indef() (ieee754sp_spcvals[IEEE754_SPCVAL_INDEF]) ++#define ieee754sp_max(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMAX+(sn)]) ++#define ieee754sp_min(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIN+(sn)]) ++#define ieee754sp_mind(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIND+(sn)]) ++#define ieee754sp_1e31() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E31]) ++#define ieee754sp_1e63() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E63]) ++ ++/* ++ * Indefinite integer value ++ */ ++#define ieee754si_indef() INT_MAX ++#ifdef LONG_LONG_MAX ++#define ieee754di_indef() LONG_LONG_MAX ++#else ++#define ieee754di_indef() ((s64)(~0ULL>>1)) ++#endif ++ ++/* IEEE exception context, passed to handler */ ++struct ieee754xctx { ++ const char *op; /* operation name */ ++ int rt; /* result type */ ++ union { ++ ieee754sp sp; /* single precision */ ++ ieee754dp dp; /* double precision */ ++#ifdef IEEE854_XP ++ ieee754xp xp; /* extended precision */ ++#endif ++ int si; /* standard signed integer (32bits) */ ++ s64 di; /* extended signed integer (64bits) */ ++ } rv; /* default result format implied by op */ ++ va_list ap; ++}; ++ ++/* result types for xctx.rt */ ++#define IEEE754_RT_SP 0 ++#define IEEE754_RT_DP 1 ++#define IEEE754_RT_XP 2 ++#define IEEE754_RT_SI 3 ++#define IEEE754_RT_DI 4 ++ ++extern void ieee754_xcpt(struct ieee754xctx *xcp); ++ ++/* compat */ ++#define ieee754dp_fix(x) ieee754dp_tint(x) ++#define ieee754sp_fix(x) ieee754sp_tint(x) ++ ++#endif /* __ARCH_MIPS_MATH_EMU_IEEE754_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754int.h linux-3.4.110/arch/nds32/math-emu/ieee754int.h +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754int.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754int.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,165 @@ ++/* ++ * IEEE754 floating point ++ * common internal header file ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++ ++#include "ieee754.h" ++ ++#define DP_EBIAS 1023 ++#define DP_EMIN (-1022) ++#define DP_EMAX 1023 ++#define DP_MBITS 52 ++ ++#define SP_EBIAS 127 ++#define SP_EMIN (-126) ++#define SP_EMAX 127 ++#define SP_MBITS 23 ++ ++#define DP_MBIT(x) ((u64)1 << (x)) ++#define DP_HIDDEN_BIT DP_MBIT(DP_MBITS) ++#define DP_SIGN_BIT DP_MBIT(63) ++ ++#define SP_MBIT(x) ((u32)1 << (x)) ++#define SP_HIDDEN_BIT SP_MBIT(SP_MBITS) ++#define SP_SIGN_BIT SP_MBIT(31) ++ ++ ++#define SPSIGN(sp) (sp.parts.sign) ++#define SPBEXP(sp) (sp.parts.bexp) ++#define SPMANT(sp) (sp.parts.mant) ++ ++#define DPSIGN(dp) (dp.parts.sign) ++#define DPBEXP(dp) (dp.parts.bexp) ++#define DPMANT(dp) (dp.parts.mant) ++ ++#define CLPAIR(x, y) ((x)*6+(y)) ++ ++#define CLEARCX \ ++ (ieee754_csr.cx = 0) ++ ++#define SETCX(x) \ ++ (ieee754_csr.cx |= (x), ieee754_csr.sx |= (x)) ++ ++#define SETANDTESTCX(x) \ ++ (SETCX(x), ieee754_csr.mx & (x)) ++ ++#define TSTX() \ ++ (ieee754_csr.cx & ieee754_csr.mx) ++ ++ ++#define COMPXSP \ ++ unsigned xm; int xe; int xs; int xc ++ ++#define COMPYSP \ ++ unsigned ym; int ye; int ys; int yc ++ ++#define EXPLODESP(v, vc, vs, ve, vm) \ ++{\ ++ vs = SPSIGN(v);\ ++ ve = SPBEXP(v);\ ++ vm = SPMANT(v);\ ++ if(ve == SP_EMAX+1+SP_EBIAS){\ ++ if(vm == 0)\ ++ vc = IEEE754_CLASS_INF;\ ++ else if(vm & SP_MBIT(SP_MBITS-1)) \ ++ vc = IEEE754_CLASS_SNAN;\ ++ else \ ++ vc = IEEE754_CLASS_QNAN;\ ++ } else if(ve == SP_EMIN-1+SP_EBIAS) {\ ++ if(vm) {\ ++ ve = SP_EMIN;\ ++ vc = IEEE754_CLASS_DNORM;\ ++ } else\ ++ vc = IEEE754_CLASS_ZERO;\ ++ } else {\ ++ ve -= SP_EBIAS;\ ++ vm |= SP_HIDDEN_BIT;\ ++ vc = IEEE754_CLASS_NORM;\ ++ }\ ++} ++#define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm) ++#define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym) ++ ++ ++#define COMPXDP \ ++u64 xm; int xe; int xs; int xc ++ ++#define COMPYDP \ ++u64 ym; int ye; int ys; int yc ++ ++#define EXPLODEDP(v, vc, vs, ve, vm) \ ++{\ ++ vm = DPMANT(v);\ ++ vs = DPSIGN(v);\ ++ ve = DPBEXP(v);\ ++ if(ve == DP_EMAX+1+DP_EBIAS){\ ++ if(vm == 0)\ ++ vc = IEEE754_CLASS_INF;\ ++ else if(vm & DP_MBIT(DP_MBITS-1)) \ ++ vc = IEEE754_CLASS_SNAN;\ ++ else \ ++ vc = IEEE754_CLASS_QNAN;\ ++ } else if(ve == DP_EMIN-1+DP_EBIAS) {\ ++ if(vm) {\ ++ ve = DP_EMIN;\ ++ vc = IEEE754_CLASS_DNORM;\ ++ } else\ ++ vc = IEEE754_CLASS_ZERO;\ ++ } else {\ ++ ve -= DP_EBIAS;\ ++ vm |= DP_HIDDEN_BIT;\ ++ vc = IEEE754_CLASS_NORM;\ ++ }\ ++} ++#define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm) ++#define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym) ++ ++#define FLUSHDP(v, vc, vs, ve, vm) \ ++ if(vc==IEEE754_CLASS_DNORM) {\ ++ if(ieee754_csr.nod) {\ ++ SETCX(IEEE754_INEXACT);\ ++ vc = IEEE754_CLASS_ZERO;\ ++ ve = DP_EMIN-1+DP_EBIAS;\ ++ vm = 0;\ ++ v = ieee754dp_zero(vs);\ ++ }\ ++ } ++ ++#define FLUSHSP(v, vc, vs, ve, vm) \ ++ if(vc==IEEE754_CLASS_DNORM) {\ ++ if(ieee754_csr.nod) {\ ++ SETCX(IEEE754_INEXACT);\ ++ vc = IEEE754_CLASS_ZERO;\ ++ ve = SP_EMIN-1+SP_EBIAS;\ ++ vm = 0;\ ++ v = ieee754sp_zero(vs);\ ++ }\ ++ } ++ ++#define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm) ++#define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym) ++#define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm) ++#define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym) +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754m.c linux-3.4.110/arch/nds32/math-emu/ieee754m.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754m.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754m.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,55 @@ ++/* ++ * floor, 
trunc, ceil ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754.h" ++ ++ieee754dp ieee754dp_floor(ieee754dp x) ++{ ++ ieee754dp i; ++ ++ if (ieee754dp_lt(ieee754dp_modf(x, &i), ieee754dp_zero(0))) ++ return ieee754dp_sub(i, ieee754dp_one(0)); ++ else ++ return i; ++} ++ ++ieee754dp ieee754dp_ceil(ieee754dp x) ++{ ++ ieee754dp i; ++ ++ if (ieee754dp_gt(ieee754dp_modf(x, &i), ieee754dp_zero(0))) ++ return ieee754dp_add(i, ieee754dp_one(0)); ++ else ++ return i; ++} ++ ++ieee754dp ieee754dp_trunc(ieee754dp x) ++{ ++ ieee754dp i; ++ ++ (void)ieee754dp_modf(x, &i); ++ return i; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754sp.c linux-3.4.110/arch/nds32/math-emu/ieee754sp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754sp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754sp.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,239 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++int ieee754sp_class(ieee754sp x) ++{ ++ COMPXSP; ++ EXPLODEXSP; ++ return xc; ++} ++ ++int ieee754sp_isnan(ieee754sp x) ++{ ++ return ieee754sp_class(x) >= IEEE754_CLASS_SNAN; ++} ++ ++int ieee754sp_issnan(ieee754sp x) ++{ ++ assert(ieee754sp_isnan(x)); ++ return (SPMANT(x) & SP_MBIT(SP_MBITS - 1)); ++} ++ ++ieee754sp ieee754sp_xcpt(ieee754sp r, const char *op, ...) ++{ ++ struct ieee754xctx ax; ++ ++ if (!TSTX()) ++ return r; ++ ++ ax.op = op; ++ ax.rt = IEEE754_RT_SP; ++ ax.rv.sp = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.sp; ++} ++ ++ieee754sp ieee754sp_nanxcpt(ieee754sp r, const char *op, ...) 
++{ ++ struct ieee754xctx ax; ++ ++ assert(ieee754sp_isnan(r)); ++ ++ if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */ ++ return r; ++ ++ if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { ++ /* not enabled convert to a quiet NaN */ ++ SPMANT(r) &= (~SP_MBIT(SP_MBITS - 1)); ++ if (ieee754sp_isnan(r)) ++ return r; ++ else ++ return ieee754sp_indef(); ++ } ++ ++ ax.op = op; ++ ax.rt = 0; ++ ax.rv.sp = r; ++ va_start(ax.ap, op); ++ ieee754_xcpt(&ax); ++ va_end(ax.ap); ++ return ax.rv.sp; ++} ++ ++ieee754sp ieee754sp_bestnan(ieee754sp x, ieee754sp y) ++{ ++ assert(ieee754sp_isnan(x)); ++ assert(ieee754sp_isnan(y)); ++ ++ if (SPMANT(x) > SPMANT(y)) ++ return x; ++ else ++ return y; ++} ++ ++static unsigned get_rounding(int sn, unsigned xm) ++{ ++ /* inexact must round of 3 bits ++ */ ++ if (xm & (SP_MBIT(3) - 1)) { ++ switch (ieee754_csr.rm) { ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RN: ++ xm += 0x3 + ((xm >> 3) & 1); ++ /* xm += (xm&0x8)?0x4:0x3 */ ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if (!sn) /* ?? */ ++ xm += 0x8; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn) /* ?? */ ++ xm += 0x8; ++ break; ++ } ++ } ++ return xm; ++} ++ ++/* generate a normal/denormal number with over,under handling ++ * sn is sign ++ * xe is an unbiased exponent ++ * xm is 3bit extended precision value. ++ */ ++ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) ++{ ++ assert(xm); /* we don't gen exact zeros (probably should) */ ++ ++ assert((xm >> (SP_MBITS + 1 + 3)) == 0); /* no execess */ ++ assert(xm & (SP_HIDDEN_BIT << 3)); ++ ++ if (xe < SP_EMIN) { ++ /* strip lower bits */ ++ int es = SP_EMIN - xe; ++ ++ if (ieee754_csr.nod) { ++ SETCX(IEEE754_UNDERFLOW); ++ SETCX(IEEE754_INEXACT); ++ ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ return ieee754sp_zero(sn); ++ case IEEE754_RZ: ++ return ieee754sp_zero(sn); ++ case IEEE754_RU: /* toward +Infinity */ ++ if (sn == 0) ++ return ieee754sp_min(0); ++ else ++ return ieee754sp_zero(1); ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn == 0) ++ return ieee754sp_zero(0); ++ else ++ return ieee754sp_min(1); ++ } ++ } ++ ++ if (xe == SP_EMIN - 1 ++ && get_rounding(sn, xm) >> (SP_MBITS + 1 + 3)) { ++ /* Not tiny after rounding */ ++ SETCX(IEEE754_INEXACT); ++ xm = get_rounding(sn, xm); ++ xm >>= 1; ++ /* Clear grs bits */ ++ xm &= ~(SP_MBIT(3) - 1); ++ xe++; ++ } else { ++ /* sticky right shift es bits ++ */ ++ SPXSRSXn(es); ++ assert((xm & (SP_HIDDEN_BIT << 3)) == 0); ++ assert(xe == SP_EMIN); ++ } ++ } ++ if (xm & (SP_MBIT(3) - 1)) { ++ SETCX(IEEE754_INEXACT); ++ if ((xm & (SP_HIDDEN_BIT << 3)) == 0) { ++ SETCX(IEEE754_UNDERFLOW); ++ } ++ ++ /* inexact must round of 3 bits ++ */ ++ xm = get_rounding(sn, xm); ++ /* adjust exponent for rounding add overflowing ++ */ ++ if (xm >> (SP_MBITS + 1 + 3)) { ++ /* add causes mantissa overflow */ ++ xm >>= 1; ++ xe++; ++ } ++ } ++ /* strip grs bits */ ++ xm >>= 3; ++ ++ assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ ++ assert(xe >= SP_EMIN); ++ ++ if (xe > SP_EMAX) { ++ SETCX(IEEE754_OVERFLOW); ++ SETCX(IEEE754_INEXACT); ++ /* -O can be table indexed by (rm,sn) */ ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ return ieee754sp_inf(sn); ++ case IEEE754_RZ: ++ return ieee754sp_max(sn); ++ case IEEE754_RU: /* toward +Infinity */ ++ if (sn == 0) ++ return ieee754sp_inf(0); ++ else ++ return ieee754sp_max(1); ++ case IEEE754_RD: /* toward -Infinity */ ++ if (sn == 0) ++ return ieee754sp_max(0); ++ else ++ return ieee754sp_inf(1); ++ } ++ } ++ /* gen 
norm/denorm/zero */ ++ ++ if ((xm & SP_HIDDEN_BIT) == 0) { ++ /* we underflow (tiny/zero) */ ++ assert(xe == SP_EMIN); ++ if (ieee754_csr.mx & IEEE754_UNDERFLOW) ++ SETCX(IEEE754_UNDERFLOW); ++ return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); ++ } else { ++ assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ ++ assert(xm & SP_HIDDEN_BIT); ++ ++ return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/ieee754sp.h linux-3.4.110/arch/nds32/math-emu/ieee754sp.h +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754sp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754sp.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * IEEE754 floating point ++ * double precision internal header file ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++ ++#include "ieee754int.h" ++ ++#define assert(expr) ((void)0) ++ ++/* 3bit extended single precision sticky right shift */ ++#define SPXSRSXn(rs) \ ++ (xe += rs, \ ++ xm = (rs > (SP_MBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0)) ++ ++#define SPXSRSX1() \ ++ (xe++, (xm = (xm >> 1) | (xm & 1))) ++ ++#define SPXSRSYn(rs) \ ++ (ye+=rs, \ ++ ym = (rs > (SP_MBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0)) ++ ++#define SPXSRSY1() \ ++ (ye++, (ym = (ym >> 1) | (ym & 1))) ++ ++/* convert denormal to normalized with extended exponent */ ++#define SPDNORMx(m,e) \ ++ while( (m >> SP_MBITS) == 0) { m <<= 1; e--; } ++#define SPDNORMX SPDNORMx(xm, xe) ++#define SPDNORMY SPDNORMx(ym, ye) ++ ++static inline ieee754sp buildsp(int s, int bx, unsigned m) ++{ ++ ieee754sp r; ++ ++ assert((s) == 0 || (s) == 1); ++ assert((bx) >= SP_EMIN - 1 + SP_EBIAS ++ && (bx) <= SP_EMAX + 1 + SP_EBIAS); ++ assert(((m) >> SP_MBITS) == 0); ++ ++ r.parts.sign = s; ++ r.parts.bexp = bx; ++ r.parts.mant = m; ++ ++ return r; ++} ++ ++extern int ieee754sp_isnan(ieee754sp); ++extern int ieee754sp_issnan(ieee754sp); ++extern int ieee754si_xcpt(int, const char *, ...); ++extern s64 ieee754di_xcpt(s64, const char *, ...); ++extern ieee754sp ieee754sp_xcpt(ieee754sp, const char *, ...); ++extern ieee754sp ieee754sp_nanxcpt(ieee754sp, const char *, ...); ++extern ieee754sp ieee754sp_bestnan(ieee754sp, ieee754sp); ++extern ieee754sp ieee754sp_format(int, int, unsigned); ++ ++ ++#define SPNORMRET2(s, e, m, name, a0, a1) \ ++{ \ ++ ieee754sp V = ieee754sp_format(s, e, m); \ ++ if(TSTX()) \ ++ return ieee754sp_xcpt(V, name, a0, a1); \ ++ else \ ++ return V; \ ++} ++ ++#define SPNORMRET1(s, e, m, name, a0) SPNORMRET2(s, e, m, name, a0, a0) +diff -Nur 
linux-3.4.110.orig/arch/nds32/math-emu/ieee754xcpt.c linux-3.4.110/arch/nds32/math-emu/ieee754xcpt.c +--- linux-3.4.110.orig/arch/nds32/math-emu/ieee754xcpt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/ieee754xcpt.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,48 @@ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++/************************************************************************** ++ * Nov 7, 2000 ++ * Added preprocessor hacks to map to Linux kernel diagnostics. ++ * ++ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. ++ *************************************************************************/ ++ ++#include ++#include "ieee754.h" ++ ++/* ++ * Very naff exception handler (you can plug in your own and ++ * override this). ++ */ ++ ++static const char *const rtnames[] = { ++ "sp", "dp", "xp", "si", "di" ++}; ++ ++void ieee754_xcpt(struct ieee754xctx *xcp) ++{ ++ printk(KERN_DEBUG "floating point exception in \"%s\", type=%s\n", ++ xcp->op, rtnames[xcp->rt]); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/insn.h linux-3.4.110/arch/nds32/math-emu/insn.h +--- linux-3.4.110.orig/arch/nds32/math-emu/insn.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/insn.h 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,101 @@ ++#ifndef _ASM_INST_H ++#define _ASM_INST_H ++ ++#define cop0_op 0x35 ++ ++/* ++ * COP0 field of opcodes. ++ */ ++#define fs1_op 0x0 ++#define fs2_op 0x4 ++#define fd1_op 0x8 ++#define fd2_op 0xc ++ ++/* ++ * FS1 opcode. ++ */ ++enum fs1 { ++ fadds_op, fsubs_op, fcpynss_op, fcpyss_op, ++ fmadds_op, fmsubs_op, fcmovns_op, fcmovzs_op, ++ fnmadds_op, fnmsubs_op, ++ fmuls_op = 0xc, fdivs_op, ++ fs1_f2op_op = 0xf ++}; ++ ++/* ++ * FS1/F2OP opcode. ++ */ ++enum fs1_f2 { ++ fs2d_op, fsqrts_op, fabss_op = 0x5, ++ fui2s_op = 0x8, fsi2s_op = 0xc, ++ fs2ui_op = 0x10, fs2ui_z_op = 0x14, ++ fs2si_op = 0x18, fs2si_z_op = 0x1c ++}; ++ ++/* ++ * FS2 opcode. ++ */ ++enum fs2 { ++ fcmpeqs_op, fcmpeqs_e_op, fcmplts_op, fcmplts_e_op, ++ fcmples_op, fcmples_e_op, fcmpuns_op, fcmpuns_e_op ++}; ++ ++/* ++ * FD1 opcode. ++ */ ++enum fd1 { ++ faddd_op, fsubd_op, fcpynsd_op, fcpysd_op, ++ fmaddd_op, fmsubd_op, fcmovnd_op, fcmovzd_op, ++ fnmaddd_op, fnmsubd_op, ++ fmuld_op = 0xc, fdivd_op, fd1_f2op_op = 0xf ++}; ++ ++/* ++ * FD1/F2OP opcode. ++ */ ++enum fd1_f2 { ++ fd2s_op, fsqrtd_op, fabsd_op = 0x5, ++ fui2d_op = 0x8, fsi2d_op = 0xc, ++ fd2ui_op = 0x10, fd2ui_z_op = 0x14, ++ fd2si_op = 0x18, fd2si_z_op = 0x1c ++}; ++ ++/* ++ * FD2 opcode. 
++ */ ++enum fd2 { ++ fcmpeqd_op, fcmpeqd_e_op, fcmpltd_op, fcmpltd_e_op, ++ fcmpled_op, fcmpled_e_op, fcmpund_op, fcmpund_e_op ++}; ++ ++ ++#define NDS32Insn(x) x ++ ++#define I_OPCODE_off 25 ++#define NDS32Insn_OPCODE(x) (NDS32Insn(x) >> I_OPCODE_off) ++ ++#define I_OPCODE_offRt 20 ++#define I_OPCODE_mskRt (0x1fUL << I_OPCODE_offRt) ++#define NDS32Insn_OPCODE_Rt(x) ((NDS32Insn(x) & I_OPCODE_mskRt) >> I_OPCODE_offRt) ++ ++#define I_OPCODE_offRa 15 ++#define I_OPCODE_mskRa (0x1fUL << I_OPCODE_offRa) ++#define NDS32Insn_OPCODE_Ra(x) ((NDS32Insn(x) & I_OPCODE_mskRa) >> I_OPCODE_offRa) ++ ++#define I_OPCODE_offRb 10 ++#define I_OPCODE_mskRb (0x1fUL << I_OPCODE_offRb) ++#define NDS32Insn_OPCODE_Rb(x) ((NDS32Insn(x) & I_OPCODE_mskRb) >> I_OPCODE_offRb) ++ ++#define I_OPCODE_offbit1014 10 ++#define I_OPCODE_mskbit1014 (0x1fUL << I_OPCODE_offbit1014) ++#define NDS32Insn_OPCODE_BIT1014(x) ((NDS32Insn(x) & I_OPCODE_mskbit1014) >> I_OPCODE_offbit1014) ++ ++#define I_OPCODE_offbit69 6 ++#define I_OPCODE_mskbit69 (0xfUL << I_OPCODE_offbit69) ++#define NDS32Insn_OPCODE_BIT69(x) ((NDS32Insn(x) & I_OPCODE_mskbit69) >> I_OPCODE_offbit69) ++ ++#define I_OPCODE_offCOP0 0 ++#define I_OPCODE_mskCOP0 (0x3fUL << I_OPCODE_offCOP0) ++#define NDS32Insn_OPCODE_COP0(x) ((NDS32Insn(x) & I_OPCODE_mskCOP0) >> I_OPCODE_offCOP0) ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/Makefile linux-3.4.110/arch/nds32/math-emu/Makefile +--- linux-3.4.110.orig/arch/nds32/math-emu/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/Makefile 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,13 @@ ++# ++# Makefile for the Linux/nds32 kernel FPU emulation. ++# ++ ++obj-y := fpuemu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \ ++ ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \ ++ dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \ ++ dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \ ++ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \ ++ sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \ ++ dp_sqrt.o sp_sqrt.o ++ ++EXTRA_CFLAGS += -Werror +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_add.c linux-3.4.110/arch/nds32/math-emu/sp_add.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_add.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_add.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,173 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y) ++{ ++ COMPXSP; ++ COMPYSP; ++ ++ EXPLODEXSP; ++ EXPLODEYSP; ++ ++ CLEARCX; ++ ++ FLUSHXSP; ++ FLUSHYSP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "add", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ if (xs == ys) ++ return x; ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_xcpt(ieee754sp_indef(), "add", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return x; ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ if (xs == ys) ++ return x; ++ else ++ return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return x; ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ SPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ SPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ SPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ assert(xm & SP_HIDDEN_BIT); ++ assert(ym & SP_HIDDEN_BIT); ++ ++ /* provide guard,round and stick bit space */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ if (xe > ye) { ++ /* have to shift y fraction right to align ++ */ ++ int s = xe - ye; ++ SPXSRSYn(s); ++ } else if (ye > xe) { ++ /* have to shift x fraction right to align ++ */ ++ int s = ye - xe; ++ SPXSRSXn(s); ++ } ++ assert(xe == ye); ++ assert(xe <= SP_EMAX); ++ ++ if (xs == ys) { ++ /* generate 28 bit result of adding two 27 bit numbers ++ * leaving result in xm,xs,xe ++ */ ++ xm = xm + ym; ++ xe = xe; ++ xs = xs; ++ ++ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ ++ SPXSRSX1(); ++ } ++ } else { ++ 
if (xm >= ym) { ++ xm = xm - ym; ++ xe = xe; ++ xs = xs; ++ } else { ++ xm = ym - xm; ++ xe = xe; ++ xs = ys; ++ } ++ if (xm == 0) ++ return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ /* normalize in extended single precision */ ++ while ((xm >> (SP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ ++ } ++ SPNORMRET2(xs, xe, xm, "add", x, y); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_cmp.c linux-3.4.110/arch/nds32/math-emu/sp_cmp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_cmp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_cmp.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,66 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig) ++{ ++ COMPXSP; ++ COMPYSP; ++ ++ EXPLODEXSP; ++ EXPLODEYSP; ++ FLUSHXSP; ++ FLUSHYSP; ++ CLEARCX; /* Even clear inexact flag here */ ++ ++ if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) { ++ if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) ++ SETCX(IEEE754_INVALID_OPERATION); ++ if (cmp & IEEE754_CUN) ++ return 1; ++ if (cmp & (IEEE754_CLT | IEEE754_CGT)) { ++ if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) ++ return ieee754si_xcpt(0, "fcmpf", x); ++ } ++ return 0; ++ } else { ++ int vx = x.bits; ++ int vy = y.bits; ++ ++ if (vx < 0) ++ vx = -vx ^ SP_SIGN_BIT; ++ if (vy < 0) ++ vy = -vy ^ SP_SIGN_BIT; ++ ++ if (vx < vy) ++ return (cmp & IEEE754_CLT) != 0; ++ else if (vx == vy) ++ return (cmp & IEEE754_CEQ) != 0; ++ else ++ return (cmp & IEEE754_CGT) != 0; ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_div.c linux-3.4.110/arch/nds32/math-emu/sp_div.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_div.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_div.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,155 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y) ++{ ++ COMPXSP; ++ COMPYSP; ++ ++ EXPLODEXSP; ++ EXPLODEYSP; ++ ++ CLEARCX; ++ ++ FLUSHXSP; ++ FLUSHYSP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ return ieee754sp_zero(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return ieee754sp_inf(xs ^ ys); ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ SETCX(IEEE754_ZERO_DIVIDE); ++ return ieee754sp_xcpt(ieee754sp_inf(xs ^ ys), "div", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ return ieee754sp_zero(xs == ys ? 
0 : 1); ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ SPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ SPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ SPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ assert(xm & SP_HIDDEN_BIT); ++ assert(ym & SP_HIDDEN_BIT); ++ ++ /* provide rounding space */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ { ++ /* now the dirty work */ ++ ++ unsigned rm = 0; ++ int re = xe - ye; ++ unsigned bm; ++ ++ for (bm = SP_MBIT(SP_MBITS + 2); bm; bm >>= 1) { ++ if (xm >= ym) { ++ xm -= ym; ++ rm |= bm; ++ if (xm == 0) ++ break; ++ } ++ xm <<= 1; ++ } ++ rm <<= 1; ++ if (xm) ++ rm |= 1; /* have remainder, set sticky */ ++ ++ assert(rm); ++ ++ /* normalise rm to rounding precision ? ++ */ ++ while ((rm >> (SP_MBITS + 3)) == 0) { ++ rm <<= 1; ++ re--; ++ } ++ ++ SPNORMRET2(xs == ys ? 0 : 1, re, rm, "div", x, y); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_fdp.c linux-3.4.110/arch/nds32/math-emu/sp_fdp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_fdp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_fdp.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,76 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_fdp(ieee754dp x) ++{ ++ COMPXDP; ++ ieee754sp nan; ++ ++ EXPLODEXDP; ++ ++ CLEARCX; ++ ++ FLUSHXDP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "fdp"); ++ case IEEE754_CLASS_QNAN: ++ nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32) ++ (xm >> (DP_MBITS - SP_MBITS))); ++ if (!ieee754sp_isnan(nan)) ++ nan = ieee754sp_indef(); ++ return ieee754sp_nanxcpt(nan, "fdp", x); ++ case IEEE754_CLASS_INF: ++ return ieee754sp_inf(xs); ++ case IEEE754_CLASS_ZERO: ++ return ieee754sp_zero(xs); ++ case IEEE754_CLASS_DNORM: ++ /* can't possibly be sp representable */ ++ SETCX(IEEE754_UNDERFLOW); ++ SETCX(IEEE754_INEXACT); ++ if ((ieee754_csr.rm == IEEE754_RU && !xs) || ++ (ieee754_csr.rm == IEEE754_RD && xs)) ++ return ieee754sp_xcpt(ieee754sp_mind(xs), "fdp", x); ++ return ieee754sp_xcpt(ieee754sp_zero(xs), "fdp", x); ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ ++ { ++ u32 rm; ++ ++ /* convert from DP_MBITS to SP_MBITS+3 with sticky right shift ++ */ ++ rm = (xm >> (DP_MBITS - (SP_MBITS + 3))) | ++ ((xm << (64 - (DP_MBITS - (SP_MBITS + 3)))) != 0); ++ ++ SPNORMRET1(xs, xe, rm, "fdp", x); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_fint.c linux-3.4.110/arch/nds32/math-emu/sp_fint.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_fint.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_fint.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,78 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_fint(int x) ++{ ++ unsigned xm; ++ int xe; ++ int xs; ++ ++ CLEARCX; ++ ++ if (x == 0) ++ return ieee754sp_zero(0); ++ if (x == 1 || x == -1) ++ return ieee754sp_one(x < 0); ++ if (x == 10 || x == -10) ++ return ieee754sp_ten(x < 0); ++ ++ xs = (x < 0); ++ if (xs) { ++ if (x == (1 << 31)) ++ xm = ((unsigned)1 << 31); /* max neg can't be safely negated */ ++ else ++ xm = -x; ++ } else { ++ xm = x; ++ } ++ xe = SP_MBITS + 3; ++ ++ if (xm >> (SP_MBITS + 1 + 3)) { ++ /* shunt out overflow bits ++ */ ++ while (xm >> (SP_MBITS + 1 + 3)) { ++ SPXSRSX1(); ++ } ++ } else { ++ /* normalize in grs extended single precision ++ */ ++ while ((xm >> (SP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ } ++ SPNORMRET1(xs, xe, xm, "fint", x); ++} ++ ++ieee754sp ieee754sp_funs(unsigned int u) ++{ ++ if ((int)u < 0) ++ return ieee754sp_add(ieee754sp_1e31(), ++ ieee754sp_fint(u & ~(1 << 31))); ++ return ieee754sp_fint(u); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_flong.c linux-3.4.110/arch/nds32/math-emu/sp_flong.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_flong.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_flong.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,77 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
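ieee754sp_fint above converts an integer by taking its magnitude, then shifting until the hidden bit sits just above the three guard/round/sticky bits. A standalone sketch of that unpacking step, assuming the same field layout (not part of the patch; struct and function names are hypothetical, and rounding/packing is left out):

/* Illustrative sketch (not part of the patch): normalize an int32 into a
 * sign / exponent / significand triple with 3 extra GRS bits, as the
 * ieee754sp_fint code above does before its rounding helper packs it.
 * The value represented is sig / 2^(MBITS+3) * 2^exp. */
#include <stdint.h>
#include <stdio.h>

#define MBITS 23

struct unpacked { int sign; int exp; uint32_t sig; };

static struct unpacked int_to_unpacked(int32_t x)
{
    struct unpacked u = { 0, MBITS + 3, 0 };

    if (x == 0) {                       /* the real code returns +0 early */
        u.exp = 0;
        return u;
    }
    u.sign = x < 0;
    u.sig  = u.sign ? -(uint32_t)x : (uint32_t)x;  /* safe even for INT_MIN */

    if (u.sig >> (MBITS + 1 + 3)) {
        /* too wide: shift right, folding lost bits into a sticky LSB */
        while (u.sig >> (MBITS + 1 + 3)) {
            u.sig = (u.sig >> 1) | (u.sig & 1);
            u.exp++;
        }
    } else {
        /* too narrow: shift left until the hidden bit sits at MBITS+3 */
        while ((u.sig >> (MBITS + 3)) == 0) {
            u.sig <<= 1;
            u.exp--;
        }
    }
    return u;
}

int main(void)
{
    struct unpacked u = int_to_unpacked(-123456789);
    printf("sign=%d exp=%d sig=0x%08x\n", u.sign, u.exp, (unsigned)u.sig);
    return 0;
}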
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_flong(s64 x) ++{ ++ u64 xm; /* <--- need 64-bit mantissa temp */ ++ int xe; ++ int xs; ++ ++ CLEARCX; ++ ++ if (x == 0) ++ return ieee754sp_zero(0); ++ if (x == 1 || x == -1) ++ return ieee754sp_one(x < 0); ++ if (x == 10 || x == -10) ++ return ieee754sp_ten(x < 0); ++ ++ xs = (x < 0); ++ if (xs) { ++ if (x == (1ULL << 63)) ++ xm = (1ULL << 63); /* max neg can't be safely negated */ ++ else ++ xm = -x; ++ } else { ++ xm = x; ++ } ++ xe = SP_MBITS + 3; ++ ++ if (xm >> (SP_MBITS + 1 + 3)) { ++ /* shunt out overflow bits ++ */ ++ while (xm >> (SP_MBITS + 1 + 3)) { ++ SPXSRSX1(); ++ } ++ } else { ++ /* normalize in grs extended single precision */ ++ while ((xm >> (SP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ } ++ SPNORMRET1(xs, xe, xm, "sp_flong", x); ++} ++ ++ieee754sp ieee754sp_fulong(u64 u) ++{ ++ if ((s64) u < 0) ++ return ieee754sp_add(ieee754sp_1e63(), ++ ieee754sp_flong(u & ~(1ULL << 63))); ++ return ieee754sp_flong(u); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_frexp.c linux-3.4.110/arch/nds32/math-emu/sp_frexp.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_frexp.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_frexp.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,52 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++/* close to ieeep754sp_logb ++*/ ++ieee754sp ieee754sp_frexp(ieee754sp x, int *eptr) ++{ ++ COMPXSP; ++ CLEARCX; ++ EXPLODEXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ *eptr = 0; ++ return x; ++ case IEEE754_CLASS_DNORM: ++ SPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ *eptr = xe + 1; ++ return buildsp(xs, -1 + SP_EBIAS, xm & ~SP_HIDDEN_BIT); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_logb.c linux-3.4.110/arch/nds32/math-emu/sp_logb.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_logb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_logb.c 2016-04-07 10:20:50.978082417 +0200 +@@ -0,0 +1,53 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. 
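ieee754sp_frexp above rewrites only the exponent field so the returned fraction lies in [0.5, 1) and reports the original exponent through *eptr. A hypothetical bit-level equivalent for ordinary C floats, checked against libm's frexpf (not part of the patch; normal finite values only):

/* Illustrative sketch (not part of the patch): frexp by patching the
 * exponent field, like ieee754sp_frexp above.  Normal finite floats only. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float frexp_bits(float x, int *eptr)
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof bits);

    int e = (int)((bits >> 23) & 0xff) - 127;   /* unbiased exponent */
    *eptr = e + 1;                              /* frexp convention  */

    bits = (bits & 0x807fffffu) | (126u << 23); /* force exponent 2^-1 */
    memcpy(&x, &bits, sizeof x);
    return x;                                   /* now in [0.5, 1)   */
}

int main(void)
{
    int e1, e2;
    float m1 = frexp_bits(48.0f, &e1);
    float m2 = frexpf(48.0f, &e2);              /* libm reference    */
    printf("%g*2^%d vs %g*2^%d\n", m1, e1, m2, e2);
    return 0;
}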
++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_logb(ieee754sp x) ++{ ++ COMPXSP; ++ ++ CLEARCX; ++ ++ EXPLODEXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ return ieee754sp_nanxcpt(x, "logb", x); ++ case IEEE754_CLASS_QNAN: ++ return x; ++ case IEEE754_CLASS_INF: ++ return ieee754sp_inf(0); ++ case IEEE754_CLASS_ZERO: ++ return ieee754sp_inf(1); ++ case IEEE754_CLASS_DNORM: ++ SPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ return ieee754sp_fint(xe); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_modf.c linux-3.4.110/arch/nds32/math-emu/sp_modf.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_modf.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_modf.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,79 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
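ieee754sp_logb above simply returns the unbiased exponent, after running SPDNORMX to renormalize a denormal first. A small standalone sketch of that exponent extraction for C floats (not part of the patch; zero, infinities and NaNs are deliberately ignored here):

/* Illustrative sketch (not part of the patch): unbiased exponent of a
 * float, renormalizing subnormals the way SPDNORMX does above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int exponent_of(float x)
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof bits);

    int efield = (int)((bits >> 23) & 0xff);
    uint32_t frac = bits & 0x007fffffu;

    if (efield != 0)
        return efield - 127;            /* normal number */

    /* subnormal: value = 0.frac * 2^-126; shift until the hidden bit appears */
    int e = -126;
    while ((frac & 0x00800000u) == 0) {
        frac <<= 1;
        e--;
    }
    return e;
}

int main(void)
{
    printf("%d %d %d\n", exponent_of(48.0f), exponent_of(0.75f),
           exponent_of(1e-44f));        /* 5, -1, and a subnormal exponent */
    return 0;
}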
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++/* modf function is always exact for a finite number ++*/ ++ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp * ip) ++{ ++ COMPXSP; ++ ++ CLEARCX; ++ ++ EXPLODEXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ *ip = x; ++ return x; ++ case IEEE754_CLASS_DNORM: ++ /* far to small */ ++ *ip = ieee754sp_zero(xs); ++ return x; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe < 0) { ++ *ip = ieee754sp_zero(xs); ++ return x; ++ } ++ if (xe >= SP_MBITS) { ++ *ip = x; ++ return ieee754sp_zero(xs); ++ } ++ /* generate ipart mantissa by clearing bottom bits ++ */ ++ *ip = buildsp(xs, xe + SP_EBIAS, ++ ((xm >> (SP_MBITS - xe)) << (SP_MBITS - xe)) & ++ ~SP_HIDDEN_BIT); ++ ++ /* generate fpart mantissa by clearing top bits ++ * and normalizing (must be able to normalize) ++ */ ++ xm = (xm << (32 - (SP_MBITS - xe))) >> (32 - (SP_MBITS - xe)); ++ if (xm == 0) ++ return ieee754sp_zero(xs); ++ ++ while ((xm >> SP_MBITS) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ return buildsp(xs, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_mul.c linux-3.4.110/arch/nds32/math-emu/sp_mul.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_mul.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_mul.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,168 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
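ieee754sp_modf above builds the integer part by clearing the low (SP_MBITS - xe) fraction bits, and the fractional part by clearing the top bits and renormalizing. The same masking trick as a hypothetical truncation helper for C floats, compared against libm's modff (not part of the patch):

/* Illustrative sketch (not part of the patch): truncation by masking the
 * fraction bits, as ieee754sp_modf above does.  Finite inputs only. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float trunc_bits(float x)
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof bits);

    int e = (int)((bits >> 23) & 0xff) - 127;
    if (e < 0)
        return copysignf(0.0f, x);      /* |x| < 1: integer part is 0 */
    if (e >= 23)
        return x;                       /* no fraction bits left      */

    bits &= ~((1u << (23 - e)) - 1);    /* clear the fractional bits  */
    memcpy(&x, &bits, sizeof x);
    return x;
}

int main(void)
{
    float x = -27.8125f, ip;
    float fp = modff(x, &ip);           /* libm reference             */
    printf("trunc_bits=%g, modff: int=%g frac=%g\n", trunc_bits(x), ip, fp);
    return 0;
}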
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y) ++{ ++ COMPXSP; ++ COMPYSP; ++ ++ EXPLODEXSP; ++ EXPLODEYSP; ++ ++ CLEARCX; ++ ++ FLUSHXSP; ++ FLUSHYSP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "mul", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_xcpt(ieee754sp_indef(), "mul", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ return ieee754sp_inf(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return ieee754sp_zero(xs ^ ys); ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ SPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ SPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ SPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ /* rm = xm * ym, re = xe+ye basicly */ ++ assert(xm & SP_HIDDEN_BIT); ++ assert(ym & SP_HIDDEN_BIT); ++ ++ { ++ int re = xe + ye; ++ int rs = xs ^ ys; ++ unsigned rm; ++ ++ /* shunt to top of word */ ++ xm <<= 32 - (SP_MBITS + 1); ++ ym <<= 32 - (SP_MBITS + 1); ++ ++ /* multiply 32bits xm,ym to give high 32bits rm with stickness ++ */ ++ { ++ unsigned short lxm = xm & 0xffff; ++ unsigned short hxm = xm >> 16; ++ unsigned short lym = ym & 0xffff; ++ unsigned short hym = ym >> 16; ++ unsigned lrm; ++ unsigned hrm; ++ ++ lrm = lxm * lym; /* 16 * 16 => 32 */ ++ hrm = hxm * hym; /* 16 * 16 => 32 */ ++ ++ { ++ unsigned t = lxm * hym; /* 16 * 16 => 32 */ ++ { ++ unsigned at = lrm + (t << 16); ++ hrm += at < lrm; ++ lrm = at; ++ } ++ hrm = hrm + (t >> 16); ++ } ++ ++ { ++ unsigned t = hxm * lym; /* 16 * 
16 => 32 */ ++ { ++ unsigned at = lrm + (t << 16); ++ hrm += at < lrm; ++ lrm = at; ++ } ++ hrm = hrm + (t >> 16); ++ } ++ rm = hrm | (lrm != 0); ++ } ++ ++ /* ++ * sticky shift down to normal rounding precision ++ */ ++ if ((int)rm < 0) { ++ rm = (rm >> (32 - (SP_MBITS + 1 + 3))) | ++ ((rm << (SP_MBITS + 1 + 3)) != 0); ++ re++; ++ } else { ++ rm = (rm >> (32 - (SP_MBITS + 1 + 3 + 1))) | ++ ((rm << (SP_MBITS + 1 + 3 + 1)) != 0); ++ } ++ assert(rm & (SP_HIDDEN_BIT << 3)); ++ ++ SPNORMRET2(rs, re, rm, "mul", x, y); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_scalb.c linux-3.4.110/arch/nds32/math-emu/sp_scalb.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_scalb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_scalb.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,56 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_scalb(ieee754sp x, int n) ++{ ++ COMPXSP; ++ ++ CLEARCX; ++ ++ EXPLODEXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ return ieee754sp_nanxcpt(x, "scalb", x, n); ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ case IEEE754_CLASS_ZERO: ++ return x; ++ case IEEE754_CLASS_DNORM: ++ SPDNORMX; ++ break; ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); ++} ++ ++ieee754sp ieee754sp_ldexp(ieee754sp x, int n) ++{ ++ return ieee754sp_scalb(x, n); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_simple.c linux-3.4.110/arch/nds32/math-emu/sp_simple.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_simple.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_simple.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,87 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. 
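The 16x16 partial-product block in ieee754sp_mul above computes the high 32 bits of a 32x32 product, plus a sticky bit for the discarded low half, without needing a 64-bit type. A standalone sketch of the same scheme, checked against a plain 64-bit multiply (not part of the patch; names are made up):

/* Illustrative sketch (not part of the patch): high half of a 32x32
 * multiply from 16x16 partial products, with a sticky bit for the lost
 * low half, as in ieee754sp_mul above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mul32_high_sticky(uint32_t x, uint32_t y)
{
    uint32_t lx = x & 0xffff, hx = x >> 16;
    uint32_t ly = y & 0xffff, hy = y >> 16;

    uint32_t lo = lx * ly;              /* 16 x 16 -> 32               */
    uint32_t hi = hx * hy;

    uint32_t t  = lx * hy;              /* first cross product         */
    uint32_t at = lo + (t << 16);
    hi += (at < lo) + (t >> 16);        /* carry out of the low word   */
    lo  = at;

    t  = hx * ly;                       /* second cross product        */
    at = lo + (t << 16);
    hi += (at < lo) + (t >> 16);
    lo  = at;

    return hi | (lo != 0);              /* sticky: low half was lost   */
}

int main(void)
{
    uint32_t x = 0xC90FDAA2u, y = 0xB504F334u;
    uint64_t full = (uint64_t)x * y;
    uint32_t want = (uint32_t)(full >> 32) | ((uint32_t)full != 0);
    assert(mul32_high_sticky(x, y) == want);
    printf("high|sticky = 0x%08x\n", (unsigned)mul32_high_sticky(x, y));
    return 0;
}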
++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++int ieee754sp_finite(ieee754sp x) ++{ ++ return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; ++} ++ ++ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y) ++{ ++ CLEARCX; ++ SPSIGN(x) = SPSIGN(y); ++ return x; ++} ++ ++ieee754sp ieee754sp_neg(ieee754sp x) ++{ ++ COMPXSP; ++ ++ EXPLODEXSP; ++ CLEARCX; ++ FLUSHXSP; ++ ++ /* ++ * Invert the sign ALWAYS to prevent an endless recursion on ++ * pow() in libc. ++ */ ++ /* quick fix up */ ++ SPSIGN(x) ^= 1; ++ ++ if (xc == IEEE754_CLASS_SNAN) { ++ ieee754sp y = ieee754sp_indef(); ++ SETCX(IEEE754_INVALID_OPERATION); ++ SPSIGN(y) = SPSIGN(x); ++ return ieee754sp_nanxcpt(y, "neg"); ++ } ++ ++ if (ieee754sp_isnan(x)) /* but not infinity */ ++ return ieee754sp_nanxcpt(x, "neg", x); ++ return x; ++} ++ ++ieee754sp ieee754sp_abs(ieee754sp x) ++{ ++ COMPXSP; ++ ++ EXPLODEXSP; ++ CLEARCX; ++ FLUSHXSP; ++ ++ if (xc == IEEE754_CLASS_SNAN) { ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "abs"); ++ } ++ ++ if (ieee754sp_isnan(x)) /* but not infinity */ ++ return ieee754sp_nanxcpt(x, "abs", x); ++ ++ /* quick fix up */ ++ SPSIGN(x) = 0; ++ return x; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_sqrt.c linux-3.4.110/arch/nds32/math-emu/sp_sqrt.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_sqrt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_sqrt.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,116 @@ ++/* IEEE754 floating point arithmetic ++ * single precision square root ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_sqrt(ieee754sp x) ++{ ++ int ix, s, q, m, t, i; ++ unsigned int r; ++ COMPXSP; ++ ++ /* take care of Inf and NaN */ ++ ++ EXPLODEXSP; ++ CLEARCX; ++ FLUSHXSP; ++ ++ /* x == INF or NAN? 
*/ ++ switch (xc) { ++ case IEEE754_CLASS_QNAN: ++ /* sqrt(Nan) = Nan */ ++ return ieee754sp_nanxcpt(x, "sqrt"); ++ case IEEE754_CLASS_SNAN: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); ++ case IEEE754_CLASS_ZERO: ++ /* sqrt(0) = 0 */ ++ return x; ++ case IEEE754_CLASS_INF: ++ if (xs) { ++ /* sqrt(-Inf) = Nan */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); ++ } ++ /* sqrt(+Inf) = Inf */ ++ return x; ++ case IEEE754_CLASS_DNORM: ++ case IEEE754_CLASS_NORM: ++ if (xs) { ++ /* sqrt(-x) = Nan */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); ++ } ++ break; ++ } ++ ++ ix = x.bits; ++ ++ /* normalize x */ ++ m = (ix >> 23); ++ if (m == 0) { /* subnormal x */ ++ for (i = 0; (ix & 0x00800000) == 0; i++) ++ ix <<= 1; ++ m -= i - 1; ++ } ++ m -= 127; /* unbias exponent */ ++ ix = (ix & 0x007fffff) | 0x00800000; ++ if (m & 1) /* odd m, double x to make it even */ ++ ix += ix; ++ m >>= 1; /* m = [m/2] */ ++ ++ /* generate sqrt(x) bit by bit */ ++ ix += ix; ++ q = s = 0; /* q = sqrt(x) */ ++ r = 0x01000000; /* r = moving bit from right to left */ ++ ++ while (r != 0) { ++ t = s + r; ++ if (t <= ix) { ++ s = t + r; ++ ix -= t; ++ q += r; ++ } ++ ix += ix; ++ r >>= 1; ++ } ++ ++ if (ix != 0) { ++ SETCX(IEEE754_INEXACT); ++ switch (ieee754_csr.rm) { ++ case IEEE754_RP: ++ q += 2; ++ break; ++ case IEEE754_RN: ++ q += (q & 1); ++ break; ++ } ++ } ++ ix = (q >> 1) + 0x3f000000; ++ ix += (m << 23); ++ x.bits = ix; ++ return x; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_sub.c linux-3.4.110/arch/nds32/math-emu/sp_sub.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_sub.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_sub.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,180 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
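ieee754sp_sqrt above extracts one result bit per iteration with a shift-and-subtract loop and keeps the leftover remainder for the inexact/rounding decision. The same one-bit-per-iteration idea as a plain 32-bit integer square root (not part of the patch; a simplified relative of the mantissa loop above, not a drop-in replacement):

/* Illustrative sketch (not part of the patch): bit-by-bit integer square
 * root, one result bit per iteration, remainder left in x. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t isqrt32(uint32_t n)
{
    uint32_t x = n;
    uint32_t c = 0;                    /* accumulated root              */
    uint32_t d = 1u << 30;             /* highest even power of two     */

    while (d != 0) {
        if (x >= c + d) {              /* can we afford this result bit? */
            x -= c + d;
            c = (c >> 1) + d;
        } else {
            c >>= 1;
        }
        d >>= 2;
    }
    return c;                          /* floor(sqrt(n))                */
}

int main(void)
{
    uint32_t n;
    for (n = 0; n < 100000; n++)
        assert((uint64_t)isqrt32(n) * isqrt32(n) <= n &&
               (uint64_t)(isqrt32(n) + 1) * (isqrt32(n) + 1) > n);
    printf("isqrt32(2147395600) = %u\n", isqrt32(2147395600u));   /* 46340 */
    return 0;
}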
++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y) ++{ ++ COMPXSP; ++ COMPYSP; ++ ++ EXPLODEXSP; ++ EXPLODEYSP; ++ ++ CLEARCX; ++ ++ FLUSHXSP; ++ FLUSHYSP; ++ ++ switch (CLPAIR(xc, yc)) { ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_nanxcpt(ieee754sp_indef(), "sub", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): ++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): ++ return x; ++ ++ /* Infinity handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): ++ if (xs != ys) ++ return x; ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754sp_xcpt(ieee754sp_indef(), "sub", x, y); ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): ++ return ieee754sp_inf(ys ^ 1); ++ ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): ++ return x; ++ ++ /* Zero handling ++ */ ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): ++ if (xs != ys) ++ return x; ++ else ++ return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ++ return x; ++ ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): ++ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ++ /* quick fix up */ ++ DPSIGN(y) ^= 1; ++ return y; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ++ SPDNORMX; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): ++ SPDNORMY; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): ++ SPDNORMX; ++ break; ++ ++ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): ++ break; ++ } ++ /* flip sign of y and handle as add */ ++ ys ^= 1; ++ ++ assert(xm & SP_HIDDEN_BIT); ++ assert(ym & SP_HIDDEN_BIT); ++ ++ /* provide guard,round and stick bit space */ ++ xm <<= 3; ++ ym <<= 3; ++ ++ if (xe > ye) { ++ /* have to shift y fraction right to align ++ */ ++ int s = xe - ye; ++ SPXSRSYn(s); ++ } else if (ye > xe) { ++ /* have to shift x fraction right to align ++ */ ++ int s = ye - xe; ++ SPXSRSXn(s); ++ } ++ assert(xe == ye); ++ assert(xe <= SP_EMAX); ++ ++ if (xs == ys) { ++ /* generate 28 bit result of adding two 27 bit numbers ++ */ ++ xm = xm + ym; ++ xe = xe; ++ xs = xs; ++ 
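ieee754sp_sub above first gives both significands three guard/round/sticky bits, shifts the operand with the smaller exponent right with a sticky bit until the exponents match, then adds or subtracts magnitudes and renormalizes. A compact standalone sketch of that alignment and magnitude step on unpacked values (not part of the patch; the struct layout and names are assumptions):

/* Illustrative sketch (not part of the patch): exponent alignment with a
 * sticky shift plus magnitude add/subtract, as in ieee754sp_sub above.
 * One operand is sig / 2^26 * 2^exp, hidden bit at bit 26, 3 GRS bits. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

struct usp { int sign; int exp; uint32_t sig; };

static uint32_t srs(uint32_t m, int s)          /* sticky right shift, s >= 1 */
{
    return s >= 32 ? (m != 0) : (m >> s) | ((m << (32 - s)) != 0);
}

static struct usp mag_addsub(struct usp x, struct usp y)
{
    struct usp r;

    /* align the operand with the smaller exponent, keeping a sticky bit */
    if (x.exp > y.exp) {
        y.sig = srs(y.sig, x.exp - y.exp);
        y.exp = x.exp;
    } else if (y.exp > x.exp) {
        x.sig = srs(x.sig, y.exp - x.exp);
        x.exp = y.exp;
    }

    r.exp = x.exp;
    if (x.sign == y.sign) {                     /* same sign: add magnitudes */
        r.sign = x.sign;
        r.sig = x.sig + y.sig;
        if (r.sig >> 27) {                      /* carry out of bit 26       */
            r.sig = srs(r.sig, 1);
            r.exp++;
        }
    } else {                                    /* opposite signs: subtract  */
        r.sign = (x.sig >= y.sig) ? x.sign : y.sign;
        r.sig  = (x.sig >= y.sig) ? x.sig - y.sig : y.sig - x.sig;
        while (r.sig && (r.sig >> 26) == 0) {   /* renormalize after cancel  */
            r.sig <<= 1;
            r.exp--;
        }
    }
    return r;
}

int main(void)
{
    struct usp a = { 0,  0, 3u << 25 };         /* 1.5                 */
    struct usp b = { 0, -1, 5u << 24 };         /* 1.25 * 2^-1 = 0.625 */
    struct usp r = mag_addsub(a, b);            /* expect 2.125        */
    printf("sign=%d exp=%d sig=0x%08x value=%g\n", r.sign, r.exp,
           (unsigned)r.sig, ldexp(r.sig / 67108864.0, r.exp));
    return 0;
}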
++ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ ++ SPXSRSX1(); /* shift preserving sticky */ ++ } ++ } else { ++ if (xm >= ym) { ++ xm = xm - ym; ++ xe = xe; ++ xs = xs; ++ } else { ++ xm = ym - xm; ++ xe = xe; ++ xs = ys; ++ } ++ if (xm == 0) { ++ if (ieee754_csr.rm == IEEE754_RD) ++ return ieee754sp_zero(1); /* round negative inf. => sign = -1 */ ++ else ++ return ieee754sp_zero(0); /* other round modes => sign = 1 */ ++ } ++ /* normalize to rounding precision ++ */ ++ while ((xm >> (SP_MBITS + 3)) == 0) { ++ xm <<= 1; ++ xe--; ++ } ++ } ++ SPNORMRET2(xs, xe, xm, "sub", x, y); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_tint.c linux-3.4.110/arch/nds32/math-emu/sp_tint.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_tint.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_tint.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,125 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include ++#include "ieee754sp.h" ++ ++int ieee754sp_tint(ieee754sp x) ++{ ++ COMPXSP; ++ ++ CLEARCX; ++ ++ EXPLODEXSP; ++ FLUSHXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); ++ case IEEE754_CLASS_ZERO: ++ return 0; ++ case IEEE754_CLASS_DNORM: ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe >= 31) { ++ /* look for valid corner case */ ++ if (xe == 31 && xs && xm == SP_HIDDEN_BIT) ++ return -0x80000000; ++ /* Set invalid. We will only use overflow for floating ++ point overflow */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); ++ } ++ /* oh gawd */ ++ if (xe > SP_MBITS) { ++ xm <<= xe - SP_MBITS; ++ } else { ++ u32 residue; ++ int round; ++ int sticky; ++ int odd; ++ ++ if (xe < -1) { ++ residue = xm; ++ round = 0; ++ sticky = residue != 0; ++ xm = 0; ++ } else { ++ /* Shifting a u32 32 times does not work, ++ * so we do it in two steps. 
Be aware that xe ++ * may be -1 */ ++ residue = xm << (xe + 1); ++ residue <<= 31 - SP_MBITS; ++ round = (residue >> 31) != 0; ++ sticky = (residue << 1) != 0; ++ xm >>= SP_MBITS - xe; ++ } ++ odd = (xm & 0x1) != 0x0; ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ if (round && (sticky || odd)) ++ xm++; ++ break; ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if ((round || sticky) && !xs) ++ xm++; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if ((round || sticky) && xs) ++ xm++; ++ break; ++ } ++ if ((xm >> 31) != 0) { ++ /* This can happen after rounding */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); ++ } ++ if (round || sticky) ++ SETCX(IEEE754_INEXACT); ++ } ++ if (xs) ++ return -xm; ++ else ++ return xm; ++} ++ ++unsigned int ieee754sp_tuns(ieee754sp x) ++{ ++ ieee754sp hb = ieee754sp_1e31(); ++ ++ /* what if x < 0 ?? */ ++ if (ieee754sp_lt(x, hb)) ++ return (unsigned)ieee754sp_tint(x); ++ ++ return (unsigned)ieee754sp_tint(ieee754sp_sub(x, hb)) | ++ ((unsigned)1 << 31); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/math-emu/sp_tlong.c linux-3.4.110/arch/nds32/math-emu/sp_tlong.c +--- linux-3.4.110.orig/arch/nds32/math-emu/sp_tlong.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/math-emu/sp_tlong.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,119 @@ ++/* IEEE754 floating point arithmetic ++ * single precision ++ */ ++/* ++ * MIPS floating point support ++ * Copyright (C) 1994-2000 Algorithmics Ltd. ++ * http://www.algor.co.uk ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ */ ++ ++#include "ieee754sp.h" ++ ++s64 ieee754sp_tlong(ieee754sp x) ++{ ++ COMPXDP; /* <-- need 64-bit mantissa tmp */ ++ ++ CLEARCX; ++ ++ EXPLODEXSP; ++ FLUSHXSP; ++ ++ switch (xc) { ++ case IEEE754_CLASS_SNAN: ++ case IEEE754_CLASS_QNAN: ++ case IEEE754_CLASS_INF: ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); ++ case IEEE754_CLASS_ZERO: ++ return 0; ++ case IEEE754_CLASS_DNORM: ++ case IEEE754_CLASS_NORM: ++ break; ++ } ++ if (xe >= 63) { ++ /* look for valid corner case */ ++ if (xe == 63 && xs && xm == SP_HIDDEN_BIT) ++ return -0x8000000000000000LL; ++ /* Set invalid. 
We will only use overflow for floating ++ point overflow */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); ++ } ++ /* oh gawd */ ++ if (xe > SP_MBITS) { ++ xm <<= xe - SP_MBITS; ++ } else if (xe < SP_MBITS) { ++ u32 residue; ++ int round; ++ int sticky; ++ int odd; ++ ++ if (xe < -1) { ++ residue = xm; ++ round = 0; ++ sticky = residue != 0; ++ xm = 0; ++ } else { ++ residue = xm << (32 - SP_MBITS + xe); ++ round = (residue >> 31) != 0; ++ sticky = (residue << 1) != 0; ++ xm >>= SP_MBITS - xe; ++ } ++ odd = (xm & 0x1) != 0x0; ++ switch (ieee754_csr.rm) { ++ case IEEE754_RN: ++ if (round && (sticky || odd)) ++ xm++; ++ break; ++ case IEEE754_RZ: ++ break; ++ case IEEE754_RU: /* toward +Infinity */ ++ if ((round || sticky) && !xs) ++ xm++; ++ break; ++ case IEEE754_RD: /* toward -Infinity */ ++ if ((round || sticky) && xs) ++ xm++; ++ break; ++ } ++ if ((xm >> 63) != 0) { ++ /* This can happen after rounding */ ++ SETCX(IEEE754_INVALID_OPERATION); ++ return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); ++ } ++ if (round || sticky) ++ SETCX(IEEE754_INEXACT); ++ } ++ if (xs) ++ return -xm; ++ else ++ return xm; ++} ++ ++u64 ieee754sp_tulong(ieee754sp x) ++{ ++ ieee754sp hb = ieee754sp_1e63(); ++ ++ /* what if x < 0 ?? */ ++ if (ieee754sp_lt(x, hb)) ++ return (u64) ieee754sp_tlong(x); ++ ++ return (u64) ieee754sp_tlong(ieee754sp_sub(x, hb)) | (1ULL << 63); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/alignment.c linux-3.4.110/arch/nds32/mm/alignment.c +--- linux-3.4.110.orig/arch/nds32/mm/alignment.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/alignment.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,560 @@ ++/* ++ * linux/arch/nds32/mm/alignment.c ++ * ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#ifdef CONFIG_PROC_FS ++extern struct proc_dir_entry *proc_dir_cpu; ++#endif ++ ++#define DEBUG(enable, tagged, ...) 
\ ++ do{ \ ++ if (enable) { \ ++ if (tagged) \ ++ printk(KERN_WARNING "[ %30s() ] ", __func__); \ ++ printk(KERN_WARNING __VA_ARGS__); \ ++ } \ ++ } while (0) ++ ++#define RT(inst) (((inst) >> 20) & 0x1FUL) ++#define RA(inst) (((inst) >> 15) & 0x1FUL) ++#define RB(inst) (((inst) >> 10) & 0x1FUL) ++#define SV(inst) (((inst) >> 8) & 0x3UL) ++#define IMM(inst) (((inst) >> 0) & 0x3FFFUL) ++ ++#define RA3(inst) (((inst) >> 3) & 0x7UL) ++#define RT3(inst) (((inst) >> 6) & 0x7UL) ++#define IMM3U(inst) (((inst) >> 0) & 0x7UL) ++ ++#define RA5(inst) (((inst) >> 0) & 0x1FUL) ++#define RT4(inst) (((inst) >> 5) & 0xFUL) ++ ++extern int (*do_unaligned_access) ++ (unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs * regs); ++extern int va_present(struct mm_struct *mm, unsigned long addr); ++extern int va_kernel_present(unsigned long addr); ++extern int va_readable(struct pt_regs *regs, unsigned long addr); ++extern int va_writable(struct pt_regs *regs, unsigned long addr); ++ ++static int mode = 0x3; ++module_param(mode, int, 1); ++ ++static inline unsigned long *idx_to_addr(struct pt_regs *regs, int idx) ++{ ++ /* this should be consistent with ptrace.h */ ++ if (idx >= 0 && idx <= 25) /* R0-R25 */ ++ return ®s->NDS32_r0 + idx; ++ else if (idx >= 28 && idx <= 30) /* FP, GP, LP */ ++ return ®s->NDS32_fp + (idx - 28); ++ else if (idx == 31) /* SP */ ++ return ®s->NDS32_sp; ++ else ++ return NULL; /* cause a segfault */ ++} ++ ++static inline unsigned long get_inst(unsigned long addr) ++{ ++ /* FIXME: consider 16-bit inst. */ ++ return be32_to_cpu(get_unaligned((u32 *) addr)); ++} ++ ++static inline unsigned long get_data(unsigned long addr, int len) ++{ ++ if (len == 4) ++ return get_unaligned((u32 *) addr); ++ else ++ return get_unaligned((u16 *) addr); ++} ++ ++static inline void set_data(unsigned long addr, unsigned long val, int len) ++{ ++ if (len == 4) ++ put_unaligned(val, (u32 *) addr); ++ else ++ put_unaligned(val, (u16 *) addr); ++} ++ ++static inline unsigned long sign_extend(unsigned long val, int len) ++{ ++ unsigned long ret = 0; ++ unsigned char *s, *t; ++ int i = 0; ++ ++ val = cpu_to_le32(val); ++ ++ s = (void *)&val; ++ t = (void *)&ret; ++ ++ while (i++ < len) ++ *t++ = *s++; ++ ++ if (((*(t - 1)) & 0x80) && (i < 4)) { ++ ++ while (i++ <= 4) ++ *t++ = 0xff; ++ } ++ ++ return le32_to_cpu(ret); ++} ++ ++static inline int do_16(unsigned long inst, struct pt_regs *regs) ++{ ++ int imm, regular, load, len, addr_mode, idx_mode; ++ unsigned long unaligned_addr, target_val, source_idx, target_idx, ++ shift = 0; ++ switch ((inst >> 9) & 0x3F) { ++ ++ case 0x12: /* LHI333 */ ++ imm = 1; ++ regular = 1; ++ load = 1; ++ len = 2; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x10: /* LWI333 */ ++ imm = 1; ++ regular = 1; ++ load = 1; ++ len = 4; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x11: /* LWI333.bi */ ++ imm = 1; ++ regular = 0; ++ load = 1; ++ len = 4; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x1A: /* LWI450 */ ++ imm = 0; ++ regular = 1; ++ load = 1; ++ len = 4; ++ addr_mode = 5; ++ idx_mode = 4; ++ break; ++ case 0x16: /* SHI333 */ ++ imm = 1; ++ regular = 1; ++ load = 0; ++ len = 2; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x14: /* SWI333 */ ++ imm = 1; ++ regular = 1; ++ load = 0; ++ len = 4; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x15: /* SWI333.bi */ ++ imm = 1; ++ regular = 0; ++ load = 0; ++ len = 4; ++ addr_mode = 3; ++ idx_mode = 3; ++ break; ++ case 0x1B: /* SWI450 */ ++ imm = 0; ++ regular = 1; 
++ load = 0; ++ len = 4; ++ addr_mode = 5; ++ idx_mode = 4; ++ break; ++ ++ default: ++ return -EFAULT; ++ } ++ ++ if (addr_mode == 3) { ++ unaligned_addr = *idx_to_addr(regs, RA3(inst)); ++ source_idx = RA3(inst); ++ } else { ++ unaligned_addr = *idx_to_addr(regs, RA5(inst)); ++ source_idx = RA5(inst); ++ } ++ ++ if (idx_mode == 3) ++ target_idx = RT3(inst); ++ else ++ target_idx = RT4(inst); ++ ++ if (imm) ++ shift = IMM3U(inst) * len; ++ ++ if (regular) ++ unaligned_addr += shift; ++ else ++ *idx_to_addr(regs, source_idx) = unaligned_addr + shift; ++ ++ if (load) { ++ ++ if (!va_readable(regs, unaligned_addr)) ++ return -EACCES; ++ ++ if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) ++ return -EACCES; ++ ++ *idx_to_addr(regs, target_idx) = get_data(unaligned_addr, len); ++ } else { ++ ++ if (!va_writable(regs, unaligned_addr)) ++ return -EACCES; ++ ++ if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) ++ return -EACCES; ++ ++ target_val = *idx_to_addr(regs, target_idx); ++ set_data(unaligned_addr, target_val, len); ++ } ++ ++ regs->NDS32_ipc += 2; ++ ++ return 0; ++} ++ ++static inline int do_32(unsigned long inst, struct pt_regs *regs) ++{ ++ int imm, regular, load, len, sign_ext; ++ unsigned long unsligned_addr, target_val, shift; ++ ++ unsligned_addr = *idx_to_addr(regs, RA(inst)); ++ ++ switch ((inst >> 25) << 1) { ++ ++ case 0x02: /* LHI */ ++ imm = 1; ++ regular = 1; ++ load = 1; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x0A: /* LHI.bi */ ++ imm = 1; ++ regular = 0; ++ load = 1; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x22: /* LHSI */ ++ imm = 1; ++ regular = 1; ++ load = 1; ++ len = 2; ++ sign_ext = 1; ++ break; ++ case 0x2A: /* LHSI.bi */ ++ imm = 1; ++ regular = 0; ++ load = 1; ++ len = 2; ++ sign_ext = 1; ++ break; ++ case 0x04: /* LWI */ ++ imm = 1; ++ regular = 1; ++ load = 1; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x0C: /* LWI.bi */ ++ imm = 1; ++ regular = 0; ++ load = 1; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x12: /* SHI */ ++ imm = 1; ++ regular = 1; ++ load = 0; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x1A: /* SHI.bi */ ++ imm = 1; ++ regular = 0; ++ load = 0; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x14: /* SWI */ ++ imm = 1; ++ regular = 1; ++ load = 0; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x1C: /* SWI.bi */ ++ imm = 1; ++ regular = 0; ++ load = 0; ++ len = 4; ++ sign_ext = 0; ++ break; ++ ++ default: ++ switch (inst & 0xff) { ++ ++ case 0x01: /* LH */ ++ imm = 0; ++ regular = 1; ++ load = 1; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x05: /* LH.bi */ ++ imm = 0; ++ regular = 0; ++ load = 1; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x11: /* LHS */ ++ imm = 0; ++ regular = 1; ++ load = 1; ++ len = 2; ++ sign_ext = 1; ++ break; ++ case 0x15: /* LHS.bi */ ++ imm = 0; ++ regular = 0; ++ load = 1; ++ len = 2; ++ sign_ext = 1; ++ break; ++ case 0x02: /* LW */ ++ imm = 0; ++ regular = 1; ++ load = 1; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x06: /* LW.bi */ ++ imm = 0; ++ regular = 0; ++ load = 1; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x09: /* SH */ ++ imm = 0; ++ regular = 1; ++ load = 0; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x0D: /* SH.bi */ ++ imm = 0; ++ regular = 0; ++ load = 0; ++ len = 2; ++ sign_ext = 0; ++ break; ++ case 0x0A: /* SW */ ++ imm = 0; ++ regular = 1; ++ load = 0; ++ len = 4; ++ sign_ext = 0; ++ break; ++ case 0x0E: /* SW.bi */ ++ imm = 0; ++ regular = 0; ++ load = 0; ++ len = 4; ++ sign_ext = 0; ++ break; ++ ++ default: ++ return -EFAULT; ++ } ++ 
} ++ ++ if (imm) ++ shift = IMM(inst) * len; ++ else ++ shift = *idx_to_addr(regs, RB(inst)) << SV(inst); ++ ++ if (regular) ++ unsligned_addr += shift; ++ else ++ *idx_to_addr(regs, RA(inst)) = unsligned_addr + shift; ++ ++ if (load) { ++ ++ if (!va_readable(regs, unsligned_addr)) ++ return -EACCES; ++ ++ if (!access_ok(VERIFY_READ, (void *)unsligned_addr, len)) ++ return -EACCES; ++ ++ if (sign_ext) ++ *idx_to_addr(regs, RT(inst)) = ++ sign_extend(get_data(unsligned_addr, len), len); ++ else ++ *idx_to_addr(regs, RT(inst)) = ++ get_data(unsligned_addr, len); ++ } else { ++ ++ if (!va_writable(regs, unsligned_addr)) ++ return -EACCES; ++ ++ if (!access_ok(VERIFY_WRITE, (void *)unsligned_addr, len)) ++ return -EACCES; ++ ++ target_val = *idx_to_addr(regs, RT(inst)); ++ set_data(unsligned_addr, target_val, len); ++ } ++ ++ regs->NDS32_ipc += 4; ++ ++ return 0; ++} ++ ++static int _do_unaligned_access(unsigned long entry, unsigned long addr, ++ unsigned long type, struct pt_regs *regs) ++{ ++ unsigned long inst; ++ int ret = -EFAULT; ++ ++ if (user_mode(regs)) { ++ /* user mode */ ++ if (!va_present(current->mm, addr)) ++ return ret; ++ } else { ++ /* kernel mode */ ++ if (!va_kernel_present(addr)) ++ return ret; ++ } ++ ++ inst = get_inst(regs->NDS32_ipc); ++ ++ DEBUG(mode & 0x04, 1, ++ "Faulting Addr: 0x%08lx, PC: 0x%08lx [ 0x%08lx ]\n", addr, ++ regs->NDS32_ipc, inst); ++ ++ if ((user_mode(regs) && (mode & 0x01)) ++ || (!user_mode(regs) && (mode & 0x02))) { ++ ++ mm_segment_t seg = get_fs(); ++ ++ set_fs(KERNEL_DS); ++ ++ if (inst & 0x80000000) ++ ret = do_16((inst >> 16) & 0xffff, regs); ++ else ++ ret = do_32(inst, regs); ++ ++ set_fs(seg); ++ } ++ ++ return ret; ++} ++ ++#ifdef CONFIG_PROC_FS ++ ++static int proc_alignment_read(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ char *p = page; ++ int len; ++ ++ p += sprintf(p, "(0x01) User Mode: %s\n", mode & 0x01 ? "on" : "off"); ++ p += sprintf(p, "(0x02) Kernel Mode: %s\n", mode & 0x02 ? "on" : "off"); ++ p += sprintf(p, "(0x04) Warning: %s\n", mode & 0x04 ? "on" : "off"); ++ ++ len = (p - page) - off; ++ if (len < 0) ++ len = 0; ++ ++ *eof = (len <= count) ? 1 : 0; ++ *start = page + off; ++ ++ return len; ++} ++ ++#define INPUTLEN 12 /* '0' + 'x' + 8digit + '\n' + '\0' */ ++ ++static int proc_alignment_write(struct file *file, const char __user * buffer, ++ unsigned long count, void *data) ++{ ++ unsigned long en; ++ char *endp; ++ char inbuf[INPUTLEN]; ++ ++ if (count > INPUTLEN - 1) ++ return -EFAULT; ++ ++ if (copy_from_user(inbuf, buffer, count)) ++ return -EFAULT; ++ ++ inbuf[count - 1] = '\0'; ++ ++ en = simple_strtoul(inbuf, &endp, 0); ++ if (en > 0x07) ++ return -EFAULT; ++ ++ mode = en & 0x7; ++ ++ return count; ++} ++ ++#endif /* CONFIG_PROC_FS */ ++ ++static int __init unaligned_access_init(void) ++{ ++#ifdef CONFIG_PROC_FS ++ static struct proc_dir_entry *res_alignment; ++ ++ if (!proc_dir_cpu) ++ if (!(proc_dir_cpu = proc_mkdir("cpu", NULL))) ++ return -ENOMEM; ++ ++ if (! 
++ (res_alignment = ++ create_proc_entry("alignment", S_IWUSR | S_IRUGO, proc_dir_cpu))) ++ return -ENOMEM; ++ ++ res_alignment->read_proc = proc_alignment_read; ++ res_alignment->write_proc = proc_alignment_write; ++#endif ++ do_unaligned_access = _do_unaligned_access; ++ ++ return 0; ++} ++ ++static void __exit unaligned_access_exit(void) ++{ ++#ifdef CONFIG_PROC_FS ++ remove_proc_entry("alignment", proc_dir_cpu); ++#endif ++ do_unaligned_access = NULL; ++} ++ ++MODULE_AUTHOR("Roy Lee"); ++MODULE_DESCRIPTION("Unaligned Access Handler"); ++MODULE_LICENSE("GPL"); ++ ++module_init(unaligned_access_init); ++module_exit(unaligned_access_exit); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/cacheflush.c linux-3.4.110/arch/nds32/mm/cacheflush.c +--- linux-3.4.110.orig/arch/nds32/mm/cacheflush.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/cacheflush.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,355 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern struct cache_info L1_cache_info[2]; ++ ++#ifdef CONFIG_CPU_CACHE_NONALIASING ++void flush_cache_mm(struct mm_struct *mm) ++{ ++} ++ ++void flush_cache_dup_mm(struct mm_struct *mm) ++{ ++} ++ ++void flush_cache_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++} ++ ++void flush_cache_page(struct vm_area_struct *vma, ++ unsigned long addr, unsigned long pfn) ++{ ++} ++ ++void flush_cache_vmap(unsigned long start, unsigned long end) ++{ ++} ++ ++void flush_cache_vunmap(unsigned long start, unsigned long end) ++{ ++} ++ ++void flush_dcache_page(struct page *page) ++{ ++ struct address_space *mapping; ++ ++ if (!PageHighMem(page)) { ++ mapping = page_mapping(page); ++ if (mapping && !mapping_mapped(mapping)) ++ set_bit(PG_dcache_dirty, &page->flags); ++ else ++ cpu_dcache_wbinval_page((unsigned long) ++ page_address(page)); ++ } else { ++ unsigned long kaddr = (unsigned long)kmap_atomic(page); ++ cpu_dcache_wbinval_page(kaddr); ++ kunmap_atomic((void *)kaddr); ++ } ++} ++ ++void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len) ++{ ++ unsigned long line_size, start, end; ++ ++ memcpy(dst, src, len); ++ if (vma->vm_flags & VM_EXEC) { ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = (unsigned long)dst & ~(line_size - 1); ++ end = ++ ((unsigned long)dst + len + line_size - 1) & ~(line_size - ++ 1); ++ cpu_cache_wbinval_range(start, end, 1); ++ } ++} ++ ++void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len) ++{ ++ memcpy(dst, src, len); ++} ++ ++void flush_icache_range(unsigned long start, unsigned long end) ++{ ++ cpu_cache_wbinval_range(start, end, 1); ++} ++ ++void flush_icache_page(struct vm_area_struct *vma, struct page *page) ++{ ++} ++ ++void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, ++ pte_t * pte) ++{ ++ struct page *page; ++ unsigned long pfn = pte_pfn(*pte); ++ ++ if (!pfn_valid(pfn)) ++ return; ++ ++ if (vma->vm_mm == current->active_mm) ++ asm("mtsr %1, $mr2\ndsb\n" ++ "tlbop %0, RWR\nisb\n" ++ ::"r"(*pte), "r"(addr)); ++ ++ page = pfn_to_page(pfn); ++ ++ if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) || ++ (vma->vm_flags & VM_EXEC)) { ++ ++ if (!PageHighMem(page)) { ++ cpu_cache_wbinval_page((unsigned long) ++ page_address(page), ++ vma->vm_flags & VM_EXEC); ++ } else { ++ unsigned long kaddr = (unsigned long)kmap_atomic(page); ++ 
cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); ++ kunmap_atomic((void *)kaddr); ++ } ++ } ++} ++#else ++int va_present(struct mm_struct *mm, unsigned long addr); ++ ++static inline unsigned long aliasing(unsigned long addr, unsigned long page) ++{ ++ return ((addr & PAGE_MASK) ^ page) & (REALSHMLBA - 1); ++} ++ ++static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa) ++{ ++ unsigned long kaddr, pte; ++ ++#define BASE_ADDR0 0xffffc000 ++ kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask); ++ pte = (pa | PAGE_KERNEL); ++ asm("mtsr %1, $mr2\ndsb\n" ++ "tlbop %0, RWLK\nisb\n" ++ ::"r"(pte), "r"(kaddr)); ++ return kaddr; ++} ++ ++static inline void kunmap01(unsigned long kaddr) ++{ ++ asm volatile ("tlbop %0, UNLK\n\t" ++ "tlbop %0, INV\n\t" ++ ::"r" (kaddr)); ++} ++ ++static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa) ++{ ++ unsigned long kaddr, pte; ++ ++#define BASE_ADDR1 0xffff8000 ++ kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask); ++ pte = (pa | PAGE_KERNEL); ++ asm("mtsr %1, $mr2\ndsb\n" ++ "tlbop %0, RWLK\nisb\n" ++ ::"r"(pte), "r"(kaddr)); ++ return kaddr; ++} ++ ++void flush_cache_mm(struct mm_struct *mm) ++{ ++ cpu_dcache_wbinval_all(); ++ cpu_icache_inval_all(); ++} ++ ++void flush_cache_dup_mm(struct mm_struct *mm) ++{ ++} ++ ++void flush_cache_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ if ((end - start) > 8 * PAGE_SIZE) { ++ cpu_dcache_wbinval_all(); ++ if (vma->vm_flags & VM_EXEC) ++ cpu_icache_inval_all(); ++ return; ++ } ++ ++ while (start < end) { ++ if (va_present(vma->vm_mm, start)) ++ cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC); ++ start += PAGE_SIZE; ++ } ++} ++ ++void flush_cache_page(struct vm_area_struct *vma, ++ unsigned long addr, unsigned long pfn) ++{ ++ unsigned long vto, flags; ++ ++ local_irq_save(flags); ++ vto = kremap0(addr, pfn << PAGE_SHIFT); ++ cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC); ++ kunmap01(vto); ++ local_irq_restore(flags); ++} ++ ++void flush_cache_vmap(unsigned long start, unsigned long end) ++{ ++ cpu_dcache_wbinval_all(); ++} ++ ++void flush_cache_vunmap(unsigned long start, unsigned long end) ++{ ++ cpu_dcache_wbinval_all(); ++} ++ ++void copy_user_highpage(struct page *to, struct page *from, ++ unsigned long vaddr, struct vm_area_struct *vma) ++{ ++ unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto; ++ kto = ((unsigned long)page_address(to) & PAGE_MASK); ++ kfrom = ((unsigned long)page_address(from) & PAGE_MASK); ++ pto = page_to_phys(to); ++ pfrom = page_to_phys(from); ++ ++ if (aliasing(vaddr, (unsigned long)kfrom)) ++ cpu_dcache_wb_page((unsigned long)kfrom); ++ if (aliasing(vaddr, (unsigned long)kto)) ++ cpu_dcache_inval_page((unsigned long)kto); ++ local_irq_save(flags); ++ vto = kremap0(vaddr, pto); ++ vfrom = kremap1(vaddr, pfrom); ++ copy_page((void *)vto, (void *)vfrom); ++ kunmap01(vfrom); ++ kunmap01(vto); ++ local_irq_restore(flags); ++} ++ ++EXPORT_SYMBOL(copy_user_highpage); ++ ++void clear_user_highpage(struct page *page, unsigned long vaddr) ++{ ++ unsigned long vto, flags, kto; ++ ++ kto = ((unsigned long)page_address(page) & PAGE_MASK); ++ ++ local_irq_save(flags); ++ if (aliasing(kto, vaddr) && kto != 0) { ++ cpu_dcache_inval_page(kto); ++ cpu_icache_inval_page(kto); ++ } ++ vto = kremap0(vaddr, page_to_phys(page)); ++ clear_page((void *)vto); ++ kunmap01(vto); ++ local_irq_restore(flags); ++} ++ ++EXPORT_SYMBOL(clear_user_highpage); ++ ++void flush_dcache_page(struct 
page *page) ++{ ++ struct address_space *mapping; ++ ++ mapping = page_mapping(page); ++ if (mapping && !mapping_mapped(mapping)) ++ set_bit(PG_dcache_dirty, &page->flags); ++ else { ++ int i, pc; ++ unsigned long vto, kaddr, flags; ++ cpu_dcache_wbinval_page((unsigned long)page_address(page)); ++ kaddr = (unsigned long)page_address(page); ++ pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE; ++ for (i = 0; i < pc; i++) { ++ local_irq_save(flags); ++ vto = kremap0(kaddr + i * PAGE_SIZE, __pa(kaddr)); ++ cpu_dcache_wbinval_page(vto); ++ kunmap01(vto); ++ local_irq_restore(flags); ++ } ++ } ++} ++ ++void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len) ++{ ++ unsigned long line_size, start, end, vto, flags; ++ ++ local_irq_save(flags); ++ vto = kremap0(vaddr, page_to_phys(page)); ++ dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); ++ memcpy(dst, src, len); ++ if (vma->vm_flags & VM_EXEC) { ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = (unsigned long)dst & ~(line_size - 1); ++ end = ++ ((unsigned long)dst + len + line_size - 1) & ~(line_size - ++ 1); ++ cpu_cache_wbinval_range(start, end, 1); ++ } ++ kunmap01(vto); ++ local_irq_restore(flags); ++} ++ ++void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ++ unsigned long vaddr, void *dst, void *src, int len) ++{ ++ unsigned long vto, flags; ++ ++ local_irq_save(flags); ++ vto = kremap0(vaddr, page_to_phys(page)); ++ src = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); ++ memcpy(dst, src, len); ++ kunmap01(vto); ++ local_irq_restore(flags); ++} ++ ++void flush_anon_page(struct vm_area_struct *vma, ++ struct page *page, unsigned long vaddr) ++{ ++ if (!PageAnon(page)) ++ return; ++ ++ if (vma->vm_mm != current->active_mm) ++ return; ++ ++ cpu_cache_wbinval_page(vaddr & PAGE_MASK, vma->vm_flags & VM_EXEC); ++} ++ ++void flush_kernel_dcache_page(struct page *page) ++{ ++ cpu_dcache_wbinval_page((unsigned long)page_address(page)); ++} ++ ++void flush_icache_range(unsigned long start, unsigned long end) ++{ ++ cpu_cache_wbinval_range(start, end, 1); ++} ++ ++void flush_icache_page(struct vm_area_struct *vma, struct page *page) ++{ ++} ++ ++void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, ++ pte_t * pte) ++{ ++ struct page *page; ++ unsigned long pfn = pte_pfn(*pte); ++ ++ if (!pfn_valid(pfn)) ++ return; ++ ++ if (vma->vm_mm == current->active_mm) { ++ asm("mtsr %1, $mr2\ndsb\n" ++ "tlbop %0, RWR\nisb\n" ++ ::"r"(*pte), "r"(addr)); ++ } ++ ++ page = pfn_to_page(pfn); ++ if (test_and_clear_bit(PG_dcache_dirty, &page->flags) || ++ (vma->vm_flags & VM_EXEC)) ++ cpu_dcache_wbinval_page((unsigned long)page_address(page)); ++} ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/mm/cctl.c linux-3.4.110/arch/nds32/mm/cctl.c +--- linux-3.4.110.orig/arch/nds32/mm/cctl.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/cctl.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,284 @@ ++/* ++ * linux/arch/nds32/mm/cctl.c ++ * ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "cctl.h" ++ ++#define DEBUG( enable, tagged, ...) 
\ ++ do{ \ ++ if( enable){ \ ++ if( tagged) \ ++ printk( "[ %30s() ] ", __func__); \ ++ printk( __VA_ARGS__); \ ++ } \ ++ } while( 0) ++ ++static int debug = 1; ++module_param(debug, int, 0); ++ ++static int proc_read_cache_en(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ ++ if (!strncmp(data, "ic_en", 7)) ++ return sprintf(page, "I-cache: %s\n", ++ (GET_CACHE_CTL() & CACHE_CTL_mskIC_EN) ? ++ "Enabled" : "Disabled"); ++ else ++ return sprintf(page, "D-cache: %s\n", ++ (GET_CACHE_CTL() & CACHE_CTL_mskDC_EN) ? ++ "Enabled" : "Disabled"); ++} ++ ++static int proc_write_cache_en(struct file *file, const char *buffer, ++ unsigned long count, void *data) ++{ ++ ++ unsigned long en, saved_gie; ++ char inbuf[INPUTLEN]; ++ ++ if (count > INPUTLEN - 1) ++ count = INPUTLEN - 1; ++ ++ if (copy_from_user(inbuf, buffer, count)) ++ return -EFAULT; ++ ++ inbuf[count] = '\0'; ++ ++ if (!sscanf(inbuf, "%lu", &en) || en > 1) ++ return -EFAULT; ++ ++ GIE_SAVE(&saved_gie); ++ ++ if (!strncmp(data, "ic_en", 7)) { ++ ++ if (en && !(GET_CACHE_CTL() & CACHE_CTL_mskIC_EN)) { ++ ++ SET_CACHE_CTL(GET_CACHE_CTL() | CACHE_CTL_mskIC_EN); ++ DEBUG(debug, 1, "I-cache: Enabled\n"); ++ } else if (!en && (GET_CACHE_CTL() & CACHE_CTL_mskIC_EN)) { ++ ++ SET_CACHE_CTL(GET_CACHE_CTL() & ~CACHE_CTL_mskIC_EN); ++ cpu_icache_inval_all(); ++ DEBUG(debug, 1, "I-cache: Disabled\n"); ++ } ++ } else { ++ if (en && !(GET_CACHE_CTL() & CACHE_CTL_mskDC_EN)) { ++ ++ SET_CACHE_CTL(GET_CACHE_CTL() | CACHE_CTL_mskDC_EN); ++ DEBUG(debug, 1, "D-cache: Enabled\n"); ++ } else if (!en && (GET_CACHE_CTL() & CACHE_CTL_mskDC_EN)) { ++ ++ SET_CACHE_CTL(GET_CACHE_CTL() & ~CACHE_CTL_mskDC_EN); ++ cpu_dcache_wbinval_all(); ++ DEBUG(debug, 1, "D-cache: Disabled\n"); ++ } ++ } ++ ++ GIE_RESTORE(saved_gie); ++ ++ return count; ++} ++ ++struct entry_struct proc_table_cache_en[] = { ++ ++ {"ic_en", 0644, proc_read_cache_en, proc_write_cache_en}, ++ {"dc_en", 0644, proc_read_cache_en, proc_write_cache_en}, ++ {NULL, 0, NULL, NULL} ++}; ++ ++static int sprint_cache_sdz(char *buf, unsigned long size, unsigned long way, ++ unsigned sdz) ++{ ++ ++ return sprintf(buf, "[%c] %luK x %lu\n[%c] %luK x %lu\n" ++ "[%c] %luK x %lu\n[%c] %luK x %lu\n", ++ (sdz == 0) ? '*' : ' ', (size / 1024), way, ++ (sdz == 1) ? '*' : ' ', (size / 1024), way / 2, ++ (sdz == 2) ? '*' : ' ', (size / 1024) / 2, way, ++ (sdz == 3) ? 
'*' : ' ', (size / 1024) / 2, way / 2); ++} ++ ++static int proc_read_cache_sdz(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ ++ if (!strncmp(data, "ic_sdz", 7)) { ++ ++ return sprint_cache_sdz(page, ++ CACHE_LINE_SIZE(ICACHE) * ++ CACHE_SET(ICACHE), CACHE_WAY(ICACHE), ++ (GET_SDZ_CTL() & SDZ_CTL_mskICDZ) >> ++ SDZ_CTL_offICDZ); ++ } else { ++ return sprint_cache_sdz(page, ++ CACHE_LINE_SIZE(DCACHE) * ++ CACHE_SET(DCACHE), CACHE_WAY(DCACHE), ++ (GET_SDZ_CTL() & SDZ_CTL_mskDCDZ) >> ++ SDZ_CTL_offDCDZ); ++ } ++} ++ ++static int proc_write_cache_sdz(struct file *file, const char *buffer, ++ unsigned long count, void *data) ++{ ++ ++ unsigned long mode, saved_gie, saved_cctl; ++ char inbuf[INPUTLEN]; ++ ++ if (count > INPUTLEN - 1) ++ count = INPUTLEN - 1; ++ ++ if (copy_from_user(inbuf, buffer, count)) ++ return -EFAULT; ++ ++ inbuf[count] = '\0'; ++ ++ if (!sscanf(inbuf, "%lu", &mode) || mode > 3) ++ return -EFAULT; ++ ++ GIE_SAVE(&saved_gie); ++ ++ saved_cctl = GET_CACHE_CTL(); ++ DEBUG(debug, 1, "saved_gie: %ld, saved_cctl: 0x%08lx\n", saved_gie, ++ saved_cctl); ++ ++ if (!strncmp(data, "ic_sdz", 7)) { ++ ++ DEBUG(debug, 1, "IC_SDZ: mode %ld\n", mode); ++ ++ if (mode == 2 || mode == 3) { ++ ++ if (CACHE_LINE_SIZE(ICACHE) * CACHE_SET(ICACHE) / 2 < ++ 4096) { ++ ++ GIE_RESTORE(saved_gie); ++ DEBUG(debug, 1, "Error: way size < 4096\n"); ++ return -1; ++ } ++ } ++ ++ /* turn off and flush cache */ ++ DEBUG(debug, 1, "turning off cache\n"); ++ SET_CACHE_CTL(saved_cctl & ~CACHE_CTL_mskIC_EN); ++ DEBUG(debug, 1, "flushing cache\n"); ++ cpu_icache_inval_all(); ++ ++ /* perform down size operation */ ++ DEBUG(debug, 1, "downsizing cache\n"); ++ SET_SDZ_CTL((GET_SDZ_CTL() & ~SDZ_CTL_mskICDZ) | ++ (mode << SDZ_CTL_offICDZ)); ++ } else { ++ ++ DEBUG(debug, 1, "DC_SDZ: mode %ld\n", mode); ++ ++ if (mode == 2 || mode == 3) { ++ ++ if (CACHE_LINE_SIZE(DCACHE) * CACHE_SET(DCACHE) / 2 < ++ 4096) { ++ ++ GIE_RESTORE(saved_gie); ++ DEBUG(debug, 1, "Error: way size < 4096\n"); ++ return -1; ++ } ++ } ++ ++ /* turn off and flush cache */ ++ DEBUG(debug, 1, "turning off cache\n"); ++ SET_CACHE_CTL(saved_cctl & ~CACHE_CTL_mskDC_EN); ++ DEBUG(debug, 1, "flushing cache\n"); ++ cpu_dcache_wbinval_all(); ++ ++ /* perform down size operation */ ++ DEBUG(debug, 1, "downsizing cache\n"); ++ SET_SDZ_CTL((GET_SDZ_CTL() & ~SDZ_CTL_mskDCDZ) | ++ (mode << SDZ_CTL_offDCDZ)); ++ } ++ ++ /* turn on cache ( if it was enabled) */ ++ DEBUG(debug, 1, "restoring saved_cctl : 0x%08lx\n", saved_cctl); ++ SET_CACHE_CTL(saved_cctl); ++ ++ DEBUG(debug, 1, "restoring saved_git: %ld\n", saved_gie); ++ GIE_RESTORE(saved_gie); ++ ++ return count; ++} ++ ++struct entry_struct proc_table_cache_sdz[] = { ++ ++ {"ic_sdz", 0644, proc_read_cache_sdz, proc_write_cache_sdz}, ++ {"dc_sdz", 0644, proc_read_cache_sdz, proc_write_cache_sdz}, ++ {NULL, 0, NULL, NULL} ++}; ++ ++static struct proc_dir_entry *proc_cctl; ++ ++static void create_seq_entry(struct entry_struct *e, mode_t mode, ++ struct proc_dir_entry *parent) ++{ ++ ++ struct proc_dir_entry *entry = create_proc_entry(e->name, mode, parent); ++ ++ if (entry) { ++ ++ entry->read_proc = e->readop; ++ entry->write_proc = e->writeop; ++ entry->data = e->name; ++ } ++} ++ ++static void install_proc_table(struct entry_struct *table) ++{ ++ ++ while (table->name) { ++ ++ create_seq_entry(table, table->perm, proc_cctl); ++ table++; ++ } ++} ++ ++static void remove_proc_table(struct entry_struct *table) ++{ ++ ++ while (table->name) { ++ ++ 
remove_proc_entry(table->name, proc_cctl); ++ table++; ++ } ++} ++ ++static int __init init_cctl(void) ++{ ++ ++ DEBUG(debug, 1, "CCTL module registered\n"); ++ ++ proc_cctl = proc_mkdir("cctl", NULL); ++ ++ install_proc_table(proc_table_cache_en); ++ install_proc_table(proc_table_cache_sdz); ++ ++ return 0; ++} ++ ++static void __exit cleanup_cctl(void) ++{ ++ ++ remove_proc_table(proc_table_cache_sdz); ++ remove_proc_table(proc_table_cache_en); ++ remove_proc_entry("cctl", NULL); ++ ++ DEBUG(debug, 1, "CCTL module unregistered\n"); ++} ++ ++module_init(init_cctl); ++module_exit(cleanup_cctl); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Userspace Cache Control Module"); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/cctl.h linux-3.4.110/arch/nds32/mm/cctl.h +--- linux-3.4.110.orig/arch/nds32/mm/cctl.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/cctl.h 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,22 @@ ++#ifndef CCTL_H ++#define CCTL_H ++ ++#include ++#include ++#include ++#include ++ ++#define INPUTLEN 32 ++ ++extern void cpu_icache_flush(void); ++extern void cpu_dcache_flush(void); ++ ++struct entry_struct{ ++ ++ char *name; ++ int perm; ++ read_proc_t *readop; ++ write_proc_t *writeop; ++}; ++ ++#endif /* CCTL_H */ +diff -Nur linux-3.4.110.orig/arch/nds32/mm/consistent.c linux-3.4.110/arch/nds32/mm/consistent.c +--- linux-3.4.110.orig/arch/nds32/mm/consistent.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/consistent.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,448 @@ ++/* ++ * linux/arch/nds32/mm/consistent.c ++ * ++ * Copyright (C) 2000-2004 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * DMA uncached mapping support. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * This is the page table (2MB) covering uncached, DMA consistent allocations ++ */ ++static pte_t *consistent_pte; ++static DEFINE_RAW_SPINLOCK(consistent_lock); ++ ++/* ++ * VM region handling support. ++ * ++ * This should become something generic, handling VM region allocations for ++ * vmalloc and similar (ioremap, module space, etc). ++ * ++ * I envisage vmalloc()'s supporting vm_struct becoming: ++ * ++ * struct vm_struct { ++ * struct vm_region region; ++ * unsigned long flags; ++ * struct page **pages; ++ * unsigned int nr_pages; ++ * unsigned long phys_addr; ++ * }; ++ * ++ * get_vm_area() would then call vm_region_alloc with an appropriate ++ * struct vm_region head (eg): ++ * ++ * struct vm_region vmalloc_head = { ++ * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), ++ * .vm_start = VMALLOC_START, ++ * .vm_end = VMALLOC_END, ++ * }; ++ * ++ * However, vmalloc_head.vm_start is variable (typically, it is dependent on ++ * the amount of RAM found at boot time.) I would imagine that get_vm_area() ++ * would have to initialise this each time prior to calling vm_region_alloc(). 
++ */ ++struct arch_vm_region { ++ struct list_head vm_list; ++ unsigned long vm_start; ++ unsigned long vm_end; ++ struct page *vm_pages; ++}; ++ ++static struct arch_vm_region consistent_head = { ++ .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), ++ .vm_start = CONSISTENT_BASE, ++ .vm_end = CONSISTENT_END, ++}; ++ ++static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head, ++ size_t size, int gfp) ++{ ++ unsigned long addr = head->vm_start, end = head->vm_end - size; ++ unsigned long flags; ++ struct arch_vm_region *c, *new; ++ ++ new = kmalloc(sizeof(struct arch_vm_region), gfp); ++ if (!new) ++ goto out; ++ ++ raw_spin_lock_irqsave(&consistent_lock, flags); ++ ++ list_for_each_entry(c, &head->vm_list, vm_list) { ++ if ((addr + size) < addr) ++ goto nospc; ++ if ((addr + size) <= c->vm_start) ++ goto found; ++ addr = c->vm_end; ++ if (addr > end) ++ goto nospc; ++ } ++ ++found: ++ /* ++ * Insert this entry _before_ the one we found. ++ */ ++ list_add_tail(&new->vm_list, &c->vm_list); ++ new->vm_start = addr; ++ new->vm_end = addr + size; ++ ++ raw_spin_unlock_irqrestore(&consistent_lock, flags); ++ return new; ++ ++nospc: ++ raw_spin_unlock_irqrestore(&consistent_lock, flags); ++ kfree(new); ++out: ++ return NULL; ++} ++ ++static struct arch_vm_region *vm_region_find(struct arch_vm_region *head, ++ unsigned long addr) ++{ ++ struct arch_vm_region *c; ++ ++ list_for_each_entry(c, &head->vm_list, vm_list) { ++ if (c->vm_start == addr) ++ goto out; ++ } ++ c = NULL; ++out: ++ return c; ++} ++ ++#ifdef CONFIG_HUGETLB_PAGE ++#error ARM Coherent DMA allocator does not (yet) support huge TLB ++#endif ++ ++static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t * handle, ++ int gfp, pgprot_t prot) ++{ ++ struct page *page; ++ struct arch_vm_region *c; ++ unsigned long order; ++ unsigned int i; ++ u64 mask = ISA_DMA_THRESHOLD, limit; ++ ++ if (!consistent_pte) { ++ printk(KERN_ERR "%s: not initialised\n", __func__); ++ dump_stack(); ++ return NULL; ++ } ++ ++ if (dev) { ++ mask = dev->coherent_dma_mask; ++ ++ /* ++ * Sanity check the DMA mask - it must be non-zero, and ++ * must be able to be satisfied by a DMA allocation. ++ */ ++ if (mask == 0) { ++ dev_warn(dev, "coherent DMA mask is unset\n"); ++ goto no_page; ++ } ++ ++ if ((~mask) & ISA_DMA_THRESHOLD) { ++ dev_warn(dev, "coherent DMA mask %#llx is smaller " ++ "than system GFP_DMA mask %#llx\n", ++ mask, (unsigned long long)ISA_DMA_THRESHOLD); ++ goto no_page; ++ } ++ } ++ ++ /* ++ * Sanity check the allocation size. ++ */ ++ size = PAGE_ALIGN(size); ++ limit = (mask + 1) & ~mask; ++ if ((limit && size >= limit) || ++ size >= (CONSISTENT_END - CONSISTENT_BASE)) { ++ printk(KERN_WARNING "coherent allocation too big " ++ "(requested %#x mask %#llx)\n", size, mask); ++ goto no_page; ++ } ++ ++ order = get_order(size); ++ ++ if (mask != 0xffffffff) ++ gfp |= GFP_DMA; ++ ++ page = alloc_pages(gfp, order); ++ if (!page) ++ goto no_page; ++ ++ for (i = 1; i < (1 << order); i++) ++ atomic_set(&(page + i)->_count, 1); ++ ++ /* ++ * Invalidate any data that might be lurking in the ++ * kernel direct-mapped region for device DMA. ++ */ ++ { ++ unsigned long kaddr = (unsigned long)page_address(page); ++ memset(page_address(page), 0, size); ++ cpu_dma_wbinval_range(kaddr, kaddr + size); ++ } ++ ++ /* ++ * Allocate a virtual address in the consistent mapping region. 
++ */ ++ c = vm_region_alloc(&consistent_head, size, ++ gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); ++ if (c) { ++ pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start); ++ struct page *end = page + (1 << order); ++ ++ c->vm_pages = page; ++ ++ /* ++ * Set the "dma handle" ++ */ ++ *handle = page_to_dma(dev, page); ++ ++ do { ++ BUG_ON(!pte_none(*pte)); ++ ++ /* ++ * x86 does not mark the pages reserved... ++ */ ++ SetPageReserved(page); ++ set_pte(pte, mk_pte(page, prot)); ++ page++; ++ pte++; ++ } while (size -= PAGE_SIZE); ++ ++ /* ++ * Free the otherwise unused pages. ++ */ ++ while (page < end) { ++ __free_page(page); ++ page++; ++ } ++ ++ return (void *)c->vm_start; ++ } ++ ++ if (page) ++ __free_pages(page, order); ++no_page: ++ *handle = ~0; ++ return NULL; ++} ++ ++/* ++ * Allocate DMA-coherent memory space and return both the kernel remapped ++ * virtual and bus address for that space. ++ */ ++void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * handle, ++ gfp_t gfp) ++{ ++ return __dma_alloc(dev, size, handle, gfp, ++ pgprot_noncached(PAGE_KERNEL)); ++} ++ ++EXPORT_SYMBOL(dma_alloc_coherent); ++ ++/* ++ * Allocate a writecombining region, in much the same way as ++ * dma_alloc_coherent above. ++ */ ++void *dma_alloc_writecombine(struct device *dev, size_t size, ++ dma_addr_t * handle, gfp_t gfp) ++{ ++ return __dma_alloc(dev, size, handle, gfp, ++ pgprot_writecombine(PAGE_KERNEL)); ++} ++ ++EXPORT_SYMBOL(dma_alloc_writecombine); ++ ++static int dma_mmap(struct device *dev, struct vm_area_struct *vma, ++ void *cpu_addr, dma_addr_t dma_addr, size_t size) ++{ ++ unsigned long flags, user_size, kern_size; ++ struct arch_vm_region *c; ++ int ret = -ENXIO; ++ ++ user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; ++ ++ raw_spin_lock_irqsave(&consistent_lock, flags); ++ c = vm_region_find(&consistent_head, (unsigned long)cpu_addr); ++ raw_spin_unlock_irqrestore(&consistent_lock, flags); ++ ++ if (c) { ++ unsigned long off = vma->vm_pgoff; ++ ++ kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT; ++ ++ if (off < kern_size && user_size <= (kern_size - off)) { ++ vma->vm_flags |= VM_RESERVED; ++ ret = remap_pfn_range(vma, vma->vm_start, ++ page_to_pfn(c->vm_pages) + off, ++ user_size << PAGE_SHIFT, ++ vma->vm_page_prot); ++ } ++ } ++ ++ return ret; ++} ++ ++int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, ++ void *cpu_addr, dma_addr_t dma_addr, size_t size) ++{ ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ return dma_mmap(dev, vma, cpu_addr, dma_addr, size); ++} ++ ++EXPORT_SYMBOL(dma_mmap_coherent); ++ ++int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, ++ void *cpu_addr, dma_addr_t dma_addr, size_t size) ++{ ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ++ return dma_mmap(dev, vma, cpu_addr, dma_addr, size); ++} ++ ++EXPORT_SYMBOL(dma_mmap_writecombine); ++ ++/* ++ * free a page as defined by the above mapping. 
++ */ ++void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, ++ dma_addr_t handle) ++{ ++ struct arch_vm_region *c; ++ unsigned long flags, addr; ++ pte_t *ptep; ++ ++ size = PAGE_ALIGN(size); ++ ++ raw_spin_lock_irqsave(&consistent_lock, flags); ++ ++ c = vm_region_find(&consistent_head, (unsigned long)cpu_addr); ++ if (!c) ++ goto no_area; ++ ++ if ((c->vm_end - c->vm_start) != size) { ++ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", ++ __func__, c->vm_end - c->vm_start, size); ++ dump_stack(); ++ size = c->vm_end - c->vm_start; ++ } ++ ++ ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); ++ addr = c->vm_start; ++ do { ++ pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); ++ unsigned long pfn; ++ ++ ptep++; ++ addr += PAGE_SIZE; ++ ++ if (!pte_none(pte) && pte_present(pte)) { ++ pfn = pte_pfn(pte); ++ ++ if (pfn_valid(pfn)) { ++ struct page *page = pfn_to_page(pfn); ++ ++ /* ++ * x86 does not mark the pages reserved... ++ */ ++ ClearPageReserved(page); ++ ++ __free_page(page); ++ continue; ++ } ++ } ++ ++ printk(KERN_CRIT "%s: bad page in kernel page table\n", ++ __func__); ++ } while (size -= PAGE_SIZE); ++ ++ flush_tlb_kernel_range(c->vm_start, c->vm_end); ++ ++ list_del(&c->vm_list); ++ ++ raw_spin_unlock_irqrestore(&consistent_lock, flags); ++ ++ kfree(c); ++ return; ++ ++no_area: ++ raw_spin_unlock_irqrestore(&consistent_lock, flags); ++ printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", ++ __func__, cpu_addr); ++ dump_stack(); ++} ++ ++EXPORT_SYMBOL(dma_free_coherent); ++ ++/* ++ * Initialise the consistent memory allocation. ++ */ ++static int __init consistent_init(void) ++{ ++ pgd_t *pgd; ++ pmd_t *pmd; ++ pte_t *pte; ++ int ret = 0; ++ ++ do { ++ pgd = pgd_offset(&init_mm, CONSISTENT_BASE); ++ pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); ++ if (!pmd) { ++ printk(KERN_ERR "%s: no pmd tables\n", __func__); ++ ret = -ENOMEM; ++ break; ++ } ++ /* The first level mapping may be created in somewhere. ++ * It's not necessary to warn here. */ ++ /* WARN_ON(!pmd_none(*pmd)); */ ++ ++ pte = pte_alloc_kernel(pmd, CONSISTENT_BASE); ++ if (!pte) { ++ ret = -ENOMEM; ++ break; ++ } ++ ++ consistent_pte = pte; ++ } while (0); ++ ++ return ret; ++} ++ ++core_initcall(consistent_init); ++ ++/* ++ * Make an area consistent for devices. 
++ */ ++void consistent_sync(void *vaddr, size_t size, int direction) ++{ ++ unsigned long start = (unsigned long)vaddr; ++ unsigned long end = start + size; ++ ++ switch (direction) { ++ case DMA_FROM_DEVICE: /* invalidate only */ ++ cpu_dma_inval_range(start, end); ++ break; ++ case DMA_TO_DEVICE: /* writeback only */ ++ cpu_dma_wb_range(start, end); ++ break; ++ case DMA_BIDIRECTIONAL: /* writeback and invalidate */ ++ cpu_dma_wbinval_range(start, end); ++ break; ++ default: ++ BUG(); ++ } ++} ++ ++EXPORT_SYMBOL(consistent_sync); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/extable.c linux-3.4.110/arch/nds32/mm/extable.c +--- linux-3.4.110.orig/arch/nds32/mm/extable.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/extable.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,18 @@ ++/* ++ * linux/arch/nds32/mm/extable.c ++ * ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++#include ++#include ++ ++int fixup_exception(struct pt_regs *regs) ++{ ++ const struct exception_table_entry *fixup; ++ ++ fixup = search_exception_tables(instruction_pointer(regs)); ++ if (fixup) ++ regs->NDS32_ipc = fixup->fixup; ++ ++ return fixup != NULL; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/fault.c linux-3.4.110/arch/nds32/mm/fault.c +--- linux-3.4.110.orig/arch/nds32/mm/fault.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/fault.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,507 @@ ++/* ++ * linux/arch/arm/mm/fault.c ++ * ++ * Copyright (C) 1995 Linus Torvalds ++ * Modifications for ARM processor (c) 1995-2004 Russell King ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++/* ============================================================================ ++ * ++ * linux/arch/nds32/mm/fault.c ++ * ++ * Copyright (C) 2007 Andes Technology Corporation ++ * This file is part of Linux and should be licensed under the GPL. ++ * See the file COPYING for conditions for redistribution. ++ * ++ * Abstract: ++ * ++ * This program is for NDS32 architecture, referred from ARM's ++ * implementation. ++ * ++ * Revision History: ++ * ++ * Nov.26.2007 Initial ported by Tom, Shawn, and Steven, ++ * patched for KGDB and refined code by Harry. ++ * ++ * Note: ++ * ++ * ============================================================================ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include "fault.h" ++ ++/* ++ * This is useful to dump out the page tables associated with ++ * 'addr' in mm 'mm'. 
++ */ ++void show_pte(struct mm_struct *mm, unsigned long addr) ++{ ++ pgd_t *pgd; ++ if (!mm) ++ mm = &init_mm; ++ ++ printk(KERN_ALERT "pgd = %p\n", mm->pgd); ++ pgd = pgd_offset(mm, addr); ++ printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); ++ ++ do { ++ pmd_t *pmd; ++ ++ if (pgd_none(*pgd)) ++ break; ++ ++ if (pgd_bad(*pgd)) { ++ printk("(bad)"); ++ break; ++ } ++ ++ pmd = pmd_offset(pgd, addr); ++#if PTRS_PER_PMD != 1 ++ printk(", *pmd=%08lx", pmd_val(*pmd)); ++#endif ++ ++ if (pmd_none(*pmd)) ++ break; ++ ++ if (pmd_bad(*pmd)) { ++ printk("(bad)"); ++ break; ++ } ++#ifndef CONFIG_HIGHMEM ++ { ++ pte_t *pte; ++ /* We must not map this if we have highmem enabled */ ++ pte = pte_offset_map(pmd, addr); ++ printk(", *pte=%08lx", pte_val(*pte)); ++ pte_unmap(pte); ++ } ++#endif ++ } while (0); ++ ++ printk("\n"); ++} ++ ++/* ++ * Oops. The kernel tried to access some page that wasn't present. ++ */ ++static void ++__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, ++ struct pt_regs *regs) ++{ ++ /* ++ * Are we prepared to handle this kernel fault? ++ */ ++ if (fixup_exception(regs)) ++ return; ++ ++ /* ++ * No handler, we'll have to terminate things with extreme prejudice. ++ */ ++ bust_spinlocks(1); ++ printk(KERN_ALERT ++ "__do_kernel_fault: Unable to handle kernel %s at virtual address %08lx\n", ++ (addr < PAGE_SIZE) ? "NULL pointer dereference" : ++ "paging request", addr); ++ ++ show_pte(mm, addr); ++ die("Oops", regs, fsr); ++ bust_spinlocks(0); ++ do_exit(SIGKILL); ++} ++ ++/* ++ * Something tried to access memory that isn't in our memory map.. ++ * User mode accesses just cause a SIGSEGV ++ */ ++static void ++__do_user_fault(struct task_struct *tsk, unsigned long addr, ++ unsigned int fsr, int code, struct pt_regs *regs) ++{ ++ struct siginfo si; ++ ++#ifdef CONFIG_DEBUG_USER ++ if (user_debug & UDBG_SEGV) { ++ printk(KERN_DEBUG ++ "%s: unhandled page fault at 0x%08lx, code 0x%03x\n", ++ tsk->comm, addr, fsr); ++ show_pte(tsk->mm, addr); ++ show_regs(regs); ++ } ++#endif ++ ++ tsk->thread.address = addr; ++ tsk->thread.error_code = fsr; ++ tsk->thread.trap_no = 14; ++ si.si_signo = SIGSEGV; ++ si.si_errno = 0; ++ si.si_code = code; ++ si.si_addr = (void __user *)addr; ++ force_sig_info(SIGSEGV, &si, tsk); ++} ++ ++void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) ++{ ++ struct task_struct *tsk = current; ++ struct mm_struct *mm = tsk->active_mm; ++ ++ /* ++ * If we are in kernel mode at this point, we ++ * have no context to handle this fault with. ++ */ ++ if (user_mode(regs)) { ++ __do_user_fault(tsk, addr, fsr, SEGV_MAPERR, regs); ++ } else { ++ __do_kernel_fault(mm, addr, fsr, regs); ++ } ++} ++ ++#define VM_FAULT_BADMAP 0x010000 ++#define VM_FAULT_BADACCESS 0x020000 ++ ++void do_page_fault(unsigned long entry, unsigned long addr, ++ unsigned int error_code, struct pt_regs *regs) ++{ ++ struct task_struct *tsk; ++ struct mm_struct *mm; ++ struct vm_area_struct *vma; ++ siginfo_t info; ++ int fault; ++ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; ++ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; ++ ++ error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE); ++ tsk = current; ++ mm = tsk->mm; ++ info.si_code = SEGV_MAPERR; ++ /* ++ * We fault-in kernel-space virtual memory on-demand. The ++ * 'reference' page table is init_mm.pgd. ++ * ++ * NOTE! We MUST NOT take any locks for this case. 
We may ++ * be in an interrupt or a critical region, and should ++ * only copy the information from the master page table, ++ * nothing more. ++ */ ++ if (addr >= TASK_SIZE) { ++ if (user_mode(regs)) ++ goto bad_area_nosemaphore; ++ ++ if (addr >= TASK_SIZE && addr < VMALLOC_END && (entry == 2)) ++ goto vmalloc_fault; ++ else ++ goto no_context; ++ } ++ ++ /* ++ * If we're in an interrupt or have no user ++ * context, we must not take the fault.. ++ */ ++ if (unlikely(in_atomic() || !mm)) ++ goto no_context; ++ ++ /* ++ * As per x86, we may deadlock here. However, since the kernel only ++ * validly references user space from well defined areas of the code, ++ * we can bug out early if this is from code which shouldn't. ++ */ ++ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { ++ if (!user_mode(regs) && ++ !search_exception_tables(instruction_pointer(regs))) ++ goto no_context; ++retry: ++ down_read(&mm->mmap_sem); ++ } else { ++ /* ++ * The above down_read_trylock() might have succeeded in which ++ * case, we'll have missed the might_sleep() from down_read(). ++ */ ++ might_sleep(); ++#ifdef CONFIG_DEBUG_VM ++ if (!user_mode(regs) && ++ !search_exception_tables(instruction_pointer(regs))) ++ goto no_context; ++#endif ++ } ++ ++ vma = find_vma(mm, addr); ++ ++ if (unlikely(!vma)) ++ goto bad_area; ++ ++ if (vma->vm_start <= addr) ++ goto good_area; ++ ++ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) ++ goto bad_area; ++ ++ if (unlikely(expand_stack(vma, addr))) ++ goto bad_area; ++ ++ /* ++ * Ok, we have a good vm_area for this memory access, so ++ * we can handle it.. ++ */ ++ ++good_area: ++ info.si_code = SEGV_ACCERR; ++ ++ /* first do some preliminary protection checks */ ++ if (entry == 2) { ++ if (error_code & ITYPE_mskINST) ++ mask = VM_EXEC; ++ else { ++ mask = VM_READ | VM_WRITE; ++ if (vma->vm_flags & VM_WRITE) ++ flags |= FAULT_FLAG_WRITE; ++ } ++ } else if (entry == 3) { ++ switch (error_code & ITYPE_mskETYPE) { ++ case RD_PROT: ++ mask = VM_READ; ++ break; ++ case WRT_PROT: ++ mask = VM_WRITE; ++ flags |= FAULT_FLAG_WRITE; ++ break; ++ case NOEXEC: ++ mask = VM_EXEC; ++ break; ++ case PAGE_MODIFY: ++ mask = VM_WRITE; ++ flags |= FAULT_FLAG_WRITE; ++ break; ++ case ACC_BIT: ++ BUG(); ++ default: ++ break; ++ } ++ ++ } ++ if (!(vma->vm_flags & mask)) ++ goto bad_area; ++ ++ /* ++ * If for any reason at all we couldn't handle the fault, ++ * make sure we exit gracefully rather than endlessly redo ++ * the fault. ++ */ ++ ++ fault = handle_mm_fault(mm, vma, addr, flags); ++ ++ /* ++ * If we need to retry but a fatal signal is pending, handle the ++ * signal first. We do not need to release the mmap_sem because it ++ * would already be released in __lock_page_or_retry in mm/filemap.c. ++ */ ++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) ++ return; ++ ++ if (unlikely(fault & VM_FAULT_ERROR)) { ++ if (fault & VM_FAULT_OOM) ++ goto out_of_memory; ++ else if (fault & VM_FAULT_SIGBUS) ++ goto do_sigbus; ++ BUG(); ++ } ++ ++ /* ++ * Major/minor page fault accounting is only done on the initial ++ * attempt. If we go through a retry, it is extremely likely that the ++ * page will be found in page cache at that point. ++ */ ++ if (flags & FAULT_FLAG_ALLOW_RETRY) { ++ if (fault & VM_FAULT_MAJOR) ++ tsk->maj_flt++; ++ else ++ tsk->min_flt++; ++ if (fault & VM_FAULT_RETRY) { ++ flags &= ~FAULT_FLAG_ALLOW_RETRY; ++ flags |= FAULT_FLAG_TRIED; ++ ++ /* No need to up_read(&mm->mmap_sem) as we would ++ * have already released it in __lock_page_or_retry ++ * in mm/filemap.c. 
++ */ ++ goto retry; ++ } ++ } ++ ++ up_read(&mm->mmap_sem); ++ return; ++ ++ /* ++ * Something tried to access memory that isn't in our memory map.. ++ * Fix it, but check if it's kernel or user first.. ++ */ ++bad_area: ++ up_read(&mm->mmap_sem); ++ ++bad_area_nosemaphore: ++ ++ /* User mode accesses just cause a SIGSEGV */ ++ ++ if (user_mode(regs)) { ++ tsk->thread.address = addr; ++ tsk->thread.error_code = error_code; ++ tsk->thread.trap_no = 14; ++ info.si_signo = SIGSEGV; ++ info.si_errno = 0; ++ /* info.si_code has been set above */ ++ info.si_addr = (void *)addr; ++ force_sig_info(SIGSEGV, &info, tsk); ++ return; ++ } ++ ++no_context: ++ ++ /* Are we prepared to handle this kernel fault? ++ * ++ * (The kernel has valid exception-points in the source ++ * when it acesses user-memory. When it fails in one ++ * of those points, we find it in a table and do a jump ++ * to some fixup code that loads an appropriate error ++ * code) ++ */ ++ ++ { ++ const struct exception_table_entry *entry; ++ ++ if ((entry = ++ search_exception_tables(instruction_pointer(regs))) != ++ NULL) { ++ /* Adjust the instruction pointer in the stackframe */ ++ instruction_pointer(regs) = entry->fixup; ++ return; ++ } ++ } ++ ++ /* ++ * Oops. The kernel tried to access some bad page. We'll have to ++ * terminate things with extreme prejudice. ++ */ ++ ++ bust_spinlocks(1); ++ printk(KERN_ALERT ++ "Unable to handle kernel %s at virtual address %08lx\n", ++ (addr < PAGE_SIZE) ? "NULL pointer dereference" : ++ "paging request", addr); ++ ++ show_pte(mm, addr); ++ die("Oops", regs, error_code); ++ bust_spinlocks(0); ++ do_exit(SIGKILL); ++ ++ /* TODO: verify this necessity */ ++ return; ++ ++ /* ++ * We ran out of memory, or some other thing happened to us that made ++ * us unable to handle the page fault gracefully. ++ */ ++ ++out_of_memory: ++ up_read(&mm->mmap_sem); ++ if (!user_mode(regs)) ++ goto no_context; ++ pagefault_out_of_memory(); ++ return; ++ ++do_sigbus: ++ up_read(&mm->mmap_sem); ++ ++ /* Kernel mode? Handle exceptions or die */ ++ if (!user_mode(regs)) ++ goto no_context; ++ ++ /* ++ * Send a sigbus ++ */ ++ tsk->thread.address = addr; ++ tsk->thread.error_code = error_code; ++ tsk->thread.trap_no = 14; ++ info.si_signo = SIGBUS; ++ info.si_errno = 0; ++ info.si_code = BUS_ADRERR; ++ info.si_addr = (void *)addr; ++ force_sig_info(SIGBUS, &info, tsk); ++ ++ return; ++ ++vmalloc_fault: ++ { ++ /* ++ * Synchronize this task's top level page-table ++ * with the 'reference' page table. ++ * ++ * Use current_pgd instead of tsk->active_mm->pgd ++ * since the latter might be unavailable if this ++ * code is executed in a misfortunately run irq ++ * (like inside schedule() between switch_mm and ++ * switch_to...). 
++ */ ++ ++ unsigned int index = pgd_index(addr); ++ pgd_t *pgd, *pgd_k; ++ pud_t *pud, *pud_k; ++ pmd_t *pmd, *pmd_k; ++ pte_t *pte_k; ++ ++ pgd = (pgd_t *) __va(GET_L1_PPTB()) + index; ++ pgd_k = init_mm.pgd + index; ++ ++ if (!pgd_present(*pgd_k)) ++ goto no_context; ++ ++ pud = pud_offset(pgd, addr); ++ pud_k = pud_offset(pgd_k, addr); ++ if (!pud_present(*pud_k)) ++ goto no_context; ++ ++ pmd = pmd_offset(pud, addr); ++ pmd_k = pmd_offset(pud_k, addr); ++ if (!pmd_present(*pmd_k)) ++ goto no_context; ++ ++ if (!pmd_present(*pmd)) { ++ set_pmd(pmd, *pmd_k); ++ /* TODO: need to do a cache flush like arm, ++ * maybe add at header file */ ++ } else ++ ++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); ++ ++ /* ++ * Since the vmalloc area is global, we don't ++ * need to copy individual PTE's, it is enough to ++ * copy the pgd pointer into the pte page of the ++ * root task. If that is there, we'll find our pte if ++ * it exists. ++ */ ++ ++ /* Make sure the actual PTE exists as well to ++ * catch kernel vmalloc-area accesses to non-mapped ++ * addres. If we don't do this, this will just ++ * silently loop forever. ++ */ ++ ++ pte_k = pte_offset_kernel(pmd_k, addr); ++ if (!pte_present(*pte_k)) ++ goto no_context; ++ ++ return; ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/fault.h linux-3.4.110/arch/nds32/mm/fault.h +--- linux-3.4.110.orig/arch/nds32/mm/fault.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/fault.h 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,5 @@ ++void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); ++ ++void show_pte(struct mm_struct *mm, unsigned long addr); ++ ++unsigned long search_exception_table(unsigned long addr); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/highmem.c linux-3.4.110/arch/nds32/mm/highmem.c +--- linux-3.4.110.orig/arch/nds32/mm/highmem.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/highmem.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,76 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++void *kmap(struct page *page) ++{ ++ unsigned long vaddr; ++ might_sleep(); ++ if (!PageHighMem(page)) ++ return page_address(page); ++ vaddr = (unsigned long)kmap_high(page); ++ return (void *)vaddr; ++} ++ ++EXPORT_SYMBOL(kmap); ++ ++void kunmap(struct page *page) ++{ ++ BUG_ON(in_interrupt()); ++ if (!PageHighMem(page)) ++ return; ++ kunmap_high(page); ++} ++ ++EXPORT_SYMBOL(kunmap); ++ ++/* ++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because ++ * no global lock is needed and because the kmap code must perform a global TLB ++ * invalidation when the kmap pool wraps. ++ * ++ * However when holding an atomic kmap is is not legal to sleep, so atomic ++ * kmaps are appropriate for short, tight code paths only. 
++ */ ++void *kmap_atomic(struct page *page) ++{ ++ unsigned int idx; ++ unsigned long vaddr, pte; ++ int type; ++ ++ pagefault_disable(); ++ if (!PageHighMem(page)) ++ return page_address(page); ++ ++ type = kmap_atomic_idx_push(); ++ ++ idx = type + KM_TYPE_NR * smp_processor_id(); ++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL); ++ asm volatile ("mtsr %0, $TLB_VPN\n\t" ++ "dsb\n\t" ++ "tlbop %1, RWLK\n\t" ++ "isb\n\t" ++ ::"r" (vaddr), "r"(pte)); ++ return (void *)vaddr; ++} ++ ++EXPORT_SYMBOL(kmap_atomic); ++ ++void __kunmap_atomic(void *kvaddr) ++{ ++ if (kvaddr >= (void *)FIXADDR_START) { ++ kmap_atomic_idx_pop(); ++ asm volatile ("tlbop %0, UNLK\n\t" ++ "tlbop %0, INV\n\t" ++ ::"r" (kvaddr)); ++ } ++ pagefault_enable(); ++} ++ ++EXPORT_SYMBOL(__kunmap_atomic); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/init.c linux-3.4.110/arch/nds32/mm/init.c +--- linux-3.4.110.orig/arch/nds32/mm/init.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/init.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,438 @@ ++/* ++ * linux/arch/nds32/mm/init.c ++ * ++ * Copyright (C) 1995-2002 Russell King ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "mm.h" ++#include "../../kernel/signal.h" ++ ++#define TABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) ++ ++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); ++DEFINE_SPINLOCK(anon_alias_lock); ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++extern unsigned long phys_initrd_start; ++extern unsigned long phys_initrd_size; ++ ++/* ++ * empty_zero_page is a special page that is used for ++ * zero-initialized data and COW. 
++ */ ++struct page *empty_zero_page; ++ ++void show_mem(unsigned int flags) ++{ ++ int free = 0, total = 0, reserved = 0; ++ int shared = 0, cached = 0, slab = 0, node; ++ ++ printk("Mem-info:\n"); ++ show_free_areas(flags); ++ printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); ++ ++ for_each_online_node(node) { ++ struct page *page, *end; ++ ++ page = NODE_MEM_MAP(node); ++ end = page + NODE_DATA(node)->node_spanned_pages; ++ ++ do { ++ total++; ++ if (PageReserved(page)) ++ reserved++; ++ else if (PageSwapCache(page)) ++ cached++; ++ else if (PageSlab(page)) ++ slab++; ++ else if (!page_count(page)) ++ free++; ++ else ++ shared += page_count(page) - 1; ++ page++; ++ } while (page < end); ++ } ++ ++ printk("%d pages of RAM\n", total); ++ printk("%d free pages\n", free); ++ printk("%d reserved pages\n", reserved); ++ printk("%d slab pages\n", slab); ++ printk("%d pages shared\n", shared); ++ printk("%d pages swap cached\n", cached); ++} ++ ++struct node_info { ++ unsigned int start; ++ unsigned int end; ++ int bootmap_pages; ++}; ++ ++#define O_PFN_DOWN(x) ((x) >> PAGE_SHIFT) ++#define V_PFN_DOWN(x) O_PFN_DOWN(__pa(x)) ++ ++#define O_PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) ++#define V_PFN_UP(x) O_PFN_UP(__pa(x)) ++ ++#define PFN_SIZE(x) ((x) >> PAGE_SHIFT) ++#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ ++ (((unsigned long)(s)) & PAGE_MASK)) ++#ifdef CONFIG_EARLY_PRINTK ++#include ++ ++/* ++ * Using tlbop to create an early I/O mapping ++ */ ++void __iomem *__init early_io_map(phys_addr_t pa) ++{ ++ unsigned long va; ++ pa &= PAGE_MASK; ++ pa += pgprot_val(PAGE_DEVICE); ++ va = fix_to_virt(FIX_EARLY_DEBUG); ++ /* insert and lock this page to tlb entry directly */ ++ asm volatile ("mtsr %0, $TLB_VPN\n\t" ++ "dsb\n\t" ++ "tlbop %1, RWLK\n\t" "isb\n\t"::"r" (va), "r"(pa)); ++ return (void __iomem *)va; ++} ++ ++int __init early_io_unmap(void) ++{ ++ unsigned long va; ++ va = fix_to_virt(FIX_EARLY_DEBUG); ++ asm volatile ("tlbop %0, UNLK\n\t" ++ "tlbop %0, INV\n\t" "isb\n\t"::"r" (va)); ++ return 0; ++} ++ ++late_initcall(early_io_unmap); ++#endif ++ ++static void __init zone_sizes_init(void) ++{ ++ unsigned long zones_size[MAX_NR_ZONES]; ++ ++ /* Clear the zone sizes */ ++ memset(zones_size, 0, sizeof(zones_size)); ++ ++ zones_size[ZONE_NORMAL] = max_low_pfn; ++#ifdef CONFIG_HIGHMEM ++ zones_size[ZONE_HIGHMEM] = max_pfn; ++#endif ++ free_area_init_nodes(zones_size); ++ ++} ++ ++/* ++ * Map all physical memory under high_memory into kernel's address space. ++ * ++ * This is explicitly coded for two-level page tables, so if you need ++ * something else then this needs to change. ++ */ ++static void __init map_ram(void) ++{ ++ unsigned long v, p, e; ++ pgd_t *pge; ++ pud_t *pue; ++ pmd_t *pme; ++ pte_t *pte; ++ /* These mark extents of read-only kernel pages... ++ * ...from vmlinux.lds.S ++ */ ++ ++ p = (u32) memblock_start_of_DRAM() & PAGE_MASK; ++ e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory)); ++ ++ v = (u32) __va(p); ++ pge = pgd_offset_k(v); ++ ++ while (p < e) { ++ int j; ++ pue = pud_offset(pge, v); ++ pme = pmd_offset(pue, v); ++ ++ if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) { ++ panic("%s: Kernel hardcoded for " ++ "two-level page tables", __func__); ++ } ++ ++ /* Alloc one page for holding PTE's... 
*/ ++ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); ++ set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); ++ ++ /* Fill the newly allocated page with PTE'S */ ++ for (j = 0; p < e && j < PTRS_PER_PTE; ++ v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) { ++ /* Create mapping between p and v. */ ++ /* TODO: more fine grant for page access permission */ ++ set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL))); ++ } ++ ++ pge++; ++ } ++} ++ ++static void __init fixedrange_init(void) ++{ ++ unsigned long vaddr, phys, prot; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *pte; ++ ++ /* ++ * Fixed mappings: ++ */ ++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); ++ pgd = swapper_pg_dir + pgd_index(vaddr); ++ pud = pud_offset(pgd, vaddr); ++ pmd = pmd_offset(pud, vaddr); ++ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); ++ set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); ++ ++ /* create return_syscall mapping. */ ++ vaddr = __fix_to_virt(FIX_RETURN_SYSCALL); ++ phys = RETURN_SYSCALL_PA_BASE; ++ prot = PAGE_UXKRWX_V2; ++ pte = pte_offset_kernel(pmd, vaddr); ++ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); ++ ++#ifdef CONFIG_HIGHMEM ++ /* ++ * Permanent kmaps: ++ */ ++ vaddr = PKMAP_BASE; ++ ++ pgd = swapper_pg_dir + pgd_index(vaddr); ++ pud = pud_offset(pgd, vaddr); ++ pmd = pmd_offset(pud, vaddr); ++ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); ++ set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); ++ pkmap_page_table = pte; ++#endif /* CONFIG_HIGHMEM */ ++} ++ ++/* ++ * paging_init() sets up the page tables, initialises the zone memory ++ * maps, and sets up the zero page, bad page and bad page tables. ++ */ ++void __init paging_init(struct machine_desc *mdesc) ++{ ++ void *zero_page; ++ int i; ++ ++ printk(KERN_INFO "Setting up paging and PTEs.\n"); ++ ++#ifdef CONFIG_BLK_DEV_INITRD ++ if (phys_initrd_size) { ++ /* assume initrd is put on node 0 */ ++ reserve_bootmem_node(NODE_DATA(0), phys_initrd_start, ++ phys_initrd_size, BOOTMEM_DEFAULT); ++ initrd_start = __phys_to_virt(phys_initrd_start); ++ initrd_end = initrd_start + phys_initrd_size; ++ printk(KERN_INFO ++ "initrd_start at 0x%08lx, initrd_end at 0x%08lx\n", ++ initrd_start, initrd_end); ++ } ++#endif ++ ++ /* clear out the init_mm.pgd that will contain the kernel's mappings */ ++ for (i = 0; i < PTRS_PER_PGD; i++) ++ swapper_pg_dir[i] = __pgd(1); ++ ++ map_ram(); ++ ++ if (mdesc->map_io) ++ mdesc->map_io(); ++ ++ fixedrange_init(); ++ ++ /* allocate space for empty_zero_page */ ++ zero_page = alloc_bootmem_low_pages(PAGE_SIZE); ++ memset(zero_page, 0, PAGE_SIZE); ++ ++ zone_sizes_init(); ++ ++ empty_zero_page = virt_to_page(zero_page); ++#ifdef CONFIG_NO_KERNEL_LARGE_PAGE ++ SET_MMU_CTL(GET_MMU_CTL() & ~0x400); ++#endif ++#ifdef CONFIG_SMP ++ cpu_dcache_wbinval_all(); ++#endif ++ flush_dcache_page(empty_zero_page); ++} ++ ++static inline int free_area(unsigned long addr, unsigned long end, char *s) ++{ ++ unsigned int size = (end - addr) >> 10; ++ int pages = 0; ++ ++ for (; addr < end; addr += PAGE_SIZE) { ++ struct page *page = virt_to_page(addr); ++ ClearPageReserved(page); ++ init_page_count(page); ++ free_page(addr); ++ totalram_pages++; ++ pages++; ++ } ++ ++ if (size && s) ++ printk(KERN_INFO "free_area: Freeing %s memory: %dK\n", s, ++ size); ++ ++ return pages; ++} ++ ++/* Free the reserved page into the buddy system, so it gets managed. 
*/ ++static inline void __free_reserved_page(struct page *page) ++{ ++ ClearPageReserved(page); ++ init_page_count(page); ++ __free_page(page); ++} ++ ++#ifdef CONFIG_HIGHMEM ++void free_highmem_page(struct page *page) ++{ ++ __free_reserved_page(page); ++ totalram_pages++; ++ totalhigh_pages++; ++} ++#endif ++ ++static inline void __init free_highmem(void) ++{ ++#ifdef CONFIG_HIGHMEM ++ unsigned long pfn; ++ for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) { ++ phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT; ++ if (!memblock_is_reserved(paddr)) ++ free_highmem_page(pfn_to_page(pfn)); ++ } ++#endif ++} ++ ++static void __init set_max_mapnr_init(void) ++{ ++ max_mapnr = max_pfn; ++} ++ ++/* ++ * mem_init() marks the free areas in the mem_map and tells us how much ++ * memory is free. This is done after various parts of the system have ++ * claimed their memory after the kernel image. ++ */ ++void __init mem_init(void) ++{ ++ phys_addr_t memory_start = memblock_start_of_DRAM(); ++ BUG_ON(!mem_map); ++ set_max_mapnr_init(); ++ ++ free_highmem(); ++ ++ /* this will put all low memory onto the freelists */ ++ totalram_pages += free_all_bootmem(); ++ ++ printk(KERN_INFO "virtual kernel memory layout:\n" ++ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" ++#ifdef CONFIG_HIGHMEM ++ " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" ++#endif ++ " consist : 0x%08lx - 0x%08lx (%4ld MB)\n" ++ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" ++ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" ++ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" ++ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" ++ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", ++ FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, ++#ifdef CONFIG_HIGHMEM ++ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, ++ (LAST_PKMAP * PAGE_SIZE) >> 10, ++#endif ++ CONSISTENT_BASE, CONSISTENT_END, ++ ((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START, ++ (unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, ++ (unsigned long)__va(memory_start), (unsigned long)high_memory, ++ ((unsigned long)high_memory - ++ (unsigned long)__va(memory_start)) >> 20, ++ (unsigned long)&__init_begin, (unsigned long)&__init_end, ++ ((unsigned long)&__init_end - ++ (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, ++ (unsigned long)&_edata, ++ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, ++ (unsigned long)&_text, (unsigned long)&_etext, ++ ((unsigned long)&_etext - (unsigned long)&_text) >> 10); ++ ++ /* ++ * Check boundaries twice: Some fundamental inconsistencies can ++ * be detected at build time already. 
++ */ ++#define __FIXADDR_TOP (-PAGE_SIZE) ++#ifdef CONFIG_HIGHMEM ++ BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START); ++ BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE); ++#endif ++ BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); ++#define high_memory (-128UL << 20) ++ BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); ++#undef high_memory ++#undef __FIXADDR_TOP ++ ++#ifdef CONFIG_HIGHMEM ++ BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START); ++ BUG_ON(CONSISTENT_END > PKMAP_BASE); ++#endif ++ BUG_ON(VMALLOC_END > CONSISTENT_BASE); ++ BUG_ON(VMALLOC_START >= VMALLOC_END); ++ BUG_ON((unsigned long)high_memory > VMALLOC_START); ++ ++ return; ++} ++ ++void free_initmem(void) ++{ ++ free_area((unsigned long)(&__init_begin), ++ (unsigned long)(&__init_end), "init"); ++} ++ ++#ifdef CONFIG_BLK_DEV_INITRD ++static int keep_initrd; ++ ++void free_initrd_mem(unsigned long start, unsigned long end) ++{ ++ if (!keep_initrd) ++ free_area(start, end, "initrd"); ++} ++ ++static int __init keepinitrd_setup(char *__unused) ++{ ++ keep_initrd = 1; ++ return 1; ++} ++ ++__setup("keepinitrd", keepinitrd_setup); ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/mm/ioremap.c linux-3.4.110/arch/nds32/mm/ioremap.c +--- linux-3.4.110.orig/arch/nds32/mm/ioremap.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/ioremap.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * linux/arch/nds32/mm/ioremap.c ++ * ++ * Re-map IO memory to kernel address space so that we can access it. ++ * ++ * (C) Copyright 1995 1996 Linus Torvalds ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * Hacked for ARM by Phil Blundell ++ * Hacked to allow all architectures to build, and various cleanups ++ * by Russell King ++ * ++ * This allows a driver to remap an arbitrary region of bus memory into ++ * virtual space. One should *only* use readl, writel, memcpy_toio and ++ * so on with such remapped areas. ++ * ++ * Because the ARM only has a 32-bit address space we can't address the ++ * whole of the (physical) PCI space at once. PCI huge-mode addressing ++ * allows us to circumvent this restriction by splitting PCI space into ++ * two 2GB chunks and mapping only one at a time into processor memory. ++ * We use MMU protection domains to trap any attempt to access the bank ++ * that is not currently mapped. (This isn't fully implemented yet.) ++ */ ++#include ++#include ++#include ++#include ++ ++/* ++ * Remap an arbitrary physical address space into the kernel virtual ++ * address space. Needed when the kernel wants to access high addresses ++ * directly. ++ * ++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously ++ * have to convert them into an offset in a page-aligned mapping, but the ++ * caller shouldn't need to know that small detail. ++ * ++ * 'flags' are the extra L_PTE_ flags that you want to specify for this ++ * mapping. See include/asm-arm/proc-armv/pgtable.h for more information. ++ */ ++void __iomem *__ioremap(unsigned long phys_addr, size_t size, ++ unsigned long flags, unsigned long align) ++{ ++ struct vm_struct *area; ++ unsigned long addr, offset, last_addr; ++ pgprot_t prot; ++ ++ /* Don't allow wraparound or zero size */ ++ last_addr = phys_addr + size - 1; ++ if (!size || last_addr < phys_addr) ++ return NULL; ++ ++ /* ++ * Mappings have to be page-aligned ++ */ ++ offset = phys_addr & ~PAGE_MASK; ++ phys_addr &= PAGE_MASK; ++ size = PAGE_ALIGN(last_addr + 1) - phys_addr; ++ ++ /* ++ * Ok, go for it.. 
++ */ ++ area = get_vm_area(size, VM_IOREMAP); ++ if (!area) ++ return NULL; ++ ++ area->phys_addr = phys_addr; ++ addr = (unsigned long)area->addr; ++ /* TODO: verify this value for ioremap */ ++ prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | ++ _PAGE_G | _PAGE_C_DEV | flags); ++ /* TODO: verify this use generic ioremap_page_range instead of ++ * self's remap_area_pages */ ++ if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { ++ vunmap((void *)addr); ++ return NULL; ++ } ++ return (__force void __iomem *)(offset + (char *)addr); ++ ++} ++ ++EXPORT_SYMBOL(__ioremap); ++ ++void __iounmap(void __iomem * addr) ++{ ++ vunmap((void *)(PAGE_MASK & (unsigned long)addr)); ++} ++ ++EXPORT_SYMBOL(__iounmap); +diff -Nur linux-3.4.110.orig/arch/nds32/mm/Makefile linux-3.4.110/arch/nds32/mm/Makefile +--- linux-3.4.110.orig/arch/nds32/mm/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/Makefile 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,24 @@ ++# ++# Makefile for the linux arm-specific parts of the memory manager. ++# ++ ++obj-y := consistent.o extable.o \ ++ fault.o init.o ioremap.o mmap.o \ ++ mm-nds32.o cacheflush.o ++ ++obj-y += proc-n12.o ++obj-$(CONFIG_CCTL) += cctl.o ++obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o ++ifneq ($(CONFIG_CPU_NO_CONTEXT_ID), y) ++obj-y += tlb.o ++endif ++gcc_ver :=$(shell $(CC) -E -dM -xc /dev/null | grep __VERSION__ | sed 's/\#define __VERSION__ //') ++ifeq ($(shell expr `echo $(gcc_ver)` \>= 4.9.2 ), 1) ++CFLAGS_proc-n12.o += -fomit-frame-pointer ++else ++CFLAGS_proc-n12.o += -fomit-frame-pointer -mno-16bit ++endif ++obj-$(CONFIG_HIGHMEM) += highmem.o ++ifdef CONFIG_FUNCTION_TRACER ++CFLAGS_REMOVE_proc-n12.o = -pg ++endif +diff -Nur linux-3.4.110.orig/arch/nds32/mm/mmap.c linux-3.4.110/arch/nds32/mm/mmap.c +--- linux-3.4.110.orig/arch/nds32/mm/mmap.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/mmap.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,101 @@ ++/* ++ * linux/arch/nds32/mm/mmap.c ++ * ++ * Copyright (C) 2009 Andes Technology Corporation ++ */ ++#include ++#include ++#include ++ ++#define COLOUR_ALIGN(addr,pgoff) \ ++ ((((addr)+REALSHMLBA-1)&~(REALSHMLBA-1)) + \ ++ (((pgoff)<mm; ++ struct vm_area_struct *vma; ++ unsigned long start_addr; ++ int do_align = 0; ++#ifdef CONFIG_CPU_CACHE_NONALIASING ++ int aliasing = 0; ++#else ++ int aliasing = 1; ++#endif ++ ++ /* ++ * We only need to do colour alignment if either the I or D ++ * caches alias. ++ */ ++ if (aliasing) ++ do_align = filp || (flags & MAP_SHARED); ++ ++ /* ++ * We should enforce the MAP_FIXED case. However, currently ++ * the generic kernel code doesn't allow us to handle this. ++ */ ++ if (flags & MAP_FIXED) { ++ if (aliasing && flags & MAP_SHARED && addr & (REALSHMLBA - 1)) ++ return -EINVAL; ++ return addr; ++ } ++ ++ if (len > TASK_SIZE) ++ return -ENOMEM; ++ ++ if (addr) { ++ if (do_align) ++ addr = COLOUR_ALIGN(addr, pgoff); ++ else ++ addr = PAGE_ALIGN(addr); ++ ++ vma = find_vma(mm, addr); ++ if (TASK_SIZE - len >= addr && ++ (!vma || addr + len <= vma->vm_start)) ++ return addr; ++ } ++ start_addr = addr = mm->free_area_cache; ++ ++full_search: ++ if (do_align) ++ addr = COLOUR_ALIGN(addr, pgoff); ++ else ++ addr = PAGE_ALIGN(addr); ++ ++ for (vma = find_vma(mm, addr);; vma = vma->vm_next) { ++ /* At this point: (!vma || addr < vma->vm_end). */ ++ if (TASK_SIZE - len < addr) { ++ /* ++ * Start a new search - just in case we missed ++ * some holes. 
++ */ ++ if (start_addr != TASK_UNMAPPED_BASE) { ++ start_addr = addr = TASK_UNMAPPED_BASE; ++ goto full_search; ++ } ++ return -ENOMEM; ++ } ++ if (!vma || addr + len <= vma->vm_start) { ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; ++ } ++ addr = vma->vm_end; ++ if (do_align) ++ addr = COLOUR_ALIGN(addr, pgoff); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/mm.h linux-3.4.110/arch/nds32/mm/mm.h +--- linux-3.4.110.orig/arch/nds32/mm/mm.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/mm.h 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,19 @@ ++/* the upper-most page table pointer */ ++ ++#ifdef CONFIG_MMU ++ ++extern pmd_t *top_pmd; ++ ++#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) ++ ++static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt) ++{ ++ return pmd_offset(pgd, virt); ++} ++ ++static inline pmd_t *pmd_off_k(unsigned long virt) ++{ ++ return pmd_off(pgd_offset_k(virt), virt); ++} ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/mm/mm-nds32.c linux-3.4.110/arch/nds32/mm/mm-nds32.c +--- linux-3.4.110.orig/arch/nds32/mm/mm-nds32.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/mm-nds32.c 2016-04-07 10:20:50.982082572 +0200 +@@ -0,0 +1,267 @@ ++/* ++ * linux/arch/nds32/mm/mm-nds32.c ++ * ++ * Copyright (C) 1998-2002 Russell King ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * Page table sludge for Andes N10/N12 processor architectures. ++ */ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include "./../kernel/signal.h" ++ ++extern void _text, _stext, _etext; ++extern void *high_memory; ++ ++#define FIRST_KERNEL_PGD_NR (USER_PTRS_PER_PGD) ++ ++/* ++ * need to get a 4k page for level 1 ++ */ ++ ++pgd_t *get_pgd_slow(struct mm_struct *mm) ++{ ++ pgd_t *new_pgd, *init_pgd; ++ int i; ++ ++ new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0); //M order 0: one page ++ if (!new_pgd) ++ return NULL; ++ for (i = 0; i < PTRS_PER_PGD; i++) { ++ (*new_pgd) = 1; ++ new_pgd++; ++ } ++ new_pgd -= PTRS_PER_PGD; ++ ++ init_pgd = pgd_offset_k(0); ++ ++ memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, ++ (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); ++ ++ cpu_dcache_wb_range((unsigned long)new_pgd, ++ (unsigned long)new_pgd + ++ PTRS_PER_PGD * sizeof(pgd_t)); ++ inc_zone_page_state(virt_to_page((unsigned long *)new_pgd), ++ NR_PAGETABLE); ++ ++ return new_pgd; ++} ++ ++void free_pgd_slow(struct mm_struct *mm, pgd_t * pgd) ++{ ++ pmd_t *pmd; ++ struct page *pte; ++ ++ if (!pgd) ++ return; ++ ++ pmd = (pmd_t *) pgd; ++ if (pmd_none(*pmd)) ++ goto free; ++ if (pmd_bad(*pmd)) { ++ pmd_ERROR(*pmd); ++ pmd_clear(pmd); ++ goto free; ++ } ++ ++ pte = pmd_page(*pmd); ++ pmd_clear(pmd); ++ dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE); ++ pte_free(mm, pte); ++ pmd_free(mm, pmd); ++free: ++ free_pages((unsigned long)pgd, 0); ++} ++ ++/* ++ * Add a PAGE mapping between VIRT and PHYS in domain ++ * DOMAIN with protection PROT. Note that due to the ++ * way we map the PTEs, we must allocate two PTE_SIZE'd ++ * blocks - one for the Linux pte table, and one for ++ * the hardware pte table. 
++ */ ++static inline void ++alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, ++ pgprot_t prot) ++{ ++ pmd_t *pmdp; ++ pte_t *ptep; ++ ++ pmdp = pmd_offset(pgd_offset_k(virt), virt); //L1PTE ++ if (pmd_none(*pmdp)) { //must not or 0xc0000000 ++ ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t)); ++ set_pmd(pmdp, __mk_pmd(ptep, 0)); ++ } ++ ptep = pte_offset_kernel(pmdp, virt); //L2PTE ++ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot)); ++} ++ ++/* ++ * Clear any PGD mapping. On a two-level page table system, ++ * the clearance is done by the middle-level functions (pmd) ++ * rather than the top-level (pgd) functions. ++ */ ++static inline void clear_mapping(unsigned long virt) ++{ ++ pmd_clear(pmd_offset(pgd_offset_k(virt), virt)); ++} ++ ++struct mem_types { ++ unsigned int prot_pte; ++ unsigned int prot_l1; ++}; ++ ++static struct mem_types mem_types[] __initdata = { ++ [MT_DEVICE] = { ++ .prot_pte = 0x9f, //_KERNPG_TABLE, ++ .prot_l1 = PMD_TYPE_TABLE, ++ }, ++ [MT_CACHECLEAN] = { ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_pte = 0x0, //_KERNPG_TABLE, ++ }, ++ [MT_MINICLEAN] = { ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_pte = 0x0, // _KERNPG_TABLE, ++ }, ++ [MT_CACHE_L1] = { ++ .prot_pte = PAGE_CACHE_L1, ++ .prot_l1 = PMD_TYPE_TABLE, ++ }, ++ [MT_UXKRWX_V1] = { ++ .prot_pte = PAGE_UXKRWX_V1, ++ .prot_l1 = PMD_TYPE_TABLE, ++ }, ++ [MT_UXKRWX_V2] = { ++ .prot_pte = PAGE_UXKRWX_V2, ++ .prot_l1 = PMD_TYPE_TABLE, ++ }, ++ [MT_MEMORY] = { ++ .prot_pte = PAGE_MEMORY, ++ .prot_l1 = PMD_TYPE_TABLE, ++ }, ++ [MT_ROM] = { ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_pte = 0x2bb, // _KERNPG_TABLE, ++ }, ++ [MT_ILM] = { ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_pte = 0x2b7, // _KERNPG_TABLE, ++ }, ++ [MT_DLM] = { ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_pte = 0x297, // _KERNPG_TABLE, ++ } ++}; ++ ++/* ++ * Create the page directory entries and any necessary ++ * page tables for the mapping specified by `md'. We ++ * are able to cope here with varying sizes and address ++ * offsets, and we take full advantage of sections. ++ */ ++static void __init create_mapping(struct map_desc *md) ++{ ++ unsigned long virt, length; ++ int prot_l1; ++ pgprot_t prot_pte; ++ long off; ++ ++ printk("virt:0x%08lx,phys:%08lx,size:%08lx,pte:%08x\n", ++ md->virtual, md->physical, md->length, ++ __pgprot(mem_types[md->type].prot_pte)); ++ ++ if (md->virtual < TASK_SIZE) { ++ printk(KERN_WARNING "BUG: not creating mapping area for " ++ "0x%08lx at 0x%08lx in user region, next frame\n", ++ md->physical, md->virtual); ++ panic("In :%s, line:%d", __func__, __LINE__); ++ return; ++ } ++ ++ if ((md->type == MT_DEVICE || md->type == MT_ROM) && ++ md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { ++ printk(KERN_WARNING "BUG: mapping area for 0x%08lx at 0x%08lx " ++ "overlaps vmalloc space, next frame\n", ++ md->physical, md->virtual); ++ panic("In :%s, line:%d", __func__, __LINE__); ++ } ++ ++ prot_pte = __pgprot(mem_types[md->type].prot_pte); ++ prot_l1 = mem_types[md->type].prot_l1; ++ virt = md->virtual; ++ off = md->physical - virt; ++ ++ length = md->length; ++ if (mem_types[md->type].prot_l1 == 1 && ++ (virt & 0xfffff || (virt + off) & 0xfffff ++ || (virt + length) & 0xfffff)) { ++ printk(KERN_WARNING ++ "BUG: map area for 0x%08lx at 0x%08lx can not " ++ "be mapped using pages, ignoring. 
next frame\n", ++ md->physical, md->virtual); ++ panic("In :%s, line:%d", __func__, __LINE__); ++ return; ++ } ++ ++ while (length >= PAGE_SIZE) { ++#ifdef CONFIG_SMP ++ if (((virt >= (unsigned long)&_text) ++ && (virt < (unsigned long)&_etext)) ++ || ((virt >= 0xc0000000) && (virt < 0xc0008000))) ++ prot_pte = __pgprot(mem_types[MT_CACHE_L1].prot_pte); ++#endif ++ alloc_init_page(virt, virt + off, prot_l1, prot_pte); ++ virt += PAGE_SIZE; ++ length -= PAGE_SIZE; ++ } ++} ++ ++/* ++ * In order to soft-boot, we need to insert a 1:1 mapping in place of ++ * the user-mode pages. This will then ensure that we have predictable ++ * results when turning the mmu off ++ */ ++void setup_mm_for_reboot(char mode) ++{ ++ unsigned long pmdval; ++ pgd_t *pgd; ++ pmd_t *pmd; ++ int i; ++ ++ if (current->mm && current->mm->pgd) ++ pgd = current->mm->pgd; ++ else ++ pgd = init_mm.pgd; ++ ++ for (i = 0; i < USER_PTRS_PER_PGD; i++) { ++ pmdval = (i << PGDIR_SHIFT); ++ pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT); ++ set_pmd(pmd, __pmd(pmdval)); ++ } ++} ++ ++/* ++ * Create the architecture specific mappings ++ */ ++void __init iotable_init(struct map_desc *io_desc, int nr) ++{ ++ int i; ++ ++ for (i = 0; i < nr; i++) { ++ create_mapping(io_desc + i); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/proc-n12.c linux-3.4.110/arch/nds32/mm/proc-n12.c +--- linux-3.4.110.orig/arch/nds32/mm/proc-n12.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/proc-n12.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,845 @@ ++/* ++ * linux/arch/nds32/mm/proc-nds32.c ++ * ++ * Copyright (C) 2006 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ++ * These are the low level assembler for performing cache and TLB ++ * functions on the nds32. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_CACHE_L2 ++#include ++#endif ++#include ++ ++#include ++extern struct cache_info L1_cache_info[2]; ++ ++#ifdef CONFIG_CACHE_L2 ++void n12_L2cache_inval(void) ++{ ++ unsigned long cmd = CCTL_CMD_L2_IX_INVAL | CCTL_ALL_CMD; ++ ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, CCTL_CMD_L2_SYNC); ++ L2_CMD_RDY(); ++} ++ ++void n12_L2cache_wb(void) ++{ ++ unsigned long cmd = CCTL_CMD_L2_IX_WB | CCTL_ALL_CMD; ++ ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, CCTL_CMD_L2_SYNC); ++ L2_CMD_RDY(); ++} ++#endif ++ ++int va_kernel_present(unsigned long addr) ++{ ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ int ret = 0; ++ ++ pmd = pmd_offset(pgd_offset_k(addr), addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte)) ++ ret = 1; ++ } ++ return ret; ++} ++ ++int va_present(struct mm_struct *mm, unsigned long addr) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ int ret = 0; ++ ++ pgd = pgd_offset(mm, addr); ++ if (!pgd_none(*pgd)) { ++ pud = pud_offset(pgd, addr); ++ if (!pud_none(*pud)) { ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte)) ++ ret = 1; ++ } ++ } ++ } ++ return ret; ++ ++} ++ ++int va_readable(struct pt_regs *regs, unsigned long addr) ++{ ++ struct mm_struct *mm = current->mm; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ int ret = 0; ++ ++ if (user_mode(regs)) { ++ /* user mode */ ++ pgd = pgd_offset(mm, addr); ++ if (!pgd_none(*pgd)) { ++ pud = pud_offset(pgd, addr); ++ if (!pud_none(*pud)) { ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte) && pte_read(pte)) ++ ret = 1; ++ } ++ } ++ } ++ } else { ++ /* superuser mode is always readable, so we can only ++ * check it is present or not*/ ++ pmd = pmd_offset(pgd_offset_k(addr), addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte)) ++ ret = 1; ++ } ++ } ++ return ret; ++} ++ ++int va_writable(struct pt_regs *regs, unsigned long addr) ++{ ++ struct mm_struct *mm = current->mm; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ int ret = 0; ++ ++ if (user_mode(regs)) { ++ /* user mode */ ++ pgd = pgd_offset(mm, addr); ++ if (!pgd_none(*pgd)) { ++ pud = pud_offset(pgd, addr); ++ if (!pud_none(*pud)) { ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte) && pte_write(pte)) ++ ret = 1; ++ } ++ } ++ } ++ } else { ++ /* superuser mode */ ++ pmd = pmd_offset(pgd_offset_k(addr), addr); ++ if (!pmd_none(*pmd)) { ++ ptep = pte_offset_map(pmd, addr); ++ pte = *ptep; ++ if (pte_present(pte) && pte_kernel_write(pte)) ++ ret = 1; ++ } ++ } ++ return ret; ++} ++ ++#if 0 ++static inline void flush_fill_buffer(void) ++{ ++ unsigned long kaddr, pte, flags; ++ ++ local_irq_save(flags); ++#define BASE_ADDR2 0xffff4000 ++ kaddr = BASE_ADDR2; ++ pte = (0x6000 | _PAGE_V | _PAGE_M_KRW | _PAGE_G | _PAGE_C_DEV); ++ asm("tlbop %0, INV\nisb\n"::"r"(kaddr)); ++ asm("mtsr %1, $mr2\ndsb\n" ++ "tlbop %0, RWR\nisb\n"::"r"(pte), "r"(kaddr)); ++asm("lmw.bi $r0, [%0], $r1\n"::"r"(kaddr):"$r0", "$r1"); ++ local_irq_restore(flags); ++} ++#else ++static inline 
void flush_fill_buffer(void) ++{ ++} ++#endif ++ ++/* ++ * All ++ */ ++void n12_icache_inval_all(void) ++{ ++ unsigned long end, line_size; ++ ++ line_size = L1_cache_info[ICACHE].line_size; ++ end = ++ line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets; ++ ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); ++ } while (end > 0); ++} ++ ++void n12_dcache_inval_all(void) ++{ ++#ifdef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl L1D_INVALALL"); ++#else ++ unsigned long end, line_size; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = ++ line_size * L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].sets; ++ ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++ } while (end > 0); ++#endif ++} ++ ++void n12_dcache_wb_all(void) ++{ ++#ifdef __NDS32_BASELINE_V3 ++#ifdef CONFIG_CACHE_L2 ++ __nds32__cctl_l1d_wball_alvl(); ++#else ++ __nds32__cctl_l1d_wball_one_lvl(); ++#endif ++#else ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ unsigned long end, line_size; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = ++ line_size * L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].sets; ++ ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++ } while (end > 0); ++#endif ++#endif ++} ++ ++void n12_dcache_wbinval_all(void) ++{ ++ unsigned long end, line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ unsigned long saved_gie; ++#endif ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = ++ line_size * L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].sets; ++ ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ GIE_SAVE(&saved_gie); ++#endif ++#ifdef __NDS32_BASELINE_V3 ++#ifdef CONFIG_CACHE_L2 ++ __nds32__cctl_l1d_wball_alvl(); ++#else ++ __nds32__cctl_l1d_wball_one_lvl(); ++#endif ++ __nds32__cctl_l1d_invalall(); ++#else ++ do { ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++#endif ++#ifndef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++#endif ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++#endif ++#ifndef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++#endif ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++#endif ++#ifndef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++#endif ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_WB"::"r" (end)); ++#endif ++#ifndef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl %0, L1D_IX_INVAL"::"r" (end)); ++#endif ++ 
} while (end > 0); ++#ifdef CONFIG_PLAT_AG102 ++ __asm__ volatile ("\n\tcctl L1D_INVALALL"); ++#endif ++#endif ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ GIE_RESTORE(saved_gie); ++#endif ++} ++ ++/* ++ * Page ++ */ ++void n12_icache_inval_page(unsigned long start) ++{ ++ unsigned long line_size, end; ++ ++ line_size = L1_cache_info[ICACHE].line_size; ++ end = start + PAGE_SIZE; ++ ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); ++ } while (end != start); ++} ++ ++void n12_dcache_inval_page(unsigned long start) ++{ ++ unsigned long line_size, end; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = start + PAGE_SIZE; ++ ++ flush_fill_buffer(); ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ } while (end != start); ++} ++ ++void n12_dcache_wb_page(unsigned long start) ++{ ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ unsigned long line_size, end; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = start + PAGE_SIZE; ++ ++ flush_fill_buffer(); ++ do { ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++ end -= line_size; ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++ } while (end != start); ++#endif ++} ++ ++void n12_dcache_wbinval_page(unsigned long start) ++{ ++ unsigned long line_size, end; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ end = start + PAGE_SIZE; ++ ++ flush_fill_buffer(); ++ do { ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++#endif ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++#endif ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++#endif ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ end -= line_size; ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); ++#endif ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); ++ } while (end != start); ++} ++ ++void n12_cache_wbinval_page(unsigned long page, int flushi) ++{ ++ n12_dcache_wbinval_page(page); ++ if (flushi) ++ n12_icache_inval_page(page); ++} ++ ++/* These functions are used to invalidate cache by idx instead of ++ * virtual address. User can use virtual address to do this purpose. ++ * It supports 4way and 32KB cache size with 32bytes cacheline size. 
*/ ++#include ++//#define WB_WITH_IDX ++inline unsigned long va2idx(unsigned long va, unsigned int cache_type, ++ unsigned long *way_offset) ++{ ++ unsigned char set_bits, way_bits, line_bits; ++ unsigned int idx; ++ set_bits = L1_cache_info[cache_type].set_bits; ++ way_bits = L1_cache_info[cache_type].way_bits; ++ line_bits = L1_cache_info[cache_type].line_bits; ++ *way_offset = set_bits + line_bits; ++ ++ idx = (va & (((1 << set_bits) - 1) << line_bits)); ++ return idx; ++} ++ ++static inline void n12_dcache_inval_idx(unsigned long p) ++{ ++ unsigned long idx, i, way_offset; ++ unsigned char ways; ++ ways = L1_cache_info[DCACHE].ways; ++ ++ /* Unroll loop. Not support 2 ways invalidate. */ ++ if (ways == 2) ++ panic("This way size is not supported. ways:%u, %s\n", ways, ++ __func__); ++ idx = va2idx(p, DCACHE, &way_offset); ++ for (i = 0; i < ways / 4; i++) { ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | i << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 1) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 2) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 3) << way_offset)); ++ } ++} ++ ++static inline void n12_dcache_wb_idx(unsigned long p) ++{ ++ unsigned long idx, i, way_offset; ++ unsigned char ways; ++ ways = L1_cache_info[DCACHE].ways; ++ ++ /* Unroll the loop. Not support 2 ways invalidate. */ ++ if (ways == 2) ++ panic("This way size is not supported. ways:%d, %s\n", ways, ++ __func__); ++ idx = va2idx(p, DCACHE, &way_offset); ++ for (i = 0; i < ways / 4; i++) { ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | i << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | i << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 1) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 1) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 2) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 2) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 3) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, ++ (idx | (i + 3) << way_offset)); ++ } ++} ++ ++static inline void n12_dcache_wbinval_idx(unsigned long p) ++{ ++ unsigned long idx, i, way_offset; ++ unsigned char ways; ++ ways = L1_cache_info[DCACHE].ways; ++ ++ /* Unroll the loop. Not support 2 ways invalidate. */ ++ if (ways == 2) ++ panic("This way size is not supported. 
ways:%d, %s\n", ways, ++ __func__); ++ idx = va2idx(p, DCACHE, &way_offset); ++ for (i = 0; i < ways / 4; i++) { ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | i << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 1) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 2) << way_offset)); ++ __nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, ++ (idx | (i + 3) << way_offset)); ++ } ++} ++ ++/* ++ * Range ++ */ ++void n12_icache_inval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ ++ line_size = L1_cache_info[ICACHE].line_size; ++ ++ while (end > start) { ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start)); ++ start += line_size; ++ } ++} ++ ++void n12_dcache_inval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ ++ flush_fill_buffer(); ++ while (end > start) { ++#ifdef WB_WITH_IDX ++ n12_dcache_inval_idx(start); ++#else ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); ++#endif ++ start += line_size; ++ } ++} ++ ++void n12_dcache_wb_range(unsigned long start, unsigned long end) ++{ ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++ unsigned long line_size; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ ++ flush_fill_buffer(); ++ while (end > start) { ++#ifdef WB_WITH_IDX ++ n12_dcache_wb_idx(start); ++#else ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); ++#endif ++ start += line_size; ++ } ++#endif ++} ++ ++void n12_dcache_wbinval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ ++ line_size = L1_cache_info[DCACHE].line_size; ++ ++ flush_fill_buffer(); ++ while (end > start) { ++#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ++#ifdef WB_WITH_IDX ++ n12_dcache_wbinval_idx(start); ++#else ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); ++#endif ++#endif ++ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); ++ start += line_size; ++ } ++} ++ ++void n12_cache_wbinval_range(unsigned long start, unsigned long end, int flushi) ++{ ++ n12_dcache_wbinval_range(start, end); ++ if (flushi) ++ n12_icache_inval_range(start, end); ++} ++ ++void n12_cache_wbinval_range_check(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ unsigned long line_size, t_start, t_end; ++ int flushi; ++ ++ flushi = vma->vm_flags & VM_EXEC; ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = start & ~(line_size - 1); ++ end = (end + line_size - 1) & ~(line_size - 1); ++ ++ if ((end - start) > (8 * PAGE_SIZE)) { ++ n12_dcache_wbinval_all(); ++ if (flushi) ++ n12_icache_inval_all(); ++ return; ++ } ++ ++ t_start = (start + PAGE_SIZE) & PAGE_MASK; ++ t_end = ((end - 1) & PAGE_MASK); ++ ++ if ((start & PAGE_MASK) == t_end) { ++ if (va_present(vma->vm_mm, start)) ++ n12_cache_wbinval_range(start, end, flushi); ++ return; ++ } ++ ++ if (va_present(vma->vm_mm, start)) ++ n12_cache_wbinval_range(start, t_start, flushi); ++ ++ if (va_present(vma->vm_mm, end - 1)) ++ n12_cache_wbinval_range(t_end, end, flushi); ++ ++ while (t_start < t_end) { ++ if (va_present(vma->vm_mm, t_start)) ++ n12_cache_wbinval_page(t_start, flushi); ++ t_start += PAGE_SIZE; ++ } ++} ++ ++/* ++ * DMA ++ */ ++void n12_dma_wb_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = start & (~(line_size - 1)); ++ end = (end + line_size - 1) & (~(line_size - 1)); ++ if (unlikely(start == 
end)) ++ return; ++ ++ n12_dcache_wb_range(start, end); ++ ++#ifdef CONFIG_CACHE_L2 ++ { ++ unsigned long p_start = __pa(start); ++ unsigned long p_end = __pa(end); ++ unsigned long cmd; ++ //TODO Can Use PAGE Mode to optimize if range large than PAGE_SIZE ++ line_size = L2_CACHE_LINE_SIZE(); ++ cmd = ++ (p_start & ~(line_size - 1)) | CCTL_CMD_L2_PA_WB | ++ CCTL_SINGLE_CMD; ++ do { ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ cmd += line_size; ++ p_start += line_size; ++ } while (p_end > p_start); ++ cmd = CCTL_CMD_L2_SYNC; ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++ ++ } ++#endif ++} ++ ++#ifdef CONFIG_CACHE_L2 ++void n12_l2dcache_wbinval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long p_start; ++ unsigned long p_end; ++ unsigned long cmd; ++ unsigned long line_size; ++ ++ p_start = __pa(start); ++ p_end = __pa(end); ++ //TODO Can Use PAGE Mode to optimize if range large than PAGE_SIZE ++ line_size = L2_CACHE_LINE_SIZE(); ++ cmd = ++ (p_start & ~(line_size - 1)) | CCTL_CMD_L2_PA_WBINVAL | ++ CCTL_SINGLE_CMD; ++ do { ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ cmd += line_size; ++ p_start += line_size; ++ } while (p_end > p_start); ++ cmd = CCTL_CMD_L2_SYNC; ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++ ++} ++#endif ++ ++void n12_dma_inval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ unsigned long old_start = start; ++ unsigned long old_end = end; ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = start & (~(line_size - 1)); ++ end = (end + line_size - 1) & (~(line_size - 1)); ++ if (unlikely(start == end)) ++ return; ++ if (start != old_start) { ++ n12_dcache_wbinval_range(start, start + line_size); ++#ifdef CONFIG_CACHE_L2 ++ n12_l2dcache_wbinval_range(start, start + line_size); ++#endif ++ } ++ if (end != old_end) { ++ n12_dcache_wbinval_range(end - line_size, end); ++#ifdef CONFIG_CACHE_L2 ++ n12_l2dcache_wbinval_range(end - line_size, end); ++#endif ++ } ++ n12_dcache_inval_range(start, end); ++#ifdef CONFIG_CACHE_L2 ++ unsigned long p_start = __pa(start); ++ unsigned long p_end = __pa(end); ++ unsigned long cmd; ++ //TODO Can Use PAGE Mode to optimize if range large than PAGE_SIZE ++ line_size = L2_CACHE_LINE_SIZE(); ++ cmd = ++ (p_start & ~(line_size - 1)) | CCTL_CMD_L2_PA_INVAL | ++ CCTL_SINGLE_CMD; ++ do { ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ cmd += line_size; ++ p_start += line_size; ++ } while (p_end > p_start); ++ cmd = CCTL_CMD_L2_SYNC; ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++#endif ++ ++} ++ ++void n12_dma_wbinval_range(unsigned long start, unsigned long end) ++{ ++ unsigned long line_size; ++ line_size = L1_cache_info[DCACHE].line_size; ++ start = start & (~(line_size - 1)); ++ end = (end + line_size - 1) & (~(line_size - 1)); ++ if (unlikely(start == end)) ++ return; ++ ++ n12_dcache_wbinval_range(start, end); ++#ifdef CONFIG_CACHE_L2 ++ { ++ unsigned long p_start = __pa(start); ++ unsigned long p_end = __pa(end); ++ unsigned long cmd; ++ //TODO Can Use PAGE Mode to optimize if range large than PAGE_SIZE ++ line_size = L2_CACHE_LINE_SIZE(); ++ cmd = ++ (p_start & ~(line_size - 1)) | CCTL_CMD_L2_PA_WBINVAL | ++ CCTL_SINGLE_CMD; ++ do { ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ cmd += line_size; ++ p_start += line_size; ++ } while (p_end > p_start); ++ cmd = CCTL_CMD_L2_SYNC; ++ L2_CMD_RDY(); ++ L2C_W_REG(L2_CCTL_CMD_OFF, cmd); ++ L2_CMD_RDY(); ++ ++ } ++#endif ++} ++ 
++void n12_proc_init(void) ++{ ++} ++ ++void n12_proc_fin(void) ++{ ++} ++ ++void n12_do_idle(void) ++{ ++ STANDBY(no_wake_grant); ++} ++ ++void n12_reset(unsigned long reset) ++{ ++ GIE_DISABLE(); ++ SET_CACHE_CTL(GET_CACHE_CTL() & ++ ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN)); ++ n12_dcache_wbinval_all(); ++ n12_icache_inval_all(); ++ ++ __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset)); ++} ++ ++void n12_switch_mm(struct mm_struct *mm) ++{ ++ unsigned long cid; ++#ifndef CONFIG_CPU_NO_CONTEXT_ID ++ cid = GET_TLB_MISC(); ++ cid = (cid & ~TLB_MISC_mskCID) | mm->context.id; ++ SET_TLB_MISC(cid); ++#endif ++ SET_L1_PPTB(__pa(mm->pgd)); ++ //workaround N10 single-entry cache flush issue ++ //the following line can be removed once the issue is fixed. ++ __asm__ __volatile__("tlbop %0, INV"::"r"(cid)); ++ __nds32__isb(); ++#ifdef CONFIG_CPU_NO_CONTEXT_ID ++ local_flush_tlb_mm(mm); ++#endif ++} +diff -Nur linux-3.4.110.orig/arch/nds32/mm/tlb.c linux-3.4.110/arch/nds32/mm/tlb.c +--- linux-3.4.110.orig/arch/nds32/mm/tlb.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/mm/tlb.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,47 @@ ++#include ++#include ++#include ++#include ++#include ++ ++unsigned int cpu_last_cid = { TLB_MISC_mskCID + (2 << TLB_MISC_offCID) }; ++ ++DEFINE_SPINLOCK(cid_lock); ++ ++void local_flush_tlb_range(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ unsigned long flags, ocid, ncid; ++ ++ if ((end - start) > 0x400000) { ++ asm("tlbop FLUA"); ++ __nds32__isb(); ++ return; ++ } ++ ++ spin_lock_irqsave(&cid_lock, flags); ++ ocid = GET_TLB_MISC(); ++ ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id; ++ SET_TLB_MISC(ncid); ++ while (start < end) { ++ asm("tlbop %0, INV"::"r"(start)); ++ __nds32__isb(); ++ start += PAGE_SIZE; ++ } ++ SET_TLB_MISC(ocid); ++ spin_unlock_irqrestore(&cid_lock, flags); ++} ++ ++void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) ++{ ++ unsigned long flags, ocid, ncid; ++ ++ spin_lock_irqsave(&cid_lock, flags); ++ ocid = GET_TLB_MISC(); ++ ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id; ++ SET_TLB_MISC(ncid); ++ asm("tlbop %0, INV"::"r"(addr)); ++ __nds32__isb(); ++ SET_TLB_MISC(ocid); ++ spin_unlock_irqrestore(&cid_lock, flags); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/oprofile/common.c linux-3.4.110/arch/nds32/oprofile/common.c +--- linux-3.4.110.orig/arch/nds32/oprofile/common.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/oprofile/common.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,104 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 2004, 2005 Ralf Baechle ++ * Copyright (C) 2005 MIPS Technologies, Inc. ++ * Copyright (C) 2008 Andes Technology Corporation ++ */ ++#include ++#include ++#include ++#include ++ ++#include "op_impl.h" ++ ++extern struct op_nds32_model op_model_nds32_ops __attribute__ ((weak)); ++ ++static struct op_nds32_model *model; ++ ++static struct op_counter_config ctr[20]; ++ ++static int op_nds32_setup(void) ++{ ++ /* Pre-compute the values to stuff in the hardware registers. */ ++ model->reg_setup(ctr); ++ ++ /* Configure the registers on all cpus. 
*/ ++ on_each_cpu(model->cpu_setup, NULL, 1); ++ ++ return 0; ++} ++ ++static int op_nds32_create_files(struct super_block *sb, struct dentry *root) ++{ ++ int i; ++ ++ for (i = 0; i < model->num_counters; ++i) { ++ struct dentry *dir; ++ char buf[4]; ++ ++ snprintf(buf, sizeof buf, "%d", i); ++ dir = oprofilefs_mkdir(sb, root, buf); ++ ++ oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); ++ oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); ++ oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); ++ oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); ++ oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); ++ oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl); ++ /* Dummy. */ ++ oprofilefs_create_ulong(sb, dir, "unit_mask", ++ &ctr[i].unit_mask); ++ } ++ ++ return 0; ++} ++ ++static int op_nds32_start(void) ++{ ++ on_each_cpu(model->cpu_start, NULL, 1); ++ ++ return 0; ++} ++ ++static void op_nds32_stop(void) ++{ ++ /* Disable performance monitoring for all counters. */ ++ on_each_cpu(model->cpu_stop, NULL, 1); ++} ++ ++int __init oprofile_arch_init(struct oprofile_operations *ops) ++{ ++ struct op_nds32_model *lmodel = NULL; ++ int res; ++ ++ lmodel = &op_model_nds32_ops; ++ ++ if (!lmodel) ++ return -ENODEV; ++ ++ res = lmodel->init(); ++ if (res) ++ return res; ++ ++ model = lmodel; ++ ++ ops->create_files = op_nds32_create_files; ++ ops->setup = op_nds32_setup; ++ ops->start = op_nds32_start; ++ ops->stop = op_nds32_stop; ++ ops->cpu_type = lmodel->cpu_type; ++ ++ printk(KERN_INFO "oprofile: using %s performance monitoring.\n", ++ lmodel->cpu_type); ++ ++ return 0; ++} ++ ++void oprofile_arch_exit(void) ++{ ++ if (model) ++ model->exit(); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/oprofile/Makefile linux-3.4.110/arch/nds32/oprofile/Makefile +--- linux-3.4.110.orig/arch/nds32/oprofile/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/oprofile/Makefile 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,11 @@ ++EXTRA_CFLAGS := ++ ++obj-$(CONFIG_OPROFILE) += oprofile.o ++ ++DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ ++ oprof.o cpu_buffer.o buffer_sync.o \ ++ event_buffer.o oprofile_files.o \ ++ oprofilefs.o oprofile_stats.o \ ++ timer_int.o ) ++ ++oprofile-y := $(DRIVER_OBJS) common.o op_model_nds32.o +diff -Nur linux-3.4.110.orig/arch/nds32/oprofile/op_impl.h linux-3.4.110/arch/nds32/oprofile/op_impl.h +--- linux-3.4.110.orig/arch/nds32/oprofile/op_impl.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/oprofile/op_impl.h 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,42 @@ ++/** ++ * @file arch/alpha/oprofile/op_impl.h ++ * ++ * @remark Copyright 2002 OProfile authors ++ * @remark Read the file COPYING ++ * ++ * @author Richard Henderson ++ */ ++ ++#ifndef OP_IMPL_H ++#define OP_IMPL_H 1 ++ ++struct pt_regs; ++ ++extern int null_perf_irq(struct pt_regs *regs); ++extern int (*perf_irq)(struct pt_regs *regs); ++ ++/* Per-counter configuration as set via oprofilefs. */ ++struct op_counter_config { ++ unsigned long enabled; ++ unsigned long event; ++ unsigned long count; ++ /* Dummies because I am too lazy to hack the userspace tools. */ ++ unsigned long kernel; ++ unsigned long user; ++ unsigned long exl; ++ unsigned long unit_mask; ++}; ++ ++/* Per-architecture configury and hooks. 
*/ ++struct op_nds32_model { ++ void (*reg_setup) (struct op_counter_config *); ++ void (*cpu_setup) (void * dummy); ++ int (*init)(void); ++ void (*exit)(void); ++ void (*cpu_start)(void *args); ++ void (*cpu_stop)(void *args); ++ char *cpu_type; ++ unsigned char num_counters; ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/oprofile/op_model_nds32.c linux-3.4.110/arch/nds32/oprofile/op_model_nds32.c +--- linux-3.4.110.orig/arch/nds32/oprofile/op_model_nds32.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/oprofile/op_model_nds32.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,400 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ * Copyright (C) 2004, 2005 by Ralf Baechle ++ * Copyright (C) 2005 by MIPS Technologies, Inc. ++ * Copyright (C) 2007 Andes Technology Corporation ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "op_impl.h" ++ ++#ifdef CONFIG_PLAT_AG102 ++#include ++#define NDS32_PERFCNTR_IRQA 23 ++#define NDS32_PERFCNTR_IRQB 22 ++#else ++#define NDS32_PERFCNTR_IRQA 10 ++#endif ++#define NDS32_PERFCTL_EN(num) (1 << num) ++#define NDS32_PERFCTL_INTEN (1UL << 3) ++#define NDS32_PERFCTL_OVERFLOW (1UL << 6) ++#define NDS32_PERFCTL_NOKERNEL (1UL << 9) ++#define NDS32_PERFCTL_NOUSER (1UL << 12) ++#define NDS32_PERFCTL_EVENT_0(event) (event << 15) ++#define NDS32_PERFCTL_EVENT_1(event) (event << 16) ++#define NDS32_PERFCTL_EVENT_2(event) (event << 22) ++ ++static unsigned long long ov0, ov1, ov2; ++static int syscall = 0; ++ ++struct pmu_counter { ++ volatile unsigned long ovf; ++ unsigned long reset_counter; ++}; ++ ++enum { PFMC0, PFMC1, PFMC2, MAX_COUNTERS }; ++ ++static struct pmu_counter results[MAX_COUNTERS]; ++ ++static inline unsigned int read_perfcntr(int counter) ++{ ++ switch (counter) { ++ case PFMC0: ++ return GET_PFMC0(); ++ break; ++ case PFMC1: ++ return GET_PFMC1(); ++ break; ++ case PFMC2: ++ return GET_PFMC2(); ++ break; ++ default: ++ printk(KERN_ERR ++ "Oprofile read_perfcntr: CPU has no %d performance counters\n", ++ counter); ++ } ++ ++ return 0; ++} ++ ++static inline unsigned int read_perfctrl(void) ++{ ++ return GET_PFM_CTL(); ++} ++ ++static inline void write_perfcntr(int counter, unsigned int value) ++{ ++ switch (counter) { ++ case PFMC0: ++ SET_PFMC0(value); ++ break; ++ case PFMC1: ++ SET_PFMC1(value); ++ break; ++ case PFMC2: ++ SET_PFMC2(value); ++ break; ++ default: ++ printk(KERN_ERR ++ "Oprofile write_perfcntr: CPU has no %d performance counters\n", ++ counter); ++ } ++} ++ ++static inline void write_perfctrl(unsigned int value) ++{ ++ SET_PFM_CTL(value); ++} ++ ++struct op_nds32_model op_model_nds32_ops; ++ ++static struct nds32_register_config { ++ unsigned int control[3]; ++ unsigned int counter[3]; ++} reg; ++ ++/* Compute all of the registers in preparation for enabling profiling. */ ++ ++static void nds32_reg_setup(struct op_counter_config *ctr) ++{ ++ unsigned int counters = op_model_nds32_ops.num_counters; ++ int i; ++ ++ /* Compute the performance counter control word. 
*/ ++ /* For now count kernel and user mode */ ++ for (i = 0; i < counters; i++) { ++ reg.control[i] = 0; ++ reg.counter[i] = 0; ++ ++ if (!ctr[i].enabled) ++ continue; ++ ++ switch (i) { ++ case 0: ++ reg.control[i] = NDS32_PERFCTL_EVENT_0(ctr[i].event) | ++ (NDS32_PERFCTL_INTEN << i); ++ break; ++ case 1: ++ reg.control[i] = NDS32_PERFCTL_EVENT_1(ctr[i].event) | ++ (NDS32_PERFCTL_INTEN << i); ++ break; ++ case 2: ++ reg.control[i] = NDS32_PERFCTL_EVENT_2(ctr[i].event) | ++ (NDS32_PERFCTL_INTEN << i); ++ break; ++ default: ++ printk(KERN_ERR ++ "Oprofile nds32_reg_setup: CPU has no %d performance counters\n", ++ i); ++ } ++ if (!(ctr[i].kernel)) ++ reg.control[i] |= (NDS32_PERFCTL_NOKERNEL << i); ++ if (!(ctr[i].user)) ++ reg.control[i] |= (NDS32_PERFCTL_NOUSER << i); ++ reg.counter[i] = -ctr[i].count; ++ } ++} ++ ++/* Program all of the registers in preparation for enabling profiling. */ ++ ++static void nds32_cpu_setup(void *args) ++{ ++ unsigned int counters = op_model_nds32_ops.num_counters; ++ ++ switch (counters) { ++ case 3: ++ write_perfcntr(2, reg.counter[2]); ++ case 2: ++ write_perfcntr(1, reg.counter[1]); ++ case 1: ++ write_perfcntr(0, reg.counter[0]); ++ } ++ write_perfctrl(0); ++} ++ ++/* Start all counters on current CPU */ ++static void nds32_cpu_start(void *args) ++{ ++ unsigned int counters = op_model_nds32_ops.num_counters; ++ unsigned int value = 0; ++ ++ switch (counters) { ++ case 3: ++ if (reg.control[2]) ++ value |= (NDS32_PERFCTL_EN(2) | reg.control[2]); ++ case 2: ++ if (reg.control[1]) ++ value |= (NDS32_PERFCTL_EN(1) | reg.control[1]); ++ case 1: ++ if (reg.control[0]) ++ value |= (NDS32_PERFCTL_EN(0) | reg.control[0]); ++ } ++ write_perfctrl(value); ++} ++ ++/* Stop all counters on current CPU */ ++static void nds32_cpu_stop(void *args) ++{ ++ write_perfctrl(0); ++} ++ ++static irqreturn_t nds32_perfcount_handler(int irq, void *dev_id) ++{ ++ unsigned int control, i; ++ ++ control = read_perfctrl(); ++ write_perfctrl(0); ++ ++ if (syscall) { ++ if (control & PFM_CTL_mskOVF0) ++ ov0++; ++ if (control & PFM_CTL_mskOVF1) ++ ov1++; ++ if (control & PFM_CTL_mskOVF2) ++ ov2++; ++ } else { ++ for (i = 0; i < MAX_COUNTERS; i++) { ++ if ((control & (NDS32_PERFCTL_INTEN << i)) ++ && (control & (NDS32_PERFCTL_OVERFLOW << i))) { ++ oprofile_add_sample(get_irq_regs(), i); ++ write_perfcntr(i, reg.counter[i]); ++ break; ++ } ++ } ++ } ++ write_perfctrl(control); ++ ++ return IRQ_HANDLED; ++} ++ ++static inline int n_counters(void) ++{ ++ return 3; ++} ++ ++static inline void reset_counters(int counters) ++{ ++ switch (counters) { ++ case 3: ++ write_perfcntr(2, 0); ++ case 2: ++ write_perfcntr(1, 0); ++ case 1: ++ write_perfcntr(0, 0); ++ } ++ write_perfctrl(0); ++} ++ ++static int __init nds32_init(void) ++{ ++ int counters, ret; ++ ++ counters = n_counters(); ++ if (counters == 0) { ++ printk(KERN_ERR "Oprofile: CPU has no performance counters\n"); ++ return -ENODEV; ++ } ++ ++ reset_counters(counters); ++ ++ op_model_nds32_ops.num_counters = counters; ++ op_model_nds32_ops.cpu_type = "nds32"; ++ ++ ret = ++ request_irq(NDS32_PERFCNTR_IRQA, nds32_perfcount_handler, ++ IRQF_SHARED, "NDS32 PERFCNTR", (void *)results); ++ if (ret < 0) { ++ printk(KERN_ERR "oprofile: unable to request IRQ%d\n", ++ NDS32_PERFCNTR_IRQA); ++ return ret; ++ } ++#ifdef CONFIG_PLAT_AG102 ++ unsigned int tmp; ++ /* Set NDS32_PERFCNTR_IRQA to bind on core A */ ++ tmp = ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUID0 + ++ ((NDS32_PERFCNTR_IRQA >> 4) << 2)); ++ tmp &= ~(0x11 << 
((NDS32_PERFCNTR_IRQA & ~0x10) * 2)); ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUID0 + ++ ((NDS32_PERFCNTR_IRQA >> 4) << 2)) = tmp; ++ tmp = ++ (*(volatile unsigned long *)(AMIC_VA_BASE + CPUDC)) & ~(1 << ++ NDS32_PERFCNTR_IRQA); ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUDC) = tmp; ++ ++ ret = ++ request_irq(NDS32_PERFCNTR_IRQB, nds32_perfcount_handler, ++ IRQF_SHARED, "NDS32 PERFCNTR", (void *)results); ++ if (ret < 0) { ++ printk(KERN_ERR "oprofile: unable to request IRQ%d\n", ++ NDS32_PERFCNTR_IRQB); ++ return ret; ++ } ++ /* Set NDS32_PERFCNTR_IRQB to bind on core B */ ++ tmp = ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUID0 + ++ ((NDS32_PERFCNTR_IRQB >> 4) << 2)); ++ tmp &= ~(0x11 << ((NDS32_PERFCNTR_IRQB & ~0x10) * 2)); ++ tmp |= 1 << ((NDS32_PERFCNTR_IRQB & ~0x10) * 2); ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUID0 + ++ ((NDS32_PERFCNTR_IRQB >> 4) << 2)) = tmp; ++ tmp = ++ (*(volatile unsigned long *)(AMIC_VA_BASE + CPUDC)) & ~(1 << ++ NDS32_PERFCNTR_IRQB); ++ *(volatile unsigned long *)(AMIC_VA_BASE + CPUDC) = tmp; ++#endif ++ ++ return 0; ++} ++ ++static void nds32_exit(void) ++{ ++ reset_counters(op_model_nds32_ops.num_counters); ++ ++ free_irq(NDS32_PERFCNTR_IRQA, results); ++#ifdef CONFIG_PLAT_AG102 ++ free_irq(NDS32_PERFCNTR_IRQB, results); ++#endif ++} ++ ++void sys_pfmctl(int event0, int event1, int event2, int start) ++{ ++ unsigned int ctl = 0; ++ ++ if (start) { ++ syscall = 1; ++ if (event0 >= 0) ++ ctl |= ++ (NDS32_PERFCTL_EVENT_0(event0) | ++ (NDS32_PERFCTL_INTEN << 0) | NDS32_PERFCTL_EN(0)); ++ if (event1 >= 0) ++ ctl |= ++ (NDS32_PERFCTL_EVENT_1(event1) | ++ (NDS32_PERFCTL_INTEN << 1) | NDS32_PERFCTL_EN(1)); ++ if (event2 >= 0) ++ ctl |= ++ (NDS32_PERFCTL_EVENT_2(event2) | ++ (NDS32_PERFCTL_INTEN << 2) | NDS32_PERFCTL_EN(2)); ++ } else { ++ syscall = 0; ++ if (event0 >= 0) ++ ctl &= ++ ~((NDS32_PERFCTL_INTEN << 0) | NDS32_PERFCTL_EN(0)); ++ if (event1 >= 0) ++ ctl &= ++ ~((NDS32_PERFCTL_INTEN << 1) | NDS32_PERFCTL_EN(1)); ++ if (event2 >= 0) ++ ctl &= ++ ~((NDS32_PERFCTL_INTEN << 2) | NDS32_PERFCTL_EN(2)); ++ } ++ ++ write_perfctrl(ctl); ++ ++} ++ ++int sys_getpfm(struct pcounter __user * p) ++{ ++ struct pcounter pfm; ++ unsigned int control; ++ ++ control = read_perfctrl(); ++ write_perfctrl(0); ++ ++ pfm.pfm0 = ov0 << 32 | GET_PFMC0(); ++ pfm.pfm1 = ov1 << 32 | GET_PFMC1(); ++ pfm.pfm2 = ov2 << 32 | GET_PFMC2(); ++ ++ if (copy_to_user(p, &pfm, sizeof(pfm))) ++ return -EFAULT; ++ ++ write_perfctrl(control); ++ ++ return 0; ++} ++ ++int sys_setpfm(int pfm0, int pfm1, int pfm2, struct pcounter __user * p) ++{ ++ struct pcounter pfm; ++ unsigned int control; ++ ++ control = read_perfctrl(); ++ write_perfctrl(0); ++ ++ if (copy_from_user(&pfm, p, sizeof(pfm))) ++ return -EFAULT; ++ ++ if (pfm0) { ++ SET_PFMC0((unsigned int)(pfm.pfm0 & 0xffffffff)); ++ ov0 = pfm.pfm0 >> 32; ++ } ++ if (pfm1) { ++ SET_PFMC1((unsigned int)(pfm.pfm1 & 0xffffffff)); ++ ov1 = pfm.pfm1 >> 32; ++ } ++ if (pfm2) { ++ SET_PFMC2((unsigned int)(pfm.pfm2 & 0xffffffff)); ++ ov2 = pfm.pfm2 >> 32; ++ } ++ ++ write_perfctrl(control); ++ ++ return 0; ++} ++ ++struct op_nds32_model op_model_nds32_ops = { ++ .reg_setup = nds32_reg_setup, ++ .cpu_setup = nds32_cpu_setup, ++ .init = nds32_init, ++ .exit = nds32_exit, ++ .cpu_start = nds32_cpu_start, ++ .cpu_stop = nds32_cpu_stop, ++}; +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/cpu-fcs.c linux-3.4.110/arch/nds32/platforms/ag101/cpu-fcs.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/cpu-fcs.c 1970-01-01 
01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/cpu-fcs.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,401 @@ ++/* ++ * linux/arch/nds32/platforms/ag101/cpu-fcs.c ++ * ++ * Copyright (C) 2002,2003 Intrinsyc Software ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * History: ++ * 31-Jul-2002 : Initial version [FB] ++ * 29-Jan-2003 : added PXA255 support [FB] ++ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) ++ * 18-Jun-2008 : ported to NDS32 architecture ( Roy Lee, Andestech Corp.) ++ * ++ * Note: ++ * This driver may change the memory bus clock rate, but will not do any ++ * platform specific access timing changes... for example if you have flash ++ * memory connected to CS0, you will need to register a platform specific ++ * notifier which will adjust the memory access strobes to maintain a ++ * minimum strobe width. ++ * ++ */ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define NDS32_FCS_IRQ 8 ++#define AG101_MIN_FREQ 100000 ++#define AG101_MAX_FREQ 400000 ++#define OSC_KHZ 10000 /* 10 MHz AG101 */ ++ ++#define USE_CACHE 0 ++struct ag101_freq_struct { ++ ++ unsigned int khz; /* cpu_clk in khz */ ++ unsigned int pll; /* pll mul */ ++ unsigned int div; /* ahb div */ ++ unsigned int frange; /* pll1 freq range */ ++}; ++ ++struct ag101_freq_struct ag101_run_freqs[] = { ++ ++ /* khz , pll, div, frange pll/cpu/ahb/apb */ ++ {100000, 10, 2, 0}, /* 400/400/050/025 */ ++ {200000, 20, 4, 1}, /* 400/400/050/025 */ ++ {300000, 30, 6, 2}, /* 400/400/050/025 */ ++ {400000, 40, 8, 2}, /* 400/400/050/025 */ ++ {500000, 50, 10, 3}, /* 500/500/050/025 */ ++ {0,} ++}; ++ ++#define NUM_RUN_FREQS ARRAY_SIZE( ag101_run_freqs) ++static struct cpufreq_frequency_table ag101_run_freq_table[NUM_RUN_FREQS + 1]; ++ ++/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ ++static struct ag101_freq_struct ag101_turbo_freqs[] = { ++ ++ /* khz , pll, div, frange pll/cpu/ahb/apb */ ++ {100000, 10, 2, 0}, /* 400/400/050/025 */ ++ {200000, 20, 4, 1}, /* 400/400/050/025 */ ++ {300000, 30, 6, 2}, /* 400/400/050/025 */ ++ {400000, 40, 8, 2}, /* 400/400/050/025 */ ++ {500000, 50, 10, 3}, /* 500/500/050/025 */ ++ {0,} ++}; ++ ++#define NUM_TURBO_FREQS ARRAY_SIZE( ag101_turbo_freqs) ++static struct cpufreq_frequency_table ag101_turbo_freq_table[NUM_TURBO_FREQS + ++ 1]; ++ ++/* Generic helper function get CPU clocks in kHz */ ++unsigned int ag101_cpufreq_get(unsigned int dummy) ++{ ++ ++ unsigned int mul = (REG32(PMU_FTPMU010_VA_BASE + 0x30) >> 3UL) & 0x01ff; /* pll1 mul */ ++ return OSC_KHZ * mul; ++} ++ ++/* find a valid frequency point */ ++static int ag101_verify_policy(struct cpufreq_policy *policy) ++{ ++ ++ struct cpufreq_frequency_table *ag101_freqs_table; ++ ++ if 
(policy->policy == CPUFREQ_POLICY_PERFORMANCE) { ++ ++ ag101_freqs_table = ag101_run_freq_table; ++ } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { ++ ++ ag101_freqs_table = ag101_turbo_freq_table; ++ } else { ++ printk ++ ("CPU PXA: Unknown policy found. Using CPUFREQ_POLICY_PERFORMANCE\n"); ++ ag101_freqs_table = ag101_run_freq_table; ++ } ++ ++ printk("Verified CPU policy: %dKhz min to %dKhz max\n", policy->min, ++ policy->max); ++ ++ return cpufreq_frequency_table_verify(policy, ag101_freqs_table); ++} ++ ++static int cal_edivahbclk(int div) ++{ ++ ++ switch (div) { ++ ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ case 5: ++ case 6: ++ return --div; ++ case 8: ++ return 8; ++ case 10: ++ return 9; ++ case 12: ++ return 10; ++ case 14: ++ return 11; ++ case 15: ++ return 12; ++ case 18: ++ return 13; ++ case 20: ++ return 14; ++ default: ++ printk("Error: No such CPU/AHB frequency ratio %d", div); ++ } ++ ++ return 9; ++} ++ ++void start_fcs(unsigned int pll, unsigned int frange, unsigned int div) ++{ ++ ++ /* PDLLCR0 */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x30) &= ~0x00003ff8; /* clear PLL1NS and PLL1FRANG fields */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x30) |= (pll << 3); /* set PLL1NS */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x30) |= (frange << 12); /* set PLL1FRANG */ ++ ++ /* PMODE */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) &= ~0x000000ff; /* clear EDIVAHBCLK field */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) |= (div << 4); /* set EDIVAHBCLK [7:4] */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) |= (1UL << 2); /* [2]: FCS */ ++ ++ __asm__ __volatile__("msync all"); ++ __asm__ __volatile__("isb"); ++ __asm__ __volatile__("standby wake_grant"); ++ REG32(PMU_FTPMU010_VA_BASE + 0x30) |= (1UL << 16); /* PDLLCR0 bit[16]==1:disable dll */ ++} ++ ++void end_fcs(void) ++{ ++ ++ /* Leave this function as a place marker. 
*/ ++} ++ ++static int nds32_fcs_handler(int irq, void *dev_id) ++{ ++ ++ REG32(PMU_FTPMU010_VA_BASE + 0x20) = (1UL << 17); /* Clear IntFCS PMSR[17] */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) &= ~(1UL << 2); /* Power Mode Register */ ++ ++ return 1; ++} ++ ++static int ag101_speedstep(int idx) ++{ ++ ++ unsigned int pll, frange, div; ++ unsigned long flags = 0; ++ int irq, saved_irq_mask; ++ void (*do_fcs) (unsigned int pll, unsigned int frange, ++ unsigned int div); ++ ++#if USE_CACHE ++ ++ int i; ++ int line_size = CACHE_LINE_SIZE(ICACHE); ++ unsigned long start = ((unsigned long)start_fcs) & ~(line_size - 1); ++ unsigned long end = ++ (((unsigned long)end_fcs) + line_size) & ~(line_size - 1); ++ ++ printk("&start_fcs(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)start_fcs, start); ++ printk("&end_fcs(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)end_fcs, end); ++ ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_FILLCK":: ++ "r" (i):"memory"); ++ ++ do_fcs = start_fcs; ++#else ++ unsigned long buf, aligned_buf, len = PAGE_SIZE; ++ ++ buf = (unsigned long)kmalloc(0x100000 + 1000, GFP_KERNEL); ++ if (!buf) ++ printk("Error: kmalloc( base) failed\n"); ++ ++ aligned_buf = (buf + 0x100000 - 1) & 0xFFF00000; ++ ++ if (sys_lmmap(LM_ILM, aligned_buf, aligned_buf + 0x1000, 0, NULL)) { ++ printk("Error: lmmap failed, can't scale frequency.\n"); ++#ifdef CONFIG_CPU_FREQ_DEBUG ++ WARN_ON(1); ++#endif ++ return 0; ++ } ++ ++ if (((GET_ILMB() & ILMB_mskILMSZ) >> ILMB_offILMSZ) == 9) ++ len = 0x400; ++ else if (((GET_ILMB() & ILMB_mskILMSZ) >> ILMB_offILMSZ) == 10) ++ len = 0x800; ++ memcpy((unsigned char *)aligned_buf, (unsigned char *)start_fcs, len); ++ ++ do_fcs = (void *)aligned_buf; ++#endif ++ pll = ag101_run_freqs[idx].pll; ++ div = cal_edivahbclk(ag101_run_freqs[idx].div); ++ frange = ag101_run_freqs[idx].frange; ++ ++ irq = ++ request_irq(NDS32_FCS_IRQ, nds32_fcs_handler, ++ IRQF_DISABLED | IRQF_TRIGGER_FALLING, ++ "NDS32 Frequency Change Sequence", ++ (void *)ag101_run_freqs); ++ if (irq < 0) ++ printk(KERN_ERR "Error: unable to request FCS IRQ%d\n", ++ NDS32_FCS_IRQ); ++ ++ local_irq_save(flags); ++ ++ saved_irq_mask = REG32(INTC_FTINTC010_VA_BASE + 0x04); ++ REG32(INTC_FTINTC010_VA_BASE + 0x04) = (1UL << NDS32_FCS_IRQ); ++ ++ do_fcs(pll, frange, div); ++ ++ REG32(INTC_FTINTC010_VA_BASE + 0x04) = saved_irq_mask; ++ ++ local_irq_restore(flags); ++ free_irq(NDS32_FCS_IRQ, ag101_run_freqs); ++ ++#if USE_CACHE ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_ULCK"::"r" (i):"memory"); ++#else ++ if (sys_lmunmap(aligned_buf, 0)) ++ printk("Error: lmunmap failed\n"); ++ ++ kfree((void *)buf); ++#endif ++ return 1; ++} ++ ++static int ag101_set_target(struct cpufreq_policy *policy, ++ unsigned int target_freq, unsigned int relation) ++{ ++ ++ unsigned int idx; ++ struct cpufreq_frequency_table *ag101_freqs_table; ++ struct ag101_freq_struct *ag101_freq_settings; ++ struct cpufreq_freqs freqs; ++ ++ /* Get the current policy */ ++ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { ++ ++ ag101_freq_settings = ag101_run_freqs; ++ ag101_freqs_table = ag101_run_freq_table; ++ } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { ++ ++ ag101_freq_settings = ag101_turbo_freqs; ++ ag101_freqs_table = ag101_turbo_freq_table; ++ } else { ++ printk ++ ("Unknown FCS policy found. 
Using CPUFREQ_POLICY_PERFORMANCE\n"); ++ ag101_freq_settings = ag101_run_freqs; ++ ag101_freqs_table = ag101_run_freq_table; ++ } ++ ++ /* Lookup the next frequency */ ++ if (cpufreq_frequency_table_target ++ (policy, ag101_freqs_table, target_freq, relation, &idx)) ++ return -EINVAL; ++ ++ freqs.old = policy->cur; ++ freqs.new = ag101_freq_settings[idx].khz; ++ freqs.cpu = policy->cpu; ++ ++ /* ++ * Tell everyone what we're about to do... ++ * you should add a notify client with any platform specific ++ * Vcc changing capability ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); ++ ++ if (freqs.new != freqs.old) { ++ if (!ag101_speedstep(idx)) ++ return -ENODEV; ++ } ++ ++ /* ++ * Tell everyone what we've just done... ++ * you should add a notify client with any platform specific ++ * SDRAM refresh timer adjustments ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); ++ ++ return 0; ++} ++ ++static int ag101_cpufreq_init(struct cpufreq_policy *policy) ++{ ++ ++ int i; ++ /* set default policy and cpuinfo */ ++ policy->governor = CPUFREQ_DEFAULT_GOVERNOR; ++ policy->policy = CPUFREQ_POLICY_PERFORMANCE; ++ policy->cpuinfo.max_freq = AG101_MAX_FREQ; ++ policy->cpuinfo.min_freq = AG101_MIN_FREQ; ++ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ ++ policy->cur = ag101_cpufreq_get(0); /* current freq */ ++ policy->min = policy->max = policy->cur; ++ ++ /* Generate the run cpufreq_frequency_table struct */ ++ for (i = 0; i < NUM_RUN_FREQS; i++) { ++ ++ ag101_run_freq_table[i].frequency = ag101_run_freqs[i].khz; ++ ag101_run_freq_table[i].index = i; ++ } ++ ++ ag101_run_freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ /* Generate the turbo cpufreq_frequency_table struct */ ++ for (i = 0; i < NUM_TURBO_FREQS; i++) { ++ ++ ag101_turbo_freq_table[i].frequency = ag101_turbo_freqs[i].khz; ++ ag101_turbo_freq_table[i].index = i; ++ } ++ ++ ag101_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ printk("CPU frequency change support initialized\n"); ++ ++ return 0; ++} ++ ++static struct cpufreq_driver ag101_cpufreq_driver = { ++ ++ .verify = ag101_verify_policy, ++ .target = ag101_set_target, ++ .init = ag101_cpufreq_init, ++ .get = ag101_cpufreq_get, ++ .name = "AG101", ++}; ++ ++static int __init ag101_cpu_init(void) ++{ ++ ++ if (CPU_IS_N1213_43U1HA0() || CPU_IS_N1213_43U1HB0()) { ++ ++ /* Clear IntFS, IntFCS and irq8 */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x20) = (1UL << 16); ++ return cpufreq_register_driver(&ag101_cpufreq_driver); ++ } else ++ return -ENODEV; ++} ++ ++static void __exit ag101_cpu_exit(void) ++{ ++ ++ if (CPU_IS_N1213_43U1HA0() || CPU_IS_N1213_43U1HB0()) ++ cpufreq_unregister_driver(&ag101_cpufreq_driver); ++} ++ ++MODULE_AUTHOR("Andes Technology Corporation"); ++MODULE_DESCRIPTION("CPU frequency changing driver for the AG101 architecture"); ++MODULE_LICENSE("GPL"); ++module_init(ag101_cpu_init); ++module_exit(ag101_cpu_exit); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/devices.c linux-3.4.110/arch/nds32/platforms/ag101/devices.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/devices.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/devices.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,94 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++const struct map_desc platform_io_desc[] __initdata = { ++ {UART0_VA_BASE, UART0_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {UART1_VA_BASE, UART1_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ 
{INTC_FTINTC010_0_VA_BASE, INTC_FTINTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {TIMER_FTTMR010_0_VA_BASE, TIMER_FTTMR010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SSP_FTSSP010_0_VA_BASE, SSP_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PMU_FTPMU010_0_VA_BASE, PMU_FTPMU010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {MAC_FTMAC100_0_VA_BASE, MAC_FTMAC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SDC_FTSDC010_0_VA_BASE, SDC_FTSDC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {RTC_FTRTC010_0_VA_BASE, RTC_FTRTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {WDT_FTWDT010_0_VA_BASE, WDT_FTWDT010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPIO_FTGPIO010_0_VA_BASE, GPIO_FTGPIO010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {CFC_FTCFC010_0_VA_BASE, CFC_FTCFC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LCD_FTLCDC100_0_VA_BASE, LCD_FTLCDC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {I2C_FTI2C010_0_VA_BASE, I2C_FTI2C010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {DMAC_FTDMAC020_0_VA_BASE, DMAC_FTDMAC020_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {APBBRG_FTAPBBRG020S_0_VA_BASE, APBBRG_FTAPBBRG020S_0_PA_BASE, ++ PAGE_SIZE, MT_DEVICE_NCB}, ++ {KMI_FTKBC010_0_VA_BASE, KMI_FTKBC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCNB}, ++ {KMI_FTKBC010_1_VA_BASE, KMI_FTKBC010_1_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCNB}, ++ {USB_FUSB220_0_VA_BASE, USB_FUSB220_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCNB}, ++ {PCIIO_0_VA_BASE, PCIIO_0_PA_BASE, 0x000FF000, MT_DEVICE_NCB}, ++ {PCIC_FTPCI100_0_VA_BASE, PCIC_FTPCI100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LED_VA_BASE, LED_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {SDMC_FTSDMC021_VA_BASE, SDMC_FTSDMC021_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {L2CC_VA_BASE, L2CC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB} ++}; ++ ++static void __init platform_map_io(void) ++{ ++ iotable_init((struct map_desc *)platform_io_desc, ++ ARRAY_SIZE(platform_io_desc)); ++} ++ ++static struct uart_port uart0 = { ++ .membase = (void __iomem *)UART0_VA_BASE, ++ .irq = UART0_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 0, ++ .mapbase = UART0_PA_BASE, ++}; ++ ++static struct uart_port uart1 = { ++ .membase = (void __iomem *)UART1_VA_BASE, ++ .irq = UART1_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 1, ++ .mapbase = UART1_PA_BASE, ++}; ++ ++void ag101_calc_ahb_clk(void); ++static void __init soc_init(void) ++{ ++ ag101_calc_ahb_clk(); ++ early_serial_setup(&uart0); ++ early_serial_setup(&uart1); ++} ++ ++MACHINE_START(FARADAY, PLATFORM_NAME) ++ .param_offset = BOOT_PARAMETER_PA_BASE,.map_io = platform_map_io,.init_irq = platform_init_irq,.timer = &platform_timer, /* defined in timer.c */ ++ .init_machine = soc_init, MACHINE_END +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/fia320.c linux-3.4.110/arch/nds32/platforms/ag101/fia320.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/fia320.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/fia320.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,103 @@ ++/* ++ * linux/arch/nds32/platforms/ag101/fia320.c ++ * ++ * Faraday A320D Platform Dependent Functions ++ * ++ * Copyright (C) 2005 Faraday Corp. 
(http://www.faraday-tech.com) ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/26/2005 Created ++ * Peter Liao 09/29/2005 Port dynamically getting AHB clock ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_AUTO_SYS_CLK ++#include ++#define AG101B0 (0x0c020003) ++ ++/* ++ * Table for ahb divisor, PMODE[07:04] ++ */ ++static const int ahb_div[16] = { ++ 1, 2, 3, 4, 5, 6, 3, 5, ++ 8, 10, 12, 14, 15, 18, 20, -1 ++}; ++ ++/* ag101_get_ahb_clk() ++ * ++ * return AHB clock in Hz. ++ */ ++static int ahbclk; ++void ag101_calc_ahb_clk(void) ++{ ++ /* ++ * FIXME: We should not put AG101 term in here, Harry@Oct.23.2007 ++ */ ++ const unsigned int osc = 10; // OSC in MHz ++ unsigned int mul, div, cpu, pll; ++ unsigned int ahb = 0; // ahb clk in Hz ++ unsigned int cpu_ver; ++ ++ mul = (REG32(PMU_FTPMU010_0_VA_BASE + 0x30) >> 3) & 0x01ff; // pll1 mul ++ div = (REG32(PMU_FTPMU010_0_VA_BASE + 0x4) >> 8) & 0x000f; // pll1 div ++ ahb = (REG32(PMU_FTPMU010_0_VA_BASE + 0x4) >> 4) & 0x000f; // ahb div ++ div += 1; ++ ++ pll = (osc * mul / div); // depend on OSC. ++ ++ //AG101B0 PLL divider fix ++ cpu_ver = __nds32__mfsr(NDS32_SR_CPU_VER); ++ if (AG101B0 == cpu_ver) ++ pll >>= 1; ++ ++ if (-1 != ahb_div[ahb]) { ++ if ((ahb == 6) || (ahb == 7)) // special cases for 3:2 & 5:2 ++ cpu = pll >> 1; // divide by 2 ++ else ++ cpu = pll; ++ ++ ahb = pll / ahb_div[ahb]; // become ahb clk in MHz. ++ ++ printk("AG101 auto-detected AHB clock: CPU/AHB=%uMHz/%uMHz\n", ++ cpu, ahb); ++ ahb *= 1000000; // become ahb clk in Hz. ++ ahbclk = (int)ahb; ++ } else { ++ printk("Unknown AHB divisor:0x%x\n", ahb); ++ ahbclk = 0; ++ } ++ ++ ahbclk = (int)ahb; ++} ++ ++int ag101_get_ahb_clk(void) ++{ ++ return ahbclk; ++} ++ ++EXPORT_SYMBOL(ag101_get_ahb_clk); ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/freq-scaling.c linux-3.4.110/arch/nds32/platforms/ag101/freq-scaling.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/freq-scaling.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/freq-scaling.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,367 @@ ++/* ++ * linux/arch/nds32/platforms/ag101/cpu-fcs.c ++ * ++ * Copyright (C) 2002,2003 Intrinsyc Software ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * History: ++ * 31-Jul-2002 : Initial version [FB] ++ * 29-Jan-2003 : added PXA255 support [FB] ++ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) ++ * 18-Jun-2008 : ported to NDS32 architecture ( Roy Lee, Andestech Corp.) ++ * ++ * Note: ++ * This driver may change the memory bus clock rate, but will not do any ++ * platform specific access timing changes... for example if you have flash ++ * memory connected to CS0, you will need to register a platform specific ++ * notifier which will adjust the memory access strobes to maintain a ++ * minimum strobe width. ++ * ++ */ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define NDS32_FCS_IRQ 8 ++#define AG101_MIN_FREQ 70000 ++#define AG101_MAX_FREQ 420000 ++#define OSC_KHZ 10000 /* 10 MHz AG101 */ ++ ++#define USE_CACHE 0 ++struct ag101_freq_struct { ++ ++ unsigned int khz; /* cpu_clk in khz */ ++ unsigned int sf; /* scaling factor */ ++ unsigned int cr; /* clock ratio */ ++}; ++ ++struct ag101_freq_struct ag101_run_freqs[] = { ++ ++ /* khz , sf, cr pll/cpu/ahb/apb */ ++ {AG101_MAX_FREQ / 6, 6, 1}, /* 420/070/070/035 */ ++ {AG101_MAX_FREQ / 1, 1, 6}, /* 420/420/070/035 */ ++ {0} ++}; ++ ++#define NUM_RUN_FREQS ARRAY_SIZE( ag101_run_freqs) ++static struct cpufreq_frequency_table ag101_run_freq_table[NUM_RUN_FREQS + 1]; ++ ++/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ ++static struct ag101_freq_struct ag101_turbo_freqs[] = { ++ ++ /* khz , sf, cr pll/cpu/ahb/apb */ ++ {AG101_MAX_FREQ / 6, 6, 1}, /* 420/070/070/035 */ ++ {AG101_MAX_FREQ / 1, 1, 6}, /* 420/420/070/035 */ ++ {0} ++}; ++ ++#define NUM_TURBO_FREQS ARRAY_SIZE( ag101_turbo_freqs) ++static struct cpufreq_frequency_table ag101_turbo_freq_table[NUM_TURBO_FREQS + ++ 1]; ++ ++/* Generic helper function get CPU clocks in kHz */ ++unsigned int ag101_cpufreq_get(unsigned int dummy) ++{ ++ ++ unsigned int pll = (REG32(PMU_FTPMU010_VA_BASE + 0x30) >> 3UL) & 0x01ff; /* pll */ ++ unsigned int sf = (REG32(PMU_FTPMU010_VA_BASE + 0x0c) >> 8UL) & 0x0f; /* scaling factor */ ++ ++ return OSC_KHZ * pll / (sf + 1); ++} ++ ++/* find a valid frequency point */ ++static int ag101_verify_policy(struct cpufreq_policy *policy) ++{ ++ ++ struct cpufreq_frequency_table *ag101_freqs_table; ++ ++ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { ++ ++ ag101_freqs_table = ag101_run_freq_table; ++ } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { ++ ++ ag101_freqs_table = ag101_turbo_freq_table; ++ } else { ++ printk ++ ("CPU PXA: Unknown policy found. 
Using CPUFREQ_POLICY_PERFORMANCE\n"); ++ ag101_freqs_table = ag101_run_freq_table; ++ } ++ ++ printk("Verified CPU policy: %dKhz min to %dKhz max\n", policy->min, ++ policy->max); ++ ++ return cpufreq_frequency_table_verify(policy, ag101_freqs_table); ++} ++ ++void start_fcs(unsigned int sf, unsigned cr) ++{ ++ ++ /* set EFSF in PMODE [11:8] */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) &= ~(0xfUL << 8); ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) |= (sf << 8); ++ ++ /* set EDIVAHBCLK [7:4] */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) &= ~(0xffUL << 0); ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) |= (cr << 4); ++ ++ /* PMR[1]: scaling mode */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) |= (1UL << 1); ++ ++ __asm__ __volatile__("msync all"); ++ __asm__ __volatile__("isb"); ++ __asm__ __volatile__("standby wake_grant"); ++ // REG32( PMU_FTPMU010_VA_BASE + 0x30) |= ( 1UL << 16); /* PDLLCR0 bit[16]==1:disable dll */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x0c) &= ~(1UL << 1); /* Power Mode Register */ ++} ++ ++void end_fcs(void) ++{ ++ ++ /* Leave this function as a place marker. */ ++} ++ ++static int cal_edivahbclk(int div) ++{ ++ ++ switch (div) { ++ ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ case 5: ++ case 6: ++ return --div; ++ case 8: ++ return 8; ++ case 10: ++ return 9; ++ case 12: ++ return 10; ++ case 14: ++ return 11; ++ case 15: ++ return 12; ++ case 18: ++ return 13; ++ case 20: ++ return 14; ++ default: ++ printk("Error: No such CPU/AHB frequency ratio %d", div); ++ } ++ ++ return 9; ++} ++ ++static int ag101_speedstep(int idx) ++{ ++ ++ unsigned int sf, cr; ++ unsigned long flags = 0; ++ void (*do_fcs) (unsigned int efsf, unsigned int edivhbaclk); ++ ++#if USE_CACHE ++ ++ int i; ++ int line_size = CACHE_LINE_SIZE(ICACHE); ++ unsigned long start = ((unsigned long)start_fcs) & ~(line_size - 1); ++ unsigned long end = ++ (((unsigned long)end_fcs) + line_size) & ~(line_size - 1); ++ ++ printk("&start_fcs(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)start_fcs, start); ++ printk("&end_fcs(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)end_fcs, end); ++ ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_FILLCK":: ++ "r" (i):"memory"); ++ ++ do_fcs = start_fcs; ++#else ++ unsigned long buf, aligned_buf, len = PAGE_SIZE; ++ ++ buf = (unsigned long)kmalloc(0x100000 + 1000, GFP_KERNEL); ++ if (!buf) ++ printk("Error: kmalloc( base) failed\n"); ++ ++ aligned_buf = (buf + 0x100000 - 1) & 0xFFF00000; ++ ++ if (sys_lmmap(LM_ILM, aligned_buf, aligned_buf + 0x1000, 0, NULL)) { ++ printk("Error: lmmap failed, can't scale frequency.\n"); ++#ifdef CONFIG_CPU_FREQ_DEBUG ++ WARN_ON(1); ++#endif ++ return 0; ++ } ++ ++ if (((GET_ILMB() & ILMB_mskILMSZ) >> ILMB_offILMSZ) == 9) ++ len = 0x400; ++ else if (((GET_ILMB() & ILMB_mskILMSZ) >> ILMB_offILMSZ) == 10) ++ len = 0x800; ++ memcpy((unsigned char *)aligned_buf, (unsigned char *)start_fcs, len); ++ ++ do_fcs = (void *)aligned_buf; ++#endif ++ sf = ag101_run_freqs[idx].sf - 1; ++ cr = cal_edivahbclk(ag101_run_freqs[idx].cr); ++ ++ local_irq_save(flags); ++ do_fcs(sf, cr); ++ local_irq_restore(flags); ++ ++#if USE_CACHE ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_ULCK"::"r" (i):"memory"); ++#else ++ if (sys_lmunmap(aligned_buf, 0)) ++ printk("Error: lmunmap failed\n"); ++ ++ kfree((void *)buf); ++#endif ++ return 1; ++} ++ ++static int ag101_set_target(struct cpufreq_policy *policy, ++ unsigned int target_freq, unsigned int relation) ++{ ++ ++ unsigned int idx; 
++ struct cpufreq_frequency_table *ag101_freqs_table; ++ struct ag101_freq_struct *ag101_freq_settings; ++ struct cpufreq_freqs freqs; ++ ++ /* Get the current policy */ ++ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { ++ ++ ag101_freq_settings = ag101_run_freqs; ++ ag101_freqs_table = ag101_run_freq_table; ++ } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { ++ ++ ag101_freq_settings = ag101_turbo_freqs; ++ ag101_freqs_table = ag101_turbo_freq_table; ++ } else { ++ printk ++ ("Unknown FCS policy found. Using CPUFREQ_POLICY_PERFORMANCE\n"); ++ ag101_freq_settings = ag101_run_freqs; ++ ag101_freqs_table = ag101_run_freq_table; ++ } ++ ++ /* Lookup the next frequency */ ++ if (cpufreq_frequency_table_target ++ (policy, ag101_freqs_table, target_freq, relation, &idx)) ++ return -EINVAL; ++ ++ freqs.old = policy->cur; ++ freqs.new = ag101_freq_settings[idx].khz; ++ freqs.cpu = policy->cpu; ++ ++ /* ++ * Tell everyone what we're about to do... ++ * you should add a notify client with any platform specific ++ * Vcc changing capability ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); ++ ++ if (freqs.new != freqs.old) { ++ ++ if (!ag101_speedstep(idx)) ++ return -ENODEV; ++ } ++ ++ /* ++ * Tell everyone what we've just done... ++ * you should add a notify client with any platform specific ++ * SDRAM refresh timer adjustments ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); ++ ++ return 0; ++} ++ ++static int ag101_cpufreq_init(struct cpufreq_policy *policy) ++{ ++ ++ int i; ++ /* set default policy and cpuinfo */ ++ policy->governor = CPUFREQ_DEFAULT_GOVERNOR; ++ policy->policy = CPUFREQ_POLICY_PERFORMANCE; ++ policy->cpuinfo.max_freq = AG101_MAX_FREQ; ++ policy->cpuinfo.min_freq = AG101_MIN_FREQ; ++ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ ++ policy->cur = ag101_cpufreq_get(0); /* current freq */ ++ policy->min = policy->max = policy->cur; ++ ++ /* Generate the run cpufreq_frequency_table struct */ ++ for (i = 0; i < NUM_RUN_FREQS; i++) { ++ ++ ag101_run_freq_table[i].frequency = ag101_run_freqs[i].khz; ++ ag101_run_freq_table[i].index = i; ++ } ++ ++ ag101_run_freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ /* Generate the turbo cpufreq_frequency_table struct */ ++ for (i = 0; i < NUM_TURBO_FREQS; i++) { ++ ++ ag101_turbo_freq_table[i].frequency = ag101_turbo_freqs[i].khz; ++ ag101_turbo_freq_table[i].index = i; ++ } ++ ++ ag101_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ printk("CPU frequency change support initialized\n"); ++ ++ return 0; ++} ++ ++static struct cpufreq_driver ag101_cpufreq_driver = { ++ ++ .verify = ag101_verify_policy, ++ .target = ag101_set_target, ++ .init = ag101_cpufreq_init, ++ .get = ag101_cpufreq_get, ++ .name = "AG101", ++}; ++ ++static int __init ag101_cpu_init(void) ++{ ++ ++ if (CPU_IS_N1213_43U1HA0() || CPU_IS_N1213_43U1HB0()) ++ return cpufreq_register_driver(&ag101_cpufreq_driver); ++ else ++ return -ENODEV; ++} ++ ++static void __exit ag101_cpu_exit(void) ++{ ++ ++ if (CPU_IS_N1213_43U1HA0() || CPU_IS_N1213_43U1HB0()) ++ cpufreq_unregister_driver(&ag101_cpufreq_driver); ++} ++ ++MODULE_AUTHOR("Andes Technology Corporation"); ++MODULE_DESCRIPTION("CPU frequency changing driver for the AG101 architecture"); ++MODULE_LICENSE("GPL"); ++module_init(ag101_cpu_init); ++module_exit(ag101_cpu_exit); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/Kconfig linux-3.4.110/arch/nds32/platforms/ag101/Kconfig +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/Kconfig 1970-01-01 
01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/Kconfig 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,9 @@ ++menu "AG101 Platform Options" ++ ++config AUTO_SYS_CLK ++ bool "Automatic AHB Clock Detection" ++ default y ++ help ++ Automatic detection of AHB clock ++ ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/Makefile linux-3.4.110/arch/nds32/platforms/ag101/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/Makefile 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,5 @@ ++obj-y = devices.o ++obj-$(CONFIG_AUTO_SYS_CLK) += fia320.o ++obj-$(CONFIG_PM) += pm.o sleep.o ++obj-$(CONFIG_AG101_CPU_FREQ_FCS) += cpu-fcs.o ++obj-$(CONFIG_AG101_CPU_FREQ_SCALING_MODE) += freq-scaling.o +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/pm.c linux-3.4.110/arch/nds32/platforms/ag101/pm.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/pm.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/pm.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,190 @@ ++/* ++ * AG101 Power Management Routines ++ * ++ * Copyright (c) 2007 Harry Pan ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License. ++ * ++ * Abstract: ++ * ++ * This program is for AG101 power management routines. ++ * It is initail referred from 2.6.11 SA1100 PM driver. ++ * ++ * Revision History: ++ * ++ * Jul.13.2007 Initial code by Harry. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++extern void ag101_cpu_sleep(void); ++extern void ag101_cpu_resume(void); ++extern void ag101_cpu_resume2(void); ++ ++#include ++/* ++ * AG101 PMU sleep mode handler. 
++ */ ++void ag101_pmu_sleep(void) ++{ ++ int i; ++ static int irq_saves[3]; ++ ++ irq_saves[0] = REG32(INTC_FTINTC010_VA_BASE + 0x4); ++ irq_saves[1] = REG32(INTC_FTINTC010_VA_BASE + 0xc); ++ irq_saves[2] = REG32(INTC_FTINTC010_VA_BASE + 0x10); ++ ++ /* save SDRAM settings */ ++ for (i = 0; i < 0x30; i += 4) ++ REG32(PMU_FTPMU010_VA_BASE + 0x50 + i) = REG32(SDMC_FTSDMC021_VA_BASE + i); //SDRAMC ++ ++ /* set resume return address */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x88) = ++ virt_to_phys(ag101_cpu_resume) | 0x10000000; ++ REG32(PMU_FTPMU010_VA_BASE + 0x8c) = (u32) ag101_cpu_resume2; ++ REG32(PMU_FTPMU010_VA_BASE + 0x80) = GET_L1_PPTB(); ++ ++ /* setup wakeup sources */ ++ REG32(PMU_FTPMU010_VA_BASE + 0x14) |= -1; ++ REG32(PMU_FTPMU010_VA_BASE + 0x10) |= 0x1fff; ++ ++ cpu_dcache_wbinval_all(); ++ cpu_icache_inval_all(); ++ SET_CACHE_CTL(GET_CACHE_CTL() & ~CACHE_CTL_mskDC_EN); ++ ag101_cpu_sleep(); ++ ++#ifndef CONFIG_ANDES_PAGE_SIZE_8KB ++ ++ if (CPU_IS_N1213_43U1HA0()) { ++ int tmp = 0; ++ /* Downsize cache to bypass cache aliasing issue */ ++ ++ if ((CACHE_SET(ICACHE) * CACHE_LINE_SIZE(ICACHE)) > 4096) ++ tmp = 0x02 << SDZ_CTL_offICDZ; ++ ++ if ((CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE)) > 4096) ++ tmp |= 0x02 << SDZ_CTL_offDCDZ; ++ ++ SET_SDZ_CTL(tmp); ++ ISB(); ++ } ++#endif ++ ++ SET_CACHE_CTL(GET_CACHE_CTL() | CACHE_CTL_mskDC_EN); ++ REG32(INTC_FTINTC010_VA_BASE + 0x4) = irq_saves[0]; ++ REG32(INTC_FTINTC010_VA_BASE + 0xc) = irq_saves[1]; ++ REG32(INTC_FTINTC010_VA_BASE + 0x10) = irq_saves[2]; ++} ++ ++static int ag101_pm_valid(suspend_state_t state) ++{ ++ switch (state) { ++ case PM_SUSPEND_ON: ++ case PM_SUSPEND_STANDBY: ++ case PM_SUSPEND_MEM: ++ return 1; ++ ++ default: ++ return 0; ++ } ++} ++ ++static int ag101_pm_begin(suspend_state_t state) ++{ ++ /* TBD if we need it */ ++ return 0; ++} ++ ++static unsigned long irq_save; ++static inline void setup_wakeup_event(void) ++{ ++ REG32(GPIO_FTGPIO010_VA_BASE + 0x20) = 1; ++ irq_save = REG32(INTC_FTINTC010_VA_BASE + 0x4); ++ REG32(INTC_FTINTC010_VA_BASE + 0x4) &= ~(1 << 19); ++ REG32(INTC_FTINTC010_VA_BASE + 0x4) |= 1 << 13; ++} ++ ++static inline void remove_wakeup_event(void) ++{ ++ REG32(GPIO_FTGPIO010_VA_BASE + 0x30) = 1; ++ REG32(GPIO_FTGPIO010_VA_BASE + 0x20) = 0; ++ REG32(INTC_FTINTC010_VA_BASE + 0x4) = irq_save; ++} ++ ++static inline void cpu_standby(void) ++{ ++ asm __volatile__("standby no_wake_grant"); ++} ++ ++static int ag101_pm_enter(suspend_state_t state) ++{ ++ switch (state) { ++ case PM_SUSPEND_STANDBY: ++ setup_wakeup_event(); ++ cpu_standby(); ++ remove_wakeup_event(); ++ return 0; ++ case PM_SUSPEND_MEM: ++ ag101_pmu_sleep(); ++ return 0; ++ default: ++ return -EINVAL; ++ } ++} ++ ++/* ++ * Called after processes are frozen, but before we shutdown devices. ++ */ ++static int ag101_pm_prepare(void) ++{ ++ /* TBD if we need it */ ++ return 0; ++} ++ ++/* ++ * Called after devices are wakeuped, but before processes are thawed. ++ */ ++static void ag101_pm_finish(void) ++{ ++ /* TBD if we need it */ ++} ++ ++static void ag101_pm_end(void) ++{ ++ /* TBD if we need it */ ++} ++ ++/* ++ * Set to PM_DISK_FIRMWARE so we can quickly veto suspend-to-disk. 
++ */ ++static struct platform_suspend_ops ag101_pm_ops = { ++ .valid = ag101_pm_valid, ++ .begin = ag101_pm_begin, ++ .prepare = ag101_pm_prepare, ++ .enter = ag101_pm_enter, ++ .finish = ag101_pm_finish, ++ .end = ag101_pm_end, ++}; ++ ++static int __init ag101_pm_init(void) ++{ ++ printk("PM driver init\n"); ++ suspend_set_ops(&ag101_pm_ops); ++ REG32(GPIO_FTGPIO010_VA_BASE + 0x30) = 1; ++ REG32(INTC_FTINTC010_VA_BASE + 0x0c) &= ~(1 << 13); ++ REG32(INTC_FTINTC010_VA_BASE + 0x10) &= ~(1 << 13); ++ ++ return 0; ++} ++ ++late_initcall(ag101_pm_init); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101/sleep.S linux-3.4.110/arch/nds32/platforms/ag101/sleep.S +--- linux-3.4.110.orig/arch/nds32/platforms/ag101/sleep.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101/sleep.S 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,118 @@ ++/* ++ * AG101 Assembler Sleep/WakeUp Management Routines ++ * ++ * Copyright (c) 2007 Harry Pan ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License. ++ * ++ * Abstract: ++ * ++ * This program is for AG101 suspend/wakeup. ++ * ++ * Revision History: ++ * ++ * Jul.13.2007 Initial code by Harry. ++ */ ++#include ++#include ++#include ++#include ++ ++ .text ++ ++/* ag101_cpu_suspend() ++ * ++ * Causes AG101 to enter sleep state ++ */ ++ ++ENTRY(ag101_cpu_sleep) ++ pushm $r0, $r30 ++ mfusr $r0, $d0.lo ! $d0 lo byte ++ mfusr $r1, $d0.hi ! $d0 hi byte ++ mfusr $r2, $d1.lo ! $d1 lo byte ++ mfusr $r3, $d1.hi ! $d1 hi byte ++ mfsr $r4, $mr0 ++ mfsr $r5, $mr1 ++ mfsr $r6, $mr4 ++ mfsr $r7, $mr6 ++ mfsr $r8, $mr7 ++ mfsr $r9, $mr8 ++ mfsr $r10, $ir0 ++ mfsr $r11, $ir1 ++ mfsr $r12, $ir2 ++ mfsr $r13, $ir3 ++ mfsr $r14, $ir9 ++ mfsr $r15, $ir10 ++ mfsr $r16, $ir12 ++ mfsr $r17, $ir13 ++ mfsr $r18, $ir14 ++ mfsr $r19, $ir15 ++ pushm $r0, $r19 ++ ++ sethi $r0, hi20(PMU_FTPMU010_0_VA_BASE + 0x84) ++ ori $r2, $r0, lo12(PMU_FTPMU010_0_VA_BASE + 0x84) ++ swi $r31, [$r2] ++ ++ lwi $r2, [$r0+#0x0c] ! sleep mode ++ ori $r2, $r2, #1 ++ swi $r2, [$r0+#0x0c] ! sleep mode ++ standby wake_grant ++1: ++ b 1b ! loop waiting for sleep ++ ++/* ag101_cpu_resume() ++ * ++ * Entry point from boot code back to kernel. ++ * ++ */ ++ ++ENTRY(ag101_cpu_resume) ++ mfsr $r2, $mr0 ++ ori $r2, $r2, #0x6 ++#ifdef CONFIG_ANDES_PAGE_SIZE_8KB ++ ori $r2, $r2, #0x1 ++#endif ++ mtsr $r2, $mr0 ++ ++ sethi $r2, hi20(PMU_FTPMU010_0_PA_BASE + 0x80) ++ ori $r2, $r2, lo12(PMU_FTPMU010_0_PA_BASE + 0x80) ++ lwi $r3, [$r2] ++ lwi $r4, [$r2 + 0xc] ++ mtsr $r3, $mr1 ++ ++ mfsr $r0, $mr8 ++ ori $r0, $r0, #0x1 ++ mtsr $r0, $mr8 ++ ++ sethi $r2, hi20(AHB_ATFAHBC020S_0_PA_BASE + 0x88) ++ ori $r2, $r2, lo12(AHB_ATFAHBC020S_0_PA_BASE + 0x88) ++ movi $r3, #0x1 ++ swi $r3, [$r2] ++ ++ jral.ton $r4, $r4 ++ ++ENTRY(ag101_cpu_resume2) ++ popm $r0, $r19 ++ mtusr $r0, $d0.lo ! $d0 lo byte ++ mtusr $r1, $d0.hi ! $d0 hi byte ++ mtusr $r2, $d1.lo ! $d1 lo byte ++ mtusr $r3, $d1.hi ! 
$d1 hi byte ++ mtsr $r4, $mr0 ++ mtsr $r5, $mr1 ++ mtsr $r6, $mr4 ++ mtsr $r7, $mr6 ++ mtsr $r8, $mr7 ++ mtsr $r9, $mr8 ++ mtsr $r10, $ir0 ++ mtsr $r11, $ir1 ++ mtsr $r12, $ir2 ++ mtsr $r13, $ir3 ++ mtsr $r14, $ir9 ++ mtsr $r15, $ir10 ++ mtsr $r16, $ir12 ++ mtsr $r17, $ir13 ++ mtsr $r18, $ir14 ++ mtsr $r19, $ir15 ++ popm $r0, $r30 ++ ret +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101p/devices.c linux-3.4.110/arch/nds32/platforms/ag101p/devices.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101p/devices.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101p/devices.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,104 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++const struct map_desc platform_io_desc[] __initdata = { ++ {UART0_VA_BASE, UART0_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {UART1_VA_BASE, UART1_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {INTC_FTINTC010_0_VA_BASE, INTC_FTINTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {TIMER_FTTMR010_0_VA_BASE, TIMER_FTTMR010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SSP_FTSSP010_0_VA_BASE, SSP_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PMU_FTPMU010_0_VA_BASE, PMU_FTPMU010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {MAC_FTMAC100_0_VA_BASE, MAC_FTMAC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SDC_FTSDC010_0_VA_BASE, SDC_FTSDC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {RTC_FTRTC010_0_VA_BASE, RTC_FTRTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {WDT_FTWDT010_0_VA_BASE, WDT_FTWDT010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPIO_FTGPIO010_0_VA_BASE, GPIO_FTGPIO010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {CFC_FTCFC010_0_VA_BASE, CFC_FTCFC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LCD_FTLCDC100_0_VA_BASE, LCD_FTLCDC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {I2C_FTI2C010_0_VA_BASE, I2C_FTI2C010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {DMAC_FTDMAC020_0_VA_BASE, DMAC_FTDMAC020_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {APBBRG_FTAPBBRG020S_0_VA_BASE, APBBRG_FTAPBBRG020S_0_PA_BASE, ++ PAGE_SIZE, MT_DEVICE_NCB}, ++ {USB_FOTG2XX_0_VA_BASE, USB_FOTG2XX_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PCIIO_0_VA_BASE, PCIIO_0_PA_BASE, (0x000FF000 & PAGE_MASK), ++ MT_DEVICE_NCB}, ++ {PCIC_FTPCI100_0_VA_BASE, PCIC_FTPCI100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {AHB_ATFAHBC020S_0_VA_BASE, AHB_ATFAHBC020S_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LED_VA_BASE, LED_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {SDMC_FTSDMC021_VA_BASE, SDMC_FTSDMC021_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {L2CC_VA_BASE, L2CC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB} ++}; ++ ++static void __init platform_map_io(void) ++{ ++ iotable_init((struct map_desc *)platform_io_desc, ++ ARRAY_SIZE(platform_io_desc)); ++} ++ ++static struct uart_port uart0 = { ++ .membase = (void __iomem *)UART0_VA_BASE, ++ .irq = UART0_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 0, ++ .mapbase = UART0_PA_BASE, ++}; ++ ++static struct uart_port uart1 = { ++ .membase = (void __iomem *)UART1_VA_BASE, ++ .irq = UART1_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 1, ++ .mapbase = UART1_PA_BASE, ++}; ++ ++static void __init soc_init(void) ++{ ++ early_serial_setup(&uart0); ++ early_serial_setup(&uart1); ++} ++ ++MACHINE_START(FARADAY, PLATFORM_NAME) ++ .param_offset = BOOT_PARAMETER_PA_BASE, ++ .map_io = 
platform_map_io, ++ .init_irq = platform_init_irq, ++ .timer = &platform_timer, /* defined in timer.c */ ++ .init_machine = soc_init, ++MACHINE_END static struct platform_device usb_dev_otg_host = { ++ .name = "fotg-ehci", ++}; ++ ++static int __init fotg_init(void) ++{ ++ platform_device_register(&usb_dev_otg_host); ++ return 0; ++} ++ ++device_initcall(fotg_init); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101p/interrupt-latency.c linux-3.4.110/arch/nds32/platforms/ag101p/interrupt-latency.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag101p/interrupt-latency.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101p/interrupt-latency.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,197 @@ ++/* ++ * interrupt_latency v1.0 11/25/01 ++ * www.embeddedlinuxinterfacing.com ++ * ++ * The original location of this code is ++ * http://www.embeddedlinuxinterfacing.com/chapters/11/ ++ * ++ * Copyright (C) 2001 by Craig Hollabaugh ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Library General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Library General Public License for more details. ++ * ++ * You should have received a copy of the GNU Library General Public ++ * License along with this program; if not, write to the ++ * Free Software Foundation, Inc., ++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++/* ++ * interrupt_latency.c is based on procfs_example.c by Erik Mouw. ++ * For more information, please see, The Linux Kernel Procfs Guide, Erik Mouw ++ * http://kernelnewbies.org/documents/kdoc/procfs-guide/lkprocfsguide.html ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ILT_MODULE_VERSION "1.0" ++#define MODULE_NAME "interrupt_latency" ++ ++#define USE_PFM ++static int interruptcount = 0; ++#ifdef USE_PFM ++static unsigned int start_cycle = 0, finish_cycle = 0; ++static unsigned int start_inst = 0, finish_inst = 0; ++static unsigned int start_int = 0, finish_int = 0; ++#else ++static struct timeval tv1, tv2; /* do_gettimeofday fills these */ ++#endif ++ ++#define INTERRUPT 9 ++ ++static struct proc_dir_entry *interrupt_latency_file; ++ ++/* ++ * function interrupt_interrupt_latency ++ * This function is the interrupt handler for interrupt 7. It sets the tv2 ++ * structure using do_gettimeofday. It then deasserts D7. 
++ */ ++static irqreturn_t interrupt_interrupt_latency(int irq, void *dev_id) ++{ ++ unsigned int ir15, pfm_ctl; ++#ifdef USE_PFM ++ /* disable counter */ ++ pfm_ctl = 0x4410000; //cycles, instructions and icache misses ++ //pfm_ctl = 0x4c0000; //instructions and interrupts ++ //pfm_ctl = 0x4510000; //icache accesses and misses ++ __nds32__mtsr(pfm_ctl, NDS32_SR_PFM_CTL); //instructions and interrupts ++ finish_cycle = __nds32__mfsr(NDS32_SR_PFMC0); ++ finish_inst = __nds32__mfsr(NDS32_SR_PFMC2); ++ finish_int = __nds32__mfsr(NDS32_SR_PFMC1); ++#else ++ do_gettimeofday(&tv2); ++#endif ++ /* deassert the interrupt signal */ ++ ir15 = __nds32__mfsr(NDS32_SR_INT_PEND); ++ __nds32__mtsr((~0x10000) & ir15, NDS32_SR_INT_PEND); ++ __nds32__dsb(); ++ ++ interruptcount++; ++ return IRQ_HANDLED; ++} ++ ++/* ++ * function proc_read_interrupt_latency ++ * The kernel executes this function when a read operation occurs on ++ * /proc/interrupt_latency. This function sets the tv1 structure. It asserts ++ * D7 which should immediately cause interrupt 7 to occur. The handler ++ * records tv2 and deasserts D7. This function returns the time differential ++ * between tv2 and tv1. ++ */ ++static int proc_read_interrupt_latency(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ int len; ++ unsigned int ir15, pfm_ctl; ++#ifdef USE_PFM ++ //pfm_ctl = 0x4c0007; ++ //pfm_ctl = 0x4510007; ++ pfm_ctl = 0x4410007; ++ start_cycle = __nds32__mfsr(NDS32_SR_PFMC0); ++ start_inst = __nds32__mfsr(NDS32_SR_PFMC2); ++ start_int = __nds32__mfsr(NDS32_SR_PFMC1); ++ __nds32__mtsr(pfm_ctl, NDS32_SR_PFM_CTL); ++#else ++ do_gettimeofday(&tv1); ++#endif ++ /* assert the interrupt signal */ ++ ir15 = __nds32__mfsr(NDS32_SR_INT_PEND); ++ __nds32__mtsr(0x10000 | ir15, NDS32_SR_INT_PEND); ++ __nds32__dsb(); ++ ++#ifdef USE_PFM ++ len = ++ sprintf(page, ++ "Cnt0 %11u Start %11u Finish %11u Cycles %11u\n" ++ "Cnt2 %11u Start %11u Finish %11u icache miss %11u\n" ++ "Cnt1 %11u Start %11u Finish %11u Instructions %11u\n" ++ "Count %11i\n", ((pfm_ctl & 0x8000) >> 15), start_cycle, ++ finish_cycle, (finish_cycle - start_cycle), ++ ((pfm_ctl & 0xfc00000) >> 22), start_inst, finish_inst, ++ (finish_inst - start_inst), ((pfm_ctl & 0x3f0000) >> 16), ++ start_int, finish_int, (finish_int - start_int), ++ interruptcount); ++#else ++ len = sprintf(page, "Start %9i.%06i\nFinish %9i.%06i\nLatency %17i\n\ ++Count %19i\n", (int)tv1.tv_sec, (int)tv1.tv_usec, (int)tv2.tv_sec, (int)tv2.tv_usec, (int)(tv2.tv_usec - tv1.tv_usec), interruptcount); ++#endif ++ *eof = 1; ++ return len; ++} ++ ++/* ++ * function init_interrupt_latency ++ * This function creates the /proc directory entry interrupt_latency. It ++ * also configures the parallel port then requests interrupt 7 from Linux. 
++ */ ++static int __init init_interrupt_latency(void) ++{ ++ int rv = 0; ++ unsigned int ir14; ++ ++ interrupt_latency_file = ++ create_proc_entry("interrupt_latency", 0444, NULL); ++ if (interrupt_latency_file == NULL) { ++ return -ENOMEM; ++ } ++ ++ interrupt_latency_file->data = NULL; ++ interrupt_latency_file->read_proc = &proc_read_interrupt_latency; ++ interrupt_latency_file->write_proc = NULL; ++ ++ /* request interrupt from linux */ ++ rv = request_irq(INTERRUPT, interrupt_interrupt_latency, IRQF_DISABLED, ++ "interrupt_latency", (void *)NULL); ++ if (rv) { ++ printk("Can't get interrupt %d\n", INTERRUPT); ++ goto no_interrupt_latency; ++ } ++ ++ /* unmask SWI */ ++ ir14 = __nds32__mfsr(NDS32_SR_INT_MASK); ++ __nds32__mtsr((0x10000 | ir14), NDS32_SR_INT_MASK); ++ __nds32__dsb(); ++ ++ /* everything initialized */ ++ printk(KERN_INFO "%s %s initialized\n", MODULE_NAME, ++ ILT_MODULE_VERSION); ++ return 0; ++ ++ /* remove the proc entry on error */ ++no_interrupt_latency: ++ remove_proc_entry("interrupt_latency", NULL); ++ return 0; ++} ++ ++/* ++ * function cleanup_interrupt_latency ++ * This function frees interrupt then removes the /proc directory entry ++ * interrupt_latency. ++ */ ++static void __exit cleanup_interrupt_latency(void) ++{ ++ /* free the interrupt */ ++ free_irq(INTERRUPT, (void *)NULL); ++ ++ remove_proc_entry("interrupt_latency", NULL); ++ ++ printk(KERN_INFO "%s %s removed\n", MODULE_NAME, ILT_MODULE_VERSION); ++} ++ ++module_init(init_interrupt_latency); ++module_exit(cleanup_interrupt_latency); ++ ++MODULE_DESCRIPTION("interrupt_latency proc module"); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101p/Kconfig linux-3.4.110/arch/nds32/platforms/ag101p/Kconfig +--- linux-3.4.110.orig/arch/nds32/platforms/ag101p/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101p/Kconfig 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,11 @@ ++menu "AG101P Platform Options" ++config CACHE_L2 ++bool "Support L2 cache" ++ default n ++ ++config MEASURE_INTERRUPT_LATENCY ++ bool "Measure interrupt latency" ++ default n ++ help ++ Enable measuring interrupt latency with software interrupt ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag101p/Makefile linux-3.4.110/arch/nds32/platforms/ag101p/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/ag101p/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag101p/Makefile 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,2 @@ ++obj-y = devices.o ++obj-$(CONFIG_MEASURE_INTERRUPT_LATENCY) += interrupt-latency.o +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/ahbclkcal.c linux-3.4.110/arch/nds32/platforms/ag102/ahbclkcal.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/ahbclkcal.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/ahbclkcal.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,216 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#ifdef CONFIG_AUTO_SYS_CLK ++ ++#define OSCH3_CLK 33000000 ++#define OSCH_CLK 25000000 ++#define MHZ 1000000 ++/* ++ * Table for ahb divisor, PMODE[07:04] ++ */ ++static const int ahb_div[16] = { ++ 1, 2, 3, 4, 5, 6, 3, 5, ++ 8, 10, 12, 14, 15, 18, 20, -1 ++}; ++ ++/* ag102_get_ahb_clk() ++ * ++ * return AHB clock in Hz. 
++ */ ++static int ahbclk; ++void ag102_calc_ahb_clk(void) ++{ ++ unsigned int pcs1_param; ++ unsigned int pcs4_param; ++ unsigned int pcs5_param; ++ unsigned int main_PLL_div; ++ unsigned int main_PLL_out; ++ unsigned int ratio; ++ unsigned int main_PLL_N; ++ unsigned int main_PLL_M = 1; ++ unsigned int F_core0; ++ unsigned int F_core1; ++ unsigned int F_l2cc; ++ unsigned int F_ddr2; ++ unsigned int F_ahb; ++ unsigned int F_apb; ++ unsigned int F_pci; ++ ++ pcs1_param = REG32(PCU_VA_BASE + 0xa4); //pcs1 parameter register, for core/ahb/apb clk ratio setting ++ main_PLL_div = (pcs1_param >> 4) & 0x3; ++ ratio = pcs1_param & 0xf; ++ ++ pcs4_param = REG32(PCU_VA_BASE + 0x104); //pcs4 parameter register, for main PLL setting ++ main_PLL_N = pcs4_param & 0xff; ++ main_PLL_out = (OSCH3_CLK * main_PLL_N / main_PLL_M) >> main_PLL_div; ++ ++ pcs5_param = REG32(PCU_VA_BASE + 0x124); //pcs5 parameter register, for PCI PLL/DLL setting ++ ++ switch (ratio) { ++ case 0: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 1: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 2: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 1; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 3: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 1; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 4: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 2; ++ F_l2cc = F_core0 >> 2; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ case 5: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 2; ++ F_l2cc = F_core0 >> 2; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 6: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 7: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0; ++ F_ddr2 = F_core0 >> 1; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 8: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 9: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ case 10: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 1; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 11: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 1; ++ F_l2cc = F_core0 >> 1; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 12: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 2; ++ F_l2cc = F_core0 >> 2; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 2; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 13: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0 >> 2; ++ F_l2cc = F_core0 >> 2; ++ F_ddr2 = OSCH_CLK * 24 / 2; ++ F_ahb = F_core0 >> 3; ++ F_apb = F_ahb >> 1; ++ break; ++ ++ case 14: ++ F_core0 = main_PLL_out; ++ F_core1 = F_core0; ++ F_l2cc = F_core0; ++ F_ddr2 = F_core0; ++ F_ahb = 
F_core0; ++ F_apb = F_ahb; ++ break; ++ default: // 15 ++ F_core0 = OSCH3_CLK; ++ F_core1 = F_core0; ++ F_l2cc = F_core0; ++ F_ddr2 = F_core0; ++ F_ahb = F_core0; ++ F_apb = F_ahb; ++ break; ++ } ++ ++ F_pci = OSCH_CLK * 24 / 9; ++ if ((pcs5_param & 0x800) == 0) ++ F_pci = F_pci >> 1; ++ ++ ahbclk = (int)F_ahb; ++} ++ ++int ag102_get_ahb_clk(void) ++{ ++ //return ahbclk+MHZ; ++ return ahbclk; ++} ++ ++EXPORT_SYMBOL(ag102_get_ahb_clk); ++#else ++void ag102_calc_ahb_clk(void) ++{ ++} ++#endif +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/devices.c linux-3.4.110/arch/nds32/platforms/ag102/devices.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/devices.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/devices.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,181 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern void amic_init(void); ++ ++const struct map_desc platform_io_desc[] __initdata = { ++ {UART0_VA_BASE, UART0_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {UART1_VA_BASE, UART1_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {AMIC_VA_BASE, AMIC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {GMAC_VA_BASE, GMAC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {APBBR_VA_BASE, APBBR_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {TIMER_VA_BASE, TIMER_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {L2CC_VA_BASE, L2CC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {PCIIO_0_VA_BASE, PCIIO_0_PA_BASE, 0x0000F000, MT_DEVICE_NCB}, ++ {PCIC_FTPCI100_0_VA_BASE, PCIC_FTPCI100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SDC_FTSDC010_0_VA_BASE, SDC_FTSDC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {RTC_FTRTC010_0_VA_BASE, RTC_FTRTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {WDT_FTWDT010_0_VA_BASE, WDT_FTWDT010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {I2C_FTI2C010_0_VA_BASE, I2C_FTI2C010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPIO_FTGPIO010_0_VA_BASE, GPIO_FTGPIO010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PCU_VA_BASE, PCU_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {LPC_IO_VA_BASE, LPC_IO_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {LPC_REG_VA_BASE, LPC_REG_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {DMAC_FTDMAC020_0_VA_BASE, DMAC_FTDMAC020_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPU_VA_BASE, GPU_PA_BASE, SZ_64K, MT_DEVICE_NCB}, ++ {IDE_FTIDE020_VA_BASE, IDE_FTIDE020_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {USB_FOTG2XX_0_VA_BASE, USB_FOTG2XX_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {CFC_FTCFC010_0_VA_BASE, CFC_FTCFC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SSP_FTSSP010_0_VA_BASE, SSP_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SPI_FTSSP010_0_VA_BASE, SPI_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {DDR2C_VA_BASE, DDR2C_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {AHB_ATFAHBC020S_0_VA_BASE, AHB_ATFAHBC020S_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB} ++ //{ GPIO_VA_BASE, GPIO_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB } ++}; ++ ++/* hid descriptor for a keyboard */ ++static struct hidg_func_descriptor my_hid_data = { ++ .subclass = 0, /* No subclass */ ++ .protocol = 1, /* Keyboard */ ++ .report_length = 8, ++ .report_desc_length = 63, ++ .report_desc = { ++ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ ++ 0x09, 0x06, /* USAGE (Keyboard) */ ++ 0xa1, 0x01, /* COLLECTION (Application) */ ++ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */ ++ 0x19, 0xe0, /* USAGE_MINIMUM (Keyboard LeftControl) */ ++ 0x29, 0xe7, /* USAGE_MAXIMUM (Keyboard Right GUI) */ ++ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */ ++ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */ ++ 0x75, 0x01, 
/* REPORT_SIZE (1) */ ++ 0x95, 0x08, /* REPORT_COUNT (8) */ ++ 0x81, 0x02, /* INPUT (Data,Var,Abs) */ ++ 0x95, 0x01, /* REPORT_COUNT (1) */ ++ 0x75, 0x08, /* REPORT_SIZE (8) */ ++ 0x81, 0x03, /* INPUT (Cnst,Var,Abs) */ ++ 0x95, 0x05, /* REPORT_COUNT (5) */ ++ 0x75, 0x01, /* REPORT_SIZE (1) */ ++ 0x05, 0x08, /* USAGE_PAGE (LEDs) */ ++ 0x19, 0x01, /* USAGE_MINIMUM (Num Lock) */ ++ 0x29, 0x05, /* USAGE_MAXIMUM (Kana) */ ++ 0x91, 0x02, /* OUTPUT (Data,Var,Abs) */ ++ 0x95, 0x01, /* REPORT_COUNT (1) */ ++ 0x75, 0x03, /* REPORT_SIZE (3) */ ++ 0x91, 0x03, /* OUTPUT (Cnst,Var,Abs) */ ++ 0x95, 0x06, /* REPORT_COUNT (6) */ ++ 0x75, 0x08, /* REPORT_SIZE (8) */ ++ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */ ++ 0x25, 0x65, /* LOGICAL_MAXIMUM (101) */ ++ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */ ++ 0x19, 0x00, /* USAGE_MINIMUM (Reserved) */ ++ 0x29, 0x65, /* USAGE_MAXIMUM (Keyboard Application) */ ++ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */ ++ 0xc0 /* END_COLLECTION */ ++ } ++}; ++ ++static struct platform_device my_hid = { ++ .name = "hidg", ++ .id = 0, ++ .num_resources = 0, ++ .resource = 0, ++ .dev.platform_data = &my_hid_data, ++}; ++ ++static void __init platform_map_io(void) ++{ ++ iotable_init((struct map_desc *)platform_io_desc, ++ ARRAY_SIZE(platform_io_desc)); ++} ++ ++static struct resource ftgmac100_resources[] = { ++ [0] = { ++ .start = GMAC_PA_BASE, ++ .end = GMAC_PA_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = GMAC_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device ftgmac100_device = { ++ .name = "ftgmac100", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(ftgmac100_resources), ++ .resource = ftgmac100_resources, ++}; ++ ++static int __init devices_init(void) ++{ ++ platform_device_register(&ftgmac100_device); ++ platform_device_register(&my_hid); ++ return 0; ++} ++ ++arch_initcall(devices_init); ++ ++static struct uart_port uart0 = { ++ .membase = (void __iomem *)UART0_VA_BASE, ++ .irq = UART0_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 0, ++ .mapbase = UART0_PA_BASE, ++}; ++ ++static struct uart_port uart1 = { ++ .membase = (void __iomem *)UART1_VA_BASE, ++ .irq = UART1_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 1, ++ .mapbase = UART1_PA_BASE, ++}; ++ ++void ag102_calc_ahb_clk(void); ++static void __init soc_init(void) ++{ ++ ag102_calc_ahb_clk(); ++ early_serial_setup(&uart0); ++ early_serial_setup(&uart1); ++} ++ ++MACHINE_START(FARADAY, PLATFORM_NAME) ++ .param_offset = BOOT_PARAMETER_PA_BASE,.map_io = platform_map_io,.init_irq = amic_init,.timer = &platform_timer, /* defined in timer.c */ ++.init_machine = soc_init, ++ MACHINE_END static struct platform_device usb_dev_otg_host = { ++ .name = "fotg-ehci", ++}; ++ ++static int __init fotg_init(void) ++{ ++ platform_device_register(&usb_dev_otg_host); ++ return 0; ++} ++ ++device_initcall(fotg_init); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/freq-scaling.c linux-3.4.110/arch/nds32/platforms/ag102/freq-scaling.c +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/freq-scaling.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/freq-scaling.c 2016-04-07 10:20:50.986082726 +0200 +@@ -0,0 +1,512 @@ ++/* ++ * linux/arch/nds32/platforms/ag102/cpu-fcs.c ++ * ++ * Copyright (C) 2002,2003 Intrinsyc Software ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This 
program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * History: ++ * 31-Jul-2002 : Initial version [FB] ++ * 29-Jan-2003 : added PXA255 support [FB] ++ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) ++ * 18-Jun-2008 : ported to NDS32 architecture ( Roy Lee, Andestech Corp.) ++ * 13-Oct-2010 : ported to NDS32 AG102 architecture(Gavin Guo, Andestech Corp.) ++ * ++ * Note: ++ * This driver may change the memory bus clock rate, but will not do any ++ * platform specific access timing changes... for example if you have flash ++ * memory connected to CS0, you will need to register a platform specific ++ * notifier which will adjust the memory access strobes to maintain a ++ * minimum strobe width. ++ * ++ */ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pcu.h" ++ ++void __ddr_fsc_lock_start(); ++void __ddr_fsc_lock_end(); ++#define AG102_MIN_FREQ 200000000 ++#define AG102_MAX_FREQ 1000000000 ++#define OSCH3_CLK 33000000 /* 33MHz AG102 */ ++ ++struct ag102_freq_struct { ++ unsigned int khz; /* cpu_clk in khz */ ++ unsigned int sf; /* scaling factor */ ++ unsigned int cr; /* clock ratio */ ++}; ++ ++struct ag102_freq_struct ag102_run_freqs[] = { ++ /* khz sf, cr */ ++ {OSCH3_CLK * 7, 7, 0}, /* CPUA 231MHz */ ++ {OSCH3_CLK * 8, 8, 0}, /* CPUA 264MHz */ ++ {OSCH3_CLK * 9, 9, 0}, ++ {OSCH3_CLK * 10, 10, 0}, ++ {OSCH3_CLK * 11, 11, 0}, ++ {OSCH3_CLK * 12, 12, 0}, ++ {OSCH3_CLK * 13, 13, 0}, ++ {OSCH3_CLK * 14, 14, 0}, ++ {OSCH3_CLK * 15, 15, 0}, ++ {OSCH3_CLK * 16, 16, 0}, ++ /* ++ * {OSCH3_CLK * 17, 17, 0}, ++ * {OSCH3_CLK * 18, 18, 0}, ++ * {OSCH3_CLK * 19, 19, 0}, ++ * {OSCH3_CLK * 20, 20, 0}, ++ * {OSCH3_CLK * 21, 21, 0}, ++ * {OSCH3_CLK * 22, 22, 0}, ++ * {OSCH3_CLK * 23, 23, 0}, ++ * {OSCH3_CLK * 24, 24, 0}, ++ * {OSCH3_CLK * 25, 25, 0}, ++ * {OSCH3_CLK * 26, 26, 0}, ++ * {OSCH3_CLK * 27, 27, 0}, ++ * {OSCH3_CLK * 28, 28, 0}, ++ * {OSCH3_CLK * 29, 29, 0}, ++ * {OSCH3_CLK * 30, 30, 0}, ++ */ ++ {0} /* The last 30 is CPUA 1000MHz */ ++}; ++ ++#define NUM_RUN_FREQS ARRAY_SIZE(ag102_run_freqs) ++static struct cpufreq_frequency_table ag102_run_freq_table[NUM_RUN_FREQS + 1]; ++ ++/* Generic helper function get CPU clocks in kHz */ ++unsigned int ag102_cpufreq_get(unsigned int dummy) ++{ ++ unsigned int main_PLL_M = 1; ++ unsigned int main_PLL_out; ++ unsigned int main_PLL_div; ++ unsigned int ratio; ++ unsigned int main_PLL_N; ++ unsigned int pcs1_param; ++ unsigned int pcs4_param; ++ ++ pcs1_param = PCU_GET_REG(PCS1_ST2); ++ main_PLL_div = PCU_EXTRACT(PCS1_ST2, DIV, pcs1_param); ++ ratio = PCU_EXTRACT(PCS1_ST2, RATIO, pcs1_param); ++ ++ pcs4_param = PCU_GET_REG(PCS4_ST2); ++ main_PLL_N = PCU_EXTRACT(PCS4_ST2, N_FACTOR, pcs4_param); ++ main_PLL_out = (OSCH3_CLK * main_PLL_N / main_PLL_M) >> main_PLL_div; ++ ++ printk("CPU frequency is %d\n", 
main_PLL_out); ++ return main_PLL_out; ++} ++ ++/* find a valid frequency point */ ++static int ag102_verify_policy(struct cpufreq_policy *policy) ++{ ++ printk("Verified CPU policy: %dKhz min to %dKhz max\n", policy->min, ++ policy->max); ++ return cpufreq_frequency_table_verify(policy, ag102_run_freq_table); ++} ++ ++void wakeup_and_ddr_train(void) ++{ ++ unsigned int csr_reg; ++ unsigned long ddr2_va_base = DDR2C_VA_BASE; ++ /* issue standby */ ++ /*standby(PCU_STBY_WAIT_DONE); */ ++ __asm__ __volatile__("__fcs_stby:"); ++ __asm__ __volatile__("standby wait_done"); ++ /* Fill the inline assembly codes into Cache to avoid SDRAM access before the data training is done */ ++ __asm__ __volatile__("__ddr_fsc_lock_start:\n" ++ "li $p0, %0 \n" ++ "lwi $p1, [$p0 + 0x1f0] \n" ++ "ori $p1, $p1, 0x100 \n" ++ "swi $p1, [$p0 + 0x1f0] ! DLL Reset \n" ++ "msync \n" ++ "isb \n" ++ "1: \n" ++ "lwi $p1, [$p0 + 0x1f0] \n" ++ "andi $p1, $p1, 0x100 \n" ++ "bnez $p1, 1b ! Wait until DLL reset is done\n" ++ "__trigger_dt:\n" ++ "li $p0, %1 \n" ++ "lwi $p1, [$p0] \n" ++ "li $p0, 0x40000000 \n" ++ "or $p1, $p1, $p0 \n" ++ "li $p0, %2 \n" ++ "swi $p1, [$p0] ! Trigger data training\n" ++ "msync \n" ++ "isb \n" ++ "li $p0, %3 \n" ++ "__wait_init_0:\n" ++ "lwi $p1, [$p0 + 0xc] \n" ++ "srli $p1, $p1, 23 \n" ++ "andi $p1, $p1, 0x1 \n" ++ "bnez $p1, __wait_init_0 ! Wait until init bit in CSR == 0\n" ++ "lwi $p1, [$p0 + 0xc] \n" ++ "srli $p1, $p1, 20 \n" ++ "andi $p1, $p1, 0x1 \n" ++ "beqz $p1, __dt_pass ! Monitor data training result\n" ++ "li $p0, 0xff942000 \n" ++ "li $p1, 0x45 \n" ++ "swi $p1, [$p0] ! put 'E' to UART\n" ++ "li $p1, 0x52 \n" ++ "swi $p1, [$p0] ! put 'R' to UART\n" ++ "li $p1, 0x52 \n" ++ "swi $p1, [$p0] ! put 'R' to UART\n" ++ "li $p1, 0x4f \n" ++ "swi $p1, [$p0] ! put 'O' to UART\n" ++ "li $p1, 0x52 \n" ++ "swi $p1, [$p0] ! put 'R' to UART\n" ++ "li $p1, 0xd \n" ++ "swi $p1, [$p0] ! put '\\r' to UART\n" ++ "li $p1, 0xa \n" ++ "swi $p1, [$p0] ! put '\\n' to UART\n" ++ "j __ddr_fsc_lock_end \n" ++ "__dt_pass:\n" ++ "li $p0, 0xff942000 \n" ++ "li $p1, 0x50 \n" ++ "swi $p1, [$p0] ! put 'P' to UART\n" ++ "li $p1, 'A \n" ++ "swi $p1, [$p0] ! put 'A' to UART\n" ++ "li $p1, 0x53 \n" ++ "swi $p1, [$p0] ! put 'S' to UART\n" ++ "li $p1, 0x53 \n" ++ "swi $p1, [$p0] ! put 'S' to UART\n" ++ "li $p1, '\\r \n" ++ "swi $p1, [$p0] ! put '\\r' to UART\n" ++ "li $p1, '\\n \n" ++ "swi $p1, [$p0] ! 
put '\\n' to UART\n" ++ "__ddr_fsc_lock_end: \n":: ++ "r" ++ (ddr2_va_base), ++ "r" ++ (ddr2_va_base), ++ "r"(ddr2_va_base), "r"(ddr2_va_base)); ++ ++ csr_reg = GET_REG(DDR2C_VA_BASE + 0xc); ++ if (csr_reg & 0x100000) { ++ printk("\n###### Data training error ######\nCSR = 0x%x\n\n", ++ csr_reg); ++ } ++} ++ ++void check_status() ++{ ++ unsigned int reg_value_tmp; ++ ++ /* ++ * CPU will continue if it is waked up ++ * => check status ++ */ ++ reg_value_tmp = PCU_GET_REG(BSM_STATUS); ++ if (PCU_EXTRACT(BSM_STATUS, STS, reg_value_tmp) != PCS_BSM_DONE) { ++ printk("ERROR: BSM status is not expected:0x%x\n", ++ reg_value_tmp); ++ } ++ PCU_SET_REG(BSM_STATUS, 0x0); // write to clear the status ++ ++ /* ++ * Maybe we will need the following code in the future ++ * if (pcs1_used) { ++ * reg_value_tmp = PCU_GET_REG(PCS1_ST1); ++ * if (PCU_EXTRACT(PCS1_ST1, STS, reg_value_tmp) != PCS_STS_DONE) { ++ * printk("ERROR: PCS1 status is not expected:0x%x\n", reg_value_tmp); ++ * printk("DEBUG: PCS1 status 2 is 0x%x\n", PCU_GET_REG(PCS1_ST2)); ++ * } ++ * PCU_SET_REG(PCS1_ST1, 0x0); // write to clear the status ++ * pcs1_used = 0; ++ * } ++ */ ++ ++ /* if (pcs4_used) { */ ++ reg_value_tmp = PCU_GET_REG(PCS4_ST1); ++ if (PCU_EXTRACT(PCS4_ST1, STS, reg_value_tmp) != PCS_STS_DONE) { ++ printk("ERROR: PCS4 status is not expected:0x%x\n", ++ reg_value_tmp); ++ printk("DEBUG: PCS4 status 2 is 0x%x\n", PCU_GET_REG(PCS4_ST2)); ++ } ++ PCU_SET_REG(PCS4_ST1, 0x0); /* write to clear the status */ ++ /* pcs4_used = 0; */ ++ /* } */ ++ ++} ++ ++void start_fcs(unsigned int pll_n_factor, unsigned org_div_param) ++{ ++ unsigned int pll_range; ++ unsigned int pcs4_prmtr; ++ ++ /* ++ * We must keep the frequency between 266~533. ++ * because this is the frequency having been tested. ++ */ ++ if (org_div_param == 0) { ++ if ((pll_n_factor < 7) || (pll_n_factor > 16)) { /* 266~533 */ ++ printk("\npll_n_factor:%d, org_div_param:%d\n", ++ pll_n_factor, org_div_param); ++ printk ++ ("The input is not accepted! Please input the other value\n"); ++ return; ++ } ++ } else { /* div = 1(the divsior is 2), (pll_n_factor*33MHz/2) */ ++ if (pll_n_factor < 16) { /* (533/2~1000/2) */ ++ printk("\npll_n_factor:%d, org_div_param:%d\n", ++ pll_n_factor, org_div_param); ++ printk ++ ("The input is not accepted! Please input the other value\n"); ++ return; ++ } ++ ++ } ++ /* Setup the pll_range, pcu need to know the frequency which we setup */ ++ if (org_div_param == 0) { ++ if (pll_n_factor <= 15) { ++ pll_range = 2; ++ } else ++ pll_range = 3; ++ } else if (org_div_param == 1) { ++ if (pll_n_factor >= 16 && pll_n_factor <= 30) { ++ pll_range = 2; ++ } else ++ pll_range = 3; ++ } ++ ++ printk("\npll_n_factor:%d, org_div_param:%d, pll_range:%d\n", ++ pll_n_factor, org_div_param, pll_range); ++ /* PCS4 */ ++ PCU_SET_REG(PCS4_CFG, 0); /* stop */ ++ pcs4_prmtr = PCU_PREPARE(PCS4_PARA, IE, 1) | PCU_PREPARE(PCS4_PARA, CMD, PCS_CMD_NOP) | PCU_PREPARE(PCS4_PARA, SYNC, 1) | PCU_PREPARE(PCS4_PARA, PWDN, 0) | PCU_PREPARE(PCS4_PARA, RANGE, pll_range) | /* PLL range */ ++ pll_n_factor; /* PLL N factor */ ++ PCU_SET_REG(PCS4_PARA, pcs4_prmtr); ++ ++ /* BSM */ ++ PCU_SET_REG(BSM_CTRL, PCU_PREPARE(BSM_CTRL, IE, 1) | PCU_PREPARE(BSM_CTRL, CMD, PCS_CMD_SCALING | PCS_CMD_DRAM_SF) | PCU_PREPARE(BSM_CTRL, SYNC, 1) | PCU_PREPARE(BSM_CTRL, LINK0, 4) | /* scaling link start */ ++ PCU_PREPARE(BSM_CTRL, LINK1, 0x0)); /* wakeup link start */ ++ wakeup_and_ddr_train(); ++ check_status(); ++} ++ ++void end_fcs(void) ++{ ++ /* Leave this function as a place marker. 
*/ ++} ++ ++#define LPS_PREC 8 ++void calibration() ++{ ++ unsigned long ticks, loopbit, lpj; ++ int lps_precision = LPS_PREC; ++ ++ lpj = (1 << 12); ++ ++ printk(KERN_INFO "Calibrating delay loop... "); ++ while ((lpj <<= 1) != 0) { ++ /* wait for "start of" clock tick */ ++ ticks = jiffies; ++ while (ticks == jiffies) ++ /* nothing */ ; ++ /* Go .. */ ++ ticks = jiffies; ++ __delay(lpj); ++ ticks = jiffies - ticks; ++ if (ticks) ++ break; ++ } ++ ++ /* ++ * Do a binary approximation to get lpj set to ++ * equal one clock (up to lps_precision bits) ++ */ ++ lpj >>= 1; ++ loopbit = lpj; ++ while (lps_precision-- && (loopbit >>= 1)) { ++ lpj |= loopbit; ++ ticks = jiffies; ++ while (ticks == jiffies) ++ /* nothing */ ; ++ ticks = jiffies; ++ __delay(lpj); ++ if (jiffies != ticks) /* longer than 1 tick */ ++ lpj &= ~loopbit; ++ } ++ ++ printk(KERN_CONT "%lu.%02lu BogoMIPS modified(lpj=%lu)\n", ++ lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100, lpj); ++} ++ ++static int ag102_speedstep(int idx) ++{ ++ unsigned int j; ++ char ch = 'a'; ++ unsigned int sf, cr; ++ unsigned long flags = 0; ++ unsigned long org_div_param; ++ void (*do_fcs) (unsigned int efsf, unsigned int edivhbaclk); ++ int i; ++ int line_size = CACHE_LINE_SIZE(ICACHE); ++ unsigned long start = ++ ((unsigned long)__ddr_fsc_lock_start) & ~(line_size - 1); ++ unsigned long end = ++ (((unsigned long)__ddr_fsc_lock_end) + line_size) & ~(line_size - ++ 1); ++ ++ printk("&__ddr_fsc_lock_start(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)__ddr_fsc_lock_start, start); ++ printk("&__ddr_fsc_lock_end(): 0x%08lx, aligned to: 0x%08lx\n", ++ (unsigned long)__ddr_fsc_lock_end, end); ++ ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_FILLCK":: ++ "r" (i):"memory"); ++ ++ do_fcs = start_fcs; ++ org_div_param = (PCU_GET_REG(PCS1_ST2) >> 4) & 0x3; ++ sf = ag102_run_freqs[idx].sf; ++ if (org_div_param == 1) ++ sf *= 2; ++#if 0 ++ calibration(); ++ printk("pcu read value,before frequency scaling:"); ++ ag102_cpufreq_get(0); ++#endif ++ local_irq_save(flags); ++ do_fcs(sf, org_div_param); ++ local_irq_restore(flags); ++#if 0 ++ printk("\npcu read value,after frequency scaling:"); ++ ag102_cpufreq_get(0); ++#endif ++ ++ for (i = start; i <= end; i += CACHE_LINE_SIZE(ICACHE)) ++ __asm__ volatile ("\n\tcctl %0, L1I_VA_ULCK"::"r" (i):"memory"); ++ ++ return 1; ++} ++ ++static int ag102_set_target(struct cpufreq_policy *policy, ++ unsigned int target_freq, unsigned int relation) ++{ ++ unsigned int idx, i, j; ++ char ch = 'a'; ++ struct cpufreq_frequency_table *ag102_freqs_table; ++ struct ag102_freq_struct *ag102_freq_settings; ++ struct cpufreq_freqs freqs; ++ ++ ag102_freq_settings = ag102_run_freqs; ++ ag102_freqs_table = ag102_run_freq_table; ++ ++ if (target_freq > AG102_MAX_FREQ || target_freq < AG102_MIN_FREQ) { ++ printk ++ ("\nThe frequency you input is illegal!!Please enter the frequency between %d ~ %d\n", ++ AG102_MIN_FREQ, AG102_MAX_FREQ); ++ return -EINVAL; ++ } ++ ++ /* Lookup the next frequency */ ++ if (cpufreq_frequency_table_target ++ (policy, ag102_freqs_table, target_freq, relation, &idx)) ++ return -EINVAL; ++ ++ freqs.old = policy->cur; ++ freqs.new = ag102_freq_settings[idx].khz; ++ freqs.cpu = policy->cpu; ++ ++ /* ++ * Tell everyone what we're about to do... 
++ * you should add a notify client with any platform specific ++ * Vcc changing capability ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); ++ ++ if (freqs.new != freqs.old) { ++ ++ if (!ag102_speedstep(idx)) ++ return -ENODEV; ++ } ++ ++ /* ++ * Tell everyone what we've just done... ++ * you should add a notify client with any platform specific ++ * SDRAM refresh timer adjustments ++ */ ++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); ++#if 0 ++ printk("After CPUFREQ_POSTCHANGE, scaling:\n"); ++ calibration(); ++ printk("Speed test..."); ++ for (i = 0; i < 26; i++) { ++ for (j = 0; j < 500000000; j++) ; ++ printk("%c", ch); ++ ch += 1; ++ } ++ printk("\n"); ++#endif ++ return 0; ++} ++ ++static int ag102_cpufreq_init(struct cpufreq_policy *policy) ++{ ++ int i; ++ /* set default policy and cpuinfo */ ++ policy->governor = CPUFREQ_DEFAULT_GOVERNOR; ++ policy->policy = CPUFREQ_POLICY_PERFORMANCE; ++ policy->cpuinfo.max_freq = AG102_MAX_FREQ; ++ policy->cpuinfo.min_freq = AG102_MIN_FREQ; ++ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ ++ policy->cur = ag102_cpufreq_get(0); /* current freq */ ++ policy->min = AG102_MIN_FREQ; ++ policy->max = AG102_MAX_FREQ; ++ ++ /* Generate the run cpufreq_frequency_table struct */ ++ for (i = 0; i < NUM_RUN_FREQS; i++) { ++ ++ ag102_run_freq_table[i].frequency = ag102_run_freqs[i].khz; ++ ag102_run_freq_table[i].index = i; ++ } ++ ++ ag102_run_freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ printk("CPU frequency change support initialized\n"); ++ ++ return 0; ++} ++ ++static struct cpufreq_driver ag102_cpufreq_driver = { ++ ++ .verify = ag102_verify_policy, ++ .target = ag102_set_target, ++ .init = ag102_cpufreq_init, ++ .get = ag102_cpufreq_get, ++ .name = "AG102", ++}; ++ ++static int __init ag102_cpu_init(void) ++{ ++ return cpufreq_register_driver(&ag102_cpufreq_driver); ++} ++ ++static void __exit ag102_cpu_exit(void) ++{ ++ cpufreq_unregister_driver(&ag102_cpufreq_driver); ++} ++ ++MODULE_AUTHOR("Andes Technology Corporation"); ++MODULE_DESCRIPTION("CPU frequency changing driver for the AG102 architecture"); ++MODULE_LICENSE("GPL"); ++module_init(ag102_cpu_init); ++module_exit(ag102_cpu_exit); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/gmac.h linux-3.4.110/arch/nds32/platforms/ag102/gmac.h +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/gmac.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/gmac.h 2016-04-07 10:20:51.002083345 +0200 +@@ -0,0 +1,354 @@ ++#ifdef CONFIG_PLAT_AG102 ++#include ++#else ++#include ++#endif ++ ++#ifndef __GMAC_H ++#define __GMAC_H ++ ++// ====================================================== ++// GMAC register definition ++// ====================================================== ++// GMAC register ++// ++//#define CPE_GMAC_BASE 0xFF90B000 //VA Base ++#define CPE_DDR2_MEM_BASE 0x00000000 ++ ++#define BIT_MASK(bit_h, bit_l) ((((UINT32)0x1<<(1+bit_h-bit_l))-(UINT32)0x1)< ++#include ++#include ++ ++#include ++ ++#define LPC_REG_SCR 0x10 ++#define LPC_REG_SIR 0x14 ++#define LPC_REG_SIMR 0x18 ++ ++/* ++ * Level trigger IRQ chip methods ++ */ ++ ++static void lpc_level_unmask_irq(unsigned int irq) ++{ ++ unsigned int val; ++// *(volatile unsigned int *) (LPC_REG_BASE + LPC_REG_SIR) = ++// 1 << (irq - PLATFORM_LPC_IRQ_BASE); ++ val = *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR); ++ val &= ~(1 << (irq - PLATFORM_LPC_IRQ_BASE)); ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR) = val; ++} ++ ++static void 
lpc_level_mask_irq(unsigned int irq) ++{ ++ unsigned int val; ++ val = *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR); ++ val |= 1 << (irq - PLATFORM_LPC_IRQ_BASE); ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR) = val; ++} ++ ++static void lpc_level_ack_irq(unsigned int irq) ++{ ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIR) = ++ 1 << (irq - PLATFORM_LPC_IRQ_BASE); ++} ++ ++static struct irq_chip lpc_simple_chip = { ++ .ack = lpc_level_ack_irq, ++ .mask = lpc_level_mask_irq, ++ .unmask = lpc_level_unmask_irq, ++}; ++ ++void lpc_irq_rounter(unsigned int irq, struct irq_desc *desc) ++{ ++ unsigned int lpc_status; ++ unsigned int lpc_mask; ++ unsigned int lpc_irq; ++ int i; ++ struct irq_desc *lpc_desc; ++ ++ desc->chip->mask(irq); ++ desc->chip->ack(irq); ++ ++ lpc_status = *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIR); ++ lpc_mask = *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR); ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR) = 0xffffffff; ++ for (i = 0; i < PLATFORM_LPC_IRQ_TOTALCOUNT; i++) { ++ if (!(~lpc_mask & (1 << i) & lpc_status)) ++ continue; ++ lpc_irq = PLATFORM_LPC_IRQ_BASE + i; ++ lpc_desc = irq_desc + lpc_irq; ++ lpc_desc->handle_irq(lpc_irq, lpc_desc); ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIR) = ++ 1 << i; ++ } ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SIMR) = lpc_mask; ++ ++ desc->chip->unmask(irq); ++} ++ ++int __init lpc_init_irq(void) ++{ ++ int i; ++ ++ *(volatile unsigned int *)(LPC_REG_VA_BASE + LPC_REG_SCR) = 0x1f3; ++ /* Register all IRQ */ ++ for (i = PLATFORM_LPC_IRQ_BASE; ++ i < PLATFORM_LPC_IRQ_BASE + PLATFORM_LPC_IRQ_TOTALCOUNT; i++) { ++ // level trigger ++ set_irq_chip(i, &lpc_simple_chip); ++ set_irq_handler(i, handle_simple_irq); ++ ++ } ++ set_irq_chained_handler(LPC_IRQ, lpc_irq_rounter); ++ ++ return 0; ++} ++ ++device_initcall(lpc_init_irq); ++ ++#define ITE_ADDR 0x2e ++#define ITE_DATA 0x2f ++void outlpc(unsigned int addr, unsigned int data) ++{ ++ *(volatile unsigned int *)(LPC_IO_VA_BASE + 4 * addr) = data; ++} ++ ++unsigned int inlpc(unsigned int addr) ++{ ++ return *(volatile unsigned int *)(LPC_IO_VA_BASE + 4 * addr); ++} ++ ++int __init ite8717_init(void) ++{ ++ unsigned char data1, data2; ++ unsigned int count; ++ /* enter configure mode */ ++ outlpc(ITE_ADDR, 0x87); ++ outlpc(ITE_ADDR, 0x01); ++ outlpc(ITE_ADDR, 0x55); ++ outlpc(ITE_ADDR, 0x55); ++ /* check chip */ ++ outlpc(ITE_ADDR, 0x20); ++ data1 = inlpc(ITE_DATA); ++ outlpc(ITE_ADDR, 0x21); ++ data2 = inlpc(ITE_DATA); ++ if ((data1 != 0x87) && (data2 != 0x17)) ++ goto not_found; ++ /* earlyio program */ ++ outlpc(ITE_ADDR, 0x07); // LDN=0 -> FDC ++ outlpc(ITE_DATA, 0x00); ++ outlpc(ITE_ADDR, 0x30); // Enable FDC ++ outlpc(ITE_DATA, 0x01); ++ outlpc(ITE_ADDR, 0xf1); // Set (Index 0F1h) = 90h ++ outlpc(ITE_DATA, 0x80); ++ ++ outlpc(0x64, 0xaa); // Send KBC self-test command ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = ~inlpc(0x64); ++ } while (data1 & 0x01); ++ data2 = inlpc(0x60); ++ outlpc(0x64, 0xcb); // Set PS2 mode ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = inlpc(0x64); ++ } while (data1 & 0x02); ++ outlpc(0x60, 0x01); ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = inlpc(0x64); ++ } while (data1 & 0x02); ++ outlpc(0x64, 0x60); // 60h = write 8042 command byte ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = inlpc(0x64); ++ } while (data1 & 0x02); ++ outlpc(0x60, 0x45); // AT interface, 
keyboard enabled, system flag ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = inlpc(0x64); ++ } while (data1 & 0x02); ++ outlpc(0x64, 0xae); ++ count = 0; ++ do { ++ if (count++ > 0x10000) ++ break; ++ data1 = ~inlpc(0x64); ++ } while (data1 & 0x01); ++ outlpc(ITE_ADDR, 0x23); ++ outlpc(ITE_DATA, 0x00); ++ ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x04); ++ outlpc(ITE_ADDR, 0xf0); ++ data1 = inlpc(ITE_DATA); ++ data1 &= 0x18; ++ data1 |= 0x0; ++ outlpc(ITE_DATA, data1); ++ outlpc(ITE_ADDR, 0xf2); ++ data1 = inlpc(ITE_DATA); ++ data1 &= 0x2e; ++ data1 |= 0xa; ++ outlpc(ITE_DATA, data1); ++ outlpc(ITE_ADDR, 0xf4); ++ data1 = inlpc(ITE_DATA); ++ data1 &= 0xaf; ++ data1 |= 0x80; ++ outlpc(ITE_DATA, data1); ++ outlpc(ITE_ADDR, 0xf5); ++ data1 = inlpc(ITE_DATA); ++ data1 &= 0x3f; ++ data1 |= 0x0; ++ outlpc(ITE_DATA, data1); ++ /* initialize all device */ ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x04); ++ outlpc(ITE_ADDR, 0x30); ++ outlpc(ITE_DATA, 0x01); ++#if 1 ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x05); ++ outlpc(ITE_ADDR, 0xf0); ++ outlpc(ITE_DATA, 0x4e); ++ outlpc(ITE_ADDR, 0x71); ++ outlpc(ITE_DATA, 0x01); ++#endif ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x06); ++ outlpc(ITE_ADDR, 0x30); ++ outlpc(ITE_DATA, 0x01); ++#if 1 ++ outlpc(ITE_ADDR, 0xf0); ++ outlpc(ITE_DATA, 0x01); ++ outlpc(ITE_ADDR, 0x71); ++ outlpc(ITE_DATA, 0x01); ++#endif ++ ++ /* exit configure mode */ ++ outlpc(ITE_ADDR, 0x02); ++ outlpc(ITE_DATA, 0x02); ++ return 0; ++not_found: ++ printk("ITE8717 not found\n"); ++ return -1; ++} ++ ++subsys_initcall(ite8717_init); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/Makefile linux-3.4.110/arch/nds32/platforms/ag102/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/Makefile 2016-04-07 10:20:51.002083345 +0200 +@@ -0,0 +1,5 @@ ++obj-y += devices.o ahbclkcal.o ++obj-$(CONFIG_LPC) += lpc.o ++obj-$(CONFIG_PM) += pm.o sleep.o ++obj-$(CONFIG_AG102_CPU_FREQ_FCS) += cpu-fcs.o ++obj-$(CONFIG_AG102_CPU_FREQ_SCALING_MODE) += freq-scaling.o +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/pcu.h linux-3.4.110/arch/nds32/platforms/ag102/pcu.h +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/pcu.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/pcu.h 2016-04-07 10:20:51.002083345 +0200 +@@ -0,0 +1,1887 @@ ++#ifndef __PCU_H ++#define __PCU_H ++#include ++#define CPE_PCU_BASE PCU_VA_BASE ++//Data type ++typedef enum Bool { ++ FALSE, ++ TRUE ++} BOOL; ++ ++typedef enum { ++ SUCCESS=0, ++ FAIL ++} STATUS; ++ ++typedef unsigned char UINT8; ++typedef char INT8; ++typedef unsigned short UINT16; ++typedef short INT16; ++typedef unsigned int UINT32; ++typedef int INT32; ++typedef unsigned long long UINT64; ++typedef long long INT64; ++ ++//Registion IO operation macro ++#define REG32(a) (*(volatile UINT32 *)(a)) ++#define REG16(a) (*(volatile UINT16 *)(a)) ++#define REG8(a) (*(volatile UINT8 *)(a)) ++ ++#define inb(a) REG8(a) ++#define inhw(a) REG16(a) ++#define inw(a) REG32(a) ++ ++#define outb(a, v) (REG8(a) = (UINT8)(v)) ++#define outhw(a, v) (REG16(a) = (UINT16)(v)) ++#define outw(a, v) (REG32(a) = (UINT32)(v)) ++ ++ ++// Register bit operation macro ++#define ANDES_BIT_MASK(bit_h, bit_l) ((((UINT32)0x1<<(1+bit_h-bit_l))-(UINT32)0x1)<> (offset)) ++ ++#define TEST_FIELD(addr, mask) (inw(addr)&(mask)) ++ ++#define SET_REG(addr, value) do { outw(addr, value); } while (0) ++#define GET_REG(addr) 
(inw(addr)) ++ ++#define CHECK_FIELD(value, mask) ( (value)&(mask) ) ++#define EXTRACT_FIELD(value, mask, offset) ( ((value)&(mask))>>(offset) ) ++#define PREPARE_FIELD(value, mask, offset) ( ((value)<<(offset))&(mask) ) ++ ++ ++// Variable bit operation macro ++#define VAR_TEST_BIT(var, sig) ((var)&(sig)) ++#define VAR_SET_BIT(var, sig) ((var) = (var)|(sig)) ++#define VAR_CLR_BIT(var, sig) ((var) = (var)&(~(sig))) ++// ============================ ++// PCU register definition ++// ============================ ++#define PCU_REG_VER (CPE_PCU_BASE+0x000) //version ++#define PCU_REG_SPINFO (CPE_PCU_BASE+0x004) //scartch pad information ++#define PCU_REG_SOCID (CPE_PCU_BASE+0x010) //SoC ID ++#define PCU_REG_AHB_CFG (CPE_PCU_BASE+0x014) //AHB device configuration ++#define PCU_REG_APB_CFG (CPE_PCU_BASE+0x018) //APB device configuration ++#define PCU_REG_DCSR0 (CPE_PCU_BASE+0x020) //driving capability and slew rate control 0 ++#define PCU_REG_DCSR1 (CPE_PCU_BASE+0x024) //driving capability and slew rate control 1 ++#define PCU_REG_DCSR2 (CPE_PCU_BASE+0x028) //driving capability and slew rate control 2 ++#define PCU_REG_MFPS0 (CPE_PCU_BASE+0x030) //multi-function port setting 0 ++#define PCU_REG_MFPS1 (CPE_PCU_BASE+0x034) //multi-function port setting 1 ++#define PCU_REG_DMA_SEL (CPE_PCU_BASE+0x038) //dma engin selection ++#define PCU_REG_OSC_CTRL (CPE_PCU_BASE+0x040) //OSC control register ++#define PCU_REG_PWM_DIV (CPE_PCU_BASE+0x044) //PWM clock divider value ++#define PCU_REG_MISC (CPE_PCU_BASE+0x048) //misc register ++#define PCU_REG_BSM_CTRL (CPE_PCU_BASE+0x080) //BSM control register ++#define PCU_REG_BSM_STATUS (CPE_PCU_BASE+0x084) //BSM status ++#define PCU_REG_WAKEUP_SEN (CPE_PCU_BASE+0x088) //wakeup event signal sensitivity ++#define PCU_REG_WAKEUP_STATUS (CPE_PCU_BASE+0x08c) //wakeup event status ++#define PCU_REG_RESET_TIMER (CPE_PCU_BASE+0x090) //reset timer register ++#define PCU_REG_INTR (CPE_PCU_BASE+0x094) //interrup register ++#define PCU_REG_PCS1_CFG (CPE_PCU_BASE+0x0a0) ++#define PCU_REG_PCS1_PARA (CPE_PCU_BASE+0x0a4) ++#define PCU_REG_PCS1_ST1 (CPE_PCU_BASE+0x0a8) ++#define PCU_REG_PCS1_ST2 (CPE_PCU_BASE+0x0ac) ++#define PCU_REG_PCS1_PDD (CPE_PCU_BASE+0x0b0) ++#define PCU_REG_PCS2_CFG (CPE_PCU_BASE+0x0c0) ++#define PCU_REG_PCS2_PARA (CPE_PCU_BASE+0x0c4) ++#define PCU_REG_PCS2_ST1 (CPE_PCU_BASE+0x0c8) ++#define PCU_REG_PCS2_ST2 (CPE_PCU_BASE+0x0cc) ++#define PCU_REG_PCS2_PDD (CPE_PCU_BASE+0x0d0) ++#define PCU_REG_PCS3_CFG (CPE_PCU_BASE+0x0e0) ++#define PCU_REG_PCS3_PARA (CPE_PCU_BASE+0x0e4) ++#define PCU_REG_PCS3_ST1 (CPE_PCU_BASE+0x0e8) ++#define PCU_REG_PCS3_ST2 (CPE_PCU_BASE+0x0ec) ++#define PCU_REG_PCS3_PDD (CPE_PCU_BASE+0x0f0) ++#define PCU_REG_PCS4_CFG (CPE_PCU_BASE+0x100) ++#define PCU_REG_PCS4_PARA (CPE_PCU_BASE+0x104) ++#define PCU_REG_PCS4_ST1 (CPE_PCU_BASE+0x108) ++#define PCU_REG_PCS4_ST2 (CPE_PCU_BASE+0x10c) ++#define PCU_REG_PCS4_PDD (CPE_PCU_BASE+0x110) ++#define PCU_REG_PCS5_CFG (CPE_PCU_BASE+0x120) ++#define PCU_REG_PCS5_PARA (CPE_PCU_BASE+0x124) ++#define PCU_REG_PCS5_ST1 (CPE_PCU_BASE+0x128) ++#define PCU_REG_PCS5_ST2 (CPE_PCU_BASE+0x12c) ++#define PCU_REG_PCS5_PDD (CPE_PCU_BASE+0x130) ++#define PCU_REG_PCS6_CFG (CPE_PCU_BASE+0x140) ++#define PCU_REG_PCS6_PARA (CPE_PCU_BASE+0x144) ++#define PCU_REG_PCS6_ST1 (CPE_PCU_BASE+0x148) ++#define PCU_REG_PCS6_ST2 (CPE_PCU_BASE+0x14c) ++#define PCU_REG_PCS6_PDD (CPE_PCU_BASE+0x150) ++#define PCU_REG_PCS7_CFG (CPE_PCU_BASE+0x160) ++#define PCU_REG_PCS7_PARA (CPE_PCU_BASE+0x164) ++#define PCU_REG_PCS7_ST1 
(CPE_PCU_BASE+0x168) ++#define PCU_REG_PCS7_ST2 (CPE_PCU_BASE+0x16c) ++#define PCU_REG_PCS7_PDD (CPE_PCU_BASE+0x170) ++#define PCU_REG_PCS8_CFG (CPE_PCU_BASE+0x180) ++#define PCU_REG_PCS8_PARA (CPE_PCU_BASE+0x184) ++#define PCU_REG_PCS8_ST1 (CPE_PCU_BASE+0x188) ++#define PCU_REG_PCS8_ST2 (CPE_PCU_BASE+0x18c) ++#define PCU_REG_PCS8_PDD (CPE_PCU_BASE+0x190) ++#define PCU_REG_PCS9_CFG (CPE_PCU_BASE+0x1a0) ++#define PCU_REG_PCS9_PARA (CPE_PCU_BASE+0x1a4) ++#define PCU_REG_PCS9_ST1 (CPE_PCU_BASE+0x1a8) ++#define PCU_REG_PCS9_ST2 (CPE_PCU_BASE+0x1ac) ++#define PCU_REG_PCS9_PDD (CPE_PCU_BASE+0x1b0) ++ ++#define PCU_SCRATCH_OFFSET_SHIFT (8) ++#define PCU_SCRATCH_SIZE_SHIFT (2) // in byte, 2 means (1<<2) = 4 bytes ++#define PCU_REG_SCRATCH_MEM (CPE_PCU_BASE+ (PCU_SPINFO_OFFSET_DEFAULT< ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License. ++ * ++ * Abstract: ++ * ++ * This program is for AG102 power management routines. ++ * ++ * Revision History: ++ * ++ * Jul.19.2010 Initial code by Gavin. ++ */ ++#include ++#include ++#include ++#include ++#include ++/* ++ * the following include file is for testing device node ++ */ ++#include ++#include ++#include ++/*************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcu.h" ++#include "gmac.h" ++ ++#ifdef CONFIG_PLAT_AG102 ++#include ++#endif ++ ++#define ANDES_PCU_STRING "andes_pcu" ++static int andes_pcu_major; ++static struct class *andes_pcu_class; ++extern void ag102_cpu_sleep(void); ++extern void ag102_cpu_resume(void); ++extern void ag102_cpu_resume2(void); ++extern void __SELF_REFRESH_LOCK_START(); ++extern void __SELF_REFRESH_LOCK_END(); ++extern void ftpci_postinit(void /**sysdata*/ ); ++UINT32 mac_dah, mac_dal; ++ ++// ADD by river 2010.12.07 for WOL ++ ++UINT32 eth_phy_reg_read(UINT32 phy_addr, UINT32 phy_page, UINT32 phy_reg, ++ UINT32 * phy_data) ++{ ++ UINT32 cycthr = GMAC_GET_FIELD(PHYCR, MDC_CYCTHR); ++ UINT32 wdata; ++ UINT32 rdata; ++ //TIMER_P timer; ++ ++ printk(">>>>> GMAC : Calling eth_phy_reg_read()...\n"); ++ ++ wdata = GMAC_PREPARE(PHYCR, MIIRD, 0x1) | ++ GMAC_PREPARE(PHYCR, PHYAD, phy_addr) | ++ GMAC_PREPARE(PHYCR, REGAD, phy_reg) | cycthr; ++ ++ printk(">>>>> GMAC : wdata = [0x%08x]\n", wdata); ++ ++ GMAC_SET_REG(PHYCR, wdata); ++ ++ // wait phy data read ++ //TIMER_INIT(timer, 10000); // trigger timer & wait until finish of data read ++ //while ( (!TIMER_IS_TIMEOUT(timer)) && (GMAC_GET_FIELD(PHYCR, MIIRD))) { ++ // TIMER_TICK(timer); ++ //} ++ ++ while (GMAC_GET_FIELD(PHYCR, MIIRD)) { ++ mdelay(50); ++ } ++ ++ if (GMAC_GET_FIELD(PHYCR, MIIRD)) { ++ printk("ERR: GPHY read reg[%x] timeout!\n", phy_reg); ++ return 1; ++ } else { ++ rdata = GMAC_GET_FIELD(PHYDATA, MIIRDATA); ++ *phy_data = rdata; ++ printk("GPHY read [%x] \n", rdata); ++ return 0; ++ } ++} ++ ++UINT32 eth_phy_reg_write(UINT32 phy_addr, UINT32 phy_page, UINT32 phy_reg, ++ UINT32 phy_data) ++{ ++ UINT32 wdata; ++ //TIMER_P timer; ++ ++ printk(">>>>> GMAC : Calling eth_phy_reg_write()...\n"); ++ ++ GMAC_SET_FIELD(PHYDATA, MIIWDATA, phy_data); ++ wdata = GMAC_PREPARE(PHYCR, MIIWR, 0x1) | GMAC_PREPARE(PHYCR, PHYAD, phy_addr) | GMAC_PREPARE(PHYCR, REGAD, phy_reg) | 0x34; //cycthr; ++ GMAC_SET_REG(PHYCR, wdata); ++ ++ // wait phy data write ++ //TIMER_INIT(timer, 10000); // trigger timer & wait until finish of data write ++ //while ( (!TIMER_IS_TIMEOUT(timer)) && (GMAC_GET_FIELD(PHYCR, MIIWR))) { ++ // 
TIMER_TICK(timer); ++ //} ++ ++ while (GMAC_GET_FIELD(PHYCR, MIIWR)) { ++ mdelay(50); ++ } ++ ++ if (GMAC_GET_FIELD(PHYCR, MIIWR)) { ++ printk("ERR: GPHY write reg[%x] = %x timeout!\n", phy_reg, ++ phy_data); ++ return 1; ++ } else { ++ return 0; ++ } ++} ++ ++INT32 eth_phy_detect(UINT32 * phy_addr, UINT32 * phy_id) ++{ ++ UINT32 i, find, data1, data2; ++ ++ printk(">>>>> GMAC : Calling eth_phy_detect()...\n"); ++ ++ for (i = 0, find = 0; i <= 0x1f; i++) { ++ eth_phy_reg_read(i, 0, 2, &data1); ++ if ((data1 != 0) && (data1 != 0xffff)) { ++ find = 1; ++ break; ++ } ++ } ++ ++ if (find == 0) { ++ printk("Err: no valid phy found!\n"); ++ *phy_id = 0; ++ return (-1); ++ } ++ ++ eth_phy_reg_read(i, 0, 2, &data1); ++ eth_phy_reg_read(i, 0, 3, &data2); ++ *phy_id = (data1 << 16) | (data2 & 0xffff); ++ *phy_addr = i; ++ printk("Info: phy id = 0x%08x!\n", *phy_id); ++ return 0; ++} ++ ++void eth_phy_init(UINT32 phy_addr) ++{ ++ UINT32 data, i; ++ //TIMER_P timer; ++ //phy_addr = DVC_PHY_ADDR; ++ ++ printk(">>>>> GMAC : Calling eth_phy_init()...\n"); ++ // GPHY SW reset ++ eth_phy_reg_read(phy_addr, 0, DVC_PHY_REG_CTL, &data); ++ data |= ++ DVC_PHY_REG_CTL_SW_RST | DVC_PHY_REG_CTL_AN_EN | ++ DVC_PHY_REG_CTL_AN_RST; ++ eth_phy_reg_write(phy_addr, 0, DVC_PHY_REG_CTL, data); ++ for (i = 0; i < 10000; i++) { // must delay enough for phy to get ready ++ //eth_mdelay(2000); ++ mdelay(2); ++ eth_phy_reg_read(phy_addr, 0, DVC_PHY_REG_CTL, &data); ++ if ((data & 0x8000) == 0) ++ break; ++ } ++ if ((data & 0x8000) != 0) { ++ printk("Err: phy sw reset timeout!!!\n"); ++ } ++ eth_phy_reg_read(phy_addr, 0, DVC_PHY_REG_CTL, &data); ++ printk("phy control = 0x%x\n", data); ++ eth_phy_reg_read(phy_addr, 0, DVC_PHY_REG_STS, &data); ++ printk("phy status = 0x%x\n", data); ++ ++ return; ++} ++ ++// initialize MAC setting ++void eth_mac_init(ETH_PHY_STAT * phy_stat) ++{ ++ UINT32 i, wdata, rdata; ++ UINT32 duplex, speed, gmac; ++ //UINT32 mac_dah, mac_dal; ++ printk(">>>>> GMAC : Calling eth_mac_init()...\n"); ++ ++ wdata = GMAC_PREPARE(MACCR, SW_RST, 0x1); ++ GMAC_SET_REG(MACCR, wdata); ++ for (i = 0; i < 1000; i++) { // must delay enough for phy to get ready ++ //eth_mdelay(8000); // delay should be enough ++ mdelay(8); ++ rdata = GMAC_GET_REG(FEAR); ++ if ((rdata >> 31) == 0) ++ break; ++ } ++ if ((rdata >> 31) != 0) { ++ printk("Err: mac sw reset timeout!!!\n"); ++ } ++ if (phy_stat->speed == 2) { //giga ++ gmac = 0x1; ++ speed = 0x1; ++ } else if (phy_stat->speed == 1) { //100 Mbps ++ gmac = 0x0; ++ speed = 0x1; ++ } else { //10 Mbps ++ gmac = 0x0; ++ speed = 0x0; ++ } ++ duplex = phy_stat->duplex; ++ GMAC_SET_REG(ISR, 0xffff); // Interrupt Status, write 1 to clear ++ GMAC_SET_REG(IME, 0xffff); // Interrupt Enable, enabling all interrupts ++ GMAC_SET_REG(MAC_MADR, mac_dah); ++ GMAC_SET_REG(MAC_LADR, mac_dal); ++ rdata = GMAC_GET_REG(FEAR); ++ GMAC_SET_FIELD(TPAFCR, TFIFO_SIZE, ++ GMAC_EXTRACT(FEAR, TFIFO_RSIZE, rdata)); ++ GMAC_SET_FIELD(TPAFCR, RFIFO_SIZE, ++ GMAC_EXTRACT(FEAR, RFIFO_RSIZE, rdata)); ++ ++ wdata = GMAC_PREPARE(MACCR, RX_MULTIPKT_EN, 0x1) | ++ GMAC_PREPARE(MACCR, BROADPKT_EN, 0x1) | ++ GMAC_PREPARE(MACCR, RX_ALLADR, 0x1) | ++ GMAC_PREPARE(MACCR, CRC_APD, 0x1) | ++ GMAC_PREPARE(MACCR, FULLDUP, duplex) | ++ GMAC_PREPARE(MACCR, RX_RUNT, 0x1) | ++ GMAC_PREPARE(MACCR, SPEED, speed) | ++ GMAC_PREPARE(MACCR, GMAC_MODE, gmac) | ++ GMAC_PREPARE(MACCR, RXMAC_EN, 0x1) | ++ GMAC_PREPARE(MACCR, TXMAC_EN, 0x1) | ++ GMAC_PREPARE(MACCR, RXDMA_EN, 0x1) | ++ GMAC_PREPARE(MACCR, TXDMA_EN, 0x1); ++ GMAC_SET_REG(MACCR, 
wdata); ++ ++#ifdef AHBC_NO_REMAP ++ GMAC_SET_REG(NPTXR_BADR, FTGMAC100_TXR_BASE | 0x40000000); ++ GMAC_SET_REG(RXR_BADR, FTGMAC100_RXR_BASE | 0x40000000); ++#else ++ GMAC_SET_REG(NPTXR_BADR, FTGMAC100_TXR_BASE); ++ GMAC_SET_REG(RXR_BADR, FTGMAC100_RXR_BASE); ++#endif ++ return; ++} ++ ++UINT32 eth_phy_stat(UINT32 phy_addr, ETH_PHY_STAT * phy_stat) ++{ ++ ++ phy_stat->speed = 0x1; ++ phy_stat->duplex = 0x1; ++ ++ return 0; ++} ++ ++void gmac_set_wol() ++{ ++ UINT32 wdata; ++ ++ // Set Rx Wake-up Frame ++ // write 1 clear register before enable power saving mode ++ GMAC_SET_REG(WOLSR, 0xffffffff); // clear wol status ++ ++ // set wakeup_sel and power saving mode enable ++ wdata = GMAC_PREPARE(WOLCR, MAGICPKT_EN, 0x1) | ++ GMAC_PREPARE(WOLCR, PWRSAV, 0x1); ++ ++ GMAC_SET_REG(WOLCR, wdata); ++ ++ return; ++} ++ ++// End ADD by river 2010.12.07 for WOL ++ ++//////////////////////////////// LPC wake up /////////////////////////////////////////////////// ++#define CPE_LPCIO_BASE LPC_IO_VA_BASE //VA Base ++#define CPE_LPCREG_BASE LPC_REG_VA_BASE //VA Base ++#define CPE_IC_BASE AMIC_VA_BASE ++ ++#define ITE_ADDR 0x2e ++#define ITE_DATA 0x2f ++ ++#define KBC_CMD 0x64 ++#define KBC_STATUS KBC_CMD ++#define KBC_DATA 0x60 ++ ++#define BIT_OBF 0x01 ++#define BIT_IBF 0x02 ++ ++#define LPC_REG_SCR 0x10 ++#define LPC_REG_SIR 0x14 ++#define LPC_REG_SIMR 0x18 ++ ++#define SIRQ_KB 1 ++#define SIRQ_MS 12 ++ ++//#define outlpc(addr, data) outb(data, CPE_LPCIO_BASE + 4 * addr) ++//#define inlpc(addr) inb(CPE_LPCIO_BASE + 4 * addr) ++#define inlpc(addr) REG32(CPE_LPCIO_BASE + 4 * addr) ++ ++#define IRQ_LPC 29 ++#define LEVEL 0 ++#define H_ACTIVE 0 ++#define L_ACTIVE 1 ++#define IRQ_MASK 0x80 ++#define IRQ_MODE 0x20 ++#define IRQ_LEVEL 0x24 ++ ++UINT32 IRQSources = 0; //define the current irq source for debug ++ ++static inline void outlpc(unsigned long addr, unsigned long data) ++{ ++ REG32(CPE_LPCIO_BASE + 4 * addr) = data; ++} ++ ++static void kbc_cmd(UINT8 cmd) ++{ ++ int loop_limit = 1000; ++ int i; ++ UINT8 tmpc; ++ ++ // wait until the input buffer is empty ++ for (i = 0; i < loop_limit; i++) { ++ tmpc = inlpc(KBC_STATUS); ++ if ((tmpc & BIT_IBF) == 0) ++ break; ++ } ++ ++ outlpc(KBC_CMD, cmd); ++} ++ ++static void kbc_wdata(UINT8 wdata) ++{ ++ int loop_limit = 1000; ++ int i; ++ UINT8 tmpc; ++ ++ // wait until the input buffer is empty ++ for (i = 0; i < loop_limit; i++) { ++ tmpc = inlpc(KBC_STATUS); ++ if ((tmpc & BIT_IBF) == 0) ++ break; ++ } ++ ++ outlpc(KBC_DATA, wdata); ++} ++ ++static UINT8 kbc_rdata(void) ++{ ++ int loop_limit = 1000; ++ int i; ++ UINT8 rdata; ++ UINT8 tmpc; ++ ++ // wait until the output buffer is not empty ++ for (i = 0; i < loop_limit; i++) { ++ tmpc = inlpc(KBC_STATUS); ++ if (tmpc & BIT_OBF) ++ break; ++ } ++ ++ rdata = inlpc(KBC_DATA); ++ return (rdata); ++} ++ ++static void delay_loop(int max_no) ++{ ++ int i; ++ ++ for (i = 0; i < max_no; i++) ; ++} ++ ++static UINT8 get_response(void) ++{ ++ UINT8 tmpc; ++ ++ delay_loop(200000); ++ tmpc = kbc_rdata(); ++ return (tmpc); ++} ++ ++int it8718f_init(void) ++{ ++ static int initialized = 0; ++ UINT8 tmpc; ++ unsigned int chip_id; ++ ++ if (initialized == 1) ++ return (0); ++ ++ // Enter the configuration mode ++ outlpc(ITE_ADDR, 0x87); ++ outlpc(ITE_ADDR, 0x01); ++ outlpc(ITE_ADDR, 0x55); ++ outlpc(ITE_ADDR, 0x55); ++ ++ // Check the chip ID ++ printk("Check IT8718F chip ID => "); ++ outlpc(ITE_ADDR, 0x20); ++ tmpc = inlpc(ITE_DATA); ++ chip_id = (tmpc & 0xff) << 8; ++ outlpc(ITE_ADDR, 0x21); ++ tmpc = inlpc(ITE_DATA); ++ 
chip_id |= tmpc & 0xff; ++ ++ if (chip_id != 0x8718) { ++ printk("FAILED with chip ID = 0x%C%C.\n", (chip_id >> 8) & 0xff, ++ chip_id & 0xff); ++ return (1); ++ } else ++ printk("PASSED\n"); ++ ++ // KBC Self Test ++ printk("KBC self-test => "); ++ kbc_cmd(0xaa); ++ tmpc = kbc_rdata(); ++ if (tmpc != 0x55) { ++ printk("FAILED with code = 0x%C\n", tmpc & 0xff); ++ return (2); ++ } else ++ printk("PASSED\n"); ++ ++ // KBC Interface Test ++ printk("KBC interface test => "); ++ kbc_cmd(0xab); ++ tmpc = kbc_rdata(); ++ switch (tmpc) { ++ case 0: ++ printk("PASSED\n"); ++ break; ++ ++ case 1: ++ printk("FAILED as the clock is stuck low\n"); ++ return (3); ++ ++ case 2: ++ printk("FAILED as the clock is stuck high\n"); ++ return (3); ++ ++ case 3: ++ printk("FAILED as the data is stuck low\n"); ++ return (3); ++ ++ case 4: ++ printk("FAILED as the data is stuck high\n"); ++ return (3); ++ ++ default: ++ printk("FAILED with unknown error\n"); ++ return (3); ++ break; ++ } ++ ++ // Read the KBC mode ++ kbc_cmd(0xca); ++ tmpc = kbc_rdata(); ++ ++ // Set KBC to the PS/2 mode ++ tmpc |= 0x01; ++ kbc_cmd(0xcb); ++ kbc_wdata(tmpc); ++ ++ // Enable keyboard ++ kbc_wdata(0xf4); ++ tmpc = get_response(); ++ printk("@@@@@ : Response of enabling keyboard = 0x%x\n", tmpc & 0xff); ++ ++ // Enable mouse ++ kbc_cmd(0xd4); ++ kbc_wdata(0xf4); ++ tmpc = get_response(); ++ //outmsg("Response of enabling mouse = 0x%C\n", tmpc & 0xff); ++ ++ // bit 6: translate ++ // bit 5: mouse enable ++ // bit 4: keyboard enable ++ // bit 3: ignore keyboard lock ++ // bit 2: system flag ++ // bit 1: mouse interrupt enable ++ // bit 0: Keyboard interrupt enable ++ kbc_cmd(0x60); ++ kbc_wdata(0x47); ++ ++ // Set repeat rate and delay ++ kbc_wdata(0xf3); ++ kbc_wdata(0x00); ++ tmpc = get_response(); ++ ++ // Set LDN = 5 (Keyboard) ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x05); ++ ++ // KBC clock = 8MHz, Key lock enabled, interrupt type can be changed ++ outlpc(ITE_ADDR, 0xf0); ++ outlpc(ITE_DATA, 0x4e); ++ ++ // Low-level triggered interrupt ++ outlpc(ITE_ADDR, 0x71); ++ outlpc(ITE_DATA, 0x01); ++ ++ // Set LDN = 6 (Mouse) ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x06); ++ ++ // Enable mouse ++ outlpc(ITE_ADDR, 0x30); ++ outlpc(ITE_DATA, 0x01); ++ ++ // Interrupt type can be changed ++ outlpc(ITE_ADDR, 0xf0); ++ outlpc(ITE_DATA, 0x01); ++ ++ // Low-level triggered interrupt ++ outlpc(ITE_ADDR, 0x71); ++ outlpc(ITE_DATA, 0x01); ++ ++ // Exit the configuration mode ++ outlpc(ITE_ADDR, 0x02); ++ outlpc(ITE_DATA, 0x02); ++ ++ initialized = 1; ++ return (0); ++} ++ ++//INT for lpc ++/* Turn the interrupt source on. 
*/ ++void UnmaskIRQ(UINT32 IRQ) ++{ ++ volatile UINT32 *IRQBase; ++ ++ IRQBase = (UINT32 *) CPE_IC_BASE; ++ ++ IRQBase[(IRQ_MASK / sizeof(UINT32))] |= (1 << IRQ); ++} ++ ++void EnableIRQ() ++{ ++ ++ __asm__ volatile ("setgie.d\n\t" ++ "isb\n\t" ++ "mfsr $r1, $INT_MASK\n\t" ++ "ori $r1, $r1, #0x3f\n\t" ++ "mtsr $r1, $INT_MASK\n\t" ++ "setgie.e\n\t" "isb\n\t"); ++ ++} ++ ++void SetIRQmode(UINT32 IRQ, UINT32 edge) ++{ ++ volatile UINT32 *IRQBase; ++ ++ IRQBase = (UINT32 *) CPE_IC_BASE; ++ ++ if (edge) ++ IRQBase[(IRQ_MODE / sizeof(UINT32))] |= (1 << IRQ); ++ else ++ IRQBase[(IRQ_MODE / sizeof(UINT32))] &= ~(1 << IRQ); ++} ++ ++void SetIRQlevel(UINT32 IRQ, UINT32 low) ++{ ++ volatile UINT32 *IRQBase; ++ ++ IRQBase = (UINT32 *) CPE_IC_BASE; ++ ++ if (low) ++ IRQBase[(IRQ_LEVEL / sizeof(UINT32))] |= (1 << IRQ); ++ else ++ IRQBase[(IRQ_LEVEL / sizeof(UINT32))] &= ~(1 << IRQ); ++} ++ ++BOOL SetIntTrig(UINT32 intNum, int intMode, int intLevel) ++{ ++ if (intNum >= 32) { ++ printk("ERROR: The interrupt number %d is incorrect\n", intNum); ++ return FALSE; ++ } else { ++ SetIRQmode(intNum, intMode); ++ SetIRQlevel(intNum, intLevel); ++ return TRUE; ++ } ++} ++ ++BOOL EnableInt(UINT32 intNum) ++{ ++ ++ if (intNum >= 32) { ++ printk("ERROR: The interrupt number %d is incorrect\n", intNum); ++ return FALSE; ++ } else { ++ IRQSources |= 1 << intNum; ++ UnmaskIRQ(intNum); ++ EnableIRQ(); ++ return TRUE; ++ } ++} ++ ++//End INT for lpc ++ ++void enable_sirq(void) ++{ ++ UINT32 data; ++ ++ // Unmask SERIRQs of Keyboard and Mouse ++ data = inw(CPE_LPCREG_BASE + LPC_REG_SIMR); ++ data &= ~((1 << SIRQ_KB) | (1 << SIRQ_MS)); ++ outw(CPE_LPCREG_BASE + LPC_REG_SIMR, data); ++ ++ // Enable SERIRQ ++ data = inw(CPE_LPCREG_BASE + LPC_REG_SCR); ++ data |= 0x1; ++ outw(CPE_LPCREG_BASE + LPC_REG_SCR, data); ++} ++ ++void ite_set_pme() ++{ ++ ++ UINT8 tmpc; ++ unsigned int chip_id; ++ // Enter the configuration mode ++ outlpc(ITE_ADDR, 0x87); ++ outlpc(ITE_ADDR, 0x01); ++ outlpc(ITE_ADDR, 0x55); ++ outlpc(ITE_ADDR, 0x55); ++ ++ // Check the chip ID ++ printk("@@@@@ : Check IT8718F chip ID in get_lpc_value => \n"); ++ outlpc(ITE_ADDR, 0x20); ++ tmpc = inlpc(ITE_DATA); ++ chip_id = (tmpc & 0xff) << 8; ++ outlpc(ITE_ADDR, 0x21); ++ tmpc = inlpc(ITE_DATA); ++ chip_id |= tmpc & 0xff; ++ ++ printk("@@@@@ : chip_id = 0x%08x\n", chip_id); ++ if (chip_id != 0x8718) { ++ printk("FAILED with chip ID = 0x%C%C.\n", (chip_id >> 8) & 0xff, ++ chip_id & 0xff); ++ return (1); ++ } else ++ printk("@@@@@ : PASSED\n"); ++ ++ // Set LDN = 5 ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x05); ++ ++ //Read index 30 ++ outlpc(ITE_ADDR, 0x30); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x30] = 0x%x\n", tmpc); ++ //Set index 30 bit0 = 0; ++ outlpc(ITE_ADDR, 0x30); ++ outlpc(ITE_DATA, tmpc & 0xFE); ++ ++ // Set LDN = 6 ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x06); ++ ++ //Read index 30 ++ outlpc(ITE_ADDR, 0x30); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x30] = 0x%x\n", tmpc); ++ //Set index 30 bit0 = 0; ++ outlpc(ITE_ADDR, 0x30); ++ outlpc(ITE_DATA, tmpc & 0xFE); ++ ++ // Set LDN = 4 for PME ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x04); ++ ++ //Read index F0 ++ outlpc(ITE_ADDR, 0xF0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF0] = 0x%x\n", tmpc); ++ //Set index F0 BIT[4:3]=1; ++ outlpc(ITE_ADDR, 0xF0); ++ outlpc(ITE_DATA, tmpc | 0x18); ++ ++ //PME output enable ++ //Read index F2 ++ outlpc(ITE_ADDR, 0xF2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF2] = 0x%x\n", tmpc); ++ //Set index F2 bit6=0; ++ outlpc(ITE_ADDR, 0xF2); ++ 
outlpc(ITE_DATA, tmpc & 0xBF); ++ ++ // Exit the configuration mode ++ outlpc(ITE_ADDR, 0x02); ++ outlpc(ITE_DATA, 0x02); ++} ++ ++void get_keyboard_status() ++{ ++ ++ printk("Getting keyboard status....\n"); ++ ++ UINT8 tmpc; ++ unsigned int chip_id; ++ ++ // Enter the configuration mode ++ outlpc(ITE_ADDR, 0x87); ++ outlpc(ITE_ADDR, 0x01); ++ outlpc(ITE_ADDR, 0x55); ++ outlpc(ITE_ADDR, 0x55); ++ ++ // Check the chip ID ++ //printk("@@@@@ : Check IT8718F chip ID in get_lpc_value => \n"); ++ outlpc(ITE_ADDR, 0x20); ++ tmpc = inlpc(ITE_DATA); ++ chip_id = (tmpc & 0xff) << 8; ++ outlpc(ITE_ADDR, 0x21); ++ tmpc = inlpc(ITE_DATA); ++ chip_id |= tmpc & 0xff; ++ ++ //printk("@@@@@ : chip_id = 0x%08x\n", chip_id); ++ if (chip_id != 0x8718) { ++ printk("FAILED with chip ID = 0x%C%C.\n", (chip_id >> 8) & 0xff, ++ chip_id & 0xff); ++ return (1); ++ } ++ // Set LDN = 4 for PME ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x04); ++ ++ ///////// Dump configuration register ++ outlpc(ITE_ADDR, 0xf1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf1] = 0x%x\n", tmpc); ++ ++ ///////// Dump configuration register ++ outlpc(ITE_ADDR, 0xf0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf0] = 0x%x\n", tmpc); ++ ++} ++ ++void get_lpc_value() ++{ ++ UINT8 tmpc; ++ unsigned int chip_id; ++ ++ // Enter the configuration mode ++ outlpc(ITE_ADDR, 0x87); ++ outlpc(ITE_ADDR, 0x01); ++ outlpc(ITE_ADDR, 0x55); ++ outlpc(ITE_ADDR, 0x55); ++ ++ // Check the chip ID ++ printk("@@@@@ : Check IT8718F chip ID in get_lpc_value => \n"); ++ outlpc(ITE_ADDR, 0x20); ++ tmpc = inlpc(ITE_DATA); ++ chip_id = (tmpc & 0xff) << 8; ++ outlpc(ITE_ADDR, 0x21); ++ tmpc = inlpc(ITE_DATA); ++ chip_id |= tmpc & 0xff; ++ ++ printk("@@@@@ : chip_id = 0x%08x\n", chip_id); ++ if (chip_id != 0x8718) { ++ printk("FAILED with chip ID = 0x%C%C.\n", (chip_id >> 8) & 0xff, ++ chip_id & 0xff); ++ return (1); ++ } else ++ printk("@@@@@ : PASSED\n"); ++ ++ printk("=================== Get Values ==================\n"); ++ //////////// Environment Controller configuration register ++ printk("@@@@@ : Environment Controller configuration register\n"); ++ // Set LDN = 4 for PME ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x04); ++ ++ printk("@@@@@ : LDN = 4\n"); ++ ++ ///////// Dump configuration register ++ outlpc(ITE_ADDR, 0x30); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x30] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x60); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x60] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x61); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x61] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x62); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x62] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x63); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x63] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x70); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x70] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf0] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf1] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf2] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf3); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf3] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf4); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf4] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf5); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf5] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf6); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf6] = 0x%x\n", tmpc); ++ ++ //////////// Keyboard configuration register ++ printk("@@@@@ : 
Keyboard configuration register\n"); ++ // Set LDN = 5 ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x05); ++ ++ printk("@@@@@ : LDN = 5\n"); ++ ++ outlpc(ITE_ADDR, 0x30); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x30] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x60); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x60] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x61); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x61] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x62); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x62] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x63); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x63] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x70); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x70] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x71); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x71] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xf0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xf0] = 0x%x\n", tmpc); ++ ++ //////////// GPIO configuration register ++ printk("@@@@@ : GPIO configuration register\n"); ++ // Set LDN = 7 for GPIO ++ outlpc(ITE_ADDR, 0x07); ++ outlpc(ITE_DATA, 0x07); ++ ++ printk("@@@@@ : LDN = 7\n"); ++ ++ outlpc(ITE_ADDR, 0x60); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x60] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x62); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x62] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x63); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x63] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x64); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x64] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x65); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x65] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x70); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x70] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x71); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x71] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x72); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x72] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x73); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x73] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0x74); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0x74] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB0] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB1] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB2] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB3); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB3] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB4); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB4] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB5); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB5] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xB8); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xB8] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xBA); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xBA] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xBB); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xBB] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xBC); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xBC] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xBD); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xBD] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC0] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC1] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC2] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC3); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC3] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC4); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC4] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC5); ++ 
tmpc = inlpc(ITE_DATA); ++ printk("[0xC5] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC8); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC8] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xC9); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xC9] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xCA); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xCA] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xCB); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xCB] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xCC); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xCC] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xCD); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xCD] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE0] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE1] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE2] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE3); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE3] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE4); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE4] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE5); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE5] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xE6); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xE6] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF0); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF0] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF1); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF1] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF2); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF2] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF3); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF3] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF4); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF4] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF5); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF5] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF6); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF6] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF7); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF7] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF8); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF8] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xF9); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xF9] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFA); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFA] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFB); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFB] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFC); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFC] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFD); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFD] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFE); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFE] = 0x%x\n", tmpc); ++ ++ outlpc(ITE_ADDR, 0xFF); ++ tmpc = inlpc(ITE_DATA); ++ printk("[0xFF] = 0x%x\n", tmpc); ++ ++ printk("=================== End Get Values ==================\n"); ++ ++} ++ ++/////////////////////////////// End LPC wake up /////////////////////////////////////////////// ++ ++/* ++ * AG102 PMU sleep mode handler. 
++ */ ++void andes_suspend_to_ram() ++{ ++ int i, k, l, checksum, checksuma; ++ unsigned int addr, reg; ++ static int irq_saves[3]; ++ unsigned int tmp; ++ pgd_t *pgdv; ++ pud_t *pudv; ++ pmd_t *pmdv; ++ pte_t *ptev; ++ unsigned int resume_addr /*, resume_temp */ ; ++ //unsigned int *resume_tempaddr; ++ __asm__ volatile ("mfsr %0, $ir14\n\t":"=&r" (tmp)); ++ //printk("\nag102_cpu_resume:0x%x\n", ag102_cpu_resume); ++ //printk( KERN_WARNING "\nag102_cpu_resume2:0x%x\n", ag102_cpu_resume2); ++ pgdv = ++ (pgd_t *) __va((GET_L1_PPTB() & L1_PPTB_mskBASE)) + ++ pgd_index((unsigned int)ag102_cpu_resume); ++ pudv = pud_offset(pgdv, (unsigned int)ag102_cpu_resume); ++ pmdv = pmd_offset(pudv, (unsigned int)ag102_cpu_resume); ++ ptev = pte_offset_map(pmdv, (unsigned int)ag102_cpu_resume); ++ //printk("ag102_cpu_resume pte:0x%x\n", ptev); ++ resume_addr = ++ ((*ptev) & TLB_DATA_mskPPN) | ((unsigned int)ag102_cpu_resume & ++ 0x00000fff); ++ printk("resume_addr using Page Table :0x%08x\n", resume_addr); ++ ++ //ADD by river 2010.09.23 ++ printk("@@@@@ resume_addr(VA):0x%08x\n", ag102_cpu_resume); ++ printk("@@@@@ resume_addr2(VA):0x%08x\n", ag102_cpu_resume2); ++ printk("@@@@@ resume_addr(PA) using virt_to_phys :0x%08x\n", ++ virt_to_phys(ag102_cpu_resume)); ++ printk("@@@@@ resume_addr2(PA):0x%08x\n", ++ virt_to_phys(ag102_cpu_resume2)); ++ printk("@@@@@ AHB Controller for ROM :0x%08x\n", ++ REG32(AHB_ATFAHBC020S_0_VA_BASE + 0x10)); ++ printk("@@@@@ AHB Controller for RAM :0x%08x\n", ++ REG32(AHB_ATFAHBC020S_0_VA_BASE + 0x18)); ++ //End ADD by river 2010.09.23 ++ ++ /* trigger mode regs */ ++ irq_saves[0] = REG32(AMIC_VA_BASE + 0x20); ++ /* trigger level regs */ ++ irq_saves[1] = REG32(AMIC_VA_BASE + 0x24); ++ /* interrupt enable regs */ ++ irq_saves[2] = REG32(AMIC_VA_BASE + 0x80); ++ /* save SDRAM settings */ ++ ++ /* set resume return address */ ++ //REG32(PCU_VA_BASE + 0x400) = (resume_addr); ++ REG32(PCU_VA_BASE + 0x400) = (resume_addr) | 0x80000000; ++ REG32(PCU_VA_BASE + 0x404) = (u32) ag102_cpu_resume2; ++ REG32(PCU_VA_BASE + 0x410) = GET_L1_PPTB(); ++ //ADD by river 2010.12.02 for reserve kernel remap ++ REG32(PCU_VA_BASE + 0x418) = REG32(AHB_ATFAHBC020S_0_VA_BASE + 0x10); ++ REG32(PCU_VA_BASE + 0x41C) = REG32(AHB_ATFAHBC020S_0_VA_BASE + 0x18); ++ //End ADD by river 2010.12.02 for reserve kernel remap ++ printk("L1_PPTB (PA) =0x%08x\n", GET_L1_PPTB()); ++ printk("L1_PPTB (VA) in virtual address =0x%08x\n", ++ phys_to_virt(GET_L1_PPTB())); ++ ++ //ADD by river 2010.12.07 ++ UINT32 phy_id, phy_addr; ++ ETH_PHY_STAT phy_stat; ++ ++ if (eth_phy_detect(&phy_addr, &phy_id) != 0) { ++ printk("ERR: fail to detect known PHY!!!\n"); ++ return; ++ } ++ //TEST by river 2010.12.10 ++ eth_phy_init(phy_addr); ++ eth_phy_stat(phy_addr, &phy_stat); ++ eth_mac_init(&phy_stat); ++ //End TEST by river 2010.12.10 ++ gmac_set_wol(); ++ //End ADD by river 2010.12.07 ++ ++ /////////// ADD by river 2010.12.20 for LPC wakeup ///////////////////////////// ++ if (it8718f_init() != 0) ++ return; ++ ++ //INTC setup ++ SetIntTrig(IRQ_LPC, LEVEL, H_ACTIVE); ++ EnableInt(IRQ_LPC); // including INT_MASK and GIE in Core ++ ++ enable_sirq(); ++ ++ //__asm__ volatile ("1:\n\t"); ++ //__asm__ volatile ("b 1b\n\t"); ++ ++ printk("Calling ite_set_pme() YAYAYA....\n"); ++ ite_set_pme(); ++ ++ printk("Get Value..... 
YAYAYA....1\n"); ++ get_lpc_value(); ++ ++ /////////// End ADD by river 2010.12.20 for LPC wakeup ///////////////////////////// ++ ++ //set GPIO[2] as suspend2dram power control pin ++ PCU_SET_FIELD(MFPS1, SUSPEND_GPIO, 0x1); ++ PCU_SET_FIELD(PCS9_PDD, SUSP2RAM, 0x1); ++ //ADD by river 2010.12.07 ++ PCU_SET_REG(PCS9_CFG, PCU_PREPARE(PCS9_CFG, WKEN, PCS_WKEN_WOL | PCS_WKEN_LPC | PCS_WKEN_RTC)); // use only gpio0 and wol ++ //PCU_SET_REG(PCS9_CFG, PCU_PREPARE(PCS9_CFG, WKEN, PCS_WKEN_LPC)); // use lpc ++ ++ //End ADD by river 2010.12.07 ++ ++ /* set pwoer status */ ++ //int par = PCS_POWER_GPU_AND_DAC|PCS_POWER_CPUB; ++ int reg_tmp = PCU_PREPARE(PCS9_PARA, IE, 0x1) | ++ PCU_PREPARE(PCS9_PARA, CMD, ++ PCS_CMD_PW_DOWN /*PCS_CMD_SCALING|PCS_CMD_DRAM_SF */ ) | ++ PCU_PREPARE(PCS9_PARA, SYNC, PCS_SYNC_SRC) | PCU_PREPARE(PCS9_PARA, NXTPAR, 0 /*par */ ); // change power status ++ PCU_SET_REG(PCS9_PARA, reg_tmp); ++ ++ //PCU_SET_FIELD(MFPS1, SUSPEND_GPIO, 0x1); ++ /* setup wakeup sources */ ++ /* ++ k = PCU_PREPARE(WAKEUP_SEN, GPIO0_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, GPIO1_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, GPIO2_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, GPIO3_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, GPIO4_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, GPIO5_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, WOL_POL, 0x1) | ++ PCU_PREPARE(WAKEUP_SEN, LPC_POL, 0x1); ++ l = ~k; //set polarity ++ //l = 0x1c0; ++ PCU_SET_REG(WAKEUP_SEN, l); ++ PCU_SET_REG(PCS9_CFG, PCU_PREPARE(PCS9_CFG, WKEN, k)); ++ */ ++ /* Set PDD register and set suspend_to_ram */ ++ //PCU_SET_FIELD(PCS9_PDD, SUSP2RAM, 0x1); ++ ++ cpu_dcache_wbinval_all(); ++ cpu_icache_inval_all(); ++ SET_CACHE_CTL(GET_CACHE_CTL() & ~CACHE_CTL_mskDC_EN); ++ ++ /* lock self-refresh code to L1 cache */ ++ ++ addr = (unsigned int)__SELF_REFRESH_LOCK_START; ++ reg = (((unsigned int)__SELF_REFRESH_LOCK_END - (unsigned int)__SELF_REFRESH_LOCK_START) >> 5) + 1; // ++ ++ for (i = 0; i < reg; i++) { ++ __asm__ volatile ("li $p0, 0x0\n\t" ++ "add $p0, $p0, %0\n\t" ++ "cctl $p0, L1I_VA_FILLCK\n\t" ++ "isb\n\t"::"r" (addr)); ++ ++ addr += 32; ++ } ++ ++ unsigned char *ptr; ++ //ADD by river 2010.11.19 ++ printk("The resume address's content is :\n"); ++ ptr = (unsigned char *)ag102_cpu_resume; ++ for (i = 0; i < 20; i++) { ++ printk("0x%02x - ", *ptr++); ++ } ++ ++ printk("Get Value..... 
YAYAYA....2\n"); ++ get_lpc_value(); ++ ++ //End ADD by river 2010.11.19 ++ ag102_cpu_sleep(); ++ printk("return success-1..........\n"); ++ /* wakeup and ckeck */ ++ /* ++ reg_tmp = PCU_GET_REG(PCS9_ST2);// check Status-2 ++ if (PCU_EXTRACT(PCS9_ST2, CURPAR, reg_tmp) != par) ++ printk("Parameter setup is not the same!\n"); ++ ++ reg_tmp = PCU_GET_REG(PCS9_ST1); // check Status-1 ++ if (PCU_EXTRACT(PCS9_ST1, STS, reg_tmp) != PCS_STS_DONE) ++ printk("The work is not done?!\n"); ++ if (PCU_EXTRACT(PCS9_ST1, ERR, reg_tmp) != PCS_ERR_NONE) ++ printk("Some error happened!!\n"); ++ PCU_SET_REG(PCS9_ST1, 0x0); //clear status ++ ++ */ ++ ftpci_postinit(); ++ SET_CACHE_CTL(GET_CACHE_CTL() | CACHE_CTL_mskDC_EN); ++ /* trigger mode regs */ ++ REG32(AMIC_VA_BASE + 0x20) = irq_saves[0]; ++ /* trigger level regs */ ++ REG32(AMIC_VA_BASE + 0x24) = irq_saves[1]; ++ /* interrupt enable regs */ ++ REG32(AMIC_VA_BASE + 0x80) = irq_saves[2]; ++ __asm__ volatile ("mtsr %0, $ir14\n\t"::"r" (tmp)); ++ ++ printk("return success-2..........\n"); ++ dump_stack(); ++ ++} ++ ++static int ag102_pm_valid(suspend_state_t state) ++{ ++ switch (state) { ++ case PM_SUSPEND_ON: ++ case PM_SUSPEND_STANDBY: ++ case PM_SUSPEND_MEM: ++ return 1; ++ ++ default: ++ return 0; ++ } ++} ++ ++static int ag102_pm_begin(suspend_state_t state) ++{ ++ /* TBD if we need it */ ++ return 0; ++} ++ ++static void andes_suspend_cpu(void) ++{ ++ unsigned int irq_save; ++ /* setup GPIO interrupt enable regs to enable GPIO0 */ ++ REG32(GPIO_VA_BASE + 0x20) = 1; ++ /* save interrupt enable regs */ ++ irq_save = REG32(AMIC_VA_BASE + 0x80); ++ /* accept all interrupts to wake up except timer interrupt */ ++ REG32(AMIC_VA_BASE + 0x80) &= ~(1 << 19); ++ /* enable UART0 interrupt */ ++ REG32(AMIC_VA_BASE + 0x80) |= 1 << 13; ++ ++ /* ++ * for more IRQ info, please refer to ++ * arch/nds32/include/asm/spec.h&spec-ag102.h ++ */ ++ ++ __asm__ volatile ("standby no_wake_grant\n\t"); ++ /* clear GPIO interrupt */ ++ REG32(GPIO_VA_BASE + 0x30) = 1; ++ /* disable GPIO interrupt */ ++ REG32(GPIO_VA_BASE + 0x20) = 0; ++ /* restore GPIO enable regs */ ++ REG32(AMIC_VA_BASE + 0x80) = irq_save; ++} ++ ++static int ag102_pm_enter(suspend_state_t state) ++{ ++ switch (state) { ++ case PM_SUSPEND_STANDBY: ++ andes_suspend_cpu(); ++ return 0; ++ case PM_SUSPEND_MEM: ++ printk("@@@@@@@@@@ : ag102_pm_enter()... from gavin.......\n"); ++ andes_suspend_to_ram(); ++ return 0; ++ default: ++ return -EINVAL; ++ } ++} ++ ++/* ++ * Called after processes are frozen, but before we shutdown devices. ++ */ ++static int ag102_pm_prepare(void) ++{ ++ /* TBD if we need it */ ++ return 0; ++} ++ ++/* ++ * Called after devices are wakeuped, but before processes are thawed. 
++ */ ++static void ag102_pm_finish(void) ++{ ++ /* TBD if we need it */ ++} ++ ++static void ag102_pm_end(void) ++{ ++ /* TBD if we need it */ ++#if 0 ++ class_destroy(andes_pcu_class); ++ unregister_chrdev(andes_pcu_major, ANDES_PCU_STRING); ++#endif ++ printk("pm_exit\n"); ++} ++ ++static int andes_pcu_ioctl(struct inode *inode, struct file *file, ++ unsigned int cmd, unsigned long arg) ++{ ++ return 0; ++} ++ ++static int andes_pcu_open(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static ssize_t andes_pcu_write(struct file *filp, const char *buff, ++ size_t count, loff_t * ppos) ++{ ++ char data; ++ get_user(data, buff); ++ switch (data) { ++ case 's': ++ printk("Just suspend cpu\n"); ++ andes_suspend_cpu(); ++ break; ++ case 'm': ++ printk("Suspend to ram\n"); ++ andes_suspend_to_ram(); ++ break; ++ } ++ return 0; ++} ++ ++static int andes_pcu_release(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static struct file_operations andes_pcu_fops = { ++ .owner = THIS_MODULE, ++ .ioctl = andes_pcu_ioctl, ++ .open = andes_pcu_open, ++ .write = andes_pcu_write, ++ .release = andes_pcu_release, ++}; ++ ++static struct platform_suspend_ops ag102_pm_ops = { ++ .valid = ag102_pm_valid, ++ .begin = ag102_pm_begin, ++ .prepare = ag102_pm_prepare, ++ .enter = ag102_pm_enter, ++ .finish = ag102_pm_finish, ++ .end = ag102_pm_end, ++}; ++ ++//PCU power-off workaround { ++#undef IRQ_LEVEL ++#include ++#include ++ ++//#define PM_DEBUG ++#ifdef PM_DEBUG ++#define PRINTK printk ++#else ++#define PRINTK(...) ++#endif ++ ++static volatile int pcuirq = 0; ++ ++static inline void poweroff_pcu(void) ++{ ++ PRINTK(KERN_INFO "[kernel] poweroff_pcu()"); ++ REG32(PCU_VA_BASE + 0x1a4) = (2 << 28) | (1 << 24) | (6 << 0); //power-down, sync CPUA, domain GPU+CPUB ++ __asm__ volatile ("standby wait_done\n"); ++ REG32(PCU_VA_BASE + 0x1a8) = 0x0; //PCS9 status normal, no error ++} ++ ++static inline void clear_pcu_status(void) ++{ ++ PRINTK(KERN_INFO "[kernel] clear_pcu_status()"); ++ REG32(PCU_VA_BASE + 0x1b0) |= (1 << 31); //clear poweroff flag ++ asm("msync store\nisb"); ++} ++ ++static irqreturn_t gpio0_isr(int irq, void *dev_id) ++{ ++ PRINTK(KERN_INFO "[kernel] gpio0_isr(irq=%d)\n", irq); ++ ++ // is GPIO0 IRQ ++ if (REG32(GPIO_VA_BASE + 0x28) & (1 << 0)) { ++ PRINTK(KERN_INFO "GPIO#0 ASSERT!\n"); ++ ++ pcuirq = 0; ++ ++ //disable GPIO0 IRQ ++ REG32(GPIO_VA_BASE + 0x2c) |= (1 << 0); //GPIO0 IRQ mask, level mode AMIC.GPIO#0 also cleared ++ ++// REG32(AMIC_VA_BASE + 0x84) = (1<<13); //AMIC clear GPIO interrupt ++ ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++#define GPIO_SEC (1<<11) ++static irqreturn_t pcu_isr(int irq, void *dev_id) ++{ ++ UINT32 status = REG32(PCU_VA_BASE + 0x94); ++ PRINTK(KERN_INFO "[kernel] pcu_isr(irq=%d) status=0x%08x\n", irq, ++ status); ++ if (0 == (GPIO_SEC & status)) ++ return IRQ_NONE; ++ ++ PRINTK(KERN_INFO "[kernel] GPIO_SEC\n"); ++ clear_pcu_status(); //clear PCS9 IRQ (level mode => AMIC.PCU also cleared) ++ pcuirq++; ++ if (1 == pcuirq) { ++ //enable GPIO0 IRQ ++ REG32(GPIO_VA_BASE + 0x30) = (1 << 0); //GPIO0 IRQ cleared ++// REG32(AMIC_VA_BASE + 0x84) = (1 << 13); //AMIC clear GPIO interrupt ++// enable_irq(GPIO_FTGPIO010_IRQ); ++ REG32(AMIC_VA_BASE + 0x80) |= (1 << 13); //AMIC enable GPIO IRQ ++ REG32(GPIO_VA_BASE + 0x2c) &= ~(1 << 0); //GPIO0 IRQ unmask ++ } else if (5 == pcuirq) { ++ poweroff_pcu(); ++ } ++// REG32(AMIC_VA_BASE + 0x84) = (1 << 8); //PCU IRQ clear ++ ++ return IRQ_HANDLED; ++} ++ ++//PCU power-off workaround } ++ 
++static int __init ag102_pm_init(void) ++{ ++ int ret = 0; ++ struct device *temp_class; ++ printk("PM driver init\n"); ++ suspend_set_ops(&ag102_pm_ops); ++ ++ andes_pcu_major = register_chrdev(0, ANDES_PCU_STRING, &andes_pcu_fops); ++ ++ printk("@@@@@ : andes_pcu_major = 0x%08x\n", andes_pcu_major); ++ if (andes_pcu_major < 0) { ++ printk("Unable to get a major for andes pcu driver!\n"); ++ return andes_pcu_major; ++ } ++ ++ andes_pcu_class = class_create(THIS_MODULE, ANDES_PCU_STRING); ++ ++ if (IS_ERR(andes_pcu_class)) { ++ printk(KERN_ERR "Error creating andes pcu class.\n"); ++ ret = PTR_ERR(andes_pcu_class); ++ goto err_out1; ++ } ++ ++ temp_class = device_create(andes_pcu_class, NULL, ++ MKDEV(andes_pcu_major, 0), ++ NULL, ANDES_PCU_STRING); ++ ++ if (IS_ERR(temp_class)) { ++ printk(KERN_ERR "Error creating andes pcu class device.\n"); ++ ret = PTR_ERR(temp_class); ++ goto err_out2; ++ } ++ ++ mac_dah = GMAC_GET_REG(MAC_MADR); ++ mac_dal = GMAC_GET_REG(MAC_LADR); ++ ++ //poweroff workaround ++ { ++ if (request_irq ++ (PCU_IRQ, pcu_isr, IRQF_SHARED, "PCU_POWEROFF", pcu_isr)) { ++ printk("Failed to request PCU interrupt.\n"); ++ } ++ ++ if (request_irq ++ (GPIO_FTGPIO010_IRQ, gpio0_isr, IRQF_SHARED, ++ "GPIO0_FOR_PCU", gpio0_isr)) { ++ printk("Failed to request GPIO0 interrupt.\n"); ++ } ++ ++ REG32(PCU_VA_BASE + 0x1b0) |= (3 << 28); //poweroff in 5s ++ ++ //AMIC setup ++ // PCU IRQ default edge trigger, rising edge ++ // GPIO_FTGPIO010_IRQ default edge trigger, rising edge ++// REG32(AMIC_VA_BASE + 0x20) &= ~(1 << 8); //PCU IRQ level trigger ++// REG32(AMIC_VA_BASE + 0x24) &= ~(1 << 8); //PCU IRQ active-high ++// REG32(AMIC_VA_BASE + 0x20) &= ~(1 << 13); //GPIO IRQ level trigger ++ ++ //GPIO #0 setup ++ REG32(GPIO_VA_BASE + 0x08) &= ~(1 << 0); //GPIO0 input ++ REG32(GPIO_VA_BASE + 0x18) &= ~(1 << 0); //GPIO0 not pulled (external pull-high) ++ REG32(GPIO_VA_BASE + 0x2c) |= (1 << 0); //GPIO0 IRQ masked ++ REG32(GPIO_VA_BASE + 0x34) |= (1 << 0); //GPIO0 IRQ level trigger ++// REG32(GPIO_VA_BASE + 0x38) &= ~(1<<0); //GPIO0 IRQ single edge ++ REG32(GPIO_VA_BASE + 0x3c) &= ~(1 << 0); //GPIO0 IRQ high-active ++ REG32(GPIO_VA_BASE + 0x40) |= (1 << 0); //GPIO0 enable bounce ++ REG32(GPIO_VA_BASE + 0x40) = 0xffff; //GPIO bounce time = x/PCLK ++// REG32(GPIO_VA_BASE + 0x30) = (1<<0); //GPIO0 IRQ cleared ++ REG32(GPIO_VA_BASE + 0x20) |= (1 << 0); //GPIO0 IRQ enable ++ ++// enable_irq(PCU_IRQ); ++ REG32(AMIC_VA_BASE + 0x80) |= (1 << 8); //IRQ enabled ++ } ++ ++ printk("pm_init\n"); ++ return 0; ++err_out2: ++ class_destroy(andes_pcu_class); ++err_out1: ++ unregister_chrdev(andes_pcu_major, ANDES_PCU_STRING); ++ return 1; ++} ++ ++//static void resume_to_ccode(void) ++//{ ++// printk("Resume to C code successfully....\n"); ++//} ++ ++late_initcall(ag102_pm_init); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ag102/sleep.S linux-3.4.110/arch/nds32/platforms/ag102/sleep.S +--- linux-3.4.110.orig/arch/nds32/platforms/ag102/sleep.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/ag102/sleep.S 2016-04-07 10:20:51.002083345 +0200 +@@ -0,0 +1,465 @@ ++#include ++.text ++ ++.globl ag102_cpu_sleep ++.globl ag102_cpu_resume ++.globl ag102_cpu_resume2 ++.globl __SELF_REFRESH_LOCK_START ++.globl __SELF_REFRESH_LOCK_END ++ ++#! 0x94200000; keep r0, r1, r2 ++ .macro putch ch ++ li $r0, #0x94200000 ++ move $r2, \ch ++88: ++ lwi $r1, [$r0+#0x14] ++ srli $r1, $r1, #5 ++ andi $r1, $r1, #0x1 ++ beqz $r1, 88b ++ swi $r2, [$r0] ++ .endm ++#! 
0x94200000; keep r3, r4 ++ .macro hex2asc val ++ addi \val, \val, -10 ++ bltz \val, 1f ++ addi \val, \val, 0x41 ++ j 2f ++1: ++ addi \val, \val, 0x3a ++2: ++ .endm ++ ++ .macro print_hex mhex ++ move $r3, \mhex ++ ++ srli $r4, $r3, #28 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #24 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #20 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #16 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #12 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #8 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ srli $r4, $r3, #4 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ move $r4, $r3 ++ andi $r4, $r4, 0xf ++ hex2asc $r4 ++ putch $r4 ++ .endm ++ ++ag102_cpu_resume2: ++/*1: ++b 1b*/ ++ //MOD by river 2010.10.13 ++ popm $r0, $r19 ++ mtusr $r0, $d0.lo ! $d0 lo byte ++ mtusr $r1, $d0.hi ! $d0 hi byte ++ mtusr $r2, $d1.lo ! $d1 lo byte ++ mtusr $r3, $d1.hi ! $d1 hi byte ++ mtsr $r4, $mr0 ++ mtsr $r5, $mr1 ++ mtsr $r6, $mr4 ++ mtsr $r7, $mr6 ++ mtsr $r8, $mr7 ++ mtsr $r9, $mr8 ++ mtsr $r10, $ir0 ++ mtsr $r11, $ir1 ++ mtsr $r12, $ir2 ++ mtsr $r13, $ir3 ++ mtsr $r14, $ir9 ++ mtsr $r15, $ir10 ++ mtsr $r16, $ir12 ++ mtsr $r17, $ir13 ++ mtsr $r18, $ir14 ++ mtsr $r19, $ir15 ++ //End MOD by river 2010.10.13 ++ popm $r0, $r30 ++ ret ++ ++ag102_cpu_sleep: ++ ++ pushm $r0, $r30 ++ //MOD by river 2010.10.13 ++ mfusr $r0, $d0.lo ! $d0 lo byte ++ mfusr $r1, $d0.hi ! $d0 hi byte ++ mfusr $r2, $d1.lo ! $d1 lo byte ++ mfusr $r3, $d1.hi ! $d1 hi byte ++ mfsr $r4, $mr0 ++ mfsr $r5, $mr1 ++ mfsr $r6, $mr4 ++ mfsr $r7, $mr6 ++ mfsr $r8, $mr7 ++ mfsr $r9, $mr8 ++ mfsr $r10, $ir0 ++ mfsr $r11, $ir1 ++ mfsr $r12, $ir2 ++ mfsr $r13, $ir3 ++ mfsr $r14, $ir9 ++ mfsr $r15, $ir10 ++ mfsr $r16, $ir12 ++ mfsr $r17, $ir13 ++ mfsr $r18, $ir14 ++ mfsr $r19, $ir15 ++ pushm $r0, $r19 ++ //End MOD by river 2010.10.13 ++ ++ /* store $sp to 0x408 scratch pad */ ++ sethi $r0, hi20(PCU_VA_BASE + 0x408) ++ ori $r2, $r0, lo12(PCU_VA_BASE + 0x408) ++ swi $r31, [$r2] ++ ++ /* set signaure "SUSP" to 0x40c */ ++ li $r0, (PCU_VA_BASE + 0x40c) ++ li $r1, 0x53555350 ! 
set signature "SUSP" to scratch pad register offset 0x40c ++ swi $r1, [$r0] ++ ++ /* ++ * store 8 bytes from 16mb to pcu scratch pad resgister ++ * due to data training process will destroy it ++ */ ++ li $r0, 0x10000 ++ li $r2, (PCU_VA_BASE + 0x414) ++ lwi $r1, [$r0] ++ swi $r1, [$r2] ++ lwi $r1, [$r0 + 0x4] ++ swi $r1, [$r2 + 0x4] ++ ++ /* ADD by river 2010.09.23 */ ++ /* Save PSW in PCU_VA_BASE + 0x420 */ ++ li $r0, (PCU_VA_BASE + 0x420) ++ mfsr $r1, $ir0 ++ swi $r1, [$r0] ++ /* Save $mr0 in PCU_VA_BASE + 0x424 */ ++ li $r0, (PCU_VA_BASE + 0x424) ++ mfsr $r1, $mr0 ++ swi $r1, [$r0] ++ /* Save $mr1 in PCU_VA_BASE + 0x428 */ ++ li $r0, (PCU_VA_BASE + 0x428) ++ mfsr $r1, $mr1 ++ swi $r1, [$r0] ++ /* Save $mr2 in PCU_VA_BASE + 0x42c */ ++ li $r0, (PCU_VA_BASE + 0x42c) ++ mfsr $r1, $mr2 ++ swi $r1, [$r0] ++ /* Save $mr3 in PCU_VA_BASE + 0x430 */ ++ li $r0, (PCU_VA_BASE + 0x430) ++ mfsr $r1, $mr3 ++ swi $r1, [$r0] ++ /* Save $mr4 in PCU_VA_BASE + 0x434 */ ++ li $r0, (PCU_VA_BASE + 0x434) ++ mfsr $r1, $mr4 ++ swi $r1, [$r0] ++ /* Save $mr5 in PCU_VA_BASE + 0x438 */ ++ //li $r0, (PCU_VA_BASE + 0x438) ++ //mfsr $r1, $mr5 ++ //swi $r1, [$r0] ++ /* Save $mr6 in PCU_VA_BASE + 0x43c */ ++ li $r0, (PCU_VA_BASE + 0x43c) ++ mfsr $r1, $mr6 ++ swi $r1, [$r0] ++ /* Save $mr7 in PCU_VA_BASE + 0x440 */ ++ li $r0, (PCU_VA_BASE + 0x440) ++ mfsr $r1, $mr7 ++ swi $r1, [$r0] ++ /* Save $mr8 in PCU_VA_BASE + 0x444 */ ++ li $r0, (PCU_VA_BASE + 0x444) ++ mfsr $r1, $mr8 ++ swi $r1, [$r0] ++ ++ /* Save $ir3 in PCU_VA_BASE + 0x450 */ ++ li $r0, (PCU_VA_BASE + 0x450) ++ mfsr $r1, $ir3 ++ swi $r1, [$r0] ++ ++ /* Save $ir14 in PCU_VA_BASE + 0x454 */ ++ li $r0, (PCU_VA_BASE + 0x454) ++ mfsr $r1, $ir14 ++ swi $r1, [$r0] ++ ++ /* Save $ir15 in PCU_VA_BASE + 0x458 */ ++ li $r0, (PCU_VA_BASE + 0x458) ++ mfsr $r1, $ir15 ++ swi $r1, [$r0] ++ ++ tlbop FlushAll ++ isb ++ /* End ADD by river 2010.09.23 */ ++ ++ //Trace by river 2010.12.01 ++ /*1: ++ b 1b*/ ++ //End Trace by river 2010.12.01 ++.p2align 5 ++__SELF_REFRESH_LOCK_START: ++ /*sethi $r0, hi20(0x900005cc) ++ ori $r0, $r0, lo12(0x900005cc) ++ sethi $r1, hi20(DDR2C_VA_BASE) ++ swi $r0, [$r1+0x4] ++ ++ msync ++ isb ++ ++ standby wake_grant ++ .p2align 5*/ ++ sethi $r0, hi20(DDR2C_VA_BASE) ++ lwi $r1, [$r0+0x4] ++ li $r2, 0x07ffefff ++ and $r1, $r2, $r1 ++ li $r2, 0x90001000 ++ or $r1, $r2, $r1 ++ swi $r1, [$r0+0x4] ++ ++ msync ++ isb ++ ++ standby wake_grant ++ .p2align ++ ++__SELF_REFRESH_LOCK_END: ++ ++ag102_cpu_resume: ++ /* TRACE by river 2010.12.02 */ ++ /*1: ++ b 1b*/ ++ /* End TRACE by river 2010.12.02 */ ++ mfsr $r2, $mr0 ++ ori $r2, $r2, #0x6 ++#ifdef CONFIG_ANDES_PAGE_SIZE_8KB ++ ori $r2, $r2, #0x1 ++#endif ++ mtsr $r2, $mr0 ++ ++ /* ADD by river 2010.09.23 */ ++ li $r3, 'C ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'P ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'U ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'R ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'E ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'S ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'U ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'M ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ li $r3, 'E ++ putch $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ /* ADD by river 2010.09.23 */ ++ ++ /*tlbop FlushAll*/ ! 
invalidate TLB\n" ++ ++ /* ADD by river 2010.09.23 */ ++ /* restore PSW */ ++ //sethi $r2, hi20(PCU_PA_BASE + 0x420) ++ //ori $r2, $r2, lo12(PCU_PA_BASE + 0x420) ++ //lwi $r3, [$r2] ++ //mtsr $r3, $ir0 ++ //move $p1, $r3 ++ /* restore $mr0 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x424) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x424) ++ lwi $r3, [$r2] ++ mtsr $r3, $mr0 ++ /* restore $mr1 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x428) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x428) ++ lwi $r3, [$r2] ++ mtsr $r3, $mr1 ++ ++ /* restore $mr2 */ ++ //sethi $r2, hi20(PCU_PA_BASE + 0x42c) ++ //ori $r2, $r2, lo12(PCU_PA_BASE + 0x42c) ++ //lwi $r3, [$r2] ++ //mtsr $r3, $mr2 ++ /* restore $mr3 */ ++ //sethi $r2, hi20(PCU_PA_BASE + 0x430) ++ //ori $r2, $r2, lo12(PCU_PA_BASE + 0x430) ++ //lwi $r3, [$r2] ++ //mtsr $r3, $mr3 ++ /* restore $mr4 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x434) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x434) ++ lwi $r3, [$r2] ++ mtsr $r3, $mr4 ++ /* restore $mr5 */ ++ //sethi $r2, hi20(PCU_PA_BASE + 0x438) ++ //ori $r2, $r2, lo12(PCU_PA_BASE + 0x438) ++ //lwi $r3, [$r2] ++ //mtsr $r3, $mr5 ++ /* restore $mr6 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x43c) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x43c) ++ lwi $r3, [$r2] ++ mtsr $r3, $mr6 ++ /* restore $mr7 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x440) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x440) ++ lwi $r3, [$r2] ++ mtsr $r3, $mr7 ++ /* restore $mr8 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x444) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x444) ++ lwi $r3, [$r2] ++ /* ADD by river 2010.12.02 for ICache Enable */ ++ ori $r3, $r3, #0x1 ++ /* End ADD by river 2010.12.02 for ICache Enable */ ++ mtsr $r3, $mr8 ++ ++ /* restore $ir3 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x450) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x450) ++ lwi $r3, [$r2] ++ mtsr $r3, $ir3 ++ ++ move $p1, $r3 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ print_hex $p1 ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ ++ ++ /* restore $ir14 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x454) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x454) ++ lwi $r3, [$r2] ++ mtsr $r3, $ir14 ++ ++ /* restore $ir15 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x458) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x458) ++ lwi $r3, [$r2] ++ mtsr $r3, $ir15 ++ ++ li $r3, '\r ++ putch $r3 ++ li $r3, '\n ++ putch $r3 ++ move $p1, $r3 ++ print_hex $p1 ++ ++ /* End ADD by river 2010.09.23 */ ++ ++ /* in this buggy version(ram locate at 1G) TC01 we don't need to do remap. ++ * ebios set memory locate at 1G & size = 1G, for more detail info, please ++ * refer to ebios boot.S. 
++ * ++ * sethi $r2, hi20(0x90c00000 + 0x88) ++ * ori $r2, $r2, lo12(0x90c00000 + 0x88) ++ * movi $r3, #0x1 ++ * swi $r3, [$r2] ++ **/ ++ ++ /* restore 8 bytes from pcu scratch pad resgister to 16mb */ ++ li $r0, 0x10000 ++ li $r2, (PCU_PA_BASE + 0x414) ++ lwi $r1, [$r2] ++ swi $r1, [$r0] ++ lwi $r1, [$r2 + 0x4] ++ swi $r1, [$r0 + 0x4] ++ ++ .p2align 5 ++ resume_lock_start: ++ /* ADD by river 2010.12.02 for ag102_cpu_resume2 for jral.ton $r4, $r4 */ ++ sethi $r2, hi20(PCU_PA_BASE + 0x404) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x404) ++ lwi $r4, [$r2] ++ /* End ADD by river 2010.12.02 for ag102_cpu_resume2 */ ++ ++ /* ADD by river 2010.12.02 for restore kernel ROM map */ ++ sethi $r0, hi20(0x40080000) ++ ori $r0, $r0, lo12(0x40080000) ++ sethi $r1, hi20(AHB_ATFAHBC020S_0_PA_BASE + 0x10) ++ ori $r1, $r1, lo12(AHB_ATFAHBC020S_0_PA_BASE + 0x10) ++ swi $r0, [$r1] ++ ++ /* ADD by river 2010.12.02 for restore kernel RAM map */ ++ sethi $r0, hi20(0x000A0000) ++ ori $r0, $r0, lo12(0x000A0000) ++ sethi $r1, hi20(AHB_ATFAHBC020S_0_PA_BASE + 0x18) ++ ori $r1, $r1, lo12(AHB_ATFAHBC020S_0_PA_BASE + 0x18) ++ swi $r0, [$r1] ++ ++ /******************* Gavin version ***************************/ ++ //MOD by river 2010.10.13 ++ /*sethi $r2, hi20(PCU_PA_BASE + 0x404) ++ ori $r2, $r2, lo12(PCU_PA_BASE + 0x404) ++ lwi $r4, [$r2] ++ mtsr $r4, $IPC ++ li $r1, 0xcb ++ mtsr $r1, $IPSW ++ iret*/ ++ //End MOD by river 2010.10.13 ++ //End Gavin version//////////////////////////////////////////////////////////// ++ ++ /* End ADD by river 2010.09.23 */ ++ //MOD by river 2010.10.13 ++ //////// jral.ton version //////////////////////////////////////////////////// ++ ++ jral.ton $r4, $r4 ++ .p2align ++ resume_lock_end: ++ //End MOD by river 2010.10.13 +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/amic.c linux-3.4.110/arch/nds32/platforms/amic.c +--- linux-3.4.110.orig/arch/nds32/platforms/amic.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/amic.c 2016-04-07 10:20:51.002083345 +0200 +@@ -0,0 +1,203 @@ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define DEBUG(enabled, tagged, ...) 
\ ++ do { \ ++ if (enabled) { \ ++ if (tagged) \ ++ printk("[ %30s() ] ", __func__); \ ++ printk(__VA_ARGS__); \ ++ } \ ++ } while (0) ++ ++static DEFINE_SPINLOCK(amic_irq_lock); ++ ++static void amic_ack_irq(unsigned int irq) ++{ ++ unsigned int data; ++ ++ spin_lock(&amic_irq_lock); ++ writel(1 << irq, AMIC_BASE + INTSTA); ++ data = readl(AMIC_BASE + INTSTA); ++ spin_unlock(&amic_irq_lock); ++} ++ ++static void amic_mask_irq(unsigned int irq) ++{ ++ unsigned int data; ++ ++ spin_lock(&amic_irq_lock); ++ data = readl(AMIC_BASE + INTEN); ++ data &= ~(1 << irq); ++ writel(data, AMIC_BASE + INTEN); ++ data = readl(AMIC_BASE + INTEN); ++ spin_unlock(&amic_irq_lock); ++} ++ ++static int amic_set_type(unsigned int irq, unsigned int flow_type) ++{ ++ unsigned int data; ++ ++ spin_lock(&amic_irq_lock); ++ if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { ++ data = readl(AMIC_BASE + INTTRG); ++ data |= 1 << irq; ++ writel(data, AMIC_BASE + INTTRG); ++ } ++ ++ if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { ++ data = readl(AMIC_BASE + INTTRG); ++ data &= ~(1 << irq); ++ writel(data, AMIC_BASE + INTTRG); ++ } ++ ++ if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) { ++ data = readl(AMIC_BASE + INTLVL); ++ data |= 1 << irq; ++ writel(data, AMIC_BASE + INTLVL); ++ } ++ ++ if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)) { ++ data = readl(AMIC_BASE + INTLVL); ++ data &= ~(1 << irq); ++ writel(data, AMIC_BASE + INTLVL); ++ } ++ spin_unlock(&amic_irq_lock); ++ return 0; ++} ++ ++static void amic_unmask_irq(unsigned int irq) ++{ ++ unsigned int data; ++ ++ spin_lock(&amic_irq_lock); ++ data = readl(AMIC_BASE + INTEN); ++ data |= 1 << irq; ++ writel(data, AMIC_BASE + INTEN); ++ data = readl(AMIC_BASE + INTEN); ++ spin_unlock(&amic_irq_lock); ++} ++ ++static int amic_set_affinity(unsigned int irq, const struct cpumask *dest) ++{ ++ int cnt = 0; ++ int cpu; ++ volatile unsigned int data; ++ volatile unsigned int dc, amic_irq; ++ ++ if (num_online_cpus() > 2) ++ return 0; ++ ++ spin_lock(&amic_irq_lock); ++ /* remap irq number for real controller */ ++ /* this may be needed in future */ ++ /* amic_irq = irq_remap(irq); */ ++ amic_irq = irq; ++ ++ /* change owner */ ++ data = readl(AMIC_BASE + CPUID0 + ((amic_irq >> 4) << 2)); ++ for_each_online_cpu(cpu) { ++ if (cpumask_test_cpu(cpu, dest)) { ++ data &= ~(0x3 << ((amic_irq & ~0x10) * 2)); ++ data |= cpu << ((amic_irq & ~0x10) * 2); ++ cnt++; ++ } ++ } ++ writel(data, AMIC_BASE + CPUID0 + ((amic_irq >> 4) << 2)); ++ ++ dc = readl(AMIC_BASE + CPUDC); ++ if (cnt == 2) /* set bit */ ++ writel((dc | (1 << amic_irq)), (AMIC_BASE + CPUDC)); ++ else /* clear bit */ ++ writel((dc & ~(1 << amic_irq)), (AMIC_BASE + CPUDC)); ++ ++ spin_unlock(&amic_irq_lock); ++ ++ DEBUG(0, 1, "en=%08x,status=%08x\n", readl(AMIC_BASE + INTEN), ++ readl(AMIC_BASE + INTSTA)); ++ ++ return 0; ++} ++ ++static struct irq_chip amic_chip = { ++ .name = "AMIC", ++ .ack = amic_ack_irq, ++ .mask = amic_mask_irq, ++ .unmask = amic_unmask_irq, ++ .set_affinity = amic_set_affinity, ++ .set_type = amic_set_type, ++}; ++ ++void __init amic_init(void) ++{ ++ int i, edge; ++ unsigned int temp = smp_processor_id(); ++ temp |= temp << 2; ++ temp |= temp << 4; ++ temp |= temp << 8; ++ temp |= temp << 16; ++ ++ writel(0x0, AMIC_BASE + INTEN); ++ writel(0x0, AMIC_BASE + CPUDC); ++ writel(temp, AMIC_BASE + CPUID0); ++ writel(temp, AMIC_BASE + CPUID1); ++ writel(0xffff, AMIC_BASE + IPISTA); ++ writel(0xffffffff, AMIC_BASE + INTSTA); ++ writel(0x11111111, 
AMIC_BASE + PRITY0); ++ writel(0x11111111, AMIC_BASE + PRITY1); ++ writel(0x11111111, AMIC_BASE + PRITY2); ++ writel(0x11111111, AMIC_BASE + PRITY3); ++ writel(DEFAULT_MODE, AMIC_BASE + INTTRG); ++ writel(~DEFAULT_LEVEL, AMIC_BASE + INTLVL); ++ printk("AMIC config %x %x\n", temp, readl(AMIC_BASE + CONFIG)); ++ ++ for (i = IRQ_BASE, edge = 1; i < IRQ_BASE + IRQ_TOTAL; i++, edge <<= 1) { ++ set_irq_chip(i, &amic_chip); ++ if (DEFAULT_MODE & edge) ++ set_irq_handler(i, handle_edge_irq); ++ else ++ set_irq_handler(i, handle_level_irq); ++ } ++ ++} ++ ++unsigned int get_IntSrc(void) ++{ ++ unsigned int irqsta, irq = 31; ++ ++ spin_lock(&amic_irq_lock); ++ irqsta = readl(AMIC_BASE + IPISTA); ++ if (irqsta != 0) ++ irqsta = 0; ++ else ++ irqsta = readl(AMIC_BASE + INTSTA); ++ spin_unlock(&amic_irq_lock); ++ ++ if (irqsta == 0) ++ return 32; ++ if (irqsta & 0x0000ffff) { ++ irq -= 16; ++ irqsta <<= 16; ++ } ++ if (irqsta & 0x00ff0000) { ++ irq -= 8; ++ irqsta <<= 8; ++ } ++ if (irqsta & 0x0f000000) { ++ irq -= 4; ++ irqsta <<= 4; ++ } ++ if (irqsta & 0x30000000) { ++ irq -= 2; ++ irqsta <<= 2; ++ } ++ if (irqsta & 0x40000000) { ++ irq -= 1; ++ } ++ return irq; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/dmad.c linux-3.4.110/arch/nds32/platforms/dmad.c +--- linux-3.4.110.orig/arch/nds32/platforms/dmad.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/dmad.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,3601 @@ ++/***************************************************************************** ++ * ++ * Copyright Andes Technology Corporation 2007-2008 ++ * All Rights Reserved. ++ * ++ * Revision History: ++ * ++ * Aug.21.2007 Created. ++ * Feb.23.2009 Porting to Linux 2.6. ++*****************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if (defined(CONFIG_PLATFORM_AHBDMA) || defined(CONFIG_PLATFORM_APBDMA)) ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++#define DMAD_AHB_MAX_CHANNELS DMAC_MAX_CHANNELS ++#else ++#define DMAD_AHB_MAX_CHANNELS 0 ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++#define DMAD_APB_MAX_CHANNELS APBBR_DMA_MAX_CHANNELS ++#else ++#define DMAD_APB_MAX_CHANNELS 0 ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#define DMAD_DRB_POOL_SIZE 32 /* 128 */ ++ ++/* reg/io supplementals */ ++static inline void setbl(addr_t bit, addr_t reg) ++{ ++ outl(inl(reg) | (addr_t) ((addr_t) 1 << bit), reg); ++} ++ ++static inline void clrbl(addr_t bit, addr_t reg) ++{ ++ outl(inl(reg) & (~((addr_t) ((addr_t) 1 << bit))), reg); ++} ++ ++static inline addr_t getbl(addr_t bit, addr_t reg) ++{ ++ return inl(reg) & (addr_t) ((addr_t) 1 << bit); ++} ++ ++/******************************************************************************/ ++ ++enum DMAD_DRQ_FLAGS { ++ DMAD_DRQ_STATE_READY = 0x00000001, /* channel allocation status */ ++ DMAD_DRQ_STATE_ABORT = 0x00000002, /* abort drb alloc block-wait */ ++ DMAD_DRQ_DIR_A1_TO_A0 = 0x00000004, /* Transfer direction */ ++}; ++ ++#define DMAD_DRQ_DIR_MASK DMAD_DRQ_DIR_A1_TO_A0 ++ ++/* DMA request queue, one instance per channel */ ++typedef struct dmad_drq { ++ u32 state; /* enum DMAD_DRQ_STATE */ ++ ++ addr_t channel_base; /* register base address */ ++ addr_t enable_port; /* enable register */ ++ addr_t src_port; /* source address register */ ++ addr_t dst_port; /* dest address register */ ++ addr_t cyc_port; /* size(cycle) register */ 
++ ++ u32 flags; /* enum DMAD_CHREQ_FLAGS */ ++ ++ spinlock_t drb_pool_lock; ++ dmad_drb *drb_pool; /* drb pool */ ++ ++ u32 fre_head; /* free list head */ ++ u32 fre_tail; /* free list tail */ ++ ++ u32 rdy_head; /* ready list head */ ++ u32 rdy_tail; /* ready list tail */ ++ ++ u32 sbt_head; /* submitted list head */ ++ u32 sbt_tail; /* submitted list tail */ ++ ++ u32 data_width; /* dma transfer data width */ ++ ++ struct completion drb_alloc_sync; ++ ++ /* client supplied callback function, executed in interrupt context ++ * client private data to be passed to data argument of completion_cb(). ++ */ ++ void (*completion_cb) (int channel, u16 status, void *data); ++ void *completion_data; ++ ++ /* ring-mode fields are valid for DMAD_FLAGS_RING_MODE */ ++ dma_addr_t ring_base; /* ring buffer base address */ ++ dma_addr_t ring_size; /* size (of data width) */ ++ addr_t ring_port; /* for setup/fetch hw_ptr */ ++ dmad_drb *ring_drb; ++ ++ addr_t dev_addr; /* device data port */ ++ ++ int periods; /* interrupts periods */ ++ dma_addr_t period_size; /* of dma data with */ ++ dma_addr_t period_bytes; /* Period size, in bytes */ ++ ++ /* ring_size - period_size * periods */ ++ dma_addr_t remnant_size; ++ ++ dma_addr_t sw_ptr; /* sw pointer */ ++ int sw_p_idx; /* current ring_ptr */ ++ dma_addr_t sw_p_off; /* offset to period base */ ++ ++} dmad_drq; ++ ++/* To shrink code size and improve performance, common channel registers ++ * are preload in drq struct at channel allocation time. One of the key ++ * dependency is the enable bit of both DMAC and APBDMA channel command ++ * registers. Please make sure hw design them at bit 0 of the command register ++ * in future evolvement. ++ */ ++#if (DMAC_CSR_CH_EN_BIT != APBBR_DMA_CHEN_BIT) ++#error "DMAC_CSR_CH_EN_BIT != APBBR_DMA_CHEN_BIT" ++#endif ++ ++#define DMAD_PORT_ENABLE_BIT APBBR_DMA_CHEN_BIT ++ ++static inline void dmad_enable_channel(dmad_drq * drq) ++{ ++ setbl(DMAD_PORT_ENABLE_BIT, drq->enable_port); ++} ++ ++static inline void dmad_disable_channel(dmad_drq * drq) ++{ ++ clrbl(DMAD_PORT_ENABLE_BIT, drq->enable_port); ++} ++ ++static inline addr_t dmad_is_channel_enabled(dmad_drq * drq) ++{ ++ return (addr_t) getbl(DMAD_PORT_ENABLE_BIT, drq->enable_port); ++} ++ ++/* AHB DMAC channel re-route table structure */ ++typedef struct _DMAD_AHB_CH_ROUTE { ++ u32 dev_reqn; /* device req/gnt number */ ++ addr_t clear_cr; /* routing control register address */ ++ addr_t route_cr; /* routing control register address */ ++} DMAD_AHB_CH_ROUTE; ++ ++#ifdef CONFIG_PLAT_AG102 ++#if 0 ++/* AHB DMAC channel re-route table. Indexed by AHB DMAC req/ack number. */ ++static DMAD_AHB_CH_ROUTE ahb_ch_route_table[] __attribute__ ((__unused__)) = { ++ /* all todo ... 
*/ ++ ++ { ++ 0x00, DMAC_REQN_IDERX, DMAC_REQN_IDERX}, { ++ 0x01, DMAC_REQN_IDETX, DMAC_REQN_IDETX}, { ++ 0x02, DMAC_REQN_I2SAC97RX, DMAC_REQN_I2SAC97RX}, { ++ 0x03, DMAC_REQN_I2SAC97TX, DMAC_REQN_I2SAC97TX}, { ++ 0x04, DMAC_REQN_UART2RX, DMAC_REQN_UART2RX}, { ++ 0x05, DMAC_REQN_UART2TX, DMAC_REQN_UART2TX}, { ++ 0x06, DMAC_REQN_UART1RX, DMAC_REQN_UART1RX}, { ++ 0x07, DMAC_REQN_UART1TX, DMAC_REQN_UART1TX}, { ++ 0x08, DMAC_REQN_SDC, DMAC_REQN_SDC}, { ++ 0x09, DMAC_REQN_CFC, DMAC_REQN_CFC}, { ++ 0x0a, DMAC_REQN_LPCREQ0, DMAC_REQN_LPCREQ0}, { ++ 0x0b, DMAC_REQN_LPCREQ1, DMAC_REQN_LPCREQ1}, { ++ 0x0c, DMAC_REQN_LPCREQ2, DMAC_REQN_LPCREQ2}, { ++ 0x0d, DMAC_REQN_LPCREQ3, DMAC_REQN_LPCREQ3}, { ++ 0x0e, 0, 0}, { ++0x0f, DMAC_REQN_LPCREQ5, DMAC_REQN_LPCREQ5},}; ++#endif ++#else /* CONFIG_PLAT_AG102 */ ++ ++/* AHB DMAC channel re-route table. Indexed by AHB DMAC req/ack number. */ ++static DMAD_AHB_CH_ROUTE ahb_ch_route_table[] = { ++ {0x00, 0, 0}, ++ {0x01, PMU_CFC_REQACK_CFG, PMU_CFC_REQACK_CFG}, ++ {0x02, PMU_SSP1_REQACK_CFG, PMU_SSP1_REQACK_CFG}, ++ {0x03, PMU_UART1RX_REQACK_CFG, PMU_UART1TX_REQACK_CFG}, ++ {0x04, PMU_UART1TX_REQACK_CFG, PMU_UART1RX_REQACK_CFG}, ++ {0x05, PMU_UART2RX_REQACK_CFG, PMU_UART2TX_REQACK_CFG}, ++ {0x06, PMU_UART2TX_REQACK_CFG, PMU_UART2RX_REQACK_CFG}, ++ {0x07, PMU_SDC_REQACK_CFG, PMU_SDC_REQACK_CFG}, ++ {0x08, PMU_I2SAC97RX_REQACK_CFG, PMU_I2SAC97TX_REQACK_CFG}, ++ {0x09, 0, 0}, ++ {0x0a, PMU_I2SAC97TX_REQACK_CFG, PMU_I2SAC97RX_REQACK_CFG}, ++ {0x0b, PMU_USB_REQACK_CFG, PMU_USB_REQACK_CFG}, ++ {0x0c, 0, 0}, ++ {0x0d, 0, 0}, ++ {0x0e, PMU_EXT0_REQACK_CFG, PMU_EXT0_REQACK_CFG}, ++ {0x0f, PMU_EXT1_REQACK_CFG, PMU_EXT1_REQACK_CFG}, ++}; ++ ++#endif /* CONFIG_PLAT_AG102 */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++/* system irq number (per channel, ahb) */ ++static const unsigned int ahb_irqs[DMAD_AHB_MAX_CHANNELS] = { ++ DMAC_FTDMAC020_0_IRQ0, ++ DMAC_FTDMAC020_0_IRQ1, ++ DMAC_FTDMAC020_0_IRQ2, ++ DMAC_FTDMAC020_0_IRQ3, ++ DMAC_FTDMAC020_0_IRQ4, ++ DMAC_FTDMAC020_0_IRQ5, ++ DMAC_FTDMAC020_0_IRQ6, ++ DMAC_FTDMAC020_0_IRQ7, ++}; ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++/* APB Bridge DMA request number re-route table */ ++typedef struct _DMAD_APB_REQN_ROUTE { ++ u32 apb_reqn; /* APB device req/gnt number */ ++ u32 ahb_reqn_tx; /* AHB DMAC req/ack number (tx) */ ++ u32 ahb_reqn_rx; /* AHB DMAC req/ack number (rx) */ ++ u32 bus_sel; /* APBBR_ADDRSEL_APB(0) or APBBR_ADDRSEL_AHB(1) */ ++} DMAD_APB_REQN_ROUTE; ++ ++#ifdef CONFIG_PLAT_AG102 ++ ++/* APB Bridge DMA request number re-route table. Indexed by APB DMA req/gnt ++ * number. 
*/ ++static DMAD_APB_REQN_ROUTE apb_reqn_route_table[] = { ++ {0x00, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x01, DMAC_REQN_CFC, DMAC_REQN_CFC, APBBR_ADDRSEL_APB}, ++ {0x02, 0x00, 0x00, APBBR_ADDRSEL_APB}, ++ {0x03, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x04, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ //MOD by river 2010.10.20 ++ {0x05, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ //End MOD by river 2010.10.20 ++ {0x06, DMAC_REQN_I2SAC97TX, DMAC_REQN_I2SAC97RX, APBBR_ADDRSEL_APB}, ++ {0x07, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ //MOD by river 2010.10.20 ++ {0x08, DMAC_REQN_SDC, DMAC_REQN_SDC, APBBR_ADDRSEL_APB}, ++ //End MOD by river 2010.10.20 ++ {0x09, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0a, DMAC_REQN_UART2TX, DMAC_REQN_UART2RX, APBBR_ADDRSEL_APB}, ++ {0x0b, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0c, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0d, DMAC_REQN_I2SAC97TX, DMAC_REQN_I2SAC97RX, APBBR_ADDRSEL_APB}, ++ {0x0e, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0f, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++}; ++ ++#else /* CONFIG_PLAT_AG102 */ ++ ++/* APB Bridge DMA request number re-route table. Indexed by APB DMA req/gnt ++ * number. */ ++static DMAD_APB_REQN_ROUTE apb_reqn_route_table[] = { ++ {0x00, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x01, DMAC_REQN_CFC, DMAC_REQN_CFC, APBBR_ADDRSEL_APB}, ++ {0x02, DMAC_REQN_SSP, DMAC_REQN_SSP, APBBR_ADDRSEL_APB}, ++ {0x03, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x04, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x05, DMAC_REQN_SDC, DMAC_REQN_SDC, APBBR_ADDRSEL_APB}, ++ {0x06, DMAC_REQN_I2SAC97TX, DMAC_REQN_I2SAC97RX, APBBR_ADDRSEL_APB}, ++/* for amerald ++ { 0x07, 0x00, 0x00, APBBR_ADDRSEL_AHB },*/ ++ {0x07, APBBR_REQN_SDC_AMERALD, APBBR_REQN_SDC_AMERALD, ++ APBBR_ADDRSEL_AHB}, ++/* for amerald ac97 ++ { 0x08, 0x00, 0x00, APBBR_ADDRSEL_AHB }, */ ++ {0x08, APBBR_REQN_I2SAC97TX_AMERALD, APBBR_REQN_I2SAC97TX_AMERALD, ++ APBBR_ADDRSEL_APB}, ++ {0x09, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0a, DMAC_REQN_UART2TX, DMAC_REQN_UART2RX, APBBR_ADDRSEL_APB}, ++ {0x0b, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0c, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0d, DMAC_REQN_I2SAC97TX, DMAC_REQN_I2SAC97RX, APBBR_ADDRSEL_APB}, ++ {0x0e, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++ {0x0f, 0x00, 0x00, APBBR_ADDRSEL_AHB}, ++}; ++ ++#endif /* CONFIG_PLAT_AG102 */ ++ ++/* system irq number (per channel, apb) */ ++static const unsigned int apb_irqs[DMAD_APB_MAX_CHANNELS] = { ++ APBBRG_FTAPBBRG020S_0_IRQ0, ++ APBBRG_FTAPBBRG020S_0_IRQ1, ++ APBBRG_FTAPBBRG020S_0_IRQ2, ++ APBBRG_FTAPBBRG020S_0_IRQ3, ++}; ++ ++#endif ++ ++/* Driver data structure, one instance per system */ ++typedef struct DMAD_DATA_STRUCT { ++ /* Driver data initialization flag */ ++ ++ /* DMA queue pool access control object */ ++ spinlock_t drq_pool_lock; ++ ++ /* DMA queue base address, to ease alloc/free flow */ ++ dmad_drq *drq_pool; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ /* DMA queue for AHB DMA channels */ ++ dmad_drq *ahb_drq_pool; ++#endif ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ /* DMA queue for APB DMA channels */ ++ dmad_drq *apb_drq_pool; ++#endif ++ ++} DMAD_DATA; ++ ++/* Driver data structure instance, one instance per system */ ++static DMAD_DATA dmad __attribute__ ((aligned(4))) = { ++ .drq_pool_lock = __SPIN_LOCK_UNLOCKED(dmad.drq_pool_lock), ++// .drq_pool_lock = SPIN_LOCK_UNLOCKED, ++ .drq_pool = 0, ++#ifdef CONFIG_PLATFORM_AHBDMA ++ .ahb_drq_pool = 0, ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ .apb_drq_pool = 0, ++#endif ++}; ++ ++/** ++ * dmad_next_drb - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @node : [in] The node number to 
lookup its next node ++ * @drb : [out] The drb next to the "node" node number ++ * ++ * Lookup next DRB of the specified node number. "drb" is null if reaches end ++ * of the list. ++ */ ++static inline void dmad_next_drb(dmad_drb * drb_pool, u32 node, dmad_drb ** drb) ++{ ++ if (likely(drb_pool[node].next != 0)) ++ *drb = &drb_pool[drb_pool[node].next]; ++ else ++ *drb = 0; ++} ++ ++/** ++ * dmad_prev_drb - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @node : [in] The node number to lookup its previous node ++ * @drb : [out] The drb previous to the "node" node number ++ * ++ * Lookup previous DRB of the specified node number. "drb" is null if reaches ++ * head-end of the list. ++ */ ++static inline void dmad_prev_drb(dmad_drb * drb_pool, u32 node, dmad_drb ** drb) ++{ ++ if (unlikely(drb_pool[node].prev != 0)) ++ *drb = &drb_pool[drb_pool[node].prev]; ++ else ++ *drb = 0; ++} ++ ++/** ++ * dmad_detach_node - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @node : [in] The node to be dettached from the queue ++ * ++ * Detached a DRB specified by the node number from the queue. The head and ++ * tail records will be updated accordingly. ++ */ ++static inline void dmad_detach_node(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, u32 node) ++{ ++ if (likely(drb_pool[node].prev != 0)) { ++ /* prev->next = this->next (= 0, if this is a tail) */ ++ drb_pool[drb_pool[node].prev].next = drb_pool[node].next; ++ } else { ++ /* this node is head, move head to next node ++ * (= 0, if this is the only one node) */ ++ *head = drb_pool[node].next; ++ } ++ ++ if (unlikely(drb_pool[node].next != 0)) { ++ /* next->prev = this->prev (= 0, if this is a head) */ ++ drb_pool[drb_pool[node].next].prev = drb_pool[node].prev; ++ } else { ++ /* this node is tail, move tail to previous node ++ * (= 0, if this is the only one node) */ ++ *tail = drb_pool[node].prev; ++ } ++ ++ drb_pool[node].prev = drb_pool[node].next = 0; ++} ++ ++/** ++ * dmad_detach_head - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @drb : [out] The detached head node; null if the queue is empty ++ * ++ * Detached a DRB from the head of the queue. The head and tail records will ++ * be updated accordingly. ++ */ ++static inline void dmad_detach_head(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, dmad_drb ** drb) ++{ ++ if (unlikely(*head == 0)) { ++ *drb = NULL; ++ return; ++ } ++ ++ *drb = &drb_pool[*head]; ++ ++ if (likely((*drb)->next != 0)) { ++ /* next->prev = this->prev (= 0, if this is a head) */ ++ drb_pool[(*drb)->next].prev = 0; ++ ++ /* prev->next = this->next (do nothing, if this is a head) */ ++ ++ /* head = this->next */ ++ *head = (*drb)->next; ++ } else { ++ /* head = tail = 0 */ ++ *head = 0; ++ *tail = 0; ++ } ++ ++ /* this->prev = this->next = 0 (do nothing, if save code size) */ ++ (*drb)->prev = (*drb)->next = 0; ++} ++ ++/** ++ * dmad_get_head - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @drb : [out] The head node; null if the queue is empty ++ * ++ * Get a DRB from the head of the queue. The head and tail records remain ++ * unchanged. 
++ */ ++static inline void dmad_get_head(dmad_drb * drb_pool, const u32 * head, ++ const u32 * tail, dmad_drb ** drb) ++{ ++ if (unlikely(*head == 0)) { ++ *drb = NULL; ++ return; ++ } ++ ++ *drb = &drb_pool[*head]; ++} ++ ++/** ++ * dmad_detach_tail - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @drb : [out] The tail node; null if the queue is empty ++ * ++ * Detached a DRB from the head of the queue. The head and tail records will ++ * be updated accordingly. ++ */ ++static inline void dmad_detach_tail(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, dmad_drb ** drb) ++{ ++ if (unlikely(*tail == 0)) { ++ *drb = NULL; ++ return; ++ } ++ ++ *drb = &drb_pool[*tail]; ++ ++ if (likely((*drb)->prev != 0)) { ++ /* prev->next = this->next (= 0, if this is a tail) */ ++ drb_pool[(*drb)->prev].next = 0; ++ ++ /* next->prev = this->prev (do nothing, if this is a tail) */ ++ ++ /* tail = this->prev */ ++ *tail = (*drb)->prev; ++ } else { ++ /* head = tail = 0 */ ++ *head = 0; ++ *tail = 0; ++ } ++ ++ /* this->next = this->prev = 0 (do nothing, if save code size) */ ++ (*drb)->prev = (*drb)->next = 0; ++} ++ ++/** ++ * dmad_get_tail - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @drb : [out] The tail node; null if the queue is empty ++ * ++ * Get a DRB from the tail of the queue. The head and tail records remain ++ * unchanged. ++ */ ++static inline void dmad_get_tail(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, dmad_drb ** drb) ++{ ++ if (unlikely(*tail == 0)) { ++ *drb = NULL; ++ return; ++ } ++ ++ *drb = &drb_pool[*tail]; ++} ++ ++/** ++ * dmad_attach_head - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @node : [in] The node to be attached ++ * ++ * Attach a DRB node to the head of the queue. The head and tail records will ++ * be updated accordingly. ++ */ ++static inline void dmad_attach_head(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, u32 node) ++{ ++ if (likely(*head != 0)) { ++ /* head->prev = this */ ++ drb_pool[*head].prev = node; ++ ++ /* this->next = head */ ++ drb_pool[node].next = *head; ++ /* this->prev = 0 */ ++ drb_pool[node].prev = 0; ++ ++ /* head = node */ ++ *head = node; ++ } else { ++ /* head = tail = node */ ++ *head = *tail = node; ++ drb_pool[node].prev = drb_pool[node].next = 0; ++ } ++} ++ ++/** ++ * dmad_attach_head - static function ++ * @drb_pool : [in] The raw DRB pool of a DMA channel ++ * @head : [in/out] Reference to the head node number ++ * @tail : [in/out] Reference to the tail node number ++ * @node : [in] The node to be attached ++ * ++ * Attach a DRB node to the tail of the queue. The head and tail records will ++ * be updated accordingly. 
++ */ ++static inline void dmad_attach_tail(dmad_drb * drb_pool, ++ u32 * head, u32 * tail, u32 node) ++{ ++ if (likely(*tail != 0)) { ++ /* tail->next = this */ ++ drb_pool[*tail].next = node; ++ ++ /* this->prev = tail */ ++ drb_pool[node].prev = *tail; ++ /* this->next = 0 */ ++ drb_pool[node].next = 0; ++ ++ /* tail = node */ ++ *tail = node; ++ } else { ++ /* head = tail = node */ ++ *head = *tail = node; ++ drb_pool[node].prev = drb_pool[node].next = 0; ++ } ++} ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++/** ++ * dmad_ahb_isr - AHB DMA interrupt service routine ++ * ++ * @irq : [in] The irq number ++ * @dev_id : [in] The identifier to identify the asserted channel ++ * ++ * This is the ISR that services all AHB DMA channels. ++ */ ++static irqreturn_t dmad_ahb_isr(int irq, void *dev_id) ++{ ++ dmad_drq *drq; ++ dmad_drb *drb, *drb_iter; ++ u32 channel = ((u32) dev_id) - 1; ++ u8 tc_int = 0; ++ u8 err_int = 0; ++ u8 abt_int = 0; ++ u8 cpl_events = 1; ++ ++ dmad_dbg("%s() >> channel(%d)\n", __func__, channel); ++ ++ if (channel >= DMAD_AHB_MAX_CHANNELS) { ++ dmad_err("%s() invlaid channel number: %d!\n", ++ __func__, channel); ++ return IRQ_HANDLED; ++ } ++ ++ /* Fetch channel's DRQ struct (DMA Request Queue) */ ++ drq = (dmad_drq *) & dmad.ahb_drq_pool[channel]; ++ ++ /* Check DMA status register to get channel number */ ++ if (likely(getbl(channel, DMAC_INT_TC))) { ++ ++ /* Mark as TC int */ ++ tc_int = 1; ++ ++ /* DMAC INT TC status clear */ ++ setbl(channel, DMAC_INT_TC_CLR); ++ ++ } else if (getbl(channel + DMAC_INT_ERR_SHIFT, DMAC_INT_ERRABT)) { ++ ++ /* Mark as ERR int */ ++ err_int = 1; ++ ++ /* DMAC INT ERR status clear */ ++ setbl(channel + DMAC_INT_ERR_CLR_SHIFT, DMAC_INT_ERRABT_CLR); ++ ++ } else if (getbl(channel + DMAC_INT_ABT_SHIFT, DMAC_INT_ERRABT)) { ++ ++ /* Mark as ABT int */ ++ abt_int = 1; ++ ++ /* DMAC INT ABT status clear */ ++ setbl(channel + DMAC_INT_ABT_CLR_SHIFT, DMAC_INT_ERRABT_CLR); ++ ++ } else { ++ ++ dmad_err("%s() possible false-fired ahb dma int," ++ "channel %d status-reg: tc(0x%08x) arrabt(0x%08x)\n", ++ __func__, channel, ++ inl(DMAC_INT_TC), inl(DMAC_INT_ERRABT_CLR)); ++ ++ /* Stop DMA channel (make sure the channel will be stopped) */ ++ clrbl(DMAC_CSR_CH_EN_BIT, drq->channel_base + DMAC_CSR_OFFSET); ++ ++ return IRQ_HANDLED; ++ } ++ ++ /* DMAC ++ * Stop DMA channel temporarily */ ++ dmad_disable_channel(drq); ++ ++ spin_lock(&drq->drb_pool_lock); ++ ++ /* Lookup/detach latest submitted DRB (DMA Request Block) from ++ * the DRQ (DMA Request Queue), so ISR could kick off next DRB */ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb); ++ if (drb == NULL) { ++ spin_unlock(&drq->drb_pool_lock); ++ /* submitted list could be empty if client cancel all requests ++ * of the channel. */ ++ return IRQ_HANDLED; ++ } ++ ++ /* release blocking of drb-allocation, if any ... 
*/ ++ if (unlikely((drq->fre_head == 0) && ++ (drq->flags & DMAD_FLAGS_SLEEP_BLOCK))) { ++ complete_all(&drq->drb_alloc_sync); ++ } ++ ++ /* Process DRBs according to interrupt reason */ ++ if (tc_int) { ++ ++ dmad_dbg("dma finish\n"); ++ ++ dmad_dbg("finish drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x)\n", ++ drb->node, (u32) drb, drb->src_addr, ++ drb->dst_addr, drb->req_cycle); ++ ++ if (drb->req_cycle == 0) ++ cpl_events = 0; ++ ++ // Mark DRB state as completed ++ drb->state = DMAD_DRB_STATE_COMPLETED; ++ if (cpl_events && drb->sync) ++ complete_all(drb->sync); ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, ++ &drq->fre_tail, drb->node); ++ ++ // Check whether there are pending requests in the DRQ ++ if (drq->sbt_head != 0) { ++ ++ // Lookup next DRB (DMA Request Block) ++ drb_iter = &drq->drb_pool[drq->sbt_head]; ++ ++ dmad_dbg("exec drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x)\n", ++ drb_iter->node, (u32) drb_iter, ++ drb_iter->src_addr, drb_iter->dst_addr, ++ drb_iter->req_cycle); ++ ++ // Kick-off DMA for next DRB ++ // - Source and destination address ++ if (drq->flags & DMAD_DRQ_DIR_A1_TO_A0) { ++ outl(drb_iter->addr1, drq->src_port); ++ outl(drb_iter->addr0, drq->dst_port); ++ } else { ++ outl(drb_iter->addr0, drq->src_port); ++ outl(drb_iter->addr1, drq->dst_port); ++ } ++ ++ /* - Transfer size (in units of source width) */ ++ outl(drb_iter->req_cycle, drq->cyc_port); ++ ++ /* Kick off next request */ ++ dmad_enable_channel(drq); ++ ++ drb_iter->state = DMAD_DRB_STATE_EXECUTED; ++ ++ } else { ++ /* No pending requests, keep the DMA channel stopped */ ++ } ++ ++ } else { ++ ++ dmad_err("%s() ahb dma channel %d error!\n", __func__, channel); ++ ++ /* Zero out src, dst, and size */ ++ outl(0, drq->src_port); ++ outl(0, drq->dst_port); ++ outl(0, drq->cyc_port); ++ ++ /* Remove all pending requests in the queue */ ++ drb_iter = drb; ++ while (drb_iter) { ++ ++ dmad_err("abort drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x)\n", ++ drb_iter->node, (u32) drb_iter, ++ drb_iter->src_addr, drb_iter->dst_addr, ++ drb_iter->req_cycle); ++ ++ if (drb_iter->req_cycle == 0) ++ cpl_events = 0; ++ ++ /* Mark DRB state as abort */ ++ drb_iter->state = DMAD_DRB_STATE_ABORT; ++ ++ if (cpl_events && drb_iter->sync) ++ complete_all(drb_iter->sync); ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, ++ &drq->fre_tail, drb_iter->node); ++ ++ /* Detach next submitted DRB (DMA Request Block) ++ * from the DRQ (DMA Request Queue) */ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, ++ &drq->sbt_tail, &drb_iter); ++ } ++ } ++ ++ spin_unlock(&drq->drb_pool_lock); ++ ++ /* dispatch interrupt-context level callbacks */ ++ if (cpl_events && drq->completion_cb) { ++ /* signal DMA driver that new node is available */ ++ drq->completion_cb(channel, tc_int, drq->completion_data); ++ } ++ ++ dmad_dbg("%s() <<\n", __func__); ++ ++ return IRQ_HANDLED; ++} ++ ++/** ++ * dmad_ahb_config_dir - prepare command reg according to tx direction ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @channel_cmds : [out] Reference to array of command words to be prepared with ++ * @return : none ++ * ++ * Prepare command registers according to transfer direction ... ++ * channel_cmd[0] DMAC_CSR ++ * channel_cmd[1] DMAC_CFG ++ * ++ * This function only serves as local helper. No protection wrappers. 
++ */ ++static void dmad_ahb_config_dir(dmad_chreq * ch_req, addr_t * channel_cmds) ++{ ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ dmad_ahb_chreq *ahb_req = (dmad_ahb_chreq *) (&ch_req->ahb_req); ++/* for amerald */ ++ u32 reqn0, reqn1; ++ dmad_dbg("%s() channel_cmds(0x%08x, 0x%08x)\n", ++ __func__, channel_cmds[0], channel_cmds[1]); ++/* for amerald */ ++ if ((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID) { ++ reqn0 = ahb_req->addr0_reqn; ++ reqn1 = ahb_req->addr1_reqn; ++ } else { ++ reqn0 = ch_req->channel; ++ reqn1 = ch_req->channel; ++ } ++ channel_cmds[0] &= ~(addr_t) ++ (DMAC_CSR_SRC_WIDTH_MASK | DMAC_CSR_SRCAD_CTL_MASK | ++ DMAC_CSR_DST_WIDTH_MASK | DMAC_CSR_DSTAD_CTL_MASK | ++ DMAC_CSR_MODE_MASK); ++ channel_cmds[1] &= ~(addr_t) ++ (DMAC_CFG_INT_SRC_RS_MASK | DMAC_CFG_INT_SRC_HE_MASK | ++ DMAC_CFG_INT_DST_RS_MASK | DMAC_CFG_INT_DST_HE_MASK); ++ ++ /* 0 - addr0 to addr1; 1 - addr1 to addr0 */ ++ if (ahb_req->tx_dir == 0) { ++ ++ dmad_dbg("%s() addr0 --> addr1\n", __func__); ++ ++ /* - Channel CSR ++ * DST_SEL : 0 (Master 0) ++ * SRC_SEL : 0 (Master 0) ++ * DSTAD_CTL : ahb_req->dst_ctrl ++ * SRCAD_CTL : ahb_req->src_ctrl ++ * MODE : 0 (normal) ++ * DST_WIDTH : ahb_req->dst_width ++ * SRC_WIDTH : ahb_req->src_width ++ * SRC_SIZE : 0 (burst size = 1 byte) ++ */ ++ channel_cmds[0] |= ++ (((ahb_req->addr0_width << DMAC_CSR_SRC_WIDTH_SHIFT) & ++ DMAC_CSR_SRC_WIDTH_MASK) | ++ ((ahb_req->addr0_ctrl << DMAC_CSR_SRCAD_CTL_SHIFT) & ++ DMAC_CSR_SRCAD_CTL_MASK) | ++ ((ahb_req->addr1_width << DMAC_CSR_DST_WIDTH_SHIFT) & ++ DMAC_CSR_DST_WIDTH_MASK) | ++ ((ahb_req->addr1_ctrl << DMAC_CSR_DSTAD_CTL_SHIFT) & ++ DMAC_CSR_DSTAD_CTL_MASK)); ++ ++ /* - Channel CFG ++ * SRC_RS : channel number (not reqn) ++ * SRC_HE : 0 if memory, 1 if device ++ * DST_RS : channel number (not reqn) ++ * DST_HE : 0 if memory, 1 if device ++ */ ++ if (likely(ahb_req->hw_handshake != 0)) { ++ /* Channel CSR - Enable HW-handshake mode */ ++ channel_cmds[0] |= DMAC_CSR_MODE_MASK; ++ ++ /* Channel CFG - Device REQN and HW-handshake mode */ ++#ifdef CONFIG_PLAT_AG102 ++ /* AG102 fixes this bug */ ++ if (ahb_req->addr0_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_SRC_HE_MASK | ++ ((ahb_req->addr0_reqn << ++ DMAC_CFG_INT_SRC_RS_SHIFT) ++ & ++ DMAC_CFG_INT_SRC_RS_MASK)); ++ } ++ ++ if (ahb_req->addr1_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_DST_HE_MASK | ++ ((ahb_req->addr1_reqn << ++ DMAC_CFG_INT_DST_RS_SHIFT) ++ & ++ DMAC_CFG_INT_DST_RS_MASK)); ++ } ++#else ++ /* AG101/XC5 bug */ ++/* for amerald */ ++ if (ahb_req->addr0_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_SRC_HE_MASK | ++ ((reqn0 << ++ DMAC_CFG_INT_SRC_RS_SHIFT) ++ & ++ DMAC_CFG_INT_SRC_RS_MASK)); ++ } ++ ++ if (ahb_req->addr1_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_DST_HE_MASK | ++ ((reqn1 << ++ DMAC_CFG_INT_DST_RS_SHIFT) ++ & ++ DMAC_CFG_INT_DST_RS_MASK)); ++ } ++#endif ++ } ++ ++ /* update source data width for faster cycle/byte size conversion */ ++ drq->data_width = ahb_req->addr0_width; ++ ++ /* remember channel transfer direction */ ++ drq->flags &= ~(addr_t) DMAD_DRQ_DIR_A1_TO_A0; ++ ++ } else { ++ ++ dmad_dbg("%s() addr0 <-- addr1\n", __func__); ++ ++ /* - Channel CSR ++ * DST_SEL : 0 (Master 0) ++ * SRC_SEL : 0 (Master 0) ++ * DSTAD_CTL : ahb_req->dst_ctrl ++ * SRCAD_CTL : ahb_req->src_ctrl ++ * MODE : 0 (normal) ++ * DST_WIDTH : ahb_req->dst_width ++ * SRC_WIDTH : ahb_req->src_width ++ * SRC_SIZE : 0 (burst size = 1 byte) ++ */ ++ channel_cmds[0] |= ++ 
(((ahb_req->addr1_width << DMAC_CSR_SRC_WIDTH_SHIFT) & ++ DMAC_CSR_SRC_WIDTH_MASK) | ++ ((ahb_req->addr1_ctrl << DMAC_CSR_SRCAD_CTL_SHIFT) & ++ DMAC_CSR_SRCAD_CTL_MASK) | ++ ((ahb_req->addr0_width << DMAC_CSR_DST_WIDTH_SHIFT) & ++ DMAC_CSR_DST_WIDTH_MASK) | ++ ((ahb_req->addr0_ctrl << DMAC_CSR_DSTAD_CTL_SHIFT) & ++ DMAC_CSR_DSTAD_CTL_MASK)); ++ ++ /* - Channel CFG ++ * SRC_RS : channel number (not reqn) ++ * SRC_HE : 0 if memory, 1 if device ++ * DST_RS : channel number (not reqn) ++ * DST_HE : 0 if memory, 1 if device ++ */ ++ if (likely(ahb_req->hw_handshake != 0)) { ++ /* Channel CSR - Enable HW-handshake mode */ ++ channel_cmds[0] |= DMAC_CSR_MODE_MASK; ++ ++ /* Channel CFG - Device REQN and HW-handshake mode */ ++#ifdef CONFIG_PLAT_AG102 ++ /* AG102 fixes this bug */ ++ if (ahb_req->addr1_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_SRC_HE_MASK | ++ ((ahb_req->addr1_reqn << ++ DMAC_CFG_INT_SRC_RS_SHIFT) ++ & ++ DMAC_CFG_INT_SRC_RS_MASK)); ++ } ++ ++ if (ahb_req->addr0_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_DST_HE_MASK | ++ ((ahb_req->addr0_reqn << ++ DMAC_CFG_INT_DST_RS_SHIFT) ++ & ++ DMAC_CFG_INT_DST_RS_MASK)); ++ } ++#else ++ /* AG101/XC5 bug */ ++/* for amerald */ ++ if (ahb_req->addr1_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_SRC_HE_MASK | ++ ((reqn1 << ++ DMAC_CFG_INT_SRC_RS_SHIFT) ++ & ++ DMAC_CFG_INT_SRC_RS_MASK)); ++ } ++ ++ if (ahb_req->addr0_reqn != DMAC_REQN_NONE) { ++ channel_cmds[1] |= (DMAC_CFG_INT_DST_HE_MASK | ++ ((reqn0 << ++ DMAC_CFG_INT_DST_RS_SHIFT) ++ & ++ DMAC_CFG_INT_DST_RS_MASK)); ++ } ++#endif ++ } ++ ++ /* source data width */ ++ drq->data_width = ahb_req->addr1_width; ++ ++ /* remember channel transfer direction */ ++ drq->flags |= (addr_t) DMAD_DRQ_DIR_A1_TO_A0; ++ } ++ ++ dmad_dbg("%s() channel_cmds(0x%08x, 0x%08x)\n", ++ __func__, channel_cmds[0], channel_cmds[1]); ++} ++ ++/** ++ * dmad_ahb_init - initialize a ahb dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * Register AHB DMA ISR and performs hw initialization for the given DMA ++ * channel. ++ */ ++static int dmad_ahb_init(dmad_chreq * ch_req) ++{ ++ int err = 0; ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ dmad_ahb_chreq *ahb_req = (dmad_ahb_chreq *) (&ch_req->ahb_req); ++ u32 channel = (u32) ch_req->channel; ++ addr_t channel_base = drq->channel_base; ++ addr_t channel_cmds[2]; // [0] DMAC_CSR; [1] DMAC_CFG ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ /* register interrupt handler */ ++ err = request_irq(ahb_irqs[channel], dmad_ahb_isr, 0, ++ "AHB_DMA", (void *)(channel + 1)); ++ if (unlikely(err != 0)) { ++ dmad_err("unable to request IRQ %d for AHB DMA " ++ "(error %d)\n", ahb_irqs[channel], err); ++ free_irq(ahb_irqs[channel], (void *)(channel + 1)); ++ return err; ++ } ++ ++ spin_lock_irqsave(&dmad.drq_pool_lock, lock_flags); ++ ++ /********************************************************** ++ * Following code require _safe_exit return path ++ */ ++ ++#ifdef CONFIG_PLAT_AG102 ++ /* PCU ++ * ++ * Add by Dennis 2011.03.09 ++ * set 0 to dma selection register to using AHB ++ * DMA. 
++ */ ++ if (ahb_req->dst_reqn == ahb_req->src_reqn) { ++ dmad_err ++ ("[dmad] invalid source reqn(%d) or destination reqn(%d)\n", ++ ahb_req->src_reqn, ahb_req->dst_reqn); ++ err = -EBADR; ++ goto _safe_exit; ++ } ++ outl(0, PCU_DMA_SEL); ++#else /* CONFIG_PLAT_AG102 */ ++ ++ /* PMU ++ * ++ * Route APB device DMA to an AHB DMAC channel and specify the channel ++ * number. (connection status could be read back from PMU_AHBDMA_REQACK ++ * register) ++ * ++ * Note: Only one device is routed per AHB DMA channel, the other target ++ * should be either (1) the same device (same reqn), or (2) the AHB ++ * device (reqn = 0). ++ */ ++ ++ if (ahb_req->dst_reqn != DMAC_REQN_NONE) { ++ // DMA transfer to device ++ if ((ahb_req->dst_reqn > DMAC_REQN_MAX) || ++ (ahb_ch_route_table[ahb_req->dst_reqn].route_cr == 0)) { ++ dmad_err("Invalid destination reqn(%d) " ++ "or route_cr(0x%08x)\n", ahb_req->dst_reqn, ++ (u32) ahb_ch_route_table[ahb_req->dst_reqn]. ++ route_cr); ++ err = -EBADR; ++ goto _safe_exit; ++ } ++ ++ outl(0, ahb_ch_route_table[ahb_req->dst_reqn].clear_cr); ++ outl(PMU_DMACUSED_MASK | ((channel << PMU_CHANNEL_SHIFT) & ++ PMU_CHANNEL_MASK), ++ ahb_ch_route_table[ahb_req->dst_reqn].route_cr); ++ ++ } else if (ahb_req->src_reqn != DMAC_REQN_NONE) { ++ ++ // DMA transfer from device ++ if ((ahb_req->src_reqn > DMAC_REQN_MAX) || ++ (ahb_ch_route_table[ahb_req->src_reqn].route_cr == 0)) { ++ dmad_err("Invalid source reqn(%d) or " ++ "route_cr(0x%08x)\n", ahb_req->src_reqn, ++ (u32) ahb_ch_route_table[ahb_req->src_reqn]. ++ route_cr); ++ err = -EBADR; ++ goto _safe_exit; ++ } ++ ++ outl(0, ahb_ch_route_table[ahb_req->src_reqn].clear_cr); ++ outl(PMU_DMACUSED_MASK | ((channel << PMU_CHANNEL_SHIFT) & ++ PMU_CHANNEL_MASK), ++ ahb_ch_route_table[ahb_req->src_reqn].route_cr); ++ } ++#endif /* CONFIG_PLAT_AG102 */ ++ ++ /* DMAC (Controller Setting) */ ++ ++ /* - INT TC/ERR/ABT status clear */ ++ setbl(channel, DMAC_INT_TC_CLR); ++ setbl(channel + DMAC_INT_ERR_CLR_SHIFT, DMAC_INT_ERRABT_CLR); ++ setbl(channel + DMAC_INT_ABT_CLR_SHIFT, DMAC_INT_ERRABT_CLR); ++ ++ // - CSR (enable DMAC, set M0 & M1 default to little endian) ++ outl(DMAC_DMACEN_MASK | ++ ((DMAC_ENDIAN_LITTLE << DMAC_M0ENDIAN_BIT) & DMAC_M0ENDIAN_MASK) | ++ ((DMAC_ENDIAN_LITTLE << DMAC_M1ENDIAN_BIT) & DMAC_M1ENDIAN_MASK), ++ DMAC_CSR); ++ ++ /* DMAC (Channel-Specific Setting) */ ++ /* - SYNC */ ++ if (ahb_req->sync) ++ setbl(channel, DMAC_SYNC); ++ else ++ clrbl(channel, DMAC_SYNC); ++ ++ /* - Channel CSR ++ * CH_EN : 0 (disable) ++ * DST_SEL : 0 (Master 0) ++ * SRC_SEL : 0 (Master 0) ++ * DSTAD_CTL : ahb_req->dst_ctrl ++ * SRCAD_CTL : ahb_req->src_ctrl ++ * MODE : 0 (normal) ++ * DST_WIDTH : ahb_req->dst_width ++ * SRC_WIDTH : ahb_req->src_width ++ * ABT : 0 (not abort) ++ * SRC_SIZE : 0 (burst size = 1 byte) ++ * PROT1 : 0 (user mode) ++ * PROT2 : 0 (bot bufferable) ++ * PROT3 : 0 (not cacheable) ++ * CHPRI : ahb_req->priority ++ * DMA_FF_TH : 0 (FIA320 only, threshold = 1) ++ * TC_MSK : 0 (TC counter status enable) ++ */ ++ channel_cmds[0] = (ahb_req->priority << DMAC_CSR_CHPRI_SHIFT) & ++ DMAC_CSR_CHPRI_MASK; ++ channel_cmds[0] |= (ahb_req->burst_size << DMAC_CSR_SRC_SIZE_SHIFT) & ++ DMAC_CSR_SRC_SIZE_MASK; ++ ++ // - Channel CFG ++ // INT_TC_MSK : 0 (enable TC int) ++ // INT_ERR_MSK : 0 (enable ERR int) ++ // INT_ABT_MSK : 0 (enable ABT int) ++ // SRC_RS : 0 ++ // SRC_HE : 0 ++ // BUSY : r/o ++ // DST_RS : 0 ++ // DST_HE : 0 ++ // LLP_CNT : r/o ++ channel_cmds[1] = 0; ++ ++ if (0 == ++ (ch_req->flags & (DMAD_FLAGS_RING_MODE | 
DMAD_FLAGS_BIDIRECTION))) ++ ahb_req->tx_dir = 0; ++ ++ dmad_ahb_config_dir(ch_req, channel_cmds); ++ ++ outl(channel_cmds[0], channel_base + DMAC_CSR_OFFSET); ++ outl(channel_cmds[1], channel_base + DMAC_CFG_OFFSET); ++ ++ /* SRCADR and DESADR */ ++ outl(0, (addr_t) drq->src_port); ++ outl(0, (addr_t) drq->dst_port); ++ ++ /* CYC (transfer size) */ ++ outl(0, (addr_t) drq->cyc_port); ++ ++ /* LLP */ ++ outl(0, channel_base + DMAC_LLP_OFFSET); ++ ++ /* TOT_SIZE - not now */ ++ ++_safe_exit: ++ ++ spin_unlock_irqrestore(&dmad.drq_pool_lock, lock_flags); ++ ++ return err; ++} ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++/** ++ * dmad_apb_isr - APB DMA interrupt service routine ++ * ++ * @irq : [in] The irq number ++ * @dev_id : [in] The identifier to identify the asserted channel ++ * ++ * This is the ISR that services all APB DMA channels. ++ */ ++static irqreturn_t dmad_apb_isr(int irq, void *dev_id) ++{ ++ dmad_drq *drq; ++ dmad_drb *drb, *drb_iter; ++ u32 channel = ((u32) dev_id) - 1; ++ u32 status; ++ u8 finish_int = 0; ++ u8 err_int = 0; ++ u8 cpl_events = 1; ++ ++ dmad_dbg("%s() >> channel(%d)\n", __func__, channel); ++ ++ if (channel >= DMAD_APB_MAX_CHANNELS) { ++ dmad_err("%s() invlaid channel number: %d!\n", ++ __func__, channel); ++ return IRQ_HANDLED; ++ } ++ ++ /* Lookup channel's DRQ (DMA Request Queue) */ ++ drq = (dmad_drq *) & dmad.apb_drq_pool[channel]; ++ ++ /* - Check DMA status register to get channel number */ ++ status = inl((addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ if (likely(status & APBBR_DMA_FINTST_MASK)) { ++ ++ /*dmad_dbg("apb dma int status: finish (0x%08x)\n", status); */ ++ finish_int = 1; ++ ++ /* APB DMA finish int status clear */ ++ clrbl(APBBR_DMA_FINTST_BIT, ++ (addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ } else if (status & APBBR_DMA_ERRINTST_MASK) { ++ ++ /* Perform DMA error checking if no valid channel was found ++ * who assert the finish signal. */ ++ dmad_err("apb dma int status: err (0x%08x)\n", status); ++ ++ /* Mark as error int */ ++ err_int = 1; ++ ++ /* APB DMA error int status clear */ ++ clrbl(APBBR_DMA_ERRINTST_BIT, ++ (addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ } else { ++ ++ dmad_err("%s() possible false-fired apb dma int," ++ " channel %d status-reg: 0x%08x\n", ++ __func__, channel, status); ++ ++ /* Stop DMA channel (make sure the channel will be stopped) */ ++ clrbl(APBBR_DMA_CHEN_BIT, ++ (addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ return IRQ_HANDLED; ++ } ++ ++ /* Stop DMA channel (make sure the channel will be stopped) */ ++ dmad_disable_channel(drq); ++ ++ spin_lock(&drq->drb_pool_lock); ++ ++ /* Lookup/detach latest submitted DRB (DMA Request Block) from */ ++ /* the DRQ (DMA Request Queue), so ISR could kick off next DRB */ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb); ++ ++ if (unlikely(drb == NULL)) { ++ spin_unlock(&drq->drb_pool_lock); ++ return IRQ_HANDLED; ++ } ++ ++ /* release blocking of drb-allocation, if any ... 
*/ ++ if (unlikely((drq->fre_head == 0) && ++ (drq->flags & DMAD_FLAGS_SLEEP_BLOCK))) { ++ complete_all(&drq->drb_alloc_sync); ++ } ++ ++ /* Process DRBs according to the cause of this interrupt */ ++ if (likely(finish_int)) { ++ ++ if (drb->req_cycle == 0) ++ cpl_events = 0; ++ ++ /* Mark DRB state as completed */ ++ drb->state = DMAD_DRB_STATE_COMPLETED; ++ if (cpl_events && drb->sync) ++ complete_all(drb->sync); ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, ++ &drq->fre_tail, drb->node); ++ ++ /* Check whether there are pending requests in the DRQ */ ++ if (drq->sbt_head != 0) { ++ ++ /* Lookup next DRB (DMA Request Block) */ ++ drb_iter = &drq->drb_pool[drq->sbt_head]; ++ ++ dmad_dbg("exec drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x)\n", ++ drb_iter->node, (u32) drb_iter, ++ drb_iter->src_addr, drb_iter->dst_addr, ++ drb_iter->req_cycle); ++ ++ /* Kick-off DMA for next DRB */ ++ /* - Source and destination address */ ++ if (drq->flags & DMAD_DRQ_DIR_A1_TO_A0) { ++ outl(drb_iter->addr1, drq->src_port); ++ outl(drb_iter->addr0, drq->dst_port); ++ } else { ++ outl(drb_iter->addr0, drq->src_port); ++ outl(drb_iter->addr1, drq->dst_port); ++ } ++ ++ /* - Transfer size (in units of source width) */ ++ outl(drb_iter->req_cycle, drq->cyc_port); ++ ++ /* Kick off next request */ ++ dmad_enable_channel(drq); ++ ++ drb_iter->state = DMAD_DRB_STATE_EXECUTED; ++ ++ } else { ++ /* No pending requests, keep the DMA channel stopped */ ++ } ++ ++ } else if (err_int) { ++ ++ dmad_err("%s() apb dma channel %d error!\n", __func__, channel); ++ ++ /* Zero out src, dst, and size */ ++ outl(0, drq->src_port); ++ outl(0, drq->dst_port); ++ outl(0, drq->cyc_port); ++ ++ /* Remove all pending requests in the queue */ ++ drb_iter = drb; ++ while (drb_iter) { ++ ++ dmad_err("abort drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x)\n", ++ drb_iter->node, (u32) drb_iter, ++ drb_iter->src_addr, drb_iter->dst_addr, ++ drb_iter->req_cycle); ++ ++ if (drb_iter->req_cycle == 0) ++ cpl_events = 0; ++ ++ /* Mark DRB state as abort */ ++ drb_iter->state = DMAD_DRB_STATE_ABORT; ++ ++ if (cpl_events && drb_iter->sync) ++ complete_all(drb_iter->sync); ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, ++ &drq->fre_tail, drb_iter->node); ++ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, ++ &drq->sbt_tail, &drb_iter); ++ } ++ } ++ ++ spin_unlock(&drq->drb_pool_lock); ++ ++ /* dispatch interrupt-context level callbacks */ ++ if (cpl_events && drq->completion_cb) { ++ /* signal DMA driver that new node is available */ ++ drq->completion_cb(channel, status, drq->completion_data); ++ } ++ ++ dmad_dbg("%s() <<\n", __func__); ++ ++ return IRQ_HANDLED; ++} ++ ++/** ++ * dmad_apb_config_dir - prepare command reg according to tx direction ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @channel_cmds : [out] Reference to array of command words to be prepared with ++ * @return : none ++ * ++ * Prepare command registers according to transfer direction ... ++ * channel_cmd[0] APBBR_DMA_CMD ++ * ++ * This function only serves as local helper. No protection wrappers. 
++ */ ++static void dmad_apb_config_dir(dmad_chreq * ch_req, addr_t * channel_cmds) ++{ ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ dmad_apb_chreq *apb_req = (dmad_apb_chreq *) (&ch_req->apb_req); ++ ++ dmad_dbg("%s() channel_cmd(0x%08x)\n", __func__, channel_cmds[0]); ++ ++ *channel_cmds &= ~(addr_t) ++ (APBBR_DMA_SRCADDRINC_MASK | APBBR_DMA_DSTADDRINC_MASK | ++ APBBR_DMA_DSTADDRSEL_MASK | APBBR_DMA_DREQSEL_MASK | ++ APBBR_DMA_SRCADDRSEL_MASK | APBBR_DMA_SREQSEL_MASK); ++ ++ /* 0 - addr0 to addr1; 1 - addr1 to addr0 */ ++ if (apb_req->tx_dir == 0) { ++ ++ dmad_dbg("%s() addr0 --> addr1\n", __func__); ++ ++ /* APB Bridge DMA (Channel Setting) ++ * - CMD ++ * SRCADR : apb_req->src_ctrl ++ * DESADR : apb_req->dst_ctrl ++ */ ++ *channel_cmds |= ++ (((apb_req->addr0_ctrl << APBBR_DMA_SRCADDRINC_SHIFT) & ++ APBBR_DMA_SRCADDRINC_MASK) | ++ ((apb_req->addr1_ctrl << APBBR_DMA_DSTADDRINC_SHIFT) & ++ APBBR_DMA_DSTADDRINC_MASK)); ++ ++ /* - CMD ++ * DESADRSEL : AHB/APB, driver auto-conf ++ * DREQSEL ++ */ ++ *channel_cmds |= ++ ((addr_t) (APBBR_DMA_DSTADDRSEL_MASK & ++ (apb_reqn_route_table[apb_req->addr1_reqn]. ++ bus_sel << APBBR_DMA_DSTADDRSEL_BIT)) | ++ (((addr_t) apb_req-> ++ addr1_reqn << APBBR_DMA_DREQSEL_SHIFT) & ++ APBBR_DMA_DREQSEL_MASK)); ++ ++ /* - CMD ++ * SRCADRSEL : AHB/APB, driver auto-conf ++ * SREQSEL ++ */ ++ *channel_cmds |= ++ ((addr_t) (APBBR_DMA_SRCADDRSEL_MASK & ++ (apb_reqn_route_table[apb_req->addr0_reqn]. ++ bus_sel << APBBR_DMA_SRCADDRSEL_BIT)) | ++ (((addr_t) apb_req-> ++ addr0_reqn << APBBR_DMA_SREQSEL_SHIFT) & ++ APBBR_DMA_SREQSEL_MASK)); ++ ++ drq->flags &= ~(addr_t) DMAD_DRQ_DIR_A1_TO_A0; ++ ++ } else { ++ ++ dmad_dbg("%s() addr0 <-- addr1\n", __func__); ++ ++ /* APB Bridge DMA (Channel Setting) ++ * - CMD ++ * SRCADR : apb_req->src_ctrl ++ * DESADR : apb_req->dst_ctrl ++ */ ++ *channel_cmds |= ++ (((apb_req->addr1_ctrl << APBBR_DMA_SRCADDRINC_SHIFT) & ++ APBBR_DMA_SRCADDRINC_MASK) | ++ ((apb_req->addr0_ctrl << APBBR_DMA_DSTADDRINC_SHIFT) & ++ APBBR_DMA_DSTADDRINC_MASK)); ++ ++ /* - CMD ++ * DESADRSEL : AHB/APB, driver auto-conf ++ * DREQSEL ++ */ ++ *channel_cmds |= ((addr_t) (APBBR_DMA_DSTADDRSEL_MASK & ++ (apb_reqn_route_table ++ [apb_req->addr0_reqn]. ++ bus_sel << ++ APBBR_DMA_DSTADDRSEL_BIT)) | ++ (((addr_t) apb_req-> ++ addr0_reqn << APBBR_DMA_DREQSEL_SHIFT) & ++ APBBR_DMA_DREQSEL_MASK)); ++ ++ /* - CMD ++ * SRCADRSEL : AHB/APB, driver auto-conf ++ * SREQSEL ++ */ ++ *channel_cmds |= ((addr_t) (APBBR_DMA_SRCADDRSEL_MASK & ++ (apb_reqn_route_table ++ [apb_req->addr1_reqn]. ++ bus_sel << ++ APBBR_DMA_SRCADDRSEL_BIT)) | ++ (((addr_t) apb_req-> ++ addr1_reqn << APBBR_DMA_SREQSEL_SHIFT) & ++ APBBR_DMA_SREQSEL_MASK)); ++ ++ drq->flags |= (addr_t) DMAD_DRQ_DIR_A1_TO_A0; ++ } ++ ++ dmad_dbg("%s() channel_cmd(0x%08x)\n", __func__, channel_cmds[0]); ++} ++ ++/** ++ * dmad_apb_init - initialize a apb dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * Register APB DMA ISR and performs hw initialization for the given DMA ++ * channel. 
++ */ ++static int dmad_apb_init(dmad_chreq * ch_req) ++{ ++ int err = 0; ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ dmad_apb_chreq *apb_req = (dmad_apb_chreq *) (&ch_req->apb_req); ++ u32 channel = (u32) ch_req->channel; ++ addr_t channel_cmd = 0; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ /* register interrupt handler */ ++ err = request_irq(apb_irqs[channel], dmad_apb_isr, 0, ++ "APB_DMA", (void *)(channel + 1)); ++ if (unlikely(err != 0)) { ++ dmad_err("unable to request IRQ %d for APB DMA (error %d)\n", ++ apb_irqs[channel], err); ++ free_irq(apb_irqs[channel], (void *)(channel + 1)); ++ return err; ++ } ++ ++ spin_lock_irqsave(&dmad.drq_pool_lock, lock_flags); ++ ++ /********************************************************** ++ * Following code require _safe_exit return path ++ */ ++ ++#ifdef CONFIG_PLAT_AG102 ++ ++ /* PCU ++ * ++ */ ++ //ADD by river 2010.10.20 ++ if (unlikely((apb_req->src_reqn > APBBR_REQN_MAX) || ++ (apb_req->dst_reqn > APBBR_REQN_MAX))) { ++ dmad_err("Invalid source reqn(%d) or destination reqn(%d)\n", ++ apb_req->src_reqn, apb_req->dst_reqn); ++ err = -EBADR; ++ goto _safe_exit; ++ } ++ ++ if (apb_req->src_reqn != APBBR_REQN_NONE) { ++ u32 ahb_reqn; ++ ++ if (apb_req->tx_dir == DMAD_DIR_A0_TO_A1) ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->src_reqn].ahb_reqn_tx; ++ else ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->src_reqn].ahb_reqn_rx; ++ ++ } ++ ++ if (apb_req->dst_reqn != APBBR_REQN_NONE) { ++ u32 ahb_reqn; ++ ++ if (apb_req->tx_dir == DMAD_DIR_A0_TO_A1) ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->dst_reqn].ahb_reqn_tx; ++ else ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->dst_reqn].ahb_reqn_rx; ++ ++ } ++ //End ADD by river 2010.10.20 ++ ++#else /* CONFIG_PLAT_AG102 */ ++ ++ /* PMU ++ * - Undo APB device DMA to AHB DMAC channel routing. 
(connection status ++ * is obtained from reading back the PMU_AHBDMA_REQACK register) ++ */ ++ if (unlikely((apb_req->src_reqn > APBBR_REQN_MAX) || ++ (apb_req->dst_reqn > APBBR_REQN_MAX))) { ++ dmad_err("Invalid source reqn(%d) or destination reqn(%d)\n", ++ apb_req->src_reqn, apb_req->dst_reqn); ++ err = -EBADR; ++ goto _safe_exit; ++ } ++ ++ if (apb_req->src_reqn != APBBR_REQN_NONE) { ++ u32 ahb_reqn; ++ ++ if (apb_req->tx_dir == DMAD_DIR_A0_TO_A1) ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->src_reqn].ahb_reqn_tx; ++ else ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->src_reqn].ahb_reqn_rx; ++ ++ outl(0, ahb_ch_route_table[ahb_reqn].clear_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].route_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].clear_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].route_cr); ++ } ++ ++ if (apb_req->dst_reqn != APBBR_REQN_NONE) { ++ u32 ahb_reqn; ++ ++ if (apb_req->tx_dir == DMAD_DIR_A0_TO_A1) ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->dst_reqn].ahb_reqn_tx; ++ else ++ ahb_reqn = ++ apb_reqn_route_table[apb_req->dst_reqn].ahb_reqn_rx; ++ ++ outl(0, ahb_ch_route_table[ahb_reqn].clear_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].route_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].clear_cr); ++ outl(0, ahb_ch_route_table[ahb_reqn].route_cr); ++ } ++#endif /* CONFIG_PLAT_AG102 */ ++ ++ /* APB Bridge DMA (Channel Setting) ++ * - CMD ++ * ENBDIS : 0 (disable for now) ++ * FININTSTS : 0 (clear finishing interrupt status) ++ * FININTENB : 1 (enable finishing interrupt) ++ * BURMOD : apb_req->burst_mode ++ * ERRINTSTS : 0 (clear error interrupt status) ++ * ERRINTENB : 1 (enable error interrupt) ++ * SRCADRSEL : AHB/APB, driver auto-conf ++ * DESADRSEL : AHB/APB, driver auto-conf ++ * SRCADR : apb_req->src_ctrl ++ * DESADR : apb_req->dst_ctrl ++ * REQSEL : apb_req->src_reqn ++ * DATAWIDTH : apb_req->data_width ++ */ ++ ++ /* - CMD ++ * ENBDIS ++ * FININTSTS ++ * FININTENB ++ * BURMOD ++ * ERRINTSTS ++ * ERRINTENB ++ * DATAWIDTH ++ */ ++ channel_cmd = ++ ((addr_t) APBBR_DMA_FINTEN_MASK | APBBR_DMA_ERRINTEN_MASK | ++ ((apb_req-> ++ burst_mode << APBBR_DMA_BURST_BIT) & APBBR_DMA_BURST_MASK) | ++ ((apb_req-> ++ data_width << APBBR_DMA_DATAWIDTH_SHIFT) & ++ APBBR_DMA_DATAWIDTH_MASK)); ++ ++ /* - CMD ++ * SRCADRSEL ++ * DESADRSEL ++ * SRCADR ++ * DESADR ++ * REQSEL ++ */ ++ if (0 == ++ (ch_req->flags & (DMAD_FLAGS_RING_MODE | DMAD_FLAGS_BIDIRECTION))) ++ apb_req->tx_dir = 0; ++ dmad_apb_config_dir(ch_req, &channel_cmd); ++ ++ /* - CMD outport */ ++ outl(channel_cmd, (addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ /* SRCADR and DESADR */ ++ outl(0, (addr_t) drq->src_port); ++ outl(0, (addr_t) drq->dst_port); ++ ++ /* CYC (transfer size) */ ++ outl(0, (addr_t) drq->cyc_port); ++ ++ /* keep channel data width for faster cycle/byte size conversion */ ++ drq->data_width = apb_req->data_width; ++ ++_safe_exit: ++ ++ spin_unlock_irqrestore(&dmad.drq_pool_lock, lock_flags); ++ ++ return err; ++} ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++/** ++ * dmad_channel_init - initialize given dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * This function serves as the abstraction layer of dmad_ahb_init() ++ * and dmad_apb_init() functions. 
++ */ ++static int dmad_channel_init(dmad_chreq * ch_req) ++{ ++ int err = 0; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) ++ return -EFAULT; ++ ++ if (unlikely(ch_req->drq == NULL)) ++ return -EBADR; ++ ++ /* Initialize DMA controller */ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) ++ err = dmad_ahb_init(ch_req); ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) ++ err = dmad_apb_init(ch_req); ++#endif ++ ++ return err; ++} ++ ++static inline void dmad_reset_channel(dmad_drq * drq) ++{ ++ /* disable dma controller */ ++ dmad_disable_channel(drq); ++ ++ /* Source and destination address */ ++ outl(0, drq->src_port); ++ outl(0, drq->dst_port); ++ ++ /* Transfer size (in units of source width) */ ++ outl(0, drq->cyc_port); ++} ++ ++/** ++ * dmad_channel_reset - reset given dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * This function serves as the abstraction layer of dmad_ahb_reset() ++ * and dmad_apb_reset() functions. ++ */ ++static int dmad_channel_reset(dmad_chreq * ch_req) ++{ ++ u32 channel = (u32) ch_req->channel; ++ unsigned long lock_flags; ++ int err = 0; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) ++ return -EFAULT; ++ ++ if (unlikely(ch_req->drq == NULL)) ++ return -EBADR; ++ ++ spin_lock_irqsave(&((dmad_drq *) ch_req->drq)->drb_pool_lock, ++ lock_flags); ++ ++ /* stop DMA channel */ ++ dmad_reset_channel((dmad_drq *) ch_req->drq); ++ ++ spin_unlock_irqrestore(&((dmad_drq *) ch_req->drq)->drb_pool_lock, ++ lock_flags); ++ ++ /* unregister interrupt handler */ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) ++ free_irq(ahb_irqs[channel], (void *)(channel + 1)); ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) ++ free_irq(apb_irqs[channel], (void *)(channel + 1)); ++#endif ++ ++ return err; ++} ++ ++/** ++ * dmad_channel_alloc - allocates and initialize a dma channel ++ * @ch_req : [in/out] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * This function allocates a DMA channel according to client's request ++ * parameters. ISR and HW state will also be initialized accordingly. ++ */ ++int dmad_channel_alloc(dmad_chreq * ch_req) ++{ ++ dmad_drq *drq_iter = NULL; ++ dmad_drb *drb_iter; ++ int err = 0; ++ u32 i = 0; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (ch_req == NULL) { ++ printk(KERN_ERR "%s() invalid argument!\n", __func__); ++ return -EFAULT; ++ } ++ ++ spin_lock(&dmad.drq_pool_lock); ++ ++ /* locate an available DMA channel */ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ ++ drq_iter = dmad.ahb_drq_pool; ++ ++ if ((ch_req->ahb_req.src_reqn != DMAC_REQN_NONE) || ++ (ch_req->ahb_req.dst_reqn != DMAC_REQN_NONE)) { ++ /* [2007-12-03] It looks current board have problem to ++ * do dma traffic for APB devices on DMAC channel 0/1. ++ * Redirect all APB devices to start from channel 2. ++ */ ++ ++ /* [todo] include USB controller ? 
*/ ++ drq_iter = &dmad.ahb_drq_pool[2]; ++ for (i = 2; i < DMAD_AHB_MAX_CHANNELS; ++i, ++drq_iter) { ++ if (!(drq_iter->state & DMAD_DRQ_STATE_READY)) ++ break; ++ } ++ } else { ++ /* channel for other devices is free to allocate */ ++ for (i = 0; i < DMAD_AHB_MAX_CHANNELS; ++i, ++drq_iter) { ++ if (!(drq_iter->state & DMAD_DRQ_STATE_READY)) ++ break; ++ } ++ } ++ ++ if (unlikely(i == DMAD_AHB_MAX_CHANNELS)) { ++ spin_unlock(&dmad.drq_pool_lock); ++ dmad_err("out of available channels (AHB DMAC)!\n"); ++ return -ENOSPC; ++ } ++ ++ dmad_dbg("allocated channel: %d (AHB DMAC)\n", i); ++ ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ ++ drq_iter = dmad.apb_drq_pool; ++ ++ for (i = 0; i < DMAD_APB_MAX_CHANNELS; ++i, ++drq_iter) { ++ if ((drq_iter->state & DMAD_DRQ_STATE_READY) == 0) ++ break; ++ } ++ ++ if (unlikely(i == DMAD_APB_MAX_CHANNELS)) { ++ spin_unlock(&dmad.drq_pool_lock); ++ dmad_err("out of available channels (APB DMAC)!\n"); ++ return -ENOSPC; ++ } ++ ++ dmad_dbg("allocated channel: %d (APB DMAC)\n", i); ++ } ++#endif ++ if (drq_iter == NULL) { ++ spin_unlock(&dmad.drq_pool_lock); ++ printk(KERN_ERR "%s() invalid argument!\n", __func__); ++ return -EFAULT; ++ } ++ ++ spin_unlock(&dmad.drq_pool_lock); ++ memset(drq_iter, 0, sizeof(dmad_drq)); ++ ++ /* Initialize DMA channel's DRB pool as list of free DRBs */ ++ drq_iter->drb_pool = ++ kmalloc(DMAD_DRB_POOL_SIZE * sizeof(dmad_drb), GFP_ATOMIC); ++ ++ if (drq_iter->drb_pool == NULL) { ++ printk(KERN_ERR "%s() failed to allocate drb pool!\n", ++ __func__); ++ return -ENOMEM; ++ } ++ ++ /* Allocate the DMA channel */ ++ drq_iter->state = DMAD_DRQ_STATE_READY; ++ drq_iter->flags = ch_req->flags; ++ ++ /* Initialize synchronization object for DMA queue access control */ ++ spin_lock_init(&drq_iter->drb_pool_lock); ++ ++ /* Initialize synchronization object for free drb notification */ ++ init_completion(&drq_iter->drb_alloc_sync); ++ ++ /* Record the channel number in client's struct */ ++ ch_req->channel = i; ++ ++ /* Record the channel's queue handle in client's struct */ ++ ch_req->drq = drq_iter; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ drq_iter->channel_base = (addr_t) DMAC_BASE_CH(i); ++ drq_iter->enable_port = ++ (addr_t) drq_iter->channel_base + DMAC_CSR_OFFSET; ++ drq_iter->src_port = ++ (addr_t) drq_iter->channel_base + DMAC_SRC_ADDR_OFFSET; ++ drq_iter->dst_port = ++ (addr_t) drq_iter->channel_base + DMAC_DST_ADDR_OFFSET; ++ drq_iter->cyc_port = ++ (addr_t) drq_iter->channel_base + DMAC_SIZE_OFFSET; ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ drq_iter->channel_base = (addr_t) APBBR_DMA_BASE_CH(i); ++ drq_iter->enable_port = ++ (addr_t) drq_iter->channel_base + APBBR_DMA_CMD_OFFSET; ++ drq_iter->src_port = ++ (addr_t) drq_iter->channel_base + APBBR_DMA_SAD_OFFSET; ++ drq_iter->dst_port = ++ (addr_t) drq_iter->channel_base + APBBR_DMA_DAD_OFFSET; ++ drq_iter->cyc_port = ++ (addr_t) drq_iter->channel_base + APBBR_DMA_CYC_OFFSET; ++ } ++#endif ++ /* drb-0 is an invalid node - for node validation */ ++ drb_iter = &drq_iter->drb_pool[0]; ++ drb_iter->prev = 0; ++ drb_iter->next = 0; ++ drb_iter->node = 0; ++ ++drb_iter; ++ ++ /* init other drbs - link in order */ ++ for (i = 1; i < DMAD_DRB_POOL_SIZE; ++i, ++drb_iter) { ++ drb_iter->prev = i - 1; ++ drb_iter->next = i + 1; ++ drb_iter->node = i; ++ } ++ drq_iter->drb_pool[DMAD_DRB_POOL_SIZE - 1].next = 0; ++ ++ /* Initialize 
channel's DRB free-list, ready-list, and submitted-list */ ++ drq_iter->fre_head = 1; ++ drq_iter->fre_tail = DMAD_DRB_POOL_SIZE - 1; ++ drq_iter->rdy_head = drq_iter->rdy_tail = 0; ++ drq_iter->sbt_head = drq_iter->sbt_tail = 0; ++ ++ /* initialize ring buffer mode resources */ ++ if (ch_req->flags & DMAD_FLAGS_RING_MODE) { ++ ++ int remnant = (int)ch_req->ring_size - ++ (int)ch_req->periods * (int)ch_req->period_size; ++ if (remnant == 0) { ++ drq_iter->periods = ch_req->periods; ++ } else if (remnant > 0) { ++ drq_iter->periods = ch_req->periods; // + 1; ++ } else { ++ dmad_err("%s() Error - buffer_size < " ++ "periods * period_size!\n", __func__); ++ err = -EFAULT; ++ goto _err_exit; ++ } ++ ++ drq_iter->ring_size = ch_req->ring_size; ++ drq_iter->period_size = ch_req->period_size; ++ drq_iter->remnant_size = (dma_addr_t) remnant; ++ ++ drq_iter->ring_base = (dma_addr_t) ch_req->ring_base; ++ drq_iter->dev_addr = (dma_addr_t) ch_req->dev_addr; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ if ((ch_req->ahb_req.ring_ctrl == DMAC_CSR_AD_DEC) || ++ (ch_req->ahb_req.dev_ctrl == DMAC_CSR_AD_DEC)) { ++ dmad_err("%s() Error - decremental" ++ " addressing DMA is not supported in" ++ " ring mode currently!\n", __func__); ++ err = -EFAULT; ++ goto _err_exit; ++ } ++ ++ if (ch_req->ahb_req.ring_ctrl == DMAC_CSR_AD_FIX) { ++ dmad_err("%s() Error - ring address control is " ++ "fixed in ring DMA mode!\n", __func__); ++ err = -EFAULT; ++ goto _err_exit; ++ } ++ ++ drq_iter->period_bytes = ++ DMAC_CYCLE_TO_BYTES(ch_req->period_size, ++ ch_req->ahb_req.ring_width); ++ ++ /* 0 - addr0 to addr1; 1 - addr1 to addr0 */ ++ if (ch_req->ahb_req.tx_dir == 0) ++ drq_iter->ring_port = ++ (addr_t) drq_iter->src_port; ++ else ++ drq_iter->ring_port = ++ (addr_t) drq_iter->dst_port; ++ ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ ++ if ((ch_req->apb_req.ring_ctrl >= APBBR_ADDRINC_D1) || ++ (ch_req->apb_req.dev_ctrl >= APBBR_ADDRINC_D1)) { ++ dmad_err("%s() Error - decremental" ++ " addressing DMA is not supported in" ++ " ring mode currently!\n", __func__); ++ err = -EFAULT; ++ goto _err_exit; ++ } ++ ++ if (ch_req->apb_req.ring_ctrl == APBBR_ADDRINC_FIXED) { ++ dmad_err("%s() Error - ring address control is " ++ "fixed in ring DMA mode!\n", __func__); ++ err = -EFAULT; ++ goto _err_exit; ++ } ++ ++ drq_iter->period_bytes = ++ APBBR_DMA_CYCLE_TO_BYTES(ch_req->period_size, ++ ch_req->apb_req. ++ data_width); ++ ++ /* 0 - addr0 to addr1; 1 - addr1 to addr0 */ ++ if (ch_req->apb_req.tx_dir == 0) ++ drq_iter->ring_port = ++ (addr_t) drq_iter->src_port; ++ else ++ drq_iter->ring_port = ++ (addr_t) drq_iter->dst_port; ++ } ++#endif ++ dmad_dbg("%s() ring: base(0x%08x) port(0x%08x) periods(0x%08x)" ++ " period_size(0x%08x) period_bytes(0x%08x)" ++ " remnant_size(0x%08x)\n", ++ __func__, drq_iter->ring_base, drq_iter->ring_port, ++ drq_iter->periods, drq_iter->period_size, ++ drq_iter->period_bytes, drq_iter->remnant_size); ++ } ++ ++ drq_iter->completion_cb = ch_req->completion_cb; ++ drq_iter->completion_data = ch_req->completion_data; ++ ++ /* Initialize the channel && register isr */ ++ err = dmad_channel_init(ch_req); ++ ++_err_exit: ++ ++ if (err != 0) { ++ spin_lock(&dmad.drq_pool_lock); ++ ++ kfree(drq_iter->drb_pool); ++ memset(drq_iter, 0, sizeof(dmad_drq)); ++ ++ ch_req->channel = -1; ++ ch_req->drq = (void *)0; ++ ++ spin_unlock(&dmad.drq_pool_lock); ++ ++ dmad_err("Failed to initialize APB DMA! 
" ++ "Channel allocation aborted!\n"); ++ } ++ ++ return err; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_channel_alloc); ++ ++/** ++ * dmad_channel_free - release a dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * This function releases a DMA channel. The channel is available for future ++ * allocation after the invokation. ++ */ ++int dmad_channel_free(dmad_chreq * ch_req) ++{ ++ dmad_drq *drq; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ if (unlikely((ch_req->channel < 0) || ++ ((drq->state & DMAD_DRQ_STATE_READY) == 0))) { ++ dmad_err("try to free a free channel!\n"); ++ return -EBADR; ++ } ++ ++ /* Stop/abort channel I/O ++ * (forced to shutdown and should be protected against isr) ++ */ ++ dmad_drain_requests(ch_req, 1); ++ dmad_channel_reset(ch_req); ++ ++ dmad_dbg("freed channel: %d\n", ch_req->channel); ++ ++ spin_lock(&dmad.drq_pool_lock); ++ ++ kfree(drq->drb_pool); ++ memset(drq, 0, sizeof(dmad_drq)); ++ ++ ch_req->drq = 0; ++ ch_req->channel = (u32) - 1; ++ ++ spin_unlock(&dmad.drq_pool_lock); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_channel_free); ++ ++/** ++ * dmad_channel_enable - enable/disable a dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @enable : [in] 1 to enable the channel, 0 to disable ++ * @return : 0 if success, non-zero if any error ++ * ++ * Enable or disable the given DMA channel. ++ */ ++int dmad_channel_enable(const dmad_chreq * ch_req, u8 enable) ++{ ++ dmad_drq *drq; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) ++ return -EFAULT; ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) ++ return -EBADR; ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /* Enable/disable DMA channel */ ++ if (enable) ++ dmad_enable_channel(drq); ++ else ++ dmad_disable_channel(drq); ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_channel_enable); ++ ++/** ++ * dmad_config_channel_dir - config dma channel transfer direction ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @dir : [in] DMAD_DRQ_DIR_A0_TO_A1 or DMAD_DRQ_DIR_A1_TO_A0 ++ * @return : 0 if success, non-zero if any error ++ * ++ * Reconfigure the channel transfer direction. This function works only if ++ * the channel was allocated with the DMAD_FLAGS_BIDIRECTION flags. Note ++ * that bi-direction mode and ring mode are mutual-exclusive from user's ++ * perspective. 
++ */ ++int dmad_config_channel_dir(dmad_chreq * ch_req, u8 dir) ++{ ++ dmad_drq *drq; ++ addr_t channel_cmds[2]; ++ unsigned long lock_flags; ++ u8 cur_dir; ++ ++ if (unlikely(ch_req == NULL)) ++ return -EFAULT; ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) ++ return -EBADR; ++ ++ if (unlikely(!(ch_req->flags & DMAD_FLAGS_BIDIRECTION))) { ++ dmad_err("%s() Channel is not configured as" ++ " bidirectional!\n", __func__); ++ return -EFAULT; ++ } ++ ++ cur_dir = drq->flags & DMAD_DRQ_DIR_MASK; ++ if (dir == cur_dir) { ++ dmad_dbg("%s() cur_dir(%d) == dir(%d) skip reprogramming hw.\n", ++ __func__, cur_dir, dir); ++ return 0; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ if (unlikely((drq->sbt_head != 0) /*||dmad_is_channel_enabled(drq) */ )) { ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ dmad_err("%s() Cannot change direction while the " ++ "channel has pending requests!\n", __func__); ++ return -EFAULT; ++ } ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ ++ channel_cmds[0] = ++ inl((addr_t) drq->channel_base + DMAC_CSR_OFFSET); ++ channel_cmds[1] = ++ inl((addr_t) drq->channel_base + DMAC_CFG_OFFSET); ++ ++ ch_req->ahb_req.tx_dir = dir; ++ dmad_ahb_config_dir(ch_req, channel_cmds); ++ ++ outl(channel_cmds[1], ++ (addr_t) drq->channel_base + DMAC_CFG_OFFSET); ++ outl(channel_cmds[0], ++ (addr_t) drq->channel_base + DMAC_CSR_OFFSET); ++ ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ ++ channel_cmds[0] = ++ inl((addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ ++ ch_req->apb_req.tx_dir = dir; ++ dmad_apb_config_dir(ch_req, channel_cmds); ++ ++ outl(channel_cmds[0], ++ (addr_t) drq->channel_base + APBBR_DMA_CMD_OFFSET); ++ } ++#endif ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_config_channel_dir); ++ ++/** ++ * dmad_max_size_per_drb - return maximum transfer size per drb ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : The maximum transfer size per drb, in bytes. ++ * ++ * Calculate the maximum transfer size per drb according to the setting of ++ * data width during channel initialization. ++ * ++ * Return size is aligned to 4-byte boundary; this ensures the alignment ++ * requirement of dma starting address if the function was used in a loop to ++ * separate a large size dma transfer. ++ */ ++u32 dmad_max_size_per_drb(dmad_chreq * ch_req) ++{ ++ addr_t size = 0; ++ addr_t data_width = (addr_t) ((dmad_drq *) ch_req->drq)->data_width; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ size = DMAC_CYCLE_TO_BYTES(DMAC_TOT_SIZE_MASK & ((addr_t) ~ 3), ++ data_width); ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ size = ++ APBBR_DMA_CYCLE_TO_BYTES(APBBR_DMA_CYC_MASK & ++ ((addr_t) ~ 3), data_width); ++ } ++#endif ++ dmad_dbg("%s() - 0x%08x bytes\n", __func__, size); ++ ++ return size; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_max_size_per_drb); ++ ++/** ++ * dmad_bytes_to_cycles - calculate drb transfer size, in cycles ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @byte_size : [in] The DMA transfer size to be converted, in bytes ++ * @return : The drb transfer size, in cycles. ++ * ++ * Calculate the drb transfer cycle according to the setting of channel data ++ * width and burst setting. 
++ * ++ * AHB DMA : unit is number of "data width". ++ * APB DMA : unit is number of "data width * burst size" ++ * ++ * APB Note: According to specification, decrement addressing seems to regard ++ * the burst size setting. For code efficiency, ++ * dmad_make_req_cycles() does not take care of this case and might ++ * produce wrong result. ++ */ ++u32 dmad_bytes_to_cycles(dmad_chreq * ch_req, u32 byte_size) ++{ ++ addr_t cycle = 0; ++ addr_t data_width = (addr_t) ((dmad_drq *) ch_req->drq)->data_width; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ cycle = DMAC_BYTES_TO_CYCLE(byte_size, data_width); ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ cycle = APBBR_DMA_BYTES_TO_CYCLE(byte_size, data_width); ++ if (ch_req->apb_req.burst_mode) ++ cycle = cycle >> 2; ++ } ++#endif ++ ++ dmad_dbg("%s() - 0x%08x bytes --> 0x%08x cycles\n", ++ __func__, byte_size, cycle); ++ ++ return cycle; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_bytes_to_cycles); ++ ++/** ++ * dmad_alloc_drb_internal - allocate a dma-request-block of a dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [out] Reference to a drb pointer to receive the allocated drb ++ * @return : 0 if success, non-zero if any error ++ * ++ * Allocates a DRB (DMA request block) of the given DMA channel. DRB is a ++ * single dma request which will be pushed into the submission queue of the ++ * given DMA channel. This is a lightweight internal version of ++ * dmad_alloc_drb() majorly for use in ring mode. Critical access to the ++ * drb pool should be protected before entering this function. ++ */ ++static inline int dmad_alloc_drb_internal(dmad_drq * drq, dmad_drb ** drb) ++{ ++ /* Initialize drb ptr in case of fail allocation */ ++ *drb = NULL; ++ ++ if (unlikely(drq->fre_head == 0)) { ++ return -EAGAIN; ++ } ++ ++ dmad_detach_head(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb); ++ ++ dmad_attach_tail(drq->drb_pool, ++ &drq->rdy_head, &drq->rdy_tail, (*drb)->node); ++ ++ (*drb)->state = DMAD_DRB_STATE_READY; ++ (*drb)->sync = 0; ++ ++ dmad_dbg("%s() drb(%d 0x%08x)\n", __func__, (*drb)->node, (u32) (*drb)); ++ ++ return 0; ++} ++ ++/** ++ * dmad_alloc_drb - allocate a dma-request-block of a dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [out] Reference to a drb pointer to receive the allocated drb ++ * @return : 0 if success, non-zero if any error ++ * ++ * Allocates a DRB (DMA request block) of the given DMA channel. DRB is a ++ * single dma request which will be pushed into the submission queue of the ++ * given DMA channel. 
++ */ ++int dmad_alloc_drb(dmad_chreq * ch_req, dmad_drb ** drb) ++{ ++ dmad_drq *drq; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (likely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /* Initialize drb ptr in case of fail allocation */ ++ *drb = NULL; ++ ++ if (unlikely(drq->fre_head == 0)) { ++ ++ drq->state &= (u32) ~ DMAD_DRQ_STATE_ABORT; ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++_wait_for_free_drbs: ++ ++ /* Wait for free urbs */ ++ if (drq->flags & DMAD_FLAGS_SLEEP_BLOCK) { ++ ++ int timeout = ++ wait_for_completion_interruptible_timeout(&drq-> ++ drb_alloc_sync, ++ msecs_to_jiffies ++ (6000)); ++ ++ /* reset sync object */ ++ INIT_COMPLETION(drq->drb_alloc_sync); ++ ++ if (timeout < 0) { ++ dmad_err("%s() wait for" ++ " completion error! (%d)\n", ++ __func__, timeout); ++ return timeout; ++ } ++ ++ } else if (drq->flags & DMAD_FLAGS_SPIN_BLOCK) { ++ ++ u32 timeout = 0x00ffffff; ++ ++ while ((drq->fre_head == 0) && (--timeout != 0)) { ++ } ++ if (timeout == 0) { ++ dmad_err("%s() polling wait for " ++ "completion timeout!\n", __func__); ++ return -EAGAIN; ++ } ++ ++ } else { ++ return -EAGAIN; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /* check whether all the requests of the channel has been ++ * abandoned or not */ ++ if (unlikely(drq->state & DMAD_DRQ_STATE_ABORT)) { ++ dmad_dbg("%s() drb-allocation aborted due" ++ " to cancel-request ...\n", __func__); ++ drq->state &= (u32) ~ DMAD_DRQ_STATE_ABORT; ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -ECANCELED; ++ } ++ ++ /* check again to avoid non-atomic operation between above ++ * two calls */ ++ if (unlikely(drq->fre_head == 0)) { ++ dmad_dbg("%s() lost free drbs ... " ++ "continue waiting ...\n", __func__); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ goto _wait_for_free_drbs; ++ } ++ } ++ ++ dmad_detach_head(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb); ++ ++ dmad_attach_tail(drq->drb_pool, ++ &drq->rdy_head, &drq->rdy_tail, (*drb)->node); ++ ++ (*drb)->state = DMAD_DRB_STATE_READY; ++ (*drb)->sync = 0; ++ ++ dmad_dbg("%s() drb(%d 0x%08x)\n", __func__, (*drb)->node, (u32) (*drb)); ++ ++ drq->state &= (u32) ~ DMAD_DRQ_STATE_ABORT; ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_alloc_drb); ++ ++/** ++ * dmad_free_drb - free a dma-request-block of a dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [in] Reference to a drb to be freed ++ * @return : 0 if success, non-zero if any error ++ * ++ * Frees a DRB (DMA request block) of the given DMA channel. DRB is a ++ * single dma request which will be pushed into the submission queue of the ++ * given DMA channel. 
++ */ ++int dmad_free_drb(dmad_chreq * ch_req, dmad_drb * drb) ++{ ++ dmad_drq *drq; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /**************************************************** ++ * Following code requires _safe_exit return path ++ */ ++ ++ if (unlikely((drq->rdy_head == 0) || (drb->node == 0) || ++ (drb->state != DMAD_DRB_STATE_READY) || ++ (drb->node >= DMAD_DRB_POOL_SIZE))) { ++ dmad_err("Ready-queue is empty or invalid node!\n"); ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -EBADR; ++ } ++ ++ dmad_detach_node(drq->drb_pool, ++ &drq->rdy_head, &drq->rdy_tail, drb->node); ++ dmad_attach_tail(drq->drb_pool, ++ &drq->fre_head, &drq->fre_tail, drb->node); ++ ++ drb->state = DMAD_DRB_STATE_FREE; ++ drb->sync = 0; ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_free_drb); ++ ++/** ++ * dmad_submit_request_internal - submit a dma-request-block to the dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [in] Reference to a drb to be submitted ++ * @keep_fired : [in] non-zero to kickoff dma even the channel has stopped due ++ * to finishing its previous request ++ * @return : 0 if success, non-zero if any error ++ * ++ * Submit a DRB (DMA request block) of the given DMA channel to submission ++ * queue. DRB is a single dma request which will be pushed into the ++ * submission queue of the given DMA channel. This is a lightweight internal ++ * version of dmad_alloc_drb() majorly for use in ring mode. Critical access to ++ * the drb pool should be protected before entering this function. ++ */ ++static inline int dmad_submit_request_internal(dmad_drq * drq, dmad_drb * drb) ++{ ++ if (drb->state == DMAD_DRB_STATE_READY) { ++ /* Detach user node from ready list */ ++ dmad_detach_node(drq->drb_pool, ++ &drq->rdy_head, &drq->rdy_tail, drb->node); ++ ++ dmad_attach_tail(drq->drb_pool, ++ &drq->sbt_head, &drq->sbt_tail, drb->node); ++ ++ drb->state = DMAD_DRB_STATE_SUBMITTED; ++ ++ dmad_dbg("%s() submit drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x) state(%d)\n", __func__, ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, drb->state); ++ } else { ++ dmad_dbg("%s() skip drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x)" ++ " size(0x%08x) state(%d)\n", __func__, ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, drb->state); ++ } ++ ++ return 0; ++} ++ ++/** ++ * dmad_submit_request - submit a dma-request-block to the dma channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [in] Reference to a drb to be submitted ++ * @keep_fired : [in] non-zero to kickoff dma even the channel has stopped due ++ * to finishing its previous request ++ * @return : 0 if success, non-zero if any error ++ * ++ * Submit a DRB (DMA request block) of the given DMA channel to submission ++ * queue. DRB is a single dma request which will be pushed into the ++ * submission queue of the given DMA channel. 
++ */ ++int dmad_submit_request(dmad_chreq * ch_req, dmad_drb * drb, u8 keep_fired) ++{ ++ dmad_drq *drq; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /****************************************************** ++ * Following code require _safe_exit return path ++ */ ++ ++ if (unlikely((drq->rdy_head == 0) || (drb->node == 0) || ++ (drb->node >= DMAD_DRB_POOL_SIZE))) { ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -EBADR; ++ } ++ ++ /* Detach user node from ready list */ ++ dmad_detach_node(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, ++ drb->node); ++ ++ /* Queue DRB to the end of the submitted list */ ++ dmad_dbg("submit drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) sync(0x%08x) fire(%d)\n", ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, (u32) drb->sync, keep_fired); ++ ++ /* Check if submission is performed to an empty queue */ ++ if (unlikely(keep_fired && (drq->sbt_head == 0))) { ++ /* DMA is not running, so kick off transmission */ ++ dmad_dbg("kickoff dma engine.\n"); ++ ++ dmad_attach_tail(drq->drb_pool, ++ &drq->sbt_head, &drq->sbt_tail, drb->node); ++ ++ /* Source and destination address */ ++ if (drq->flags & DMAD_DRQ_DIR_A1_TO_A0) { ++ outl(drb->addr1, (addr_t) drq->src_port); ++ outl(drb->addr0, (addr_t) drq->dst_port); ++ } else { ++ outl(drb->addr0, (addr_t) drq->src_port); ++ outl(drb->addr1, (addr_t) drq->dst_port); ++ } ++ ++ /* Transfer size (in units of source width) */ ++ outl(drb->req_cycle, (addr_t) drq->cyc_port); ++ ++ /* Enable DMA channel (Kick off transmission when client ++ * enable it's transfer state) */ ++ dmad_enable_channel(drq); ++ ++ drb->state = DMAD_DRB_STATE_EXECUTED; ++ ++ } else { ++ /* DMA is already running, so only queue DRB to the end of the ++ * list */ ++ dmad_attach_tail(drq->drb_pool, ++ &drq->sbt_head, &drq->sbt_tail, drb->node); ++ drb->state = DMAD_DRB_STATE_SUBMITTED; ++ } ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_submit_request); ++ ++/** ++ * dmad_withdraw_request - cancel a submitted dma-request-block ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @drb : [in] Reference to a drb to be submitted ++ * @keep_fired : [in] non-zero to kickoff dma even the channel has stopped due ++ * to finishing its previous request ++ * @return : 0 if success, non-zero if any error ++ * ++ * Cancel a submitted DRB (DMA request block) of the given DMA channel in its ++ * submission queue. DRB is a single dma request which will be pushed into the ++ * submission queue of the given DMA channel. Cancellation fails if the DRB has ++ * already been kicked off. 
++ */ ++int dmad_withdraw_request(dmad_chreq * ch_req, dmad_drb * drb) ++{ ++ dmad_drq *drq = 0; ++ unsigned long lock_flags; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ if (unlikely(drq->sbt_head == 0)) ++ return -EBADR; ++ ++ if (unlikely((drb->node == 0) || (drb->node >= DMAD_DRB_POOL_SIZE))) ++ return -EBADR; ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ if (unlikely((drq->sbt_head == 0) || (drb->node == 0) || ++ (drb->state != DMAD_DRB_STATE_SUBMITTED) || ++ (drb->node >= DMAD_DRB_POOL_SIZE))) { ++ dmad_err("Submitted-queue is empty or invalid node!\n"); ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -EBADR; ++ } ++ ++ dmad_dbg("cancel drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, drb->state); ++ ++ if (unlikely(drb->state == DMAD_DRB_STATE_EXECUTED)) { ++ dmad_dbg("Already running drb cannot be stopped currently!\n"); ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return 0;/*-EBADR; */ ++ } ++ ++ dmad_detach_node(drq->drb_pool, ++ &drq->rdy_head, &drq->rdy_tail, drb->node); ++ dmad_attach_tail(drq->drb_pool, ++ &drq->fre_head, &drq->fre_tail, drb->node); ++ ++ drb->state = DMAD_DRB_STATE_FREE; ++ ++ if (drb->sync) ++ complete_all(drb->sync); ++ drb->sync = 0; ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_withdraw_request); ++ ++/** ++ * dmad_kickoff_requests_internal - kickoff hw DMA transmission ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * Kickoff hw DMA transmission of the given DMA channel. This function is ++ * valid for both ring & non-ring mode. This is a lightweight internal version ++ * of dmad_kickoff_requests() majorly for use in ring mode. Critical access to ++ * the drb pool should be protected before entering this function. ++ */ ++static inline int dmad_kickoff_requests_internal(dmad_drq * drq) ++{ ++ dmad_drb *drb; ++ ++ dmad_get_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb); ++ ++ if (!drb) { ++ dmad_err("%s() null drb!\n", __func__); ++ return -EBADR; ++ } ++ ++ dmad_dbg("%s() drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", __func__, ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, drb->state); ++ ++ if (drb->state == DMAD_DRB_STATE_SUBMITTED) { ++ /* Transfer size (in units of source width) */ ++ outl(drb->req_cycle, (addr_t) drq->cyc_port); ++ ++ /* Source and destination address */ ++ if (drq->flags & DMAD_DRQ_DIR_A1_TO_A0) { ++ outl(drb->addr1, (addr_t) drq->src_port); ++ outl(drb->addr0, (addr_t) drq->dst_port); ++ } else { ++ outl(drb->addr0, (addr_t) drq->src_port); ++ outl(drb->addr1, (addr_t) drq->dst_port); ++ } ++ ++ drb->state = DMAD_DRB_STATE_EXECUTED; ++ } ++ ++ /* Enable DMA channel */ ++ if (!dmad_is_channel_enabled(drq)) { ++ dmad_enable_channel(drq); ++ } ++ ++ return 0; ++} ++ ++/** ++ * dmad_kickoff_requests - kickoff hw DMA transmission of the given DMA channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : 0 if success, non-zero if any error ++ * ++ * Kickoff hw DMA transmission of the given DMA channel. 
This function is ++ * valid for both ring & non-ring mode. ++ */ ++int dmad_kickoff_requests(dmad_chreq * ch_req) ++{ ++ dmad_drq *drq = 0; ++ dmad_drb *drb = 0; ++ unsigned long lock_flags; ++ dma_addr_t req_cycle; ++ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (unlikely(ch_req == NULL)) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ dmad_get_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb); ++ ++ dmad_dbg("drq(0x%08x) channel_base(0x%08x)\n", ++ (u32) drq, drq->channel_base); ++ dmad_dbg("kick off drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d) a1_to_a0(%d)\n", ++ (u32) drb->node, (u32) drb, drb->addr0, drb->addr1, ++ drb->req_cycle, drb->state, ++ drq->flags & DMAD_DRQ_DIR_A1_TO_A0); ++ ++ /* do nothing if no drbs are in the submission queue */ ++ if (unlikely((drb == 0) || (drb->state != DMAD_DRB_STATE_SUBMITTED))) { ++ dmad_dbg("%s() invalid drb(%d 0x%08x) or drb-state(%d)!\n", ++ __func__, ++ drb->node, (u32) drb, drb ? drb->state : 0xffffffff); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return 0; ++ } ++ ++ req_cycle = drb->req_cycle; ++ ++ if (unlikely(req_cycle == 0)) { ++ dmad_dbg("%s() zero transfer size!\n", __func__); ++ goto _safe_exit; ++ } ++ ++ /* Transfer size (in units of source width) */ ++ outl(req_cycle, (addr_t) drq->cyc_port); ++ ++ /* Source and destination address */ ++ if (drq->flags & DMAD_DRQ_DIR_A1_TO_A0) { ++ outl(drb->addr1, (addr_t) drq->src_port); ++ outl(drb->addr0, (addr_t) drq->dst_port); ++ } else { ++ outl(drb->addr0, (addr_t) drq->src_port); ++ outl(drb->addr1, (addr_t) drq->dst_port); ++ } ++ ++ drb->state = DMAD_DRB_STATE_EXECUTED; ++ ++ /* Enable DMA channel */ ++ dmad_enable_channel(drq); ++ ++_safe_exit: ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_kickoff_requests); ++ ++/** ++ * dmad_probe_hw_ptr_src - probe DMA source hw-address of the given channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : physical address of current HW source pointer ++ * ++ * Probe DMA source hw-address of the given channel. ++ */ ++dma_addr_t dmad_probe_hw_ptr_src(dmad_chreq * ch_req) ++{ ++ return (dma_addr_t) inl(((dmad_drq *) ch_req->drq)->src_port); ++} ++ ++EXPORT_SYMBOL_GPL(dmad_probe_hw_ptr_src); ++ ++/** ++ * dmad_probe_hw_ptr_dst - probe DMA destination hw-address of the given channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : physical address of current HW destination pointer ++ * ++ * Probe DMA destination hw-address of the given channel. ++ */ ++dma_addr_t dmad_probe_hw_ptr_dst(dmad_chreq * ch_req) ++{ ++ return (dma_addr_t) inl(((dmad_drq *) ch_req->drq)->dst_port); ++} ++ ++EXPORT_SYMBOL_GPL(dmad_probe_hw_ptr_dst); ++ ++/** ++ * dmad_update_ring - update DMA ring buffer base && size of the given channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @size : [in] The new ring buffer size, in unit of data width (cycles) ++ * @return : 0 if success, non-zero if any error ++ * ++ * Update DMA ring buffer size of the given channel. This function is valid ++ * only if the channel is initialized as ring buffer mode. 
++ */ ++int dmad_update_ring(dmad_chreq * ch_req) ++{ ++ unsigned long lock_flags; ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ int remnant; ++ ++ if (unlikely(dmad_is_channel_enabled(drq))) { ++ dmad_err("%s() Error - dma channel should be " ++ "disabled before updating ring size!\n", __func__); ++ return -EFAULT; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /* todo: range checking */ ++ ++ remnant = (int)ch_req->ring_size - ++ (int)ch_req->periods * (int)ch_req->period_size; ++ if (remnant == 0) { ++ drq->periods = ch_req->periods; ++ } else if (remnant > 0) { ++ drq->periods = ch_req->periods; // + 1; ++ } else { ++ dmad_err("%s() Error - buffer_size < " ++ "periods * period_size!\n", __func__); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -EFAULT; ++ } ++ ++ drq->ring_base = ch_req->ring_base; ++ drq->ring_size = ch_req->ring_size; ++ drq->period_size = ch_req->period_size; ++ drq->remnant_size = (dma_addr_t) remnant; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) { ++ drq->period_bytes = ++ DMAC_CYCLE_TO_BYTES(drq->period_size, drq->data_width); ++ } ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) { ++ drq->period_bytes = ++ APBBR_DMA_CYCLE_TO_BYTES(drq->period_size, drq->data_width); ++ } ++#endif ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ dmad_dbg("%s() ring: base(0x%08x) port(0x%08x) periods(0x%08x) " ++ "period_size(0x%08x) period_bytes(0x%08x) " ++ "remnant_size(0x%08x)\n", ++ __func__, drq->ring_base, drq->ring_port, ++ drq->periods, drq->period_size, drq->period_bytes, ++ drq->remnant_size); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_update_ring); ++ ++/** ++ * dmad_update_ring_sw_ptr - update DMA ring buffer sw-pointer ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @sw_ptr : [in] The new sw-pointer for the hw-pointer to chase of ++ * @keep_fired : [in] non-zero to kickoff dma even the channel has stopped due ++ * to finishing its previous request ++ * @return : 0 if success, non-zero if any error ++ * ++ * Update DMA ring buffer sw-pointer of the given channel on the fly. This ++ * function is valid only if the channel is initialized as ring buffer mode. ++ * Uint of sw_ptr is in number of dma data width. ++ */ ++int dmad_update_ring_sw_ptr(dmad_chreq * ch_req, ++ dma_addr_t sw_ptr, u8 keep_fired) ++{ ++ dmad_drq *drq; ++ unsigned long lock_flags; ++ dma_addr_t hw_off = 0, ring_ptr; ++ dma_addr_t sw_p_off, ring_p_off, period_size, period_bytes; ++ dma_addr_t remnant_size; ++ int sw_p_idx, ring_p_idx, period, periods; ++ dmad_drb *drb = NULL; ++ ++ /*if (ch_req == NULL) { */ ++ /* dmad_dbg("%s() null ch_req!\n", __func__); */ ++ /* return -EFAULT; */ ++ /*} */ ++ ++ drq = (dmad_drq *) ch_req->drq; ++ ++ /*if (drq == NULL) { */ ++ /* dmad_dbg("%s() null ch_req->drq!\n", __func__); */ ++ /* return -EBADR; */ ++ /*} */ ++ ++ if (unlikely(sw_ptr > drq->ring_size)) { ++ dmad_err("%s() Invalid ring buffer sw-pointer " ++ "range (0x%08x)! 
ring_size(0x%08x)\n", ++ __func__, sw_ptr, drq->ring_size); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ periods = drq->periods; ++ period_size = drq->period_size; ++ period_bytes = drq->period_bytes; ++ remnant_size = drq->remnant_size; ++ ++ ring_ptr = drq->sw_ptr; ++ ring_p_idx = drq->sw_p_idx; ++ ring_p_off = drq->sw_p_off; ++ ++ sw_p_idx = (int)(sw_ptr / period_size); ++ sw_p_off = sw_ptr % period_size; ++ ++ if (remnant_size && (sw_p_idx == periods)) { ++ --sw_p_idx; ++ sw_p_off += period_size; ++ } ++ ++ dmad_dbg("%s() ring_ptr(0x%08x) ring_p_idx(0x%08x) " ++ "ring_p_off(0x%08x)\n", ++ __func__, ring_ptr, ring_p_idx, ring_p_off); ++ dmad_dbg("%s() sw_ptr(0x%08x) sw_p_idx(0x%08x) sw_p_off(0x%08x)\n", ++ __func__, sw_ptr, sw_p_idx, sw_p_off); ++ ++ if (drq->ring_drb && ++ (drq->ring_drb->state & (DMAD_DRB_STATE_READY | ++ DMAD_DRB_STATE_SUBMITTED | ++ DMAD_DRB_STATE_EXECUTED))) { ++ drb = drq->ring_drb; ++ } else { ++ /* alloc new drb if there is none yet at ring_ptr */ ++ if (0 != dmad_alloc_drb_internal(drq, &drb)) { ++ dmad_err("%s() drb allocation failed!\n", __func__); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -ENOSPC; ++ } ++ drb->addr0 = ((dma_addr_t) ring_p_idx * period_bytes) + ++ drq->ring_base; ++ drb->addr1 = drq->dev_addr; ++ drb->req_cycle = 0; // redundent, though, no harm to performance ++ ++ dmad_dbg("init_drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", ++ (u32) drb->node, (u32) drb, drb->src_addr, ++ drb->dst_addr, drb->req_cycle, drb->state); ++ ++ drq->ring_drb = drb; ++ } ++ ++ /* Following code-path has been optimized. The design flow is expanded ++ * below for reference. ++ * ++ * if (sw_ptr >= ring_ptr) ++ * if (sw_p_idx == ring_p_idx) ++ * ring_drb::req_cycle <- sw_p_off ++ * if (ring_drb::state == executed) ++ * hw_cycle <- sw_p_idx ++ * fi ++ * else ++ * ring_drb::req_cycle <- period_size ++ * if (ring_drb::state == executed) ++ * hw_cycle <- period_size ++ * fi ++ * for (i = ring_p_idx+1 ~ sw_p_idx-1) ++ * new_drb::ring_addr <- i * period_bytes + ring_base ++ * new_drb::req_cycle <- period_size ++ * rof ++ * sw_drb::ring_addr <- sw_p_idx * period_bytes + ring_base ++ * sw_drb::req_cycle <- sw_p_off ++ * else ++ * // sw_ptr < ring_ptr ++ * ring_drb::req_cycle <- period_size ++ * if (ring_drb::state == executed) ++ * hw_cycle <- period_size ++ * fi ++ * for (i = ring_p_idx+1 ~ idx_max) ++ * new_drb::ring_addr <- i * period_bytes + ring_base ++ * new_drb::req_cycle <- period_size ++ * rof ++ * for (i = 0 ~ sw_p_idx-1) ++ * new_drb::ring_addr <- i * period_bytes + ring_base ++ * new_drb::req_cycle <- period_size ++ * rof ++ * sw_drb::ring_addr <- sw_p_idx * period_bytes + ring_base ++ * sw_drb::req_cycle <- sw_p_off ++ * fi ++ */ ++ if ((sw_ptr >= ring_ptr) && (sw_p_idx == ring_p_idx) && (sw_p_off != 0)) { ++ ++ dmad_dbg("update ring drb\n"); ++ ++ /* update drb size at ring_ptr */ ++ drb->req_cycle = sw_p_off; ++ ++ dmad_dbg("ring_drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", ++ (u32) drb->node, (u32) drb, drb->addr0, drb->addr1, ++ drb->req_cycle, drb->state); ++ ++ /* update hw dma size of this drb if it has been sent to the ++ * controller */ ++ if (drb->state == DMAD_DRB_STATE_EXECUTED) { ++ dmad_disable_channel(drq); ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) ++ hw_off = DMAC_BYTES_TO_CYCLE((addr_t) ++ inl((addr_t) drq-> ++ ring_port) - ++ (addr_t) drb-> ++ addr0, ++ drq->data_width); 
++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) ++ hw_off = APBBR_DMA_BYTES_TO_CYCLE((addr_t) ++ inl((addr_t) ++ drq-> ++ ring_port) ++ - ++ (addr_t) drb-> ++ addr0, ++ drq-> ++ data_width); ++#endif ++ dmad_dbg("hw_off(0x%08x) sw_p_off(0x%08x)\n", ++ (u32) hw_off, (u32) sw_p_off); ++ ++ if (sw_p_off < hw_off) ++ dmad_err("%s() underrun! sw_p_off(0x%08x) <" ++ " hw_off(0x%08x)\n", __func__, ++ (u32) sw_p_off, (u32) hw_off); ++ else ++ outl(sw_p_off - hw_off, drq->cyc_port); ++ ++ dmad_enable_channel(drq); ++ ++ } else { ++ dmad_submit_request_internal(drq, drb); ++ } ++ ++ } else { ++ ++ dmad_dbg("fulfill ring drb - sw_ptr(0x%08x) ring_ptr(0x%08x)\n", ++ (u32) sw_ptr, (u32) ring_ptr); ++ ++ /* fulfill last drb at ring_ptr */ ++ if (ring_p_idx == (periods - 1)) ++ drb->req_cycle = period_size + remnant_size; ++ else ++ drb->req_cycle = period_size; ++ ++ dmad_dbg("ring_drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", ++ (u32) drb->node, (u32) drb, drb->addr0, drb->addr1, ++ drb->req_cycle, drb->state); ++ ++ if (drb->state == DMAD_DRB_STATE_EXECUTED) { ++ dmad_disable_channel(drq); ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) ++ hw_off = DMAC_BYTES_TO_CYCLE((addr_t) ++ inl((addr_t) drq-> ++ ring_port) - ++ (addr_t) drb-> ++ addr0, ++ drq->data_width); ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) ++ hw_off = APBBR_DMA_BYTES_TO_CYCLE((addr_t) ++ inl((addr_t) ++ drq-> ++ ring_port) ++ - ++ (addr_t) drb-> ++ addr0, ++ drq-> ++ data_width); ++#endif ++ dmad_dbg("hw_off(0x%08x) period_size(0x%08x)\n", ++ (u32) hw_off, (u32) period_size); ++ ++ if (ring_p_idx == (periods - 1)) { ++ if (period_size < hw_off) ++ dmad_err("%s() illegal! " ++ "period_size(0x%08x) + " ++ "remnant_size(0x%08x) < " ++ "hw_off(0x%08x)\n", __func__, ++ (u32) period_size, ++ (u32) remnant_size, ++ (u32) hw_off); ++ else ++ outl(period_size + remnant_size - ++ hw_off, drq->cyc_port); ++ } else { ++ if (period_size < hw_off) ++ dmad_err("%s() illegal! 
" ++ "period_size(0x%08x) < " ++ "hw_off(0x%08x)\n", __func__, ++ (u32) period_size, ++ (u32) hw_off); ++ else ++ outl(period_size - hw_off, ++ drq->cyc_port); ++ } ++ ++ dmad_enable_channel(drq); ++ ++ } else { ++ dmad_submit_request_internal(drq, drb); ++ } ++ ++ ++ring_p_idx; ++ ++ /* adjust sw_ptr period index ahead by one ring cycle */ ++ //if (sw_ptr < ring_ptr) { ++ if (sw_p_idx < ring_p_idx) { ++ sw_p_idx += periods; ++ } ++ ++ /* allocate in-between (ring_ptr+1 to sw_ptr-1) ++ * full-cycle drbs */ ++ for (period = ring_p_idx; period < sw_p_idx; ++period) { ++ if (0 != dmad_alloc_drb_internal(drq, &drb)) { ++ dmad_err("%s() drb allocation failed!\n", ++ __func__); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, ++ lock_flags); ++ return -ENOSPC; ++ } ++ ++ drb->addr0 = (dma_addr_t) (period % periods) * ++ period_bytes + drq->ring_base; ++ drb->addr1 = drq->dev_addr; ++ ++ if (period == (periods - 1)) { ++ drb->req_cycle = period_size + remnant_size; ++ } else { ++ drb->req_cycle = period_size; ++ } ++ ++ dmad_dbg("inbtw_drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x) state(%d)\n", ++ (u32) drb->node, (u32) drb, drb->addr0, ++ drb->addr1, drb->req_cycle, drb->state); ++ ++ dmad_submit_request_internal(drq, drb); ++ } ++ ++ /* allocate drb right at sw_ptr */ ++ if (0 != dmad_alloc_drb_internal(drq, &drb)) { ++ dmad_err("%s() drb allocation failed!\n", __func__); ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ return -ENOSPC; ++ } ++ drb->addr0 = (dma_addr_t) (sw_p_idx % periods) * ++ period_bytes + drq->ring_base; ++ drb->addr1 = drq->dev_addr; ++ drb->req_cycle = sw_p_off; ++ ++ dmad_dbg("swptr_drb(%d 0x%08x) addr0(0x%08x) addr1(0x%08x) " ++ "size(0x%08x) state(%d)\n", ++ (u32) drb->node, (u32) drb, drb->addr0, drb->addr1, ++ drb->req_cycle, drb->state); ++ ++ drq->ring_drb = drb; ++ ++ if (sw_p_off > 0) ++ dmad_submit_request_internal(drq, drb); ++ } ++ ++ drq->sw_ptr = sw_ptr % drq->ring_size; ++ drq->sw_p_idx = sw_p_idx % periods; ++ drq->sw_p_off = sw_p_off; ++ ++ if (likely(keep_fired)) { ++ dmad_kickoff_requests_internal(drq); ++ } ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_update_ring_sw_ptr); ++ ++/** ++ * dmad_probe_ring_hw_ptr - probe DMA ring buffer position of the given channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @return : Ring buffer position of current HW ring buffer pointer ++ * ++ * Probe DMA ring buffer position of the given channel. The position is ++ * relative to the ring buffer base. This function is valid only if the ++ * channel is initialized as ring buffer mode. 
++ */ ++dma_addr_t dmad_probe_ring_hw_ptr(dmad_chreq * ch_req) ++{ ++ dmad_drq *drq = (dmad_drq *) ch_req->drq; ++ dma_addr_t cycles = ++ (dma_addr_t) inl(drq->ring_port) - (dma_addr_t) drq->ring_base; ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (ch_req->controller == DMAD_DMAC_AHB_CORE) ++ cycles = DMAC_BYTES_TO_CYCLE(cycles, drq->data_width); ++#endif ++#ifdef CONFIG_PLATFORM_APBDMA ++ if (ch_req->controller == DMAD_DMAC_APB_CORE) ++ cycles = APBBR_DMA_BYTES_TO_CYCLE(cycles, drq->data_width); ++#endif ++ return cycles; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_probe_ring_hw_ptr); ++ ++/** ++ * dmad_channel_drain - cancel DMA transmission of the given DMA channel ++ * @controller : [in] One of the enum value of DMAD_DMAC_CORE ++ * @drq : [in] Reference to the DMA queue structure (dmad_drq) ++ * @shutdown : [in] Non-zero to force a immediate channel shutdown ++ * @return : 0 if success, non-zero if any error ++ * ++ * Stop the DMA transmission and cancel all submitted requests of the given ++ * DMA channel. This function drains a single channel and is the internal ++ * implementation of the interface routine dmad_drain_requests() and the ++ * module_exit function. ++ */ ++static int dmad_channel_drain(u32 controller, dmad_drq * drq, u8 shutdown) ++{ ++ dmad_drb *drb = 0; ++ unsigned long lock_flags; ++ ++ if (unlikely(drq == NULL)) { ++ dmad_err("null ch_req->drq!\n"); ++ return -EBADR; ++ } ++ ++ spin_lock_irqsave(&drq->drb_pool_lock, lock_flags); ++ ++ /* Stop DMA channel if forced to shutdown immediately */ ++ if (shutdown) { ++ /* disable dma controller */ ++ dmad_reset_channel(drq); ++ ++ /* todo: more settings to stop DMA controller ?? */ ++ ++ /*if (drb->state == DMAD_DRB_STATE_EXECUTED) { */ ++ /*} */ ++ } ++ ++ /* Detach DRBs in submit queue */ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb); ++ ++ while (drb) { ++ dmad_dbg("cancel sbt drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x) state(%d)\n", ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, (u32) drb->state); ++ ++ /* Mark DRB state as abort */ ++ drb->state = DMAD_DRB_STATE_ABORT; ++ ++ if (drb->sync) ++ complete_all(drb->sync); ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, ++ drb->node); ++ ++ dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, ++ &drb); ++ } ++ ++ /* Detach DRBs in ready queue */ ++ dmad_detach_head(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, &drb); ++ ++ while (drb) { ++ dmad_dbg("cancel rdy drb(%d 0x%08x) addr0(0x%08x) " ++ "addr1(0x%08x) size(0x%08x) state(%d)\n", ++ drb->node, (u32) drb, drb->src_addr, drb->dst_addr, ++ drb->req_cycle, (u32) drb->state); ++ ++ /* Mark DRB state as abort */ ++ drb->state = DMAD_DRB_STATE_ABORT; ++ ++ dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, ++ drb->node); ++ ++ /* Detach next submitted DRB (DMA Request Block) from the ++ * DRQ (DMA Request Queue) */ ++ dmad_detach_head(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, ++ &drb); ++ } ++ ++ drq->state |= DMAD_DRQ_STATE_ABORT; ++ ++ drq->ring_drb = NULL; ++ drq->sw_ptr = 0; ++ drq->sw_p_idx = 0; ++ drq->sw_p_off = 0; ++ ++ spin_unlock_irqrestore(&drq->drb_pool_lock, lock_flags); ++ ++ if ( /*(drq->fre_head == 0) && */ (drq->flags & DMAD_FLAGS_SLEEP_BLOCK)) { ++ complete_all(&drq->drb_alloc_sync); ++ } ++ ++ return 0; ++} ++ ++/** ++ * dmad_cancel_requests - cancel DMA transmission of the given DMA channel ++ * @ch_req : [in] Reference to the DMA request descriptor structure ++ * @shutdown : [in] Non-zero to force a 
immediate channel shutdown ++ * @return : 0 if success, non-zero if any error ++ * ++ * Stop the DMA transmission and cancel all submitted requests of the given ++ * DMA channel. ++ */ ++int dmad_drain_requests(dmad_chreq * ch_req, u8 shutdown) ++{ ++ dmad_dbg("%s()\n", __func__); ++ ++ if (ch_req == NULL) { ++ dmad_err("null ch_req!\n"); ++ return -EFAULT; ++ } ++ ++ return dmad_channel_drain(ch_req->controller, ch_req->drq, shutdown); ++} ++ ++EXPORT_SYMBOL_GPL(dmad_drain_requests); ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++/** ++ * dmad_probe_irq_source - probe DMA channel who asserts the shared sw-irq line ++ * @return : The channel number which asserts the shared sw-irq line ++ * ++ * Probe DMA channel who asserts the shared sw-irq line. ++ */ ++int dmad_probe_irq_source_ahb(void) ++{ ++ int channel; /* interrupt channel number */ ++ ++ /* todo: spin_lock */ ++ ++ /* - Check DMA status register to get channel number */ ++ for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel) { ++ if (getbl(channel, DMAC_INT_TC)) ++ return channel; ++ } ++ ++ /* Perform DMA error checking if no valid channel was found who ++ * assert the finish signal. */ ++ for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel) { ++ if (getbl(channel, DMAC_INT_ERRABT)) ++ return channel; ++ if (getbl(channel << DMAC_INT_ABT_SHIFT, DMAC_INT_ERRABT)) ++ return channel; ++ } ++ ++ /* todo: spin_unlock */ ++ ++ return -EFAULT; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_probe_irq_source_ahb); ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++int dmad_probe_irq_source_apb(void) ++{ ++ int channel; /* interrupt channel number */ ++ ++ /* todo: spin_lock */ ++ ++ /* Check DMA status register to get channel number */ ++ for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel) { ++ if (getbl(APBBR_DMA_FINTST_BIT, APBBR_DMA_BASE_CH(channel) + ++ APBBR_DMA_CMD_OFFSET)) ++ return channel; ++ } ++ ++ /* Perform DMA error checking if no valid channel was found who ++ * assert the finish signal. 
*/ ++ for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel) { ++ if (getbl(APBBR_DMA_ERRINTST_BIT, APBBR_DMA_BASE_CH(channel) + ++ APBBR_DMA_CMD_OFFSET)) ++ return channel; ++ } ++ ++ /* todo: spin_unlock */ ++ ++ return -EFAULT; ++} ++ ++EXPORT_SYMBOL_GPL(dmad_probe_irq_source_apb); ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++/** ++ * dmad_module_init - dma module-init function ++ * @return : 0 if success, non-zero if any error ++ */ ++int __init dmad_module_init(void) ++{ ++ int err = 0; ++ dmad_dbg("%s() >>\n", __func__); ++ ++ /* clear device struct since the module may be load/unload many times */ ++ memset(&dmad, 0, sizeof(dmad)); ++ ++ dmad.drq_pool = ++ kmalloc((DMAD_AHB_MAX_CHANNELS + ++ DMAD_APB_MAX_CHANNELS) * sizeof(dmad_drq), GFP_KERNEL); ++ if (dmad.drq_pool == NULL) { ++ dmad_err("%s() failed to allocate drb pool!\n", __func__); ++ return -ENOMEM; ++ } ++ ++ memset(dmad.drq_pool, 0, ++ (DMAD_AHB_MAX_CHANNELS + DMAD_APB_MAX_CHANNELS) * ++ sizeof(dmad_drq)); ++ ++ spin_lock_init(&dmad.drq_pool_lock); ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ dmad.ahb_drq_pool = dmad.drq_pool; ++ if (unlikely(!request_region(DMAC_BASE, SZ_1K, "AHB DMAC"))) { ++ dmad_err("Cannot reserve AHB DMAC I/O region\n"); ++ err = -EBUSY; ++ } ++#endif ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ dmad.apb_drq_pool = &dmad.drq_pool[DMAD_AHB_MAX_CHANNELS]; ++ if (unlikely(!request_region(APBBR_BASE, SZ_1K, "APB DMAC"))) { ++ dmad_err("Cannot reserve APB DMAC I/O region\n"); ++ err = -EBUSY; ++ } ++#endif ++ ++ dmad_dbg("DMA module init result: (%d)\n", err); ++ dmad_dbg(" AHB channels: %d; APB channels %d; " ++ "DRBs per channel: %d\n", ++ DMAC_MAX_CHANNELS, APBBR_DMA_MAX_CHANNELS, DMAD_DRB_POOL_SIZE); ++ ++ dmad_dbg("%s() return code (%d) <<\n", __func__, err); ++ return err; ++} ++ ++/** ++ * dmad_module_init - dma module clean up function ++ */ ++void __exit dmad_module_exit(void) ++{ ++ dmad_drq *drq; ++ u32 channel; ++ ++ dmad_dbg("%s() >>\n", __func__); ++ ++ spin_lock(&dmad.drq_pool_lock); ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ /* cancel existing requests and unregister interrupt handler */ ++ for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel) { ++ ++ /* shutdown dma requests */ ++ drq = (dmad_drq *) & dmad.ahb_drq_pool[channel]; ++ ++ if ((drq->state & DMAD_DRQ_STATE_READY) != 0) ++ dmad_channel_drain(DMAD_DMAC_AHB_CORE, drq, 1); ++ ++ /* free registered irq handlers */ ++ free_irq(ahb_irqs[channel], (void *)(channel + 1)); ++ } ++#endif ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ /* cancel existing requests and unregister interrupt handler */ ++ for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel) { ++ ++ /* shutdown dma requests */ ++ drq = (dmad_drq *) & dmad.apb_drq_pool[channel]; ++ ++ if ((drq->state & DMAD_DRQ_STATE_READY) != 0) ++ dmad_channel_drain(DMAD_DMAC_APB_CORE, drq, 1); ++ ++ /* free registered irq handlers */ ++ free_irq(apb_irqs[channel], (void *)(channel + 1)); ++ } ++#endif ++ spin_unlock(&dmad.drq_pool_lock); ++ ++ if (dmad.drq_pool) ++ kfree(dmad.drq_pool); ++ memset(&dmad, 0, sizeof(dmad)); ++ ++ /* release I/O space */ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ release_region(DMAC_BASE, SZ_1K); ++#endif /*CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ release_region(APBBR_BASE, SZ_1K); ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++ dmad_dbg("DMA module unloaded!\n"); ++} ++ ++#ifndef MODULE ++arch_initcall(dmad_module_init); ++#else ++module_init(dmad_module_init); ++module_exit(dmad_module_exit); ++#endif ++ ++#endif /* CONFIG_PLATFORM_AHBDMA || 
CONFIG_PLATFORM_APBDMA */ +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/dmad_intc.c linux-3.4.110/arch/nds32/platforms/dmad_intc.c +--- linux-3.4.110.orig/arch/nds32/platforms/dmad_intc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/dmad_intc.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,124 @@ ++/* ++ * linux/arch/armnommu/mach-faraday/platform-a320/apb_intc.c ++ * ++ * Faraday AHB DMA Interrupt Process Driver Implementation ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * ++ * ChangeLog ++ * ++ * Peter Liao 09/28/2005 Created ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++extern int dmad_probe_irq_source_ahb(void); ++ ++void AHBDMA_irq_rounter(unsigned int irq, struct irq_desc *desc) ++{ ++ int ahb_irq; ++ struct irq_desc *ahb_desc; ++ struct irq_data data; ++ data.irq = irq; ++ ++ raw_spin_lock(&desc->lock); ++ desc->irq_data.chip->irq_mask(&data); ++ desc->irq_data.chip->irq_ack(&data); ++ ++ ahb_irq = dmad_probe_irq_source_ahb(); ++ if (ahb_irq >= 0) { ++ ahb_irq += DMAC_FTDMAC020_IRQ0; ++ ahb_desc = irq_desc + ahb_irq; ++ ++ raw_spin_unlock(&desc->lock); ++ ahb_desc->handle_irq(ahb_irq, ahb_desc); ++ raw_spin_lock(&desc->lock); ++ } ++ ++ desc->irq_data.chip->irq_unmask(&data); ++ raw_spin_unlock(&desc->lock); ++} ++ ++int __init intc_ftdmac020_init_irq(void) ++{ ++ int i; ++ ++ /* Register all IRQ */ ++ for (i = DMAC_FTDMAC020_IRQ0; ++ i < DMAC_FTDMAC020_IRQ0 + DMAC_FTDMAC020_IRQ_COUNT; i++) { ++ // level trigger ++ irq_set_chip(i, &dummy_irq_chip); ++ irq_set_handler(i, handle_simple_irq); ++ } ++ irq_set_chained_handler(PLATFORM_AHBDMA_IRQ, AHBDMA_irq_rounter); ++ ++ return 0; ++} ++ ++arch_initcall(intc_ftdmac020_init_irq); ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++extern int dmad_probe_irq_source_apb(void); ++ ++void APBDMA_irq_rounter(unsigned int irq, struct irq_desc *desc) ++{ ++ int apb_irq; ++ struct irq_desc *apb_desc; ++ struct irq_data data; ++ data.irq = irq; ++ ++ raw_spin_lock(&desc->lock); ++ ++ //mask_ack_irq(desc, irq); ++ desc->irq_data.chip->irq_mask(&data); ++ desc->irq_data.chip->irq_ack(&data); ++ ++ apb_irq = dmad_probe_irq_source_apb(); ++ //printk(KERN_INFO "irq (%d) ch(%d)\n", irq, apb_irq); ++ ++ if (apb_irq >= 0) { ++ apb_irq += APBBRG_FTAPBBRG020S_0_IRQ0; ++ apb_desc = irq_desc + apb_irq; ++ ++ raw_spin_unlock(&desc->lock); ++ apb_desc->handle_irq(irq, apb_desc); ++ raw_spin_lock(&desc->lock); ++ } ++ ++ desc->irq_data.chip->irq_unmask(&data); ++ raw_spin_unlock(&desc->lock); ++} ++ ++int __init intc_ftapbbrg020s_init_irq(void) ++{ ++ int i; ++ ++ /* Register all IRQ */ ++ for (i = APBBRG_FTAPBBRG020S_0_IRQ0; ++ i < APBBRG_FTAPBBRG020S_0_IRQ0 + APBBRG_FTAPBBRG020S_IRQ_COUNT; ++ i++) { ++ // level trigger ++ irq_set_chip(i, &dummy_irq_chip); ++ irq_set_handler(i, handle_simple_irq); ++ } ++ ++ irq_set_chained_handler(PLATFORM_APBDMA_IRQ, APBDMA_irq_rounter); ++ ++ return 0; ++} ++ ++arch_initcall(intc_ftapbbrg020s_init_irq); ++#endif /* CONFIG_PLATFORM_APBDMA */ +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/ftpci.c linux-3.4.110/arch/nds32/platforms/ftpci.c +--- linux-3.4.110.orig/arch/nds32/platforms/ftpci.c 1970-01-01 01:00:00.000000000 +0100 ++++ 
linux-3.4.110/arch/nds32/platforms/ftpci.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,497 @@ ++/* ++ * linux/arch/nds32/platforms/ftpci.c ++ * ++ * Faraday FTPCI100 PCI Bridge Controller Device Driver Implementation ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ++ * ChangeLog ++ * ++ * Peter Liao 09/28/2005 Created. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define IPMODULE PCIC ++#define IPNAME FTPCI100 ++ ++#define DEBUGFPCI ++#undef DEBUGFPCI ++ ++#ifdef DEBUGFPCI ++#define DBGFPCI(x...) printk(x) ++#else ++#define DBGFPCI(x...) ++#endif ++#define FPCI_VA_BASE IP_VA_BASE(0) ++#define FTPCI_PCI_BRIDGE_VENID PCI_BRIDGE_VENID ++#define FPCI_IO_VA_BASE PCIIO_VA_BASE ++#define FPCI_IO_PA_BASE PCIIO_PA_BASE ++#define FPCI_IO_VA_END PCIIO_VA_LIMIT ++#define FPCI_MEM_VA_BASE PCIMEM_VA_BASE ++#define FPCI_MEM_PA_BASE PCIMEM_PA_BASE ++#define FPCI_MEM_VA_END PCIMEM_VA_LIMIT ++#define FPCI_MEM_PA_END (PCIMEM_PA_BASE + SZ_256M) ++ ++// -------------------------------------------------------------------- ++// AHB Control Register ++// -------------------------------------------------------------------- ++#define FTPCI_IOSIZE_REG 0x0 ++#define FTPCI_PROT_REG 0x4 ++#define FTPCI_CTRL_REG 0x8 ++#define FTPCI_ERREN_REG 0xc ++#define FTPCI_SOFTRST_REG 0x10 ++#define FTPCI_EN64_REG 0x14 ++#define FTPCI_ADDRH32_REG 0x18 ++#define FTPCI_CFG_ADR_REG 0x28 ++#define FTPCI_CFG_DATA_REG 0x2c ++ ++// -------------------------------------------------------------------- ++// FTPCI_IOSIZE_REG's constant definitions ++// -------------------------------------------------------------------- ++#define FTPCI_BASE_IO_SIZE_1M 0x0 ++#define FTPCI_BASE_IO_SIZE_2M 0x1 ++#define FTPCI_BASE_IO_SIZE_4M 0x2 ++#define FTPCI_BASE_IO_SIZE_8M 0x3 ++#define FTPCI_BASE_IO_SIZE_16M 0x4 ++#define FTPCI_BASE_IO_SIZE_32M 0x5 ++#define FTPCI_BASE_IO_SIZE_64M 0x6 ++#define FTPCI_BASE_IO_SIZE_128M 0x7 ++#define FTPCI_BASE_IO_SIZE_256M 0x8 ++#define FTPCI_BASE_IO_SIZE_512M 0x9 ++#define FTPCI_BASE_IO_SIZE_1G 0xa ++#define FTPCI_BASE_IO_SIZE_2G 0xb ++ ++// -------------------------------------------------------------------- ++// PCI Configuration Register ++// -------------------------------------------------------------------- ++#define PCI_INT_MASK 0x4c ++#define PCI_MEM_BASE_SIZE1 0x50 ++#define PCI_MEM_BASE_SIZE2 0x54 ++#define PCI_MEM_BASE_SIZE3 0x58 ++ ++// -------------------------------------------------------------------- ++// PCI_INT_MASK's bit definitions ++// -------------------------------------------------------------------- ++#define 
PCI_INTA_ENABLE (1U<<22) ++#define PCI_INTB_ENABLE (1U<<23) ++#define PCI_INTC_ENABLE (1U<<24) ++#define PCI_INTD_ENABLE (1U<<25) ++ ++// -------------------------------------------------------------------- ++// PCI_MEM_BASE_SIZE1's constant definitions ++// -------------------------------------------------------------------- ++#define FTPCI_BASE_ADR_SIZE_1MB (PHYS_OFFSET | (0x0<<16)) ++#define FTPCI_BASE_ADR_SIZE_2MB (PHYS_OFFSET | (0x1<<16)) ++#define FTPCI_BASE_ADR_SIZE_4MB (PHYS_OFFSET | (0x2<<16)) ++#define FTPCI_BASE_ADR_SIZE_8MB (PHYS_OFFSET | (0x3<<16)) ++#define FTPCI_BASE_ADR_SIZE_16MB (PHYS_OFFSET | (0x4<<16)) ++#define FTPCI_BASE_ADR_SIZE_32MB (PHYS_OFFSET | (0x5<<16)) ++#define FTPCI_BASE_ADR_SIZE_64MB (PHYS_OFFSET | (0x6<<16)) ++#define FTPCI_BASE_ADR_SIZE_128MB (PHYS_OFFSET | (0x7<<16)) ++#define FTPCI_BASE_ADR_SIZE_256MB (PHYS_OFFSET | (0x8<<16)) ++#define FTPCI_BASE_ADR_SIZE_512MB (PHYS_OFFSET | (0x9<<16)) ++#define FTPCI_BASE_ADR_SIZE_1GB (PHYS_OFFSET | (0xa<<16)) ++#define FTPCI_BASE_ADR_SIZE_2GB (PHYS_OFFSET | (0xb<<16)) ++ ++#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (bus << 16) | (device_fn << 8) | (where & ~3) ) ++ ++struct pci_dev *pci_bridge = NULL; ++static unsigned int pci_config_addr; ++static unsigned int pci_config_data; ++int ftpci_probed = 0; ++ ++static struct resource pcic_resource = { ++ .name = "Faradat PCIC", ++ .start = IP_VA_BASE(0), ++ .end = IP_VA_LIMIT(0), ++}; ++ ++// Luke Lee 03/21/2005 mod begin ++static int ftpci_read_config_byte(struct pci_bus *bus, unsigned int devfn, ++ int where, u8 * val) ++{ ++ u32 v; ++ unsigned int shift; ++ ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ v = inl(pci_config_data); ++ shift = (where & 0x3) * 8; ++ *val = (v >> shift) & 0xff; ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ftpci_read_config_word(struct pci_bus *bus, unsigned int devfn, ++ int where, u16 * val) ++{ ++ u32 v; ++ unsigned int shift; ++ ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ v = inl(pci_config_data); ++ shift = (where & 0x3) * 8; ++ *val = (v >> shift) & 0xffff; ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ftpci_read_config_dword(struct pci_bus *bus, unsigned int devfn, ++ int where, u32 * val) ++{ ++ u32 v; ++ ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ v = inl(pci_config_data); ++ *val = v; ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ftpci_write_config_byte(struct pci_bus *bus, unsigned int devfn, ++ int where, u8 val) ++{ ++ u32 org_val; ++ unsigned int shift; ++ ++ shift = (where & 0x3) * 8; ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ org_val = inl(pci_config_data); ++ org_val = (org_val & ~(0xff << shift)) | ((u32) val << shift); ++ outl(org_val, pci_config_data); ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ftpci_write_config_word(struct pci_bus *bus, unsigned int devfn, ++ int where, u16 val) ++{ ++ u32 org_val; ++ unsigned int shift; ++ ++ shift = (where & 0x3) * 8; ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ org_val = inl(pci_config_data); ++ org_val = (org_val & ~(0xffff << shift)) | ((u32) val << shift); ++ outl(org_val, pci_config_data); ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ftpci_write_config_dword(struct pci_bus *bus, unsigned int devfn, ++ int where, u32 val) ++{ ++ outl(CONFIG_CMD(bus->number, devfn, where), pci_config_addr); ++ outl(val, pci_config_data); ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++// Luke Lee 03/21/2005 mod end ++ ++// Luke Lee 03/21/2005 ins begin 
++static int ftpci_read_config(struct pci_bus *bus, unsigned int devfn, int where, ++ int size, u32 * val) ++{ ++ int r; ++ switch (size) { ++ case 1: ++ r = ftpci_read_config_byte(bus, devfn, where, (u8 *) val); // Luke Lee TOFIX 03/22/2005 : convert to (u8*) -- beware of endian ! ++ break; ++ case 2: ++ r = ftpci_read_config_word(bus, devfn, where, (u16 *) val); // Luke Lee TOFIX 03/22/2005 : convert to (u16*) -- beware of endian ! ++ break; ++ ++ default: ++ r = ftpci_read_config_dword(bus, devfn, where, val); ++ break; ++ } ++ ++ return r; ++} ++ ++static int ftpci_write_config(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ int r; ++ switch (size) { ++ case 1: ++ r = ftpci_write_config_byte(bus, devfn, where, val); ++ break; ++ case 2: ++ r = ftpci_write_config_word(bus, devfn, where, val); ++ break; ++ ++ case 4: ++ r = ftpci_write_config_dword(bus, devfn, where, val); ++ break; ++ default: ++ printk("Invalid size for ftpci_write()\n"); ++ r = PCIBIOS_FUNC_NOT_SUPPORTED; // Luke Lee 03/23/2005 ins 1 ++ } ++ ++ return r; ++} ++ ++// Luke Lee 03/21/2005 ins end ++ ++static struct pci_ops ftpci_ops = { ++ // Luke Lee 03/21/2005 mod begin ++ .read = ftpci_read_config, ++ .write = ftpci_write_config, ++ // Luke Lee 03/21/2005 mod end ++}; ++ ++/* using virtual address for pci_resource_start() function*/ ++static struct resource pci_io = { ++ .name = "Faraday PCI I/O Space", ++ .start = FPCI_IO_VA_BASE, ++ .end = FPCI_IO_VA_END, ++ .flags = IORESOURCE_IO, ++}; ++ ++/* using physical address for memory resource*/ ++static struct resource pci_mem = { ++ .name = "Faraday PCI non-prefetchable Memory Space", ++ .start = FPCI_MEM_PA_BASE, ++ .end = FPCI_MEM_PA_END, ++ .flags = IORESOURCE_MEM, ++}; ++ ++// Luke Lee 03/23/2005 unrem 1 rem 1 ++int __init ftpci_setup_resource(struct resource **resource) ++{ ++ DBGFPCI("PCI I/O space from %08lX to %08lX\n", pci_io.start, ++ pci_io.end); ++ DBGFPCI("PCI Memory space from %08lX to %08lX\n", pci_mem.start, ++ pci_mem.end); ++ if (request_resource(&ioport_resource, &pci_io)) { ++ printk(KERN_ERR "PCI: unable to allocate io region\n"); ++ return -EBUSY; // Luke Lee 03/23/2005 unrem 1 ++ } ++ if (request_resource(&iomem_resource, &pci_mem)) { ++ printk(KERN_ERR "PCI: unable to allocate non-prefetchable " ++ "memory region\n"); ++ return -EBUSY; // Luke Lee 03/23/2005 unrem 1 ++ } ++ ++ /* ++ * bus->resource[0] is the IO resource for this bus ++ * bus->resource[1] is the mem resource for this bus ++ * bus->resource[2] is the prefetch mem resource for this bus ++ */ ++ ++ resource[0] = &pci_io; ++ resource[1] = &pci_mem; ++ resource[2] = NULL; ++ ++ return 1; // Luke Lee 03/23/2005 unrem 1 ++} ++ ++int ftpci_get_irq(void) ++{ ++ unsigned int status; ++ ftpci_read_config_dword(pci_bridge->bus, pci_bridge->devfn, 0x4c, &status); // Luke Lee 03/22/2005 mod 1 ++ DBGFPCI("ftpci_get_irq,status=0x%x\n", status); ++ status = (status >> 28); ++ if (status & 0x1) ++ return 0; ++ if (status & 0x2) ++ return 1; ++ if (status & 0x4) ++ return 2; ++ if (status & 0x8) ++ return 3; ++ return -1; ++} ++ ++void ftpci_clear_irq(unsigned int irq) ++{ ++ //int i; ++ unsigned int status; ++ ftpci_read_config_dword(pci_bridge->bus, pci_bridge->devfn, 0x4c, &status); // Luke Lee 03/22/2005 mod 1 ++ if (irq == 0) ++ status = (status & 0xfffffff) | ((0x1) << 28); ++ else if (irq == 1) ++ status = (status & 0xfffffff) | ((0x2) << 28); ++ else if (irq == 2) ++ status = (status & 0xfffffff) | ((0x4) << 28); ++ else if (irq == 3) ++ status = (status & 
0xfffffff) | ((0x8) << 28); ++ ftpci_write_config_dword(pci_bridge->bus, pci_bridge->devfn, 0x4c, status); // Luke Lee 03/22/2005 mod 1 ++} ++ ++void ftpci_unmask_irq(unsigned int irq) ++{ ++ u32 val; ++ ftpci_read_config_dword(pci_bridge->bus, pci_bridge->devfn, ++ PCI_INT_MASK, &val); ++ val |= (PCI_INTA_ENABLE << irq); ++ ftpci_write_config_dword(pci_bridge->bus, pci_bridge->devfn, ++ PCI_INT_MASK, val); ++} ++ ++void ftpci_mask_irq(unsigned int irq) ++{ ++ u32 val; ++ ftpci_read_config_dword(pci_bridge->bus, pci_bridge->devfn, ++ PCI_INT_MASK, &val); ++ val &= ~(PCI_INTA_ENABLE << irq); ++ ftpci_write_config_dword(pci_bridge->bus, pci_bridge->devfn, ++ PCI_INT_MASK, val); ++} ++ ++static int ftpci_probe(unsigned int addr_p) ++{ ++ unsigned int *addr = (unsigned int *)addr_p; ++ *(volatile unsigned int *)addr = 0x80000000; ++ if (*(volatile unsigned int *)addr == 0x80000000) { ++ DBGFPCI("Faraday FPCI bridge probed ok\n"); ++ ftpci_probed = 1; ++ } else { ++ ftpci_probed = 0; ++ } ++ *(volatile unsigned int *)addr = 0x0; ++ return ftpci_probed; ++} ++ ++void __init ftpci_preinit(void /**sysdata*/ ) // Luke Lee 03/22/2005 mod 1 ++{ ++ DBGFPCI("ftpci_preinit()\n\r"); ++ ++#ifdef CONFIG_PLAT_AG101 ++ /* Walk around for A321 but remove after leopard pci */ ++ *(volatile unsigned long *)(FPCI_VA_BASE + 0x8) = 0x10; ++#endif ++ ++ pci_config_addr = FPCI_VA_BASE + FTPCI_CFG_ADR_REG; ++ pci_config_data = FPCI_VA_BASE + FTPCI_CFG_DATA_REG; ++ DBGFPCI("Config addr is %08X, data port is %08X\n", ++ (int)pci_config_addr, (int)pci_config_data); ++ ++ if (!ftpci_probe(pci_config_addr)) ++ return; ++} ++ ++void ftpci_postinit(void /**sysdata*/ ) ++{ ++ u32 val; ++ DBGFPCI("ftpci_postinit()\n\r"); ++ pci_bridge = pci_get_device(PCI_BRIDGE_VENID, PCI_BRIDGE_DEVID, NULL); ++ if (pci_bridge == NULL) ++ return; ++ // Enable the Interrupt Mask (INTA/INTB/INTC/INTD) ++ ftpci_read_config_dword(pci_bridge->bus, pci_bridge->devfn, PCI_INT_MASK, &val); // Luke Lee 03/22/2005 mod 1 ++ val |= ++ (PCI_INTA_ENABLE | PCI_INTB_ENABLE | PCI_INTC_ENABLE | ++ PCI_INTD_ENABLE); ++ ftpci_write_config_dword(pci_bridge->bus, pci_bridge->devfn, PCI_INT_MASK, val); // Luke Lee 03/22/2005 mod 1 ++ ++ // Write DMA Start Address/Size Data to the Bridge configuration space ++ ftpci_write_config_dword(pci_bridge->bus, pci_bridge->devfn, PCI_MEM_BASE_SIZE1, FTPCI_BASE_ADR_SIZE_1GB); // Luke Lee 03/22/2005 mod 1 ++ DBGFPCI("%s: Post init ok\n", __func__); ++} ++ ++/* ++ * This routine handles multiple bridges. ++ */ ++static u8 __init fpci_swizzle(struct pci_dev *dev, u8 * pinp) ++{ ++ // If there are one more bridges on our platfrom, we need to implement this function. 
++ DBGFPCI("a320_swizzle(%X,%X)\n\r", (unsigned)dev, (unsigned)pinp); ++ return 0; ++} ++ ++static int __init fpci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ DBGFPCI("a320_map_irq,slot=%d pin=%d\n", PCI_SLOT(dev->devfn), pin); ++ switch ((PCI_SLOT(dev->devfn) + pin - 8 - 1) % 4) { ++ case 0: ++ return IP_IRQ0(0); ++ case 1: ++ return IP_IRQ1(0); ++ case 2: ++ return IP_IRQ2(0); ++ case 3: ++ return IP_IRQ3(0); ++ default: ++ printk(KERN_ERR "Not Support Slot %d\n", slot); ++ break; ++ } ++ return -1; ++} ++ ++int __init ftpci_setup(int nr, struct pci_sys_data *sys) ++{ ++ int ret = 0; ++ if (nr == 0) { ++ ret = ftpci_setup_resource(sys->resource); ++// sys->mem_offset = FPCI_MEM_VA_BASE - FPCI_MEM_PA_BASE; ++ sys->mem_offset = 0; ++ sys->io_offset = FPCI_IO_VA_BASE - FPCI_IO_PA_BASE; ++ } ++ return ret; ++} ++ ++static struct pci_bus *__devinit ftpci_scan_bus(int nr, ++ struct pci_sys_data *sys) ++{ ++ return pci_scan_bus(sys->busnr, &ftpci_ops, sys); ++} ++ ++static struct hw_pci a320_pci __initdata = { ++ .swizzle = fpci_swizzle, ++ .map_irq = fpci_map_irq, ++ .setup = ftpci_setup, ++ .nr_controllers = 1, ++ .scan = ftpci_scan_bus, ++ .preinit = ftpci_preinit, /* The first called init function */ ++ .postinit = ftpci_postinit, /* It is called after hw init and scanned PCI bus */ ++}; ++ ++static int __init fpci_init(void) ++{ ++#ifdef MODULE ++ printk(KERN_INFO "Faraday PCI driver Init"); ++#endif ++ printk(KERN_DEBUG "Init A321 PCI bridge controller\n"); ++ /* Register I/O address range of this PCI Bridge Controller */ ++ DBGFPCI("Name:%s, Base=%lX, End=%lX\n", pcic_resource.name, ++ pcic_resource.start, pcic_resource.end); ++ request_resource(&ioport_resource, &pcic_resource); ++ pci_common_init(&a320_pci); ++ return 0; ++} ++ ++subsys_initcall(fpci_init); ++ ++EXPORT_SYMBOL(ftpci_probed); ++EXPORT_SYMBOL(ftpci_clear_irq); ++EXPORT_SYMBOL(ftpci_get_irq); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/intc.c linux-3.4.110/arch/nds32/platforms/intc.c +--- linux-3.4.110.orig/arch/nds32/platforms/intc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/intc.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,257 @@ ++/* ++ * linux/arch/nds32/platforms/intc.c ++ * ++ * Faraday FTINTC010 Master Interrupt Controller Device Driver Implementation ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * Note ++ * ++ * This program implements only the master INTC of the platform. Slave INTCs must ++ * be initialized by themselves. ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/14/2005 Created. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define IPMODULE INTC ++#define IPNAME FTINTC010 ++ ++/* ++ * Edge trigger IRQ chip methods ++ */ ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++spinlock_t irq_chip_lock; ++#endif ++ ++static void intc_ftintc010_ack_irq(struct irq_data *data) ++{ ++ unsigned int tmp; ++ ++ // ack and disable ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_irq(&irq_chip_lock); ++#endif ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_CLEAR_REG) = 1 << data->irq; ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_CLEAR_REG); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MODE_REG); ++ ++ if (!(tmp & (1UL << data->irq))) { /* level trigger */ ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG) &= ++ ~(1 << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG); ++ } ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_unlock_irq(&irq_chip_lock); ++#endif ++} ++ ++static void intc_ftintc010_mask_irq(struct irq_data *data) ++{ ++ unsigned int tmp; ++ ++ // disable ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_irq(&irq_chip_lock); ++#endif ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG) &= ++ ~(1 << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG); ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_unlock_irq(&irq_chip_lock); ++#endif ++} ++ ++static void intc_ftintc010_mask_ack_irq(struct irq_data *data) ++{ ++ unsigned int tmp; ++ ++ // disable ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_irq(&irq_chip_lock); ++#endif ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG) &= ++ ~(1 << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG); ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_CLEAR_REG) = 1 << data->irq; ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_CLEAR_REG); ++ ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_unlock_irq(&irq_chip_lock); ++#endif ++} ++ ++static void intc_ftintc010_unmask_irq(struct irq_data *data) ++{ ++ unsigned int tmp; ++ ++ // enable ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_irq(&irq_chip_lock); ++#endif ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG) |= 1 << data->irq; ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MASK_REG); ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_unlock_irq(&irq_chip_lock); ++#endif ++} ++ ++static int intc_ftintc010_set_type(struct irq_data *data, ++ unsigned int flow_type) ++{ ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_irq(&irq_chip_lock); ++#endif ++ /* ++ * IRQ Trigger Mode Register 1: edge ++ * IRQ Trigger Level Register 1: active high ++ */ ++ ++ int tmp; ++ ++ if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) { ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_LEVEL_REG) &= ++ ~(1UL << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_LEVEL_REG); ++ } ++ ++ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) { ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_LEVEL_REG) |= ++ (1UL << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_LEVEL_REG); ++ } ++ ++ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MODE_REG) |= ++ (1UL << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MODE_REG); ++ (irq_desc + data->irq)->handle_irq = 
handle_edge_irq; ++ } ++ ++ if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { ++ ++ *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MODE_REG) &= ++ ~(1UL << data->irq); ++ tmp = *(volatile unsigned *)(IP_VA_BASE(0) + IRQ_MODE_REG); ++ (irq_desc + data->irq)->handle_irq = handle_level_irq; ++ } ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_unlock_irq(&irq_chip_lock); ++#endif ++ return 0; ++} ++ ++static struct irq_chip intc_ftintc010_chip = { ++ ++ .irq_ack = intc_ftintc010_ack_irq, ++ .irq_mask = intc_ftintc010_mask_irq, ++ .irq_mask_ack = intc_ftintc010_mask_ack_irq, ++ .irq_unmask = intc_ftintc010_unmask_irq, ++ .irq_set_type = intc_ftintc010_set_type, ++}; ++ ++static struct resource intc_resource = { ++ ++ .name = "Main interrupt controller", ++ .start = IP_VA_BASE(0), ++ .end = IP_VA_BASE(0) + IP_VA_SIZE(0), ++}; ++ ++/* ++ * Initialization of master interrupt controller, after this INTC is ++ * enabled, the rest of Linux initialization codes can then be completed. ++ * For example, timer interrupts and UART interrupts must be enabled during ++ * the boot process. ++ */ ++void __init intc_ftintc010_init_irq(void) ++{ ++ int i, edge; ++ ++#if defined(CONFIG_SMP) && defined(CONFIG_IRQ_PER_CPU) ++ spin_lock_init(&irq_chip_lock); ++#endif ++ /* Initialize the INTC */ ++ outl(0x00000000, IP_VA_BASE(0) + IRQ_MASK_REG); ++ outl(0x00000000, IP_VA_BASE(0) + FIQ_MASK_REG); ++ outl(0xffffffff, IP_VA_BASE(0) + IRQ_CLEAR_REG); ++ outl(0xffffffff, IP_VA_BASE(0) + FIQ_CLEAR_REG); ++ outl(PLATFORM_IRQ_TRIGGER_MODE, IP_VA_BASE(0) + IRQ_MODE_REG); ++ /* FTINTC010: bit 0=active high or rising edge, 1=active low or falling edge. */ ++ outl(PLATFORM_IRQ_TRIGGER_LEVEL, IP_VA_BASE(0) + IRQ_LEVEL_REG); ++ ++ /* Register all IRQ */ ++ for (i = PLATFORM_IRQ_BASE, edge = 1; ++ i < PLATFORM_IRQ_BASE + PLATFORM_IRQ_TOTALCOUNT; i++, edge <<= 1) { ++ ++ irq_set_chip(i, &intc_ftintc010_chip); ++ ++ if (PLATFORM_IRQ_TRIGGER_MODE & edge) /* edge trigger */ ++ irq_set_handler(i, handle_edge_irq); ++ ++ else /* level trigger */ ++ irq_set_handler(i, handle_level_irq); ++ } ++ ++ /* Register I/O address range of this INTC */ ++ request_resource(&ioport_resource, &intc_resource); ++ ++} ++ ++unsigned int get_IntSrc(void) ++{ ++ unsigned int irq_status, irq = 31; ++ ++ irq_status = inl(IP_VA_BASE(0) + IRQ_STATUS_REG); ++ if (irq_status == 0) ++ return 32; ++ if (irq_status & 0x0000ffff) { ++ irq -= 16; ++ irq_status <<= 16; ++ } ++ if (irq_status & 0x00ff0000) { ++ irq -= 8; ++ irq_status <<= 8; ++ } ++ if (irq_status & 0x0f000000) { ++ irq -= 4; ++ irq_status <<= 4; ++ } ++ if (irq_status & 0x30000000) { ++ irq -= 2; ++ irq_status <<= 2; ++ } ++ if (irq_status & 0x40000000) { ++ irq -= 1; ++ } ++ return irq; ++} +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/Kconfig linux-3.4.110/arch/nds32/platforms/Kconfig +--- linux-3.4.110.orig/arch/nds32/platforms/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/Kconfig 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,122 @@ ++choice ++ prompt "platform type" ++ default PLAT_AG101P ++ ++config PLAT_VEP ++ bool "vep platform" ++ select CPU_CUSTOM ++ select PLATFORM_INTC ++ ++config PLAT_AG101 ++ bool "ag101 platform" ++ select CPU_N1213 ++ select CPU_N1213_43U1HA0 ++ select PLATFORM_INTC ++ ++config PLAT_AG102 ++ bool "ag102 platform" ++ select CPU_N1233F ++ select PLATFORM_AMIC ++ ++config PLAT_AG101P ++ bool "ag101p platform" ++ select CPU_CUSTOM ++ select PLATFORM_INTC if !IVIC ++ select PLATFORM_NOINTC if IVIC ++ 
++config PLAT_QEMU ++ bool "qemu platform" ++ select CPU_CUSTOM ++ select PLATFORM_INTC ++endchoice ++ ++config PLATFORM_NOINTC ++ def_bool n ++ depends on PLAT_AG101P ++ ++config PLATFORM_INTC ++ def_bool n ++ depends on !PLAT_AG102 ++ ++config PLATFORM_AMIC ++ def_bool n ++ depends on PLAT_AG102 ++ ++config ARCH_WANT_OPTIONAL_GPIOLIB ++ bool "Arch Want Optional GPIOLIB" ++ default y ++ ++if PLAT_VEP ++source "arch/nds32/platforms/vep/Kconfig" ++endif ++ ++if PLAT_AG101 ++source "arch/nds32/platforms/ag101/Kconfig" ++endif ++ ++if PLAT_AG102 ++source "arch/nds32/platforms/ag102/Kconfig" ++endif ++ ++if PLAT_AG101P ++source "arch/nds32/platforms/ag101p/Kconfig" ++endif ++ ++if PLAT_QEMU ++source "arch/nds32/platforms/qemu/Kconfig" ++endif ++ ++menu "Common Platform Options" ++ ++config PLATFORM_AHBDMA ++ tristate "AHB DMA Support" ++ help ++ AHB DMA service API support for other device drivers ++ ++config PLATFORM_APBDMA ++ tristate "APB DMA Support" ++ help ++ AHB DMA service API support for other device drivers ++ ++config SYS_CLK ++ int "AHB System Clock" ++ default 67737600 ++ help ++ Manual setting of AHB clock, must match the jumper setting on ++ the board, or the system time won't be correctly calculated. ++ Notice that even when AUTO_SYS_CLK is ON, this value is still ++ required for adjusting minor time offsets. However, the influence ++ should be within micro-second to nano-second scale. ++ ++config UART_CLK ++ int "UART Clock" ++ default 18432000 ++ help ++ Change the UART clock in case of non-3.6864MHz OSC is used as main ++ clock source, or an external UART clock source is fed from GPIO23. ++ To support external UART clock from GPIO23, set PMU ++ "Multi-Function Port Setting Register" bit #8 (UartClkSel) to 1. ++ This control register can be found at physical address 0x98100028 ++ If this options is changed, please also append "38400" to your ++ kernel command line, e.g.: ++ console=uart,shift,2,io,0xF9820000,38400 ++ Note: For A320, the default UART clock is obtained by = 5 * OSC = ++ 5 * 3.6864MHz = 18.432MHz. ++ ++menu "Memory configuration" ++ ++config SDRAM_SIZE ++ hex "SDRAM Size (hex)" ++ default 4000000 ++ ---help--- ++ RAM size ++ ++config MEMORY_START ++ hex "Physical memory start address" ++ default "0x00000000" ++ ---help--- ++ Physical memory start address, you may modify it if it is porting to ++ a new SoC with different start address. 
++endmenu ++ ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/Makefile linux-3.4.110/arch/nds32/platforms/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/Makefile 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,25 @@ ++obj-y := timer.o ++ ++ifdef CONFIG_FUNCTION_TRACER ++CFLAGS_REMOVE_timer.o = -pg ++endif ++ ++obj-$(CONFIG_PLATFORM_NOINTC) += nointc.o ++obj-$(CONFIG_PLATFORM_INTC) += intc.o ++obj-$(CONFIG_PLATFORM_AMIC) += amic.o ++ ++ifeq ("$(CONFIG_PLATFORM_AHBDMA)", "y") ++ obj-y += dmad_intc.o dmad.o ++else ++ ifeq ("$(CONFIG_PLATFORM_APBDMA)", "y") ++ obj-y += dmad_intc.o dmad.o ++ endif ++endif ++ ++obj-$(CONFIG_PCI) += ftpci.o pci_intc.o ++ ++obj-$(CONFIG_PLAT_VEP) += vep/ ++obj-$(CONFIG_PLAT_AG101) += ag101/ ++obj-$(CONFIG_PLAT_AG102) += ag102/ ++obj-$(CONFIG_PLAT_AG101P) += ag101p/ ++obj-$(CONFIG_PLAT_QEMU) += qemu/ +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/nointc.c linux-3.4.110/arch/nds32/platforms/nointc.c +--- linux-3.4.110.orig/arch/nds32/platforms/nointc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/nointc.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,134 @@ ++/* ++ * linux/arch/nds32/platforms/intc.c ++ * ++ * Faraday FTINTC010 Master Interrupt Controller Device Driver Implementation ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++ ++static void nointc_ack_irq(struct irq_data *data) ++{ ++ SET_INT_PEND2(1 << data->irq); ++#if 0 ++ asm volatile ("mtsr %0, $INT_PEND2\n\t" ++ "dsb\n\t"::"r" (1 << data->irq)); ++#endif ++#if 0 ++ asm volatile ("mfsr $r6, $INT_MASK2\n\t" ++ "and $r6, $r6,%0\n\t" ++ "mtsr $r6, $INT_MASK2\n\t" ++ "dsb\n\t"::"r" (~(1 << data->irq)):"$r6"); ++#endif ++} ++ ++static void nointc_mask_irq(struct irq_data *data) ++{ ++ unsigned long int_mask2 = GET_INT_MASK2(); ++ SET_INT_MASK2(int_mask2 & (~(1 << data->irq))); ++#if 0 ++ asm volatile ("mfsr $r6, $INT_MASK2\n\t" ++ "and $r6, $r6,%0\n\t" ++ "mtsr $r6, $INT_MASK2\n\t" ++ "dsb\n\t"::"r" (~(1 << data->irq)):"$r6"); ++#endif ++} ++ ++static void nointc_mask_ack_irq(struct irq_data *data) ++{ ++ unsigned long int_mask2 = GET_INT_MASK2(); ++ SET_INT_MASK2(int_mask2 & (~(1 << data->irq))); ++ SET_INT_PEND2(1 << data->irq); ++#if 0 ++ asm volatile ("not $r7, %0\n\t" ++ "mfsr $r6, $INT_MASK2\n\t" ++ "and $r6, $r6, $r7\n\t" ++ "mtsr $r6, $INT_MASK2\n\t" ++ "mtsr %0, $INT_PEND2\n\t" ++ "dsb\n\t"::"r" (1 << data->irq):"$r6", "$r7"); ++#endif ++ ++} ++ ++static void nointc_unmask_irq(struct irq_data *data) ++{ ++ unsigned long int_mask2 = GET_INT_MASK2(); ++ SET_INT_MASK2(int_mask2 | (1 << data->irq)); ++#if 0 ++ asm volatile ("mfsr $r6, $INT_MASK2\n\t" ++ "or $r6, $r6,%0\n\t" ++ "mtsr $r6, $INT_MASK2\n\t" ++ "dsb\n\t"::"r" (1 << data->irq):"$r6"); ++#endif ++} ++ ++static int nointc_set_type(struct irq_data *data, unsigned int flow_type) ++{ ++ printk(KERN_WARNING "interrupt type is not configurable\n"); ++ return 0; ++} ++ ++static struct irq_chip nointc_chip = { ++ ++ .irq_ack = nointc_ack_irq, ++ .irq_mask = nointc_mask_irq, ++ .irq_mask_ack = nointc_mask_ack_irq, ++ .irq_unmask = nointc_unmask_irq, ++ .irq_set_type = nointc_set_type, ++}; ++ ++static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 }; ++ ++void __init nointc_init_irq(void) ++{ ++ int i; ++ unsigned long int_trigger_type, int_vec_base, nivic; ++ ++ int_vec_base = GET_IVB(); ++ ++#if 0 ++ asm volatile ("mfsr %0, $IVB\n":"=r" (int_vec_base)); ++#endif ++ ++ if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0) { ++ panic("Unable to use NOINTC option to boot on this cpu\n"); ++ } ++ ++ nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC; ++ if (nivic >= (sizeof nivic_map / sizeof nivic_map[0])) { ++ panic ++ ("The number of input for IVIC Controller is not supported on this cpu\n"); ++ } ++ nivic = nivic_map[nivic]; ++ ++ int_trigger_type = GET_INT_TRIGGER(); ++#if 0 ++ asm volatile ("mfsr %0, $INT_TRIGGER\n":"=r" (int_trigger_type)); ++#endif ++ ++ for (i = 0; i < nivic; i++) { ++ irq_set_chip(i, &nointc_chip); ++ if (int_trigger_type & (1 << i)) ++ /* edge-triggered */ ++ irq_set_handler(i, handle_edge_irq); ++ else ++ /* level-triggered */ ++ irq_set_handler(i, handle_level_irq); ++ } ++} +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/pci_intc.c linux-3.4.110/arch/nds32/platforms/pci_intc.c +--- linux-3.4.110.orig/arch/nds32/platforms/pci_intc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/pci_intc.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,93 @@ ++/* ++ * linux/arch/nds32/platforms/pci_intc.c ++ * ++ * Faraday PCI Bridge Interrupt Process Driver Implementation ++ * ++ * Copyright (C) 
2005 Faraday Corp. (http://www.faraday-tech.com) ++ * Copyright (C) 2008 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * ++ * ChangeLog ++ * ++ * Luke Lee 09/15/2005 Created. ++ * Luke Lee 09/27/2005 Fixed for parent chip registration and notification. ++ * Peter Liao 09/28/2005 Port for PCI IP ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define IPMODULE PCIC ++#define IPNAME FPCI010 ++ ++/* ++ * Level trigger IRQ chip methods ++ */ ++ ++static void intc_ftpci100_level_ack_irq(unsigned int irq) ++{ ++ ftpci_clear_irq(irq - PLATFORM_PCI_IRQ_BASE); ++} ++ ++static void intc_ftpci100_level_mask_irq(unsigned int irq) ++{ ++ ftpci_mask_irq(irq - PLATFORM_PCI_IRQ_BASE); ++} ++ ++static void intc_ftpci100_level_unmask_irq(unsigned int irq) ++{ ++ ftpci_unmask_irq(irq - PLATFORM_PCI_IRQ_BASE); ++} ++ ++static struct irq_chip intc_ftpci100_level_chip = { ++ .ack = intc_ftpci100_level_ack_irq, ++ .mask = intc_ftpci100_level_mask_irq, ++ .unmask = intc_ftpci100_level_unmask_irq, ++}; ++ ++void pci_irq_rounter(unsigned int irq, struct irq_desc *desc) ++{ ++ int pci_irq; ++ struct irq_desc *pci_desc; ++ ++ desc->chip->mask(irq); ++ desc->chip->ack(irq); ++ ++ pci_irq = ftpci_get_irq(); ++ if (pci_irq >= 0) { ++ pci_irq += PCIC_FTPCI100_IRQ0; ++ pci_desc = irq_desc + pci_irq; ++ pci_desc->handle_irq(pci_irq, pci_desc); ++ } ++ ++ desc->chip->unmask(irq); ++} ++ ++int __init intc_ftpci100_init_irq(void) ++{ ++ int i; ++ ++ /* Register all IRQ */ ++ for (i = PCIC_FTPCI100_IRQ0; ++ i < PCIC_FTPCI100_IRQ0 + PCIC_FTPCI100_IRQ_COUNT; i++) { ++ // level trigger ++ set_irq_chip(i, &intc_ftpci100_level_chip); ++ set_irq_handler(i, handle_level_irq); ++ } ++#ifndef CONFIG_PLAT_AG101 ++ set_irq_chained_handler(PLATFORM_PCI_IRQ, pci_irq_rounter); ++#endif ++ ++ return 0; ++} ++ ++subsys_initcall(intc_ftpci100_init_irq); +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/qemu/devices.c linux-3.4.110/arch/nds32/platforms/qemu/devices.c +--- linux-3.4.110.orig/arch/nds32/platforms/qemu/devices.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/qemu/devices.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,124 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++const struct map_desc platform_io_desc[] __initdata = { ++ {UART0_VA_BASE, UART0_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {UART1_VA_BASE, UART1_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {INTC_FTINTC010_0_VA_BASE, INTC_FTINTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {TIMER_FTTMR010_0_VA_BASE, TIMER_FTTMR010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SSP_FTSSP010_0_VA_BASE, SSP_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PMU_FTPMU010_0_VA_BASE, PMU_FTPMU010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {MAC_FTMAC100_0_VA_BASE, MAC_FTMAC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SDC_FTSDC010_0_VA_BASE, SDC_FTSDC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {RTC_FTRTC010_0_VA_BASE, RTC_FTRTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {WDT_FTWDT010_0_VA_BASE, WDT_FTWDT010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPIO_FTGPIO010_0_VA_BASE, GPIO_FTGPIO010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {CFC_FTCFC010_0_VA_BASE, CFC_FTCFC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LCD_FTLCDC100_0_VA_BASE, LCD_FTLCDC100_0_PA_BASE, PAGE_SIZE, ++ 
MT_DEVICE_NCB}, ++ {I2C_FTI2C010_0_VA_BASE, I2C_FTI2C010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {DMAC_FTDMAC020_0_VA_BASE, DMAC_FTDMAC020_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {APBBRG_FTAPBBRG020S_0_VA_BASE, APBBRG_FTAPBBRG020S_0_PA_BASE, ++ PAGE_SIZE, MT_DEVICE_NCB}, ++ {PCIIO_0_VA_BASE, PCIIO_0_PA_BASE, 0x000FF000, MT_DEVICE_NCB}, ++ {PCIC_FTPCI100_0_VA_BASE, PCIC_FTPCI100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LED_VA_BASE, LED_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {SDMC_FTSDMC021_VA_BASE, SDMC_FTSDMC021_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {L2CC_VA_BASE, L2CC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB} ++}; ++ ++static void __init platform_map_io(void) ++{ ++ iotable_init((struct map_desc *)platform_io_desc, ++ ARRAY_SIZE(platform_io_desc)); ++} ++ ++static struct uart_port uart0 = { ++ .membase = (void __iomem *)UART0_VA_BASE, ++ .irq = UART0_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 0, ++ .mapbase = UART0_PA_BASE, ++}; ++ ++static struct uart_port uart1 = { ++ .membase = (void __iomem *)UART1_VA_BASE, ++ .irq = UART1_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 1, ++ .mapbase = UART1_PA_BASE, ++}; ++ ++static void __init soc_init(void) ++{ ++ early_serial_setup(&uart0); ++ early_serial_setup(&uart1); ++} ++ ++static struct resource smc91x_resources[] = { ++ [0] = { ++ .name = "smc91x", ++ .start = 0x92100000, ++ .end = 0x92110000, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = 25, ++ .end = 25, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device smc91x_device = { ++ .name = "smc91x", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(smc91x_resources), ++ .resource = smc91x_resources, ++}; ++ ++static __init int smc_init(void) ++{ ++ int ret; ++ ret = platform_device_register(&smc91x_device); ++ if (ret == 0) ++ printk("smc is installed now.\n"); ++ else ++ printk("smc failed.\n"); ++ return 0; ++} ++ ++module_init(smc_init); ++ ++MACHINE_START(FARADAY, PLATFORM_NAME) ++ .param_offset = BOOT_PARAMETER_PA_BASE, ++ .map_io = platform_map_io, ++ .init_irq = platform_init_irq, ++ .timer = &platform_timer, /* defined in timer.c */ ++ .init_machine = soc_init, ++MACHINE_END +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/qemu/Kconfig linux-3.4.110/arch/nds32/platforms/qemu/Kconfig +--- linux-3.4.110.orig/arch/nds32/platforms/qemu/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/qemu/Kconfig 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,3 @@ ++menu "QEMU Platform Options" ++ ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/qemu/Makefile linux-3.4.110/arch/nds32/platforms/qemu/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/qemu/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/qemu/Makefile 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1 @@ ++obj-y = devices.o +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/timer.c linux-3.4.110/arch/nds32/platforms/timer.c +--- linux-3.4.110.orig/arch/nds32/platforms/timer.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/timer.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,250 @@ ++/* ++ * linux/arch/nds32/platforms/timer.c ++ * ++ * Faraday FTTMR010 Timer Device Driver Implementation ++ * ++ * Copyright (C) 2005 Faraday Corp. 
(http://www.faraday-tech.com) ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define REG32_TMR(x) *(volatile unsigned long *)(TIMER_FTTMR010_VA_BASE + (x)) ++#define APB_CLK_IN (AHB_CLK_IN / 2) ++ ++static struct resource timer_resource = { ++ .name = "Timer 1~3", ++ .start = TIMER_FTTMR010_VA_BASE, ++ .end = TIMER_FTTMR010_VA_LIMIT, ++}; ++ ++static inline cycle_t clocksource_read_cycles(struct clocksource *cs) ++{ ++ return (cycle_t) REG32_TMR(TIMER3_COUNT); ++} ++ ++static void clksrc_fttmr010_resume(struct clocksource *cs) ++{ ++ REG32_TMR(TIMER_INTRMASK) |= TM3MATCH1 | TM3MATCH2 | TM3OVERFLOW; ++ REG32_TMR(TIMER_TMCR) |= TM3UPDOWN | TM3ENABLE; ++} ++ ++static struct clocksource clksrc_fttmr010 = { ++ .name = "fttmr010_tm1", ++ .rating = 300, ++ .read = clocksource_read_cycles, ++ .mask = CLOCKSOURCE_MASK(32), ++ .shift = 21, ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS, ++ .resume = clksrc_fttmr010_resume, ++}; ++ ++static void __init fttmr010_clocksource_init(void) ++{ ++ clksrc_fttmr010.mult = ++ clocksource_hz2mult(APB_CLK_IN, clksrc_fttmr010.shift); ++ ++ REG32_TMR(TIMER3_LOAD) = 0; ++ REG32_TMR(TIMER_INTRMASK) |= TM3MATCH1 | TM3MATCH2 | TM3OVERFLOW; ++ REG32_TMR(TIMER_TMCR) |= TM3UPDOWN | TM3ENABLE; ++ if (clocksource_register(&clksrc_fttmr010)) ++ printk(KERN_ERR "Error: failed to register %s\n", ++ clksrc_fttmr010.name); ++} ++ ++static int fttmr010_set_next_event(unsigned long cycles, ++ struct clock_event_device *evt) ++{ ++ REG32_TMR(TIMER1_LOAD) = cycles; ++ return 0; ++} ++ ++static void fttmr010_set_mode(enum clock_event_mode mode, ++ struct clock_event_device *evt) ++{ ++ switch (mode) { ++ case CLOCK_EVT_MODE_ONESHOT: ++ REG32_TMR(TIMER1_LOAD) = 0xffffffff; ++ REG32_TMR(TIMER_TMCR) |= TM1ENABLE; ++ break; ++ ++ case CLOCK_EVT_MODE_PERIODIC: ++ REG32_TMR(TIMER1_COUNT) = APB_CLK_IN / HZ - 1; ++ REG32_TMR(TIMER1_LOAD) = APB_CLK_IN / HZ - 1; ++ REG32_TMR(TIMER_TMCR) |= TM1ENABLE; ++ break; ++ case CLOCK_EVT_MODE_UNUSED: ++ break; ++ case CLOCK_EVT_MODE_SHUTDOWN: ++ REG32_TMR(TIMER_TMCR) &= ~TM1ENABLE; ++ break; ++ case CLOCK_EVT_MODE_RESUME: ++ REG32_TMR(TIMER_INTRMASK) |= TM1MATCH1 | TM1MATCH2; ++ REG32_TMR(TIMER_TMCR) |= TM1ENABLE | TM1OFENABLE; ++ break; ++ } ++} ++ ++static struct clock_event_device clockevent_fttmr010 = { ++ .name = "fttmr010_tm1", ++ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, ++ .shift = 32, ++ .cpumask = cpu_all_mask, ++ .set_next_event = fttmr010_set_next_event, ++ .set_mode = fttmr010_set_mode, ++}; ++ ++static irqreturn_t timer1_interrupt(int irq, void *dev_id) ++{ ++ struct clock_event_device *evt = dev_id; ++ ++ REG32_TMR(TIMER_INTRSTATE) = TM1MATCH1 | TM1MATCH2 | TM1OVERFLOW; 
++ ++ evt->event_handler(evt); ++ ++ return IRQ_HANDLED; ++} ++ ++static struct irqaction timer1_irq = { ++ .name = "Timer Tick", ++ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, ++ .handler = timer1_interrupt, ++ .dev_id = &clockevent_fttmr010 ++}; ++ ++static void __init fttmr010_clockevent_init(void) ++{ ++ clockevent_fttmr010.mult = ++ div_sc(APB_CLK_IN, NSEC_PER_SEC, clockevent_fttmr010.shift); ++ clockevent_fttmr010.max_delta_ns = ++ clockevent_delta2ns(0xffffffff, &clockevent_fttmr010); ++ clockevent_fttmr010.min_delta_ns = ++ clockevent_delta2ns(3, &clockevent_fttmr010); ++ ++ clockevents_register_device(&clockevent_fttmr010); ++ setup_irq(TIMER_FTTMR010_IRQ0, &timer1_irq); ++} ++ ++static void fttmr010_resume(void) ++{ ++} ++ ++#ifdef CONFIG_CPU_FREQ ++void ag102_calc_ahb_clk(void); ++static int fttmr010_cpufreq_notifier(struct notifier_block *nb, ++ unsigned long val, void *data) ++{ ++ if (val == CPUFREQ_POSTCHANGE) { ++ ++ unsigned long flags; ++#ifdef CONFIG_PLAT_AG102 ++ ag102_calc_ahb_clk(); ++#endif ++ local_irq_save(flags); ++ ++ clocksource_unregister(&clksrc_fttmr010); ++ ++ clksrc_fttmr010.mult = ++ clocksource_hz2mult(APB_CLK_IN, clksrc_fttmr010.shift); ++#ifdef CONFIG_PLAT_AG101 ++ clksrc_fttmr010.mult_orig = ++ clocksource_hz2mult(APB_CLK_IN, clksrc_fttmr010.shift); ++#endif ++ ++ if (clocksource_register(&clksrc_fttmr010)) ++ printk(KERN_ERR "Error: failed to re-register %s\n", ++ clksrc_fttmr010.name); ++ else ++ printk("Re-register clock source %s\n", ++ clksrc_fttmr010.name); ++ ++ local_irq_restore(flags); ++ ++ clockevent_fttmr010.mult = ++ div_sc(APB_CLK_IN, NSEC_PER_SEC, clockevent_fttmr010.shift); ++ ++#ifdef CONFIG_PLAT_AG102 ++ printk("Add timer clock modifier...\n"); ++ fttmr010_set_mode(CLOCK_EVT_MODE_PERIODIC, 0); ++#endif ++ } ++ ++ return 0; ++} ++ ++static struct notifier_block fttmr010_cpufreq_notifier_block = { ++ .notifier_call = fttmr010_cpufreq_notifier ++}; ++ ++static int __init fttmr010_init_cpufreq(void) ++{ ++ if (cpufreq_register_notifier(&fttmr010_cpufreq_notifier_block, ++ CPUFREQ_TRANSITION_NOTIFIER)) ++ printk("fttmr010: Failed to setup cpufreq notifier\n"); ++ ++ return 0; ++} ++ ++core_initcall(fttmr010_init_cpufreq); ++#endif ++ ++static int clksrc_init; ++static void __init fttmr010_init(void) ++{ ++ request_resource(&ioport_resource, &timer_resource); ++ ++ printk ++ ("FTTMR010 timer 1 installed on IRQ %d, with clock %d at %d HZ.\r\n", ++ TIMER_FTTMR010_IRQ0, APB_CLK_IN, HZ); ++ ++ REG32_TMR(TIMER_TMCR) &= ++ ~(TM1ENABLE | TM1CLOCK | TM1OFENABLE | TM1UPDOWN); ++ REG32_TMR(TIMER_INTRMASK) |= TM1MATCH1 | TM1MATCH2; ++ REG32_TMR(TIMER_TMCR) |= TM1OFENABLE; ++ ++ fttmr010_clocksource_init(); ++ fttmr010_clockevent_init(); ++ clksrc_init = 1; ++} ++ ++struct sys_timer platform_timer = { ++ .init = fttmr010_init, ++ .resume = fttmr010_resume, ++}; ++ ++unsigned long long sched_clock(void) ++{ ++ if (clksrc_init) ++ return ++ clocksource_cyc2ns(clocksource_read_cycles ++ (&clksrc_fttmr010), clksrc_fttmr010.mult, ++ clksrc_fttmr010.shift); ++ else ++ return (unsigned long long)(jiffies - INITIAL_JIFFIES) ++ * (NSEC_PER_SEC / HZ); ++} +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/vep/devices.c linux-3.4.110/arch/nds32/platforms/vep/devices.c +--- linux-3.4.110.orig/arch/nds32/platforms/vep/devices.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/vep/devices.c 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,83 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include 
++ ++const struct map_desc platform_io_desc[] __initdata = { ++ {UART0_VA_BASE, UART0_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {UART1_VA_BASE, UART1_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {INTC_FTINTC010_0_VA_BASE, INTC_FTINTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {TIMER_FTTMR010_0_VA_BASE, TIMER_FTTMR010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SSP_FTSSP010_0_VA_BASE, SSP_FTSSP010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {PMU_FTPMU010_0_VA_BASE, PMU_FTPMU010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {MAC_FTMAC100_0_VA_BASE, MAC_FTMAC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {SDC_FTSDC010_0_VA_BASE, SDC_FTSDC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {RTC_FTRTC010_0_VA_BASE, RTC_FTRTC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {WDT_FTWDT010_0_VA_BASE, WDT_FTWDT010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {GPIO_FTGPIO010_0_VA_BASE, GPIO_FTGPIO010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {CFC_FTCFC010_0_VA_BASE, CFC_FTCFC010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {LCD_FTLCDC100_0_VA_BASE, LCD_FTLCDC100_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {I2C_FTI2C010_0_VA_BASE, I2C_FTI2C010_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {DMAC_FTDMAC020_0_VA_BASE, DMAC_FTDMAC020_0_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {APBBRG_FTAPBBRG020S_0_VA_BASE, APBBRG_FTAPBBRG020S_0_PA_BASE, ++ PAGE_SIZE, MT_DEVICE_NCB}, ++ {LED_VA_BASE, LED_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB}, ++ {SDMC_FTSDMC021_VA_BASE, SDMC_FTSDMC021_PA_BASE, PAGE_SIZE, ++ MT_DEVICE_NCB}, ++ {L2CC_VA_BASE, L2CC_PA_BASE, PAGE_SIZE, MT_DEVICE_NCB} ++}; ++ ++static void __init platform_map_io(void) ++{ ++ iotable_init((struct map_desc *)platform_io_desc, ++ ARRAY_SIZE(platform_io_desc)); ++} ++ ++static struct uart_port uart0 = { ++ .membase = (void __iomem *)UART0_VA_BASE, ++ .irq = UART0_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 0, ++ .mapbase = UART0_PA_BASE, ++}; ++ ++static struct uart_port uart1 = { ++ .membase = (void __iomem *)UART1_VA_BASE, ++ .irq = UART1_IRQ, ++ .uartclk = CONFIG_UART_CLK, ++ .regshift = 2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, ++ .line = 1, ++ .mapbase = UART1_PA_BASE, ++}; ++ ++static void __init soc_init(void) ++{ ++ early_serial_setup(&uart0); ++ early_serial_setup(&uart1); ++} ++ ++MACHINE_START(FARADAY, PLATFORM_NAME) ++ .param_offset = BOOT_PARAMETER_PA_BASE,.map_io = platform_map_io,.init_irq = platform_init_irq,.timer = &platform_timer, /* defined in timer.c */ ++ .init_machine = soc_init, MACHINE_END +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/vep/Kconfig linux-3.4.110/arch/nds32/platforms/vep/Kconfig +--- linux-3.4.110.orig/arch/nds32/platforms/vep/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/vep/Kconfig 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1,7 @@ ++menu "VEP Platform Options" ++ ++config CACHE_L2 ++ bool "Support L2 cache" ++ default n ++ ++endmenu +diff -Nur linux-3.4.110.orig/arch/nds32/platforms/vep/Makefile linux-3.4.110/arch/nds32/platforms/vep/Makefile +--- linux-3.4.110.orig/arch/nds32/platforms/vep/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/platforms/vep/Makefile 2016-04-07 10:20:51.018083964 +0200 +@@ -0,0 +1 @@ ++obj-y = devices.o +diff -Nur linux-3.4.110.orig/build_linux.sh linux-3.4.110/build_linux.sh +--- linux-3.4.110.orig/build_linux.sh 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/build_linux.sh 2016-04-07 10:20:51.018083964 +0200 
+@@ -0,0 +1,202 @@ ++#!/bin/sh ++ ++export ARCH=nds32 ++ ++print_help() ++{ ++ echo "Usage: [platform_defconfig] [--bootm] [--ramdisk=]" ++ echo "Stop Building." ++ exit ++} ++ ++BUILDBOOTM=0 ++BUILDBOOTPIMAGE=0 ++BUILDHEADERS=0 ++ ++case "$1" in ++ qemu) ++ TARGET=qemu_defconfig ++ IMAGE=qemu ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ vep-be) ++ TARGET=vep-be_defconfig ++ IMAGE=vep-be ++ export CROSS_COMPILE=nds32be-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ vep-le) ++ TARGET=vep-le_defconfig ++ IMAGE=vep-le ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ xc5) ++ TARGET=xc5_defconfig ++ IMAGE=xc5 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ xc5_8k) ++ TARGET=xc5_8k_defconfig ++ IMAGE=xc5_8k ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ ag101a0) ++ TARGET=ag101a0_defconfig ++ IMAGE=ag101a0 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ ag101b0) ++ TARGET=ag101b0_defconfig ++ IMAGE=ag101b0 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ ag102) ++ TARGET=ag102_defconfig ++ IMAGE=ag102 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ u200) ++ TARGET=u200_defconfig ++ IMAGE=u200 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ xc5-qa) ++ TARGET=xc5-qa_defconfig ++ IMAGE=xc5-qa ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ ag101-qa) ++ TARGET=ag101-qa_defconfig ++ IMAGE=ag101-qa ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++ ;; ++ *) ++ if [ $# = 1 ]; then ++ echo "No defconfig is given." ++ print_help ++ fi ++ if [ ! -e $2 ]; then ++ echo "Given defconfig is not exist." ++ print_help ++ elif [ ! -f $2 ]; then ++ echo "Given defconfig is not a file." ++ print_help ++ elif [ ! -s $2 ]; then ++ echo "Given defconfig is size 0." ++ print_help ++ elif [ ! -r $2 ]; then ++ echo "Given defconfig has no read attribute." ++ print_help ++ fi ++ TARGET=none ++ IMAGE=$1 ++ export CROSS_COMPILE=nds32le-linux- ++ echo "Building $IMAGE kernel." ++esac ++ ++ZIMAGE="$IMAGE"_zImage ++VMLINUZ="$IMAGE"_vmlinuz ++VMLINUX="$IMAGE"_vmlinux ++BOOTPIMAGE="$IMAGE"_bootpImage ++BOOTP="$IMAGE"_bootp ++BOOTM="$IMAGE"_bootm ++ ++for ARG in $@; do ++ if [ $ARG = $1 ]; then ++ continue ++ fi ++ ++ if [ $ARG = $2 ]; then ++ if [ $TARGET = "none" ]; then ++ continue ++ fi ++ fi ++ ++ case "$ARG" in ++ ++ --bootm) ++ BUILDBOOTM=1 ++ ;; ++ ++ --ramdisk=*) ++ BUILDBOOTPIMAGE=1 ++ RAMDISK=${ARG#*=} ++ if [ ! -e $RAMDISK ]; then ++ echo "Given ramdisk is not exist." ++ print_help ++ elif [ ! -f $RAMDISK ]; then ++ echo "Given ramdisk is not a file." ++ print_help ++ elif [ ! -s $RAMDISK ]; then ++ echo "Given ramdisk is size 0." ++ print_help ++ elif [ ! -r $RAMDISK ]; then ++ echo "Given ramdisk has no read attribute." ++ print_help ++ fi ++ ;; ++# --headers) ++# BUILDHEADERS=1 ++# git apply ../linux-2.6-patch/headers.patch ++# echo "Exporting kernel headers." 
++# ;; ++ *) ++ print_help ++ esac ++done ++ ++if [ "$OSTYPE" = "cygwin" ]; then ++ HOST_LOADLIBES=-lintl\ -lcurses ++fi ++export HOST_LOADLIBES ++ ++which ${CROSS_COMPILE}gcc &> /dev/null || export CROSS_COMPILE=nds32-elf- ++ ++make mrproper | tee $IMAGE.log ++if [ $TARGET != "none" ]; then ++ make $TARGET| tee -a $IMAGE.log ++else ++ cp $2 .config ++fi ++ ++if [ $BUILDHEADERS = 1 ]; then ++ make dep | tee -a $IMAGE.log ++ make headers_install | tee -a $IMAGE.log ++else ++ make | tee -a $IMAGE.log ++ ++ cp arch/nds32/boot/zImage ./$ZIMAGE | tee -a $IMAGE.log ++ cp arch/nds32/boot/compressed/vmlinux ./$VMLINUZ | tee -a $IMAGE.log ++ cp ./vmlinux ./$VMLINUX | tee -a $IMAGE.log ++ ++ if [ $BUILDBOOTPIMAGE = 1 ]; then ++ make bootpImage INITRD=$RAMDISK | tee -a $IMAGE.log ++ cp arch/nds32/boot/bootpImage ./$BOOTPIMAGE | tee -a $IMAGE.log ++ cp arch/nds32/boot/bootp/bootp ./$BOOTP | tee -a $IMAGE.log ++ fi ++ ++ if [ $BUILDBOOTM = 1 ]; then ++ if [ -e "../u-boot/tools/mkimage" ]; then ++ ../u-boot/tools/mkimage \ ++ -A nds32 \ ++ -O linux \ ++ -T kernel \ ++ -C none \ ++ -a 0x500000 \ ++ -e 0x500040 \ ++ -d ./arch/nds32/boot/zImage $BOOTM | tee -a $IMAGE.log ++ else ++ echo "Error: ../u-boot/tools/mkimage not found" | tee -a $IMAGE.log ++ fi ++ fi ++fi +diff -Nur linux-3.4.110.orig/drivers/block/ftcfc010.c linux-3.4.110/drivers/block/ftcfc010.c +--- linux-3.4.110.orig/drivers/block/ftcfc010.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/block/ftcfc010.c 2016-04-07 10:20:51.022084119 +0200 +@@ -0,0 +1,1299 @@ ++/* drivers/block/CPESD/ftsdc010.c ++ ******************************************************************************* ++ * Faraday FTSDC010 Device Driver ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * ++ * All Rights Reserved ++ * ++ * Porting to Linux 2.6 on 20050815 ++ * Author: Chris Lee, I-Jui Sung, Peter Liao (support APB DMA) ++ * Version: 0.2 ++ * History: ++ * 0.1 new creation ++ * 0.2 Porting to meet the style of linux dma ++ * 0.3 modify dma usage to virtual irq of dma interrupt ++ * 0.4 (20050701) Improve r/w performance ++ * 0.5 Porting to Linux 2.6 and replace busy_loop checking with timer's timeout ++ * Todo: ++ ******************************************************************************* ++ */ ++ ++#define DEBUG_OFF 0 ++ ++#define DEBUG( enable, tagged, ...) \ ++do{ \ ++ if( enable){ \ ++ if( tagged) \ ++ printk( "[ %30s() ] ", __func__); \ ++ printk( __VA_ARGS__); \ ++ } \ ++} while( 0) ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* HDIO_GETGEO */ ++#include ++#include ++#include /* invalidate_bdev */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define IPMODULE CFC ++#define IPNAME FTCFC010 ++ ++#include "ftcfc010.h" ++ ++static int hardsect_size = 512; ++module_param( hardsect_size, int, 0); ++ ++static int cf_major = 0; /* must be declared before including blk.h */ ++#define DEVICE_NAME "Faraday CFC" /* name for messaging */ ++ ++#define FTCFC_VA_BASE IP_VA_BASE( 0) ++#define FTCFC_PA_BASE IP_PA_BASE( 0) ++#define FTCFC_IRQ CFC_FTCFC010_IRQ1 ++ ++#undef CF_DEBUG ++#define CF_DEBUG 0 ++struct block_device_operations cf_fops; ++ ++typedef struct cf_dev { ++ ++ int size; /* device size in sectors */ ++ int usage; /* # of users currently */ ++ int media_change; /* Flag: media changed? 
*/ ++ struct gendisk *gd; /* The gendisk structure */ ++ spinlock_t lock; /* For mutual exclusion */ ++ struct request_queue *queue; /* The device request queue */ ++ int card_state; ++ u32 lba_sec_offset; ++ dmad_chreq ch_req; ++ ++} cf_dev_t; ++ ++static cf_dev_t *cf_devices; ++static cf_card_t cf_card_info; ++ ++static dma_addr_t dma_buf; ++struct completion cf_dma_cmpl; ++ ++static uint first_run; ++static uint cf_err_code; ++ ++static int g_cf_sectors; ++ ++static void SetTransferSize( u32 Addr, u32 Type, u32 OP_Type, u32 Inc_Addr, u32 Transize) ++{ ++ u32 ctrl_reg, buf_ctrl_reg; ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ /* set read/write command */ ++ while( cfc->HostStatus & BUF_ACTIVE_BIT) ++ ; ++ ++ /* Set Transfer size mode to default */ ++ cfc->TransSzMode2En = 0; ++ ++ /* set 8/16 bits mode */ ++ ctrl_reg = cfc->ControlReg; ++ ++ if( Transize == SIZE_1_BYTE) ++ ctrl_reg = ( ctrl_reg & ~MODE_BIT) | BYTE_MODE; ++ else ++ ctrl_reg = ( ctrl_reg & ~MODE_BIT) | WORD_MODE; ++ ++ DEBUG( CF_DEBUG, 1, "ctrl 1: 0x%08lx\n", ( unsigned long)ctrl_reg); ++ cfc->ControlReg = ctrl_reg; ++ ++ /* Write command to buffer */ ++ buf_ctrl_reg = cfc->BuffCtrlReg; ++ buf_ctrl_reg = ( buf_ctrl_reg & ~ADR_BIT & ~TYPE_BIT & ~RW_BIT & ~INCADR_BIT & ~TRANS_SIZE_CONTROL_BIT) ++ | Addr | Type | OP_Type | Inc_Addr | ( Transize << TRANS_SIZE_LOC); ++ ++ cfc->BuffCtrlReg = buf_ctrl_reg; ++ DEBUG( CF_DEBUG, 1, "buf_ctrl 1: 0x%08lx\n", ( unsigned long)buf_ctrl_reg); ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++} ++ ++static void SetTransferSizeEx( u32 Addr, u32 Type, u32 OP_Type, u32 Inc_Addr, u32 Transize) ++{ ++ u32 ctrl_reg, buf_ctrl_reg; ++ ++ DEBUG( CF_DEBUG, 1, "Enter, SetTransferSizeEx: %d\n", Transize); ++ ++ /* set read/write command */ ++ while( cfc->HostStatus & BUF_ACTIVE_BIT) ++ ; ++ ++ /* Set Transfer size mode to default */ ++ cfc->TransSzMode2En = 1; ++ ++ /* set 8/16 bits mode */ ++ ctrl_reg = cfc->ControlReg; ++ ++ if( Transize == SIZE_1_BYTE) ++ ctrl_reg = ( ctrl_reg & ~MODE_BIT) | BYTE_MODE; ++ else ++ ctrl_reg = ( ctrl_reg & ~MODE_BIT) | WORD_MODE; ++ ++ cfc->ControlReg = ctrl_reg; ++ ++ /* Set Transfer size mode2 reg in FTCFC */ ++ cfc->TransSzMode2Cnt = Transize - 1; ++ ++ /* Write command to buffer */ ++ buf_ctrl_reg = cfc->BuffCtrlReg; ++ buf_ctrl_reg = ( buf_ctrl_reg & ~ADR_BIT & ~TYPE_BIT & ~RW_BIT & ~INCADR_BIT & ~TRANS_SIZE_CONTROL_BIT) ++ | Addr | Type | OP_Type | Inc_Addr; ++ ++ cfc->BuffCtrlReg = buf_ctrl_reg; ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++} ++ ++static void WriteCFCardByte( u32 Addr, u8 Value, u32 Type) ++{ ++ while( !( cfc->HostStatus & RDY_nIREQ_BIT)) ++ ; ++ ++ SetTransferSize( Addr, Type, WRITE_OP, NOINCADR, SIZE_1_BYTE); ++ ++ cfc->BufferData = Value; ++ ++ while( !( cfc->HostStatus & INTA_BIT)) ++ ; ++ ++ /* clear command complete status */ ++ cfc->HostStatus = INTA_BIT; ++} ++ ++static u8 ReadCFCardByte( u32 Addr, u32 Type) ++{ ++ u8 ret_data; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ /* wait until ready */ ++ while( !( cfc->HostStatus & RDY_nIREQ_BIT)) ++ ; ++ ++ SetTransferSize( Addr, Type, READ_OP, NOINCADR, SIZE_1_BYTE); ++ ++ while( !( cfc->HostStatus & INTA_BIT)) ++ ; ++ ++ ret_data = cfc->BufferData & 0xff; ++ DEBUG( CF_DEBUG, 1, "data: 0x%02x\n", ret_data); ++ ++ while( !( cfc->HostStatus & INTA_BIT)) ++ ; ++ ++ /* clear command complete status */ ++ cfc->HostStatus = INTA_BIT; ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ return ret_data; ++} ++ ++/* set Head-cylinder-sector on CF */ ++static u32 Translate_Config_HCS( u32 LBA, u32 Sec_count) ++{ ++ u8 DriveHead = ( u8)( 
0xE0 | ( LBA >> 24)); /* 0xE0 set in LBA mode (bit 7&5 must set to 1) */ ++ u8 CylHigh = ( u8)( ( LBA >> 16) & 0xFF); ++ u8 CylLow = ( u8)( ( LBA >> 8) & 0xFF); ++ u8 SecNum = ( u8)( LBA & 0xFF); ++ ++ DEBUG( CF_DEBUG, 1, "LBA:%d, %d %d %d %d, count: %d\n", ++ LBA, DriveHead, CylHigh, CylLow, SecNum, Sec_count); ++ ++ /* Set CF-ATA reg for start sector */ ++ WriteCFCardByte( BLKMEM_DRIVE_REG, DriveHead, COMMON_MEM); ++ WriteCFCardByte( BLKMEM_CYLINDER_HIGH_REG, CylHigh, COMMON_MEM); ++ WriteCFCardByte( BLKMEM_CYLINDER_LOW_REG, CylLow, COMMON_MEM); ++ WriteCFCardByte( BLKMEM_SECTOR_NUMBER_REG, SecNum, COMMON_MEM); ++ WriteCFCardByte( BLKMEM_SECTOR_COUNT_REG, Sec_count, COMMON_MEM); ++ ++ return 1; ++} ++ ++/* ++ * SetCFCardConfiguration(): ATTRIBUTE MEMORY ++ * Set CF storgae card configuration option register ++ * Conf1 Conf0 Disk Card Mode ++ * --------------------------------------------------------------- ++ * 0 0 Memory Mapped ++ * 0 1 I/O Mapped, any 16 byte system decoded ++ * 1 0 I/O Mapped, 1F0h - 1F7h / 3F6h - 3F7h ++ * 1 1 I/O Mapped, 170h - 177h / 376h - 377h ++ */ ++static void SetCFCardMode( u8 Mode) ++{ ++ u8 Reg; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ Reg = ReadCFCardByte( CONFIG_OPTION_REG, ATTRIBUTE_MEM); /* CONFIG_OPTION_REG = 0x200 */ ++ Reg &= ( LEVLREQ_BIT | SRESET_BIT); /* clear bit0-5 */ ++ ++ WriteCFCardByte( CONFIG_OPTION_REG, Reg | Mode, ATTRIBUTE_MEM); ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++} ++ ++/* return 0: fail, 1: success */ ++static u32 CF_SendCommand( u16 Cmd) ++{ ++ u16 Reg; ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ do{ ++ WriteCFCardByte( BLKMEM_COMMAND_REG, Cmd, COMMON_MEM); ++ ++ /* ++ * Check status register of Task file register is valid for access ++ * No other bits in status register are valid when BUSY bit is ++ * set to a 1 ++ */ ++ while( ( ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM) & BUSY_BIT)) ++ ; ++ ++ Reg = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ if( Reg & ERR_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "Exit ( fail)\n"); ++ return 0; ++ } ++ ++ } while( !( Reg & RDY_BIT)); ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ return 1; ++} ++ ++static void CardReset( void) ++{ ++ u32 ctrl_reg = cfc->ControlReg; ++ u8 data; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ cfc->ControlReg = ctrl_reg | RESET_BIT | SIGNAL_ON | PWR_ON; /* set reset bit, float control, power on */ ++ ++ /* Software must manually clear this bit */ ++ mdelay( 100); ++ ++ /* clear reset bit */ ++ cfc->ControlReg = ( ctrl_reg &~ RESET_BIT) | SIGNAL_ON | PWR_ON | DATA_CMP_INT_MASK | IO_INT_MASK; ++ ++ do { ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ } while( data & BUSY_BIT); /* please be careful that is locked here */ ++ ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ /* Check if Drive Ready & Drive Seek Complete */ ++ if( data == ( RDY_BIT | DSC_BIT)) ++ DEBUG( CF_DEBUG, 1, "Reset CF OK, data: 0x%02x\n", ( unsigned char)data); ++ else ++ DEBUG( CF_DEBUG, 1, "Reset CF fail, data: 0x%02x\n", ( unsigned char)data); ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++} ++ ++static u32 CFCardInit( void) ++{ ++ u8 buff[CF_SECTOR_SIZE]; ++ u32 i; ++ u32 BSA, BSM, BSIO, BSMOW, BSIORW, apb_ns; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ if( !( cfc->HostStatus & CARD_DETECT_BIT)){ ++ ++ DEBUG( CF_DEBUG, 1, "No CF Card In Socket\n"); ++ return -1; ++ } ++ ++ CardReset(); ++ ++ /* set timer */ ++ ++ apb_ns = 1000000000 / ( AHB_CLK_IN / 2 ); /* the time of APB clock */ ++ BSA = ( 50 + ( apb_ns - ( 50 % apb_ns))) / apb_ns; /* Based on the item tc( R) of BSA */ ++ 
BSMOW = 3; ++ BSIORW = 3; ++ BSM = ( 30 + ( apb_ns - ( 30 % apb_ns))) / apb_ns; /* Base on the item tw( WE) of BSM */ ++ BSIO = ( 35 + ( apb_ns - ( 35 % apb_ns))) / apb_ns; /* Base on the item tsuA( IORD,IOWR) of BSIO */ ++ ++ cfc->TimeCfgReg = ( ( BSMOW & 0x03) << 12) ++ | ( ( BSIORW & 0x03) << 14) ++ | ( ( BSA & 0x0f) << 0) ++ | ( ( BSM & 0x0f) << 4) ++ | ( ( BSIO & 0x0f) << 8); ++ ++ SetCFCardMode( CF_MEM_MAP_MODE); /* set memory mode read write access */ ++ ++ cfc->MultiSector = 0x1; /* enable multi sector read/write */ ++ ++ /* select drive */ ++ WriteCFCardByte( BLKMEM_DRIVE_REG, 0, COMMON_MEM); ++ ++ /* identify drive device */ ++ if( !CF_SendCommand( ATA_IDENTIFY_DRIVE)){ ++ ++ DEBUG( CF_DEBUG, 1, "Send Identify Drive command fail\n"); ++ return -1; ++ } ++ ++ SetTransferSize( BLKMEM_DATA_REG, COMMON_MEM, READ_OP, NOINCADR, SIZE_512_BYTE); ++ ++ for( i = 0; i < CF_SECTOR_SIZE / 4; i++){ ++ ++ while( !( cfc->HostStatus & BUF_DATA_RDY_BIT)) ++ ; ++ ++ *( ( u32*)&buff[ i*4]) = cfc->BufferData; ++ } ++ ++ /* clear command complete status */ ++ cfc->HostStatus = INTA_BIT; ++ ++ g_cf_sectors = ( ( u32)buff[ 15] << 24) | ( ( u32)buff[ 14] << 16) | ( ( u32)buff[ 17] << 8) | ( ( u32)buff[ 16]); ++ DEBUG( CF_DEBUG, 1, "Card identify - capicity: %d sectors, size: %d kbytes \n", g_cf_sectors, g_cf_sectors >> 1); ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return 0; ++} ++ ++static int cfc_read_block( cf_card_t *info, uint size, uint *buf) ++{ ++ /* ++ * Please refer SanDisk SD Manual v1.9 Section 5.1.9.2 ( page 5-76) to set the timeout setting ++ */ ++ unsigned long timeout = jiffies + CFC_TIMEOUT_BASE *( ( size + 2048 + 511) >> 9); ++ uint count; ++ u8 data; ++ dmad_chreq *ch_req = (dmad_chreq *)info->private; ++ dmad_drb *drb = 0; ++ u32 drb_size = 0; ++ dma_addr_t addr_iter; ++ ++ if( info->DMAEnable){ ++ ++ DEBUG( CF_DEBUG, 1, "DMA Read - size: %d, buf: 0x%08lx, dma_buf: 0x%08lx\n", ++ size, ( unsigned long)buf, ( unsigned long)dma_buf); ++ ++ init_completion(&cf_dma_cmpl); ++ ++ if (dma_buf) ++ consistent_sync(__va(dma_buf), size, DMA_FROM_DEVICE); ++ else ++ consistent_sync(buf, size, DMA_FROM_DEVICE); ++ ++ //prepare parameter for add dma entry ++ dmad_config_channel_dir(ch_req, DMAD_DIR_A0_TO_A1); ++ ++ drb_size = dmad_max_size_per_drb(ch_req); ++ ++ if (dma_buf) ++ addr_iter = dma_buf; // given dest phy addr ++ else ++ addr_iter = __pa(buf); ++ ++ cfc->ControlReg |= ENDMA_BIT; ++ ++ while (size > 0) { ++ ++ if (unlikely(0 != dmad_alloc_drb(ch_req, &drb) || (drb == 0))) { ++ printk(KERN_ERR "%s() Failed to allocate dma request block!\n", __func__); ++ return FALSE; ++ } ++ ++ drb->addr0 = FTCFC_PA_BASE + 0x10; ++ drb->addr1 = addr_iter; ++ ++ if (size <= drb_size) { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, size); ++ drb->sync = &cf_dma_cmpl; ++ size = 0; ++ } else { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, drb_size); ++ drb->sync = 0; ++ size -= drb_size; ++ addr_iter += drb_size; ++ } ++ //printk(KERN_INFO "%s() size_remain 0x%08x.\n", __func__, size); ++ ++ if (unlikely(0 != dmad_submit_request(ch_req, drb, 1))) { ++ printk(KERN_ERR "%s() Failed to submit dma request block!\n", __func__); ++ return FALSE; ++ } ++ } ++ ++ if (wait_for_completion_timeout(&cf_dma_cmpl, timeout - jiffies) == 0) ++ printk("%s: read timeout\n", __func__); ++ ++ DEBUG( CF_DEBUG, 1, "ControlReg: 0x%08x, HostStatus: 0x%08x, BufCtrl: 0x%08x\n", ++ cfc->ControlReg, cfc->HostStatus, cfc->BuffCtrlReg); ++ ++ while( !( cfc->HostStatus & INTA_BIT)) ++ ; ++ ++ cfc->HostStatus = INTA_BIT; ++ ++ /* Stop 
DMA */ ++ cfc->ControlReg &= ~ENDMA_BIT; ++ cfc->ControlReg &= ~MODE_BIT; ++ ++ do { ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ } while( data & BUSY_BIT); ++ } ++ else { ++ while( size > 0){ ++ ++ /* read data from FIFO */ ++ if( size >= ( CFC_READ_FIFO_LEN << 2)) ++ count = CFC_READ_FIFO_LEN; ++ else ++ count = size >> 2; ++ ++ /* read data from FIFO */ ++ DEBUG( CF_DEBUG, 0, "\n"); ++ size -= ( count << 2); ++ } ++ } ++ ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ while( 1){ ++ ++ if( data & ERR_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "ERROR: ( CFReadSector) CF Read sector error.\n"); ++ return FALSE; ++ } ++ else if( data & DWF_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "ERROR: ( CFReadSector) CF write fault error.\n"); ++ return FALSE; ++ } ++ else if( data & ( RDY_BIT | DSC_BIT)){ ++ ++ break; ++ } ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ } ++ ++ return TRUE; ++} ++ ++static int cfc_write_block( cf_card_t *info, uint size, uint *buf) ++{ ++ unsigned long timeout = jiffies + CFC_TIMEOUT_BASE * 3 *( ( size + 511) >> 9); ++ uint count; ++ u8 data; ++ dmad_chreq *ch_req = (dmad_chreq *)info->private; ++ dmad_drb *drb = 0; ++ u32 drb_size = 0; ++ dma_addr_t addr_iter; ++ ++ if( info->DMAEnable){ ++ ++ DEBUG( CF_DEBUG, 1, "size: %d, buf: %p) - DMA Write\n", size, buf); ++ ++ init_completion(&cf_dma_cmpl); ++ ++ if (dma_buf) ++ consistent_sync(__va(dma_buf), size, DMA_TO_DEVICE); ++ else ++ consistent_sync(buf, size, DMA_TO_DEVICE); ++ ++ //prepare parameter for add dma entry ++ dmad_config_channel_dir(ch_req, DMAD_DIR_A1_TO_A0); ++ ++ drb_size = dmad_max_size_per_drb(ch_req); ++ ++ if (dma_buf) ++ addr_iter = dma_buf; // given dest phy addr ++ else ++ addr_iter = __pa(buf); ++ ++ cfc->ControlReg |= ENDMA_BIT; ++ ++ while (size > 0) { ++ ++ if (unlikely(0 != dmad_alloc_drb(ch_req, &drb) || (drb == 0))) { ++ printk(KERN_ERR "%s() Failed to allocate dma request block!\n", __func__); ++ return FALSE; ++ } ++ ++ drb->addr0 = FTCFC_PA_BASE + 0x10; ++ drb->addr1 = addr_iter; ++ ++ if (size <= drb_size) { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, size); ++ drb->sync = &cf_dma_cmpl; ++ size = 0; ++ } else { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, drb_size); ++ drb->sync = 0; ++ size -= drb_size; ++ addr_iter += drb_size; ++ } ++ //printk(KERN_INFO "%s() size_remain 0x%08x.\n", __func__, size); ++ ++ if (unlikely(0 != dmad_submit_request(ch_req, drb, 1))) { ++ printk(KERN_ERR "%s() Failed to submit dma request block!\n", __func__); ++ return FALSE; ++ } ++ } ++ ++ if (wait_for_completion_timeout(&cf_dma_cmpl, timeout - jiffies) == 0) ++ printk("write timeout\n"); ++ ++ while( !( cfc->HostStatus & INTA_BIT)) ++ ; ++ ++ cfc->HostStatus = INTA_BIT; ++ /* Stop DMA */ ++ cfc->ControlReg &= ~ENDMA_BIT; ++ cfc->ControlReg &= ~MODE_BIT; ++ ++ do{ ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ } while( data & BUSY_BIT); ++ } ++ else { ++ while( size > 0){ ++ ++ /* write data from FIFO */ ++ if( size >= ( CFC_WRITE_FIFO_LEN << 2)) ++ count = CFC_WRITE_FIFO_LEN; ++ else ++ count = ( size >> 2); ++ ++ size -= ( count << 2); ++ } ++ } ++ ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ while( 1){ ++ ++ if( data & ERR_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "ERROR: ( CFReadSector) CF Read sector error.\n"); ++ return FALSE; ++ } ++ else if( data & DWF_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "ERROR: ( CFReadSector) CF write fault error.\n"); ++ return FALSE; ++ } ++ else if( data & ( RDY_BIT | DSC_BIT)) ++ break; ++ ++ data = ReadCFCardByte( 
BLKMEM_STATUS_REG, COMMON_MEM); ++ } ++ return TRUE; ++} ++ ++static int cf_card_insert( cf_card_t *info) ++{ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ CardReset(); ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return TRUE; ++} ++ ++/* Free IRQ and DMA resources */ ++static void cf_free( cf_dev_t *dev) ++{ ++ release_region( FTCFC_VA_BASE, 0x48); /* return ioport */ ++ free_irq( FTCFC_IRQ, dev); /* return Hotswapping irq */ ++ ++ if (cf_card_info.DMAEnable) { ++#if (CF_DEBUG) ++ if (dev->ch_req.controller == DMAD_DMAC_APB_CORE) ++ printk("%s: free APB dma channel (%d)\n", __func__, dev->ch_req.channel); ++ else ++ printk("%s: free AHB dma channel (%d)\n", __func__, dev->ch_req.channel); ++#endif ++ dmad_channel_free(&dev->ch_req); ++ cf_card_info.DMAEnable = FALSE; ++ } ++} ++ ++static int cf_card_remove( cf_card_t *info) ++{ ++ cf_err_code = ERR_NO_ERROR; ++ ++ info->ActiveState = FALSE; ++ info->WriteProtect = FALSE; ++ info->RCA = 0; ++ ++ /* reset host interface controller */ ++ cfc->ControlReg |= RESET_BIT; ++ ++ mdelay( 100); ++ cfc->ControlReg &= ~RESET_BIT; /* must manually clear reset bit */ ++ return TRUE; ++} ++ ++irqreturn_t cf_hotswap_interrupt_handler( int irq, void *dev_id) ++{ ++ cf_dev_t *dev = dev_id; ++ ++ DEBUG( CF_DEBUG, 1, "irq: %d\n", irq); ++ ++ /* ++ * When the card is inserted or removed, we must delay a short time to make sure ++ * the SDC_STATUS_REG_CARD_INSERT bit of status register is stable ++ */ ++ ++ if( cfc->HostStatus & INT_CD_BIT ){ ++ ++ mdelay( 100); /* wait 0.1 sec for card stable */ ++ ++ DEBUG( CF_DEBUG, 1, "Card %s\n", cfc->HostStatus & CARD_DETECT_BIT ? "Insert" : "Remove"); ++ if( cfc->HostStatus & CARD_DETECT_BIT ){ ++ ++ dev->card_state = CF_CARD_INSERT; ++ cf_card_insert( &cf_card_info); ++ } ++ else { ++ dev->card_state = CF_CARD_REMOVE; ++ cf_card_remove( &cf_card_info); ++ } ++ ++ cfc->HostStatus = INT_CD_BIT; ++ } ++ else{ ++ DEBUG( CF_DEBUG, 1, "cfc->HostStatus & INT_CD_BIT == 0\n"); ++ } ++ ++ DEBUG( CF_DEBUG, 1, "Exit: card state = %d\n", dev->card_state); ++ return IRQ_HANDLED; ++} ++ ++int cf_read_multiple_block( cf_card_t *info, uint addr, uint count, uint size, uint timeout, unchar *buf) ++{ ++ u8 data; ++ ++ DEBUG( CF_DEBUG, 1, "read block addr: 0x%x(%d) sectors: %d\n", addr, addr, count); ++ ++ cf_err_code = ERR_NO_ERROR; ++ Translate_Config_HCS( addr, count); ++ CF_SendCommand( ATA_READ_SECTOR); ++ ++ do{ ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ ++ } while( data & BUSY_BIT); ++ ++ SetTransferSizeEx( BLKMEM_DATA_REG, COMMON_MEM, READ_OP, NOINCADR, CF_SECTOR_SIZE * count); ++ ++ if( first_run == 0){ ++ ++ udelay( 100000); ++ ++ first_run = 1; ++ } ++ ++ if( !cfc_read_block( info, CF_SECTOR_SIZE * count, ( uint *)buf)) ++ return FALSE; ++ ++ if( cf_err_code != ERR_NO_ERROR){ ++ ++ DEBUG( CF_DEBUG, 1, "error = 0x%x\n", cf_err_code); ++ DEBUG( CF_DEBUG, 1, "r addr %d count %d\n", addr, count); ++ ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++int cf_write_multiple_block( cf_card_t *info, uint addr, uint count, uint size, uint timeout, unchar *buf) ++{ ++ u8 data; ++ ++ DEBUG( CF_DEBUG, 1, "write block addr: 0x%08lx, sectors: %x, sector: %x\n", ( unsigned long)addr, count, 512); ++ ++ cf_err_code = ERR_NO_ERROR; ++ Translate_Config_HCS( addr, count); ++ CF_SendCommand( ATA_WRITE_SECTOR); ++ do { ++ data = ReadCFCardByte( BLKMEM_STATUS_REG, COMMON_MEM); ++ } while( data & BUSY_BIT); ++ ++ SetTransferSizeEx( BLKMEM_DATA_REG, COMMON_MEM, WRITE_OP, NOINCADR, CF_SECTOR_SIZE * count); ++ ++ if( !cfc_write_block( info, 
CF_SECTOR_SIZE*count, ( uint *) buf)) ++ return FALSE; ++ ++ if( cf_err_code != ERR_NO_ERROR){ ++ ++ DEBUG( CF_DEBUG, 1, "error: 0x%08lx\n", ( unsigned long)cf_err_code); ++ DEBUG( CF_DEBUG, 1, "w addr: %d, count: %d\n", addr, count); ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++ ++/*************************************************************************** ++ * SD Card Read/Write/Erase Function ++ ***************************************************************************/ ++int cf_read_sector( cf_card_t *info, uint addr, uint count, unchar *buf) ++{ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ if( !cf_read_multiple_block( info, addr + cf_devices->lba_sec_offset, count, ++ info->CSD.ReadBlockLength, info->ReadAccessTimoutCycle, buf)){ ++ ++ DEBUG( CF_DEBUG, 1, "read failed\n"); ++ return FALSE; ++ } ++ ++ DEBUG( CF_DEBUG, 1, "read ok\n"); ++ ++ if( cf_devices->lba_sec_offset == 0){ ++ ++ if( !cf_read_multiple_block( info, 0, 1, info->CSD.ReadBlockLength, info->ReadAccessTimoutCycle, buf)) ++ return FALSE; ++ ++ /* lba ( ??) sector offset */ ++ cf_devices->lba_sec_offset = ( *( buf + 0x1C6)) ++ | ( *( buf + 0x1C7)) << 8 ++ | ( *( buf + 0x1C8)) << 16 ++ | ( *( buf + 0x1C9)) << 24; ++ ++ /* device total sector number */ ++ g_cf_sectors = ( *( buf + 0x1CA)) ++ | ( *( buf + 0x1CB)) << 8 ++ | ( *( buf + 0x1CC)) << 16 ++ | ( *( buf + 0x1CD)) << 24; ++ ++ /* only for testing , it only to let format command ok */ ++ if( ( buf[ 0x1be] != 0x0) && ( buf[ 0x1be] != 0x80)) /* partition identify */ ++ cf_devices->lba_sec_offset = 0; /* sector 0 is PBR */ ++ else ++ cf_devices->lba_sec_offset = ( buf[ 0x1c6]) ++ | ( buf[ 0x1c7] << 8) ++ | ( buf[ 0x1c8] << 16) ++ | ( buf[ 0x1c9] << 24); ++ ++ DEBUG( CF_DEBUG, 1, "lba_sec_offet is %d\n", cf_devices->lba_sec_offset); ++ DEBUG( CF_DEBUG, 1, "the device( partition) total sector number is %d\n", g_cf_sectors); ++ } ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ return TRUE; ++} ++ ++int cf_write_sector( cf_card_t *info, uint addr, uint count, unchar *buf) ++{ ++ if( !cf_write_multiple_block( info, addr+cf_devices->lba_sec_offset, count, ++ info->CSD.ReadBlockLength, info->ReadAccessTimoutCycle, buf)){ ++ ++ DEBUG( CF_DEBUG, 1, "write failed\n"); ++ ++ return FALSE; ++ } ++ ++ DEBUG( CF_DEBUG, 1, "write ok\n"); ++ ++ return TRUE; ++} ++ ++/* ++ * Perform an actual transfer: ++ * Returns: # of sectors transferred. 
0 = error ++ */ ++int cf_transfer( cf_dev_t *device, const struct request *req) ++{ ++ int status = 0; ++ int count = 0; ++ ++ struct bio *bio = req->bio; ++ struct bio_vec *bvec; ++ struct req_iterator iter; ++ ++ DEBUG( CF_DEBUG, 1, "req sector: %d, phys_seg: %d, buf: 0x%08lx\n", ++ (int)req->__sector, req->nr_phys_segments, ++ (unsigned long)bio_data(bio)); ++ ++ spin_unlock_irq( &device->lock); ++ ++ rq_for_each_segment( bvec, req, iter){ ++ ++ unsigned char *buf = page_address( bvec->bv_page) + bvec->bv_offset; ++ int sectors = bio_cur_bytes( bio) >> 9; ++ ++ DEBUG( CF_DEBUG, 1, "bvec[%2d]: sector: %d, count: %d, curr: %d, buf: 0x%08lx\n", ++ iter.i, ( int)bio->bi_sector, count, ( int)sectors, ( unsigned long)buf); ++ ++ cf_card_info.private = (void *)&device->ch_req; ++ ++ if( rq_data_dir( req) == 0) /* Read */ ++ status = cf_read_sector( &cf_card_info, bio->bi_sector, sectors, buf); ++ else ++ status = cf_write_sector( &cf_card_info, bio->bi_sector, sectors, buf); ++ ++ DEBUG( CF_DEBUG, 1, "status: %d\n", status); ++ ++ if (status <= 0) { ++ spin_lock_irq( &device->lock); ++ return count; ++ } ++ ++ count += sectors; ++ bio->bi_sector += sectors; ++ } ++ ++ spin_lock_irq( &device->lock); ++ ++ if( status <= 0) ++ return 0; ++ else ++ return count; ++} ++ ++static int cf_card_setup(cf_dev_t *dev) ++{ ++ int i; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ first_run = 0; ++ cf_err_code = ERR_NO_ERROR; ++ ++ cf_card_info.ActiveState = FALSE; ++ cf_card_info.WriteProtect = FALSE; ++ cf_card_info.IOAddr = FTCFC_VA_BASE; ++ cf_card_info.DMAEnable = TRUE; ++ cf_card_info.DMAChannel = dev->ch_req.channel; ++ cf_card_info.SysFrequency = AHB_CLK_IN / 2; ++ cf_card_info.RCA = 0; ++ ++ DEBUG( CF_DEBUG, 1, "DMA Enable is %d, Sys frequency = %d\n", cf_card_info.DMAEnable, cf_card_info.SysFrequency); ++ ++ if( !cf_card_insert( &cf_card_info)) ++ return FALSE; ++ ++ /* Marketing MB is not 1048576 */ ++ DEBUG( CF_DEBUG, 1, "FTCFC010: CF Card Capacity = %d KBytes\n", g_cf_sectors >> 1); ++ ++ for( i = 0; i < CF_DEVS; i++) ++ cf_devices[i].size = g_cf_sectors; /* unit is block, not bytes */ ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return TRUE; ++} ++ ++int cf_ioctl( struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) ++{ ++ int size; ++ struct hd_geometry geo; ++ cf_dev_t *device = bdev->bd_disk->private_data; ++ ++ DEBUG( CF_DEBUG, 1, "ioctl 0x%x 0x%lx\n", cmd, arg); ++ ++ switch ( cmd){ ++ ++ case BLKGETSIZE: ++ /* ++ * Return the device size, expressed in sectors ++ * FIXME: distinguish between kernel sector size and media sector size ++ */ ++ size = device->size; ++ __copy_to_user ( ( long *) arg, &size, sizeof ( long)); ++ return 0; ++ ++ case HDIO_GETGEO: ++ /* ++ * get geometry: we have to fake one... trim the size to a ++ * multiple of 64 ( 32k): tell we have 16 sectors, 4 heads, ++ * whatever cylinders. Tell also that data starts at sector. 4. ++ */ ++ geo.cylinders = ( device->size / 4) / 8; /* ?? 
only for test */ ++ geo.heads = 4; ++ geo.sectors = 8; ++ geo.start = 0; ++ __copy_to_user ( ( void *) arg, &geo, sizeof ( geo)); ++ return 0; ++ ++ default: ++ /* For ioctls we don't understand, let the block layer handle them */ ++ return -ENOTTY; ++ } ++ ++ return -ENOTTY; /* unknown command */ ++} ++ ++static void cf_request( struct request_queue *q) ++{ ++ cf_dev_t *dev; ++ int ret = 0; ++ struct request *req; ++ static int act = 0; ++ ++ if( act) ++ return; ++ ++ act = 1; ++ ++ while( ( req = blk_fetch_request( q)) != NULL){ ++ ++ dev = req->rq_disk->private_data; ++ ++ if( !dev || dev->card_state == CF_CARD_REMOVE){ ++ ++ DEBUG( CF_DEBUG, 1, "CF: locating device error\n"); ++ __blk_end_request_cur( req, -EIO); ++ ++ act = 0; ++ return; ++ } ++ ++ ret = cf_transfer( dev, req); ++ __blk_end_request( req, 0, ret << 9); ++ } ++ ++ act = 0; ++} ++ ++static int cf_dma_ch_alloc(cf_dev_t *dev) ++{ ++ dmad_chreq *ch_req = &dev->ch_req; ++ ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++ ch_req->apb_req.addr0_ctrl = APBBR_ADDRINC_FIXED; /* (in) APBBR_ADDRINC_xxx */ ++ ch_req->apb_req.addr0_reqn = APBBR_REQN_CFC; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ ch_req->apb_req.addr1_ctrl = APBBR_ADDRINC_I4X; /* (in) APBBR_ADDRINC_xxx */ ++ ch_req->apb_req.addr1_reqn = APBBR_REQN_NONE; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ ch_req->apb_req.burst_mode = 0; /* (in) Burst mode (0: no burst 1-, 1: burst 4- data cycles per dma cycle) */ ++ ch_req->apb_req.data_width = APBBR_DATAWIDTH_4; /* (in) APBBR_DATAWIDTH_4(word), APBBR_DATAWIDTH_2(half-word), APBBR_DATAWIDTH_1(byte) */ ++ ch_req->apb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ ++ ch_req->controller = DMAD_DMAC_APB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ ch_req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ printk(KERN_INFO "%s: APB dma channel allocation failed\n", __func__); ++ goto _try_ahb; ++ } ++ ++#if (CF_DEBUG) ++ printk("%s: APB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++#endif ++ ++ return 0; ++ ++_try_ahb: ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++ ch_req->ahb_req.sync = 1; /* (in) non-zero if src and dst have different clock domain */ ++ ch_req->ahb_req.priority = DMAC_CSR_CHPRI_1; /* (in) DMAC_CSR_CHPRI_0 (lowest) ~ DMAC_CSR_CHPRI_3 (highest) */ ++ ch_req->ahb_req.hw_handshake = 1; /* (in) non-zero to enable hardware handshake mode */ ++ ch_req->ahb_req.burst_size = DMAC_CSR_SIZE_4; /* (in) DMAC_CSR_SIZE_1 ~ DMAC_CSR_SIZE_256 */ ++ ch_req->ahb_req.addr0_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ ch_req->ahb_req.addr0_ctrl = DMAC_CSR_AD_FIX; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ ch_req->ahb_req.addr0_reqn = DMAC_REQN_CFC; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ ch_req->ahb_req.addr1_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ ch_req->ahb_req.addr1_ctrl = DMAC_CSR_AD_INC; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ ch_req->ahb_req.addr1_reqn = DMAC_REQN_NONE; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ ++ 
ch_req->controller = DMAD_DMAC_AHB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ ch_req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ printk(KERN_INFO "%s: AHB dma channel allocation failed\n", __func__); ++ goto _err_exit; ++ } ++ ++#if (CF_DEBUG) ++ printk("%s: AHB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++#endif ++ ++ return 0; ++ ++_err_exit: ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++ return -ENODEV; ++} ++ ++/* ++ * Note no locks taken out here. In a worst case scenario, we could drop ++ * a chunk of system memory. But that should never happen, since validation ++ * happens at open or mount time, when locks are held. ++ */ ++static int cf_revalidate( struct gendisk *gd) ++{ ++ cf_dev_t *dev = gd->private_data; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ dev->card_state = cfc->HostStatus & CARD_DETECT_BIT ? CF_CARD_INSERT : CF_CARD_REMOVE; ++ ++ DEBUG( CF_DEBUG, 1, "card state: %s\n", ++ dev->card_state == CF_CARD_INSERT ? "INSERT" : ++ dev->card_state == CF_CARD_WORK ? "WORK" : "REMOVE"); ++ ++ if( !dev->usage){ ++ ++ if( cf_card_setup(dev) != TRUE){ ++ ++ DEBUG( CF_DEBUG, 1, "cf_card_setup failed\n"); ++ dev->card_state = CF_CARD_REMOVE; ++ ++ return -1; ++ } ++ else { ++ DEBUG( CF_DEBUG, 1, "CFC Driver with DMA Mode\n"); ++ ++ if (cf_card_info.DMAEnable) { ++ /* acquire dma channel */ ++ if (cf_dma_ch_alloc(dev) != 0) { ++ cf_card_info.DMAEnable = FALSE; ++ cf_free( dev); ++ DEBUG( CF_DEBUG, 1, "Request DMA resource failed\n"); ++ return -1; ++ } ++ DEBUG( CF_DEBUG, 1, "Request DMA resource success\n"); ++ } ++ ++ /* SDC interrupt, currently only for HotSwap */ ++ DEBUG( CF_DEBUG, 1, "Request CFC IRQ: %d\n", FTCFC_IRQ); ++ ++ if( request_irq( FTCFC_IRQ, cf_hotswap_interrupt_handler, IRQF_DISABLED, "CF controller", dev) != 0){ ++ ++ DEBUG( CF_DEBUG, 1, "Unable to allocate CFC IRQ: 0x%x\n", FTCFC_IRQ); ++ cf_free( dev); ++ return -1; ++ } ++ ++ /* require io port address for sd controller */ ++ if( request_region( FTCFC_VA_BASE, 0x48, "CF Controller") == NULL){ ++ ++ DEBUG( CF_DEBUG, 1, "request io port of sd controller fail\n"); ++ cf_free( dev); ++ return -1; ++ } ++ ++ dev->card_state = CF_CARD_WORK; ++ } ++ } ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return 0; ++} ++ ++/* TODO: forbids open for write when WRITE_PROTECT = 1 */ ++static int cf_open( struct block_device *bdev, fmode_t mode) ++{ ++ cf_dev_t *dev= bdev->bd_disk->private_data; /* device information */ ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ if( ( cfc->HostStatus & CARD_DETECT_BIT) != CARD_DETECT_BIT){ ++ ++ DEBUG( CF_DEBUG, 1, "Error: ( ENOMEDIUM)\n"); ++ return -ENOMEDIUM; ++ } ++ ++ if( CFCardInit()){ ++ ++ DEBUG( CF_DEBUG, 1, "root initialize failed\n"); ++ return FALSE; ++ } ++ ++ if( !dev->usage){ ++ ++ dev->media_change = 1; ++ DEBUG( CF_DEBUG, 1, "forced check_disk_change check\n"); ++ check_disk_change( bdev); ++ } ++ else{ ++ dev->media_change = 0; ++ } ++ ++ DEBUG( CF_DEBUG, 1, "set_capacity() to %d blocks ( %d KBytes)\n", dev->size, dev->size >> 1); ++ set_capacity( dev->gd, dev->size); ++ dev->usage++; ++ cf_devices->lba_sec_offset = 0; ++ DEBUG( CF_DEBUG, 1, "dev: 0x%08lx, cf_devices: 0x%08lx\n", ( unsigned long)dev, ( unsigned long)cf_devices); ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return 0; /* success */ ++} ++ ++static int cf_release( struct gendisk *gd, fmode_t mode) ++{ ++ cf_dev_t *dev = gd->private_data; ++ ++ DEBUG( CF_DEBUG, 1, "Enter\n"); ++ ++ 
dev->usage--; ++ ++ if( !dev->usage) ++ cf_free( dev); ++ ++ DEBUG( CF_DEBUG, 1, "Exit\n"); ++ ++ return 0; ++} ++ ++static void setup_device( struct cf_dev *dev, int which) ++{ ++ memset ( dev, 0, sizeof ( struct cf_dev)); ++ dev->size = 0; ++ spin_lock_init( &dev->lock); ++ ++ dev->queue = blk_init_queue( cf_request, &dev->lock); ++ ++ if( !dev->queue) ++ goto out_vfree; ++ ++ blk_queue_logical_block_size( dev->queue, hardsect_size); ++ dev->queue->queuedata = dev; ++ ++ dev->card_state = CF_CARD_REMOVE; ++ ++ /* And the gendisk structure. */ ++ dev->gd = alloc_disk( CF_MINORS); ++ if( ! dev->gd){ ++ ++ DEBUG( CF_DEBUG, 1, "alloc_disk failure\n"); ++ goto out_vfree; ++ } ++ ++ dev->gd->flags = GENHD_FL_REMOVABLE | GENHD_FL_SUPPRESS_PARTITION_INFO; ++ dev->gd->major = cf_major; ++ dev->gd->first_minor = which * CF_MINORS; ++ dev->gd->minors = CF_MINORS; ++ dev->gd->fops = &cf_fops; ++ dev->gd->queue = dev->queue; ++ dev->gd->private_data = dev; ++ ++ snprintf ( dev->gd->disk_name, 32, "cpecf%c", which + 'a'); ++ set_capacity( dev->gd, 0); ++ add_disk( dev->gd); ++ ++ return; ++ ++out_vfree: ++ ++ return; ++} ++ ++static int cf_media_changed( struct gendisk *gd) ++{ ++ struct cf_dev *dev = gd->private_data; ++ ++ DEBUG( CF_DEBUG, 1, "cf_media_changed = %d\n", dev->media_change); ++ ++ return dev->media_change; ++} ++ ++struct block_device_operations cf_fops = { ++ ++ .owner = THIS_MODULE, ++ .open = cf_open, ++ .release = cf_release, ++ .ioctl = cf_ioctl, ++ .revalidate_disk = cf_revalidate, ++ .media_changed = cf_media_changed, ++}; ++ ++static int __init cf_module_init( void) ++{ ++ int result= -ENOMEM, i; ++ ++ DEBUG( 1, 0, "Faraday CF controller Driver (DMA mode)\n"); ++ ++ cf_major = register_blkdev( cf_major, DEVICE_NAME); ++ ++ if( cf_major <= 0){ ++ ++ DEBUG( CF_DEBUG, 1, ":unable to get major number\n"); ++ return -EBUSY; ++ } ++ ++ DEBUG( 1, 0, "CF: make node with 'mknod /dev/cpecf b %d 0'\n", cf_major); ++ ++ cf_devices = kzalloc( CF_DEVS * sizeof( cf_dev_t), GFP_KERNEL); ++ ++ if( !cf_devices) ++ goto fail_malloc; ++ ++ for( i = 0; i < CF_DEVS; i++) ++ setup_device( cf_devices + i, i); ++ ++ return 0; ++ ++fail_malloc: ++ ++ if( cf_devices) ++ kfree( cf_devices); ++ ++ unregister_blkdev( cf_major, DEVICE_NAME); ++ ++ return result; ++} ++ ++static void cf_module_cleanup( void) ++{ ++ int i; ++ ++ if( cf_devices){ ++ ++ for( i = 0; i < CF_DEVS; i++){ ++ ++ del_gendisk( cf_devices[i].gd); ++ put_disk( cf_devices[i].gd); ++ ++ if( cf_devices[i].queue) ++ blk_cleanup_queue( cf_devices[i].queue); ++ } ++ ++ kfree( cf_devices); ++ } ++ ++ unregister_blkdev( cf_major, DEVICE_NAME); ++} ++ ++module_init( cf_module_init); ++module_exit( cf_module_cleanup); ++ ++MODULE_AUTHOR( "Faraday Corp."); ++MODULE_LICENSE( "GPL"); +diff -Nur linux-3.4.110.orig/drivers/block/ftcfc010.h linux-3.4.110/drivers/block/ftcfc010.h +--- linux-3.4.110.orig/drivers/block/ftcfc010.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/block/ftcfc010.h 2016-04-07 10:20:51.022084119 +0200 +@@ -0,0 +1,438 @@ ++/* drivers/block/CPECF/ftcfc.h ++ * ++ * Faraday FTCFC010 Device Driver ++ * ++ * Copyright (C) 2005 Faraday Corp. 
(http://www.faraday-tech.com) ++ * ++ * All Rights Reserved ++ */ ++ ++#ifndef _FTCFC_H_ ++#define _FTCFC_H_ ++ ++#define CF_DEBUG 1 ++ ++#ifndef TRUE ++#define TRUE 1 ++#endif ++ ++#ifndef FALSE ++#define FALSE 0 ++#endif ++ ++#define CF_DEVS 1 /* number of disks */ ++#define CF_MINORS 16 /* minors per disk */ ++#define CF_SECTOR_SIZE 512 /* sector size */ ++ ++/* SD Card State */ ++#define CF_CARD_REMOVE 0 ++#define CF_CARD_INSERT 1 ++#define CF_CARD_WORK 2 ++ ++/* data window register */ ++#define CFC_READ_FIFO_LEN 4 ++#define CFC_WRITE_FIFO_LEN 4 ++ ++/* card type, sd or mmc */ ++#define MEMORY_CARD_TYPE_SD 0 ++#define MEMORY_CARD_TYPE_MMC 1 ++ ++/***************************************************************************** ++ * SYSTEM ERROR_CODE ++ ****************************************************************************/ ++#define ERR_NO_ERROR 0x00000000 ++ ++/* command error */ ++#define ERR_DATA_CRC_ERROR 0x00000100 ++#define ERR_RSP_CRC_ERROR 0x00000200 ++#define ERR_DATA_TIMEOUT_ERROR 0x00000400 ++#define ERR_RSP_TIMEOUT_ERROR 0x00000800 ++ ++#define ERR_WAIT_OVERRUN_TIMEOUT 0x00001000 ++#define ERR_WAIT_UNDERRUN_TIMEOUT 0x00002000 ++#define ERR_WAIT_DATA_CRC_TIMEOUT 0x00004000 ++#define ERR_WAIT_TRANSFER_END_TIMEOUT 0x00008000 ++ ++#define ERR_SEND_COMMAND_TIMEOUT 0x00010000 ++ ++/* using APB DMA error */ ++#define ERR_APBDMA_RSP_ERROR 0x02000000 ++ ++/* ++ * Please refer SanDisk SD Manual v1.9 Section 5.1.9.2 (page 5-76) to set the timeout setting ++ */ ++#if CF_DEBUG ++#define CFC_TIMEOUT_BASE (HZ/2) /* Unit is 500 ms */ ++#else ++#define CFC_TIMEOUT_BASE (HZ/3) /* Unit is 333 ms */ ++#endif ++ ++typedef struct _cf_cid_t ++{ ++ uint ManufacturerID; ++ uint ApplicationID; ++ unchar ProductName[7]; ++ uint ProductRevisionHigh; ++ uint ProductRevisionLow; ++ uint ProductSerialNumber; ++ uint ManufactureMonth; ++ uint ManufactureYear; ++} cf_cid_t; ++ ++typedef struct _cf_ccf_t ++{ ++ uint CSDStructure; ++ uint MMCSpecVersion; ++ uint TAAC_u; ++ uint NSAC_u; ++ uint TransferSpeed; ++ uint CardCmdClass; ++ uint ReadBlockLength; ++ uint ReadBlockPartial; ++ uint WriteBlockMisalign; ++ uint ReadBlockMisalign; ++ uint DSRImplemant; ++ uint BlockNumber; ++ uint MemorySize; ++ uint VDDReadMin_u; ++ uint VDDReadMax_u; ++ uint VDDWriteMin_u; ++ uint VDDWriteMax_u; ++ uint EraseBlkEnable; ++ uint EraseSectorSize; ++ uint WriteProtectGroupSize; ++ uint WriteProtectGroupEnable; ++ uint WriteSpeedFactor; ++ uint WriteBlockLength; ++ unchar WriteBlockPartial; ++ unchar CopyFlag; ++ unchar PermanentWriteProtect; ++ unchar TemporaryWriteProtect; ++ unchar FileFormat; ++} cf_ccf_t; ++ ++/***************************************************************************** ++ * SD SCR register ++ ****************************************************************************/ ++typedef struct _cf_scr_t ++{ ++ uint Reserved:16; ++ uint SD_BUS_WIDTH:4; ++ uint SD_SECURITY:3; ++ uint DATA_STAT_AFTER_ERASE:1; ++ uint SD_SPEC:4; ++ uint SCR_STRUCTURE:4; ++ ++ uint ManufacturerReserved; ++} cf_scr_t; ++ ++/***************************************************************************** ++ * sd card structure ++ ****************************************************************************/ ++typedef struct _cf_card_t ++{ ++ /* host interface configuration */ ++ uint IOAddr; /* host controller register base address */ ++ uint DMAEnable; ++ uint DMAChannel; ++ ++ uint CardType; ++ ++ uint CIDWord[4]; ++ cf_cid_t CID; ++ ++ uint CSDWord[4]; ++ cf_ccf_t CSD; ++ ++ ushort RCA; ++ cf_scr_t SCR; ++ ++ /* access time out */ 
++ uint ReadAccessTimoutCycle; ++ uint WriteAccessTimoutCycle; ++ ++ /* system configurations */ ++ uint SysFrequency; ++ ++ /* card status */ ++ int ActiveState; ++ int WriteProtect; ++ ++ void *private; ++} cf_card_t; ++ ++typedef struct _CF_Dev ++{ ++ u32 dev_size; /* used same as sect_num */ ++ u32 sect_num; /* sector num */ ++ u8 blksize_bit; /* It's not used */ ++ u32 blksize; /* size of block in file system, 1024 */ ++ ++ u32 phy_blksize; /* Physical size of block on the CF. It's not used */ ++ u32 phy_sectorsize; /* the size of erasable sector. It's not used */ ++ u32 wp_grp_size; /* the size of write protected group. It's not used */ ++ ++ spinlock_t lock; /* synchronization */ ++ struct semaphore sema; /* synchronization */ ++ u32 usage; ++ ++ u8 busy; ++ ++ wait_queue_head_t select_wait; /* Kernel thread blocked on it. It's not used */ ++ u8 cmd; /* What command being executed. */ ++ u32 result; /* set by tasklet */ ++ u8 *buff; ++ ++ u32 lba_sec_offset; ++ ++ /*APB_DMA */ ++ ++ u8 DMATransDir; /* 0: IDLE, 1:READ, 2:WRITE */ ++} CF_Dev; ++ ++#define CFC_R_IDLE 0x00 ++#define CFC_R_START_TRANSFER 0x01 ++#define CFC_R_SET_CFCARD_REG 0x02 ++#define CFC_R_SET_CFCARD_READ_CMD 0x03 ++#define CFC_R_CHK_CFCARD_READY 0x04 ++#define CFC_R_SET_DMA_REG 0x05 ++#define CFC_R_DMA_START 0x06 ++#define CFC_R_DMA_INT 0x07 ++#define CFC_R_DMA_FINISH 0x08 ++#define CFC_R_PIO_START 0x09 ++#define CFC_R_PIO_FINISH 0x0A ++#define CFC_R_CHECK_CFCARD_BUSY 0x0B ++#define CFC_R_CHECK_CFCARD_READY 0x0C ++ ++#define CFC_W_IDLE 0x00 ++#define CFC_W_START_TRANSFER 0x10 ++#define CFC_W_SET_CFCARD_REG 0x20 ++#define CFC_W_SET_CFCARD_READ_CMD 0x30 ++#define CFC_W_CHK_CFCARD_READY 0x40 ++#define CFC_W_SET_DMA_REG 0x50 ++#define CFC_W_DMA_START 0x60 ++#define CFC_W_DMA_INT 0x70 ++#define CFC_W_DMA_FINISH 0x80 ++#define CFC_W_PIO_START 0x90 ++#define CFC_W_PIO_FINISH 0xA0 ++#define CFC_W_CHECK_CFCARD_BUSY 0xB0 ++#define CFC_W_CHECK_CFCARD_READY 0xC0 ++ ++#define DMA_IDLE 0x00 ++#define DMA_READ 0x01 ++#define DMA_WRITE 0x02 ++ ++typedef struct CFCTYPE ++{ ++ u32 HostStatus; /* 0x00 */ ++ u32 ControlReg; /* 0x04 */ ++ u32 TimeCfgReg; /* 0x08 */ ++ u32 BuffCtrlReg; /* 0x0C */ ++ u32 BufferData; /* 0x10 */ ++ u32 MultiSector; /* 0x14 */ ++ ++ /* Enchanced Feature */ ++ u32 TransSzMode2En; /* 0x18 */ ++ u32 TransSzMode2Cnt; /* 0x1C */ ++ u32 Reserved[4]; /* 0x20~0x2F is reserved */ ++ u32 Revision; /* 0x30 */ ++ u32 Feature; /* 0x34 (define buffer size) */ ++ ++} CFCTYPE; ++ ++#define cfc ((volatile struct CFCTYPE *) (FTCFC_VA_BASE)) ++ ++typedef struct _tag_CFCardInfo ++{ ++ u32 SectTotal; ++ u8 ConfigOptionReg; ++ u8 ConfigAndStatusReg; ++ u8 PinReplaceReg; ++ u8 SocketCopyReg; ++ ++} CF_CARD_INFO; ++ ++#define CFC_C_Complete 0x00000400 ++#define CFC_B_Ready 0x00000200 ++#define CFC_8bit 0x00000040 ++#define CFC_16bit 0x00000000 ++#define CFC_Reset 0x00000020 ++#define CFC_T_2048 0x000C0000 ++#define CFC_T_1024 0x000B0000 ++#define CFC_T_512 0x000A0000 ++#define CFC_T_256 0x00090000 ++#define CFC_T_128 0x00080000 ++#define CFC_T_64 0x00070000 ++#define CFC_T_32 0x00060000 ++#define CFC_T_16 0x00050000 ++#define CFC_T_8 0x00040000 ++#define CFC_T_4 0x00030000 ++#define CFC_T_2 0x00020000 ++#define CFC_T_1 0x00010000 ++#define CFC_Read 0x00000000 ++#define CFC_Write 0x00008000 ++#define CFC_Increment 0x00004000 ++#define CFC_Attribute 0x00000000 ++#define CFC_Memory 0x00002000 ++#define CFC_IO 0x00003000 ++ ++/* CF status register bit mapping */ ++#define RDY_nIREQ_BIT 0x0001 ++#define CARD_DETECT_BIT 0x0002 ++#define 
VOL33_SENSE_BIT 0x0004 ++#define VOL40_SENSE_BIT 0x0008 ++#define STATUS_CHANGE_BIT 0x0010 ++#define SPKR_BIT 0x0020 ++#define BUF_ACTIVE_BIT 0x0100 ++#define BUF_DATA_RDY_BIT 0x0200 ++#define INTA_BIT 0x0400 ++#define BUF_SIZE_BITS 0xF000 ++#define INT_CD_BIT 0x10000 ++#define INT_IO_BIT 0x20000 ++ ++/* CF control register bit mapping */ ++#define POWER_CONTROL_BIT 0x000F ++#define FLOW_CONTROL_BIT 0x0010 ++#define RESET_BIT 0x0020 ++#define MODE_BIT 0x0040 ++#define DMA_BIT 0x0100 ++ ++#define PWR_ON 1 ++#define PWR_OFF 0 ++ ++#define SIGNAL_ON 0x0010 ++#define SIGNAL_OFF 0x0000 ++ ++#define BYTE_MODE 0x0040 ++#define WORD_MODE 0x0000 ++ ++#define ENDMA_BIT 0x0100 ++#define DISDMA_BIT 0x0000 ++ ++#define CARD_DETECT_INT_MASK 0x0200 ++#define DATA_CMP_INT_MASK 0x0400 ++#define IO_INT_MASK 0x0800 ++ ++/* active buffer control register bit mapping */ ++#define ADR_BIT 0x007FF ++#define TYPE_BIT 0x03000 ++#define INCADR_BIT 0x04000 ++#define RW_BIT 0x08000 ++#define TRANS_SIZE_CONTROL_BIT 0xF0000 ++ ++#define INCADR 0x04000 ++#define NOINCADR 0x00000 ++#define TRANS_SIZE_LOC 16 ++ ++#define SIZE_1_BYTE 0x01 ++#define SIZE_2_BYTE 0x02 ++#define SIZE_4_BYTE 0x03 ++#define SIZE_8_BYTE 0x04 ++#define SIZE_16_BYTE 0x05 ++#define SIZE_32_BYTE 0x06 ++#define SIZE_64_BYTE 0x07 ++#define SIZE_128_BYTE 0x08 ++#define SIZE_256_BYTE 0x09 ++#define SIZE_512_BYTE 0x0a ++#define SIZE_1024_BYTE 0x0b ++#define SIZE_2048_BYTE 0x0c ++ ++#define READ_OP 0x00000 ++#define WRITE_OP 0x08000 ++ ++/* type description */ ++#define ATTRIBUTE_MEM 0x00000 ++#define COMMON_MEM 0x02000 ++ ++/* ++ * CF card ++ * memory map register ++ * IO block register ++ */ ++#define BLKMEM_DATA_REG 0x000 ++#define BLKMEM_ERROR_REG ( BLKMEM_DATA_REG + 0x01) ++#define BLKMEM_FEATURE_REG ( BLKMEM_DATA_REG + 0x01) ++#define BLKMEM_SECTOR_COUNT_REG ( BLKMEM_DATA_REG + 0x02) ++#define BLKMEM_SECTOR_NUMBER_REG ( BLKMEM_DATA_REG + 0x03) ++#define BLKMEM_CYLINDER_LOW_REG ( BLKMEM_DATA_REG + 0x04) ++#define BLKMEM_CYLINDER_HIGH_REG ( BLKMEM_DATA_REG + 0x05) ++#define BLKMEM_DRIVE_REG ( BLKMEM_DATA_REG + 0x06) ++#define BLKMEM_STATUS_REG ( BLKMEM_DATA_REG + 0x07) ++#define BLKMEM_COMMAND_REG ( BLKMEM_DATA_REG + 0x07) ++ ++#define BLKMEM_EVEN_DATA_REG ( BLKMEM_DATA_REG + 0x08) ++#define BLKMEM_ODD_DATA_REG ( BLKMEM_DATA_REG + 0x09) ++#define BLKMEM_DUP_ERROR_REG ( BLKMEM_DATA_REG + 0x0d) ++#define BLKMEM_DUP_FEATURE_REG ( BLKMEM_DATA_REG + 0x0d) ++#define BLKMEM_DEV_CONTROL_REG ( BLKMEM_DATA_REG + 0x0e) ++#define BLKMEM_DRIVE_ADDR_REG ( BLKMEM_DATA_REG + 0x0f) ++#define BLKMEM_WINDOW_REG ( BLKMEM_DATA_REG + 0x400) ++#define BLKMEM_MAX_WINDOW_REG ( BLKMEM_DATA_REG + 0x7ff) ++ ++/* status register */ ++#define BUSY_BIT 0x80 ++#define RDY_BIT 0x40 ++#define DWF_BIT 0x20 ++#define DSC_BIT 0x10 ++#define DRQ_BIT 0x08 ++#define CORR_BIT 0x04 ++#define ERR_BIT 0x01 ++ ++/* CF ATA command */ ++#define ATA_CHECK_POWER_MODE 0xe5 ++#define ATA_EXECUTE_DRIVE_DIAG 0x90 ++#define ATA_ERASE_SECTOR 0xc0 ++#define ATA_FORMAT_TRACK 0x50 ++#define ATA_IDENTIFY_DRIVE 0xec ++#define ATA_IDLE 0xe3 ++#define ATA_IDLE_IMMEDIATE 0xe1 ++#define ATA_INIT_DRIVE_PARA 0x91 ++#define ATA_READ_BUFFER 0xe4 ++#define ATA_READ_LONG_SECTOR 0x22 ++#define ATA_READ_MULTIPLE 0xc4 ++#define ATA_READ_SECTOR 0x21 ++#define ATA_READ_VERIFY_SECTOR 0x40 ++#define ATA_RECALIBRATE 0x10 ++#define ATA_REQUEST_SENSE 0x03 ++#define ATA_SECURITY_DISABLE_PASSWORD 0xf6 ++#define ATA_SECURITY_EREASE_PREPARE 0xf3 ++#define ATA_SECURITY_ERASE_UNIT 0xf4 ++#define ATA_SECURITY_FREEZE_LOCK 0xf5 ++#define 
ATA_SECURITY_SET_PASSWORD 0xf1 ++#define ATA_SECURITY_UNLOCK 0xf2 ++#define ATA_SEEK 0x70 ++#define ATA_SET_FEATURE 0xef ++#define ATA_SET_MULTIPLE_MODE 0xc6 ++#define ATA_SET_SLEEP_MODE 0xe6 ++#define ATA_STANDBY 0xe2 ++#define ATA_STANDBY_IMMEDIATE 0xe0 ++#define ATA_TRANSFER_SECTOR 0x87 ++#define ATA_WEAR_LEVEL 0xf5 ++#define ATA_WRITE_BUFFER 0xe8 ++#define ATA_WRITE_LONG_SECTOR 0x32 ++#define ATA_WRITE_MULTIPLE 0xc5 ++#define ATA_WRITE_MULTIPLE_WO_ERASE 0xcd ++#define ATA_WRITE_SECTOR 0x30 ++#define ATA_WRITE_SECTOR_WO_ERASE 0x38 ++#define ATA_WRITE_VERIFY 0x3c ++ ++#define CF_MEM_MAP_MODE 0x0 ++/* attribute memory space register description */ ++#define ATTRIBUTE_MEM_BASE 0 ++#define ATTRIBUTE_MEM_CONFIG_BASE 0x200 ++#define CONFIG_OPTION_REG ( ATTRIBUTE_MEM_CONFIG_BASE + 0x00) ++#define CARD_CONFIG_STATUS_REG ( ATTRIBUTE_MEM_CONFIG_BASE + 0x02) ++#define PIN_REPLACE_REG ( ATTRIBUTE_MEM_CONFIG_BASE + 0x04) ++#define SOCKET_COPY_REG ( ATTRIBUTE_MEM_CONFIG_BASE + 0x06) ++ ++/* configuration option register */ ++#define CONF0_BIT 0x01 ++#define CONF1_BIT 0x02 ++#define CONF2_BIT 0x04 ++#define CONF3_BIT 0x08 ++#define CONF4_BIT 0x10 ++#define CONF5_BIT 0x20 ++#define LEVLREQ_BIT 0x40 ++#define SRESET_BIT 0x80 ++ ++#endif +diff -Nur linux-3.4.110.orig/drivers/block/ftsdc010.c linux-3.4.110/drivers/block/ftsdc010.c +--- linux-3.4.110.orig/drivers/block/ftsdc010.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/block/ftsdc010.c 2016-04-07 10:20:51.022084119 +0200 +@@ -0,0 +1,2206 @@ ++/* drivers/block/CPESD/ftsdc010.c ++ ******************************************************************************* ++ * Faraday FTSDC010 Device Driver ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * ++ * All Rights Reserved ++ * ++ * Porting to Linux 2.6 on 20050815 ++ * Author: Chris Lee, I-Jui Sung, Peter Liao (support APB DMA) ++ * Version: 0.2 ++ * History: ++ * 0.1 new creation ++ * 0.2 Porting to meet the style of linux dma ++ * 0.3 modify dma usage to virtual irq of dma interrupt ++ * 0.4 (20050701) Improve r/w performance ++ * 0.5 Porting to Linux 2.6 and replace busy_loop checking with timer's timeout ++ * Todo: ++ ******************************************************************************* ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* HDIO_GETGEO */ ++#include ++#include ++#include /* invalidate_bdev */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define IPMODULE SDC ++#define IPNAME FTSDC010 ++ ++/* Define CONFIG_FTSDC010_USE_TIMER_DELAY if you want to use software timer instead of busy loop checking */ ++#define CONFIG_FTSDC010_USE_TIMER_DELAY ++#undef CONFIG_FTSDC010_USE_TIMER_DELAY ++#include "ftsdc010.h" ++#include ++MODULE_AUTHOR("Faraday Corp."); ++MODULE_LICENSE("Faraday License"); ++ ++/* options */ ++#define FORCE_PCI_CONSISTENCY 1 /* define it to 1 if met consistency problems */ ++#define KERNEL_SECTOR_SIZE 512 /* Use this when we refer to kernel related sector size */ ++ ++static int hardsect_size = 512; ++module_param(hardsect_size, int, 0); ++/*------------------------------------------------------------------------------ ++ * Predefine for block device ++ */ ++#define MAJOR_NR sd_major /* force definitions on in blk.h */ ++static int sd_major=0;//SD_MAJOR; /* must be declared before including blk.h */ ++#define SD_SHIFT 4 /* max 16 partitions */ ++#define DEVICE_NAME "Faraday SDC" /* name for messaging */ ++#define DEVICE_REQUEST sd_request ++#define 
DEVICE_NR(device) (MINOR(device) >> SD_SHIFT) ++//#define DEVICE_INTR sd_intrptr /* pointer to the bottom half */ ++#define DEVICE_NO_RANDOM /* no entropy to contribute */ ++#define DEVICE_OFF(d) /* do-nothing */ ++ ++/*#include ++#include */ /* blk_ioctl() */ ++ ++/*------------------------------------------------------------------------------ ++ * Macro definition ++ */ ++#define FTSDC_VA_BASE IP_VA_BASE(0) ++#define FTSDC_PA_BASE IP_PA_BASE(0) ++#define FTSDC_IRQ IP_IRQ(0) ++#define SDC_W_REG(offset, value) outl(value, IP_VA_BASE(0) + offset) ++#define SDC_R_REG(offset) inl(IP_VA_BASE(0) + offset) ++ ++ ++/*------------------------------------------------------------------------------ ++ * Global variable ++ */ ++ ++/* The following items are obtained through kmalloc() in sd_module_init() */ ++ ++struct block_device_operations sd_fops; ++/* our device structure */ ++struct sd_dev { ++ int size; /* device size in sectors */ ++ int usage; /* # of users currently */ ++ int media_change; /* Flag: media changed? */ ++ struct gendisk *gd; /* The gendisk structure */ ++ spinlock_t lock; /* For mutual exclusion */ ++ struct request_queue *queue; /* The device request queue */ ++ int card_state; ++ dmad_chreq ch_req; ++ bool dma_enable; ++}; ++static struct sd_dev *sd_devices = NULL; ++ ++static sd_card_t sd_card_info; ++static int sector_offset,Do_onetime; ++ ++dma_addr_t dma_buf = 0; /* ?? non-zero for for manually debug ?? */ ++struct completion sd_dma_cmpl; ++ ++static int sync_mode=0; ++ ++static uint first_run = 0; ++uint sd_err_code; ++ ++#define FILE_FORMAT_HARD_DISK_LIKE 0 ++#define FILE_FORMAT_FLOPPY_LIKE 1 ++#define FILE_FORMAT_UNIVERSAL 2 ++#define FILE_FORMAT_UNKNOW 3 ++#define FILE_FORMAT_RESERVED 4 ++ ++#define K 1000 ++ ++uint TAAC_TimeUnitTable[] = { // unit is ns ++ 1, 10, 100, 1 * K, 10 * K, 100 * K, 1 * K * K, 10 * K * K ++}; ++ ++uint TRANS_SPEED_RateUintTable[] = { ++ 100 * K, 1 * K * K, 10 * K * K, 100 * K * K ++}; ++ ++uint TRANS_SPEED_TimeValueTable_u[] = { // unit=1/10 ++ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 ++}; ++ ++uint VDD_CURR_MIN_Table_u[] = { // unit=1/10 ++ 5, 10, 50, 100, 250, 350, 600, 1000 ++}; ++ ++uint VDD_CURR_MAX_Table_u[] = { ++ 1, 5, 10, 25, 35, 45, 80, 200 ++}; ++ ++uint TAAC_TimeValueTable_u[] = { // unit=1/10 ++ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 ++}; ++ ++ ++unsigned int SDC_READ_FIFO_LEN; ++unsigned int SDC_WRITE_FIFO_LEN; ++ ++/*------------------------------------------------------------------------------ ++ * Local declaration of function ++ */ ++static int sd_revalidate(struct gendisk *gd); ++static int sd_media_changed(struct gendisk *gd); ++static int sd_card_setup(struct sd_dev *dev); ++int sd_check_err(uint status); ++int sd_get_scr(sd_card_t *info, uint *scr); ++int sd_set_transfer_state(sd_card_t *info); ++uint sd_block_size_convert(uint size); ++int sd_read_sector(sd_card_t *info, uint addr, uint count, unchar *buf); ++void sd_reset_host_controller(void); ++/*------------------------------------------------------------------------------ ++ * Local function ++ */ ++ ++/* ++ * SD host controller operation ++ */ ++int sdc_send_cmd(uint cmd, uint arg, uint *rsp) ++{ ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ int count = 0; ++#else ++ unsigned long timeout = jiffies + SDC_GET_STATUS_RETRY_TIMEOUT_COUNT; ++#endif ++ int i; ++ uint status; ++ P_DEBUG("SD Cmd is %d\n",cmd); ++ ++ /* clear command relative bits of status register */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_RSP_CRC_FAIL | 
SDC_STATUS_REG_RSP_TIMEOUT | SDC_STATUS_REG_RSP_CRC_OK| SDC_STATUS_REG_CMD_SEND); ++ /* write argument to arugument register if necessary */ ++ SDC_W_REG(SDC_ARGU_REG, arg); ++ /* send command */ ++ SDC_W_REG(SDC_CMD_REG, cmd | SDC_CMD_REG_CMD_EN); ++ ++ /* wait for the CMD_SEND bit of status register is set */ ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ while (count++ < SDC_GET_STATUS_RETRY_COUNT) { ++#else ++ while (time_before(jiffies, timeout)) { ++#endif ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if (!(cmd & SDC_CMD_REG_NEED_RSP)) { ++ /* if this command does not need response, wait command sent flag */ ++ if (status & SDC_STATUS_REG_CMD_SEND) { ++ /* clear command sent bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_CMD_SEND); ++ sd_err_code = ERR_NO_ERROR; ++ return TRUE; ++ } ++ } else { ++ /* if this command needs response */ ++ if (status & SDC_STATUS_REG_RSP_TIMEOUT) { ++ /* clear response timeout bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_RSP_TIMEOUT); ++ sd_err_code = ERR_RSP_TIMEOUT_ERROR; ++ printk("%s() ERR_RSP_TIMEOUT_ERROR\n", __func__); ++ return FALSE; ++ } else if (status & SDC_STATUS_REG_RSP_CRC_FAIL) { ++ /* clear response fail bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_RSP_CRC_FAIL); ++ sd_err_code = ERR_RSP_CRC_ERROR; ++ printk("%s() ERR_RSP_CRC_ERROR\n", __func__); ++ return FALSE; ++ } else if (status & SDC_STATUS_REG_RSP_CRC_OK) { ++ /* clear response OK bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_RSP_CRC_OK); ++ /* if it is long response */ ++ if (cmd & SDC_CMD_REG_LONG_RSP) ++ for (i = 0; i < 4; i++, rsp++) ++ *rsp = SDC_R_REG(SDC_RESPONSE0_REG + (i * 4)); ++ else ++ *rsp = SDC_R_REG(SDC_RESPONSE0_REG); ++ sd_err_code = ERR_NO_ERROR; ++ return TRUE; ++ } ++ } ++ } ++ sd_err_code = ERR_SEND_COMMAND_TIMEOUT; ++ P_DEBUG("%s() ERR_SEND_COMMAND_TIMEOUT\n", __func__); ++ return FALSE; ++} ++ ++int sdc_check_tx_ready(void) ++{ ++ uint status; ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ int count = 0; ++ while (count++ < SDC_GET_STATUS_RETRY_COUNT) { ++#else ++ unsigned long timeout = jiffies + SDC_GET_STATUS_RETRY_TIMEOUT_COUNT; ++ while (time_before(jiffies, timeout)) { ++#endif ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if (status & SDC_STATUS_REG_FIFO_UNDERRUN) { ++ /* clear FIFO underrun bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_FIFO_UNDERRUN); ++ return TRUE; ++ } else if (status & SDC_STATUS_REG_DATA_TIMEOUT) { ++ /* clear data timeout bit */ ++ printk("Wait Write FIFO TimeOut\n"); ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_TIMEOUT); ++ sd_err_code = ERR_DATA_TIMEOUT_ERROR; ++ return FALSE; ++ } ++ } ++ sd_err_code = ERR_WAIT_UNDERRUN_TIMEOUT; ++ P_DEBUG("%s() ERR_WAIT_UNDERRUN_TIMEOUT\n", __func__); ++ return FALSE; ++} ++ ++int sdc_check_rx_ready(void) ++{ ++ uint status; ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ int count = 0; ++ while (count++ < SDC_GET_STATUS_RETRY_COUNT) { ++#else ++ unsigned long timeout = jiffies + SDC_GET_STATUS_RETRY_TIMEOUT_COUNT; ++ while (time_before(jiffies, timeout)) { ++#endif ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if (status & SDC_STATUS_REG_FIFO_OVERRUN) { ++ /* clear FIFO overrun bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_FIFO_OVERRUN); ++ return TRUE; ++ } else if (status & SDC_STATUS_REG_DATA_TIMEOUT) { ++ /* clear data timeout bit */ ++ printk("Wait Read FIFO TimeOut\n"); ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_TIMEOUT); ++ sd_err_code = ERR_DATA_TIMEOUT_ERROR; ++ return FALSE; ++ } ++ } ++ sd_err_code = ERR_WAIT_OVERRUN_TIMEOUT; ++ P_DEBUG("%s() 
ERR_WAIT_OVERRUN_TIMEOUT\n", __func__); ++ return FALSE; ++} ++ ++int sdc_check_data_crc(void) ++{ ++ uint status=0; ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ int count = 0; ++ while (count++ < SDC_GET_STATUS_RETRY_COUNT) { ++#else ++ unsigned long timeout = jiffies + SDC_GET_STATUS_RETRY_TIMEOUT_COUNT; ++ while (time_before(jiffies, timeout)) { ++#endif ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if (status & SDC_STATUS_REG_DATA_CRC_OK) { ++ P_DEBUGG("%s : receive data ok, status=0x%x\n", __func__, status); ++ /* clear data CRC OK bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_CRC_OK); ++ return TRUE; ++ } else if (status & SDC_STATUS_REG_DATA_CRC_FAIL) { ++ /* clear data CRC fail bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_CRC_FAIL); ++ sd_err_code = ERR_DATA_CRC_ERROR; ++ printk("%s() ERR_DATA_CRC_ERROR\n", __func__); ++ return FALSE; ++ } else if (status & SDC_STATUS_REG_DATA_TIMEOUT) { ++ /* clear data timeout bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_TIMEOUT); ++ sd_err_code = ERR_DATA_TIMEOUT_ERROR; ++ printk("%s() ERR_DATA_TIMEOUT_ERROR\n", __func__); ++ return FALSE; ++ } ++ } ++ P_DEBUG("%s() ERR_WAIT_DATA_CRC_TIMEOUT, status=0x%x\n", __func__, status); ++ sd_err_code = ERR_WAIT_DATA_CRC_TIMEOUT; ++ return FALSE; ++} ++ ++static inline int sdc_check_data_end(void) ++{ ++ uint status; ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++ int count = 0; ++ while (count++ < SDC_GET_STATUS_RETRY_COUNT) { ++#else ++ unsigned long timeout = jiffies + SDC_GET_STATUS_RETRY_TIMEOUT_COUNT; ++ while (time_before(jiffies, timeout)) { ++#endif ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if (status & SDC_STATUS_REG_DATA_END) { ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_END); ++ return TRUE; ++ } else if (status & SDC_STATUS_REG_DATA_TIMEOUT) { ++ /* clear data timeout bit */ ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_DATA_TIMEOUT); ++ sd_err_code = ERR_DATA_TIMEOUT_ERROR; ++ printk("%s() ERR_DATA_TIMEOUT_ERROR\n", __func__); ++ return FALSE; ++ } ++ } ++ sd_err_code = ERR_WAIT_TRANSFER_END_TIMEOUT; ++ P_DEBUG("%s() ERR_WAIT_TRANSFER_END_TIMEOUT\n", __func__); ++ return FALSE; ++} ++ ++int sdc_set_bus_width_cmd(sd_card_t *info, uint width) ++{ ++ uint status; ++ ++ /* send CMD55 to indicate to the card that the next command is an application specific command */ ++ if (!sdc_send_cmd(SD_APP_CMD | SDC_CMD_REG_NEED_RSP, (((uint)info->RCA) << 16), &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ /* send ACMD6 to set bus width */ ++ if (!sdc_send_cmd(SD_SET_BUS_WIDTH_CMD | SDC_CMD_REG_APP_CMD | SDC_CMD_REG_NEED_RSP, width, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++ return TRUE; ++} ++ ++int sdc_set_bus_width(sd_card_t *info) ++{ ++ uint width; ++ ++ /* if it is not SD card, it does not support wide bus */ ++ if (info->CardType != MEMORY_CARD_TYPE_SD) ++ return TRUE; ++ /* get SCR register */ ++ if (!sd_get_scr(info, (uint *) &info->SCR)) ++ return FALSE; ++ /* if host controller does not support wide bus, return */ ++ if ((SDC_R_REG(SDC_BUS_WIDTH_REG) & SDC_WIDE_BUS_SUPPORT) != SDC_WIDE_BUS_SUPPORT) ++ return TRUE; ++ if (!sd_set_transfer_state(info)) ++ return FALSE; ++ if (info->SCR.SD_BUS_WIDTH & SD_SCR_4_BIT_BIT) ++ width = SD_BUS_WIDTH_4_BIT; ++ else ++ width = SD_BUS_WIDTH_1_BIT; ++ if (!sdc_set_bus_width_cmd(info, width)) ++ return FALSE; ++ if (width == SD_BUS_WIDTH_1_BIT) ++ SDC_W_REG(SDC_BUS_WIDTH_REG, SDC_BUS_WIDTH_REG_SINGLE_BUS); ++ else ++ SDC_W_REG(SDC_BUS_WIDTH_REG, SDC_BUS_WIDTH_REG_WIDE_BUS); 
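(Editorial aside, not part of the patch hunk.) The width decision made just above comes from the card's SCR register and is mirrored to the card with ACMD6 before the host register is switched. The sketch below is an illustrative reduction of that decision using raw values from the SD physical-layer specification; the driver's own SD_SCR_4_BIT_BIT / SD_BUS_WIDTH_* macros live in ftsdc010.h, outside this hunk, so pick_acmd6_arg() and the literal constants here are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Raw values per the SD physical-layer spec (illustrative only):
 *   SCR.SD_BUS_WIDTHS bit 2 set  -> card supports a 4-bit data bus
 *   ACMD6 argument 0b00 / 0b10   -> switch the card to 1-bit / 4-bit mode */
static unsigned int pick_acmd6_arg(uint8_t scr_bus_widths, bool host_has_wide_bus)
{
	if (host_has_wide_bus && (scr_bus_widths & (1u << 2)))
		return 2;	/* 4-bit transfers */
	return 0;		/* fall back to 1-bit */
}

int main(void)
{
	printf("SCR widths 0x5, wide-bus host -> ACMD6 arg %u\n",
	       pick_acmd6_arg(0x05, true));
	printf("SCR widths 0x1, wide-bus host -> ACMD6 arg %u\n",
	       pick_acmd6_arg(0x01, true));
	return 0;
}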
++ ++ return TRUE; ++} ++ ++static inline int sdc_pre_erase_cmd(uint nr_blocks) ++{ ++ uint status; ++ sd_card_t *info=&sd_card_info; ++ /* send CMD55 to indicate to the card that the next command is an application specific command */ ++ if (!sdc_send_cmd(SD_APP_CMD | SDC_CMD_REG_NEED_RSP, (((uint)info->RCA) << 16), &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ /* send ACMD6 to set bus width */ ++ if (!sdc_send_cmd(23 | SDC_CMD_REG_APP_CMD | SDC_CMD_REG_NEED_RSP, nr_blocks, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++ return TRUE; ++} ++ ++ ++uint sdc_set_bus_clock(sd_card_t *info, uint clock) ++{ ++ uint div = 0, reg; ++ ++ while (clock < (info->SysFrequency / (2 * (div + 1)))) ++ div++; ++ /* write clock divided */ ++ reg = SDC_R_REG(SDC_CLOCK_CTRL_REG); ++ reg &= (~SDC_CLOCK_REG_CLK_DIV | 0x80); //ijsung: preserv SD or MMC ++ reg += div & SDC_CLOCK_REG_CLK_DIV; ++ SDC_W_REG(SDC_CLOCK_CTRL_REG, reg); ++ P_DEBUG("%s: SD clock=%d, info->SysFrequency=%d, div=%d\n",__func__,clock, info->SysFrequency, div); ++ return info->SysFrequency / (2 * (div + 1)); ++} ++ ++static inline int sdc_set_block_size(uint size) ++{ ++ uint status; ++ static uint last_size=0; ++ if (size == last_size) ++ return TRUE; ++ else ++ last_size=size; ++ ++ if (!sdc_send_cmd(SD_SET_BLOCKLEN_CMD | SDC_CMD_REG_NEED_RSP, size, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ return TRUE; ++} ++ ++void sdc_set_card_type(int type) ++{ ++ uint reg; ++ ++ reg = SDC_R_REG(SDC_CLOCK_CTRL_REG); ++ reg &= ~SDC_CLOCK_REG_CARD_TYPE; ++ ++ if (type == MEMORY_CARD_TYPE_SD) { ++ reg |= SDC_CARD_TYPE_SD; ++ } ++ else { ++ reg |= SDC_CARD_TYPE_MMC; ++ } ++ ++ SDC_W_REG(SDC_CLOCK_CTRL_REG, reg); ++} ++ ++int sdc_read_block(sd_card_t *info, uint size, uint *buf) ++{ ++ /* ++ * Please refer SanDisk SD Manual v1.9 Section 5.1.9.2 (page 5-76) to set the timeout setting ++ */ ++ unsigned long timeout = jiffies + SDC_TIMEOUT_BASE*((size+511)>>9); ++ uint count, i; ++ dmad_chreq *ch_req = (dmad_chreq *)info->private; ++ dmad_drb *drb = 0; ++ u32 drb_size = 0; ++ dma_addr_t addr_iter; ++ ++ //if (info->DMAEnable) { ++ if ((info->DMAEnable) && ((size & 0xf) == 0)) { ++ P_DEBUG("%s:size=%d, buf=%p) - DMA Read\n", __func__, size,buf ); ++ P_DEBUG("dma_buf = %d\n", dma_buf); ++ ++ init_completion(&sd_dma_cmpl); ++ ++ if (dma_buf) ++ consistent_sync(__va(dma_buf), size, DMA_FROM_DEVICE); ++ else ++ consistent_sync(buf, size, DMA_FROM_DEVICE); ++ ++ //prepare parameter for add dma entry ++ dmad_config_channel_dir(ch_req, DMAD_DIR_A0_TO_A1); ++ ++ drb_size = dmad_max_size_per_drb(ch_req); ++ ++ if (dma_buf) ++ addr_iter = dma_buf; // given dest phy addr ++ else ++ addr_iter = __pa(buf); ++ ++ while (size > 0) { ++ ++ if (unlikely(0 != dmad_alloc_drb(ch_req, &drb) || (drb == 0))) { ++ printk(KERN_ERR "%s() Failed to allocate dma request block!\n", __func__); ++ return FALSE; ++ } ++ ++ drb->addr0 = FTSDC_PA_BASE + SDC_DATA_WINDOW_REG; ++ drb->addr1 = addr_iter; ++ ++ if (size <= drb_size) { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, size); ++ drb->sync = &sd_dma_cmpl; ++ size = 0; ++ } else { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, drb_size); ++ drb->sync = 0; ++ size -= drb_size; ++ addr_iter += drb_size; ++ } ++ //printk(KERN_INFO "%s() size_remain 0x%08x.\n", __func__, size); ++ ++ if (unlikely(0 != dmad_submit_request(ch_req, drb, 1))) { ++ printk(KERN_ERR "%s() Failed to submit dma request block!\n", __func__); ++ return FALSE; ++ } ++ } ++ 
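(Editorial aside, not part of the patch hunk.) The loop above splits one card transfer into DMA request blocks no larger than dmad_max_size_per_drb() and attaches the completion only to the last block, so the single wait that follows covers the whole transfer. Below is a self-contained sketch of just that chunking pattern; fake_drb, split_into_drbs() and the sizes used are illustrative stand-ins, not part of the dmad API.

#include <stdio.h>

/* Illustrative stand-ins; not part of the dmad API. */
struct fake_drb {
	unsigned long dst;	/* physical destination of this chunk */
	unsigned int  bytes;	/* chunk length */
	int           is_last;	/* completion is attached here only */
};

/* Split `size` bytes starting at `dst` into chunks of at most
 * `max_per_drb` bytes, as the driver does with dmad_alloc_drb(). */
static int split_into_drbs(unsigned long dst, unsigned int size,
			   unsigned int max_per_drb,
			   struct fake_drb *out, int max_drbs)
{
	int n = 0;

	while (size > 0 && n < max_drbs) {
		unsigned int chunk = (size <= max_per_drb) ? size : max_per_drb;

		out[n].dst     = dst;
		out[n].bytes   = chunk;
		out[n].is_last = (chunk == size);
		dst  += chunk;
		size -= chunk;
		n++;
	}
	return n;
}

int main(void)
{
	struct fake_drb drbs[8];
	int i, n = split_into_drbs(0x80000000ul, 25 * 512, 4096, drbs, 8);

	for (i = 0; i < n; i++)
		printf("drb %d: dst=0x%lx bytes=%u last=%d\n",
		       i, drbs[i].dst, drbs[i].bytes, drbs[i].is_last);
	return 0;
}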
++ if (wait_for_completion_timeout(&sd_dma_cmpl, timeout - jiffies) == 0) ++ printk("%s: read timeout\n", __func__); ++ } else { ++ while (size > 0) { ++ if (!sdc_check_rx_ready()) { ++ printk("error...........\n"); ++ return FALSE; ++ } ++ /* read data from FIFO */ ++ if (size >= (SDC_READ_FIFO_LEN << 2)) ++ count = SDC_READ_FIFO_LEN; ++ else ++ count = size >> 2; ++ /* read data from FIFO */ ++ P_DEBUG("\n"); ++ for (i = 1; i <= count; i++, buf++) ++ { ++ *buf = SDC_R_REG(SDC_DATA_WINDOW_REG); ++ P_DEBUG("%.8x ",*buf); ++ } ++ size -= (count << 2); ++ } ++ } ++ return sdc_check_data_crc(); ++} ++ ++int sdc_write_block(sd_card_t *info, uint size, uint *buf) ++{ ++ unsigned long timeout = jiffies + SDC_TIMEOUT_BASE*3*((size+511)>>9); ++ uint count, i; ++ dmad_chreq *ch_req = (dmad_chreq *)info->private; ++ dmad_drb *drb = 0; ++ u32 drb_size = 0; ++ dma_addr_t addr_iter; ++ ++ //if (info->DMAEnable) { ++ if ((info->DMAEnable) && ((size & 0xf) == 0)) { ++ P_DEBUG("%s:size=%d, buf=%p) - DMA Write\n", __func__, size,buf ); ++ ++ init_completion(&sd_dma_cmpl); ++ ++ if (dma_buf) ++ consistent_sync(__va(dma_buf), size, DMA_TO_DEVICE); ++ else ++ consistent_sync(buf, size, DMA_TO_DEVICE); ++ ++ //prepare parameter for add dma entry ++ dmad_config_channel_dir(ch_req, DMAD_DIR_A1_TO_A0); ++ ++ drb_size = dmad_max_size_per_drb(ch_req); ++ ++ if (dma_buf) ++ addr_iter = dma_buf; // given dest phy addr ++ else ++ addr_iter = __pa(buf); ++ ++ while (size > 0) { ++ ++ if (unlikely(0 != dmad_alloc_drb(ch_req, &drb) || (drb == 0))) { ++ printk(KERN_ERR "%s() Failed to allocate dma request block!\n", __func__); ++ return FALSE; ++ } ++ ++ drb->addr0 = FTSDC_PA_BASE + SDC_DATA_WINDOW_REG; ++ drb->addr1 = addr_iter; ++ ++ if (size <= drb_size) { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, size); ++ drb->sync = &sd_dma_cmpl; ++ size = 0; ++ } else { ++ drb->req_cycle = dmad_bytes_to_cycles(ch_req, drb_size); ++ drb->sync = 0; ++ size -= drb_size; ++ addr_iter += drb_size; ++ } ++ //printk(KERN_INFO "%s() size_remain 0x%08x.\n", __func__, size); ++ ++ if (unlikely(0 != dmad_submit_request(ch_req, drb, 1))) { ++ printk(KERN_ERR "%s() Failed to submit dma request block!\n", __func__); ++ return FALSE; ++ } ++ } ++ ++ if (wait_for_completion_timeout(&sd_dma_cmpl, timeout - jiffies) == 0) ++ printk("write timeout\n"); ++ } else { ++ while (size > 0) { ++ if (!sdc_check_tx_ready()) ++ return FALSE; ++ /* write data from FIFO */ ++ if (size >= (SDC_WRITE_FIFO_LEN << 2)) ++ count = SDC_WRITE_FIFO_LEN; ++ else ++ count = (size >> 2) ; ++ /* write data from FIFO */ ++ for (i = 0; i < count; i++, buf++) ++ SDC_W_REG(SDC_DATA_WINDOW_REG, *buf); ++ size -= (count << 2); ++ } ++ } ++ return sdc_check_data_crc(); ++} ++ ++void sdc_config_transfer(sd_card_t *SDCard, uint len, uint size, uint rw, uint timeout) ++{ ++ u32 con; ++ /* write timeout */ ++ SDC_W_REG(SDC_DATA_TIMER_REG, timeout * 2); ++ /* set data length */ ++ SDC_W_REG(SDC_DATA_LEN_REG, len); ++ ++ /* set data block */ ++ if (SDCard->DMAEnable) { ++ con = sd_block_size_convert(size) | SDC_DATA_CTRL_REG_DMA_EN | rw | SDC_DATA_CTRL_REG_DATA_EN; ++ con |= SDC_DMA_TYPE_4; ++ P_DEBUG("%s() transfer DMA mode\n", __func__); ++ SDC_W_REG(SDC_DATA_CTRL_REG, con); ++ } else { ++ P_DEBUG("%s() transfer nonDMA mode\n", __func__); ++ SDC_W_REG(SDC_DATA_CTRL_REG, sd_block_size_convert(size) | rw | SDC_DATA_CTRL_REG_DATA_EN); ++ } ++} ++ ++/* Note: This funciton may be called by interrupt handler */ ++void sdc_reset(void) ++{ ++ uint ret; ++ unsigned long delay = jiffies + 
(HZ/10)*3; //Delay 300ms ++ ++ /* reset host interface */ ++ SDC_W_REG(SDC_CMD_REG, SDC_CMD_REG_SDC_RST); ++ ++ /* loop, until the reset bit is clear */ ++ do { ++ ret = SDC_R_REG(SDC_CMD_REG); ++ } while ((ret & SDC_CMD_REG_SDC_RST) != 0); ++ #if 0 ++ udelay(1000); ++ #else ++ while(time_before(jiffies, delay)); ++ #endif ++} ++ ++/* ++ * SD card operation ++ */ ++void sd_endian_change(uint *dt, int len) ++{ ++ uint ul; ++ ++ for(; len > 0; len--, dt++) { ++ ul = *dt; ++ ((unchar *)dt)[0] = ((unchar *)&ul)[3]; ++ ((unchar *)dt)[1] = ((unchar *)&ul)[2]; ++ ((unchar *)dt)[2] = ((unchar *)&ul)[1]; ++ ((unchar *)dt)[3] = ((unchar *)&ul)[0]; ++ } ++} ++ ++int sd_get_ocr(sd_card_t *info, uint hocr, uint *cocr) ++{ ++ uint status; ++ int count = 0; ++ ++ do { ++ if (info->CardType == MEMORY_CARD_TYPE_SD) { ++ /* send CMD55 to indicate to the card that the next command is an application specific command */ ++ if (!sdc_send_cmd(SD_APP_CMD | SDC_CMD_REG_NEED_RSP, ((uint) info->RCA) << 16, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ /* send ACMD41 to get OCR register */ ++ if (!sdc_send_cmd(SD_APP_OP_COND | SDC_CMD_REG_APP_CMD | SDC_CMD_REG_NEED_RSP, (uint) hocr, (uint *) cocr)) ++ return FALSE; ++ } else { ++ /* send CMD1 to get OCR register */ ++ if (!sdc_send_cmd(SD_MMC_OP_COND | SDC_CMD_REG_NEED_RSP, (uint) hocr, (uint *) cocr)) ++ return FALSE; ++ } ++ if (count++ > SD_CARD_GET_OCR_RETRY_COUNT) { ++ sd_err_code = ERR_SD_CARD_IS_BUSY; ++ printk("%s : ERR_SD_CARD_IS_BUSY\n", __func__); ++ return FALSE; ++ } ++ udelay(1000); /* According to spec, at most 1 msec or 74 clock cycles */ ++ } while ((*cocr & SD_OCR_BUSY_BIT) != SD_OCR_BUSY_BIT); ++ ++ return TRUE; ++} ++ ++int sd_get_scr(sd_card_t *info, uint *scr) ++{ ++ uint status; ++ ++ if (!sd_set_transfer_state(info)) ++ return FALSE; ++ if (!sdc_set_block_size(8)) ++ return FALSE; ++ sdc_config_transfer(info, 8, 8, SDC_DATA_CTRL_REG_DATA_READ, 0xFFFFFFFF); ++ /* send CMD55 to indicate to the card that the next command is an application specific command */ ++ if (!sdc_send_cmd(SD_APP_CMD | SDC_CMD_REG_NEED_RSP, ((uint) info->RCA) << 16, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ /* send ACMD51 to get SCR */ ++ if (!sdc_send_cmd(SD_SEND_SCR_CMD | SDC_CMD_REG_APP_CMD | SDC_CMD_REG_NEED_RSP, 0, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ if (!sdc_read_block(info, 8, (uint *) scr)) ++ return FALSE; ++ if (!sdc_check_data_end()) ++ return FALSE; ++ sd_endian_change(scr, 2); ++ ++ return TRUE; ++} ++ ++int sd_check_err(uint status) ++{ ++ if (status & SD_STATUS_ERROR_BITS) { ++ sd_err_code = ERR_SD_CARD_STATUS_ERROR; ++ printk("%s() ERR_SD_CARD_STATUS_ERROR %X\n", __func__, status); ++ return FALSE; ++ } ++ sd_err_code = ERR_NO_ERROR; ++ return TRUE; ++} ++ ++int sd_get_card_state(sd_card_t *info, uint *ret) ++{ ++ uint status; ++ ++ /* send CMD13 to get card status */ ++ if (!sdc_send_cmd(SD_SEND_STATUS_CMD | SDC_CMD_REG_NEED_RSP, ((uint) info->RCA) << 16, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ *ret = (status & SD_STATUS_CURRENT_STATE) >> SD_STATUS_CURRENT_STATE_LOC; ++ return TRUE; ++} ++ ++int sd_operation_complete(sd_card_t *info, uint finish) ++{ ++ uint state; ++ int count = 0; ++ while (count++ < SD_CARD_WAIT_OPERATION_COMPLETE_RETRY_COUNT) { ++ if (!sd_get_card_state(info, &state)) ++ return FALSE; ++ if (state == finish) ++ return TRUE; ++ } ++ P_DEBUG("%s() error\n", __func__); ++ return FALSE; ++} ++ ++int 
sd_stop_transmission(void) ++{ ++ uint status; ++ ++ /* send CMD12 to stop transmission */ ++ if (!sdc_send_cmd(SD_STOP_TRANSMISSION_CMD | SDC_CMD_REG_NEED_RSP, 0, &status)) ++ return FALSE; ++ ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++ return TRUE; ++} ++ ++int sd_set_card_standby(sd_card_t *info) ++{ ++ uint state; ++ int count = 0; ++ while (count++ < SD_CARD_STATE_CHANGE_RETRY_COUNT) { ++ if (!sd_get_card_state(info, &state)) ++ return FALSE; ++ ++ switch (state) { ++ case SD_IDLE_STATE: ++ case SD_READY_STATE: ++ case SD_IDENT_STATE: ++ printk("%s() error\n", __func__); ++ return FALSE; ++ case SD_DIS_STATE: ++ return sd_operation_complete(info, SD_STBY_STATE); ++ case SD_TRAN_STATE: ++ if (!sdc_send_cmd(SD_SELECT_CARD_CMD, 0, NULL)) ++ return FALSE; ++ break; ++ case SD_DATA_STATE: ++ if (sd_operation_complete(info, SD_TRAN_STATE)) ++ return TRUE; ++ if (sd_err_code != ERR_NO_ERROR) ++ return FALSE; ++ if (!sd_stop_transmission()) ++ return FALSE; ++ break; ++ case SD_RCV_STATE: ++ if (sd_operation_complete(info, SD_TRAN_STATE)) ++ return TRUE; ++ if (sd_err_code != ERR_NO_ERROR) ++ return FALSE; ++ if (!sd_stop_transmission()) ++ return FALSE; ++ break; ++ case SD_PRG_STATE: ++ if (!sd_operation_complete(info, SD_TRAN_STATE)) ++ return FALSE; ++ break; ++ case SD_STBY_STATE: ++ return TRUE; ++ } ++ } ++ P_DEBUG("%s() error\n", __func__); ++ return FALSE; ++} ++ ++uint two_power(uint n) ++{ ++ uint pow = 1; ++ ++ for (; n > 0; n--) ++ pow <<= 1; ++ return pow; ++} ++ ++int sd_csd_parse(sd_csd_t *csd, uint *csd_word) ++{ ++ sd_csd_bit_t *csd_bit; ++ uint mult, blocks, len; ++ ++ if ((csd_word[0] & 0x00000001) != 1) { ++ sd_err_code = ERR_CSD_REGISTER_ERROR; ++ printk("%s() ERR_CSD_REGISTER_ERROR\n", __func__); ++ return FALSE; ++ } ++ ++ csd_bit = (sd_csd_bit_t *) csd_word; ++ csd->CSDStructure = csd_bit->CSD_STRUCTURE; ++ csd->MMCSpecVersion = csd_bit->MMC_SPEC_VERS; ++ csd->TAAC_u = TAAC_TimeValueTable_u[csd_bit->TAAC_TimeValue] * TAAC_TimeUnitTable[csd_bit->TAAC_TimeUnit] / 10; ++ csd->NSAC_u = csd_bit->NSAC * 100; ++ csd->TransferSpeed = TRANS_SPEED_RateUintTable[csd_bit->TRAN_SPEED_RateUnit] * TRANS_SPEED_TimeValueTable_u[csd_bit->TRAN_SPEED_TimeValue] / 10; ++ csd->CardCmdClass = csd_bit->CCC; ++ csd->ReadBlockLength = two_power(csd_bit->READ_BL_LEN); ++ csd->ReadBlockPartial = csd_bit->READ_BL_PARTIAL; ++ csd->WriteBlockMisalign = csd_bit->WRITE_BLK_MISALIGN; ++ csd->ReadBlockMisalign = csd_bit->READ_BLK_MISALIGN; ++ csd->DSRImplemant = csd_bit->DSR_IMP; ++ mult = 1 << (csd_bit->C_SIZE_MULT + 2); ++ blocks = ((csd_bit->C_SIZE_1 | (csd_bit->C_SIZE_2 << 2)) + 1) * mult; ++ len = 1 << (csd_bit->READ_BL_LEN); ++ csd->BlockNumber = blocks; ++ csd->MemorySize = blocks * len; ++ csd->VDDReadMin_u = VDD_CURR_MIN_Table_u[csd_bit->VDD_R_CURR_MIN]; ++ csd->VDDReadMax_u = VDD_CURR_MAX_Table_u[csd_bit->VDD_R_CURR_MAX]; ++ csd->VDDWriteMin_u = VDD_CURR_MIN_Table_u[csd_bit->VDD_W_CURR_MIN]; ++ csd->VDDWriteMax_u = VDD_CURR_MAX_Table_u[csd_bit->VDD_W_CURR_MAX]; ++ csd->EraseBlkEnable = csd_bit->ERASE_BLK_ENABLE; ++ csd->EraseSectorSize = csd_bit->ERASE_SECTOR_SIZE + 1; ++ csd->WriteProtectGroupSize = csd_bit->WP_GRP_SIZE + 1; ++ csd->WriteProtectGroupEnable = csd_bit->WP_GRP_ENABLE; ++ csd->WriteSpeedFactor = two_power(csd_bit->R2W_FACTOR); ++ csd->WriteBlockLength = two_power(csd_bit->WRITE_BL_LEN); ++ csd->WriteBlockPartial = csd_bit->WRITE_BL_PARTIAL; ++ csd->CopyFlag = csd_bit->COPY; ++ csd->PermanentWriteProtect = csd_bit->PERM_WRITE_PROTECT; ++ csd->TemporaryWriteProtect = 
csd_bit->TMP_WRITE_PROTECT; ++ ++ if (csd_bit->FILE_FORMAT_GRP == 0) ++ csd->FileFormat = csd_bit->FILE_FORMAT; ++ else ++ csd->FileFormat = FILE_FORMAT_RESERVED; ++ ++ return TRUE; ++} ++ ++int sd_cid_parse(sd_cid_t *cid, uint *cid_word) ++{ ++ unchar *ptr; ++ int i; ++ ++ if ((cid_word[0] & 0x00000001) != 1) ++ { ++ sd_err_code = ERR_CID_REGISTER_ERROR; ++ printk("%s() ERR_CID_REGISTER_ERROR\n", __func__); ++ return FALSE; ++ } ++ ++ cid->ManufacturerID = (cid_word[3] & 0xFF000000) >> 24; ++ cid->ApplicationID = (cid_word[3] & 0x00FFFF00) >> 8; ++ ++ ptr = (unchar *) cid_word; ++ ptr += 15 - 3; ++ for (i = 0; i < 6; i++, ptr--) ++ cid->ProductName[i] = *ptr; ++ cid->ProductName[6] = '\0' ; ++ ++ cid->ProductRevisionLow = (cid_word[1] & 0x00F00000) >> 20; ++ cid->ProductRevisionHigh = (cid_word[1] & 0x000F0000) >> 16; ++ cid->ProductSerialNumber = ((cid_word[1] & 0x0000FFFF) << 16) + ((cid_word[0] & 0xFFFF0000) >> 16); ++ cid->ManufactureMonth = ((cid_word[0] & 0x00000F00) >> 8); ++ cid->ManufactureYear = ((cid_word[0] & 0x0000F000) >> 12) + SD_DEFAULT_YEAR_CODE; ++ ++ return TRUE; ++} ++ ++uint sd_read_timeout_cycle(uint clock, sd_csd_t *csd) ++{ ++#if 1 //ivan for 100ms maximux from document "ProdManualIndGradeSDv1.0[1].pdf" chapter A-2 ++ return clock/10; // /10; ++#else ++ uint ret, total, per; ++ ++ per = 1000000000 / clock; ++ total = (csd->TAAC_u + (csd->NSAC_u * 100 * per)) * 100; ++ ++ if (total > (100 * 1000 * 1000)) ++ total = 100 * 1000 * 1000; ++ ret = total / per; ++ ++ return ret; ++#endif ++} ++ ++uint sd_block_size_convert(uint size) ++{ ++ uint ret = 0; ++ ++ while (size >= 2) { ++ size >>= 1; ++ ret++; ++ } ++ return ret; ++} ++ ++int sd_select_card(sd_card_t *info) ++{ ++ uint status; ++ ++ /* send CMD7 with valid RCA to select */ ++ if (!sdc_send_cmd(SD_SELECT_CARD_CMD | SDC_CMD_REG_NEED_RSP, ((uint)info->RCA) << 16, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ return TRUE; ++} ++ ++int sd_set_transfer_state(sd_card_t *info) ++{ ++ uint state; ++ int count = 0; ++ while (count++ < SD_CARD_STATE_CHANGE_RETRY_COUNT) { ++ if (!sd_get_card_state(info, &state)) ++ return FALSE; ++ ++ switch (state) { ++ case SD_IDLE_STATE: ++ case SD_READY_STATE: ++ case SD_IDENT_STATE: ++ printk("%s() error\n", __func__); ++ return FALSE; ++ case SD_DIS_STATE: ++ if (!sd_operation_complete(info, SD_STBY_STATE)) ++ return FALSE; ++ break; ++ case SD_TRAN_STATE: ++ return TRUE; ++ case SD_DATA_STATE: ++ if (sd_operation_complete(info, SD_TRAN_STATE)) ++ return TRUE; ++ if (sd_err_code != ERR_NO_ERROR) ++ return FALSE; ++ if (!sd_stop_transmission()) ++ return FALSE; ++ break; ++ case SD_RCV_STATE: ++ if (sd_operation_complete(info, SD_TRAN_STATE)) ++ return TRUE; ++ if (sd_err_code != ERR_NO_ERROR) ++ return FALSE; ++ if (!sd_stop_transmission()) ++ return FALSE; ++ break; ++ case SD_PRG_STATE: ++ if (!sd_operation_complete(info, SD_TRAN_STATE)) ++ return FALSE; ++ break; ++ case SD_STBY_STATE: ++ if (!sd_select_card(info)) ++ return FALSE; ++ } ++ } ++ P_DEBUG("%s() error\n", __func__); ++ return FALSE; ++} ++ ++uint sd_write_timeout_cycle(uint clock, sd_csd_t *CSD) ++{ ++#if 1 //ivan for 250ms maximux from document ++ return clock/4; //ijsung hack ++#else ++ uint ret, total, pre; ++ ++ pre = 1000000000 / clock; ++ total = CSD->WriteSpeedFactor * 100 * (CSD->TAAC_u + (CSD->NSAC_u * 100 * pre)); ++ ++ if (total > (100 * 1000 * 1000)) ++ total = 100 * 1000 * 1000; ++ ret = total / pre; ++ ++ return ret; ++#endif ++} ++ ++int sd_card_identify(sd_card_t *info) 
++{ ++ uint rca, status, cid[4]; ++ ++ /* reset all cards */ ++ if (!sdc_send_cmd(SD_GO_IDLE_STATE_CMD, 0, NULL)) ++ return FALSE; ++ udelay(1000); ++ /* Do operating voltage range validation */ ++ /* get OCR register */ ++ if (!sd_get_ocr(info, SDC_OCR, (uint *) &info->OCR)) ++ return FALSE; ++ /* check the operation conditions */ ++ if ((info->OCR & SDC_OCR) == 0) { ++ sd_err_code = ERR_OUT_OF_VOLF_RANGE; ++ return FALSE; ++ } ++ ++ /* send CMD2 to get CID register */ ++ if (!sdc_send_cmd(SD_ALL_SEND_CID_CMD | SDC_CMD_REG_NEED_RSP | SDC_CMD_REG_LONG_RSP, 0, cid)) ++ return FALSE; ++ if (info->CardType == MEMORY_CARD_TYPE_SD) { ++ /* send CMD3 to get RCA register */ ++ if (!sdc_send_cmd(SD_SEND_RELATIVE_ADDR_CMD | SDC_CMD_REG_NEED_RSP, 0, &rca)) ++ return FALSE; ++ info->RCA = (ushort) (rca >> 16); ++ } else { ++ /* so far, we only support one interface, so we can give RCA any value */ ++ info->RCA = 0x1; ++ /* send CMD3 to set RCA register */ ++ if (!sdc_send_cmd(SD_SEND_RELATIVE_ADDR_CMD | SDC_CMD_REG_NEED_RSP, (info->RCA << 16), &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++int sd_card_init(sd_card_t *info) ++{ ++ uint clock; ++ ++ P_DEBUG("--> %s\n", __func__); ++ if ((SDC_R_REG(SDC_STATUS_REG) & SDC_STATUS_REG_CARD_INSERT) != SDC_CARD_INSERT) ++ return FALSE; ++ sd_err_code = ERR_NO_ERROR; ++ /* At first, set card type to SD */ ++ info->CardType = MEMORY_CARD_TYPE_SD; ++ /* set memory card type */ ++ sdc_set_card_type(info->CardType); ++ /* start card identification process */ ++ if (!sd_card_identify(info)) { ++ printk("this is not SD card\n"); ++ sd_err_code = ERR_NO_ERROR; ++ info->CardType = MEMORY_CARD_TYPE_MMC; ++ /* set memory card type */ ++ sdc_set_card_type(info->CardType); ++ if (!sd_card_identify(info)) ++ return FALSE; ++ } ++ ++ /* get CSD */ ++ if (!sd_set_card_standby(info)) ++ return FALSE; ++ /* send CMD9 to get CSD register */ ++ if (!sdc_send_cmd(SD_SEND_CSD_CMD | SDC_CMD_REG_NEED_RSP | SDC_CMD_REG_LONG_RSP, ((uint) info->RCA) << 16, info->CSDWord)) ++ return FALSE; ++ sd_csd_parse(&info->CSD, info->CSDWord); ++ ++ if (info->CSD.ReadBlockLength != SD_SECTOR_SIZE) { ++ printk("Sector size is mis-matched (SD CSD report=0x%X,SD_SECTOR_SIZE=0x%X)\n", info->CSD.ReadBlockLength, SD_SECTOR_SIZE); ++ info->CSD.ReadBlockLength = SD_SECTOR_SIZE; ++// return FALSE; ++ } ++ ++ /* get CID */ ++ /* send CMD10 to get CID register */ ++ if (!sdc_send_cmd(SD_SEND_CID_CMD | SDC_CMD_REG_NEED_RSP | SDC_CMD_REG_LONG_RSP, ((uint) info->RCA) << 16, info->CIDWord)) ++ return FALSE; ++ sd_cid_parse(&info->CID, info->CIDWord); ++ ++ /* Set card bus clock. sdc_set_bus_clock() will return the real card bus clock that has been set. */ ++ clock = sdc_set_bus_clock(info, info->CSD.TransferSpeed); ++ info->ReadAccessTimoutCycle = sd_read_timeout_cycle(clock, &(info->CSD)); ++ info->WriteAccessTimoutCycle = sd_write_timeout_cycle(clock, &(info->CSD)); ++ /* set bus width */ ++ if (!sdc_set_bus_width(info)) ++ return FALSE; ++ ++ /* check write protect */ ++ info->WriteProtect = ((SDC_R_REG(SDC_STATUS_REG) & SDC_STATUS_REG_CARD_LOCK) == SDC_STATUS_REG_CARD_LOCK) ?
TRUE : FALSE; ++ if(info->WriteProtect == TRUE) ++ printk("SD/MMC Card is Write Protected\n"); ++ info->ActiveState = TRUE; ++ P_DEBUG("<-- %s\n", __func__); ++ ++ return TRUE; ++} ++ ++int sd_card_insert(sd_card_t *info) ++{ ++ P_DEBUG("--> %s\n", __func__); ++ /* reset host interface controller */ ++ sdc_reset(); ++ /* turn on clock using default clock*/ ++ SDC_W_REG(SDC_CLOCK_CTRL_REG, SDC_R_REG(SDC_CLOCK_CTRL_REG)&0xff); ++ ++ if (!sd_card_init(info)) { ++ printk("root initialize failed\n"); ++ return FALSE; ++ } ++ /* set interrupt mask register */ ++ SDC_W_REG(SDC_INT_MASK_REG, SDC_STATUS_REG_CARD_CHANGE | SDC_STATUS_REG_DATA_TIMEOUT); ++ P_DEBUG("<-- %s\n", __func__); ++ ++ return TRUE; ++} ++ ++int sd_card_remove(sd_card_t *info) ++{ ++ sd_err_code = ERR_NO_ERROR; ++ ++ info->ActiveState = FALSE; ++ info->WriteProtect = FALSE; ++ info->RCA = 0; ++ /* reset host interface controller */ ++ sdc_reset(); ++ /* set interrupt mask register */ ++ SDC_W_REG(SDC_INT_MASK_REG, SDC_STATUS_REG_CARD_CHANGE | SDC_STATUS_REG_DATA_TIMEOUT); ++ sd_err_code = ERR_CARD_NOT_EXIST; ++ /* turn off clock */ ++ SDC_W_REG(SDC_CLOCK_CTRL_REG, SDC_R_REG(SDC_CLOCK_CTRL_REG) | 0x100); ++ return TRUE; ++} ++ ++irqreturn_t sd_hotswap_interrupt_handler(int irq, void *dev_id) ++{ ++ uint status; ++ struct sd_dev *dev = dev_id; ++ ++ P_DEBUG("--> %s, irq=%d\n", __func__, irq); ++ /* When the card is inserted or removed, we must delay a short time to make sure */ ++ /* the SDC_STATUS_REG_CARD_INSERT bit of status register is stable */ ++ udelay(1000); ++ status = SDC_R_REG(SDC_STATUS_REG); ++ if ((status & SDC_STATUS_REG_CARD_CHANGE) == SDC_STATUS_REG_CARD_CHANGE) { ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_CARD_CHANGE | SDC_STATUS_REG_DATA_TIMEOUT); ++ if ((status & SDC_STATUS_REG_CARD_INSERT) == SDC_CARD_INSERT) { ++ dev->card_state = SD_CARD_INSERT; ++ printk("Card Insert\n"); ++ sd_card_insert(&sd_card_info); ++ } else { ++ dev->card_state = SD_CARD_REMOVE; ++ printk("Card Remove\n"); ++ sd_card_remove(&sd_card_info); ++ /* remove all current transfers as I/O error*/ ++#if 0 //ASYNC ++ spin_lock_irqsave(&io_request_lock, status); ++ INIT_REQUEST; ++ while(!QUEUE_EMPTY) ++ end_request(0); ++ bh_busy=0; ++ spin_unlock_irqrestore(&io_request_lock, status); ++#endif ++ } ++ } else if ((status & SDC_STATUS_REG_DATA_TIMEOUT) == SDC_STATUS_REG_DATA_TIMEOUT) { ++ SDC_W_REG(SDC_CLEAR_REG, SDC_STATUS_REG_CARD_CHANGE | SDC_STATUS_REG_DATA_TIMEOUT); ++ ++#if 0 //ASYNC ++ printk("Data timeout. Retry.\n"); ++ sd_clustered_bh(2); ++#else ++ printk("Data timeout. Retry.\n"); ++#endif ++ } ++ P_DEBUGG("card state=%d\n", dev->card_state); ++ P_DEBUG("<-- %s\n", __func__); ++ return IRQ_HANDLED; ++} ++ ++/*------------------------------------ ++ * Block-driver specific functions ++ */ ++/* ++ * Find the device for this request. 
++ */ ++#if 0 ++static inline struct sd_dev *sd_locate_device(const struct request *req) ++{ ++ int devno; ++ struct sd_dev *dev; ++ ++ P_DEBUG("--> %s\n", __func__); ++#if 0 ++ /* Check if the minor number is in range */ ++ devno = DEVICE_NR(req->rq_dev); ++ P_DEBUGG("minor=%d\n", devno); ++ if (devno >= SD_DEVS) { ++ static int count = 0; ++ ++ if (count++ < 5) /* print the message at most five times */ ++ P_DEBUG("request for unknown device\n"); ++ return NULL; ++ } ++#endif ++ dev = sd_devices + devno; ++ P_DEBUGG("card_state=%d\n", dev->card_state); ++ P_DEBUG("<-- %s\n", __func__); ++ return dev; ++} ++ ++int sd_card_check_exist(sd_card_t *info) ++{ ++ /* if card is not exist */ ++ if ((SDC_R_REG(SDC_STATUS_REG) & SDC_STATUS_REG_CARD_INSERT) != SDC_CARD_INSERT) { ++ sd_card_remove(info); ++ return FALSE; ++ } ++ /* if card is not active */ ++ if (!info->ActiveState) ++ { ++ return sd_card_insert(info); ++ } ++ return TRUE; ++} ++#endif ++ ++void sd_reset_host_controller(void) ++{ ++ uint clock, mask, width; ++ ++ /* read register */ ++ clock = SDC_R_REG(SDC_CLOCK_CTRL_REG); ++ width = SDC_R_REG(SDC_BUS_WIDTH_REG); ++ mask = SDC_R_REG(SDC_INT_MASK_REG); ++ /* reset host interface */ ++ sdc_reset(); ++ /* restore register */ ++ SDC_W_REG(SDC_CLOCK_CTRL_REG, clock); ++ SDC_W_REG(SDC_BUS_WIDTH_REG, width); ++ SDC_W_REG(SDC_INT_MASK_REG, mask); ++} ++ ++int sd_read_single_block(sd_card_t *info, uint addr, uint size, uint timeout, unchar *buf) ++{ ++ uint status; ++ ++ if (!sdc_set_block_size(size)) ++ return FALSE; ++ ++ sdc_config_transfer(info, size, size, SDC_DATA_CTRL_REG_DATA_READ, timeout); ++ ++ if (!sdc_send_cmd(SD_READ_SINGLE_BLOCK_CMD | SDC_CMD_REG_NEED_RSP, addr, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++#ifdef DELAY_FOR_DMA_READ ++ if (first_run==0) { ++ int i=0; ++ for(i=0;i<10;i++) ++ udelay(1000); ++ first_run=1; ++ } ++#endif ++ if (!sdc_read_block(info, size, (uint *) buf)) ++ return FALSE; ++ ++ if (sd_err_code != ERR_NO_ERROR) { ++ printk("%s() error=0x%X\n", __func__, sd_err_code); ++ sd_reset_host_controller(); ++ return FALSE; ++ } else { ++ if (!sdc_check_data_end()) { ++ sd_stop_transmission(); ++ printk("%s()2 error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } ++ } ++ ++ return TRUE; ++} ++ ++int sd_write_single_block(sd_card_t *info, uint addr, uint size, uint timeout, unchar *buf) ++{ ++ uint status; ++ ++ if (!sdc_set_block_size(size)) ++ return FALSE; ++ ++ sdc_config_transfer(info, size, size, SDC_DATA_CTRL_REG_DATA_WRITE, timeout); ++ ++ if (!sdc_send_cmd(SD_WRITE_SINGLE_BLOCK_CMD | SDC_CMD_REG_NEED_RSP, addr, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++ if (!sdc_write_block(info, size, (uint *) buf)) ++ return FALSE; ++ if (sd_err_code != ERR_NO_ERROR) { ++ printk("%s() error=0x%X\n", __func__, sd_err_code); ++ sd_reset_host_controller(); ++ return FALSE; ++ } else { ++ if (!sdc_check_data_end()) { ++ sd_stop_transmission(); ++ printk("%s()2 error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } ++ } ++ ++ return TRUE; ++} ++ ++int sd_read_multiple_block(sd_card_t *info, uint addr, uint count, uint size, uint timeout, unchar *buf) ++{ ++ uint err, status; ++ ++ if (!sdc_set_block_size(size)) ++ return FALSE; ++ ++ sdc_config_transfer(info, count * size, size, SDC_DATA_CTRL_REG_DATA_READ, timeout); ++ ++ if (!sdc_send_cmd(SD_READ_MULTIPLE_BLOCK_CMD | SDC_CMD_REG_NEED_RSP, addr, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++ ++#ifdef 
DELAY_FOR_DMA_READ ++ if (first_run==0) { ++ int i=0; ++ for(i=0;i<10;i++) ++ udelay(1000); ++ first_run=1; ++ } ++#endif ++#if 0 //ijsung: Sometimes this will cause IRQ lost, and is slower. Use method below ++ while (count > 0) { ++ if (!sdc_read_block(info, size, (uint *) buf)) ++ return FALSE; ++ count--; ++ buf += size; ++ } ++#else //ijsung: DMA at once. ++ if (!sdc_read_block(info, size*count, (uint *) buf)) ++ return FALSE; ++#endif ++ if (sd_err_code != ERR_NO_ERROR) { ++ err = sd_err_code; ++ sd_stop_transmission(); ++ sd_reset_host_controller(); ++ sd_err_code |= err; ++ printk("%s() error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } else { ++ if (!sdc_check_data_end()) { ++ err = sd_err_code; ++ sd_stop_transmission(); ++ sd_err_code |= err; ++ printk("%s()2 error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } ++ if (!sd_stop_transmission()) ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++int sd_write_multiple_block(sd_card_t *info, uint addr, uint count, uint size, uint timeout, unchar *buf) ++{ ++ uint ErrorCode, status; ++ ++ if(!sdc_set_block_size(size)) ++ return FALSE; ++ ++ sdc_config_transfer(info, count * size, size, SDC_DATA_CTRL_REG_DATA_WRITE, timeout); ++ sdc_pre_erase_cmd(count); //ijsung: pre-erase ++ if (!sdc_send_cmd(SD_WRITE_MULTIPLE_BLOCK_CMD | SDC_CMD_REG_NEED_RSP, addr, &status)) ++ return FALSE; ++ if (!sd_check_err(status)) ++ return FALSE; ++#if 0 //ijsung: Sometimes this will cause IRQ lost, and is slower. Use method below ++ while (count > 0) { ++ if (!sdc_write_block(info, size, (uint *) buf)) ++ return FALSE; ++ count--; ++ buf += size; ++ } ++#else //ijsung: DMA at once. ++ if (!sdc_write_block(info, size*count, (uint *) buf)) ++ return FALSE; ++#endif ++ ++ if (sd_err_code != ERR_NO_ERROR) ++ { ++ ErrorCode = sd_err_code; ++ sd_stop_transmission(); ++ sd_reset_host_controller(); ++ sd_err_code |= ErrorCode; ++ printk("%s() error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } else { ++ if (!sdc_check_data_end()) { ++ ErrorCode = sd_err_code; ++ sd_stop_transmission(); ++ sd_err_code |= ErrorCode; ++ printk("%s()2 error=0x%X\n", __func__, sd_err_code); ++ return FALSE; ++ } ++ if (!sd_stop_transmission()) ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++int sd_wait_transfer_state(sd_card_t *info) ++{ ++ uint state; ++ int count = 0; ++ while (count++ < SD_CARD_WAIT_TRANSFER_STATE_RETRY_COUNT) { ++ if (!sd_get_card_state(info, &state)) ++ return FALSE; ++ ++ switch (state) { ++ case SD_IDLE_STATE: ++ case SD_READY_STATE: ++ case SD_IDENT_STATE: ++ case SD_DIS_STATE: ++ case SD_STBY_STATE: ++ printk("%s() error\n", __func__); ++ return FALSE; ++ case SD_TRAN_STATE: ++ return TRUE; ++ case SD_DATA_STATE: ++ case SD_RCV_STATE: ++ case SD_PRG_STATE: ++ break; ++ } ++ } ++ sd_err_code = ERR_SD_CARD_IS_BUSY; ++ P_DEBUG("%s() ERR_SD_CARD_IS_BUSY\n", __func__); ++ return FALSE; ++} ++ ++/*************************************************************************** ++SD Card Read/Write/Erase Function ++***************************************************************************/ ++int sd_read_sector(sd_card_t *info, uint addr, uint count, unchar *buf) ++{ ++ int cnt; ++ uint start; ++ sync_mode=1; ++ P_DEBUG("%s : sector = %d,count = %d\n",__func__,addr,count); ++ if (count > MAX_READ_SECTOR_NR) { ++ P_DEBUG("Readable Block Number Per Commands is 0x%X\n",MAX_READ_SECTOR_NR); ++ return FALSE; ++ } ++ ++ sd_err_code = ERR_NO_ERROR; ++ ++ if (!info->ActiveState) { ++ P_DEBUG("%s : SD card not active!!\n", __func__); ++ return FALSE; ++ } 
++ ++ if (!sd_set_transfer_state(info)) ++ return FALSE; ++ ++ start = addr * info->CSD.ReadBlockLength; ++ cnt = (int) count; ++ ++ while (cnt > 0) { ++ if (cnt > 1) { ++ if (!sd_read_multiple_block(info, start, (cnt > MAX_MULTI_BLOCK_NUM) ? MAX_MULTI_BLOCK_NUM : cnt, ++ info->CSD.ReadBlockLength, info->ReadAccessTimoutCycle, buf)) ++ return FALSE; ++ } else { ++ if (!sd_read_single_block(info, start, info->CSD.ReadBlockLength, info->ReadAccessTimoutCycle, buf)) ++ return FALSE; ++ return TRUE; ++ } ++ ++ if (!sd_wait_transfer_state(info)) ++ return FALSE; ++ ++ cnt -= MAX_MULTI_BLOCK_NUM; ++ start += MAX_MULTI_BLOCK_NUM * info->CSD.ReadBlockLength; ++ buf += MAX_MULTI_BLOCK_NUM * info->CSD.ReadBlockLength; ++ } ++ ++ return TRUE; ++} ++ ++int sd_write_sector(sd_card_t *info, uint addr, uint count, unchar *buf) ++{ ++ int cnt; ++ uint start; ++ ++ if (count > MAX_WRITE_SECTOR_NR) { ++ P_DEBUG("Writable Block Number Per Commands is 0x%X\n",MAX_WRITE_SECTOR_NR); ++ return FALSE; ++ } ++ ++ sd_err_code = ERR_NO_ERROR; ++ ++ if (!info->ActiveState) { ++ P_DEBUG("%s : SD card not active!!\n", __func__); ++ return FALSE; ++ } ++ ++ if (info->WriteProtect == TRUE) { ++ sd_err_code = ERR_SD_CARD_IS_LOCK; ++ printk("Write Protected!!\n"); ++ return FALSE; ++ } ++ if (!sd_set_transfer_state(info)) ++ return FALSE; ++ ++ start = addr * info->CSD.ReadBlockLength; ++ cnt = (int) count; ++ ++ while (cnt > 0) { ++ if (cnt > 1) { ++ if (!sd_write_multiple_block(info, start, (cnt > MAX_MULTI_BLOCK_NUM) ? MAX_MULTI_BLOCK_NUM : cnt, ++ info->CSD.ReadBlockLength, info->WriteAccessTimoutCycle, buf)) ++ return FALSE; ++ } else { ++ if (!sd_write_single_block(info, start, info->CSD.ReadBlockLength, info->WriteAccessTimoutCycle, buf)) ++ return FALSE; ++ return TRUE; ++ } ++ ++ if (!sd_wait_transfer_state(info)) ++ return FALSE; ++ ++ cnt -= MAX_MULTI_BLOCK_NUM; ++ start += MAX_MULTI_BLOCK_NUM * info->CSD.ReadBlockLength; ++ buf += MAX_MULTI_BLOCK_NUM * info->CSD.ReadBlockLength; ++ } ++ ++ return TRUE; ++} ++/*---------------------------------------------- ++ * Perform an actual transfer: ++ * Returns: # of sectors transferred. 
0 = error ++ */ ++static int sd_transfer(struct sd_dev *device, const struct request *req) ++{ ++ int status = 0; ++ int count = 0; ++ ++ struct bio *bio = req->bio; ++ struct bio_vec *bvec; ++ struct req_iterator iter; ++ ++ spin_unlock_irq( &device->lock); ++ ++#if 0 ++ P_DEBUG("\nreq sector: %d, nr_sectors: %d, hard_cur_sectors: %d phys_seg: %d, buf: 0x%08lx\n", ++ (int)req->sector, (int)req->nr_sectors, (int)req->hard_cur_sectors, ++ (int)req->nr_phys_segments, (ulong)bio_data( bio)); ++#endif ++ ++ rq_for_each_segment( bvec, req, iter) { ++ ++ unsigned char *buf = page_address( bvec->bv_page) + bvec->bv_offset; ++ int sectors = bio_cur_bytes(bio) >> 9; ++ ++ P_DEBUG("bvec[%2d]: sector: %d, count: %d, curr: %d, buf: 0x%08lx, ", ++ iter.i, (int)bio->bi_sector, count, (int)sectors, (unsigned long)buf); ++ ++ sd_card_info.private = (void *)&device->ch_req; ++ ++ if( rq_data_dir(req) == 0) /* Read */ ++ status = sd_read_sector( &sd_card_info, sector_offset + bio->bi_sector, sectors, buf); ++ else ++ status = sd_write_sector( &sd_card_info, sector_offset + bio->bi_sector, sectors, buf); ++ ++ P_DEBUG("status: %d\n", status); ++ ++ if (status <= 0) { ++ spin_lock_irq( &device->lock); ++ return count; ++ } ++ ++ count += sectors; ++ bio->bi_sector += sectors; ++ } ++ ++ if( ( req->__sector == 0) && !Do_onetime){ ++ ++ unsigned char *buf = bio_data( bio); ++ ++ if( ( buf[ 0x1be] != 0x0) && ( buf[ 0x1be] != 0x80)) /* partition identify */ ++ sector_offset = 0; //sector 0 is PBR ++ else ++ sector_offset = ( buf[ 0x1c6]) | ( buf[ 0x1c7] << 8) | ( buf[ 0x1c8] <<16) |( buf[ 0x1c9] << 24); ++ ++ P_DEBUG( "sector_offset = %d\n", sector_offset); ++ Do_onetime = 1; ++ } ++ ++ spin_lock_irq( &device->lock); ++ ++ if( status <= 0) ++ return 0; ++ else ++ return count; ++} ++ ++#ifdef SD_DEBUG ++uint sd_dev_info(void) ++{ ++ sd_csd_t *CSD; ++ sd_cid_t *CID; ++ ++ P_DEBUG("============SDCard=====================================\n"); ++#if 0 ++ if (!sd_card_check_exist(&sd_card_info)) ++ { ++ P_DEBUG("SD Card does not exist!!!"); ++ return FALSE; ++ } ++#else ++ if (!sd_card_info.ActiveState) { ++ P_DEBUG("%s : SD card not active!!\n", __func__); ++ return FALSE; ++ } ++#endif ++ ++ /* print OCR, RCA register */ ++ P_DEBUG("OCR>> 0x%08X RCA>> 0x%04X\n", (uint) sd_card_info.OCR, sd_card_info.RCA); ++ /* print CID register */ ++ P_DEBUG("CID>> 0x%08X 0x%08X 0x%08X 0x%08X\n", sd_card_info.CIDWord[0], sd_card_info.CIDWord[1], sd_card_info.CIDWord[2], sd_card_info.CIDWord[3]); ++ CID = &(sd_card_info.CID); ++ P_DEBUG(" MID:0x%02X OID:0x%04X PNM:%s PRV:%d.%d PSN:0x%08X\n", CID->ManufacturerID, CID->ApplicationID, CID->ProductName, ++ CID->ProductRevisionHigh, CID->ProductRevisionLow, CID->ProductSerialNumber); ++ P_DEBUG(" MDT:%d/%d\n", CID->ManufactureMonth, CID->ManufactureYear); ++ /* print CSD register */ ++ P_DEBUG("CSD>> 0x%08X 0x%08X 0x%08X 0x%08X\n", sd_card_info.CSDWord[0], sd_card_info.CSDWord[1], sd_card_info.CSDWord[2], sd_card_info.CSDWord[3]); ++ CSD = &(sd_card_info.CSD); ++ P_DEBUG(" CSDStructure:%d Spec.Version:%d\n", CSD->CSDStructure, CSD->MMCSpecVersion); ++ P_DEBUG(" TAAC:%dns NSAC:%d clock cycles\n", CSD->TAAC_u, CSD->NSAC_u); ++ P_DEBUG(" TransferSpeed:%d bit/s CardCommandClass:0x%03X\n", CSD->TransferSpeed, CSD->CardCmdClass); ++ P_DEBUG(" ReadBlLen:%d ReadBlPartial:%X WriteBlkMisalign:%X ReadBlkMisalign:%X\n", CSD->ReadBlockLength, CSD->ReadBlockPartial, CSD->WriteBlockMisalign, CSD->ReadBlockMisalign); ++ P_DEBUG(" DSP:%X BlockNumber:%d MemorySize:%d \n", CSD->DSRImplemant, 
CSD->BlockNumber, CSD->MemorySize); ++ P_DEBUG(" VDD_R_MIN:%d/10mA VDD_R_MAX:%dmA\n", (uint) CSD->VDDReadMin_u, (uint) CSD->VDDReadMax_u); ++ P_DEBUG(" VDD_W_MIN:%d/10mA VDD_W_MAX:%dmA\n", (uint) CSD->VDDWriteMin_u, (uint) CSD->VDDWriteMax_u); ++ P_DEBUG(" EraseBlkEnable:%d EraseSectorSize:%d WpGrpSize:%d WpGrpEnable:%X\n", CSD->EraseBlkEnable, CSD->EraseSectorSize, CSD->WriteProtectGroupSize, CSD->WriteProtectGroupEnable); ++ P_DEBUG(" WriteSpeedFactor:%d WriteBlLen:%d WriteBlPartial:%X\n", CSD->WriteSpeedFactor, CSD->WriteBlockLength, CSD->WriteBlockPartial); ++ P_DEBUG(" Copy:%X PermWrProtect:%X TmpWrProtect:%X FileFormat:%X\n", CSD->CopyFlag, CSD->PermanentWriteProtect, CSD->TemporaryWriteProtect, CSD->FileFormat); ++ P_DEBUG(" ReadTimoutCycle:0x%08X WriteTimoutCycle:0x%08X\n", sd_card_info.ReadAccessTimoutCycle, sd_card_info.WriteAccessTimoutCycle); ++ /* print SCR register */ ++ P_DEBUG("SCR>> 0x%08X 0x%08X \n", *(((uint *) &sd_card_info.SCR)), *(((uint *) &sd_card_info.SCR) + 1)); ++ P_DEBUG(" SCR_STRUCTURE:%d, SD_SPEC:%d, Data_status_after_erase:%d\n", sd_card_info.SCR.SCR_STRUCTURE, sd_card_info.SCR.SD_SPEC, sd_card_info.SCR.DATA_STAT_AFTER_ERASE); ++ P_DEBUG(" sd_security:%d, SD_BUS_WIDTH:%X\n", sd_card_info.SCR.SD_SECURITY, sd_card_info.SCR.SD_BUS_WIDTH); ++ ++ return TRUE; ++} ++#endif ++ ++static int sd_card_setup(struct sd_dev *dev) ++{ ++ uint sd_card_size; ++ int i; ++ ++ P_DEBUG("--> %s\n", __func__); ++ first_run = 0; ++ sd_err_code = ERR_NO_ERROR; ++ ++ sd_card_info.ActiveState = FALSE; ++ sd_card_info.WriteProtect = FALSE; ++ sd_card_info.IOAddr = FTSDC_VA_BASE; ++ sd_card_info.DMAEnable = FALSE; ++ ++ sd_card_info.SysFrequency = AHB_CLK_IN/2; ++ P_DEBUG("DMA Enable is %d, Sys frequency = %d\n", sd_card_info.DMAEnable, sd_card_info.SysFrequency); ++ sd_card_info.RCA = 0; ++ sd_card_info.Drive = 'S'; ++ ++ if (!sd_card_insert(&sd_card_info)) ++ return FALSE; ++#ifdef SD_DEBUG ++ if (!sd_dev_info()) ++ return FALSE; ++#endif ++ sd_card_size = sd_card_info.CSD.MemorySize; ++ printk(KERN_NOTICE "FTSDC010: SD Card Capacity=%d MB\n", sd_card_size/1000000); /* Marketing MB is not 1048576 */ ++ ++ for (i = 0; i < SD_DEVS; i++) { ++ sd_devices[i].size = sd_card_size / SD_SECTOR_SIZE; //unit is block, not bytes ++ ++#if 0 ++ sd_partitions[i << SD_SHIFT].nr_sects =sd_size * (SD_BLKSIZE / SD_SECTOR_SIZE); ++ P_DEBUG ("%s() %d-th device, size=%d blks(blks=%d),nr_sects=%ld\n", __func__, i, sd_size, SD_BLKSIZE, sd_partitions[i << SD_SHIFT].nr_sects); ++#endif ++ //sd_devices[i].card_state = SD_CARD_WORK; ++ //sema_init(&(sd_devices[i].sema), 1); // add by Charles Tsai*/ ++ } ++ ++ sd_card_info.DMAEnable = dev->dma_enable; ++ ++ P_DEBUG("<-- %s\n", __func__); ++ return TRUE; ++} ++ ++/* ++ * Driver stuff ++ */ ++/*------------------------------------ ++ * The ioctl implementation ++ */ ++int sd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) ++{ ++ int size; ++ struct hd_geometry geo; ++ struct sd_dev *device=bdev->bd_disk->private_data; ++ P_DEBUG ("ioctl 0x%x 0x%lx\n", cmd, arg); ++ switch (cmd) { ++ case BLKGETSIZE: ++ /* Return the device size, expressed in sectors */ ++ /* FIXME: distinguish between kernel sector size and media sector size */ ++ size = device->size; ++ __copy_to_user ((long *) arg, &size, sizeof (long)); ++ return 0; ++#if 0 ++ case BLKFLSBUF: /* flush */ ++ return blk_ioctl(inode->i_rdev, cmd, arg); ++ case BLKRAGET: /* return the readahead value */ ++ return blk_ioctl(inode->i_rdev, cmd, arg); ++ case BLKRASET: /* set the 
readahead value */ ++ if (!capable (CAP_SYS_RAWIO)) ++ return -EACCES; ++ if (arg > 0xff) ++ return -EINVAL; /* limit it */ ++ return 0; ++// case BLKRRPART: /* re-read partition table */ ++// return sd_revalidate (inode->i_rdev); ++#endif ++ case HDIO_GETGEO: ++ /* ++ * get geometry: we have to fake one... trim the size to a ++ * multiple of 64 (32k): tell we have 16 sectors, 4 heads, ++ * whatever cylinders. Tell also that data starts at sector. 4. ++ */ ++ geo.cylinders = (device->size/4)/8; /* ?? only for test */ ++ geo.heads = 4; ++ geo.sectors = 8; ++ geo.start = 0; ++ __copy_to_user ((void *) arg, &geo, sizeof (geo)); ++ return 0; ++ default: ++ /* ++ * For ioctls we don't understand, let the block layer handle them. ++ */ ++ return -ENOTTY;//blk_ioctl (inode->i_rdev, cmd, arg); ++ } ++ ++ return -ENOTTY; /* unknown command */ ++} ++ ++static void sd_request(struct request_queue *q) ++{ ++ struct sd_dev *dev; ++ static int active; ++#ifndef A320_SD_USE_ASYNC_DMA ++ int ret; ++ struct request *req; ++#else ++ if(bh_busy) ++ return; ++#endif ++ if(active) ++ return; ++ active = 1; ++ P_DEBUG("--> %s\n", __func__); ++repeat: ++ /* Locate the device */ ++ if((req=blk_fetch_request(q))==NULL) { ++ active = 0; ++ return; ++ } ++ dev = req->rq_disk->private_data; ++ if (!dev||dev->card_state == SD_CARD_REMOVE) { ++ if(!dev) ++ printk(KERN_NOTICE"SD: locating device error\n"); ++ __blk_end_request_cur(req, -EIO); ++ goto repeat; ++ } ++#ifndef A320_SD_USE_ASYNC_DMA ++ sync_mode=1; ++ //spin_unlock_irq(&io_request_lock); ++ ret = sd_transfer(dev, req); ++ __blk_end_request(req, 0, ret << 9); ++ //spin_lock_irq(&io_request_lock); ++ goto repeat; ++ ++#else ++ sync_mode=0; //Use new async DMA machanism ++ //printk("%s: set up initial DMA, from sector %d to buffer 0x%X\n", __func__, CURRENT->sector+sector_offset, CURRENT->buffer); ++ //sd_init_async_dma(); ++ sd_clustered_bh(1); ++ //bh_busy=1; ++#endif ++ P_DEBUG("<-- %s\n", __func__); ++} ++ ++#if 0 ++/*----------------------------------------- ++ * Support for removable devices ++ */ ++int sd_check_change(kdev_t i_rdev) ++{ ++ int minor = DEVICE_NR(i_rdev); ++ struct sd_dev *dev = sd_devices + minor; ++ ++ P_DEBUG("--> %s\n", __func__); ++ P_DEBUG("minor=%d\n", minor); ++ if (minor >= SD_DEVS) /* paranoid */ ++ return 0; ++ P_DEBUG("check change for dev %d\n", minor); ++ if (dev->usage) { ++ P_DEBUG("disk not change\n"); ++ P_DEBUG("<-- %s\n", __func__); ++ return 0; /* still valid */ ++ } ++ P_DEBUG("disk changed\n"); ++ P_DEBUG("<-- %s\n", __func__); ++ return 1; /* expired */ ++} ++#endif ++ ++static int sd_dma_ch_alloc(struct sd_dev *dev) ++{ ++ dmad_chreq *ch_req = &dev->ch_req; ++ ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++ ch_req->apb_req.addr0_ctrl = APBBR_ADDRINC_FIXED; /* (in) APBBR_ADDRINC_xxx */ ++/* for amerald */ ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID){ ++ ch_req->apb_req.addr0_reqn = APBBR_REQN_SDC_AMERALD; ++ }else ++ ch_req->apb_req.addr0_reqn = APBBR_REQN_SDC; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ ch_req->apb_req.addr1_ctrl = APBBR_ADDRINC_I4X; /* (in) APBBR_ADDRINC_xxx */ ++ ch_req->apb_req.addr1_reqn = APBBR_REQN_NONE; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ ch_req->apb_req.burst_mode = 1; /* (in) Burst mode (0: no burst 1-, 1: burst 4- data cycles per dma cycle) */ ++ ch_req->apb_req.data_width = APBBR_DATAWIDTH_4; /* (in) APBBR_DATAWIDTH_4(word), APBBR_DATAWIDTH_2(half-word), 
APBBR_DATAWIDTH_1(byte) */ ++ ch_req->apb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ ++ ch_req->controller = DMAD_DMAC_APB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ ch_req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ printk(KERN_INFO "%s: APB dma channel allocation failed\n", __func__); ++ goto _try_ahb; ++ } ++ ++ P_DEBUG("%s: APB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++ //printk("%s: APB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++ ++ return 0; ++ ++_try_ahb: ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++ ch_req->ahb_req.sync = 1; /* (in) non-zero if src and dst have different clock domain */ ++ ch_req->ahb_req.priority = DMAC_CSR_CHPRI_1; /* (in) DMAC_CSR_CHPRI_0 (lowest) ~ DMAC_CSR_CHPRI_3 (highest) */ ++ ch_req->ahb_req.hw_handshake = 1; /* (in) non-zero to enable hardware handshake mode */ ++ ch_req->ahb_req.burst_size = DMAC_CSR_SIZE_4; /* (in) DMAC_CSR_SIZE_1 ~ DMAC_CSR_SIZE_256 */ ++ ch_req->ahb_req.addr0_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ ch_req->ahb_req.addr0_ctrl = DMAC_CSR_AD_FIX; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ ch_req->ahb_req.addr0_reqn = DMAC_REQN_SDC; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ ch_req->ahb_req.addr1_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ ch_req->ahb_req.addr1_ctrl = DMAC_CSR_AD_INC; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ ch_req->ahb_req.addr1_reqn = DMAC_REQN_NONE; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ ++ ch_req->controller = DMAD_DMAC_AHB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ ch_req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ memset(ch_req, 0, sizeof(dmad_chreq)); ++ printk(KERN_INFO "%s: AHB dma channel allocation failed\n", __func__); ++ goto _err_exit; ++ } ++ ++ P_DEBUG("%s: AHB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++ //printk("%s: AHB dma channel allocated (ch: %d)\n", __func__, ch_req->channel); ++ ++ return 0; ++ ++_err_exit: ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++ return -ENODEV; ++} ++ ++/*-------------------------------- ++ * Note no locks taken out here. In a worst case scenario, we could drop ++ * a chunk of system memory. But that should never happen, since validation ++ * happens at open or mount time, when locks are held. ++ */ ++static int sd_revalidate(struct gendisk *gd) ++{ ++ struct sd_dev *dev = gd->private_data; ++ ++ P_DEBUG("--> %s\n", __func__); ++ P_DEBUG("card state=%s\n", dev->card_state == SD_CARD_INSERT ? "INSERT" : dev->card_state == SD_CARD_WORK ? 
"WORK" : "REMOVE"); ++ ++ if (dev->usage == 0) { ++ if (sd_card_setup(dev) != TRUE) { ++ dev->card_state = SD_CARD_REMOVE; ++ return -1; ++ } else { ++ enable_irq(FTSDC_IRQ); ++ dev->card_state = SD_CARD_WORK; ++ } ++ } ++ P_DEBUG("<-- %s\n", __func__); ++ ++ return 0; ++} ++ ++/* ++ * Device open and close ++ * TODO: forbids open for write when WRITE_PROTECT=1 ++ */ ++int sd_open(struct block_device *bdev, fmode_t mode) ++{ ++ struct sd_dev *dev= bdev->bd_disk->private_data; /* device information */ ++ P_DEBUG("--> %s\n", __func__); ++ /* Early return if there's nothing in the card slot */ ++ if ((SDC_R_REG(SDC_STATUS_REG) & SDC_STATUS_REG_CARD_INSERT) != SDC_CARD_INSERT) { ++ P_DEBUG("<-- %s (ENOMEDIUM)\n", __func__); ++ return -ENOMEDIUM; ++ } ++ //spin_lock(&dev->lock); ++ if (!dev->usage) { ++ dev->media_change=1; ++ P_DEBUG("%s: forced check_disk_change check\n", __func__); ++ check_disk_change(bdev); ++ sector_offset=0; ++ Do_onetime=0; ++ } else ++ dev->media_change=0; ++ ++ /* Set size HERE: ++ * must rely on sd_revalidate to set correct size ++ * (seems on check_disk_change()) */ ++ P_DEBUG("%s: set_capacity() to %d blocks (%d bytes)\n",__func__, dev->size, dev->size*SD_SECTOR_SIZE); ++ set_capacity(dev->gd, dev->size); ++ dev->usage++; ++ P_DEBUG("<-- %s\n", __func__); ++ ++ return 0; /* success */ ++} ++ ++int sd_release(struct gendisk *gd, fmode_t mode) ++{ ++ struct sd_dev *dev = gd->private_data; ++ ++ disable_irq(FTSDC_IRQ); ++ printk(" + sd_release : umount SD\n"); ++ ++ P_DEBUG("--> %s\n", __func__); ++ dev->usage--; ++ P_DEBUG("<-- %s\n", __func__); ++ ++ return 0; ++} ++ ++/*-------------------------------------- ++ * The file operations ++ */ ++struct block_device_operations sd_fops = { ++ owner: THIS_MODULE, ++ open: sd_open, ++ release: sd_release, ++ ioctl: sd_ioctl, ++ revalidate_disk: sd_revalidate, ++ media_changed: sd_media_changed, ++}; ++ ++void init_sd_pmu(void) ++{ ++ ++#ifdef CONFIG_FIE8100_PLATFORM ++ unsigned int u32temp; ++ u32temp = *(volatile unsigned int *)(A320_PMU_VA_BASE + 0x14); ++ u32temp &= ~0x3000; ++ u32temp |= 0x2000; ++ *(volatile unsigned int *)(A320_PMU_VA_BASE + 0x14)=u32temp; ++#endif ++ ++#ifdef CONFIG_FIE7000_PLATFORM ++ *(volatile unsigned int *)(A320_PMU_VA_BASE + 0x114) = (*(volatile unsigned int *)(A320_PMU_VA_BASE + 0x114) & 0xFFFF0FFF) | 0x00002000; ++#endif ++} ++ ++/* ++ * Set up our internal device. ++ */ ++static int setup_device(struct sd_dev *dev) ++{ ++ /* ++ * Get some memory. ++ */ ++ memset (dev, 0, sizeof (struct sd_dev)); ++ dev->size = 0;//SD_DUMMY_SIZE/SD_SECTOR_SIZE; /* We'll fill this with correct size later*/ ++ spin_lock_init(&dev->lock); ++ /* Request Queue */ ++ dev->queue = blk_init_queue(sd_request, &dev->lock); ++ if (dev->queue == NULL) ++ return -EFAULT; ++ ++ blk_queue_logical_block_size(dev->queue, hardsect_size); ++ dev->queue->queuedata = dev; ++ ++ dev->card_state = SD_CARD_REMOVE; ++ ++ /* ++ * And the gendisk structure. ++ */ ++ dev->gd = alloc_disk(SD_MINORS); ++ if (! 
dev->gd) { ++ printk (KERN_NOTICE "alloc_disk failure\n"); ++ return -EFAULT; ++ } ++ ++ dev->gd->flags = GENHD_FL_REMOVABLE|GENHD_FL_SUPPRESS_PARTITION_INFO; ++ dev->gd->major = sd_major; ++ dev->gd->first_minor = 0; ++ dev->gd->minors = SD_MINORS; ++ dev->gd->fops = &sd_fops; ++ dev->gd->queue = dev->queue; ++ dev->gd->private_data = dev; ++ snprintf (dev->gd->disk_name, 32, "cpesd%c", 'a'); ++ set_capacity(dev->gd, 0); //SD_DUMMY_SIZE/SD_SECTOR_SIZE*(hardsect_size/KERNEL_SECTOR_SIZE)); ++ add_disk(dev->gd); ++ ++ /* ++ * dma alloc ++ */ ++ if (sd_dma_ch_alloc(dev) == 0) { ++ printk(KERN_NOTICE "Faraday SD controller Driver (DMA mode)\n"); ++ dev->dma_enable = true; ++ } else { ++ printk(KERN_NOTICE "Faraday SD controller Driver (PIO mode)\n"); ++ } ++ ++ return 0; ++} ++/* ++ * Look for a media change. ++ */ ++static int sd_media_changed(struct gendisk *gd) ++{ ++ struct sd_dev *dev = gd->private_data; ++ return dev->media_change; ++} ++ ++/* ++ * module stuff ++ */ ++static int __init sd_module_init(void) ++{ ++ int result=-ENOMEM; ++ spinlock_t complete_lock; ++ unsigned long iflags; ++ spin_lock_init(&complete_lock); ++ ++#ifdef CONFIG_PLAT_QEMU ++ SDC_READ_FIFO_LEN = SDC_WRITE_FIFO_LEN = SDC_R_REG(0x44) & 0xff; ++#else ++ if(SDC_R_REG(0xa0) == 0x00030101) ++ SDC_READ_FIFO_LEN = SDC_WRITE_FIFO_LEN = SDC_R_REG(0x9c) & 0xff; ++ else ++ SDC_READ_FIFO_LEN = SDC_WRITE_FIFO_LEN = SDC_R_REG(SDC_FEATURE_REG) & 0xff; ++#endif ++ /* Register SD driver */ ++ sd_major = register_blkdev(sd_major, DEVICE_NAME); ++ if (sd_major <= 0) { ++ printk(KERN_WARNING DEVICE_NAME ":unable to get major number\n"); ++ return -EBUSY; ++ } ++ init_sd_pmu(); /* Power on SDC */ ++ P_DEBUG("SD Major Number = %d\n", sd_major); ++ printk(KERN_ALERT "SD: make node with 'mknod /dev/cpesd b %d 0'\n", sd_major); ++ ++ sd_devices = kmalloc(sizeof(struct sd_dev), GFP_KERNEL); ++ if (!sd_devices) ++ goto fail_malloc; ++ memset(sd_devices, 0, sizeof(struct sd_dev)); ++ ++ if (setup_device(sd_devices)) ++ goto fail_malloc; ++ ++ P_DEBUG("Request SDC IRQ=%d\n", FTSDC_IRQ); ++ spin_lock_irqsave(&complete_lock, iflags); ++ if (request_irq(FTSDC_IRQ, sd_hotswap_interrupt_handler, IRQF_DISABLED, "SD controller", sd_devices) != 0) { ++ printk(KERN_ERR "Unable to allocate SDC IRQ=0x%X\n", FTSDC_IRQ); ++ goto fail_malloc; ++ } ++ disable_irq(FTSDC_IRQ); ++ spin_unlock_irqrestore(&complete_lock, iflags); ++ if (request_region(FTSDC_VA_BASE, 0x48, "SD Controller") == NULL) { ++ printk(KERN_ERR "request io port of sd controller fail\n"); ++ goto fail_mem; ++ } ++ ++ return 0; /* succeed */ ++ ++fail_mem: ++ free_irq(FTSDC_IRQ, sd_devices); ++ ++fail_malloc: ++ if (sd_devices) ++ kfree(sd_devices); ++ unregister_blkdev(sd_major, DEVICE_NAME); ++ return result; ++} ++ ++static void sd_module_cleanup(void) ++{ ++ P_DEBUG("--> %s\n", __func__); ++ ++ /* unregister the device now to avoid further operations during cleanup */ ++ ++ if (sd_devices) { ++ del_gendisk(sd_devices->gd); ++ put_disk(sd_devices->gd); ++ if(sd_devices->queue) ++ blk_cleanup_queue(sd_devices->queue); ++ ++ if (sd_devices->dma_enable) ++ dmad_channel_free(&sd_devices->ch_req); ++ ++ kfree(sd_devices); ++ } ++ ++ release_region(FTSDC_VA_BASE, 0x48); ++ free_irq(FTSDC_IRQ, sd_devices); ++ ++ unregister_blkdev(sd_major, DEVICE_NAME); ++ P_DEBUG("<-- %s\n", __func__); ++} ++ ++module_init(sd_module_init); ++module_exit(sd_module_cleanup); +diff -Nur linux-3.4.110.orig/drivers/block/ftsdc010.h linux-3.4.110/drivers/block/ftsdc010.h +--- 
linux-3.4.110.orig/drivers/block/ftsdc010.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/block/ftsdc010.h 2016-04-07 10:20:51.022084119 +0200 +@@ -0,0 +1,477 @@ ++/* drivers/block/CPESD/ftsdc010.h ++ * ++ * Faraday FTSDC010 Device Driver ++ * ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * ++ * All Rights Reserved ++ */ ++ ++#ifndef _FTSDC010_H_ ++#define _FTSDC010_H_ ++ ++#ifndef TRUE ++#define TRUE 1 ++#endif ++ ++#ifndef FALSE ++#define FALSE 0 ++#endif ++ ++//#define SD_DEBUG ++#define DELAY_FOR_DMA_READ ++ ++#ifdef SD_DEBUG ++ #define P_DEBUG(fmt, args...) printk(KERN_ALERT "SD: " fmt, ## args) ++#else ++ #define P_DEBUG(a...) ++#endif ++#define P_DEBUGG(a...) ++ ++#define MAX_READ_SECTOR_NR 96 //16 ++#define MAX_WRITE_SECTOR_NR MAX_READ_SECTOR_NR ++ ++#define SD_MAJOR 6 /* default major number, if zero, it means dynamic allocate */ ++#define SD_DEVS 1 /* number of disks */ ++#define SD_MINORS 16 /* minors per disk */ ++#define SD_RAHEAD 2 /* number of sectors */ ++#define SD_BLKSIZE 1024 /* block size */ ++#define SD_SECTOR_SIZE 512 /* sector size */ ++#define SD_DUMMY_SIZE (256*1024*1024) // for sake of hotswap/hotplug ++#if 0 ++typedef struct _sd_dev_t { ++ int size; ++ int usage; ++ //struct timer_list timer; ++ spinlock_t lock; ++ struct semaphore sema; // synchronization ++ int card_state; ++} sd_dev_t; ++#endif ++//---------SD Card State ++#define SD_CARD_REMOVE 0 ++#define SD_CARD_INSERT 1 ++#define SD_CARD_WORK 2 ++ ++/* so far, SD controller support 3.2-3.3 VDD */ ++#define SDC_OCR 0x00FF8000 ++ ++/* sd controller register */ ++#define SDC_CMD_REG 0x00000000 ++#define SDC_ARGU_REG 0x00000004 ++#define SDC_RESPONSE0_REG 0x00000008 ++#define SDC_RESPONSE1_REG 0x0000000C ++#define SDC_RESPONSE2_REG 0x00000010 ++#define SDC_RESPONSE3_REG 0x00000014 ++#define SDC_RSP_CMD_REG 0x00000018 ++#define SDC_DATA_CTRL_REG 0x0000001C ++#define SDC_DATA_TIMER_REG 0x00000020 ++#define SDC_DATA_LEN_REG 0x00000024 ++#define SDC_STATUS_REG 0x00000028 ++#define SDC_CLEAR_REG 0x0000002C ++#define SDC_INT_MASK_REG 0x00000030 ++#define SDC_POWER_CTRL_REG 0x00000034 ++#define SDC_CLOCK_CTRL_REG 0x00000038 ++#define SDC_BUS_WIDTH_REG 0x0000003C ++#define SDC_DATA_WINDOW_REG 0x00000040 ++#ifdef A320D_BUILDIN_SDC ++#define SDC_FEATURE_REG 0x00000044 ++#define SDC_REVISION_REG 0x00000048 ++#else ++#define SDC_MMC_INT_RSP_REG 0x00000044 ++#define SDC_GP_OUTPUT_REG 0x00000048 ++#define SDC_FEATURE_REG 0x0000009C ++#define SDC_REVISION_REG 0x000000A0 ++#endif ++ ++/* bit mapping of command register */ ++#define SDC_CMD_REG_INDEX 0x0000003F ++#define SDC_CMD_REG_NEED_RSP 0x00000040 ++#define SDC_CMD_REG_LONG_RSP 0x00000080 ++#define SDC_CMD_REG_APP_CMD 0x00000100 ++#define SDC_CMD_REG_CMD_EN 0x00000200 ++#define SDC_CMD_REG_SDC_RST 0x00000400 ++ ++/* bit mapping of response command register */ ++#define SDC_RSP_CMD_REG_INDEX 0x0000003F ++#define SDC_RSP_CMD_REG_APP 0x00000040 ++ ++/* bit mapping of data control register */ ++#define SDC_DATA_CTRL_REG_BLK_SIZE 0x0000000F ++#define SDC_DATA_CTRL_REG_DATA_WRITE 0x00000010 ++#define SDC_DATA_CTRL_REG_DATA_READ 0x00000000 ++#define SDC_DATA_CTRL_REG_DMA_EN 0x00000020 ++#define SDC_DATA_CTRL_REG_DATA_EN 0x00000040 ++ ++#define SDC_DMA_TYPE_1 0x00000000 ++#define SDC_DMA_TYPE_4 0x00000100 ++#define SDC_DMA_TYPE_8 0x00000200 ++ ++/* bit mapping of status/clear/mask register */ ++#define SDC_STATUS_REG_RSP_CRC_FAIL 0x00000001 ++#define SDC_STATUS_REG_DATA_CRC_FAIL 0x00000002 ++#define SDC_STATUS_REG_RSP_TIMEOUT 0x00000004 
++#define SDC_STATUS_REG_DATA_TIMEOUT 0x00000008 ++#define SDC_STATUS_REG_RSP_CRC_OK 0x00000010 ++#define SDC_STATUS_REG_DATA_CRC_OK 0x00000020 ++#define SDC_STATUS_REG_CMD_SEND 0x00000040 ++#define SDC_STATUS_REG_DATA_END 0x00000080 ++#define SDC_STATUS_REG_FIFO_UNDERRUN 0x00000100 ++#define SDC_STATUS_REG_FIFO_OVERRUN 0x00000200 ++#define SDC_STATUS_REG_CARD_CHANGE 0x00000400 ++#define SDC_STATUS_REG_CARD_INSERT 0x00000800 ++#define SDC_STATUS_REG_CARD_LOCK 0x00001000 ++ ++#define SDC_CARD_INSERT 0x0 ++#define SDC_CARD_REMOVE SDC_STATUS_REG_CARD_INSERT ++ ++/* bit mapping of power control register */ ++#define SDC_POWER_REG_POWER_ON 0x00000010 ++#define SDC_POWER_REG_POWER_BITS 0x0000000F ++ ++/* bit mapping of clock control register */ ++#define SDC_CLOCK_REG_CARD_TYPE 0x00000080 ++#define SDC_CLOCK_REG_CLK_DIV 0x0000007F ++ ++/* card type */ ++#define SDC_CARD_TYPE_SD SDC_CLOCK_REG_CARD_TYPE ++#define SDC_CARD_TYPE_MMC 0x0 ++ ++/* bit mapping of bus width register */ ++#define SDC_BUS_WIDTH_REG_SINGLE_BUS 0x00000001 ++#define SDC_BUS_WIDTH_REG_WIDE_BUS 0x00000004 ++#define SDC_WIDE_BUS_SUPPORT 0x00000008 ++ ++/* data window register */ ++//#define SDC_READ_FIFO_LEN 4 ++//#define SDC_WRITE_FIFO_LEN 4 ++ ++/* card type, sd or mmc */ ++#define MEMORY_CARD_TYPE_SD 0 ++#define MEMORY_CARD_TYPE_MMC 1 ++ ++/********************************************************************/ ++/* SYSTEM ERROR_CODE */ ++/********************************************************************/ ++#define ERR_NO_ERROR 0x00000000 ++ ++/* general error */ ++#define ERR_CARD_NOT_EXIST 0x00000001 ++#define ERR_OUT_OF_VOLF_RANGE 0x00000002 ++#define ERR_SD_PARTITIAL_READ_ERROR 0x00000004 ++#define ERR_SD_PARTITIAL_WRITE_ERROR 0x00000008 ++ ++#define ERR_SD_CARD_IS_LOCK 0x00000010 ++ ++/* command error */ ++#define ERR_DATA_CRC_ERROR 0x00000100 ++#define ERR_RSP_CRC_ERROR 0x00000200 ++#define ERR_DATA_TIMEOUT_ERROR 0x00000400 ++#define ERR_RSP_TIMEOUT_ERROR 0x00000800 ++ ++#define ERR_WAIT_OVERRUN_TIMEOUT 0x00001000 ++#define ERR_WAIT_UNDERRUN_TIMEOUT 0x00002000 ++#define ERR_WAIT_DATA_CRC_TIMEOUT 0x00004000 ++#define ERR_WAIT_TRANSFER_END_TIMEOUT 0x00008000 ++ ++#define ERR_SEND_COMMAND_TIMEOUT 0x00010000 ++ ++/* sd error */ ++#define ERR_SD_CARD_IS_BUSY 0x00100000 ++#define ERR_CID_REGISTER_ERROR 0x00200000 ++#define ERR_CSD_REGISTER_ERROR 0x00400000 ++ ++/* sd card status error */ ++#define ERR_SD_CARD_STATUS_ERROR 0x01000000 ++/* SDC using APB DMA error */ ++#define ERR_DMA_RSP_ERROR 0x02000000 ++ ++#define SD_SCR_1_BIT_BIT 0x0001 ++#define SD_SCR_4_BIT_BIT 0x0004 ++ ++/********************************************************************/ ++/* The bit mapping of SD Status register */ ++/********************************************************************/ ++#define SD_STATUS_OUT_OF_RANGE 0x80000000 ++#define SD_STATUS_ADDRESS_ERROR 0x40000000 ++#define SD_STATUS_BLOCK_LEN_ERROR 0x20000000 ++#define SD_STATUS_ERASE_SEQ_ERROR 0x10000000 ++#define SD_STATUS_ERASE_PARAM 0x08000000 ++#define SD_STATUS_WP_VIOLATION 0x04000000 ++#define SD_STATUS_CARD_IS_LOCK 0x02000000 ++#define SD_STATUS_LOCK_UNLOCK_FILED 0x01000000 ++#define SD_STATUS_COM_CRC_ERROR 0x00800000 ++#define SD_STATUS_ILLEGAL_COMMAND 0x00400000 ++#define SD_STATUS_CARD_ECC_FAILED 0x00200000 ++#define SD_STATUS_CC_ERROR 0x00100000 ++#define SD_STATUS_ERROR 0x00080000 ++#define SD_STATUS_UNDERRUN 0x00040000 ++#define SD_STATUS_OVERRUN 0x00020000 ++#define SD_STATUS_CID_CSD_OVERWRITE 0x00010000 ++#define SD_STATUS_WP_ERASE_SKIP 0x00008000 ++#define 
SD_STATUS_CARD_ECC_DISABLE 0x00004000 ++#define SD_STATUS_ERASE_RESET 0x00002000 ++#define SD_STATUS_CURRENT_STATE 0x00001E00 ++#define SD_STATUS_READY_FOR_DATA 0x00000100 ++#define SD_STATUS_APP_CMD 0x00000020 ++#define SD_STATUS_AKE_SEQ_ERROR 0x00000008 ++#define SD_STATUS_ERROR_BITS (SD_STATUS_ADDRESS_ERROR | SD_STATUS_BLOCK_LEN_ERROR | SD_STATUS_ERASE_SEQ_ERROR | SD_STATUS_ERASE_PARAM | SD_STATUS_WP_VIOLATION | SD_STATUS_LOCK_UNLOCK_FILED | SD_STATUS_CARD_ECC_FAILED | SD_STATUS_CC_ERROR | SD_STATUS_ERROR | SD_STATUS_UNDERRUN | SD_STATUS_OVERRUN | SD_STATUS_CID_CSD_OVERWRITE | SD_STATUS_WP_ERASE_SKIP | SD_STATUS_AKE_SEQ_ERROR) ++#define SD_STATUS_CURRENT_STATE_LOC 9 ++ ++/********************************************************************/ ++/* SD command response type */ ++/********************************************************************/ ++#define SD_NO_RESPONSE 0 ++#define SD_RESPONSE_R1 1 ++#define SD_RESPONSE_R1b 2 ++#define SD_RESPONSE_R2 3 ++#define SD_RESPONSE_R3 4 ++#define SD_RESPONSE_R6 5 ++ ++/********************************************************************/ ++/* SD command */ ++/********************************************************************/ ++#define SD_GO_IDLE_STATE_CMD 0 ++#define SD_MMC_OP_COND 1 ++#define SD_ALL_SEND_CID_CMD 2 ++#define SD_SEND_RELATIVE_ADDR_CMD 3 ++#define SD_SET_DSR_CMD 4 ++#define SD_SET_BUS_WIDTH_CMD 6 ++#define SD_SELECT_CARD_CMD 7 ++#define SD_SEND_CSD_CMD 9 ++#define SD_SEND_CID_CMD 10 ++#define SD_STOP_TRANSMISSION_CMD 12 ++#define SD_SEND_STATUS_CMD 13 ++#define SD_GO_INACTIVE_STATE_CMD 15 ++#define SD_SET_BLOCKLEN_CMD 16 ++#define SD_READ_SINGLE_BLOCK_CMD 17 ++#define SD_READ_MULTIPLE_BLOCK_CMD 18 ++#define SD_WRITE_SINGLE_BLOCK_CMD 24 ++#define SD_WRITE_MULTIPLE_BLOCK_CMD 25 ++#define SD_PROGRAM_CSD_CMD 27 ++#define SD_ERASE_SECTOR_START_CMD 32 ++#define SD_ERASE_SECTOR_END_CMD 33 ++#define SD_ERASE_CMD 38 ++#define SD_APP_OP_COND 41 ++#define SD_LOCK_UNLOCK_CMD 42 ++#define SD_SEND_SCR_CMD 51 ++#define SD_APP_CMD 55 ++#define SD_GET_CMD 56 ++ ++/* retry count */ ++#ifndef CONFIG_FTSDC010_USE_TIMER_DELAY ++#define SD_CARD_GET_OCR_RETRY_COUNT 0x1000 ++#define SD_CARD_WAIT_OPERATION_COMPLETE_RETRY_COUNT 8000 ++#define SD_CARD_STATE_CHANGE_RETRY_COUNT 30000 ++#define SD_CARD_WAIT_TRANSFER_STATE_RETRY_COUNT 30000 ++#define SDC_GET_STATUS_RETRY_COUNT 0x300000 ++#else ++#define SD_CARD_GET_OCR_RETRY_COUNT 0x1000 ++#define SD_CARD_WAIT_OPERATION_COMPLETE_RETRY_COUNT 8000 ++#define SD_CARD_STATE_CHANGE_RETRY_COUNT 10000 ++#define SD_CARD_WAIT_TRANSFER_STATE_RETRY_COUNT 10000 ++#endif ++ ++/* ++ * Please refer SanDisk SD Manual v1.9 Section 5.1.9.2 (page 5-76) to set the timeout setting ++ */ ++#ifdef SD_DEBUG ++#define SDC_TIMEOUT_BASE (HZ/2) // Unit is 500 ms ++#else ++#define SDC_TIMEOUT_BASE (HZ/3) // Unit is 333 ms ++#endif ++#define SDC_GET_STATUS_RETRY_TIMEOUT_COUNT (HZ*4) ++ ++/* sd card standby state */ ++#define SD_IDLE_STATE 0 ++#define SD_READY_STATE 1 ++#define SD_IDENT_STATE 2 ++#define SD_STBY_STATE 3 ++#define SD_TRAN_STATE 4 ++#define SD_DATA_STATE 5 ++#define SD_RCV_STATE 6 ++#define SD_PRG_STATE 7 ++#define SD_DIS_STATE 8 ++ ++#define SD_BUS_WIDTH_1_BIT 0 ++#define SD_BUS_WIDTH_4_BIT 2 ++ ++/********************************************************************/ ++/* SD card OCR register */ ++/********************************************************************/ ++#define SD_OCR_BUSY_BIT 0x80000000 ++ ++/********************************************************************/ ++/* SD CID register */ 
++/********************************************************************/ ++#define SD_DEFAULT_MONTH_CODE 1 ++#define SD_DEFAULT_YEAR_CODE 2000 ++#define MAX_MULTI_BLOCK_NUM 126 ++ ++typedef struct _sd_cid_t ++{ ++ uint ManufacturerID; ++ uint ApplicationID; ++ unchar ProductName[7]; ++ uint ProductRevisionHigh; ++ uint ProductRevisionLow; ++ uint ProductSerialNumber; ++ uint ManufactureMonth; ++ uint ManufactureYear; ++} sd_cid_t; ++ ++/********************************************************************/ ++/* SD CSD register */ ++/********************************************************************/ ++#define SD_CSD_STRUCTURE_1_0 0 ++#define SD_CSD_STRUCTURE_1_1 1 ++ ++#define SD_CSD_SPEC_VERS_1_0_1_2 0 ++#define SD_CSD_SPEC_VERS_1_4 1 ++#define SD_CSD_SPEC_VERS_2_1 2 ++ ++#define SD_TAAC_TIME_UINT_BITS 0x07 ++#define SD_TAAC_TIME_VALUE_BITS 0x78 ++ ++typedef struct _sd_csd_t ++{ ++ uint CSDStructure; ++ uint MMCSpecVersion; ++ uint TAAC_u; ++ uint NSAC_u; ++ uint TransferSpeed; ++ uint CardCmdClass; ++ uint ReadBlockLength; ++ uint ReadBlockPartial; ++ uint WriteBlockMisalign; ++ uint ReadBlockMisalign; ++ uint DSRImplemant; ++ uint BlockNumber; ++ uint MemorySize; ++ uint VDDReadMin_u; ++ uint VDDReadMax_u; ++ uint VDDWriteMin_u; ++ uint VDDWriteMax_u; ++ uint EraseBlkEnable; ++ uint EraseSectorSize; ++ uint WriteProtectGroupSize; ++ uint WriteProtectGroupEnable; ++ uint WriteSpeedFactor; ++ uint WriteBlockLength; ++ unchar WriteBlockPartial; ++ unchar CopyFlag; ++ unchar PermanentWriteProtect; ++ unchar TemporaryWriteProtect; ++ unchar FileFormat; ++} sd_csd_t; ++ ++typedef struct _sd_csd_bit_t ++{ ++ uint NotUsed:1; ++ uint CRC:7; ++ uint MMCardReserved1:2; ++ uint FILE_FORMAT:2; ++ uint TMP_WRITE_PROTECT:1; ++ uint PERM_WRITE_PROTECT:1; ++ uint COPY:1; ++ uint FILE_FORMAT_GRP:1; ++ ++ uint Reserved2:5; ++ uint WRITE_BL_PARTIAL:1; ++ uint WRITE_BL_LEN:4; ++ uint R2W_FACTOR:3; ++ uint MMCardReserved0:2; ++ uint WP_GRP_ENABLE:1; ++ ++ uint WP_GRP_SIZE:7; ++ uint ERASE_SECTOR_SIZE:7; ++ uint ERASE_BLK_ENABLE:1; ++ uint C_SIZE_MULT:3; ++ uint VDD_W_CURR_MAX:3; ++ uint VDD_W_CURR_MIN:3; ++ uint VDD_R_CURR_MAX:3; ++ uint VDD_R_CURR_MIN:3; ++ ++ uint C_SIZE_1:2; ++ uint C_SIZE_2:10; // divide its into 2, 10bits ++ ++ uint Reserved1:2; ++ uint DSR_IMP:1; ++ uint READ_BLK_MISALIGN:1; ++ uint WRITE_BLK_MISALIGN:1; ++ uint READ_BL_PARTIAL:1; ++ ++ uint READ_BL_LEN:4; ++ uint CCC:12; ++ ++ uint TRAN_SPEED_RateUnit:3; ++ uint TRAN_SPEED_TimeValue:4; ++ uint TRAN_SPEED_Reserved:1; ++ ++ uint NSAC:8; ++ ++ uint TAAC_TimeUnit:3; ++ uint TAAC_TimeValue:4; ++ uint TAAC_Reserved:1; ++ ++ uint Reserved0:2; ++ uint MMC_SPEC_VERS:4; ++ uint CSD_STRUCTURE:2; ++} sd_csd_bit_t; ++ ++/********************************************************************/ ++/* SD SCR register */ ++/********************************************************************/ ++typedef struct _sd_scr_t ++{ ++ uint Reserved:16; ++ uint SD_BUS_WIDTH:4; ++ uint SD_SECURITY:3; ++ uint DATA_STAT_AFTER_ERASE:1; ++ uint SD_SPEC:4; ++ uint SCR_STRUCTURE:4; ++ ++ uint ManufacturerReserved; ++} sd_scr_t; ++ ++/********************************************************************/ ++/* sd card structure */ ++/********************************************************************/ ++typedef struct _sd_card_t ++{ ++ /* host interface configuration */ ++ uint IOAddr; /* host controller register base address */ ++ uint DMAEnable; ++ ++ uint CardType; ++ ++ /* card register */ ++ uint OCR; ++ ++ uint CIDWord[4]; ++ sd_cid_t CID; ++ ++ uint CSDWord[4]; ++ 
sd_csd_t CSD; ++ ++ ushort RCA; ++ sd_scr_t SCR; ++ ++ /* access time out */ ++ uint ReadAccessTimoutCycle; ++ uint WriteAccessTimoutCycle; ++ ++ /* Drive Name */ ++ uint Drive; ++ ++ /* system configurations */ ++ uint SysFrequency; ++ ++ /* card status */ ++ int ActiveState; ++ int WriteProtect; ++ ++ void *private; ++} sd_card_t; ++ ++#endif +diff -Nur linux-3.4.110.orig/drivers/block/Kconfig linux-3.4.110/drivers/block/Kconfig +--- linux-3.4.110.orig/drivers/block/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/block/Kconfig 2016-04-07 10:20:51.022084119 +0200 +@@ -498,6 +498,14 @@ + block device driver. It communicates with a back-end driver + in another domain which drives the actual block device. + ++config FTSDC010 ++ tristate "Faraday FTSDC010 driver" ++ depends on NDS32 ++ ++config FTCFC010 ++ tristate "Faraday FTCFC010 driver" ++ depends on NDS32 ++ + config XEN_BLKDEV_BACKEND + tristate "Xen block-device backend driver" + depends on XEN_BACKEND +diff -Nur linux-3.4.110.orig/drivers/block/Makefile linux-3.4.110/drivers/block/Makefile +--- linux-3.4.110.orig/drivers/block/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/block/Makefile 2016-04-07 10:20:51.050085202 +0200 +@@ -43,3 +43,5 @@ + obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ + + swim_mod-y := swim.o swim_asm.o ++obj-$(CONFIG_FTSDC010) += ftsdc010.o ++obj-$(CONFIG_FTCFC010) += ftcfc010.o +diff -Nur linux-3.4.110.orig/drivers/gpio/gpio-ftgpio010.c linux-3.4.110/drivers/gpio/gpio-ftgpio010.c +--- linux-3.4.110.orig/drivers/gpio/gpio-ftgpio010.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/gpio/gpio-ftgpio010.c 2016-04-07 10:20:51.050085202 +0200 +@@ -0,0 +1,218 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#define GPIO_DATA_OUT 0x00 ++#define GPIO_DATA_IN 0x04 ++#define PIN_DIR 0x08 ++#define PIN_BYPASS 0x0C ++#define GPIO_DATA_SET 0x10 ++#define GPIO_DATA_CLEAR 0x14 ++#define PIN_PULL_ENABLE 0x18 ++#define PIN_PULL_TYPE 0x1C ++#define INT_ENABLE 0x20 ++#define INT_RAW_STATE 0x24 ++#define INT_MASKED_STATE 0x28 ++#define INT_MASK 0x2C ++#define INT_CLEAR 0x30 ++#define INT_TRIGGER 0x34 ++#define INT_BOTH 0x38 ++#define INT_RISE_NEG 0x3C ++#define BOUNCE_ENABLE 0x40 ++#define BOUNCE_PRE_SCALE 0x44 ++ ++#define GPIO_READL(offset) \ ++ readl(GPIO_FTGPIO010_VA_BASE + (offset)) ++ ++#define GPIO_WRITEL(val, offset) \ ++ writel((val), GPIO_FTGPIO010_VA_BASE + (offset)) ++ ++#define FTGPIO010_VIRTUAL_IRQ_BASE 100 ++ ++static int irq_to_gpio(unsigned int irq) ++{ ++ return irq - FTGPIO010_VIRTUAL_IRQ_BASE; ++} ++ ++static int ftgpio_to_irq(struct gpio_chip *gc, unsigned int offset) ++{ ++ return FTGPIO010_VIRTUAL_IRQ_BASE + offset; ++} ++ ++static int ftgpio_get(struct gpio_chip *gc, unsigned int gpio) ++{ ++ return (GPIO_READL(GPIO_DATA_IN) >> gpio & 1); ++} ++ ++static void ftgpio_set(struct gpio_chip *gc, unsigned int gpio, int data) ++{ ++ unsigned long val; ++ ++ if (data) ++ val = GPIO_READL(GPIO_DATA_OUT) | (0x1UL << gpio); ++ else ++ val = GPIO_READL(GPIO_DATA_OUT) & ~(0x1UL << gpio); ++ ++ GPIO_WRITEL(val, GPIO_DATA_OUT); ++} ++ ++static int ftgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) ++{ ++ unsigned long val; ++ ++ val = GPIO_READL(PIN_DIR) & ~(0x1UL << gpio); ++ GPIO_WRITEL(val, PIN_DIR); ++ ++ return 0; ++} ++ ++static int ftgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int data) ++{ ++ unsigned long val; ++ ++ val = GPIO_READL(PIN_DIR) | (0x1UL << gpio); ++ GPIO_WRITEL(val, PIN_DIR); ++ ++ gc->set(gc, gpio, 
data); ++ ++ return 0; ++} ++ ++static struct gpio_chip ftgpio_chip = { ++ ++ .label = "FTGPIO010", ++ .base = 0, ++ .ngpio = 16, ++ .direction_input = ftgpio_dir_in, ++ .direction_output = ftgpio_dir_out, ++ .get = ftgpio_get, ++ .set = ftgpio_set, ++ .to_irq = ftgpio_to_irq, ++}; ++ ++static void ftgpio_irq_ack(struct irq_data *data) ++{ ++ GPIO_WRITEL(0x1UL << irq_to_gpio(data->irq), INT_CLEAR); ++} ++ ++static void ftgpio_irq_mask(struct irq_data *data) ++{ ++ unsigned long val; ++ ++ val = GPIO_READL(INT_MASK) | (0x1UL << irq_to_gpio(data->irq)); ++ GPIO_WRITEL(val, INT_MASK); ++} ++ ++static void ftgpio_irq_unmask(struct irq_data *data) ++{ ++ unsigned long val; ++ val = GPIO_READL(INT_MASK) & ~(0x1UL << irq_to_gpio(data->irq)); ++ GPIO_WRITEL(val, INT_MASK); ++} ++ ++static int ftgpio_irq_set_type(struct irq_data *data, unsigned int flow_type) ++{ ++ unsigned long bit = 0x1UL << irq_to_gpio(data->irq); ++ unsigned long val; ++ val = GPIO_READL(INT_BOTH); ++ ++ if (flow_type & IRQF_TRIGGER_RISING && flow_type & IRQF_TRIGGER_FALLING) ++ GPIO_WRITEL(val | bit, INT_BOTH); ++ else ++ GPIO_WRITEL(val & ~bit, INT_BOTH); ++ ++ val = GPIO_READL(INT_RISE_NEG); ++ ++ if (flow_type & IRQF_TRIGGER_FALLING) ++ GPIO_WRITEL(val | bit, INT_RISE_NEG); ++ else if (flow_type & IRQF_TRIGGER_RISING) ++ GPIO_WRITEL(val & ~bit, INT_RISE_NEG); ++ ++ return 0; ++} ++ ++static struct irq_chip ftgpio_irq_chip = { ++ ++ .name = "FTGPIO010_irq", ++ .irq_ack = ftgpio_irq_ack, ++ .irq_mask = ftgpio_irq_mask, ++ .irq_unmask = ftgpio_irq_unmask, ++ .irq_set_type = ftgpio_irq_set_type, ++}; ++ ++static void gpio_irq_router(unsigned int irq, struct irq_desc *desc) ++{ ++ unsigned long status; ++ int i = 0; ++ ++ status = GPIO_READL(INT_RAW_STATE); ++ status &= ~((1 << 22) | (1 << 25) | (1 << 26)); ++ ++ while (status) { ++ ++ if (status & 0x1UL) ++ generic_handle_irq(gpio_to_irq(i)); ++ ++ status >>= 1; ++ i++; ++ } ++} ++ ++static int gpio_init(void) ++{ ++ int i; ++ ++ /* disable interrupt */ ++ GPIO_WRITEL(0x00000000UL, INT_ENABLE); ++ ++ /* mask interrupt */ ++ GPIO_WRITEL(0x0000FFFFUL, INT_MASK); ++ ++ /* triggered interrupt on both edge */ ++ GPIO_WRITEL(0x0000FFFFUL, INT_BOTH); ++ ++ /* clear interrupt */ ++ GPIO_WRITEL(0x0000FFFFUL, INT_CLEAR); ++ ++ /* enable de-bouncing */ ++ GPIO_WRITEL(0x0000FFFFUL, BOUNCE_ENABLE); ++ ++ /* enable interrupt */ ++ GPIO_WRITEL(0x0000FFFFUL, INT_ENABLE); ++ ++ gpiochip_add(&ftgpio_chip); ++ ++ for (i = 0; i < ftgpio_chip.ngpio; i++) { ++ ++ irq_set_chip(gpio_to_irq(i), &ftgpio_irq_chip); ++ irq_set_handler(gpio_to_irq(i), handle_level_irq); ++ } ++ ++ irq_set_chained_handler(GPIO_FTGPIO010_IRQ, gpio_irq_router); ++ ++ pr_info("GPIO module inserted\n"); ++ ++ return 0; ++} ++ ++static void __exit gpio_exit(void) ++{ ++ int i=0; ++ /* disable interrupt */ ++ GPIO_WRITEL(0x00000000UL, INT_ENABLE); ++ while(i ++#include ++#include ++#include ++ ++#include ++#include "cpe_ts.h" ++ ++ ++#include ++ ++#include ++#include ++ ++#define TOUCHSCREEN_IRQ 28 ++ ++#define ads_dbg( enabled, tagged, ...) 
\ ++ do{ \ ++ if( enabled){ \ ++ if( tagged) \ ++ printk( "[ %30s() ] ", __func__); \ ++ printk( __VA_ARGS__); \ ++ } \ ++ } while( 0) ++ ++#define TS_POLL_DELAY ( 1 * 1000000) /* ns delay before the first sample */ ++#define TS_POLL_PERIOD ( delay * 1000000) /* ns delay between samples */ ++ ++static int debug = 0; ++static int delay = 25; ++ ++module_param(debug, int, 0); ++module_param(delay, int, 0); ++ ++struct ads7846 ++{ ++ void __iomem * regs; ++ struct input_dev *input; ++ char phys[32]; ++ struct hrtimer timer; ++ int irq; ++ spinlock_t lock; ++ bool disabled; ++}; ++ ++struct ts_event ++{ ++ int x; ++ int y; ++ int z1, z2; ++ int Rt; ++}; ++ ++struct ads7846 touchscreen; ++ ++#define ADS_START ( 0x1UL << 7) ++#define ADS_A2A1A0_d_y ( 0x1UL << 4) /* differential */ ++#define ADS_A2A1A0_d_z1 ( 0x3UL << 4) /* differential */ ++#define ADS_A2A1A0_d_z2 ( 0x4UL << 4) /* differential */ ++#define ADS_A2A1A0_d_x ( 0x5UL << 4) /* differential */ ++#define ADS_12_BIT ( 0x0UL << 3) ++#define ADS_SER ( 0x1UL << 2) /* non-differential */ ++#define ADS_DFR ( 0x0UL << 2) /* differential */ ++#define ADS_PD10_PDOWN ( 0x0UL << 0) /* lowpower mode + penirq */ ++#define ADS_PD10_ADC_ON ( 0x1UL << 0) /* ADC on */ ++#define ADS_PD10_REF_ON ( 0x2UL << 0) /* vREF on + penirq */ ++#define ADS_PD10_ALL_ON ( 0x3UL << 0) /* ADC + vREF on */ ++ ++#define MAX_12BIT ( ( 0x1UL << 12) - 1) ++ ++#define READ_X ( ADS_A2A1A0_d_x | ADS_12_BIT | ADS_DFR) ++#define READ_Y ( ADS_A2A1A0_d_y | ADS_12_BIT | ADS_DFR) ++#define READ_Z1 ( ADS_A2A1A0_d_z1 | ADS_12_BIT | ADS_DFR) ++#define READ_Z2 ( ADS_A2A1A0_d_z2 | ADS_12_BIT | ADS_DFR) ++ ++static int ++read_val(struct ads7846 *ts, unsigned long cmd) ++{ ++ unsigned long data = 0; ++ int repeat = 5; ++ int i; ++ ++ ads_dbg(0, 1, "Queuing data: 0x%08lx\n", cmd << 16); ++ ++ for (i = 0; i < repeat; i++) ++ { ++ while (!(REG32(ts->regs + SSP_REG_SR) & SSP_SR_mskTFNF)); ++ REG32(ts->regs + SSP_REG_DR) = (ADS_START | cmd | ADS_PD10_ALL_ON) << 16; ++ } ++ ++ for (i = 0; i < repeat; i++) ++ { ++ while (!(REG32(ts->regs + SSP_REG_SR) & SSP_SR_mskRFVE)); ++ data = (REG32(ts->regs + SSP_REG_DR) >> 3) & 0xFFF; ++ } ++ ++ while (!(REG32(ts->regs + SSP_REG_SR) & SSP_SR_mskTFNF)); ++ REG32(ts->regs + SSP_REG_DR) = (ADS_START | cmd) << 16; ++ ++ while (!(REG32(ts->regs + SSP_REG_SR) & SSP_SR_mskRFVE)); ++ ++ data = (REG32(ts->regs + SSP_REG_DR) >> 3) & 0xFFF; ++ ads_dbg(0, 1, "CMD <%02lx> data: 0x%08lx( %ld)\n", cmd, data, data); ++ ++ return data; ++} ++ ++static int pendown(struct ads7846 *ts) ++{ ++ return read_val(ts, READ_Z1) > 40; ++} ++ ++static void report(struct ads7846 *ts, struct ts_event *e) ++{ ++ e->x = read_val(ts, READ_X); ++ e->y = read_val(ts, READ_Y); ++ e->z1 = read_val(ts, READ_Z1); ++ e->z2 = read_val(ts, READ_Z2); ++ ++ ads_dbg(debug, 1, "x: %4d, y: %4d, z1: %4d, z2: %4d\n", e->x, e->y, e->z1, e->z2); ++} ++ ++#define FILTER_LIMIT 35 ++ ++static enum hrtimer_restart ads7846_timer(struct hrtimer *handle) ++{ ++ struct ads7846 *ts = container_of(handle, struct ads7846, timer); ++ struct ts_event e; ++ struct irq_desc *desc = (struct irq_desc *)irq_get_irq_data((unsigned int)ts->irq); ++ static int xp = 0, yp = 0; ++ ++ if (ts->disabled) ++ return HRTIMER_NORESTART; ++ ++ if (!pendown(ts)) ++ { ++ ads_dbg(debug, 1, "Release\n"); ++ ++ input_report_key(ts->input, BTN_TOUCH, 0); ++ input_report_abs(ts->input, ABS_PRESSURE, 0); ++ input_sync(ts->input); ++ ++ if(desc->irq_data.chip->irq_ack) ++ desc->irq_data.chip->irq_ack(&desc->irq_data); ++ enable_irq(ts->irq); ++ 
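/*
 * Editor's note: the ts_event structure above reserves an Rt (touch
 * resistance, i.e. pressure) field that this driver never fills in; the
 * timer handler below reports a fixed ABS_PRESSURE of 50 instead.  A minimal
 * sketch of how Rt could be derived from the X/Z1/Z2 samples using the usual
 * ADS7846 plate-resistance formula.  The 400-ohm x_plate_ohms value is an
 * assumption chosen for illustration, not something taken from this driver
 * or its board files, and ads7846_calc_pressure is a hypothetical helper.
 */
static int ads7846_calc_pressure(const struct ts_event *e)
{
	const unsigned int x_plate_ohms = 400;	/* assumed board-specific value */
	unsigned long long rt;

	if (e->z1 == 0 || e->z2 <= e->z1)
		return 0;			/* pen up or invalid sample */

	/* Rt = Rx_plate * (X / 4096) * (Z2 / Z1 - 1) */
	rt = (unsigned long long)(e->z2 - e->z1) * e->x * x_plate_ohms;
	rt /= e->z1;
	rt >>= 12;				/* 12-bit ADC full scale */

	return (int)rt;
}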
return HRTIMER_NORESTART; ++ } ++ report(ts, &e); ++ ++#ifdef CONFIG_TOUCHSCREEN_CPE_TS_DEJITTER ++ if (abs(xp - e.x) > FILTER_LIMIT || abs(yp - e.y) > FILTER_LIMIT) ++ { ++#endif ++ input_report_key(ts->input, BTN_TOUCH, 1); ++ input_report_abs(ts->input, ABS_X, e.x); ++ input_report_abs(ts->input, ABS_Y, e.y); ++ input_report_abs(ts->input, ABS_PRESSURE, 50); ++ xp = e.x; ++ yp = e.y; ++#ifdef CONFIG_TOUCHSCREEN_CPE_TS_DEJITTER ++ } ++#endif ++ input_sync(ts->input); ++ hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), HRTIMER_MODE_REL); ++ ads_dbg(0, 1, "Leave\n"); ++ return HRTIMER_NORESTART; ++} ++ ++static irqreturn_t ads7846_irq(int irq, void *handle) ++{ ++ struct ads7846 *ts = handle; ++ ++ if (ts->disabled) ++ return IRQ_HANDLED; ++ ++ disable_irq_nosync(irq); ++ hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), HRTIMER_MODE_REL); ++ ++ return IRQ_HANDLED; ++} ++ ++static int xspi_init_hw(void __iomem *regs_base) ++{ ++ int rev = REG32(regs_base + SSP_REG_REV); ++ int reva = REG32(regs_base + SSP_REG_REV-0x20); ++ ++ if ((((rev & SSP_REV_mskMAJOR_REV) >> SSP_REV_offMAJOR_REV) != 1)&& ++ (((reva & SSP_REV_mskMAJOR_REV) >> SSP_REV_offMAJOR_REV) != 1)) ++ { ++ ads_dbg(1, 0, "ADS7846 Touchscreen controller initialized failed:\n" ++ "\tcannot detect Faraday SSP Controller\n"); ++ return -ENXIO; ++ } ++ REG32(regs_base + SSP_REG_CR2) |= (1UL << SSP_CR2_offSSPRST); ++ ++ REG32(regs_base + SSP_REG_CR1) = ++ (0UL << SSP_CR1_offPDL) | /* Padding Data Length */ ++ (23UL << SSP_CR1_offSDL) | /* Serial Data Length */ ++ (5UL << SSP_CR1_offSCLKDIV); /* SCLK Divider */ ++ ++ REG32(regs_base + SSP_REG_CR0) = ++ (1UL << SSP_CR0_offFFMT) | /* Frame Format */ ++ (3UL << SSP_CR0_offOPM) | /* Operation Mode */ ++ (0UL << SSP_CR0_offSCLKPO) | /* SCLK Polarity */ ++ (0UL << SSP_CR0_offSCLKPH); /* SCLK Phase */ ++ ++ REG32(regs_base + SSP_REG_CR2) |= (1UL << SSP_CR2_offTXFCLR) | (1UL << SSP_CR2_offRXFCLR); ++ REG32(regs_base + SSP_REG_CR2) |= (1UL << SSP_CR2_offSSPEN) | (1UL << SSP_CR2_offTXDOE); ++ return 0; ++} ++ ++static int ads7846_probe(struct platform_device *pdev) ++{ ++ struct ads7846 *ts = &touchscreen; ++ struct input_dev *input_dev; ++ int err = 0; ++ platform_set_drvdata(pdev, ts); ++ ++ ts->regs = ioremap(0x98b00000, 44); ++ err = xspi_init_hw(ts->regs); ++ if (err) ++ goto err_unmap; ++ input_dev = input_allocate_device(); ++ if (!input_dev) ++ { ++ err = -ENOMEM; ++ goto err_free_mem; ++ } ++ ++ ts->input = input_dev; ++ ++ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ ts->timer.function = ads7846_timer; ++ ++ input_dev->name = "ADS7846 Touchscreen"; ++ input_dev->phys = ts->phys; ++ ++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); ++ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); ++ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0); ++ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0); ++ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0); ++ ++ ts->irq = TOUCHSCREEN_IRQ; ++ ++ if (request_irq(ts->irq, ads7846_irq, IRQF_TRIGGER_RISING, "touch screen", ts)) ++ goto err_free_mem; ++ ++ err = input_register_device(input_dev); ++ if (err) ++ goto err_free_irq; ++ ++ spin_lock_init(&ts->lock); ++ return 0; ++err_free_irq: ++ free_irq(ts->irq, ts); ++err_free_mem: ++ input_free_device(input_dev); ++err_unmap: ++ iounmap(ts->regs); ++ ++ return err; ++} ++ ++static int __devexit ads7846_remove(struct platform_device *pdev) ++{ ++ struct ads7846 *ts = platform_get_drvdata(pdev); ++ ++ disable_irq(ts->irq); 
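/*
 * Editor's note: ads7846_probe() above hard-codes ioremap(0x98b00000, 44)
 * even though ads7846_resources (defined a little further down) already
 * describes the SSP register window via SSP_FTSSP010_PA_BASE.  A minimal
 * sketch, assuming only the standard platform-device API, of how the mapping
 * could be taken from that resource instead; ads7846_map_regs is a
 * hypothetical helper, not part of this driver.
 */
static void __iomem *ads7846_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* map the whole window described by the platform resource */
	return ioremap(res->start, resource_size(res));
}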
++ free_irq(ts->irq, ts); ++ iounmap(ts->regs); ++ input_unregister_device(ts->input); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int ads7846_suspend( struct platform_device *pdev, pm_message_t message) ++{ ++ struct ads7846 *ts = platform_get_drvdata(pdev); ++ ++ spin_lock_irq(&ts->lock); ++ ts->disabled = true; ++ disable_irq(ts->irq); ++ spin_unlock_irq(&ts->lock); ++ ++ return 0; ++} ++ ++static int ads7846_resume( struct platform_device *pdev) ++{ ++ struct ads7846 *ts = platform_get_drvdata(pdev); ++ ++ spin_lock_irq(&ts->lock); ++ ++ enable_irq(ts->irq); ++ ts->disabled = false; ++ ++ spin_unlock_irq(&ts->lock); ++ ++ return 0; ++} ++#else ++#define ads7846_suspend NULL ++#define ads7846_resume NULL ++#endif ++ ++static void platform_device_release(struct device *dev){ ++} ++ ++static struct resource ads7846_resources[] = ++{ ++ [0] = { ++ .start = SSP_FTSSP010_PA_BASE, ++ .end = SSP_FTSSP010_PA_LIMIT, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = TOUCHSCREEN_IRQ, //feed me! SSP_FTSSP010_IRQ in spec.h ++ .end = TOUCHSCREEN_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device ads7846_device = ++{ ++ .name = "ads7846", ++ .id = -1, ++ .resource = ads7846_resources, ++ .num_resources = ARRAY_SIZE(ads7846_resources), ++ .dev = { ++ .release = platform_device_release, ++ }, ++}; ++ ++#if 0 ++static struct platform_driver ads7846_driver = ++{ ++ .driver = { ++ .name = "ads7846", ++ .owner = THIS_MODULE, ++ }, ++ .probe = ads7846_probe, ++ .remove = __devexit_p(ads7846_remove), ++ .suspend = ads7846_suspend, ++ .resume = ads7846_resume, ++}; ++#else ++static struct platform_driver ads7846_driver = ++{ ++ .driver = { ++ .name = "ads7846", ++ }, ++ .probe = ads7846_probe, ++ .remove = __devexit_p(ads7846_remove), ++ .suspend = ads7846_suspend, ++ .resume = ads7846_resume, ++}; ++#endif ++ ++static int __init ads7846_init(void) ++{ ++ platform_device_register(&ads7846_device); ++ return platform_driver_register(&ads7846_driver); ++} ++ ++static void __exit ads7846_exit(void) ++{ ++ platform_device_unregister(&ads7846_device); ++ platform_driver_unregister(&ads7846_driver); ++} ++ ++module_init(ads7846_init); ++module_exit(ads7846_exit); ++MODULE_DESCRIPTION("ADS7846 TouchScreen Driver"); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/drivers/input/touchscreen/cpe_ts/cpe_ts.h linux-3.4.110/drivers/input/touchscreen/cpe_ts/cpe_ts.h +--- linux-3.4.110.orig/drivers/input/touchscreen/cpe_ts/cpe_ts.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/input/touchscreen/cpe_ts/cpe_ts.h 2016-04-07 10:20:51.050085202 +0200 +@@ -0,0 +1,218 @@ ++#ifndef SSP_FARADAY_H ++#define SSP_FARADAY_H ++ ++#define XILINX_SPI_NAME "faraday-spi" ++ ++/* ++ * Register definitions as per "OPB Serial Peripheral Interface ( SPI) ( v1.00e) ++ * Product Specification", DS464 ++ */ ++#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */ ++ ++#define XSPI_CR_ENABLE 0x02 ++#define XSPI_CR_MASTER_MODE 0x04 ++#define XSPI_CR_CPOL 0x08 ++#define XSPI_CR_CPHA 0x10 ++#define XSPI_CR_MODE_MASK ( XSPI_CR_CPHA | XSPI_CR_CPOL) ++#define XSPI_CR_TXFIFO_RESET 0x20 ++#define XSPI_CR_RXFIFO_RESET 0x40 ++#define XSPI_CR_MANUAL_SSELECT 0x80 ++#define XSPI_CR_TRANS_INHIBIT 0x100 ++ ++#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */ ++ ++#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ ++#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ ++#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */ ++#define XSPI_SR_TX_FULL_MASK 0x08 /* 
Transmit FIFO is full */ ++#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ ++ ++#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */ ++#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */ ++ ++#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ ++ ++/* Register definitions as per "OPB IPIF ( v3.01c) Product Specification", DS414 ++ * IPIF registers are 32 bit ++ */ ++#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */ ++#define XIPIF_V123B_GINTR_ENABLE 0x80000000 ++ ++#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */ ++#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */ ++ ++#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */ ++#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while disabled */ ++#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */ ++#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ ++#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ ++#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ ++ ++#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */ ++#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ ++ ++/*************************************************************************/ ++#if 0 ++#define SSP_REG_CR0 ( SSP_FTSSP010_VA_BASE + 0x00) /* SSP Control Register 0 */ ++#define SSP_REG_CR1 ( SSP_FTSSP010_VA_BASE + 0x04) /* SSP Control Register 1 */ ++#define SSP_REG_CR2 ( SSP_FTSSP010_VA_BASE + 0x08) /* SSP Control Register 2 */ ++#define SSP_REG_SR ( SSP_FTSSP010_VA_BASE + 0x0c) /* SSP Status Register */ ++#define SSP_REG_ICR ( SSP_FTSSP010_VA_BASE + 0x10) /* SSP Interrupt Control Register */ ++#define SSP_REG_ISR ( SSP_FTSSP010_VA_BASE + 0x14) /* SSP Interrupt Status Register */ ++#define SSP_REG_DR ( SSP_FTSSP010_VA_BASE + 0x18) /* SSP Data Register */ ++#define SSP_REG_ACL ( SSP_FTSSP010_VA_BASE + 0x20) /* AC-Link Slot Valid Register */ ++#define SSP_REG_REV ( SSP_FTSSP010_VA_BASE + 0x40) /* SSP Revision Register */ ++#define SSP_REG_FEA ( SSP_FTSSP010_VA_BASE + 0x44) /* SSP Feature Register */ ++#endif ++#define SSP_REG_CR0 0x00 /* SSP Control Register 0 */ ++#define SSP_REG_CR1 0x04 /* SSP Control Register 1 */ ++#define SSP_REG_CR2 0x08 /* SSP Control Register 2 */ ++#define SSP_REG_SR 0x0C /* SSP Status Register */ ++#define SSP_REG_ICR 0x10 /* SSP Interrupt Control Register */ ++#define SSP_REG_ISR 0x14 /* SSP Interrupt Status Register */ ++#define SSP_REG_DR 0x18 /* SSP Data Register */ ++#define SSP_REG_ACL 0x20 /* AC-Link Slot Valid Register */ ++#define SSP_REG_REV 0x60 /* SSP Revision Register */ ++#define SSP_REG_FEA 0x64 /* SSP Feature Register */ ++ ++#define SSP_CR0_offFFMT 12 /* Frame Format */ ++#define SSP_CR0_offFSDIST 8 /* Frame/Sync and Data Distance */ ++#define SSP_CR0_offLBM 7 /* Loopback Mode */ ++#define SSP_CR0_offLSB 6 /* Bit Sequence Indicator */ ++#define SSP_CR0_offFSPO 5 /* Frame/Sync Polarity */ ++#define SSP_CR0_offFSJSFY 4 /* Data Justify */ ++#define SSP_CR0_offOPM 2 /* Operation Mode */ ++#define SSP_CR0_offSCLKPO 1 /* SCLK Polarity */ ++#define SSP_CR0_offSCLKPH 0 /* SCLK Phase */ ++ ++#define SSP_CR0_mskFFMT ( 0x07UL << SSP_CR0_offFFMT) ++#define SSP_CR0_mskFSDIST ( 0x03UL << SSP_CR0_offFSDIST) ++#define SSP_CR0_mskLBM ( 0x01UL << SSP_CR0_offLBM) ++#define SSP_CR0_mskLSB ( 0x01UL << SSP_CR0_offLSB) ++#define SSP_CR0_mskFSPO ( 0x01UL << SSP_CR0_offFSPO) ++#define SSP_CR0_mskFSJSFY ( 0x01UL << SSP_CR0_offFSJSFY) ++#define SSP_CR0_mskOPM ( 0x03UL << SSP_CR0_offOPM) ++#define 
SSP_CR0_mskSCLKPO ( 0x01UL << SSP_CR0_offSCLKPO) ++#define SSP_CR0_mskSCLKPH ( 0x01UL << SSP_CR0_offSCLKPH) ++ ++#define SSP_CR1_offPDL 24 /* Padding Data Length */ ++#define SSP_CR1_offSDL 16 /* Serial Data Length */ ++#define SSP_CR1_offSCLKDIV 0 /* SCLK Divider */ ++ ++#define SSP_CR1_mskPDL ( 0xFFUL << SSP_CR1_offPDL) ++#define SSP_CR1_mskSDL ( 0x1FUL << SSP_CR1_offSDL) ++#define SSP_CR1_mskSCLKDIV ( 0xFFUL << SSP_CR1_offSCLKDIV) ++ ++#define SSP_CR2_offSSPRST 6 /* SSP Reset */ ++#define SSP_CR2_offACCRST 5 /* AC-Link Cold Reset Enable */ ++#define SSP_CR2_offACWRST 4 /* AC-Link Warm Reset Enable */ ++#define SSP_CR2_offTXFCLR 3 /* Transmit FIFO Clear */ ++#define SSP_CR2_offRXFCLR 2 /* Recieve FIFO clear */ ++#define SSP_CR2_offTXDOE 1 /* Transmit Data Output Enable */ ++#define SSP_CR2_offSSPEN 0 /* The SSP Enable */ ++ ++#define SSP_CR2_mskSSPRST ( 0x01UL << SSP_CR2_offSSPRST) ++#define SSP_CR2_mskACCRST ( 0x01UL << SSP_CR2_offACCRST) ++#define SSP_CR2_mskACWRST ( 0x01UL << SSP_CR2_offACWRST) ++#define SSP_CR2_mskTXFCLR ( 0x01UL << SSP_CR2_offTXFCLR) ++#define SSP_CR2_mskRXFCLR ( 0x01UL << SSP_CR2_offRXFCLR) ++#define SSP_CR2_mskTXDOE ( 0x01UL << SSP_CR2_offTXDOE) ++#define SSP_CR2_mskSSPEN ( 0x01UL << SSP_CR2_offSSPEN) ++ ++#define SSP_SR_offTFVE 12 /* Transmit FIFO Valid Entries */ ++#define SSP_SR_offRFVE 4 /* Recieve FIFO Valid Entries */ ++#define SSP_SR_offBUSY 2 /* Busy Indicator */ ++#define SSP_SR_offTFNF 1 /* Transmit FIFO not full */ ++#define SSP_SR_offRFF 0 /* Recieve FIFO full */ ++ ++#define SSP_SR_mskTFVE ( 0x1FUL << SSP_SR_offTFVE) ++#define SSP_SR_mskRFVE ( 0x1FUL << SSP_SR_offRFVE) ++#define SSP_SR_mskBUSY ( 0x01UL << SSP_SR_offBUSY) ++#define SSP_SR_mskTFNF ( 0x01UL << SSP_SR_offTFNF) ++#define SSP_SR_mskRFF ( 0x01UL << SSP_SR_offRFF) ++ ++#define SSP_ICR_offTFTHOD 12 /* Transmit FIFO Threshold */ ++#define SSP_ICR_offRFTHOD 8 /* Recieve FIFO Threshold */ ++#define SSP_ICR_offAC97FCEN 6 /* AC97 Frame Complete */ ++#define SSP_ICR_offTFDMAEN 5 /* Transmit DMA Request Enable */ ++#define SSP_ICR_offRFDMAEN 4 /* Recieve DMA Request Enable */ ++#define SSP_ICR_offTFTHIEN 3 /* Transmit FIFO Threshold Interrupt */ ++#define SSP_ICR_offRFTHIEN 2 /* Recieve FIFO Threshold Interrupt */ ++#define SSP_ICR_offTFURIEN 1 /* Transmit FIFO Underrun Interrupt Enable */ ++#define SSP_ICR_offRFORIEN 0 /* Recieve FIFO Overrun Interrupt Enable */ ++ ++#define SSP_ICR_mskTFTHOD ( 0x0FUL << SSP_ICR_offTFTHOD) ++#define SSP_ICR_mskRFTHOD ( 0x0FUL << SSP_ICR_offRFTHOD) ++#define SSP_ICR_mskAC97FCEN ( 0x01UL << SSP_ICR_offAC97FCEN) ++#define SSP_ICR_mskTFDMAEN ( 0x01UL << SSP_ICR_offTFDMAEN) ++#define SSP_ICR_mskRFDMAEN ( 0x01UL << SSP_ICR_offRFDMAEN) ++#define SSP_ICR_mskTFTHIEN ( 0x01UL << SSP_ICR_offTFTHIEN) ++#define SSP_ICR_mskRFTHIEN ( 0x01UL << SSP_ICR_offRFTHIEN) ++#define SSP_ICR_mskTFURIEN ( 0x01UL << SSP_ICR_offTFURIEN) ++#define SSP_ICR_mskRFORIEN ( 0x01UL << SSP_ICR_offRFORIEN) ++ ++#define SSP_ISR_offAC97FCI 4 /* AC97 Frame Complete Interrupt */ ++#define SSP_ISR_offTFTHI 3 /* Transmit FIFO Threshold Interrupt */ ++#define SSP_ISR_offRFTHI 2 /* Recieve FIFO Threshold Interrupt */ ++#define SSP_ISR_offTFURI 1 /* Transmit FIFO underrun Interrupt */ ++#define SSP_ISR_offRFORI 0 /* Recieve FIFO Overun Interrupt */ ++ ++#define SSP_ISR_mskAC97FCI ( 0x01UL << SSP_ISR_offAC97FCI) ++#define SSP_ISR_mskTFTHI ( 0x01UL << SSP_ISR_offTFTHI) ++#define SSP_ISR_mskRFTHI ( 0x01UL << SSP_ISR_offRFTHI) ++#define SSP_ISR_mskTFURI ( 0x01UL << SSP_ISR_offTFURI) ++#define SSP_ISR_mskRFORI ( 
0x01UL << SSP_ISR_offRFORI) ++ ++#define SSP_ACL_offSLOT1V 14 /* The 1st Slot is Valid */ ++#define SSP_ACL_offSLOT2V 13 /* The 2nd Slot is Valid */ ++#define SSP_ACL_offSLOT3V 12 /* The 3th Slot is Valid */ ++#define SSP_ACL_offSLOT4V 11 /* The 4th Slot is Valid */ ++#define SSP_ACL_offSLOT5V 10 /* The 5th Slot is Valid */ ++#define SSP_ACL_offSLOT6V 9 /* The 6th Slot is Valid */ ++#define SSP_ACL_offSLOT7V 8 /* The 7th Slot is Valid */ ++#define SSP_ACL_offSLOT8V 7 /* The 8th Slot is Valid */ ++#define SSP_ACL_offSLOT9V 6 /* The 9th Slot is Valid */ ++#define SSP_ACL_offSLOT10V 5 /* The 10th Slot is Valid */ ++#define SSP_ACL_offSLOT11V 4 /* The 11th Slot is Valid */ ++#define SSP_ACL_offSLOT12V 3 /* The 12th Slot is Valid */ ++#define SSP_ACL_offCODECID 0 /* Codec ID, which will be shifted out as tag slot */ ++ ++#define SSP_ACL_mskSLOT1V ( 0x01UL << SSP_ACL_offSLOT1V) ++#define SSP_ACL_mskSLOT2V ( 0x01UL << SSP_ACL_offSLOT2V) ++#define SSP_ACL_mskSLOT3V ( 0x01UL << SSP_ACL_offSLOT3V) ++#define SSP_ACL_mskSLOT4V ( 0x01UL << SSP_ACL_offSLOT4V) ++#define SSP_ACL_mskSLOT5V ( 0x01UL << SSP_ACL_offSLOT5V) ++#define SSP_ACL_mskSLOT6V ( 0x01UL << SSP_ACL_offSLOT6V) ++#define SSP_ACL_mskSLOT7V ( 0x01UL << SSP_ACL_offSLOT7V) ++#define SSP_ACL_mskSLOT8V ( 0x01UL << SSP_ACL_offSLOT8V) ++#define SSP_ACL_mskSLOT9V ( 0x01UL << SSP_ACL_offSLOT9V) ++#define SSP_ACL_mskSLOT10V ( 0x01UL << SSP_ACL_offSLOT10V) ++#define SSP_ACL_mskSLOT11V ( 0x01UL << SSP_ACL_offSLOT11V) ++#define SSP_ACL_mskSLOT12V ( 0x01UL << SSP_ACL_offSLOT12V) ++#define SSP_ACL_mskCODECID ( 0x03UL << SSP_ACL_offCODECID) ++ ++#define SSP_REV_offMAJOR_REV 16 /* Major Revision Number */ ++#define SSP_REV_offMINOR_REV 8 /* Minor Revision Number */ ++#define SSP_REV_offREL_REV 0 /* Release Number */ ++ ++#define SSP_REV_mskMAJOR_REV ( 0xFFUL << SSP_REV_offMAJOR_REV) ++#define SSP_REV_mskMINOR_REV ( 0xFFUL << SSP_REV_offMINOR_REV) ++#define SSP_REV_mskREL_REV ( 0xFFUL << SSP_REV_offREL_REV) ++ ++#define SSP_FEA_offSSP_FCFG 27 /* The SSP Functional Configurations */ ++#define SSP_FEA_offSPIMWR_FCFG 26 /* Motorola's SPI and National Semiconductor's Microwire Configurations */ ++#define SSP_FEA_offI2S_FCFG 25 /* Philips's I2S Functional Configurations */ ++#define SSP_FEA_offAC97_FCFG 24 /* Intel's AC-Link Functional Configurations */ ++#define SSP_FEA_offTXFIFO_WIDTH 16 /* Transmit FIFO Size Configurations */ ++#define SSP_FEA_offRXFIFO_WIDTH 8 /* Recieve FIFO Size Configuration */ ++#define SSP_FEA_offFIFO_WIDTH 0 /* Transmit/Recieve FIFO Width */ ++ ++#define SSP_FEA_mskSSP_FCFG ( 0x01UL << SSP_FEA_offSSP_FCFG) ++#define SSP_FEA_mskSPIMWR_FCFG ( 0x01UL << SSP_FEA_offSPIMWR_FCFG) ++#define SSP_FEA_mskI2S_FCFG ( 0x01UL << SSP_FEA_offI2S_FCFG) ++#define SSP_FEA_mskAC97_FCFG ( 0x01UL << SSP_FEA_offAC97_FCFG) ++#define SSP_FEA_mskTXFIFO_WIDTH ( 0xFFUL << SSP_FEA_offTXFIFO_WIDTH) ++#define SSP_FEA_mskRXFIFO_WIDTH ( 0xFFUL << SSP_FEA_offRXFIFO_WIDTH) ++#define SSP_FEA_mskFIFO_WIDTH ( 0xFFUL << SSP_FEA_offFIFO_WIDTH) ++ ++#endif /* SSP_FARADAY */ +diff -Nur linux-3.4.110.orig/drivers/input/touchscreen/cpe_ts/Makefile linux-3.4.110/drivers/input/touchscreen/cpe_ts/Makefile +--- linux-3.4.110.orig/drivers/input/touchscreen/cpe_ts/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/input/touchscreen/cpe_ts/Makefile 2016-04-07 10:20:51.050085202 +0200 +@@ -0,0 +1 @@ ++obj-$(CONFIG_TOUCHSCREEN_CPE_TS) += cpe_ts.o +diff -Nur linux-3.4.110.orig/drivers/input/touchscreen/Kconfig linux-3.4.110/drivers/input/touchscreen/Kconfig +--- 
linux-3.4.110.orig/drivers/input/touchscreen/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/input/touchscreen/Kconfig 2016-04-07 10:20:51.050085202 +0200 +@@ -86,6 +86,26 @@ + To compile this driver as a module, choose M here: the + module will be called ad7879-spi. + ++config TOUCHSCREEN_CPE_TS ++ tristate "Touchscreen Driver for AG101/XC5" ++ help ++ This driver directly accesses SPI controller to communicate with ++ the ads7846 chip. ++ ++ Once we have a SPI controller driver ++ , we can adopt to the SPI ++ framework that kernel provides. ++ ++config TOUCHSCREEN_CPE_TS_DEJITTER ++ bool "Dejitter Detection" ++ depends on TOUCHSCREEN_CPE_TS ++ default y ++ help ++ Say Y here to enable dejitter detection in AG101/Leopard Touchscreen Driver. ++ ++ If unsure, say y. ++ ++ + config TOUCHSCREEN_ATMEL_MXT + tristate "Atmel mXT I2C Touchscreen" + depends on I2C +diff -Nur linux-3.4.110.orig/drivers/input/touchscreen/Makefile linux-3.4.110/drivers/input/touchscreen/Makefile +--- linux-3.4.110.orig/drivers/input/touchscreen/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/input/touchscreen/Makefile 2016-04-07 10:20:51.050085202 +0200 +@@ -69,3 +69,4 @@ + obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o + obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o + obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o ++obj-$(CONFIG_TOUCHSCREEN_CPE_TS) += cpe_ts/ +diff -Nur linux-3.4.110.orig/drivers/mmc/core/bus.c linux-3.4.110/drivers/mmc/core/bus.c +--- linux-3.4.110.orig/drivers/mmc/core/bus.c 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/mmc/core/bus.c 2016-04-07 10:20:51.050085202 +0200 +@@ -26,7 +26,9 @@ + #include "bus.h" + + #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) +- ++#ifdef CONFIG_MMC_TEST ++static struct mmc_driver *mmc_test_drv; ++#endif + static ssize_t mmc_type_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -109,6 +111,11 @@ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = mmc_dev_to_card(dev); + ++ #ifdef CONFIG_MMC_TEST ++ mmc_test_drv->probe(card); ++ printk("debug mmc_bus_probe\n"); ++ #endif ++ + return drv->probe(card); + } + +@@ -200,6 +207,11 @@ + int mmc_register_driver(struct mmc_driver *drv) + { + drv->drv.bus = &mmc_bus_type; ++ #ifdef CONFIG_MMC_TEST ++ printk("debug defined config_mmc_test in mmc_register_driver\n"); ++ if(!strcmp(drv->drv.name,"mmc_test")) ++ mmc_test_drv = drv; ++ #endif + return driver_register(&drv->drv); + } + +diff -Nur linux-3.4.110.orig/drivers/mmc/host/ftsdc010.c linux-3.4.110/drivers/mmc/host/ftsdc010.c +--- linux-3.4.110.orig/drivers/mmc/host/ftsdc010.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/mmc/host/ftsdc010.c 2016-04-07 10:20:51.050085202 +0200 +@@ -0,0 +1,1586 @@ ++/* drivers/mmc/host/ftsdc010.c ++ * Copyright (C) 2010 Andestech ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "ftsdc010.h" ++ ++#define DRIVER_NAME "ftsdc010" ++ ++#define REG_READ(addr) readl((host->base + addr)) ++#define REG_WRITE(data, addr) writel((data), (host->base + addr)) ++ ++#define APB_CLK_IN (AHB_CLK_IN / 2) ++ ++enum dbg_channels { ++ dbg_err = (1 << 0), ++ dbg_debug = (1 << 1), ++ dbg_info = (1 << 2), ++ dbg_irq = (1 << 3), ++ dbg_sg = (1 << 4), ++ dbg_dma = (1 << 5), ++ dbg_pio = (1 << 6), ++ dbg_fail = (1 << 7), ++ dbg_conf = (1 << 8), ++}; ++ ++static struct workqueue_struct *mywq; ++ ++static const int dbgmap_err = dbg_fail; ++static const int dbgmap_info = dbg_info | dbg_conf; ++static const int dbgmap_debug = dbg_err | dbg_debug | dbg_info | dbg_conf; ++#if 1 ++#define dbg(host, channels, args...) \ ++ do { \ ++ if (dbgmap_err & channels) \ ++ dev_err(&host->pdev->dev, args); \ ++ else if (dbgmap_info & channels) \ ++ dev_info(&host->pdev->dev, args); \ ++ else if (dbgmap_debug & channels) \ ++ dev_dbg(&host->pdev->dev, args); \ ++ } while (0) ++#endif ++#if 0 ++#define dbg(host, channels, args...) \ ++ do { \ ++ printk(KERN_INFO "%s: ", "ftsdc");\ ++ printk(args); \ ++ } while(0) ++#endif ++ ++static void finalize_request(struct ftsdc_host *host); ++static void ftsdc_send_request(struct mmc_host *mmc); ++ ++#ifdef CONFIG_MMC_DEBUG ++ ++static void dbg_dumpregs(struct ftsdc_host *host, char *prefix) ++{ ++ u32 con, cmdarg, r0, r1, r2, r3, rcmd, dcon, dtimer, ++ dlen, sta, clr, imask, pcon, ccon, bwidth, scon1, ++ scon2, ssta, fea; ++ ++ con = REG_READ(SDC_CMD_REG); ++ cmdarg = REG_READ(SDC_ARGU_REG); ++ r0 = REG_READ(SDC_RESPONSE0_REG); ++ r1 = REG_READ(SDC_RESPONSE1_REG); ++ r2 = REG_READ(SDC_RESPONSE2_REG); ++ r3 = REG_READ(SDC_RESPONSE3_REG); ++ rcmd = REG_READ(SDC_RSP_CMD_REG); ++ dcon = REG_READ(SDC_DATA_CTRL_REG); ++ dtimer = REG_READ(SDC_DATA_TIMER_REG); ++ dlen = REG_READ(SDC_DATA_LEN_REG); ++ sta = REG_READ(SDC_STATUS_REG); ++ clr = REG_READ(SDC_CLEAR_REG); ++ imask = REG_READ(SDC_INT_MASK_REG); ++ pcon = REG_READ(SDC_POWER_CTRL_REG); ++ ccon = REG_READ(SDC_CLOCK_CTRL_REG); ++ bwidth = REG_READ(SDC_BUS_WIDTH_REG); ++ scon1 = REG_READ(SDC_SDIO_CTRL1_REG); ++ scon2 = REG_READ(SDC_SDIO_CTRL2_REG); ++ ssta = REG_READ(SDC_SDIO_STATUS_REG); ++ fea = REG_READ(SDC_FEATURE_REG); ++ ++ dbg(host, dbg_debug, "%s CON:[%08x] STA:[%08x] INT:[%08x], PWR:[%08x], CLK:[%08x]\n", ++ prefix, con, sta, imask, pcon, ccon); ++ ++ dbg(host, dbg_debug, "%s DCON:[%08x] DTIME:[%08x]" ++ " DLEN:[%08x] DWIDTH:[%08x]\n", ++ prefix, dcon, dtimer, dlen, bwidth); ++ ++ dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]" ++ " R2:[%08x] R3:[%08x]\n", ++ prefix, r0, r1, r2, r3); ++ ++ dbg(host, dbg_debug, "%s SCON1:[%08x] SCON2:[%08x]" ++ " SSTA:[%08x] FEA:[%08x]\n", ++ prefix, scon1, scon2, ssta, fea); ++} ++ ++static void prepare_dbgmsg(struct ftsdc_host *host, struct mmc_command *cmd, ++ int stop) ++{ ++ snprintf(host->dbgmsg_cmd, 300, ++ "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u", ++ host->ccnt, (stop ? 
" (STOP)" : ""), ++ cmd->opcode, cmd->arg, cmd->flags, cmd->retries); ++ ++ if (cmd->data) { ++ snprintf(host->dbgmsg_dat, 300, ++ "#%u bsize:%u blocks:%u bytes:%u", ++ host->dcnt, cmd->data->blksz, ++ cmd->data->blocks, ++ cmd->data->blocks * cmd->data->blksz); ++ } else { ++ host->dbgmsg_dat[0] = '\0'; ++ } ++} ++ ++static void dbg_dumpcmd(struct ftsdc_host *host, struct mmc_command *cmd, ++ int fail) ++{ ++ unsigned int dbglvl = fail ? dbg_fail : dbg_debug; ++ ++ if (!cmd) ++ return; ++ ++ if (cmd->error == 0) { ++ dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n", ++ host->dbgmsg_cmd, cmd->resp[0]); ++ } else { ++ dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n", ++ cmd->error, host->dbgmsg_cmd, host->status); ++ } ++ ++ if (!cmd->data) ++ return; ++ ++ if (cmd->data->error == 0) { ++ dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat); ++ } else { ++ dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n", ++ cmd->data->error, host->dbgmsg_dat, ++ REG_READ(SDC_DATA_LEN_REG)); ++ } ++} ++#else ++static void dbg_dumpcmd(struct ftsdc_host *host, ++ struct mmc_command *cmd, int fail) { } ++ ++static void prepare_dbgmsg(struct ftsdc_host *host, struct mmc_command *cmd, ++ int stop) { } ++ ++static void dbg_dumpregs(struct ftsdc_host *host, char *prefix) { } ++ ++#endif /* CONFIG_MMC_DEBUG */ ++ ++static inline bool ftsdc_dmaexist(struct ftsdc_host *host) ++{ ++ return (host->dma_req != NULL); ++} ++ ++static inline u32 enable_imask(struct ftsdc_host *host, u32 imask) ++{ ++ u32 newmask; ++ ++#ifdef CONFIG_MMC_DEBUG ++ if (imask & SDC_STATUS_REG_SDIO_INTR) printk("\n*** E ***\n"); ++#endif ++ newmask = REG_READ(SDC_INT_MASK_REG); ++ newmask |= imask; ++ ++ REG_WRITE(newmask, SDC_INT_MASK_REG); ++ ++ return newmask; ++} ++ ++static inline u32 disable_imask(struct ftsdc_host *host, u32 imask) ++{ ++ u32 newmask; ++ ++#ifdef CONFIG_MMC_DEBUG ++ if (imask & SDC_STATUS_REG_SDIO_INTR) printk("\n*** D ***\n"); ++#endif ++ newmask = REG_READ(SDC_INT_MASK_REG); ++ newmask &= ~imask; ++ ++ REG_WRITE(newmask, SDC_INT_MASK_REG); ++ ++ return newmask; ++} ++ ++static inline void clear_imask(struct ftsdc_host *host) ++{ ++ u32 mask = REG_READ(SDC_INT_MASK_REG); ++ ++ /* preserve the SDIO IRQ mask state */ ++ mask &= (SDC_INT_MASK_REG_SDIO_INTR | SDC_INT_MASK_REG_CARD_CHANGE); ++ REG_WRITE(mask, SDC_INT_MASK_REG); ++} ++ ++//static void ftsdc_check_sdio_irq(struct ftsdc_host *host) ++//{ ++// if (host->sdio_irqen) { ++// u32 con = REG_READ(SDC_STATUS_REG); ++// if (con & SDC_STATUS_REG_SDIO_INTR) { ++// printk(KERN_DEBUG "%s: signalling irq\n", __func__); ++// mmc_signal_sdio_irq(host->mmc); ++// } ++// } ++//} ++ ++static inline void get_data_buffer(struct ftsdc_host *host) ++{ ++ struct scatterlist *sg; ++ ++ BUG_ON(host->buf_sgptr >= host->mrq->data->sg_len); ++ ++ sg = &host->mrq->data->sg[host->buf_sgptr]; ++ ++ host->buf_bytes = sg->length; ++ host->buf_ptr = host->dodma ? (u32 *)sg->dma_address : sg_virt(sg); ++ host->buf_sgptr++; ++} ++ ++static inline u32 cal_blksz(unsigned int blksz) ++{ ++ u32 blksztwo = 0; ++ ++ while (blksz >>= 1) ++ blksztwo++; ++ ++ return blksztwo; ++} ++ ++/** ++ * ftsdc_enable_irq - enable IRQ, after having disabled it. ++ * @host: The device state. ++ * @more: True if more IRQs are expected from transfer. ++ * ++ * Enable the main IRQ if needed after it has been disabled. 
++ * ++ * The IRQ can be one of the following states: ++ * - enable after data read/write ++ * - disable when handle data read/write ++ */ ++static void ftsdc_enable_irq(struct ftsdc_host *host, bool enable) ++{ ++ unsigned long flags; ++ local_irq_save(flags); ++ ++ host->irq_enabled = enable; ++ ++ if (enable) ++ enable_irq(host->irq); ++ else ++ disable_irq(host->irq); ++ ++ local_irq_restore(flags); ++} ++ ++static void do_pio_read(struct ftsdc_host *host) ++{ ++ u32 fifo; ++ u32 fifo_words; ++ u32 *ptr; ++ u32 status; ++ u32 retry = 0; ++ ++ ++ BUG_ON(host->buf_bytes != 0); ++ ++ while (host->buf_sgptr < host->mrq->data->sg_len) { ++ get_data_buffer(host); ++ ++ dbg(host, dbg_pio, ++ "pio_read(): new target: [%i]@[%p]\n", ++ host->buf_bytes, host->buf_ptr); ++ ++ while (host->buf_bytes) { ++ status = REG_READ(SDC_STATUS_REG); ++ ++ if (status & SDC_STATUS_REG_FIFO_OVERRUN) { ++ fifo = host->fifo_len > host->buf_bytes ? ++ host->buf_bytes : host->fifo_len; ++ ++#if 1 ++ dbg(host, dbg_pio, ++ "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", ++ fifo, host->buf_bytes, ++ REG_READ(SDC_DATA_LEN_REG)); ++#endif ++ ++ host->buf_bytes -= fifo; ++ host->buf_count += fifo; ++ ++ fifo_words = fifo >> 2; ++ ptr = host->buf_ptr; ++ while (fifo_words--) ++ *ptr++ = REG_READ(SDC_DATA_WINDOW_REG); ++ ++ host->buf_ptr = ptr; ++ //ADD by river 2010.10.26 for adding some delays for SD card to put data into FIFO again ++ //mdelay(1); ++ udelay(800); ++ //End ADD by river 2010.10.26 for adding some delays for SD card to put data into FIFO again ++ ++ /* sdio allow non-power-of-2 blksz */ ++ if (fifo & 3) { ++ u32 n = fifo & 3; ++ u32 data = REG_READ(SDC_DATA_WINDOW_REG); ++ u8 *p = (u8 *)host->buf_ptr; ++ ++ while (n--) { ++ *p++ = data; ++ data >>= 8; ++ } ++ } ++ } else { ++ udelay(1); ++ if (++retry >= SDC_PIO_RETRY) { ++ host->mrq->data->error = -EIO; ++ goto err; ++ } ++ } ++ } ++ } ++ ++err: ++ ++ host->buf_active = XFER_NONE; ++ host->complete_what = COMPLETION_FINALIZE; ++} ++ ++static void do_pio_write(struct ftsdc_host *host) ++{ ++ u32 fifo; ++ u32 *ptr; ++ u32 status; ++ u32 retry = 0; ++ ++ BUG_ON(host->buf_bytes != 0); ++ ++ while (host->buf_sgptr < host->mrq->data->sg_len) { ++ get_data_buffer(host); ++ ++ dbg(host, dbg_pio, ++ "pio_write(): new source: [%i]@[%p]\n", ++ host->buf_bytes, host->buf_ptr); ++ ++ while (host->buf_bytes) { ++ status = REG_READ(SDC_STATUS_REG); ++ if (status & SDC_STATUS_REG_FIFO_UNDERRUN) { ++ fifo = host->fifo_len > host->buf_bytes ? ++ host->buf_bytes : host->fifo_len; ++ ++ dbg(host, dbg_pio, ++ "pio_write(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", ++ fifo, host->buf_bytes, ++ REG_READ(SDC_DATA_LEN_REG)); ++ ++ host->buf_bytes -= fifo; ++ host->buf_count += fifo; ++ ++ fifo = (fifo + 3) >> 2; ++ ptr = host->buf_ptr; ++ while (fifo--) { ++ REG_WRITE(*ptr, SDC_DATA_WINDOW_REG); ++ ptr++; ++ } ++ host->buf_ptr = ptr; ++ } else { ++ udelay(1); ++ if (++retry >= SDC_PIO_RETRY) { ++ host->mrq->data->error = -EIO; ++ goto err; ++ } ++ } ++ } ++ } ++ ++err: ++ host->buf_active = XFER_NONE; ++ host->complete_what = COMPLETION_FINALIZE; ++} ++ ++static void do_dma_access(struct ftsdc_host *host) ++{ ++ int res; ++ unsigned long timeout; ++ dmad_chreq *req = host->dma_req; ++ dmad_drb *drb = 0; ++ ++ while (host->buf_sgptr < host->mrq->data->sg_len) { ++ ++ INIT_COMPLETION(host->dma_complete); ++ get_data_buffer(host); ++ ++ dbg(host, dbg_dma, ++ "dma_%s(): new target: [%i]@[%p]\n", ++ host->buf_active == XFER_READ ? 
"read" : "write", ++ host->buf_bytes, host->buf_ptr); ++ ++ res = dmad_alloc_drb(req, &drb); ++ if (res != 0 || (drb == 0)) { ++ dbg(host, dbg_err, "%s() Failed to allocate dma request block!\n", __func__); ++ host->mrq->data->error = -ENODEV; ++ goto err; ++ } ++ ++ drb->addr0 = SDC_FTSDC010_0_PA_BASE + SDC_DATA_WINDOW_REG; ++ drb->addr1 = (dma_addr_t)host->buf_ptr; ++ drb->req_cycle = dmad_bytes_to_cycles(req, host->buf_bytes); ++ drb->sync = &host->dma_complete; ++ ++ timeout = SDC_TIMEOUT_BASE*((host->buf_bytes+511)>>9); ++ ++ res = dmad_submit_request(req, drb, 1); ++ if (res != 0) { ++ dbg(host, dbg_err, "%s() Failed to submit dma request block!\n", __func__); ++ host->mrq->data->error = -ENODEV; ++ goto err; ++ } ++ ++ dbg(host, dbg_err, "reach here!\n"); ++ if (wait_for_completion_timeout(&host->dma_complete, timeout) == 0) { ++ dbg(host, dbg_err, "%s: read timeout\n", __func__); ++ host->mrq->data->error = -ETIMEDOUT; ++ goto err; ++ } ++ } ++ ++ host->dma_finish = true; ++err: ++ host->buf_active = XFER_NONE; ++ host->complete_what = COMPLETION_FINALIZE; ++} ++ ++static void ftsdc_work(struct work_struct *work) ++{ ++ struct ftsdc_host *host = ++ container_of(work, struct ftsdc_host, work); ++ ++ ftsdc_enable_irq(host, false); ++ ++ if (host->dodma) { ++ do_dma_access(host); ++ } else { ++ if (host->buf_active == XFER_WRITE) ++ do_pio_write(host); ++ ++ if (host->buf_active == XFER_READ) ++ do_pio_read(host); ++ } ++ ++ tasklet_schedule(&host->pio_tasklet); ++ ftsdc_enable_irq(host, true); ++} ++ ++ ++static void pio_tasklet(unsigned long data) ++{ ++ struct ftsdc_host *host = (struct ftsdc_host *) data; ++ ++ if (host->complete_what == COMPLETION_XFER_PROGRESS) { ++ queue_work(mywq, (struct work_struct *)&host->work); ++ return; ++ } ++ ++ if (host->complete_what == COMPLETION_FINALIZE) { ++ clear_imask(host); ++ if (host->buf_active != XFER_NONE) { ++ dbg(host, dbg_err, "unfinished %s " ++ "- buf_count:[%u] buf_bytes:[%u]\n", ++ (host->buf_active == XFER_READ) ? "read" : "write", ++ host->buf_count, host->buf_bytes); ++ ++ if (host->mrq->data) ++ host->mrq->data->error = -EINVAL; ++ } ++ ++ finalize_request(host); ++ } ++} ++ ++ ++static void finalize_request(struct ftsdc_host *host) ++{ ++ struct mmc_request *mrq = host->mrq; ++ struct mmc_command *cmd; ++ u32 con; ++ int debug_as_failure = 0; ++ ++ if (host->complete_what != COMPLETION_FINALIZE) ++ return; ++ ++ if (!mrq) ++ return; ++ ++ cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; ++ ++ if (cmd->data && (cmd->error == 0) && ++ (cmd->data->error == 0)) { ++ if (host->dodma && (!host->dma_finish)) { ++ dbg(host, dbg_dma, "DMA Missing (%d)!\n", ++ host->dma_finish); ++ return; ++ } ++ host->dodma = false; ++ } ++ ++ /* Read response from controller. 
*/ ++ if (cmd->flags & MMC_RSP_136) { ++ cmd->resp[3] = REG_READ(SDC_RESPONSE0_REG); ++ cmd->resp[2] = REG_READ(SDC_RESPONSE1_REG); ++ cmd->resp[1] = REG_READ(SDC_RESPONSE2_REG); ++ cmd->resp[0] = REG_READ(SDC_RESPONSE3_REG); ++ } else if (cmd->flags & MMC_RSP_PRESENT) { ++ cmd->resp[0] = REG_READ(SDC_RESPONSE0_REG); ++ } ++ ++ if (cmd->error) ++ debug_as_failure = 1; ++ ++ if (cmd->data && cmd->data->error) ++ debug_as_failure = 1; ++ ++ dbg_dumpcmd(host, cmd, debug_as_failure); ++ ++ clear_imask(host); ++ con = REG_READ(SDC_STATUS_REG); ++ con &= ~SDC_CLEAR_REG_SDIO_INTR; ++ REG_WRITE(con, SDC_CLEAR_REG); ++ ++ if (cmd->data && cmd->error) ++ cmd->data->error = cmd->error; ++ ++ if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { ++ host->cmd_is_stop = 1; ++ ftsdc_send_request(host->mmc); ++ return; ++ } ++ ++ /* If we have no data transfer we are finished here */ ++ if (!mrq->data) ++ goto request_done; ++ ++ /* Calulate the amout of bytes transfer if there was no error */ ++ if (mrq->data->error == 0) { ++ mrq->data->bytes_xfered = ++ (mrq->data->blocks * mrq->data->blksz); ++ } else { ++ mrq->data->bytes_xfered = 0; ++ } ++ ++request_done: ++ host->complete_what = COMPLETION_NONE; ++ host->mrq = NULL; ++ ++ host->last_opcode = mrq->cmd->opcode; ++ mmc_request_done(host->mmc, mrq); ++} ++ ++static void ftsdc_send_command(struct ftsdc_host *host, ++ struct mmc_command *cmd) ++{ ++ u32 ccon = 0; ++ u32 newmask = 0; ++ u32 scon; ++ ++ if (cmd->data) { ++ host->complete_what = COMPLETION_XFER_PROGRESS; ++ newmask |= SDC_INT_MASK_REG_RSP_TIMEOUT; ++ } else if (cmd->flags & MMC_RSP_PRESENT) { ++ host->complete_what = COMPLETION_RSPFIN; ++ newmask |= SDC_INT_MASK_REG_RSP_TIMEOUT; ++ } else { ++ host->complete_what = COMPLETION_CMDSENT; ++ newmask |= SDC_INT_MASK_REG_CMD_SEND; ++ } ++ ++ ccon |= cmd->opcode & SDC_CMD_REG_INDEX; ++ ccon |= SDC_CMD_REG_CMD_EN; ++ ++ if (cmd->flags & MMC_RSP_PRESENT) { ++ ccon |= SDC_CMD_REG_NEED_RSP; ++ newmask |= SDC_INT_MASK_REG_RSP_CRC_OK | ++ SDC_INT_MASK_REG_RSP_CRC_FAIL; ++ } ++ ++ if (cmd->flags & MMC_RSP_136) ++ ccon |= SDC_CMD_REG_LONG_RSP; ++ ++ /* applicatiion specific cmd must follow an MMC_APP_CMD. The ++ * value will be updated in finalize_request function */ ++ if (host->last_opcode == MMC_APP_CMD) ++ ccon |= SDC_CMD_REG_APP_CMD; ++ ++ enable_imask(host, newmask); ++ REG_WRITE(cmd->arg, SDC_ARGU_REG); ++ ++ scon = REG_READ(SDC_SDIO_CTRL1_REG); ++ if (host->mmc->card != NULL && mmc_card_sdio(host->mmc->card)) ++ scon |= SDC_SDIO_CTRL1_REG_SDIO_ENABLE; ++ else ++ scon &= ~SDC_SDIO_CTRL1_REG_SDIO_ENABLE; ++ REG_WRITE(scon, SDC_SDIO_CTRL1_REG); ++ ++ dbg_dumpregs(host, ""); ++ dbg(host, dbg_debug, "CON[%x]\n", ccon); ++ ++ REG_WRITE(ccon, SDC_CMD_REG); ++} ++ ++static int ftsdc_setup_data(struct ftsdc_host *host, struct mmc_data *data) ++{ ++ u32 dcon, newmask = 0; ++ ++ /* configure data transfer paramter */ ++ ++ if (!data) ++ return 0; ++ if(host->mmc->card && host->mmc->card->type==(unsigned int)MMC_TYPE_SD){ ++ if (((data->blksz - 1) & data->blksz) != 0) { ++ pr_warning("%s: can't do non-power-of 2 sized block transfers (blksz %d)\n", __func__, data->blksz); ++ return -EINVAL; ++ } ++ } ++ ++ if (data->blksz <= 2) { ++ /* We cannot deal with unaligned blocks with more than ++ * one block being transfered. 
*/ ++ ++ if (data->blocks > 1) { ++ pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz); ++ return -EINVAL; ++ } ++ } ++ ++ /* data length */ ++ dcon = data->blksz * data->blocks; ++ REG_WRITE(dcon, SDC_DATA_LEN_REG); ++ ++ /* write data control */ ++ dcon = 0; ++ dcon = cal_blksz(data->blksz); ++ ++ /* enable UNDERFUN will trigger interrupt immediatedly ++ * So setup it when rsp is received successfully ++ */ ++ if (data->flags & MMC_DATA_WRITE) { ++ dcon |= SDC_DATA_CTRL_REG_DATA_WRITE; ++ } else { ++ dcon &= ~SDC_DATA_CTRL_REG_DATA_WRITE; ++ newmask |= SDC_INT_MASK_REG_FIFO_OVERRUN; ++ } ++ ++ /* always reset fifo since last transfer may fail */ ++ dcon |= SDC_DATA_CTRL_REG_FIFO_RST; ++ ++ /* enable data transfer which will be pended until cmd is send */ ++ dcon |= SDC_DATA_CTRL_REG_DATA_EN; ++ ++ if (ftsdc_dmaexist(host) && ++ ((data->blksz * data->blocks) & 0xf) == 0) { ++ newmask &= ~SDC_INT_MASK_REG_FIFO_OVERRUN; ++ dcon |= SDC_DATA_CTRL_REG_DMA_EN; ++ dcon |= SDC_DMA_TYPE_4; ++ host->dodma = true; ++ ++ } ++ ++ REG_WRITE(dcon, SDC_DATA_CTRL_REG); ++ ++ /* add to IMASK register */ ++ newmask |= SDC_INT_MASK_REG_DATA_CRC_FAIL | ++ SDC_INT_MASK_REG_DATA_TIMEOUT; ++ ++ enable_imask(host, newmask); ++ ++ /* handle sdio */ ++ dcon = SDC_SDIO_CTRL1_REG_READ_WAIT_ENABLE & REG_READ(SDC_SDIO_CTRL1_REG); ++ dcon |= data->blksz | data->blocks << 15; ++ if (1 < data->blocks) ++ dcon |= SDC_SDIO_CTRL1_REG_SDIO_BLK_MODE; ++ REG_WRITE(dcon, SDC_SDIO_CTRL1_REG); ++ ++ return 0; ++} ++ ++#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ) ++ ++static int ftsdc_prepare_buffer(struct ftsdc_host *host, struct mmc_data *data) ++{ ++ int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; ++ ++ if ((!host->mrq) || (!host->mrq->data)) ++ return -EINVAL; ++ ++ BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); ++ ++ host->buf_sgptr = 0; ++ host->buf_bytes = 0; ++ host->buf_count = 0; ++ host->buf_active = rw ? XFER_WRITE : XFER_READ; ++ ++ if (host->dodma) { ++ u32 dma_len; ++ u32 drb_size; ++ dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ++ rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); ++ if (dma_len == 0) ++ return -ENOMEM; ++ ++ ++ dmad_config_channel_dir(host->dma_req, ++ rw ? 
DMAD_DIR_A1_TO_A0 : DMAD_DIR_A0_TO_A1); ++ ++ drb_size = dmad_max_size_per_drb(host->dma_req); ++ if (drb_size < (data->blksz & data->blocks)) ++ return -ENODEV; ++ ++ host->dma_finish = false; ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t ftsdc_irq(int irq, void *dev_id) ++{ ++ struct ftsdc_host *host = dev_id; ++ struct mmc_command *cmd; ++ u32 mci_status; ++ u32 mci_clear; ++ u32 mci_imsk; ++ unsigned long iflags; ++ ++ mci_status = REG_READ(SDC_STATUS_REG); ++ mci_imsk = REG_READ(SDC_INT_MASK_REG); ++ ++ dbg(host, dbg_debug, "irq: status:0x%08x, mask : %08x\n", mci_status, mci_imsk); ++ ++ if (mci_status & SDC_STATUS_REG_SDIO_INTR) { ++ if (mci_imsk & SDC_INT_MASK_REG_SDIO_INTR) { ++ mci_clear = SDC_CLEAR_REG_SDIO_INTR; ++ REG_WRITE(mci_clear, SDC_CLEAR_REG); ++ ++ mmc_signal_sdio_irq(host->mmc); ++ return IRQ_HANDLED; ++ } ++ } ++ ++ spin_lock_irqsave(&host->complete_lock, iflags); ++ ++ mci_status = REG_READ(SDC_STATUS_REG); ++ mci_clear = 0; ++ ++ if (mci_status & SDC_STATUS_REG_CARD_CHANGE) { ++ if ((mci_status & SDC_STATUS_REG_CARD_DETECT) ++ == SDC_CARD_INSERT) { ++ host->status = "card insert"; ++ mmc_detect_change(host->mmc, msecs_to_jiffies(500)); ++ } else { ++ host->status = "card remove"; ++ } ++ ++ mci_clear |= SDC_CLEAR_REG_CARD_CHANGE; ++ dbg(host, dbg_irq, "%s\n", host->status); ++ ++ if (host->complete_what != COMPLETION_NONE) { ++ host->mrq->cmd->error = -EIO; ++ goto close_transfer; ++ } ++ ++ goto irq_out; ++ } ++ ++ if ((host->complete_what == COMPLETION_NONE) || ++ (host->complete_what == COMPLETION_FINALIZE)) { ++ host->status = "nothing to complete"; ++ mci_clear = -1u; ++ goto irq_out; ++ } ++ ++ if (!host->mrq) { ++ host->status = "no active mrq"; ++ clear_imask(host); ++ goto irq_out; ++ } ++ ++ cmd = host->cmd_is_stop ? 
host->mrq->stop : host->mrq->cmd; ++ ++ if (!cmd) { ++ host->status = "no active cmd"; ++ clear_imask(host); ++ goto irq_out; ++ } ++ ++ if (mci_status & SDC_STATUS_REG_CMD_SEND) { ++ mci_clear |= SDC_CLEAR_REG_CMD_SEND; ++ ++ if (host->complete_what == COMPLETION_CMDSENT) { ++ host->status = "ok: command sent"; ++ goto close_transfer; ++ } else { ++ host->status = "error: command sent(status not match)"; ++ cmd->error = -EINVAL; ++ goto fail_transfer; ++ } ++ } ++ ++ /* handle error status */ ++ if (mci_status & SDC_STATUS_REG_RSP_TIMEOUT) { ++ dbg(host, dbg_err, "CMDSTAT: error RSP TIMEOUT\n"); ++ mci_clear |= SDC_CLEAR_REG_RSP_TIMEOUT; ++ cmd->error = -ETIMEDOUT; ++ host->status = "error: response timeout"; ++ goto fail_transfer; ++ } ++ ++ if (mci_status & SDC_STATUS_REG_RSP_CRC_FAIL) { ++ mci_clear |= SDC_CLEAR_REG_RSP_CRC_FAIL; ++ /* This is wierd hack */ ++ if (cmd->flags & MMC_RSP_CRC) { ++ dbg(host, dbg_err, "CMDSTAT: error RSP CRC\n"); ++ cmd->error = -EILSEQ; ++ host->status = "error: RSP CRC failed"; ++ goto fail_transfer; ++ } else { ++ host->status = "R3 or R4 type command"; ++ goto close_transfer; ++ } ++ } ++ ++ if (mci_status & SDC_STATUS_REG_RSP_CRC_OK) { ++ mci_clear |= SDC_CLEAR_REG_RSP_CRC_OK; ++ ++ if (host->complete_what == COMPLETION_XFER_PROGRESS) { ++ REG_WRITE(mci_clear, SDC_CLEAR_REG); ++ ++ host->status = "RSP recv OK"; ++ if (!cmd->data) ++ goto close_transfer; ++ ++ if (host->dodma) { ++ tasklet_schedule(&host->pio_tasklet); ++ host->status = "dma access"; ++ goto irq_out; ++ } ++ ++ if (host->buf_active == XFER_WRITE) ++ enable_imask(host, SDC_INT_MASK_REG_FIFO_UNDERRUN); ++ } else if (host->complete_what == COMPLETION_RSPFIN) { ++ goto close_transfer; ++ } ++ } ++ ++ /* handler data transfer */ ++ if (mci_status & SDC_STATUS_REG_DATA_TIMEOUT) { ++ dbg(host, dbg_err, "CMDSTAT: error DATA TIMEOUT\n"); ++ mci_clear |= SDC_CLEAR_REG_DATA_TIMEOUT; ++ cmd->error = -ETIMEDOUT; ++ host->status = "error: data timeout"; ++ goto fail_transfer; ++ } ++ ++ if (mci_status & SDC_STATUS_REG_DATA_CRC_FAIL) { ++ dbg(host, dbg_err, "CMDSTAT: error DATA CRC\n"); ++ mci_clear |= SDC_CLEAR_REG_DATA_CRC_FAIL; ++ cmd->error = -EILSEQ; ++ host->status = "error: data CRC fail"; ++ goto fail_transfer; ++ } ++ ++ if ((mci_status & SDC_STATUS_REG_FIFO_UNDERRUN) || ++ mci_status & SDC_STATUS_REG_FIFO_OVERRUN) { ++ ++ disable_imask(host, SDC_INT_MASK_REG_FIFO_OVERRUN | ++ SDC_INT_MASK_REG_FIFO_UNDERRUN); ++ ++ if (!host->dodma) { ++ if (host->buf_active == XFER_WRITE) { ++ tasklet_schedule(&host->pio_tasklet); ++ host->status = "pio tx"; ++ } else if (host->buf_active == XFER_READ) { ++ ++ tasklet_schedule(&host->pio_tasklet); ++ host->status = "pio rx"; ++ } ++ } ++ } ++ ++ goto irq_out; ++ ++fail_transfer: ++ host->buf_active = XFER_NONE; ++ ++close_transfer: ++ host->complete_what = COMPLETION_FINALIZE; ++ ++ clear_imask(host); ++ tasklet_schedule(&host->pio_tasklet); ++ ++irq_out: ++ REG_WRITE(mci_clear, SDC_CLEAR_REG); ++ ++ dbg(host, dbg_debug, "irq: %s\n", host->status); ++ spin_unlock_irqrestore(&host->complete_lock, iflags); ++ return IRQ_HANDLED; ++} ++ ++static void ftsdc_send_request(struct mmc_host *mmc) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ struct mmc_request *mrq = host->mrq; ++ struct mmc_command *cmd = host->cmd_is_stop ? 
mrq->stop : mrq->cmd; ++ ++ host->ccnt++; ++ prepare_dbgmsg(host, cmd, host->cmd_is_stop); ++ dbg(host, dbg_debug, "%s\n", host->dbgmsg_cmd); ++ ++ if (cmd->data) { ++ int res = ftsdc_setup_data(host, cmd->data); ++ ++ host->dcnt++; ++ ++ if (res) { ++ dbg(host, dbg_err, "setup data error %d\n", res); ++ cmd->error = res; ++ cmd->data->error = res; ++ ++ mmc_request_done(mmc, mrq); ++ return; ++ } ++ ++ res = ftsdc_prepare_buffer(host, cmd->data); ++ ++ if (res) { ++ dbg(host, dbg_err, "data prepare error %d\n", res); ++ cmd->error = res; ++ cmd->data->error = res; ++ ++ mmc_request_done(mmc, mrq); ++ return; ++ } ++ } ++ ++ /* Send command */ ++ ftsdc_send_command(host, cmd); ++} ++ ++static int ftsdc_get_cd(struct mmc_host *mmc) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ ++ u32 con = REG_READ(SDC_STATUS_REG); ++ dbg(host, dbg_debug, "get_cd status:%.8x\n\n", con); ++ ++ return (con & SDC_STATUS_REG_CARD_DETECT) ? 0 : 1; ++} ++ ++static void ftsdc_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ ++/* work_around_for_amerald(mrq);*/ ++ host->status = "mmc request"; ++ host->cmd_is_stop = 0; ++ host->mrq = mrq; ++ if (ftsdc_get_cd(mmc) == 0) { ++ dbg(host, dbg_err, "%s: no medium present\n", __func__); ++ host->mrq->cmd->error = -ENOMEDIUM; ++ mmc_request_done(mmc, mrq); ++ } else { ++ ftsdc_send_request(mmc); ++ } ++ ++ dbg(host, dbg_debug, "send request \n"); ++} ++ ++static void ftsdc_set_clk(struct ftsdc_host *host, struct mmc_ios *ios) ++{ ++ u32 clk_div = 0; ++ u32 con; ++ ++ dbg(host, dbg_debug, "request clk : %u \n", ios->clock); ++ con = REG_READ(SDC_CLOCK_CTRL_REG); ++ if (ios->clock == 0) { ++ host->real_rate = 0; ++ con |= SDC_CLOCK_CTRL_REG_CLK_DIS; ++ } else { ++ clk_div = (APB_CLK_IN / (ios->clock << 1)) - 1; ++ host->real_rate = APB_CLK_IN / ((clk_div+1)<<1); ++ if (host->real_rate > ios->clock) { ++ ++clk_div; ++ host->real_rate = APB_CLK_IN / ((clk_div+1)<<1); ++ } ++ if (clk_div > 127) ++ dbg(host, dbg_err, "%s: no match clock rate, %u\n", __func__, ios->clock); ++ ++ con = (con & ~SDC_CLOCK_CTRL_REG_CLK_DIV) | (clk_div & SDC_CLOCK_CTRL_REG_CLK_DIV); ++ con &= ~SDC_CLOCK_CTRL_REG_CLK_DIS; ++ } ++ ++ REG_WRITE(con, SDC_CLOCK_CTRL_REG); ++} ++ ++static void ftsdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ u32 con; ++ ++ con = REG_READ(SDC_POWER_CTRL_REG); ++ switch (ios->power_mode) { ++ case MMC_POWER_ON: ++ case MMC_POWER_UP: ++ con |= SDC_POWER_CTRL_REG_POWER_ON; ++ break; ++ case MMC_POWER_OFF: ++ default: ++ con &= ~SDC_POWER_CTRL_REG_POWER_ON; ++ break; ++ } ++ ++ REG_WRITE(con, SDC_POWER_CTRL_REG); ++ ++ ftsdc_set_clk(host, ios); ++ ++ if ((ios->power_mode == MMC_POWER_ON) || ++ (ios->power_mode == MMC_POWER_UP)) { ++ dbg(host, dbg_debug, "running at %ukHz (requested: %ukHz).\n", ++ host->real_rate/1000, ios->clock/1000); ++ } else { ++ dbg(host, dbg_debug, "powered down.\n"); ++ } ++ ++ host->bus_width = ios->bus_width; ++ /* write bus configure */ ++ con = REG_READ(SDC_BUS_WIDTH_REG); ++ ++ con &= ~(SDC_BUS_WIDTH_REG_SINGLE_BUS | ++ SDC_BUS_WIDTH_REG_WIDE_4_BUS | ++ SDC_BUS_WIDTH_REG_WIDE_8_BUS); ++ if (host->bus_width == MMC_BUS_WIDTH_1) ++ con |= SDC_BUS_WIDTH_REG_SINGLE_BUS; ++ else if (host->bus_width == MMC_BUS_WIDTH_4) ++ con |= SDC_BUS_WIDTH_REG_WIDE_4_BUS; ++ else if (host->bus_width == MMC_BUS_WIDTH_8) ++ con |= SDC_BUS_WIDTH_REG_WIDE_8_BUS; ++ else { ++ dbg(host, dbg_err, "set_ios: can't support bus mode"); ++ } ++ REG_WRITE(con, 
SDC_BUS_WIDTH_REG); ++ ++ /*set rsp and data timeout */ ++ con = -1; ++ REG_WRITE(con, SDC_DATA_TIMER_REG); ++} ++ ++static int ftsdc_get_ro(struct mmc_host *mmc) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ u32 con = REG_READ(SDC_STATUS_REG); ++ dbg(host, dbg_debug, "get_ro status:%.8x\n", con); ++ ++ return (con & SDC_STATUS_REG_CARD_LOCK) ? 1 : 0; ++} ++ ++ ++static void ftsdc_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct ftsdc_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ u32 con; ++#ifdef CONFIG_MMC_DEBUG ++ u32 ena; ++#endif ++ ++ local_irq_save(flags); ++ ++ con = REG_READ(SDC_INT_MASK_REG); ++#ifdef CONFIG_MMC_DEBUG ++ ena = (con & SDC_STATUS_REG_SDIO_INTR) ? 1:0; ++ if (ena == enable) ++ printk("\n*** XXX ***\n"); ++#endif ++ ++ con = enable ? (con | SDC_STATUS_REG_SDIO_INTR) : (con & ~SDC_STATUS_REG_SDIO_INTR); ++ REG_WRITE(con, SDC_INT_MASK_REG); ++ ++#ifdef CONFIG_MMC_DEBUG ++ //check and ensure data out to SD host controller ++ ena = (REG_READ(SDC_INT_MASK_REG) & SDC_STATUS_REG_SDIO_INTR) ? 1:0; ++ if (ena != enable) { ++ printk("\n*** YYY ***\n"); ++ } ++#endif ++ ++ local_irq_restore(flags); ++} ++ ++static struct mmc_host_ops ftsdc_ops = { ++ .request = ftsdc_request, ++ .set_ios = ftsdc_set_ios, ++ .get_ro = ftsdc_get_ro, ++ .get_cd = ftsdc_get_cd, ++ .enable_sdio_irq = ftsdc_enable_sdio_irq, ++}; ++ ++#ifdef CONFIG_DEBUG_FS ++ ++static int ftsdc_state_show(struct seq_file *seq, void *v) ++{ ++ struct ftsdc_host *host = seq->private; ++ ++ seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base); ++ seq_printf(seq, "Clock rate = %u\n", host->real_rate); ++ seq_printf(seq, "host status = %s\n", host->status); ++ seq_printf(seq, "IRQ = %d\n", host->irq); ++ seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled); ++ seq_printf(seq, "complete what = %d\n", host->complete_what); ++ seq_printf(seq, "dma support = %d\n", ftsdc_dmaexist(host)); ++ seq_printf(seq, "use dma = %d\n", host->dodma); ++ ++ return 0; ++} ++ ++static int ftsdc_state_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, ftsdc_state_show, inode->i_private); ++} ++ ++static const struct file_operations ftsdc_fops_state = { ++ .owner = THIS_MODULE, ++ .open = ftsdc_state_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++#define DBG_REG(_r) { .addr = SDC_## _r ## _REG, .name = #_r } ++ ++struct ftsdc_reg { ++ unsigned short addr; ++ unsigned char *name; ++} debug_regs[] = { ++ DBG_REG(CMD), ++ DBG_REG(ARGU), ++ DBG_REG(RESPONSE0), ++ DBG_REG(RESPONSE1), ++ DBG_REG(RESPONSE2), ++ DBG_REG(RESPONSE3), ++ DBG_REG(RSP_CMD), ++ DBG_REG(DATA_CTRL), ++ DBG_REG(DATA_TIMER), ++ DBG_REG(DATA_LEN), ++ DBG_REG(STATUS), ++ DBG_REG(CLEAR), ++ DBG_REG(INT_MASK), ++ DBG_REG(POWER_CTRL), ++ DBG_REG(CLOCK_CTRL), ++ DBG_REG(BUS_WIDTH), ++ DBG_REG(SDIO_CTRL1), ++ DBG_REG(SDIO_CTRL2), ++ DBG_REG(SDIO_STATUS), ++ DBG_REG(FEATURE), ++ DBG_REG(REVISION), ++ {} ++}; ++ ++static int ftsdc_regs_show(struct seq_file *seq, void *v) ++{ ++ struct ftsdc_host *host = seq->private; ++ struct ftsdc_reg *rptr = debug_regs; ++ ++ for (; rptr->name; rptr++) ++ seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, ++ REG_READ(rptr->addr)); ++ ++ return 0; ++} ++ ++static int ftsdc_regs_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, ftsdc_regs_show, inode->i_private); ++} ++ ++static const struct file_operations ftsdc_fops_regs = { ++ .owner = THIS_MODULE, ++ .open = ftsdc_regs_open, ++ .read = seq_read, ++ .llseek = 
seq_lseek, ++ .release = single_release, ++}; ++ ++static void ftsdc_debugfs_attach(struct ftsdc_host *host) ++{ ++ struct device *dev = &host->pdev->dev; ++ ++ host->debug_root = debugfs_create_dir(dev_name(dev), NULL); ++ if (IS_ERR(host->debug_root)) { ++ dev_err(dev, "failed to create debugfs root\n"); ++ return; ++ } ++ ++ host->debug_state = debugfs_create_file("state", 0444, ++ host->debug_root, host, ++ &ftsdc_fops_state); ++ ++ if (IS_ERR(host->debug_state)) ++ dev_err(dev, "failed to create debug state file\n"); ++ ++ host->debug_regs = debugfs_create_file("regs", 0444, ++ host->debug_root, host, ++ &ftsdc_fops_regs); ++ ++ if (IS_ERR(host->debug_regs)) ++ dev_err(dev, "failed to create debug regs file\n"); ++} ++ ++static void ftsdc_debugfs_remove(struct ftsdc_host *host) ++{ ++ debugfs_remove(host->debug_regs); ++ debugfs_remove(host->debug_state); ++ debugfs_remove(host->debug_root); ++} ++ ++#else ++static inline void ftsdc_debugfs_attach(struct ftsdc_host *host) { } ++static inline void ftsdc_debugfs_remove(struct ftsdc_host *host) { } ++ ++#endif /* CONFIG_DEBUG_FS */ ++ ++#if (defined(CONFIG_PLATFORM_AHBDMA) || defined(CONFIG_PLATFORM_APBDMA)) ++static int ftsdc_alloc_dma(struct ftsdc_host *host) ++{ ++ dmad_chreq *req = host->dma_req; ++ ++ req = kzalloc(sizeof(dmad_chreq), GFP_KERNEL); ++#ifdef CONFIG_PLATFORM_APBDMA ++#ifdef CONFIG_PLAT_AG102 ++ //ADD by river 2010.10.20 ++ outl(inl(PCU_VA_BASE + 0x38) | 0x00000300, PCU_VA_BASE + 0x38); ++ //End ADD by river 2010.10.20 ++#endif ++ req->apb_req.addr0_ctrl = APBBR_ADDRINC_FIXED; /* (in) APBBR_ADDRINC_xxx */ ++/* for amerald */ ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID){ ++ req->apb_req.addr0_reqn = APBBR_REQN_SDC_AMERALD; ++ }else ++ { ++ req->apb_req.addr0_reqn = APBBR_REQN_SDC; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ } ++ req->apb_req.addr1_ctrl = APBBR_ADDRINC_I4X; /* (in) APBBR_ADDRINC_xxx */ ++ req->apb_req.addr1_reqn = APBBR_REQN_NONE; /* (in) APBBR_REQN_xxx (also used to help determine bus selection) */ ++ req->apb_req.burst_mode = 1; /* (in) Burst mode (0: no burst 1-, 1: burst 4- data cycles per dma cycle) */ ++ req->apb_req.data_width = APBBR_DATAWIDTH_4; /* (in) APBBR_DATAWIDTH_4(word), APBBR_DATAWIDTH_2(half-word), APBBR_DATAWIDTH_1(byte) */ ++ req->apb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ req->controller = DMAD_DMAC_APB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(req) == 0) { ++ dbg(host, dbg_debug, "%s: APB dma channel allocated (ch: %d)\n", __func__, req->channel); ++ host->dma_req = req; ++ return 0; ++ } ++ ++ memset(req, 0, sizeof(dmad_chreq)); ++ dbg(host, dbg_info, "%s: APB dma channel allocation failed\n", __func__); ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ req->ahb_req.sync = 1; /* (in) non-zero if src and dst have different clock domain */ ++ req->ahb_req.priority = DMAC_CSR_CHPRI_1; /* (in) DMAC_CSR_CHPRI_0 (lowest) ~ DMAC_CSR_CHPRI_3 (highest) */ ++ req->ahb_req.hw_handshake = 1; /* (in) non-zero to enable hardware handshake mode */ ++ req->ahb_req.burst_size = DMAC_CSR_SIZE_4; /* (in) DMAC_CSR_SIZE_1 ~ DMAC_CSR_SIZE_256 */ ++ req->ahb_req.addr0_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ req->ahb_req.addr0_ctrl = DMAC_CSR_AD_FIX; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ 
req->ahb_req.addr0_reqn = DMAC_REQN_SDC; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ req->ahb_req.addr1_width = DMAC_CSR_WIDTH_32; /* (in) DMAC_CSR_WIDTH_8, DMAC_CSR_WIDTH_16, or DMAC_CSR_WIDTH_32 */ ++ req->ahb_req.addr1_ctrl = DMAC_CSR_AD_INC; /* (in) DMAC_CSR_AD_INC, DMAC_CSR_AD_DEC, or DMAC_CSR_AD_FIX */ ++ req->ahb_req.addr1_reqn = DMAC_REQN_NONE; /* (in) DMAC_REQN_xxx (also used to help determine channel number) */ ++ req->ahb_req.tx_dir = DMAD_DIR_A0_TO_A1; /* (in) DMAD_DIR_A0_TO_A1, DMAD_DIR_A1_TO_A0 */ ++ ++ req->controller = DMAD_DMAC_AHB_CORE; /* (in) DMAD_DMAC_AHB_CORE, DMAD_DMAC_APB_CORE */ ++ req->flags = DMAD_FLAGS_SLEEP_BLOCK | DMAD_FLAGS_BIDIRECTION; ++ ++ if (dmad_channel_alloc(req) == 0) { ++ dbg(host, dbg_debug, "%s: AHB dma channel allocated (ch: %d)\n", __func__, req->channel); ++ host->dma_req = req; ++ return 0; ++ } ++ ++ dbg(host, dbg_info, "%s: AHB dma channel allocation failed\n", __func__); ++#endif ++ ++ kfree(req); ++ return -ENODEV; ++ ++} ++#endif ++ ++static int __devinit ftsdc_probe(struct platform_device *pdev) ++{ ++ struct ftsdc_host *host; ++ struct mmc_host *mmc; ++ int ret = -ENOMEM; ++ u32 con; ++ ++ mmc = mmc_alloc_host(sizeof(struct ftsdc_host), &pdev->dev); ++ if (!mmc) { ++// ret = -ENOMEM; ++ goto probe_out; ++ } ++ ++ host = mmc_priv(mmc); ++ host->mmc = mmc; ++ host->pdev = pdev; ++ ++ mywq = create_workqueue("atcsdc_queue"); ++ if (NULL == mywq) ++ goto probe_free_host; ++ ++ spin_lock_init(&host->complete_lock); ++ tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host); ++ init_completion(&host->dma_complete); ++ INIT_WORK(&host->work, ftsdc_work); ++ ++ host->complete_what = COMPLETION_NONE; ++ host->buf_active = XFER_NONE; ++ ++#if (defined(CONFIG_PLATFORM_AHBDMA) || defined(CONFIG_PLATFORM_APBDMA)) ++ ftsdc_alloc_dma(host); ++#endif ++ ++ host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!host->mem) { ++ dev_err(&pdev->dev, ++ "failed to get io memory region resouce.\n"); ++ ++ ret = -ENOENT; ++ goto probe_free_host; ++ } ++ ++ host->mem = request_mem_region(host->mem->start, ++ resource_size(host->mem), pdev->name); ++ ++ if (!host->mem) { ++ dev_err(&pdev->dev, "failed to request io memory region.\n"); ++ ret = -ENOENT; ++ goto probe_free_wq; ++ } ++ ++ host->base = (void __iomem *) SDC_FTSDC010_0_VA_BASE; ++ host->irq = SDC_FTSDC010_IRQ; ++ if (request_irq(host->irq, ftsdc_irq, IRQF_DISABLED, DRIVER_NAME, host)) { ++ dev_err(&pdev->dev, "failed to request mci interrupt.\n"); ++ ret = -ENOENT; ++ goto probe_free_mem_region; ++ } ++ host->irq_enabled = true; ++ ++ /* enable card change interruption */ ++ con = REG_READ(SDC_INT_MASK_REG); ++ con |= SDC_INT_MASK_REG_CARD_CHANGE; ++ REG_WRITE(con, SDC_INT_MASK_REG); ++ ++ con = REG_READ(SDC_BUS_WIDTH_REG); ++ ++ mmc->ops = &ftsdc_ops; ++ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; ++ ++ if (con & SDC_WIDE_4_BUS_SUPPORT) ++ mmc->caps |= MMC_CAP_4_BIT_DATA; ++ else if (con & SDC_WIDE_8_BUS_SUPPORT) ++ mmc->caps |= MMC_CAP_8_BIT_DATA; ++ ++#ifndef A320D_BUILDIN_SDC ++ mmc->caps |= MMC_CAP_SDIO_IRQ; ++#endif ++ ++ mmc->f_min = APB_CLK_IN / (2 * 128); ++ mmc->f_max = APB_CLK_IN / 2; ++ ++ /* limit SDIO mode max size */ ++ mmc->max_req_size = 128 * 1024 * 1024 - 1; ++ mmc->max_blk_size = 2047; ++ mmc->max_req_size = (mmc->max_req_size + 1) / (mmc->max_blk_size + 1); ++ mmc->max_seg_size = mmc->max_req_size; ++ mmc->max_blk_count = (1<<17)-1; ++ ++ /* kernel default value. 
see Doc/block/biodocs.txt */ ++ /* ++ 'struct mmc_host' has no member named 'max_phys_segs' ++ 'struct mmc_host' has no member named 'max_hw_segs' ++ */ ++// mmc->max_phys_segs = 128; ++// mmc->max_hw_segs = 128; ++ ++ /* set fifo lenght and default threshold half */ ++ con = REG_READ(SDC_FEATURE_REG); ++ host->fifo_len = (con & SDC_FEATURE_REG_FIFO_DEPTH) * sizeof(u32); ++ ++ dbg(host, dbg_debug, ++ "probe: mapped mci_base:%p irq:%u.\n", ++ host->base, host->irq); ++ ++ dbg_dumpregs(host, ""); ++ ++ ret = mmc_add_host(mmc); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to add mmc host.\n"); ++ goto probe_free_irq; ++ } ++ ++ ftsdc_debugfs_attach(host); ++ ++ platform_set_drvdata(pdev, mmc); ++ dev_info(&pdev->dev, "%s - using %s SDIO IRQ\n", mmc_hostname(mmc), ++ mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw"); ++ ++ return 0; ++ ++ probe_free_irq: ++ free_irq(host->irq, host); ++ ++ probe_free_mem_region: ++ release_mem_region(host->mem->start, resource_size(host->mem)); ++ ++probe_free_wq: ++ destroy_workqueue(mywq); ++ ++ probe_free_host: ++ mmc_free_host(mmc); ++ ++ probe_out: ++ return ret; ++} ++ ++static void ftsdc_shutdown(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ struct ftsdc_host *host = mmc_priv(mmc); ++ ++ flush_workqueue(mywq); ++ destroy_workqueue(mywq); ++ ++ ftsdc_debugfs_remove(host); ++ mmc_remove_host(mmc); ++} ++ ++static int __devexit ftsdc_remove(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ struct ftsdc_host *host = mmc_priv(mmc); ++ ++ ftsdc_shutdown(pdev); ++ ++ tasklet_disable(&host->pio_tasklet); ++ ++ if (ftsdc_dmaexist(host)) ++ kfree(host->dma_req); ++ ++ free_irq(host->irq, host); ++ ++ release_mem_region(host->mem->start, resource_size(host->mem)); ++ ++ mmc_free_host(mmc); ++ return 0; ++} ++ ++ ++#ifdef CONFIG_PM ++static int ftsdc_free_dma(struct ftsdc_host *host) ++{ ++ dmad_channel_free(host->dma_req); ++ return 0; ++} ++ ++static int ftsdc_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ struct ftsdc_host *host = mmc_priv(mmc); ++ int ret = 0; ++ if (mmc) { ++ ftsdc_free_dma(host); ++ ret = mmc_suspend_host(mmc); ++ } ++ return ret; ++ ++} ++ ++static int ftsdc_resume(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ int ret = 0; ++ struct ftsdc_host *host = mmc_priv(mmc); ++ if (mmc) { ++#if (defined(CONFIG_PLATFORM_AHBDMA) || defined(CONFIG_PLATFORM_APBDMA)) ++ ftsdc_alloc_dma(host); ++#endif ++ ret = mmc_resume_host(mmc); ++ } ++ return ret; ++} ++ ++static void platform_device_release(struct device *dev){ ++} ++ ++ ++#else ++#define ftsdc_suspend NULL ++#define ftsdc_resume NULL ++#endif ++ ++ ++static struct platform_driver ftsdc_driver = { ++ .driver = { ++ .name = "ftsdc010", ++ .owner = THIS_MODULE, ++ }, ++ .probe = ftsdc_probe, ++ .remove = __devexit_p(ftsdc_remove), ++ .shutdown = ftsdc_shutdown, ++ .suspend = ftsdc_suspend, ++ .resume = ftsdc_resume, ++}; ++ ++static struct resource sdc_resource[] = { ++ [0] = { ++ .start = SDC_FTSDC010_0_VA_BASE, ++ .end = SDC_FTSDC010_0_VA_BASE + 0x1000 - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = SDC_FTSDC010_0_IRQ, ++ .flags = IORESOURCE_IRQ, ++ } ++ ++}; ++ ++struct platform_device sdc_device = { ++ .name = "ftsdc010", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(sdc_resource), ++ .resource = sdc_resource, ++ .dev = { ++ .release = platform_device_release, ++ }, ++}; ++ ++static int __init 
ftsdc_init(void) ++{ ++ platform_device_register(&sdc_device); ++ return platform_driver_register(&ftsdc_driver); ++} ++ ++static void __exit ftsdc_exit(void) ++{ ++ platform_driver_unregister(&ftsdc_driver); ++ platform_device_unregister(&sdc_device); ++} ++module_init(ftsdc_init); ++module_exit(ftsdc_exit); ++ ++MODULE_DESCRIPTION("Andestech Leopard MMC/SD Card Interface driver"); ++MODULE_LICENSE("GPL v2"); +diff -Nur linux-3.4.110.orig/drivers/mmc/host/ftsdc010.h linux-3.4.110/drivers/mmc/host/ftsdc010.h +--- linux-3.4.110.orig/drivers/mmc/host/ftsdc010.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/mmc/host/ftsdc010.h 2016-04-07 10:20:51.050085202 +0200 +@@ -0,0 +1,240 @@ ++/* ++ * linux/driver/mmc/ftsdc010.h - Andestech MMC/SD driver ++ * Andestech FTSDC010 Device Driver ++ * ++ * Andestech (C) 2005 Faraday Corp. (http://www.Andestech.com) ++ * ++ * All Rights Reserved ++ */ ++ ++#ifndef _FTSDC010_H_ ++#define _FTSDC010_H_ ++ ++//#define SD_DEBUG ++#define DELAY_FOR_DMA_READ ++ ++#ifdef SD_DEBUG ++ #define P_DEBUG(fmt, args...) printk(KERN_ALERT "SD: " fmt, ## args) ++#else ++ #define P_DEBUG(a...) ++#endif ++#define P_DEBUGG(a...) ++ ++/* used for dma timeout */ ++#define SDC_TIMEOUT_BASE (HZ/2) // Unit is 500 ms ++ ++/* used for pio retry times */ ++#define SDC_PIO_RETRY 0x300000 ++ ++/* sd controller register */ ++#define SDC_CMD_REG 0x00000000 ++#define SDC_ARGU_REG 0x00000004 ++#define SDC_RESPONSE0_REG 0x00000008 ++#define SDC_RESPONSE1_REG 0x0000000C ++#define SDC_RESPONSE2_REG 0x00000010 ++#define SDC_RESPONSE3_REG 0x00000014 ++#define SDC_RSP_CMD_REG 0x00000018 ++#define SDC_DATA_CTRL_REG 0x0000001C ++#define SDC_DATA_TIMER_REG 0x00000020 ++#define SDC_DATA_LEN_REG 0x00000024 ++#define SDC_STATUS_REG 0x00000028 ++#define SDC_CLEAR_REG 0x0000002C ++#define SDC_INT_MASK_REG 0x00000030 ++#define SDC_POWER_CTRL_REG 0x00000034 ++#define SDC_CLOCK_CTRL_REG 0x00000038 ++#define SDC_BUS_WIDTH_REG 0x0000003C ++#define SDC_DATA_WINDOW_REG 0x00000040 ++ ++#ifdef A320D_BUILDIN_SDC ++#define SDC_FEATURE_REG 0x00000044 ++#define SDC_REVISION_REG 0x00000048 ++#else ++#define SDC_MMC_INT_RSP_REG 0x00000044 ++#define SDC_GP_OUTPUT_REG 0x00000048 ++#define SDC_FEATURE_REG 0x0000009C ++#define SDC_REVISION_REG 0x000000A0 ++#endif ++ ++#define SDC_SDIO_CTRL1_REG 0x0000006C ++#define SDC_SDIO_CTRL2_REG 0x00000070 ++#define SDC_SDIO_STATUS_REG 0x00000074 ++ ++/* bit mapping of command register */ ++#define SDC_CMD_REG_INDEX 0x0000003F ++#define SDC_CMD_REG_NEED_RSP 0x00000040 ++#define SDC_CMD_REG_LONG_RSP 0x00000080 ++#define SDC_CMD_REG_APP_CMD 0x00000100 ++#define SDC_CMD_REG_CMD_EN 0x00000200 ++#define SDC_CMD_REG_SDC_RST 0x00000400 ++#define SDC_CMD_MMC_INT_STOP 0x00000800 ++ ++/* bit mapping of response command register */ ++#define SDC_RSP_CMD_REG_INDEX 0x0000003F ++#define SDC_RSP_CMD_REG_APP 0x00000040 ++ ++/* bit mapping of data control register */ ++#define SDC_DATA_CTRL_REG_BLK_SIZE 0x0000000F ++#define SDC_DATA_CTRL_REG_DATA_WRITE 0x00000010 ++#define SDC_DATA_CTRL_REG_DMA_EN 0x00000020 ++#define SDC_DATA_CTRL_REG_DATA_EN 0x00000040 ++#define SDC_DATA_CTRL_REG_FIFOTH 0x00000080 ++#define SDC_DATA_CTRL_REG_DMA_TYPE 0x00000300 ++#define SDC_DATA_CTRL_REG_FIFO_RST 0x00000400 ++#define SDC_CPRM_DATA_CHANGE_ENDIAN_EN 0x00000800 ++#define SDC_CPRM_DATA_SWAP_HL_EN 0x00001000 ++ ++#define SDC_DMA_TYPE_1 0x00000000 ++#define SDC_DMA_TYPE_4 0x00000100 ++#define SDC_DMA_TYPE_8 0x00000200 ++ ++/* bit mapping of status register */ ++#define SDC_STATUS_REG_RSP_CRC_FAIL 0x00000001 
++#define SDC_STATUS_REG_DATA_CRC_FAIL 0x00000002 ++#define SDC_STATUS_REG_RSP_TIMEOUT 0x00000004 ++#define SDC_STATUS_REG_DATA_TIMEOUT 0x00000008 ++#define SDC_STATUS_REG_RSP_CRC_OK 0x00000010 ++#define SDC_STATUS_REG_DATA_CRC_OK 0x00000020 ++#define SDC_STATUS_REG_CMD_SEND 0x00000040 ++#define SDC_STATUS_REG_DATA_END 0x00000080 ++#define SDC_STATUS_REG_FIFO_UNDERRUN 0x00000100 ++#define SDC_STATUS_REG_FIFO_OVERRUN 0x00000200 ++#define SDC_STATUS_REG_CARD_CHANGE 0x00000400 ++#define SDC_STATUS_REG_CARD_DETECT 0x00000800 ++#define SDC_STATUS_REG_CARD_LOCK 0x00001000 ++#define SDC_STATUS_REG_CP_READY 0x00002000 ++#define SDC_STATUS_REG_CP_BUF_READY 0x00004000 ++#define SDC_STATUS_REG_PLAIN_TEXT_READY 0x00008000 ++#define SDC_STATUS_REG_SDIO_INTR 0x00010000 ++ ++/* bit mapping of clear register */ ++#define SDC_CLEAR_REG_RSP_CRC_FAIL 0x00000001 ++#define SDC_CLEAR_REG_DATA_CRC_FAIL 0x00000002 ++#define SDC_CLEAR_REG_RSP_TIMEOUT 0x00000004 ++#define SDC_CLEAR_REG_DATA_TIMEOUT 0x00000008 ++#define SDC_CLEAR_REG_RSP_CRC_OK 0x00000010 ++#define SDC_CLEAR_REG_DATA_CRC_OK 0x00000020 ++#define SDC_CLEAR_REG_CMD_SEND 0x00000040 ++#define SDC_CLEAR_REG_DATA_END 0x00000080 ++#define SDC_CLEAR_REG_CARD_CHANGE 0x00000400 ++#define SDC_CLEAR_REG_SDIO_INTR 0x00010000 ++ ++/* bit mapping of int_mask register */ ++#define SDC_INT_MASK_REG_RSP_CRC_FAIL 0x00000001 ++#define SDC_INT_MASK_REG_DATA_CRC_FAIL 0x00000002 ++#define SDC_INT_MASK_REG_RSP_TIMEOUT 0x00000004 ++#define SDC_INT_MASK_REG_DATA_TIMEOUT 0x00000008 ++#define SDC_INT_MASK_REG_RSP_CRC_OK 0x00000010 ++#define SDC_INT_MASK_REG_DATA_CRC_OK 0x00000020 ++#define SDC_INT_MASK_REG_CMD_SEND 0x00000040 ++#define SDC_INT_MASK_REG_DATA_END 0x00000080 ++#define SDC_INT_MASK_REG_FIFO_UNDERRUN 0x00000100 ++#define SDC_INT_MASK_REG_FIFO_OVERRUN 0x00000200 ++#define SDC_INT_MASK_REG_CARD_CHANGE 0x00000400 ++#define SDC_INT_MASK_REG_CARD_LOCK 0x00001000 ++#define SDC_INT_MASK_REG_CP_READY 0x00002000 ++#define SDC_INT_MASK_REG_CP_BUF_READY 0x00004000 ++#define SDC_INT_MASK_REG_PLAIN_TEXT_READY 0x00008000 ++#define SDC_INT_MASK_REG_SDIO_INTR 0x00010000 ++ ++ ++#define SDC_CARD_INSERT 0x0 ++#define SDC_CARD_REMOVE SDC_STATUS_REG_CARD_DETECT ++ ++/* bit mapping of power control register */ ++#define SDC_POWER_CTRL_REG_POWER_ON 0x00000010 ++#define SDC_POWER_CTRL_REG_POWER_BITS 0x0000000F ++ ++/* bit mapping of clock control register */ ++#define SDC_CLOCK_CTRL_REG_CLK_DIV 0x0000007F ++#define SDC_CLOCK_CTRL_REG_CARD_TYPE 0x00000080 ++#define SDC_CLOCK_CTRL_REG_CLK_DIS 0x00000100 ++ ++/* card type */ ++#define SDC_CARD_TYPE_SD SDC_CLOCK_REG_CARD_TYPE ++#define SDC_CARD_TYPE_MMC 0x0 ++ ++/* bit mapping of bus width register */ ++#define SDC_BUS_WIDTH_REG_SINGLE_BUS 0x00000001 ++#define SDC_BUS_WIDTH_REG_WIDE_8_BUS 0x00000002 ++#define SDC_BUS_WIDTH_REG_WIDE_4_BUS 0x00000004 ++#define SDC_BUS_WIDTH_REG_WIDE_BUS_SUPPORT 0x00000018 ++#define SDC_BUS_WIDTH_REG_CARD_DETECT 0x00000020 ++ ++#define SDC_WIDE_4_BUS_SUPPORT 0x00000008 ++#define SDC_WIDE_8_BUS_SUPPORT 0x00000010 ++ ++/* bit mapping of feature register */ ++#define SDC_FEATURE_REG_FIFO_DEPTH 0x000000FF ++#define SDC_FEATURE_REG_CPRM_FUNCTION 0x00000100 ++ ++/* bit mapping of sdio control register */ ++#define SDC_SDIO_CTRL1_REG_SDIO_BLK_NO 0xFFFF8000 ++#define SDC_SDIO_CTRL1_REG_SDIO_ENABLE 0x00004000 ++#define SDC_SDIO_CTRL1_REG_READ_WAIT_ENABLE 0x00002000 ++#define SDC_SDIO_CTRL1_REG_SDIO_BLK_MODE 0x00001000 ++#define SDC_SDIO_CTRL1_REG_SDIO_BLK_SIZE 0x00000FFF ++ ++/* bit mapping of sdio status register */ 
++#define SDC_SDIO_SDIO_STATUS_REG_FIFO_REMAIN_NO 0x00FE0000 ++#define SDC_SDIO_SDIO_STATUS_REG_SDIO_BLK_CNT 0x0001FFFF ++ ++enum ftsdc_waitfor { ++ COMPLETION_NONE, ++ COMPLETION_FINALIZE, ++ COMPLETION_CMDSENT, ++ COMPLETION_RSPFIN, ++ COMPLETION_XFER_PROGRESS, ++}; ++ ++struct ftsdc_host { ++ struct platform_device *pdev; ++ struct mmc_host *mmc; ++ struct resource *mem; ++ struct clk *clk; ++ void __iomem *base; ++ int irq; ++ ++ unsigned int real_rate; ++ bool irq_enabled; ++ unsigned int fifo_len; /* bytes */ ++ unsigned int last_opcode; /* keep last successful cmd to judge application specific command */ ++ ++ struct mmc_request *mrq; ++ int cmd_is_stop; ++ ++ spinlock_t complete_lock; ++ enum ftsdc_waitfor complete_what; ++ ++ struct completion dma_complete; ++ dmad_chreq *dma_req; ++ bool dodma; ++ bool dma_finish; ++ ++ ++ u32 buf_sgptr; /* keep next scallterlist buffer index */ ++ u32 buf_bytes; /* keep current total scallterlist buffer length */ ++ u32 buf_count; /* keep real data size rw from sd */ ++ u32 *buf_ptr; /* keep current scallterlist buffer address */ ++#define XFER_NONE 0 ++#define XFER_READ 1 ++#define XFER_WRITE 2 ++ u32 buf_active; /* keep current transfer mode */ ++ ++ int bus_width; ++ ++ char dbgmsg_cmd[301]; ++ char dbgmsg_dat[301]; ++ char *status; ++ ++ unsigned int ccnt, dcnt; ++ struct tasklet_struct pio_tasklet; ++ struct work_struct work; ++ ++#ifdef CONFIG_DEBUG_FS ++ struct dentry *debug_root; ++ struct dentry *debug_state; ++ struct dentry *debug_regs; ++#endif ++}; ++ ++#endif +diff -Nur linux-3.4.110.orig/drivers/mmc/host/Kconfig linux-3.4.110/drivers/mmc/host/Kconfig +--- linux-3.4.110.orig/drivers/mmc/host/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/mmc/host/Kconfig 2016-04-07 10:20:51.050085202 +0200 +@@ -609,3 +609,8 @@ + + Note: These controllers only support SDIO cards and do not + support MMC or SD memory cards. ++ ++config MMC_FTSDC010 ++ tristate "Andestech MMC/SD function support" ++ depends on NDS32 && !FTSDC010 ++ +diff -Nur linux-3.4.110.orig/drivers/mmc/host/Makefile linux-3.4.110/drivers/mmc/host/Makefile +--- linux-3.4.110.orig/drivers/mmc/host/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/mmc/host/Makefile 2016-04-07 10:20:51.050085202 +0200 +@@ -45,6 +45,7 @@ + obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o + obj-$(CONFIG_MMC_VUB300) += vub300.o + obj-$(CONFIG_MMC_USHC) += ushc.o ++obj-$(CONFIG_MMC_FTSDC010) += ftsdc010.o + + obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o + obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o +diff -Nur linux-3.4.110.orig/drivers/net/ethernet/faraday/ftmac100.c linux-3.4.110/drivers/net/ethernet/faraday/ftmac100.c +--- linux-3.4.110.orig/drivers/net/ethernet/faraday/ftmac100.c 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/net/ethernet/faraday/ftmac100.c 2016-04-07 10:20:51.054085357 +0200 +@@ -1,1202 +1,1210 @@ + /* +- * Faraday FTMAC100 10/100 Ethernet ++ * drivers/net/ftmac100.c + * +- * (C) Copyright 2009-2011 Faraday Technology +- * Po-Yu Chuang ++ * Faraday FTMAC100 Device Driver + * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2005 Faraday Corp. 
(http://www.faraday-tech.com) ++ * ++ * All Rights Reserved + * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +- +-#include +-#include +-#include ++#include + #include ++#include ++ ++#include ++#include /* printk() */ ++#include /* kmalloc() */ ++#include /* error codes */ ++#include /* size_t */ + #include +-#include +-#include +-#include +-#include ++ ++#include + #include ++#include /* struct device, and other headers */ ++#include /* eth_type_trans */ ++#include /* struct iphdr */ ++#include /* struct tcphdr */ ++#include ++ ++#include ++#include ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + ++#include // dma_clean_range ++#include + #include "ftmac100.h" + +-#define DRV_NAME "ftmac100" +-#define DRV_VERSION "0.2" +- +-#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */ +-#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */ ++#define IPMODULE MAC ++#define IPNAME FTMAC100 + +-#define MAX_PKT_SIZE 1518 +-#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */ ++#define ENABLE_BOTTOM_HALF 0 ++#define ZERO_COPY 1 + +-#if MAX_PKT_SIZE > 0x7ff +-#error invalid MAX_PKT_SIZE +-#endif +- +-#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE +-#error invalid RX_BUF_SIZE +-#endif +- +-/****************************************************************************** +- * private data +- *****************************************************************************/ +-struct ftmac100_descs { +- struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; +- struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES]; +-}; +- +-struct ftmac100 { +- struct resource *res; +- void __iomem *base; +- int irq; +- +- struct ftmac100_descs *descs; +- dma_addr_t descs_dma_addr; +- +- unsigned int rx_pointer; +- unsigned int tx_clean_pointer; +- unsigned int tx_pointer; +- unsigned int tx_pending; +- +- spinlock_t tx_lock; +- +- struct net_device *netdev; +- struct device *dev; +- struct napi_struct napi; ++#define FTMAC100_DEBUG 0 ++#define CARDNAME "FTMAC100" + +- struct mii_if_info mii; +-}; +- +-static int ftmac100_alloc_rx_page(struct ftmac100 *priv, +- struct ftmac100_rxdes *rxdes, gfp_t gfp); +- +-/****************************************************************************** +- * internal functions (hardware register access) +- *****************************************************************************/ +-#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \ +- FTMAC100_INT_NORXBUF | \ +- FTMAC100_INT_XPKT_OK | \ +- FTMAC100_INT_XPKT_LOST | \ +- FTMAC100_INT_RPKT_LOST | \ +- FTMAC100_INT_AHB_ERR | \ +- FTMAC100_INT_PHYSTS_CHG) ++MODULE_AUTHOR("Faraday Corp."); ++MODULE_LICENSE("GPL"); + +-#define INT_MASK_ALL_DISABLED 0 ++static const char version[] = "Faraday FTMAC100 Driver (Linux 2.6) 09/28/05 - (C) 2005 Faraday Corp.\n"; ++static volatile int trans_busy = 0; ++static const char mac_string[] = "Faraday MAC"; + +-static void ftmac100_enable_all_int(struct ftmac100 *priv) +-{ +- iowrite32(INT_MASK_ALL_ENABLED, priv->base + 
FTMAC100_OFFSET_IMR); +-} ++#if FTMAC100_DEBUG > 2 ++static void print_packet( unsigned char * buf, int length ); ++#endif + +-static void ftmac100_disable_all_int(struct ftmac100 *priv) +-{ +- iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR); +-} ++#if FTMAC100_DEBUG > 0 ++#define PRINTK printk ++#else ++#define PRINTK(x...) ++#endif + +-static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr) +-{ +- iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR); ++static int ftmac100_open(struct net_device *dev); ++static void ftmac100_timeout (struct net_device *dev); ++static int ftmac100_close(struct net_device *dev); ++static struct net_device_stats * ftmac100_query_statistics( struct net_device *dev); ++#ifdef HAVE_MULTICAST ++static void ftmac100_set_multicast_list(struct net_device *dev); ++#endif ++static void ftmac100_phy_configure(struct net_device* dev) {}; ++static irqreturn_t ftmac100_interrupt(int irq, void * dev_id); ++static void ftmac100_rcv(void *dev); ++static int ftmac100_setup(struct net_device *dev); ++static int ftmac100_reset( struct net_device* dev ); ++static void ftmac100_enable( struct net_device *dev ); ++ ++static void put_mac(int base, unsigned char *mac_addr) ++{ ++ int val; ++ ++ val = ((u32)mac_addr[0])<<8 | (u32)mac_addr[1]; ++ outl(val, base); ++ val = ((((u32)mac_addr[2])<<24)&0xff000000) | ++ ((((u32)mac_addr[3])<<16)&0xff0000) | ++ ((((u32)mac_addr[4])<<8)&0xff00) | ++ ((((u32)mac_addr[5])<<0)&0xff); ++ outl(val, base+4); ++} ++ ++static void get_mac(int base, unsigned char *mac_addr) ++{ ++ int val; ++ ++ //printk("+get_mac\n"); ++ ++ val = inl(base); ++ mac_addr[0] = (val>>8)&0xff; ++ mac_addr[1] = val&0xff; ++ val = inl(base+4); //john add +4 ++ mac_addr[2] = (val>>24)&0xff; ++ mac_addr[3] = (val>>16)&0xff; ++ mac_addr[4] = (val>>8)&0xff; ++ mac_addr[5] = val&0xff; ++} ++ ++static void auto_get_mac(int id,unsigned char *mac_addr) ++{ ++ get_mac(MAC_FTMAC100_0_VA_BASE + MAC_MADR_REG, mac_addr); ++ ++ if (memcmp(mac_addr, "\0\0\0\0\0\0", 6) == 0) { ++ mac_addr[0]=0; ++ mac_addr[1]=0x23; ++ mac_addr[2]=0x96; ++ mac_addr[3]=0x00; ++ mac_addr[4]=0xff; ++ mac_addr[5]=0x00; ++ } + } + +-static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr) ++/* ++ * Print the Ethernet address ++ */ ++static void ft_print_mac(unsigned char *mac_addr) + { +- iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR); ++ printk("ADDR: %pM\n", mac_addr); + } + +-static void ftmac100_txdma_start_polling(struct ftmac100 *priv) +-{ +- iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD); +-} + +-static int ftmac100_reset(struct ftmac100 *priv) ++#ifdef HAVE_MULTICAST ++/* ++ * Finds the CRC32 of a set of bytes. ++ * Again, from Peter Cammaert's code. ++ */ ++static int crc32( char * s, int length ) + { +- struct net_device *netdev = priv->netdev; +- int i; +- +- /* NOTE: reset clears all registers */ +- iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR); +- +- for (i = 0; i < 5; i++) { +- unsigned int maccr; +- +- maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); +- if (!(maccr & FTMAC100_MACCR_SW_RST)) { +- /* +- * FTMAC100_MACCR_SW_RST cleared does not indicate +- * that hardware reset completed (what the f*ck). +- * We still need to wait for a while. 
+- */ +- udelay(500); +- return 0; ++ /* indices */ ++ int perByte; ++ int perBit; ++ /* crc polynomial for Ethernet */ ++ const unsigned long poly = 0xedb88320; ++ /* crc value - preinitialized to all 1's */ ++ unsigned long crc_value = 0xffffffff; ++ ++ //printk("+crc32\n"); ++ ++ for ( perByte = 0; perByte < length; perByte ++ ) { ++ unsigned char c; ++ ++ c = *(s++); ++ for ( perBit = 0; perBit < 8; perBit++ ) { ++ crc_value = (crc_value>>1)^ ++ (((crc_value^c)&0x01)?poly:0); ++ c >>= 1; + } +- +- udelay(1000); + } +- +- netdev_err(netdev, "software reset failed\n"); +- return -EIO; +-} +- +-static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) +-{ +- unsigned int maddr = mac[0] << 8 | mac[1]; +- unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; +- +- iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR); +- iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); ++ return crc_value; + } ++#endif + +-#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ +- FTMAC100_MACCR_RCV_EN | \ +- FTMAC100_MACCR_XDMA_EN | \ +- FTMAC100_MACCR_RDMA_EN | \ +- FTMAC100_MACCR_CRC_APD | \ +- FTMAC100_MACCR_FULLDUP | \ +- FTMAC100_MACCR_RX_RUNT | \ +- FTMAC100_MACCR_RX_BROADPKT) +- +-static int ftmac100_start_hw(struct ftmac100 *priv) ++static int ftmac100_reset( struct net_device* dev ) + { +- struct net_device *netdev = priv->netdev; ++ unsigned ioaddr = dev->base_addr; ++ int rcount; + +- if (ftmac100_reset(priv)) +- return -EIO; ++ PRINTK("+ftmac100_reset:I/O addr=%X\n", ioaddr); + +- /* setup ring buffer base registers */ +- ftmac100_set_rx_ring_base(priv, +- priv->descs_dma_addr + +- offsetof(struct ftmac100_descs, rxdes)); +- ftmac100_set_tx_ring_base(priv, +- priv->descs_dma_addr + +- offsetof(struct ftmac100_descs, txdes)); ++ outl( SW_RST_bit, ioaddr + MACCR_REG ); + +- iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC); ++ /* this should pause enough for the chip to be happy */ ++ for (rcount = 0; (inl( ioaddr + MACCR_REG ) & SW_RST_bit) != 0; rcount++) { ++ //mdelay(10); ++ msleep_interruptible(10); ++ if (rcount > 5) // Retry 5 times ++ return -ENODEV; ++ } + +- ftmac100_set_mac(priv, netdev->dev_addr); ++ outl( 0, ioaddr + IMR_REG ); /* Disable all interrupts */ ++ if (inl(ioaddr+IMR_REG)!=0) ++ return -ENODEV; + +- iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR); +- return 0; ++ return 0; + } + +-static void ftmac100_stop_hw(struct ftmac100 *priv) +-{ +- iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR); +-} + +-/****************************************************************************** +- * internal functions (receive descriptor) +- *****************************************************************************/ +-static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS); +-} +- +-static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes) ++/* ++ . Function: ftmac100_enable ++ . Purpose: let the chip talk to the outside work ++ . Method: ++ . 1. Enable the transmitter ++ . 2. Enable the receiver ++ . 3. 
Enable interrupts ++*/ ++static void ftmac100_enable( struct net_device *dev ) + { +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS); +-} ++ unsigned int ioaddr = dev->base_addr; ++ int i; ++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev); ++ char mac_addr[6]; + +-static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); +-} ++ //printk("+ftmac100_enable\n"); ++ PRINTK("%s:ftmac100_enable ioaddr=%X\n", dev->name, ioaddr); + +-static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes) +-{ +- /* clear status bits */ +- rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); ++ for (i=0; irx_descs[i].RXDMA_OWN = OWNBY_FTMAC100; // owned by FTMAC100 ++ } ++ priv->rx_idx = 0; ++ ++ for (i=0; itx_descs[i].TXDMA_OWN = OWNBY_SOFTWARE; // owned by software ++ } ++ priv->tx_idx = 0; ++ ++ ++ /* set the MAC address */ ++ put_mac(ioaddr + MAC_MADR_REG, dev->dev_addr); ++ ++ //john add ++ get_mac(ioaddr + MAC_MADR_REG, mac_addr); ++ ft_print_mac(mac_addr); ++ ++ outl( priv->rx_descs_dma, ioaddr + RXR_BADR_REG); ++ outl( priv->tx_descs_dma, ioaddr + TXR_BADR_REG); ++ outl( 0x00001010, ioaddr + ITC_REG); // TODO: threshold too small ++ outl( (0UL<maccr_val, ioaddr + MACCR_REG ); ++ PRINTK("%s:ftmac100_enable DONE\n", dev->name); + } + +-static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes) ++/* ++ . Function: ftmac100_shutdown ++ . Purpose: closes down the SMC91xxx chip. ++ . Method: ++ . 1. zero the interrupt mask ++ . 2. clear the enable receive flag ++ . 3. clear the enable xmit flags ++ . ++ . TODO: ++ . (1) maybe utilize power down mode. ++ . Why not yet? Because while the chip will go into power down mode, ++ . the manual says that it will wake up in response to any I/O requests ++ . in the register space. Empirical results do not show this working. ++*/ ++static void ftmac100_shutdown( unsigned int ioaddr ) + { +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR); +-} ++ //printk("+ftmac100_shutdown\n"); + +-static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR); ++ outl( 0, ioaddr + IMR_REG ); ++ outl( 0, ioaddr + MACCR_REG ); + } + +-static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes) ++/* ++ * Function: ftmac100_wait_to_send_packet( struct sk_buff * skb, struct device * ) ++ * Purpose: ++ * Attempt to allocate memory for a packet, if chip-memory is not ++ * available, then tell the card to generate an interrupt when it ++ * is available* ++ * ++ * Algorithm: ++ * ++ * o if the saved_skb is not currently null, then drop this packet ++ * on the floor. This should never happen, because of TBUSY. ++ * o if the saved_skb is null, then replace it with the current packet, ++ * o See if I can sending it now. ++ * o (NO): Enable interrupts and let the interrupt handler deal with it. ++ * o (YES):Send it now. 
++ */ ++static int ftmac100_wait_to_send_packet( struct sk_buff * skb, struct net_device * dev ) + { +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL); +-} ++ struct ftmac100_local *priv=(struct ftmac100_local *)netdev_priv(dev); ++ unsigned int ioaddr=dev->base_addr; ++ volatile TX_DESC *cur_desc; ++ int length; ++ unsigned long flags; ++ ++ //printk("+ftmac100_wait_to_send_packet\n"); ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ if (skb==NULL) { ++ printk("%s(%d): NULL skb???\n", __FILE__,__LINE__); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return 0; ++ } ++ ++ cur_desc = &priv->tx_descs[priv->tx_idx]; ++#if ZERO_COPY ++ /* Record buffer address to be freed later */ ++ priv->tx_skbuff[priv->tx_idx] = skb; ++#endif ++ ++#ifdef not_complete_yet ++ if (cur_desc->TXDMA_OWN != TX_OWNBY_SOFTWARE) /// no empty transmit descriptor ++ { ++ DO_PRINT("no empty transmit descriptor\n"); ++ DO_PRINT("jiffies = %d\n", jiffies); ++ lp->stats.tx_dropped++; ++ netif_stop_queue(dev); /// waiting to do: ++ spin_unlock_irqrestore(&lp->lock, flags); ++ ++ return 1; ++ } ++#endif /* end_of_not */ ++ ++ for (; cur_desc->TXDMA_OWN != OWNBY_SOFTWARE; ) { ++ PRINTK( KERN_WARNING "NO empty TX\n"); //printk("no empty TX descriptor:0x%x:0x%x\n",(unsigned)cur_desc,(unsigned)cur_desc[0]); ++ udelay(1); ++ } ++ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; ++ length = min(length, TX_BUF_SIZE); // truncate jumbo packets ++ ++#if FTMAC100_DEBUG > 2 ++ printk("Transmitting Packet at 0x%x\n",(unsigned int)cur_desc->VIR_TXBUF_BADR); ++ print_packet( skb->data, length ); ++#endif + +-static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT); +-} ++#if ZERO_COPY ++ cur_desc->VIR_TXBUF_BADR = (unsigned)skb->data; ++ cur_desc->TXBUF_BADR = virt_to_phys(skb->data); ++ cpu_dma_wb_range((unsigned)skb->data, ((unsigned)(skb->data) + length + CACHE_LINE_SIZE( DCACHE) - 1 )&(~(CACHE_LINE_SIZE( DCACHE)-1)) ); + +-static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB); +-} ++#else ++ memcpy((char *)cur_desc->VIR_TXBUF_BADR, skb->data, length); ++#endif + +-static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes) +-{ +- return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL; ++ cur_desc->TXBUF_Size = length; ++ cur_desc->LTS = 1; ++ cur_desc->FTS = 1; ++ ++ cur_desc->TX2FIC = 0; ++ cur_desc->TXIC = 0; ++ ++ cur_desc->TXDMA_OWN = OWNBY_FTMAC100; ++ ++ outl( 0xffffffff, ioaddr + TXPD_REG ); ++ ++ priv->tx_idx = (priv->tx_idx + 1) & (TXDES_NUM-1); ++ priv->stats.tx_packets++; ++ priv->stats.tx_bytes += skb->len; ++#if !ZERO_COPY ++ dev_kfree_skb_any (skb); ++#endif ++ dev->trans_start = jiffies; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return 0; + } + +-static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes) +-{ +- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST); +-} ++//#define dma_allocate(x,y,z,w) dma_alloc_coherent((x),(y),(dma_addr_t*)(z),(w)) ++#define dma_allocate(x,y,z,w) dma_alloc_writecombine((x),(y),(dma_addr_t*)(z),(w)) + +-static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes, +- unsigned int size) ++static int ftmac100_ringbuf_alloc(struct net_device *dev,struct ftmac100_local *priv) + { +- rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR); +- rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size)); +-} ++ int i; + +-static void ftmac100_rxdes_set_end_of_ring(struct 
ftmac100_rxdes *rxdes) +-{ +- rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR); +-} ++ //printk("+ftmac100_ringbuf_alloc\n"); + +-static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes, +- dma_addr_t addr) +-{ +- rxdes->rxdes2 = cpu_to_le32(addr); +-} ++ priv->rx_descs = dma_allocate( NULL, sizeof(RX_DESC)*RXDES_NUM, &(priv->rx_descs_dma), GFP_KERNEL ); ++ if (priv->rx_descs == NULL || (( (u32)priv->rx_descs & 0xf)!=0)) { ++ printk("Receive Ring Buffer allocation error\n"); ++ return -ENOMEM; ++ } ++#if FTMAC100_DEBUG > 2 ++ else ++ printk( KERN_INFO "* Allocated RX descs=%X, bus addr=%X, size=%d*%d=%d\n", ++ (unsigned)priv->rx_descs, (unsigned)priv->rx_descs_dma, ++ sizeof(RX_DESC),RXDES_NUM,sizeof(RX_DESC)*RXDES_NUM ); ++#endif ++ memset((unsigned int *)priv->rx_descs, 0, sizeof(RX_DESC)*RXDES_NUM); + +-static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes) +-{ +- return le32_to_cpu(rxdes->rxdes2); ++ priv->rx_buf = dma_allocate( NULL, RX_BUF_SIZE*RXDES_NUM, &(priv->rx_buf_dma), GFP_KERNEL ); ++ if (priv->rx_buf == NULL || (( (u32)priv->rx_buf & 3)!=0)) { ++ printk("Receive Ring Buffer allocation error\n"); ++ return -ENOMEM; ++ } ++#if FTMAC100_DEBUG > 2 ++ else ++ printk( KERN_INFO "* Allocated RX buf=%X, bus addr=%X, size=%d*%d=%d\n", ++ (unsigned)priv->rx_buf, (unsigned)priv->rx_buf_dma, ++ RX_BUF_SIZE, RXDES_NUM, RX_BUF_SIZE*RXDES_NUM ); ++#endif ++ ++ memset((void *)priv->rx_buf,0,sizeof(RX_BUF_SIZE)*RXDES_NUM); ++ ++ for (i=0; irx_descs[i].RXBUF_Size = RX_BUF_SIZE; ++ priv->rx_descs[i].EDOTR = 0; // not last descriptor ++ priv->rx_descs[i].RXBUF_BADR = priv->rx_buf_dma+RX_BUF_SIZE*i; ++ priv->rx_descs[i].VIR_RXBUF_BADR=(unsigned int)priv->rx_buf+RX_BUF_SIZE*i; ++ } ++ priv->rx_descs[RXDES_NUM-1].EDOTR = 1; // is last descriptor ++ ++ priv->tx_descs = dma_allocate( NULL, sizeof(TX_DESC)*TXDES_NUM, &(priv->tx_descs_dma), GFP_KERNEL ); ++ if (priv->tx_descs == NULL || (( (u32)priv->tx_descs & 0xf)!=0)) { ++ printk("Transmit Ring Buffer allocation error\n"); ++ return -ENOMEM; ++ } ++#if FTMAC100_DEBUG > 2 ++ else ++ printk( KERN_INFO "* Allocated TX descs=%X, bus addr=%X, size=%d*%d=%d\n", ++ (unsigned)priv->tx_descs, (unsigned)priv->tx_descs_dma, sizeof(TX_DESC),TXDES_NUM,sizeof(TX_DESC)*TXDES_NUM); ++#endif ++ memset((void *)priv->tx_descs,0,sizeof(TX_DESC)*TXDES_NUM); ++ ++ priv->tx_buf = dma_allocate( NULL, TX_BUF_SIZE*TXDES_NUM, &(priv->tx_buf_dma), GFP_KERNEL ); ++ if (priv->tx_buf == NULL || (( (u32)priv->tx_buf & 0x3)!=0)) { ++ printk("Transmit Ring Buffer allocation error\n"); ++ return -ENOMEM; ++ } ++#if FTMAC100_DEBUG > 2 ++ else ++ printk( KERN_INFO "* Allocated TX buf=%X, bus addr=%X, size=%d*%d=%d\n", ++ (unsigned)priv->tx_buf, (unsigned)priv->tx_buf_dma, ++ TX_BUF_SIZE, TXDES_NUM, TX_BUF_SIZE*TXDES_NUM ); ++#endif ++ ++ memset((void *)priv->tx_buf,0,sizeof(TX_BUF_SIZE)*TXDES_NUM); ++ ++ for (i=0; itx_descs[i].EDOTR = 0; // not last descriptor ++ priv->tx_descs[i].TXBUF_BADR=priv->tx_buf_dma+TX_BUF_SIZE*i; ++ priv->tx_descs[i].VIR_TXBUF_BADR=(unsigned int)priv->tx_buf+TX_BUF_SIZE*i; ++ } ++ priv->tx_descs[TXDES_NUM-1].EDOTR = 1; // is last descriptor ++ return 0; + } + + /* +- * rxdes3 is not used by hardware. We use it to keep track of page. +- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). ++ * Function: ftmac100_poll( struct net_device *dev ) ++ * ++ * Purpose: ++ * poll interface callback function. 
+ */ +-static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page) ++void ftmac100_poll(struct net_device *dev) + { +- rxdes->rxdes3 = (unsigned int)page; ++ disable_irq(dev->irq); ++ ftmac100_interrupt(dev->irq, dev); ++ enable_irq(dev->irq); + } + +-static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes) +-{ +- return (struct page *)rxdes->rxdes3; +-} +- +-/****************************************************************************** +- * internal functions (receive) +- *****************************************************************************/ +-static int ftmac100_next_rx_pointer(int pointer) ++/* ++ * Function: ftmac100_setup( struct net_device *dev ) ++ * ++ * Purpose: ++ * Tests to see if the device 'dev' points to an ftmac100 chip. ++ * Returns a 0 on success ++ */ ++static const struct net_device_ops ftmac100_ops = { ++ .ndo_init = ftmac100_setup, ++ .ndo_open = ftmac100_open, ++ .ndo_stop = ftmac100_close, ++ .ndo_start_xmit = ftmac100_wait_to_send_packet, ++ .ndo_get_stats = ftmac100_query_statistics, ++ .ndo_tx_timeout = ftmac100_timeout, ++#ifdef HAVE_MULTICAST ++ .ndo_set_multicast_list = ftmac100_set_multicast_list, ++#endif ++ .ndo_set_mac_address = eth_mac_addr, ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = ftmac100_poll, ++#endif ++}; ++static int ftmac100_setup(struct net_device *dev) + { +- return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); +-} ++ int retval; ++ static unsigned version_printed = 0; ++ struct ftmac100_local *priv; ++ ++ if (version_printed++ == 0) ++ printk(KERN_INFO "%s", version); ++ ++ /* Now, print out the card info, in a short format.. */ ++ printk(KERN_INFO "%s: device at %#3x IRQ:%d NOWAIT:%d\n",dev->name, ++ (unsigned)dev->base_addr, dev->irq, dev->dma); ++ ++ /* Initialize priviate data */ ++ priv = (struct ftmac100_local *)netdev_priv(dev); ++ memset(priv, 0, sizeof(struct ftmac100_local)); ++ spin_lock_init(&priv->lock); ++ priv->maccr_val = FULLDUP_bit ++ | CRC_APD_bit ++ | MDC_SEL_bit ++ | RCV_EN_bit ++ | XMT_EN_bit ++ | RDMA_EN_bit ++ | XDMA_EN_bit ; ++ retval = ftmac100_ringbuf_alloc(dev,priv); ++ if (retval) ++ goto err_out; ++ ++ /* now, reset the chip, and put it into a known state */ ++ retval = ftmac100_reset( dev ); ++ if (retval) { ++ printk( "%s: unable to reset.\n", dev->name ); ++ goto err_out; ++ } ++ ++ /* Fill in the fields of the device structure with ethernet values. 
*/ ++ ether_setup(dev); ++ ++ /* Grab the IRQ */ ++ retval = request_irq(dev->irq, &ftmac100_interrupt, IRQF_DISABLED, dev->name, dev); ++ if (retval) { ++ printk("%s: unable to get IRQ %d (irqval=%d).\n", dev->name, dev->irq, retval); ++ goto err_out; ++ } ++ ++#if 0 ++ if ((proc_ftmac100 = create_proc_entry( "ftmac100", 0, 0 ))) { ++ proc_ftmac100->read_proc = ftmac100_read_proc; ++ proc_ftmac100->data = dev; ++ proc_ftmac100->owner = THIS_MODULE; ++ } ++#endif + +-static void ftmac100_rx_pointer_advance(struct ftmac100 *priv) +-{ +- priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer); +-} ++ return 0; + +-static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv) +-{ +- return &priv->descs->rxdes[priv->rx_pointer]; ++err_out: ++ dma_free_coherent( NULL, sizeof(RX_DESC)*RXDES_NUM, (void*)priv->rx_descs, (dma_addr_t)priv->rx_descs_dma ); ++ dma_free_coherent( NULL, RX_BUF_SIZE*RXDES_NUM, (void*)priv->rx_buf, (dma_addr_t)priv->rx_buf_dma ); ++ dma_free_coherent( NULL, sizeof(TX_DESC)*TXDES_NUM, (void*)priv->tx_descs, (dma_addr_t)priv->tx_descs_dma ); ++ dma_free_coherent( NULL, TX_BUF_SIZE*TXDES_NUM, (void*)priv->tx_buf, (dma_addr_t)priv->tx_buf_dma ); ++ priv->rx_descs = NULL; priv->rx_descs_dma = 0; ++ priv->rx_buf = NULL; priv->rx_buf_dma = 0; ++ priv->tx_descs = NULL; priv->tx_descs_dma = 0; ++ priv->tx_buf = NULL; priv->tx_buf_dma = 0; ++ return retval; + } + +-static struct ftmac100_rxdes * +-ftmac100_rx_locate_first_segment(struct ftmac100 *priv) +-{ +- struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); + +- while (!ftmac100_rxdes_owned_by_dma(rxdes)) { +- if (ftmac100_rxdes_first_segment(rxdes)) +- return rxdes; ++#if FTMAC100_DEBUG > 2 ++static void print_packet( unsigned char * buf, int length ) ++{ ++#if FTMAC100_DEBUG > 3 ++ int i; ++ int remainder; ++ int lines; ++#endif + +- ftmac100_rxdes_set_dma_own(rxdes); +- ftmac100_rx_pointer_advance(priv); +- rxdes = ftmac100_current_rxdes(priv); +- } ++// printk("Packet of length %d \n", length ); + +- return NULL; ++#if FTMAC100_DEBUG > 3 ++ lines = length >> 4; ++ remainder = length & 15; ++ ++ for ( i = 0; i < lines ; i ++ ) { ++ int cur; ++ for ( cur = 0; cur < 8; cur ++ ) { ++ unsigned char a, b; ++ a = *(buf ++ ); ++ b = *(buf ++ ); ++ printk("%02x%02x ", a, b ); ++ } ++ printk("\n"); ++ } ++ for ( i = 0; i < remainder/2 ; i++ ) { ++ unsigned char a, b; ++ ++ a = *(buf ++ ); ++ b = *(buf ++ ); ++ printk("%02x%02x ", a, b ); ++ } ++ printk("\n"); ++#endif + } ++#endif + +-static bool ftmac100_rx_packet_error(struct ftmac100 *priv, +- struct ftmac100_rxdes *rxdes) +-{ +- struct net_device *netdev = priv->netdev; +- bool error = false; +- +- if (unlikely(ftmac100_rxdes_rx_error(rxdes))) { +- if (net_ratelimit()) +- netdev_info(netdev, "rx err\n"); +- +- netdev->stats.rx_errors++; +- error = true; +- } +- +- if (unlikely(ftmac100_rxdes_crc_error(rxdes))) { +- if (net_ratelimit()) +- netdev_info(netdev, "rx crc err\n"); +- +- netdev->stats.rx_crc_errors++; +- error = true; +- } +- +- if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) { +- if (net_ratelimit()) +- netdev_info(netdev, "rx frame too long\n"); +- +- netdev->stats.rx_length_errors++; +- error = true; +- } else if (unlikely(ftmac100_rxdes_runt(rxdes))) { +- if (net_ratelimit()) +- netdev_info(netdev, "rx runt\n"); +- +- netdev->stats.rx_length_errors++; +- error = true; +- } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) { +- if (net_ratelimit()) +- netdev_info(netdev, "rx odd nibble\n"); +- +- netdev->stats.rx_length_errors++; +- 
error = true; +- } +- +- return error; +-} + +-static void ftmac100_rx_drop_packet(struct ftmac100 *priv) ++/* ++ * Open and Initialize the board ++ * ++ * Set up everything, reset the card, etc .. ++ * ++ */ ++static int ftmac100_open(struct net_device *dev) + { +- struct net_device *netdev = priv->netdev; +- struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); +- bool done = false; +- +- if (net_ratelimit()) +- netdev_dbg(netdev, "drop packet %p\n", rxdes); ++ int retval = 0; ++ PRINTK("+%s:ftmac100_open\n", dev->name); + +- do { +- if (ftmac100_rxdes_last_segment(rxdes)) +- done = true; ++ //netif_start_queue(dev); + +- ftmac100_rxdes_set_dma_own(rxdes); +- ftmac100_rx_pointer_advance(priv); +- rxdes = ftmac100_current_rxdes(priv); +- } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes)); +- +- netdev->stats.rx_dropped++; ++ /* reset the hardware */ ++ ftmac100_reset( dev ); ++ retval = ftmac100_reset( dev ); ++ if (retval) { ++ printk( "%s: unable to reset.\n", dev->name ); ++ retval = -ENODEV; ++ } else { ++ ftmac100_enable( dev ); ++ ++ /* Configure the PHY */ ++ ftmac100_phy_configure(dev); ++ ++ netif_start_queue(dev); ++ ++ PRINTK("+%s:ftmac100_open DONE\n", dev->name); ++ } ++ ++ return retval; + } + +-static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) +-{ +- struct net_device *netdev = priv->netdev; +- struct ftmac100_rxdes *rxdes; +- struct sk_buff *skb; +- struct page *page; +- dma_addr_t map; +- int length; + +- rxdes = ftmac100_rx_locate_first_segment(priv); +- if (!rxdes) +- return false; +- +- if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) { +- ftmac100_rx_drop_packet(priv); +- return true; +- } +- +- /* +- * It is impossible to get multi-segment packets +- * because we always provide big enough receive buffers. +- */ +- if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) +- BUG(); +- +- /* start processing */ +- skb = netdev_alloc_skb_ip_align(netdev, 128); +- if (unlikely(!skb)) { +- if (net_ratelimit()) +- netdev_err(netdev, "rx skb alloc failed\n"); +- +- ftmac100_rx_drop_packet(priv); +- return true; +- } +- +- if (unlikely(ftmac100_rxdes_multicast(rxdes))) +- netdev->stats.multicast++; +- +- map = ftmac100_rxdes_get_dma_addr(rxdes); +- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); +- +- length = ftmac100_rxdes_frame_length(rxdes); +- page = ftmac100_rxdes_get_page(rxdes); +- skb_fill_page_desc(skb, 0, page, 0, length); +- skb->len += length; +- skb->data_len += length; +- +- /* page might be freed in __pskb_pull_tail() */ +- if (length > 64) +- skb->truesize += PAGE_SIZE; +- __pskb_pull_tail(skb, min(length, 64)); +- +- ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); +- +- ftmac100_rx_pointer_advance(priv); +- +- skb->protocol = eth_type_trans(skb, netdev); +- +- netdev->stats.rx_packets++; +- netdev->stats.rx_bytes += skb->len; ++/* ++ * Called by the kernel to send a packet out into the void ++ * of the net. This routine is largely based on ++ * skeleton.c, from Becker. ++ * ++ */ ++static void ftmac100_timeout (struct net_device *dev) ++{ ++ /* If we get here, some higher level has decided we are broken. ++ There should really be a "kick me" function call instead. 
*/ ++ printk(KERN_WARNING "%s: transmit timed out?\n",dev->name); + +- /* push packet to protocol stack */ +- netif_receive_skb(skb); ++ //printk("+ftmac100_timeout\n"); + +- (*processed)++; +- return true; ++ ftmac100_reset( dev ); ++ ftmac100_enable( dev ); ++ ftmac100_phy_configure(dev); ++ netif_wake_queue(dev); ++ dev->trans_start = jiffies; + } + +-/****************************************************************************** +- * internal functions (transmit descriptor) +- *****************************************************************************/ +-static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes) ++#if ZERO_COPY ++/* ++ * Free transmitted skb buffer when it's safe. ++ */ ++static void ftmac100_free_tx (struct net_device *dev, int irq) + { +- /* clear all except end of ring bit */ +- txdes->txdes0 = 0; +- txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR); +- txdes->txdes2 = 0; +- txdes->txdes3 = 0; +-} ++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev); ++ //volatile TX_DESC *cur_desc; ++ int entry = priv->old_tx & (TXDES_NUM-1); ++ ++ //enter spinlock ++ if (!irq) ++ spin_lock(&priv->lock); ++ ++ /* Free used tx skbuffs */ ++ while (entry != priv->tx_idx) { ++ struct sk_buff *skb; ++ ++ skb = priv->tx_skbuff[entry]; ++ if(skb) { ++ dev_kfree_skb_any (skb); ++ priv->tx_skbuff[entry] = 0; ++ } + +-static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes) +-{ +- return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +-} ++ entry = (entry + 1) & (TXDES_NUM-1); ++ } + +-static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes) +-{ +- /* +- * Make sure dma own bit will not be set before any other +- * descriptor fields. +- */ +- wmb(); +- txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +-} ++ if (!irq) ++ spin_unlock(&priv->lock); ++ //exit spinloc + +-static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes) +-{ +- return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL); ++ priv->old_tx = entry; ++ netif_wake_queue (dev); + } ++#endif + +-static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes) +-{ +- return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL); +-} ++/* ++ . ++ . This is the main routine of the driver, to handle the net_device when ++ . it needs some attention. ++ . ++ . So: ++ . first, save state of the chipset ++ . branch off into routines to handle each case, and acknowledge ++ . each to the interrupt register ++ . and finally restore state. ++ . 
++ */ + +-static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes) +-{ +- txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR); +-} ++#if FTMAC100_DEBUG > 2 + +-static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes) ++static void dump_intc(void) + { +- txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS); ++ printk( " INTC[0] IRQSRC=%08X,MASK=%08X,MOD=%08X,LEV=%08X,STAT=%08X\n", ++ *((volatile unsigned*)(INTC_FTINTC010_VA_BASE+0)), ++ *((volatile unsigned*)(INTC_FTINTC010_VA_BASE+0x4)), ++ *((volatile unsigned*)(INTC_FTINTC010_VA_BASE+0xc)), ++ *((volatile unsigned*)(INTC_FTINTC010_VA_BASE+0x10)), ++ *((volatile unsigned*)(INTC_FTINTC010_VA_BASE+0x14)) ); ++ printk( " INTC[1] IRQSRC=%08X,MASK=%08X,MOD=%08X,LEV=%08X,STAT=%08X\n", ++ *((volatile unsigned*)(INTC_FTINTC010_1_VA_BASE+0)), ++ *((volatile unsigned*)(INTC_FTINTC010_1_VA_BASE+0x4)), ++ *((volatile unsigned*)(INTC_FTINTC010_1_VA_BASE+0xc)), ++ *((volatile unsigned*)(INTC_FTINTC010_1_VA_BASE+0x10)), ++ *((volatile unsigned*)(INTC_FTINTC010_1_VA_BASE+0x14)) ); + } ++#else ++#define dump_intc() ++#endif + +-static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes) ++#if FTMAC100_DEBUG > 2 ++static void show_intstatus(unsigned char status) + { +- txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS); ++ static int count = 0; ++ if (status & PHYSTS_CHG_bit) ++ printk( "[%d]%s ", count, "PHYSTS_CHG" ); ++ if (status & AHB_ERR_bit) ++ printk( "[%d]%s ", count, "AHB_ERR" ); ++ if (status & RPKT_LOST_bit) ++ printk( "[%d]%s ", count, "RPKT_LOST" ); ++ if (status & RPKT_SAV_bit) ++ printk( "[%d]%s ", count, "RPKT_SAV" ); ++ if (status & XPKT_LOST_bit) ++ printk( "[%d]%s ", count, "XPKT_LOST" ); ++ if (status & XPKT_OK_bit) ++ printk( "[%d]%s ", count, "XPKT_OK" ); ++ if (status & NOTXBUF_bit) ++ printk( "[%d]%s ", count, "NOTXBUF" ); ++ if (status & XPKT_FINISH_bit) ++ printk( "[%d]%s ", count, "XPKT_FINISH" ); ++ if (status & NORXBUF_bit) ++ printk( "[%d]%s ", count, "NORXBUF" ); ++ if (status & RPKT_FINISH_bit) ++ printk( "[%d]%s ", count, "RPKT_FINISH" ); ++ if (status & ~(PHYSTS_CHG_bit | AHB_ERR_bit | RPKT_LOST_bit | RPKT_SAV_bit ++ | XPKT_LOST_bit | XPKT_OK_bit | NOTXBUF_bit | XPKT_FINISH_bit ++ | NORXBUF_bit | RPKT_FINISH_bit)) ++ printk( "[%d]%s ", count, "" ); ++ count++; + } ++#else ++#define show_intstatus(x) ++#endif ++ + +-static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes) ++/* ++ * The interrupt handler ++ */ ++static irqreturn_t ftmac100_interrupt(int irq, void * dev_id) + { +- txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC); +-} ++ struct net_device *dev = dev_id; ++ unsigned int ioaddr = dev->base_addr; ++ unsigned status; // interrupt status ++ unsigned char mask; // interrupt mask ++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->lock,flags); // Luke 08/18/2005 ins ++ PRINTK(KERN_INFO "+ftmac100_interrupt\n"); ++ dump_intc(); ++ ++ if (dev == NULL||priv == NULL) { ++ printk(KERN_WARNING "%s: irq %d for unknown device.\n", "ftmac100_interrupt", irq); ++ return IRQ_HANDLED; ++ } ++ ++ /* read the interrupt status register */ ++ mask = inl( ioaddr + IMR_REG ); ++ ++ /* read the status flag, and mask it */ ++ status = inl( ioaddr + ISR_REG ) & mask; ++ ++ show_intstatus(status); ++ ++ if ( status & RPKT_FINISH_bit ) ++ ftmac100_rcv(dev); ++ ++ if (status & NORXBUF_bit) { ++ //printk("<0x%x:NORXBUF>",status); ++ outl( mask & ~NORXBUF_bit, ioaddr + IMR_REG); ++ trans_busy = 1; ++ ++#if 
ENABLE_BOTTOM_HALF ++ priv->rcv_tq.sync = 0; ++ priv->rcv_tq.routine=ftmac100_rcv; ++ priv->rcv_tq.data = dev; ++ queue_task(&priv->rcv_tq, &tq_timer); ++ //queue_task(&priv->rcv_tq, &tq_immediate); ++#else ++ ftmac100_rcv( dev ); ++#endif ++ } ++ ++ if (status & AHB_ERR_bit) ++ printk("<0x%x:AHB_ERR>",status); ++ ++ if (status & XPKT_FINISH_bit) ++ printk( "[XPKT_FINISH]" ); ++ ++ /* ++ if (status & PHYSTS_CHG_bit) { ++ } ++ */ ++ if (status & XPKT_OK_bit) { ++#if ZERO_COPY ++ ftmac100_free_tx(dev,1); ++#endif ++ } ++ /* ++ if (status & NOTXBUF_bit) { ++ } ++ */ ++ ++ if (status & RPKT_LOST_bit) ++ priv->stats.rx_errors++; ++ ++ if (status & XPKT_LOST_bit) { ++#if ZERO_COPY ++ ftmac100_free_tx(dev,1); ++#endif ++ priv->stats.tx_errors++; ++ } ++ ++ PRINTK(KERN_INFO "+ftmac100_interrupt DONE\n"); ++ dump_intc(); ++ PRINTK(KERN_INFO "\n"); ++ spin_unlock_irqrestore(&priv->lock,flags); // Luke 08/18/2005 ins + +-static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes, +- unsigned int len) +-{ +- txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len)); ++ return IRQ_HANDLED; + } + +-static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes, +- dma_addr_t addr) +-{ +- txdes->txdes2 = cpu_to_le32(addr); +-} + +-static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes) +-{ +- return le32_to_cpu(txdes->txdes2); +-} + + /* +- * txdes3 is not used by hardware. We use it to keep track of socket buffer. +- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). ++ . ftmac100_rcv - receive a packet from the card ++ . ++ . There is ( at least ) a packet waiting to be read from ++ . chip-memory. ++ . ++ . o Read the status ++ . o If an error, record it ++ . o otherwise, read in the packet + */ +-static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb) +-{ +- txdes->txdes3 = (unsigned int)skb; +-} + +-static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes) ++static void ftmac100_rcv(void *devp) + { +- return (struct sk_buff *)txdes->txdes3; ++ struct net_device *dev=(struct net_device *)devp; ++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev); ++ unsigned int ioaddr=dev->base_addr; ++ int packet_length; ++ int rcv_cnt; ++ volatile RX_DESC *cur_desc; ++ int cpy_length; ++ int cur_idx; ++ int seg_length; ++ int have_package; ++ int have_frs; ++ int start_idx; ++ ++ struct sk_buff * skb; ++ unsigned char * data; ++ ++ PRINTK("+ ftmac100_rcv\n"); ++ ++ start_idx = priv->rx_idx; ++ ++ for (rcv_cnt=0; rcv_cnt<8 ; ++rcv_cnt) { ++ packet_length = 0; ++ cur_idx = priv->rx_idx; ++ ++ have_package = 0; ++ have_frs = 0; ++ for (; (cur_desc = &priv->rx_descs[priv->rx_idx])->RXDMA_OWN==0; ) { ++ have_package = 1; ++ priv->rx_idx = (priv->rx_idx+1) & (RXDES_NUM-1); ++ if (cur_desc->FRS) { ++ have_frs = 1; ++ if (cur_desc->RX_ERR || cur_desc->CRC_ERR || cur_desc->FTL ++ || cur_desc->RUNT || cur_desc->RX_ODD_NB) { ++ priv->stats.rx_errors++; // error frame.... 
++ break; ++ } ++ if (cur_desc->MULTICAST) ++ priv->stats.multicast++; ++ packet_length = cur_desc->ReceiveFrameLength; // normal frame ++ } ++ if ( cur_desc->LRS ) // packet's last frame ++ break; ++ } ++ if (have_package==0) ++ goto done; ++ if (have_frs == 0) ++ priv->stats.rx_over_errors++; ++ ++ if (packet_length>0) { ++ // Allocate enough memory for entire receive frame, to be safe ++ skb = dev_alloc_skb( packet_length + 2 ); ++ ++ if ( skb == NULL ) { ++ printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", dev->name); ++ priv->stats.rx_dropped++; ++ goto done; ++ } ++ ++ skb_reserve( skb, 2 ); /* 16 bit alignment */ ++ skb->dev = dev; ++ ++ data = skb_put( skb, packet_length ); ++ cpy_length = 0; ++ for (; cur_idx!=priv->rx_idx; cur_idx = (cur_idx+1) & (RXDES_NUM-1)) { ++ seg_length = min(packet_length - cpy_length, RX_BUF_SIZE); ++ memcpy(data+cpy_length, (char *)priv->rx_descs[cur_idx].VIR_RXBUF_BADR, seg_length); ++ cpy_length += seg_length; ++ } ++ ++ skb->protocol = eth_type_trans(skb, dev); ++ netif_rx(skb); ++ dev->last_rx = jiffies; ++ priv->stats.rx_packets++; ++ priv->stats.rx_bytes += skb->len; ++ } ++ } ++ ++done: ++ if (start_idx != priv->rx_idx) { ++ for (cur_idx = (start_idx+1)%RXDES_NUM; cur_idx != priv->rx_idx; cur_idx = (cur_idx+1)%RXDES_NUM) { ++ priv->rx_descs[cur_idx].RXDMA_OWN = 1; ++ } ++ priv->rx_descs[start_idx].RXDMA_OWN = 1; ++ } ++ if (trans_busy == 1) { ++ outl( priv->maccr_val, ioaddr + MACCR_REG ); ++ outl( inl(ioaddr + IMR_REG) | NORXBUF_bit, ioaddr + IMR_REG); ++ } ++ ++ PRINTK("+ ftmac100_rcv DONE\n"); ++ ++ return; + } + +-/****************************************************************************** +- * internal functions (transmit) +- *****************************************************************************/ +-static int ftmac100_next_tx_pointer(int pointer) ++/* ++ . ftmac100_close ++ . ++ . this makes the board clean up everything that it can ++ . and not talk to the outside world. Caused by ++ . an 'ifconfig ethX down' ++ . ++ */ ++static int ftmac100_close(struct net_device *dev) + { +- return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); +-} ++ //printk("+ftmac100_close\n"); + +-static void ftmac100_tx_pointer_advance(struct ftmac100 *priv) +-{ +- priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer); ++ netif_stop_queue(dev); ++ ++ ftmac100_shutdown( dev->base_addr ); ++#if ZERO_COPY ++ ftmac100_free_tx(dev,0); ++#endif ++ return 0; + } + +-static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv) ++/* ++ . Get the current statistics. ++ . This may be called with the card open or closed. 
*/
++static struct net_device_stats* ftmac100_query_statistics(struct net_device *dev)
+ {
+- priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
+-}
++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev);
+
+-static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
+-{
+- return &priv->descs->txdes[priv->tx_pointer];
+-}
++ PRINTK("+ftmac100_query_statistics\n");
+
+-static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
+-{
+- return &priv->descs->txdes[priv->tx_clean_pointer];
++ return &priv->stats;
+ }
+
+-static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
+-{
+- struct net_device *netdev = priv->netdev;
+- struct ftmac100_txdes *txdes;
+- struct sk_buff *skb;
+- dma_addr_t map;
+-
+- if (priv->tx_pending == 0)
+- return false;
+
+- txdes = ftmac100_current_clean_txdes(priv);
+-
+- if (ftmac100_txdes_owned_by_dma(txdes))
+- return false;
+-
+- skb = ftmac100_txdes_get_skb(txdes);
+- map = ftmac100_txdes_get_dma_addr(txdes);
+-
+- if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
+- ftmac100_txdes_late_collision(txdes))) {
+- /*
+- * packet transmitted to ethernet lost due to late collision
+- * or excessive collision
+- */
+- netdev->stats.tx_aborted_errors++;
+- } else {
+- netdev->stats.tx_packets++;
+- netdev->stats.tx_bytes += skb->len;
+- }
++#ifdef HAVE_MULTICAST
+
+- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+- dev_kfree_skb(skb);
+-
+- ftmac100_txdes_reset(txdes);
+-
+- ftmac100_tx_clean_pointer_advance(priv);
+-
+- spin_lock(&priv->tx_lock);
+- priv->tx_pending--;
+- spin_unlock(&priv->tx_lock);
+- netif_wake_queue(netdev);
+-
+- return true;
+-}
++
++/*
++ . Function: ftmac100_setmulticast( unsigned int ioaddr, int count, dev_mc_list * adds )
++ . Purpose:
++ . This sets the internal hardware table to filter out unwanted multicast
++ . packets before they take up memory.
++ */
+
+-static void ftmac100_tx_complete(struct ftmac100 *priv)
++static void ftmac100_setmulticast( unsigned int ioaddr, int count, struct dev_mc_list * addrs )
+ {
+- while (ftmac100_tx_complete_packet(priv))
+- ;
++ struct dev_mc_list * cur_addr;
++ int crc_val;
++
++ //printk("+ftmac100_setmulticast\n");
++
++ for (cur_addr = addrs ; cur_addr!=NULL ; cur_addr = cur_addr->next ) {
++ if ( !( *cur_addr->dmi_addr & 1 ) )
++ continue;
++ crc_val = crc32( cur_addr->dmi_addr, 6 );
++ crc_val = (crc_val>>26)&0x3f; // take the 6 MSBs
++ if (crc_val >= 32)
++ outl(inl(ioaddr+MAHT1_REG) | (1UL<<(crc_val-32)), ioaddr+MAHT1_REG);
++ else
++ outl(inl(ioaddr+MAHT0_REG) | (1UL<netdev;
+- struct ftmac100_txdes *txdes;
+- unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+-
+- txdes = ftmac100_current_txdes(priv);
+- ftmac100_tx_pointer_advance(priv);
+-
+- /* setup TX descriptor */
+- ftmac100_txdes_set_skb(txdes, skb);
+- ftmac100_txdes_set_dma_addr(txdes, map);
+-
+- ftmac100_txdes_set_first_segment(txdes);
+- ftmac100_txdes_set_last_segment(txdes);
+- ftmac100_txdes_set_txint(txdes);
+- ftmac100_txdes_set_buffer_size(txdes, len);
+
+- spin_lock(&priv->tx_lock);
+- priv->tx_pending++;
+- if (priv->tx_pending == TX_QUEUE_ENTRIES)
+- netif_stop_queue(netdev);
+-
+- /* start transmit */
+- ftmac100_txdes_set_dma_own(txdes);
+- spin_unlock(&priv->tx_lock);
++/*
++ . ftmac100_set_multicast_list
++ .
++ . This routine will, depending on the values passed to it,
++ . either make it accept multicast packets, go into
++ . promiscuous mode ( for TCPDUMP and cousins ) or accept
++ .
a select set of multicast packets ++*/ ++static void ftmac100_set_multicast_list(struct net_device *dev) ++{ ++ unsigned int ioaddr = dev->base_addr; ++ struct ftmac100_local *priv = (struct ftmac100_local *)netdev_priv(dev); ++ ++ //printk("+ftmac100_set_multicast_list\n"); ++ ++ if (dev->flags & IFF_PROMISC) ++ priv->maccr_val |= RCV_ALL_bit; ++ else ++ priv->maccr_val &= ~RCV_ALL_bit; ++ ++ if ( !(dev->flags & IFF_ALLMULTI) ) ++ priv->maccr_val |= RX_MULTIPKT_bit; ++ else ++ priv->maccr_val &= ~RX_MULTIPKT_bit; ++ ++ if (dev->mc_count) { ++ priv->maccr_val |= HT_MULTI_EN_bit; ++ ftmac100_setmulticast( ioaddr, dev->mc_count, dev->mc_list ); ++ } ++ else ++ priv->maccr_val &= ~HT_MULTI_EN_bit; + +- ftmac100_txdma_start_polling(priv); +- return NETDEV_TX_OK; ++ outl( priv->maccr_val, ioaddr + MACCR_REG ); + } ++#endif + +-/****************************************************************************** +- * internal functions (buffer) +- *****************************************************************************/ +-static int ftmac100_alloc_rx_page(struct ftmac100 *priv, +- struct ftmac100_rxdes *rxdes, gfp_t gfp) +-{ +- struct net_device *netdev = priv->netdev; +- struct page *page; +- dma_addr_t map; +- +- page = alloc_page(gfp); +- if (!page) { +- if (net_ratelimit()) +- netdev_err(netdev, "failed to allocate rx page\n"); +- return -ENOMEM; +- } +- +- map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(priv->dev, map))) { +- if (net_ratelimit()) +- netdev_err(netdev, "failed to map rx page\n"); +- __free_page(page); +- return -ENOMEM; +- } +- +- ftmac100_rxdes_set_page(rxdes, page); +- ftmac100_rxdes_set_dma_addr(rxdes, map); +- ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE); +- ftmac100_rxdes_set_dma_own(rxdes); +- return 0; +-} ++/* ++ * Module initialization function ++ */ + +-static void ftmac100_free_buffers(struct ftmac100 *priv) ++static int ftmac100_probe(struct platform_device *pdev) + { +- int i; ++ int result,thisresult; ++ struct net_device *dev; ++ struct resource *iores; + +- for (i = 0; i < RX_QUEUE_ENTRIES; i++) { +- struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; +- struct page *page = ftmac100_rxdes_get_page(rxdes); +- dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); ++ PRINTK("+init_module\n"); + +- if (!page) +- continue; ++ result = -ENODEV; + +- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); +- __free_page(page); ++ dev = alloc_etherdev(sizeof(struct ftmac100_local)); ++ if (!dev) { ++ printk(KERN_ERR "Fail allocating ethernet device"); ++ return -ENODEV; + } +- +- for (i = 0; i < TX_QUEUE_ENTRIES; i++) { +- struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; +- struct sk_buff *skb = ftmac100_txdes_get_skb(txdes); +- dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); +- +- if (!skb) +- continue; +- +- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); +- dev_kfree_skb(skb); ++ iores = platform_get_resource(pdev, IORESOURCE_IO, 0); ++ /* Copy the parameters from the platform specification */ ++ dev->base_addr = iores->start; ++ dev->irq = platform_get_irq(pdev, 0); ++ dev->netdev_ops = &ftmac100_ops; ++ ++ //dev->dma = nowait; // Use DMA field for nowait ++ /* Setup initial mac address */ ++ auto_get_mac(pdev->id,dev->dev_addr); ++ ++ if ((thisresult = register_netdev(dev)) != 0) { ++ free_irq( dev->irq, dev ); ++ free_netdev(dev); ++ } else { ++ platform_set_drvdata(pdev, dev); + } ++ if (thisresult == 0) // any of the devices initialized, run ++ result = 0; + +- 
dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs), +- priv->descs, priv->descs_dma_addr); ++ return result; + } + +-static int ftmac100_alloc_buffers(struct ftmac100 *priv) +-{ +- int i; +- +- priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), +- &priv->descs_dma_addr, GFP_KERNEL); +- if (!priv->descs) +- return -ENOMEM; +- +- memset(priv->descs, 0, sizeof(struct ftmac100_descs)); +- +- /* initialize RX ring */ +- ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); +- +- for (i = 0; i < RX_QUEUE_ENTRIES; i++) { +- struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; +- +- if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) +- goto err; +- } +- +- /* initialize TX ring */ +- ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); +- return 0; +- +-err: +- ftmac100_free_buffers(priv); +- return -ENOMEM; +-} + +-/****************************************************************************** +- * struct mii_if_info functions +- *****************************************************************************/ +-static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) ++/* ++ * Cleanup when module is removed with rmmod ++ */ ++ ++static int ftmac100_remove(struct platform_device *pdev) + { +- struct ftmac100 *priv = netdev_priv(netdev); +- unsigned int phycr; +- int i; +- +- phycr = FTMAC100_PHYCR_PHYAD(phy_id) | +- FTMAC100_PHYCR_REGAD(reg) | +- FTMAC100_PHYCR_MIIRD; +- +- iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); +- +- for (i = 0; i < 10; i++) { +- phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); +- +- if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) +- return phycr & FTMAC100_PHYCR_MIIRDATA; +- +- udelay(100); +- } +- +- netdev_err(netdev, "mdio read timed out\n"); ++ struct net_device *dev; ++ struct ftmac100_local *priv; ++ PRINTK("+cleanup_module\n"); ++ ++ dev = platform_get_drvdata(pdev); ++ ++ priv = (struct ftmac100_local *)netdev_priv(dev); ++ if (priv->rx_descs) ++ dma_free_coherent( NULL, sizeof(RX_DESC)*RXDES_NUM, (void*)priv->rx_descs, (dma_addr_t)priv->rx_descs_dma ); ++ if (priv->rx_buf) ++ dma_free_coherent( NULL, RX_BUF_SIZE*RXDES_NUM, (void*)priv->rx_buf, (dma_addr_t)priv->rx_buf_dma ); ++ if (priv->tx_descs) ++ dma_free_coherent( NULL, sizeof(TX_DESC)*TXDES_NUM, (void*)priv->tx_descs, (dma_addr_t)priv->tx_descs_dma ); ++ if (priv->tx_buf) ++ dma_free_coherent( NULL, TX_BUF_SIZE*TXDES_NUM, (void*)priv->tx_buf, (dma_addr_t)priv->tx_buf_dma ); ++ priv->rx_descs = NULL; priv->rx_descs_dma = 0; ++ priv->rx_buf = NULL; priv->rx_buf_dma = 0; ++ priv->tx_descs = NULL; priv->tx_descs_dma = 0; ++ priv->tx_buf = NULL; priv->tx_buf_dma = 0; ++ ++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */ ++ unregister_netdev(dev); ++ ++ free_irq(dev->irq, dev); ++ //TODO: where is the request_region ? 
++ //release_region(devFMAC.base_addr, SMC_IO_EXTENT); ++ free_netdev(dev); + return 0; + } + +-static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, +- int data) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- unsigned int phycr; +- int i; +- +- phycr = FTMAC100_PHYCR_PHYAD(phy_id) | +- FTMAC100_PHYCR_REGAD(reg) | +- FTMAC100_PHYCR_MIIWR; +- +- data = FTMAC100_PHYWDATA_MIIWDATA(data); +- +- iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA); +- iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); +- +- for (i = 0; i < 10; i++) { +- phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); +- +- if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) +- return; +- +- udelay(100); +- } +- +- netdev_err(netdev, "mdio write timed out\n"); +-} +- +-/****************************************************************************** +- * struct ethtool_ops functions +- *****************************************************************************/ +-static void ftmac100_get_drvinfo(struct net_device *netdev, +- struct ethtool_drvinfo *info) +-{ +- strcpy(info->driver, DRV_NAME); +- strcpy(info->version, DRV_VERSION); +- strcpy(info->bus_info, dev_name(&netdev->dev)); +-} +- +-static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- return mii_ethtool_gset(&priv->mii, cmd); +-} +- +-static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- return mii_ethtool_sset(&priv->mii, cmd); +-} +- +-static int ftmac100_nway_reset(struct net_device *netdev) ++static int ftmac100_suspend(struct platform_device *pdev, pm_message_t state) + { +- struct ftmac100 *priv = netdev_priv(netdev); +- return mii_nway_restart(&priv->mii); +-} +- +-static u32 ftmac100_get_link(struct net_device *netdev) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- return mii_link_ok(&priv->mii); +-} +- +-static const struct ethtool_ops ftmac100_ethtool_ops = { +- .set_settings = ftmac100_set_settings, +- .get_settings = ftmac100_get_settings, +- .get_drvinfo = ftmac100_get_drvinfo, +- .nway_reset = ftmac100_nway_reset, +- .get_link = ftmac100_get_link, +-}; +- +-/****************************************************************************** +- * interrupt handler +- *****************************************************************************/ +-static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) +-{ +- struct net_device *netdev = dev_id; +- struct ftmac100 *priv = netdev_priv(netdev); +- +- if (likely(netif_running(netdev))) { +- /* Disable interrupts for polling */ +- ftmac100_disable_all_int(priv); +- napi_schedule(&priv->napi); +- } +- +- return IRQ_HANDLED; +-} +- +-/****************************************************************************** +- * struct napi_struct functions +- *****************************************************************************/ +-static int ftmac100_poll(struct napi_struct *napi, int budget) +-{ +- struct ftmac100 *priv = container_of(napi, struct ftmac100, napi); +- struct net_device *netdev = priv->netdev; +- unsigned int status; +- bool completed = true; +- int rx = 0; +- +- status = ioread32(priv->base + FTMAC100_OFFSET_ISR); +- +- if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) { +- /* +- * FTMAC100_INT_RPKT_FINISH: +- * RX DMA has received packets into RX buffer successfully +- * +- * FTMAC100_INT_NORXBUF: +- * RX buffer unavailable +- */ +- bool retry; +- +- do { +- retry = 
ftmac100_rx_packet(priv, &rx); +- } while (retry && rx < budget); ++ struct net_device *ndev = platform_get_drvdata(pdev); + +- if (retry && rx == budget) +- completed = false; +- } +- +- if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) { +- /* +- * FTMAC100_INT_XPKT_OK: +- * packet transmitted to ethernet successfully +- * +- * FTMAC100_INT_XPKT_LOST: +- * packet transmitted to ethernet lost due to late +- * collision or excessive collision +- */ +- ftmac100_tx_complete(priv); +- } +- +- if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST | +- FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) { +- if (net_ratelimit()) +- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, +- status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "", +- status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", +- status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "", +- status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : ""); +- +- if (status & FTMAC100_INT_NORXBUF) { +- /* RX buffer unavailable */ +- netdev->stats.rx_over_errors++; +- } +- +- if (status & FTMAC100_INT_RPKT_LOST) { +- /* received packet lost due to RX FIFO full */ +- netdev->stats.rx_fifo_errors++; +- } +- +- if (status & FTMAC100_INT_PHYSTS_CHG) { +- /* PHY link status change */ +- mii_check_link(&priv->mii); ++ if (ndev) { ++ if (netif_running(ndev)) { ++ netif_device_detach(ndev); ++ ftmac100_shutdown(ndev->base_addr); + } + } +- +- if (completed) { +- /* stop polling */ +- napi_complete(napi); +- ftmac100_enable_all_int(priv); +- } +- +- return rx; +-} +- +-/****************************************************************************** +- * struct net_device_ops functions +- *****************************************************************************/ +-static int ftmac100_open(struct net_device *netdev) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- int err; +- +- err = ftmac100_alloc_buffers(priv); +- if (err) { +- netdev_err(netdev, "failed to allocate buffers\n"); +- goto err_alloc; +- } +- +- err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev); +- if (err) { +- netdev_err(netdev, "failed to request irq %d\n", priv->irq); +- goto err_irq; +- } +- +- priv->rx_pointer = 0; +- priv->tx_clean_pointer = 0; +- priv->tx_pointer = 0; +- priv->tx_pending = 0; +- +- err = ftmac100_start_hw(priv); +- if (err) +- goto err_hw; +- +- napi_enable(&priv->napi); +- netif_start_queue(netdev); +- +- ftmac100_enable_all_int(priv); +- +- return 0; +- +-err_hw: +- free_irq(priv->irq, netdev); +-err_irq: +- ftmac100_free_buffers(priv); +-err_alloc: +- return err; +-} +- +-static int ftmac100_stop(struct net_device *netdev) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- +- ftmac100_disable_all_int(priv); +- netif_stop_queue(netdev); +- napi_disable(&priv->napi); +- ftmac100_stop_hw(priv); +- free_irq(priv->irq, netdev); +- ftmac100_free_buffers(priv); +- + return 0; + } + +-static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) ++static int ftmac100_resume(struct platform_device *pdev) + { +- struct ftmac100 *priv = netdev_priv(netdev); +- dma_addr_t map; +- +- if (unlikely(skb->len > MAX_PKT_SIZE)) { +- if (net_ratelimit()) +- netdev_dbg(netdev, "tx packet too big\n"); +- +- netdev->stats.tx_dropped++; +- dev_kfree_skb(skb); +- return NETDEV_TX_OK; +- } ++ struct net_device *ndev = platform_get_drvdata(pdev); + +- map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); +- if (unlikely(dma_mapping_error(priv->dev, map))) { +- /* drop packet */ +- if (net_ratelimit()) 
+- netdev_err(netdev, "map socket buffer failed\n"); +- +- netdev->stats.tx_dropped++; +- dev_kfree_skb(skb); +- return NETDEV_TX_OK; ++ if (ndev) { ++ if (netif_running(ndev)) { ++ ftmac100_reset(ndev); ++ ftmac100_enable(ndev); ++ netif_device_attach(ndev); ++ } + } +- +- return ftmac100_xmit(priv, skb, map); ++ return 0; + } + +-/* optional */ +-static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +-{ +- struct ftmac100 *priv = netdev_priv(netdev); +- struct mii_ioctl_data *data = if_mii(ifr); +- +- return generic_mii_ioctl(&priv->mii, data, cmd, NULL); ++static void platform_device_release(struct device *dev){ + } + +-static const struct net_device_ops ftmac100_netdev_ops = { +- .ndo_open = ftmac100_open, +- .ndo_stop = ftmac100_stop, +- .ndo_start_xmit = ftmac100_hard_start_xmit, +- .ndo_set_mac_address = eth_mac_addr, +- .ndo_validate_addr = eth_validate_addr, +- .ndo_do_ioctl = ftmac100_do_ioctl, ++static struct platform_driver ftmac100_driver = { ++ .probe = ftmac100_probe, ++ .remove = ftmac100_remove, ++ .suspend = ftmac100_suspend, ++ .resume = ftmac100_resume, ++ .driver = { ++ .name = "ftmac100", ++ }, + }; + +-/****************************************************************************** +- * struct platform_driver functions +- *****************************************************************************/ +-static int ftmac100_probe(struct platform_device *pdev) +-{ +- struct resource *res; +- int irq; +- struct net_device *netdev; +- struct ftmac100 *priv; +- int err; +- +- if (!pdev) +- return -ENODEV; +- +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- if (!res) +- return -ENXIO; +- +- irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; +- +- /* setup net_device */ +- netdev = alloc_etherdev(sizeof(*priv)); +- if (!netdev) { +- err = -ENOMEM; +- goto err_alloc_etherdev; +- } +- +- SET_NETDEV_DEV(netdev, &pdev->dev); +- SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops); +- netdev->netdev_ops = &ftmac100_netdev_ops; +- +- platform_set_drvdata(pdev, netdev); +- +- /* setup private data */ +- priv = netdev_priv(netdev); +- priv->netdev = netdev; +- priv->dev = &pdev->dev; +- +- spin_lock_init(&priv->tx_lock); +- +- /* initialize NAPI */ +- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64); +- +- /* map io memory */ +- priv->res = request_mem_region(res->start, resource_size(res), +- dev_name(&pdev->dev)); +- if (!priv->res) { +- dev_err(&pdev->dev, "Could not reserve memory region\n"); +- err = -ENOMEM; +- goto err_req_mem; +- } +- +- priv->base = ioremap(res->start, resource_size(res)); +- if (!priv->base) { +- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); +- err = -EIO; +- goto err_ioremap; +- } +- +- priv->irq = irq; +- +- /* initialize struct mii_if_info */ +- priv->mii.phy_id = 0; +- priv->mii.phy_id_mask = 0x1f; +- priv->mii.reg_num_mask = 0x1f; +- priv->mii.dev = netdev; +- priv->mii.mdio_read = ftmac100_mdio_read; +- priv->mii.mdio_write = ftmac100_mdio_write; +- +- /* register network device */ +- err = register_netdev(netdev); +- if (err) { +- dev_err(&pdev->dev, "Failed to register netdev\n"); +- goto err_register_netdev; +- } +- +- netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); +- +- if (!is_valid_ether_addr(netdev->dev_addr)) { +- eth_hw_addr_random(netdev); +- netdev_info(netdev, "generated random MAC address %pM\n", +- netdev->dev_addr); +- } +- +- return 0; +- +-err_register_netdev: +- iounmap(priv->base); +-err_ioremap: +- release_resource(priv->res); 
+-err_req_mem: +- netif_napi_del(&priv->napi); +- platform_set_drvdata(pdev, NULL); +- free_netdev(netdev); +-err_alloc_etherdev: +- return err; +-} +- +-static int __exit ftmac100_remove(struct platform_device *pdev) +-{ +- struct net_device *netdev; +- struct ftmac100 *priv; +- +- netdev = platform_get_drvdata(pdev); +- priv = netdev_priv(netdev); +- +- unregister_netdev(netdev); +- +- iounmap(priv->base); +- release_resource(priv->res); +- +- netif_napi_del(&priv->napi); +- platform_set_drvdata(pdev, NULL); +- free_netdev(netdev); +- return 0; +-} ++static struct resource ftmac100_resources_a320[] = { ++ [0] = { ++ .start = MAC_FTMAC100_0_VA_BASE, ++ .end = MAC_FTMAC100_0_VA_LIMIT, ++ .flags = IORESOURCE_IO, ++ }, ++ [1] = { ++ .start = MAC_FTMAC100_0_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; + +-static struct platform_driver ftmac100_driver = { +- .probe = ftmac100_probe, +- .remove = __exit_p(ftmac100_remove), +- .driver = { +- .name = DRV_NAME, +- .owner = THIS_MODULE, ++static struct platform_device ftmac100_device_a320 = { ++ .name = "ftmac100", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(ftmac100_resources_a320), ++ .resource = ftmac100_resources_a320, ++ .dev = { ++ .release = platform_device_release, + }, + }; + +-/****************************************************************************** +- * initialization / finalization +- *****************************************************************************/ ++ + static int __init ftmac100_init(void) + { +- pr_info("Loading version " DRV_VERSION " ...\n"); ++ platform_device_register(&ftmac100_device_a320); + return platform_driver_register(&ftmac100_driver); + } + + static void __exit ftmac100_exit(void) + { + platform_driver_unregister(&ftmac100_driver); ++ platform_device_unregister(&ftmac100_device_a320); + } + + module_init(ftmac100_init); + module_exit(ftmac100_exit); +- +-MODULE_AUTHOR("Po-Yu Chuang "); +-MODULE_DESCRIPTION("FTMAC100 driver"); +-MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/drivers/net/ethernet/faraday/ftmac100.h linux-3.4.110/drivers/net/ethernet/faraday/ftmac100.h +--- linux-3.4.110.orig/drivers/net/ethernet/faraday/ftmac100.h 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/net/ethernet/faraday/ftmac100.h 2016-04-07 10:20:51.054085357 +0200 +@@ -1,180 +1,267 @@ + /* +- * Faraday FTMAC100 10/100 Ethernet ++ * drivers/net/ftmac100.h + * +- * (C) Copyright 2009-2011 Faraday Technology +- * Po-Yu Chuang ++ * Faraday FTMAC100 Device Driver + * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2005 Faraday Corp. (http://www.faraday-tech.com) ++ * ++ * All Rights Reserved + * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+- */ +- +-#ifndef __FTMAC100_H +-#define __FTMAC100_H +- +-#define FTMAC100_OFFSET_ISR 0x00 +-#define FTMAC100_OFFSET_IMR 0x04 +-#define FTMAC100_OFFSET_MAC_MADR 0x08 +-#define FTMAC100_OFFSET_MAC_LADR 0x0c +-#define FTMAC100_OFFSET_MAHT0 0x10 +-#define FTMAC100_OFFSET_MAHT1 0x14 +-#define FTMAC100_OFFSET_TXPD 0x18 +-#define FTMAC100_OFFSET_RXPD 0x1c +-#define FTMAC100_OFFSET_TXR_BADR 0x20 +-#define FTMAC100_OFFSET_RXR_BADR 0x24 +-#define FTMAC100_OFFSET_ITC 0x28 +-#define FTMAC100_OFFSET_APTC 0x2c +-#define FTMAC100_OFFSET_DBLAC 0x30 +-#define FTMAC100_OFFSET_MACCR 0x88 +-#define FTMAC100_OFFSET_MACSR 0x8c +-#define FTMAC100_OFFSET_PHYCR 0x90 +-#define FTMAC100_OFFSET_PHYWDATA 0x94 +-#define FTMAC100_OFFSET_FCR 0x98 +-#define FTMAC100_OFFSET_BPR 0x9c +-#define FTMAC100_OFFSET_TS 0xc4 +-#define FTMAC100_OFFSET_DMAFIFOS 0xc8 +-#define FTMAC100_OFFSET_TM 0xcc +-#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4 +-#define FTMAC100_OFFSET_RPF_AEP 0xd8 +-#define FTMAC100_OFFSET_XM_PG 0xdc +-#define FTMAC100_OFFSET_RUNT_TLCC 0xe0 +-#define FTMAC100_OFFSET_CRCER_FTL 0xe4 +-#define FTMAC100_OFFSET_RLC_RCC 0xe8 +-#define FTMAC100_OFFSET_BROC 0xec +-#define FTMAC100_OFFSET_MULCA 0xf0 +-#define FTMAC100_OFFSET_RP 0xf4 +-#define FTMAC100_OFFSET_XP 0xf8 +- +-/* +- * Interrupt status register & interrupt mask register +- */ +-#define FTMAC100_INT_RPKT_FINISH (1 << 0) +-#define FTMAC100_INT_NORXBUF (1 << 1) +-#define FTMAC100_INT_XPKT_FINISH (1 << 2) +-#define FTMAC100_INT_NOTXBUF (1 << 3) +-#define FTMAC100_INT_XPKT_OK (1 << 4) +-#define FTMAC100_INT_XPKT_LOST (1 << 5) +-#define FTMAC100_INT_RPKT_SAV (1 << 6) +-#define FTMAC100_INT_RPKT_LOST (1 << 7) +-#define FTMAC100_INT_AHB_ERR (1 << 8) +-#define FTMAC100_INT_PHYSTS_CHG (1 << 9) +- +-/* +- * Interrupt timer control register + */ +-#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0) +-#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4) +-#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7) +-#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8) +-#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12) +-#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15) + +-/* +- * Automatic polling timer control register +- */ +-#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0) +-#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4) +-#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8) +-#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12) ++#ifndef _FTMAC100_H_ ++#define _FTMAC100_H_ + +-/* +- * DMA burst length and arbitration control register +- */ +-#define FTMAC100_DBLAC_INCR4_EN (1 << 0) +-#define FTMAC100_DBLAC_INCR8_EN (1 << 1) +-#define FTMAC100_DBLAC_INCR16_EN (1 << 2) +-#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3) +-#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6) +-#define FTMAC100_DBLAC_RX_THR_EN (1 << 9) ++#define ISR_REG 0x00 // interrups status register ++#define IMR_REG 0x04 // interrupt maks register ++#define MAC_MADR_REG 0x08 // MAC address (Most significant) ++#define MAC_LADR_REG 0x0c // MAC address (Least significant) ++ ++#define MAHT0_REG 0x10 // Multicast Address Hash Table 0 register ++#define MAHT1_REG 0x14 // Multicast Address Hash Table 1 register ++#define TXPD_REG 0x18 // Transmit Poll Demand register ++#define RXPD_REG 0x1c // Receive Poll Demand register ++#define TXR_BADR_REG 0x20 // Transmit Ring Base Address register ++#define RXR_BADR_REG 0x24 // Receive Ring Base Address register ++#define ITC_REG 0x28 // interrupt timer control register ++#define APTC_REG 0x2c // Automatic Polling Timer control register ++#define DBLAC_REG 0x30 // 
DMA Burst Length and Arbitration control register ++ ++ ++ ++#define MACCR_REG 0x88 // MAC control register ++#define MACSR_REG 0x8c // MAC status register ++#define PHYCR_REG 0x90 // PHY control register ++#define PHYWDATA_REG 0x94 // PHY Write Data register ++#define FCR_REG 0x98 // Flow Control register ++#define BPR_REG 0x9c // back pressure register ++#define WOLCR_REG 0xa0 // Wake-On-Lan control register ++#define WOLSR_REG 0xa4 // Wake-On-Lan status register ++#define WFCRC_REG 0xa8 // Wake-up Frame CRC register ++#define WFBM1_REG 0xb0 // wake-up frame byte mask 1st double word register ++#define WFBM2_REG 0xb4 // wake-up frame byte mask 2nd double word register ++#define WFBM3_REG 0xb8 // wake-up frame byte mask 3rd double word register ++#define WFBM4_REG 0xbc // wake-up frame byte mask 4th double word register ++#define TM_REG 0xcc // test mode register ++ ++#define PHYSTS_CHG_bit (1UL<<9) ++#define AHB_ERR_bit (1UL<<8) ++#define RPKT_LOST_bit (1UL<<7) ++#define RPKT_SAV_bit (1UL<<6) ++#define XPKT_LOST_bit (1UL<<5) ++#define XPKT_OK_bit (1UL<<4) ++#define NOTXBUF_bit (1UL<<3) ++#define XPKT_FINISH_bit (1UL<<2) ++#define NORXBUF_bit (1UL<<1) ++#define RPKT_FINISH_bit (1UL<<0) ++ ++ ++#ifdef __NDS32_EB__ ++typedef struct ++{ ++ unsigned int Reserved2:19; ++ unsigned int TXPOLL_TIME_SEL:1; ++ unsigned int TXPOLL_CNT:4; ++ unsigned int Reserved1:3; ++ unsigned int RXPOLL_TIME_SEL:1; ++ unsigned int RXPOLL_CNT:4; ++}FTMAC100_APTCR_Status; ++#else ++typedef struct ++{ ++ unsigned int RXPOLL_CNT:4; ++ unsigned int RXPOLL_TIME_SEL:1; ++ unsigned int Reserved1:3; ++ unsigned int TXPOLL_CNT:4; ++ unsigned int TXPOLL_TIME_SEL:1; ++ unsigned int Reserved2:19; ++} FTMAC100_APTCR_Status; ++#endif ++ ++#define RX_BROADPKT_bit (1UL<<17) // Receiving broadcast packet ++#define RX_MULTIPKT_bit (1UL<<16) // receiving multicast packet ++#define FULLDUP_bit (1UL<<15) // full duplex ++#define CRC_APD_bit (1UL<<14) // append crc to transmit packet ++#define MDC_SEL_bit (1UL<<13) // set MDC as TX_CK/10 ++#define RCV_ALL_bit (1UL<<12) // not check incoming packet's destination address ++#define RX_FTL_bit (1UL<<11) // Store incoming packet even its length is great than 1518 byte ++#define RX_RUNT_bit (1UL<<10) // Store incoming packet even its length is les than 64 byte ++#define HT_MULTI_EN_bit (1UL<<9) ++#define RCV_EN_bit (1UL<<8) // receiver enable ++#define XMT_EN_bit (1UL<<5) // transmitter enable ++#define CRC_DIS_bit (1UL<<4) ++#define LOOP_EN_bit (1UL<<3) // Internal loop-back ++#define SW_RST_bit (1UL<<2) // software reset/ ++#define RDMA_EN_bit (1UL<<1) // enable DMA receiving channel ++#define XDMA_EN_bit (1UL<<0) // enable DMA transmitting channel ++ ++ ++// -------------------------------------------------------------------- ++// Receive Ring descriptor structure ++// -------------------------------------------------------------------- ++#ifdef __NDS32_EB__ ++typedef struct ++{ ++ // RXDES0 ++ unsigned int RXDMA_OWN:1; // 1 ==> owned by FTMAC100, 0 ==> owned by software ++ unsigned int Reserved3:1; ++ unsigned int FRS:1; ++ unsigned int LRS:1; ++ unsigned int Reserved2:5; ++ unsigned int RX_ODD_NB:1; ++ unsigned int RUNT:1; ++ unsigned int FTL:1; ++ unsigned int CRC_ERR:1; //19 ++ unsigned int RX_ERR:1; //18 ++ unsigned int BROARDCAST:1; //17 ++ unsigned int MULTICAST:1; //16 ++ unsigned int Reserved1:5; //11~15 ++ unsigned int ReceiveFrameLength:11;//0~10 ++ ++ // RXDES1 ++ unsigned int EDOTR:1; ++ unsigned int Reserved:20; ++ unsigned int RXBUF_Size:11; ++ ++ // RXDES2 ++ unsigned int 
RXBUF_BADR;
++
++ unsigned int VIR_RXBUF_BADR; // not defined by HW; used to hold the receive buffer's virtual address
++
++}RX_DESC;
++
++
++typedef struct
++{
++ // TXDES0
++ unsigned int TXDMA_OWN:1;
++ unsigned int Reserved1:29;
++ unsigned int TXPKT_EXSCOL:1;
++ unsigned int TXPKT_LATECOL:1;
++
++ // TXDES1
++ unsigned int EDOTR:1;
++ unsigned int TXIC:1;
++ unsigned int TX2FIC:1;
++ unsigned int FTS:1;
++ unsigned int LTS:1;
++ unsigned int Reserved2:16;
++ unsigned int TXBUF_Size:11;
++
++ // TXDES2
++ unsigned int TXBUF_BADR;
++ unsigned int VIR_TXBUF_BADR;
++}TX_DESC;
++#else
++typedef struct
++{
++ // RXDES0
++ unsigned int ReceiveFrameLength:11;//0~10
++ unsigned int Reserved1:5; //11~15
++ unsigned int MULTICAST:1; //16
++ unsigned int BROARDCAST:1; //17
++ unsigned int RX_ERR:1; //18
++ unsigned int CRC_ERR:1; //19
++ unsigned int FTL:1;
++ unsigned int RUNT:1;
++ unsigned int RX_ODD_NB:1;
++ unsigned int Reserved2:5;
++ unsigned int LRS:1;
++ unsigned int FRS:1;
++ unsigned int Reserved3:1;
++ unsigned int RXDMA_OWN:1; // 1 ==> owned by FTMAC100, 0 ==> owned by software
++
++ // RXDES1
++ unsigned int RXBUF_Size:11;
++ unsigned int Reserved:20;
++ unsigned int EDOTR:1;
++
++ // RXDES2
++ unsigned int RXBUF_BADR;
++
++ unsigned int VIR_RXBUF_BADR; // not defined by HW; used to hold the receive buffer's virtual address
++
++} RX_DESC;
++
++
++typedef struct
++{
++ // TXDES0
++ unsigned int TXPKT_LATECOL:1;
++ unsigned int TXPKT_EXSCOL:1;
++ unsigned int Reserved1:29;
++ unsigned int TXDMA_OWN:1;
++
++ // TXDES1
++ unsigned int TXBUF_Size:11;
++ unsigned int Reserved2:16;
++ unsigned int LTS:1;
++ unsigned int FTS:1;
++ unsigned int TX2FIC:1;
++ unsigned int TXIC:1;
++ unsigned int EDOTR:1;
++
++ // TXDES2
++ unsigned int TXBUF_BADR;
++ unsigned int VIR_TXBUF_BADR;
++} TX_DESC;
++#endif
++
++// waiting to do:
++#define TXPOLL_CNT 8
++#define RXPOLL_CNT 0
++
++#define OWNBY_SOFTWARE 0
++#define OWNBY_FTMAC100 1
++
++// --------------------------------------------------------------------
++// driver related definition
++// --------------------------------------------------------------------
++#define RXDES_NUM 256 // must be 2's power
++#define RX_BUF_SIZE 1536
++#define TXDES_NUM 64 // must be 2's power
++#define TX_BUF_SIZE 1536 // Luke Lee : Just bigger than 1518
++
++
++struct ftmac100_local
++{
++ // these are things that the kernel wants me to keep, so users
++ // can find out semi-useless statistics of how well the card is
++ // performing
++ struct net_device_stats stats;
++
++ // Set to true during the auto-negotiation sequence
++ int autoneg_active;
++
++ // Address of our PHY port
++ unsigned int phyaddr;
++
++ // Type of PHY
++ unsigned int phytype;
++
++ // Last contents of PHY Register 18
++ unsigned int lastPhy18;
++
++ spinlock_t lock;
++
++ volatile RX_DESC *rx_descs; // receive ring base address
++ unsigned int rx_descs_dma; // receive ring physical base address
++ char *rx_buf; // receive buffer cpu address
++ int rx_buf_dma; // receive buffer physical address
++ int rx_idx; // receive descriptor
++ //struct sk_buff *rx_skbuff[RXDES_NUM];
++
++ volatile TX_DESC *tx_descs;
++ unsigned int tx_descs_dma;
++ char *tx_buf;
++ int tx_buf_dma;
++ int tx_idx;
++ int old_tx;
++ struct sk_buff *tx_skbuff[RXDES_NUM];
++
++ int maccr_val;
++};
+
+-/*
+- * MAC control register
+- */
+-#define FTMAC100_MACCR_XDMA_EN (1 << 0)
+-#define FTMAC100_MACCR_RDMA_EN (1 << 1)
+-#define FTMAC100_MACCR_SW_RST (1 << 2)
+-#define FTMAC100_MACCR_LOOP_EN (1 << 3)
+-#define FTMAC100_MACCR_CRC_DIS (1 << 4)
+-#define FTMAC100_MACCR_XMT_EN (1 << 5) +-#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6) +-#define FTMAC100_MACCR_RCV_EN (1 << 8) +-#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9) +-#define FTMAC100_MACCR_RX_RUNT (1 << 10) +-#define FTMAC100_MACCR_RX_FTL (1 << 11) +-#define FTMAC100_MACCR_RCV_ALL (1 << 12) +-#define FTMAC100_MACCR_CRC_APD (1 << 14) +-#define FTMAC100_MACCR_FULLDUP (1 << 15) +-#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16) +-#define FTMAC100_MACCR_RX_BROADPKT (1 << 17) +- +-/* +- * PHY control register +- */ +-#define FTMAC100_PHYCR_MIIRDATA 0xffff +-#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16) +-#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21) +-#define FTMAC100_PHYCR_MIIRD (1 << 26) +-#define FTMAC100_PHYCR_MIIWR (1 << 27) +- +-/* +- * PHY write data register +- */ +-#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff) +- +-/* +- * Transmit descriptor, aligned to 16 bytes +- */ +-struct ftmac100_txdes { +- unsigned int txdes0; +- unsigned int txdes1; +- unsigned int txdes2; /* TXBUF_BADR */ +- unsigned int txdes3; /* not used by HW */ +-} __attribute__ ((aligned(16))); +- +-#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0) +-#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1) +-#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31) +- +-#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff) +-#define FTMAC100_TXDES1_LTS (1 << 27) +-#define FTMAC100_TXDES1_FTS (1 << 28) +-#define FTMAC100_TXDES1_TX2FIC (1 << 29) +-#define FTMAC100_TXDES1_TXIC (1 << 30) +-#define FTMAC100_TXDES1_EDOTR (1 << 31) +- +-/* +- * Receive descriptor, aligned to 16 bytes +- */ +-struct ftmac100_rxdes { +- unsigned int rxdes0; +- unsigned int rxdes1; +- unsigned int rxdes2; /* RXBUF_BADR */ +- unsigned int rxdes3; /* not used by HW */ +-} __attribute__ ((aligned(16))); +- +-#define FTMAC100_RXDES0_RFL 0x7ff +-#define FTMAC100_RXDES0_MULTICAST (1 << 16) +-#define FTMAC100_RXDES0_BROADCAST (1 << 17) +-#define FTMAC100_RXDES0_RX_ERR (1 << 18) +-#define FTMAC100_RXDES0_CRC_ERR (1 << 19) +-#define FTMAC100_RXDES0_FTL (1 << 20) +-#define FTMAC100_RXDES0_RUNT (1 << 21) +-#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22) +-#define FTMAC100_RXDES0_LRS (1 << 28) +-#define FTMAC100_RXDES0_FRS (1 << 29) +-#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31) ++#endif + +-#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff) +-#define FTMAC100_RXDES1_EDORR (1 << 31) + +-#endif /* __FTMAC100_H */ +diff -Nur linux-3.4.110.orig/drivers/net/ethernet/faraday/Kconfig linux-3.4.110/drivers/net/ethernet/faraday/Kconfig +--- linux-3.4.110.orig/drivers/net/ethernet/faraday/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/net/ethernet/faraday/Kconfig 2016-04-07 10:20:51.054085357 +0200 +@@ -5,7 +5,7 @@ + config NET_VENDOR_FARADAY + bool "Faraday devices" + default y +- depends on ARM ++ depends on ARM || NDS32 + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from +@@ -20,7 +20,7 @@ + + config FTMAC100 + tristate "Faraday FTMAC100 10/100 Ethernet support" +- depends on ARM ++ depends on ARM || NDS32 + select NET_CORE + select MII + ---help--- +diff -Nur linux-3.4.110.orig/drivers/pci/Makefile linux-3.4.110/drivers/pci/Makefile +--- linux-3.4.110.orig/drivers/pci/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/pci/Makefile 2016-04-07 10:20:51.054085357 +0200 +@@ -49,6 +49,7 @@ + obj-$(CONFIG_MICROBLAZE) += setup-bus.o + obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o + obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o 
++obj-$(CONFIG_NDS32) += setup-bus.o setup-irq.o + + # + # ACPI Related PCI FW Functions +diff -Nur linux-3.4.110.orig/drivers/rtc/Kconfig linux-3.4.110/drivers/rtc/Kconfig +--- linux-3.4.110.orig/drivers/rtc/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/rtc/Kconfig 2016-04-07 10:20:51.054085357 +0200 +@@ -779,6 +779,16 @@ + This driver can also be built as a module. If so, the module + will be called rtc-ep93xx. + ++config RTC_DRV_FTRTC010 ++ tristate "Faraday Real Time Clock" ++ depends on NDS32 ++ help ++ If you say Y here you will get access to the real time clock ++ built into your AG101 CPU. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called rtc-ftrtc010. ++ + config RTC_DRV_SA1100 + tristate "SA11x0/PXA2xx/PXA910" + depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP +diff -Nur linux-3.4.110.orig/drivers/rtc/Makefile linux-3.4.110/drivers/rtc/Makefile +--- linux-3.4.110.orig/drivers/rtc/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/rtc/Makefile 2016-04-07 10:20:51.054085357 +0200 +@@ -50,6 +50,7 @@ + obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o + obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o + obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o ++obj-$(CONFIG_RTC_DRV_FTRTC010) += rtc-ftrtc010.o + obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o + obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o + obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o +diff -Nur linux-3.4.110.orig/drivers/rtc/rtc-ftrtc010.c linux-3.4.110/drivers/rtc/rtc-ftrtc010.c +--- linux-3.4.110.orig/drivers/rtc/rtc-ftrtc010.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/rtc/rtc-ftrtc010.c 2016-04-07 10:20:51.054085357 +0200 +@@ -0,0 +1,371 @@ ++/* ++ * Faraday RTC Support ++ * ++ * Copyright (C) 2006, 2007, 2008 Paul Mundt ++ * Copyright (C) 2006 Jamie Lenehan ++ * Copyright (C) 2008 Angelo Castello ++ * Copyright (C) 2008 Roy Lee ++ * ++ * Based on the old arch/sh/kernel/cpu/rtc.c by: ++ * ++ * Copyright (C) 2000 Philipp Rumpf ++ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PCLK ( AHB_CLK_IN / 2) ++ ++#define DRV_NAME "faraday-rtc" ++ ++#define RTC_REG( off) ( *( volatile unsigned long *)( rtc->regbase + ( off))) ++ ++#define RTC_CR RTC_REG( 0x20) /* Control */ ++#define RTC_CR_IE ( 0x1UL << 0) ++#define RTC_CR_IES ( 0x1UL << 1) ++#define RTC_CR_IEM ( 0x1UL << 2) ++#define RTC_CR_IEH ( 0x1UL << 3) ++#define RTC_CR_IED ( 0x1UL << 4) ++#define RTC_CR_ALRM ( 0x1UL << 5) ++#define RTC_CR_LOAD ( 0x1UL << 6) ++ ++#define RTC_RR RTC_REG( 0x1C) /* RTC Record Register */ ++#define RTC_DIV RTC_REG( 0x38) /* RTC Divede Register */ ++#define RTC_REV RTC_REG( 0x3C) /* RTC Divede Register */ ++ ++#define RTC_IR RTC_REG( 0x34) /* RTC interrupt state */ ++#define RTC_IR_IES ( 0x1UL << 0) /* RTC interrupt state */ ++#define RTC_IR_IEM ( 0x1UL << 1) /* RTC interrupt state */ ++#define RTC_IR_IEH ( 0x1UL << 2) /* RTC interrupt state */ ++#define RTC_IR_IED ( 0x1UL << 3) /* RTC interrupt state */ ++#define RTC_IR_ALRM ( 0x1UL << 4) /* RTC interrupt state */ ++ ++#define RTC_SECOND RTC_REG( 0x00) /* RTC sec */ ++#define RTC_MINUTE RTC_REG( 0x04) /* RTC min */ ++#define RTC_HOUR RTC_REG( 0x08) /* RTC hour */ ++#define RTC_DAYS RTC_REG( 0x0C) /* RTC day */ ++ ++#define RTC_ALRM_SECOND RTC_REG( 0x10) /* RTC alarm sec */ ++#define RTC_ALRM_MINUTE RTC_REG( 0x14) /* RTC alarm min */ ++#define RTC_ALRM_HOUR RTC_REG( 0x18) /* RTC alarm hour */ ++ ++#define RTC_WRITE_SECOND RTC_REG( 0x24) /* RTC write port for sec */ ++#define RTC_WRITE_MINUTE RTC_REG( 0x28) /* RTC write port for min */ ++#define RTC_WRITE_HOUR RTC_REG( 0x2C) /* RTC write port for hour */ ++#define RTC_WRITE_DAYS RTC_REG( 0x30) /* RTC write port for day */ ++ ++struct ft_rtc{ ++ ++ void __iomem *regbase; ++ struct resource *res; ++ unsigned int alarm_irq; ++ unsigned int interrupt_irq; ++ struct rtc_device *rtc_dev; ++ spinlock_t lock; /* Protects this structure */ ++}; ++ ++struct ft_rtc rtc_platform_data; ++ ++static irqreturn_t ft_rtc_interrupt( int irq, void *dev_id){ ++ ++ struct ft_rtc *rtc = dev_id; ++ if( RTC_IR & RTC_IR_IES){ ++ ++ RTC_IR &= ~RTC_IR_IES; ++ rtc_update_irq( rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); ++ ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++static irqreturn_t ft_rtc_alarm( int irq, void *dev_id){ ++ ++ struct ft_rtc *rtc = dev_id; ++ if( RTC_IR & RTC_IR_ALRM){ ++ ++ RTC_CR &= ~RTC_CR_ALRM; ++ RTC_IR &= ~RTC_IR_ALRM; ++ rtc_update_irq( rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); ++ ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++static void ft_rtc_release( struct device *dev){ ++ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ ++ RTC_CR &= ~RTC_CR_IES; ++ RTC_CR &= ~RTC_CR_ALRM; ++} ++ ++/* rick add */ ++static int ft_alarm_irq_enable(struct device *dev, unsigned int enabled) ++{ ++// printk("\n\nft_alarm_irq_enable\n\n"); ++ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ spin_lock_irq(&rtc->lock); ++ if (enabled) ++ RTC_CR |= RTC_CR_ALRM; ++ else ++ RTC_CR &= ~RTC_CR_ALRM; ++ spin_unlock_irq(&rtc->lock); ++ return 0; ++} ++ ++static int ft_rtc_read_time( struct device *dev, struct rtc_time *tm){ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ unsigned long time = RTC_DAYS * 86400 + RTC_HOUR * 3600 + RTC_MINUTE * 60 + RTC_SECOND; ++ rtc_time_to_tm( time, tm); ++ if( rtc_valid_tm( tm) < 0) { ++ dev_err( dev, "invalid date\n"); ++ rtc_time_to_tm( 0, tm); ++ } ++ return 0; ++} ++ ++static int ft_rtc_set_time( struct device *dev, struct rtc_time *tm){ ++ ++ 
struct ft_rtc *rtc = dev_get_drvdata( dev); ++ unsigned long time = 0; ++ ++ rtc_tm_to_time( tm, &time); ++ ++ RTC_WRITE_DAYS = time / 86400; ++ time %= 86400; ++ ++ RTC_WRITE_HOUR = time / 3600; ++ time %= 3600; ++ ++ RTC_WRITE_MINUTE = time / 60; ++ time %= 60; ++ ++ RTC_WRITE_SECOND = time; ++ ++ RTC_CR |= RTC_CR_LOAD; ++ ++ return 0; ++} ++ ++static int ft_rtc_read_alarm( struct device *dev, struct rtc_wkalrm *wkalrm){ ++ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ struct rtc_time *tm = &wkalrm->time; ++ ++ tm->tm_sec = RTC_ALRM_SECOND; ++ tm->tm_min = RTC_ALRM_MINUTE; ++ tm->tm_hour = RTC_ALRM_HOUR; ++ ++ wkalrm->enabled = ( RTC_CR & RTC_CR_ALRM) ? 1 : 0; ++ ++ return 0; ++} ++ ++static int ft_rtc_set_alarm( struct device *dev, struct rtc_wkalrm *wkalrm){ ++ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ struct rtc_time *tm = &wkalrm->time; ++ int err = rtc_valid_tm( tm); ++ if( err < 0){ ++ ++ dev_err( dev, "invalid alarm value\n"); ++ return err; ++ } ++ ++ /* disable alarm interrupt and clear the alarm flag */ ++ RTC_CR &= ~RTC_CR_ALRM; ++ ++ /* set alarm time */ ++ RTC_ALRM_SECOND = tm->tm_sec; ++ RTC_ALRM_MINUTE = tm->tm_min; ++ RTC_ALRM_HOUR = tm->tm_hour; ++ ++ if( wkalrm->enabled) ++ RTC_CR |= RTC_CR_ALRM; ++ ++ return 0; ++} ++ ++/* ++static int ft_rtc_irq_set_state( struct device *dev, int enabled){ ++ struct ft_rtc *rtc = dev_get_drvdata( dev); ++ if( enabled) ++ RTC_CR |= RTC_CR_IES; ++ else ++ RTC_CR &= ~RTC_CR_IES; ++ ++ return 0; ++} ++ ++*/ ++ ++static struct rtc_class_ops ft_rtc_ops = { ++ ++ .release = ft_rtc_release, ++ .alarm_irq_enable = ft_alarm_irq_enable, ++ ++ .read_time = ft_rtc_read_time, ++ .set_time = ft_rtc_set_time, ++ .read_alarm = ft_rtc_read_alarm, ++ .set_alarm = ft_rtc_set_alarm, ++}; ++ ++static int __devinit ft_rtc_probe( struct platform_device *pdev){ ++ ++ struct ft_rtc *rtc = &rtc_platform_data; ++ int ret = -ENOENT; ++ spin_lock_init(&rtc->lock); ++ ++ if( ( rtc->alarm_irq = platform_get_irq( pdev, 0)) < 0) ++ goto err_exit; ++ ++ if( ( rtc->interrupt_irq = platform_get_irq( pdev, 1)) < 0) ++ goto err_exit; ++ ++ if( !( rtc->res = platform_get_resource( pdev, IORESOURCE_MEM, 0))) ++ goto err_exit; ++ ++ if( ( ret = request_irq( rtc->alarm_irq , ft_rtc_alarm, 0, "RTC Alarm : ftrtc010", rtc))) ++ goto err_exit; ++ ++ if( ( ret = request_irq( rtc->interrupt_irq , ft_rtc_interrupt, 0, "RTC Interrupt : ftrtc010", rtc))) ++ goto err_interrupt_irq; ++ ++ ret = -EBUSY; ++ ++ if( !( rtc->res = request_mem_region( rtc->res->start, rtc->res->end - rtc->res->start + 1, pdev->name))) ++ goto err_request_region; ++ ++ ret = -EINVAL; ++ ++ if( !( rtc->regbase = ioremap_nocache( rtc->res->start, rtc->res->end - rtc->res->start + 1))) ++ goto err_ioremap1; ++ ++ ++ //RTC_DIV = ( 0x1UL << 31) | 2; /* AG101 */ ++ RTC_CR |= RTC_CR_IE; ++ platform_set_drvdata( pdev, rtc); ++ device_init_wakeup(&pdev->dev, true); ++ ++ rtc->rtc_dev = rtc_device_register( DRV_NAME, &pdev->dev, &ft_rtc_ops, THIS_MODULE); ++ ++ if( IS_ERR( rtc->rtc_dev)) { ++ ++ ret = PTR_ERR( rtc->rtc_dev); ++ goto err_unmap; ++ } ++ ++ rtc->rtc_dev->max_user_freq = 256; ++ rtc->rtc_dev->irq_freq = 1; ++ return 0; ++ ++err_unmap: ++ iounmap( rtc->regbase); ++err_ioremap1: ++ release_resource( rtc->res); ++err_request_region: ++ free_irq( rtc->interrupt_irq, rtc); ++err_interrupt_irq: ++ free_irq( rtc->alarm_irq, rtc); ++err_exit: ++ return ret; ++} ++ ++static int __devexit ft_rtc_remove( struct platform_device *pdev){ ++ ++ struct ft_rtc *rtc = platform_get_drvdata( pdev); ++ ++ 
rtc_device_unregister( rtc->rtc_dev); ++ ++ RTC_CR &= ~RTC_CR_ALRM; ++ ++ free_irq( rtc->alarm_irq, rtc); ++ free_irq( rtc->interrupt_irq, rtc); ++ ++ iounmap( rtc->regbase); ++ ++ platform_set_drvdata( pdev, NULL); ++ ++ return 0; ++} ++ ++static void platform_device_release(struct device *dev){ ++} ++ ++static struct resource rtc_resources[] = { ++ { ++ .start = RTC_FTRTC010_PA_BASE, ++ .end = RTC_FTRTC010_PA_LIMIT, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = RTC_FTRTC010_IRQ0, ++ .end = RTC_FTRTC010_IRQ0, ++ .flags = IORESOURCE_IRQ, ++ }, ++ { ++ .start = RTC_FTRTC010_IRQ1, ++ .end = RTC_FTRTC010_IRQ1, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_driver ft_rtc_platform_driver = { ++ ++ .driver = { ++ .name = DRV_NAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = ft_rtc_probe, ++ .remove = __devexit_p( ft_rtc_remove), ++}; ++ ++static struct platform_device rtc_device = { ++ ++ .name = DRV_NAME, ++ .id = 0, ++ .resource = rtc_resources, ++ .num_resources = ARRAY_SIZE( rtc_resources), ++ .dev = { ++ .platform_data = &rtc_platform_data, ++ .release = platform_device_release, ++ }, ++}; ++ ++static int __init ft_rtc_init( void){ ++ platform_device_register( &rtc_device); ++ return platform_driver_register( &ft_rtc_platform_driver); ++} ++ ++static void __exit ft_rtc_exit( void){ ++ ++ platform_driver_unregister( &ft_rtc_platform_driver); ++ platform_device_unregister( &rtc_device); ++} ++ ++module_init( ft_rtc_init); ++module_exit( ft_rtc_exit); ++ ++MODULE_DESCRIPTION( "SuperH on-chip RTC driver"); ++MODULE_AUTHOR( "Paul Mundt , " ++ "Jamie Lenehan , " ++ "Angelo Castello , " ++ "Roy Lee "); ++MODULE_LICENSE( "GPL"); ++MODULE_ALIAS( "platform:" DRV_NAME); +diff -Nur linux-3.4.110.orig/drivers/tty/serial/8250/8250.c linux-3.4.110/drivers/tty/serial/8250/8250.c +--- linux-3.4.110.orig/drivers/tty/serial/8250/8250.c 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/tty/serial/8250/8250.c 2016-04-07 10:20:51.054085357 +0200 +@@ -570,6 +570,7 @@ + serial_out(up, UART_ICR, value); + } + ++#ifndef CONFIG_NDS32 + static unsigned int serial_icr_read(struct uart_8250_port *up, int offset) + { + unsigned int value; +@@ -581,6 +582,7 @@ + + return value; + } ++#endif + + /* + * FIFO support. +@@ -722,6 +724,7 @@ + return count; + } + ++#ifndef CONFIG_NDS32 + /* + * Read UART ID using the divisor method - set DLL and DLM to zero + * and the revision will be in DLL and device type in DLM. We +@@ -749,7 +752,9 @@ + + return id; + } ++#endif + ++#ifndef CONFIG_NDS32 + /* + * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's. + * When this function is called we know it is at least a StarTech +@@ -842,6 +847,7 @@ + else + up->port.type = PORT_16650V2; + } ++#endif + + /* + * We detected a chip without a FIFO. Only two fall into +@@ -865,6 +871,7 @@ + up->port.type = PORT_16450; + } + ++#ifndef CONFIG_NDS32 + static int broken_efr(struct uart_8250_port *up) + { + /* +@@ -877,6 +884,7 @@ + + return 0; + } ++#endif + + static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) + { +@@ -908,7 +916,7 @@ + + up->port.type = PORT_16550A; + up->capabilities |= UART_CAP_FIFO; +- ++#ifndef CONFIG_NDS32 + /* + * Check for presence of the EFR when DLAB is set. + * Only ST16C650V1 UARTs pass this test. +@@ -937,6 +945,7 @@ + autoconfig_has_efr(up); + return; + } ++#endif + + /* + * Check for a National Semiconductor SuperIO chip. +@@ -1156,10 +1165,11 @@ + * We also initialise the EFR (if any) to zero for later. 
The + * EFR occupies the same register location as the FCR and IIR. + */ ++#ifndef CONFIG_NDS32 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); + serial_out(up, UART_EFR, 0); + serial_out(up, UART_LCR, 0); +- ++#endif + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + scratch = serial_in(up, UART_IIR) >> 6; + +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/faradayfb.h linux-3.4.110/drivers/video/FTLCDC100/faradayfb.h +--- linux-3.4.110.orig/drivers/video/FTLCDC100/faradayfb.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/faradayfb.h 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,215 @@ ++#ifndef __FARADAYFB_H ++#define __FARADAYFB_H ++ ++#define FARADAYFB_MODULE_NAME "faradayfb" ++#define DEBUG(enabled, tagged, ...) \ ++ do { \ ++ if (enabled) { \ ++ if (tagged) \ ++ printk("[ %30s() ] ", __func__); \ ++ printk(__VA_ARGS__); \ ++ } \ ++ } while (0) ++ ++#if defined(CONFIG_FFB_MODE_RGB) ++# define DEFAULT_COLOR FFB_MODE_RGB ++#elif defined(CONFIG_FFB_MODE_YUV422) ++# define DEFAULT_COLOR FFB_MODE_YUV422 ++#elif defined(CONFIG_FFB_MODE_YUV420) ++# define DEFAULT_COLOR FFB_MODE_YUV420 ++#endif ++ ++#if defined(CONFIG_FFB_MODE_8BPP) ++# define DEFAULT_BPP FFB_MODE_8BPP ++#elif defined(CONFIG_FFB_MODE_16BPP) ++# define DEFAULT_BPP FFB_MODE_16BPP ++#elif defined(CONFIG_FFB_MODE_24BPP) ++# define DEFAULT_BPP FFB_MODE_24BPP ++#endif ++ ++#define FFB_DEFAULT_MODE (DEFAULT_COLOR | DEFAULT_BPP) ++ ++struct lcd_param { ++ ++ unsigned long value; ++ unsigned long flags; ++}; ++ ++inline struct lcd_param* get_lcd_time(struct lcd_param* array, unsigned long num_array, unsigned long type) ++{ ++ int i; ++ ++ for (i = 0; i < num_array; i++) { ++ ++ struct lcd_param *r = &array[i]; ++ ++ if (r->flags & type & 0xff) ++ return r; ++ } ++ ++ return NULL; ++} ++ ++inline struct lcd_param* get_lcd_ctrl(struct lcd_param* array, unsigned long num_array, unsigned long type) ++{ ++ int i; ++ ++ for (i = 0; i < num_array; i++) { ++ ++ struct lcd_param *r = &array[i]; ++ ++ if ((r->flags & type & 0xff) && (r->flags & type & 0xff00)) ++ return r; ++ } ++ ++ return NULL; ++} ++ ++enum { RGB_8, RGB_16, RGB_24, NR_RGB}; ++ ++struct faradayfb_rgb { ++ ++ struct fb_bitfield red; ++ struct fb_bitfield green; ++ struct fb_bitfield blue; ++ struct fb_bitfield transp; ++}; ++ ++/* This structure describes the machine which we are running on. */ ++struct faradayfb_mach_info { ++ ++ unsigned long num_time0; ++ struct lcd_param * time0; ++ ++ unsigned long num_time1; ++ struct lcd_param * time1; ++ ++ unsigned long num_time2; ++ struct lcd_param * time2; ++ ++ unsigned long num_control; ++ struct lcd_param * control; ++ ++ unsigned long pixclock; ++ ++ unsigned long xres; ++ unsigned long yres; ++ ++ unsigned int max_bpp; ++ unsigned int sync; ++ ++ unsigned int cmap_greyscale:1, ++ cmap_inverse:1, ++ cmap_static:1, ++ unused:29; ++}; ++ ++struct faradayfb_info { ++ ++ struct fb_info *info; ++ struct faradayfb_rgb *rgb[NR_RGB]; ++ ++ unsigned int xres; ++ unsigned int yres; ++ unsigned int max_bpp; ++ ++ /* ++ * These are the addresses we mapped ++ * the framebuffer memory region to. 
++ */ ++ dma_addr_t map_dma; ++ unsigned char * map_cpu; ++ unsigned int map_size; ++ ++ unsigned char * screen_cpu; ++ dma_addr_t screen_dma; ++ u32 * palette_cpu; ++ dma_addr_t palette_dma; ++ unsigned int palette_size; ++ ++ unsigned int cmap_inverse:1, ++ cmap_static:1, ++ unused:30; ++ ++ unsigned long time0; ++ unsigned long time1; ++ unsigned long time2; ++ unsigned long control; ++ unsigned long int_mask; ++ unsigned long io_base; ++ ++ unsigned int state; ++ unsigned int task_state; ++ struct semaphore ctrlr_sem; ++ wait_queue_head_t ctrlr_wait; ++ struct work_struct task; ++ ++ unsigned long smode; ++ unsigned long frame420_size; ++}; ++ ++/* ++ * These are the actions for set_ctrlr_state ++ */ ++#define C_DISABLE 0 ++#define C_ENABLE 1 ++#define C_DISABLE_CLKCHANGE 2 ++#define C_ENABLE_CLKCHANGE 3 ++#define C_REENABLE 4 ++#define C_DISABLE_PM 5 ++#define C_ENABLE_PM 6 ++#define C_STARTUP 7 ++ ++#define FARADAY_LCDTIME0_GET_HBP(x) ((((x) >> 24) & 0xFF) + 1) ++#define FARADAY_LCDTIME0_GET_HFP(x) ((((x) >> 16) & 0xFF) + 1) ++#define FARADAY_LCDTIME0_GET_HW(x) ((((x) >> 8) & 0xFF) + 1) ++#define FARADAY_LCDTIME1_GET_VBP(x) ((((x) >> 24) & 0xFF) ) ++#define FARADAY_LCDTIME1_GET_VFP(x) ((((x) >> 16) & 0xFF) ) ++#define FARADAY_LCDTIME1_GET_VW(x) ((((x) >> 8) & 0xFF) + 1) ++ ++#define FARADAY_LCDTIME0_HBP(x) ((((x) - 1) & 0xFF) << 24) ++#define FARADAY_LCDTIME0_HFP(x) ((((x) - 1) & 0xFF) << 16) ++#define FARADAY_LCDTIME0_HW(x) ((((x) - 1) & 0xFF) << 8) ++#define FARADAY_LCDTIME0_PL(x) (((((x) >> 4) - 1) & 0x3F) << 2) ++#define FARADAY_LCDTIME1_VBP(x) ((((x) ) & 0xFF) << 24) ++#define FARADAY_LCDTIME1_VFP(x) ((((x) ) & 0xFF) << 16) ++#define FARADAY_LCDTIME1_VW(x) ((((x) - 1) & 0xFF) << 8) ++#define FARADAY_LCDTIME1_LF(x) ((((x) - 1) & 0x3FF) ) ++ ++#define FFB_MODE_RGB 0x00000001 ++#define FFB_MODE_YUV420 0x00000002 ++#define FFB_MODE_YUV422 0x00000004 ++ ++#define FFB_MODE_8BPP 0x00000100 ++#define FFB_MODE_16BPP 0x00000200 ++#define FFB_MODE_24BPP 0x00000400 ++ ++/* Minimum X and Y resolutions */ ++#define MIN_XRES 64 ++#define MIN_YRES 64 ++ ++typedef struct LCDTag { ++ ++ u32 Timing0; /* 0x00 */ ++ u32 Timing1; /* 0x04 */ ++ u32 Timing2; /* 0x08 */ ++ u32 Timing3; /* 0x0C */ ++ u32 UPBase; /* 0x10 */ ++ u32 LPBase; /* 0x14 */ ++ u32 INTREnable; /* 0x18 */ ++ u32 Control; /* 0x1C */ ++ u32 Status; /* 0x20 */ ++ u32 Interrupt; /* 0x24 */ ++ u32 UPCurr; /* 0x28 */ ++ u32 LPCurr; /* 0x2C */ ++ u32 Reserved1[5]; /* 0x30~0x40 */ ++ u32 GPIO; /* 0x44 */ ++ u32 Reserved2[0x74 - 6]; /* 0x48~0x1FC */ ++ ++ u32 Palette[0x80]; /* 0x200~0x3FC */ ++ u32 TestReg[0x100]; /* 0x400~0x7FC */ ++ ++} LCD_Register; ++ ++#endif /* __FARADAYFB_H */ ++ +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/faradayfb-main.c linux-3.4.110/drivers/video/FTLCDC100/faradayfb-main.c +--- linux-3.4.110.orig/drivers/video/FTLCDC100/faradayfb-main.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/faradayfb-main.c 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,911 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "faradayfb.h" ++#include "lcd-info.c" ++#include "pingpong-module.c" ++ ++static u32 faradayfb_pseudo_palette[32]; ++static inline void faradayfb_lcd_power(struct fb_info *info, int on) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ if (on) ++ fbi->control |= (1UL << 11); ++ else ++ fbi->control &= ~(1UL << 11); ++ ++ 
plcd->Control = fbi->control; ++} ++ ++static void faradayfb_setup_gpio(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ plcd->GPIO = 0x010000; ++} ++ ++static inline void faradayfb_pmu_on(void) ++{ ++ REG32(PMU_FTPMU010_VA_BASE + 0x44) |= 0x00700000; ++} ++ ++static inline void faradayfb_pmu_off(void) ++{ ++ REG32(PMU_FTPMU010_VA_BASE + 0x44) &= ~0x00700000; ++} ++ ++static void faradayfb_enable_controller(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ plcd->Timing0 = fbi->time0; ++ plcd->Timing1 = fbi->time1; ++ plcd->Timing2 = fbi->time2; ++ plcd->Control = fbi->control & ~0x01; ++ ++ plcd->UPBase = fbi->screen_dma | fbi->frame420_size; ++ ++ faradayfb_pmu_on(); ++ plcd->Control |= 0x01; ++ ++ DEBUG(0, 1, "Time0 = 0x%08x\n", plcd->Timing0); ++ DEBUG(0, 1, "Time1 = 0x%08x\n", plcd->Timing1); ++ DEBUG(0, 1, "Time2 = 0x%08x\n", plcd->Timing2); ++ DEBUG(0, 1, "Control = 0x%08x\n", plcd->Control); ++ DEBUG(0, 1, "UPBase = 0x%08x\n", plcd->UPBase); ++} ++ ++static void faradayfb_disable_controller(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ add_wait_queue(&fbi->ctrlr_wait, &wait); ++ ++ fbi->control &= ~0x0001; ++ plcd->Control = fbi->control; ++ faradayfb_pmu_off(); ++ ++ schedule_timeout(20 * HZ / 1000); ++ remove_wait_queue(&fbi->ctrlr_wait, &wait); ++} ++ ++static void faradayfb_enable_int(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ plcd->INTREnable = fbi->int_mask; ++} ++ ++static void faradayfb_disable_int(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ fbi->int_mask = plcd->INTREnable; ++ plcd->INTREnable = 0; ++ plcd->Status = 0x1e; ++} ++ ++static struct faradayfb_rgb def_rgb_8 = { ++ ++ .red = { .offset = 0, .length = 4 }, ++ .green = { .offset = 0, .length = 4 }, ++ .blue = { .offset = 0, .length = 4 }, ++ .transp = { .offset = 0, .length = 0 }, ++}; ++ ++static struct faradayfb_rgb def_rgb_16 = { ++ ++ .red = { .offset = 11, .length = 5, .msb_right = 0 }, ++ .green = { .offset = 5, .length = 6, .msb_right = 0 }, ++ .blue = { .offset = 0, .length = 5, .msb_right = 0 }, ++ .transp = { .offset = 15, .length = 0, .msb_right = 0 }, ++}; ++ ++static struct faradayfb_rgb def_rgb_24 = { ++ ++ .red = { .offset = 16, .length = 8, .msb_right = 0 }, ++ .green = { .offset = 8, .length = 8, .msb_right = 0 }, ++ .blue = { .offset = 0, .length = 8, .msb_right = 0 }, ++ .transp = { .offset = 0, .length = 0, .msb_right = 0 }, ++}; ++ ++static inline void faradayfb_schedule_work(struct fb_info *info, unsigned int state) ++{ ++ struct faradayfb_info *fbi = info->par; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ /* ++ * We need to handle two requests being made at the same time. ++ * There are two important cases: ++ * 1. When we are changing VT (C_REENABLE) while unblanking (C_ENABLE) ++ * We must perform the unblanking, which will do our REENABLE for us. ++ * 2. When we are blanking, but immediately unblank before we have ++ * blanked. We do the "REENABLE" thing here as well, just to be sure. 
++ */ ++ if (fbi->task_state == C_ENABLE && state == C_REENABLE) ++ state = (unsigned int) - 1; ++ ++ if (fbi->task_state == C_DISABLE && state == C_ENABLE) ++ state = C_REENABLE; ++ ++ if (state != (unsigned int) - 1) { ++ ++ fbi->task_state = state; ++ schedule_work(&fbi->task); ++ } ++ ++ local_irq_restore(flags); ++} ++ ++static int faradayfb_setpalettereg(unsigned int regno, unsigned int red, ++ unsigned int green, unsigned int blue, unsigned int trans, ++ struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ ++ if (regno < fbi->palette_size) { ++ ++ fbi->palette_cpu[regno] = ((red >> 0) & (0x1fUL << 11)) | ++ ((green >> 5) & (0x3F << 5)) | ++ ((blue >> 11) & (0x1f << 0)); ++ ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static int faradayfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, ++ unsigned int blue, unsigned int trans, struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ int ret = 1; ++ /* ++ * If inverse mode was selected, invert all the colours ++ * rather than the register number. The register number ++ * is what you poke into the framebuffer to produce the ++ * colour you requested. ++ */ ++ if (fbi->cmap_inverse) { ++ ++ red = 0xffff - red; ++ green = 0xffff - green; ++ blue = 0xffff - blue; ++ } ++ ++ /* ++ * If greyscale is true, then we convert the RGB value ++ * to greyscale no mater what visual we are using. ++ */ ++ if (info->var.grayscale) ++ red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; ++ ++ switch(info->fix.visual) { ++ ++ case FB_VISUAL_TRUECOLOR: ++ /* ++ * 12 or 16-bit True Colour. We encode the RGB value ++ * according to the RGB bitfield information. ++ */ ++ if (regno < 16) { ++ ++ u32 col; ++ red >>= (16 - info->var.red.length); ++ green >>= (16 - info->var.green.length); ++ blue >>= (16 - info->var.blue.length); ++ col = (red << info->var.red.offset) | ++ (green << info->var.green.offset) | ++ (blue << info->var.blue.offset) ; ++ ++ switch(info->var.bits_per_pixel) { ++ ++ /* is the following code correct?? */ ++ case 16: ++ case 32: ++ ((u32 *)(info->pseudo_palette))[regno] = col; ++ ret=0; ++ break; ++ } ++ } ++ break; ++ ++ case FB_VISUAL_STATIC_PSEUDOCOLOR: ++ case FB_VISUAL_PSEUDOCOLOR: ++ if (fbi->smode == FFB_MODE_RGB) ++ ret = faradayfb_setpalettereg(regno, red, green, blue, trans, info); ++ break; ++ } ++ ++ return ret; ++} ++ ++/* ++ * Round up in the following order: bits_per_pixel, xres, ++ * yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, ++ * bitfields, horizontal timing, vertical timing. ++ */ ++static int faradayfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ int rgbidx; ++ ++ var->xres = min(var->xres, (unsigned int)MIN_XRES); ++ var->yres = min(var->yres, (unsigned int)MIN_YRES); ++ var->xres = max(var->xres, fbi->xres); ++ var->yres = max(var->yres, fbi->yres); ++ var->xres_virtual = max(var->xres_virtual, var->xres); ++ var->yres_virtual = max(var->yres_virtual, var->yres); ++ ++ DEBUG(0, 1, "var->bits_per_pixel = %d\n", var->bits_per_pixel); ++ ++ switch (var->bits_per_pixel) { ++ ++ case 1: case 2: case 4: case 8: ++ rgbidx = RGB_8; ++ break; ++ ++ case 16: ++ rgbidx = RGB_16; ++ break; ++ ++ case 32: ++ rgbidx = RGB_24; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ /* ++ * Copy the RGB parameters for this display ++ * from the machine specific parameters. 
++ */ ++ var->red = fbi->rgb[ rgbidx]->red; ++ var->green = fbi->rgb[ rgbidx]->green; ++ var->blue = fbi->rgb[ rgbidx]->blue; ++ var->transp = fbi->rgb[ rgbidx]->transp; ++ ++ DEBUG(0, 1, "RGBT length = %d:%d:%d:%d\n", ++ var->red.length, var->green.length, var->blue.length, var->transp.length); ++ ++ DEBUG(0, 1, "RGBT offset = %d:%d:%d:%d\n", ++ var->red.offset, var->green.offset, var->blue.offset, var->transp.offset); ++ ++ DEBUG(0, 1, "Leave\n"); ++ ++ return 0; ++} ++ ++/* ++ * Configures LCD Controller based on entries in var parameter. Settings are ++ * only written to the controller if changes were made. ++ */ ++static int faradayfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ ++ DEBUG(0, 1, "var: xres=%d hslen=%d lm=%d rm=%d\n", var->xres, var->hsync_len, var->left_margin, var->right_margin); ++ DEBUG(0, 1, "var: yres=%d vslen=%d um=%d bm=%d\n", var->yres, var->vsync_len, var->upper_margin, var->lower_margin); ++ ++ fbi->time0 = FARADAY_LCDTIME0_HFP(var->left_margin) ++ | FARADAY_LCDTIME0_HBP(var->right_margin) ++ | FARADAY_LCDTIME0_HW(var->hsync_len) ++ | FARADAY_LCDTIME0_PL(var->xres); ++ ++ fbi->time1 = FARADAY_LCDTIME1_VBP(var->upper_margin) ++ | FARADAY_LCDTIME1_VFP(var->lower_margin) ++ | FARADAY_LCDTIME1_VW(var->vsync_len) ++ | FARADAY_LCDTIME1_LF(var->yres); ++ ++ if ((plcd->Timing0 != fbi->time0) ++ || (plcd->Timing1 != fbi->time1) ++ || (plcd->Timing2 != fbi->time2) ++ || (plcd->Control != fbi->control)) { ++ ++ faradayfb_schedule_work(info, C_REENABLE); ++ } ++ ++ return 0; ++} ++ ++/* Set the user defined part of the display for the specified console */ ++static int faradayfb_set_par(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ struct fb_var_screeninfo *var = &info->var; ++ unsigned long palette_mem_size; ++ ++ if (var->bits_per_pixel == 16 || var->bits_per_pixel == 32) ++ info->fix.visual = FB_VISUAL_TRUECOLOR; ++ else if (!fbi->cmap_static) ++ info->fix.visual = FB_VISUAL_PSEUDOCOLOR; ++ else { ++ /* ++ * Some people have weird ideas about wanting static ++ * pseudocolor maps. I suspect their user space ++ * applications are broken. ++ */ ++ info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; ++ } ++ ++ info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; ++ ++ fbi->palette_size = var->bits_per_pixel == 8 ? 256 : 16; ++ ++ palette_mem_size = fbi->palette_size * sizeof(u32); ++ ++ DEBUG(0, 1, "info->fix.line_length = %d\n", info->fix.line_length); ++ DEBUG(0, 1, "palette_mem_size = 0x%08lx\n", (unsigned long) palette_mem_size); ++ ++ fbi->palette_cpu = (u32 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); ++ ++ fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; ++ ++ /* Set (any) board control register to handle new color depth */ ++ faradayfb_activate_var(var, info); ++ ++ DEBUG(0, 1, "Leave\n"); ++ ++ return 0; ++} ++ ++static irqreturn_t faradayfb_handle_irq(int irq, void *dev_id) ++{ ++#if 0 ++ struct fb_info *info = (struct fb_info*)dev_id; ++ struct faradayfb_info *fbi = info->par; ++ volatile LCD_Register *plcd = (LCD_Register *)fbi->io_base; ++ u32 status = plcd->Interrupt; ++#endif ++ return IRQ_HANDLED; ++} ++ ++/* ++ * This function must be called from task context only, since it will ++ * sleep when disabling the LCD controller, or if we get two contending ++ * processes trying to alter state. 
++ */ ++static void set_ctrlr_state(struct fb_info *info, unsigned int state) ++{ ++ struct faradayfb_info *fbi = info->par; ++ unsigned int old_state; ++ ++ down(&fbi->ctrlr_sem); ++ ++ old_state = fbi->state; ++ ++ /* Hack around fbcon initialisation. */ ++ if (old_state == C_STARTUP && state == C_REENABLE) ++ state = C_ENABLE; ++ ++ switch (state) { ++ case C_DISABLE_PM: ++ case C_DISABLE: ++ ++ /* Disable controller */ ++ if (old_state != C_DISABLE) { ++ ++ fbi->state = state; ++ faradayfb_disable_int(info); ++ faradayfb_lcd_power(info, 0); ++ // faradayfb_disable_controller(info); ++ } ++ break; ++ ++ case C_REENABLE: ++ /* ++ * Re-enable the controller only if it was already ++ * enabled. This is so we reprogram the control ++ * registers. ++ */ ++ if (old_state == C_ENABLE) { ++ ++ faradayfb_disable_int(info); ++ faradayfb_lcd_power(info, 0); ++ faradayfb_disable_controller(info); ++ ++ faradayfb_setup_gpio(info); ++ faradayfb_lcd_power(info, 1); ++ faradayfb_enable_controller(info); ++ faradayfb_enable_int(info); ++ } ++ break; ++ ++ case C_ENABLE_PM: ++ /* ++ * Re-enable the controller after PM. This is not ++ * perfect - think about the case where we were doing ++ * a clock change, and we suspended half-way through. ++ */ ++ if (old_state != C_DISABLE_PM) ++ break; ++ /* fall through */ ++ ++ case C_ENABLE: ++ /* ++ * Power up the LCD screen, enable controller, and ++ * turn on the backlight. ++ */ ++ if (old_state != C_ENABLE) { ++ ++ fbi->state = C_ENABLE; ++ faradayfb_setup_gpio(info); ++ faradayfb_lcd_power(info, 1); ++ faradayfb_enable_controller(info); ++ faradayfb_enable_int(info); ++ } ++ break; ++ } ++ up(&fbi->ctrlr_sem); ++} ++ ++/* ++ * Blank the display by setting all palette values to zero. Note, the ++ * 12 and 16 bpp modes don't really use the palette, so this will not ++ * blank the display in all modes. ++ */ ++static int faradayfb_blank(int blank, struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ int i; ++ ++ switch (blank) { ++ case FB_BLANK_POWERDOWN: ++ case FB_BLANK_VSYNC_SUSPEND: ++ case FB_BLANK_HSYNC_SUSPEND: ++ case FB_BLANK_NORMAL: ++ ++ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || ++ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) { ++ ++ for (i = 0; i < fbi->palette_size; i++) ++ faradayfb_setpalettereg(i, 0, 0, 0, 0, info); ++ } ++ ++ faradayfb_schedule_work(info, C_DISABLE); ++ ++ break; ++ ++ case FB_BLANK_UNBLANK: ++ ++ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || ++ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) ++ fb_set_cmap(&info->cmap, info); ++ ++ faradayfb_schedule_work(info, C_ENABLE); ++ ++ break; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Our LCD controller task (which is called when we blank or unblank) ++ * via keventd. 
++ */ ++static void faradayfb_task(struct work_struct *dummy) ++{ ++ struct faradayfb_info *fbi = container_of(dummy, struct faradayfb_info, task); ++ struct fb_info *info = fbi->info; ++ unsigned int state; ++ ++ state = xchg(&fbi->task_state, -1); ++ set_ctrlr_state(info, state); ++} ++ ++/* Fake monspecs to fill in fbinfo structure */ ++static struct fb_monspecs monspecs = { ++ ++ .hfmin = 30000, ++ .hfmax = 70000, ++ .vfmin = 50, ++ .vfmax = 65, ++}; ++ ++static int ffb_get_mach_param(struct faradayfb_mach_info *inf, struct fb_info *info, unsigned long smode) ++{ ++ struct faradayfb_info *fbi = info->par; ++ struct lcd_param *tparam; ++ unsigned long bpp_mode; ++ ++ if (!(tparam = get_lcd_time(inf->time0, inf->num_time0, smode))) ++ return -1; ++ ++ fbi->time0 = tparam->value; ++ ++ if (!(tparam = get_lcd_time(inf->time1, inf->num_time1, smode))) ++ return -1; ++ ++ fbi->time1 = tparam->value; ++ ++ if (!(tparam = get_lcd_time(inf->time2, inf->num_time2, smode))) ++ return -1; ++ ++ fbi->time2 = tparam->value; ++ ++ switch(info->var.bits_per_pixel) { ++ ++ case 1: case 2: case 4: case 8: ++ bpp_mode = FFB_MODE_8BPP; ++ break; ++ ++ case 16: ++ bpp_mode = FFB_MODE_16BPP; ++ break; ++ ++ case 32: ++ bpp_mode = FFB_MODE_24BPP; ++ break; ++ ++ default: ++ DEBUG(1, 1, "Unsupported BPP, set default BPP to 16\n"); ++ bpp_mode = FFB_MODE_16BPP; ++ break; ++ } ++ ++ if (!(tparam = get_lcd_ctrl(inf->control, inf->num_control, smode | bpp_mode))) ++ return -1; ++ ++ fbi->control = tparam->value; ++ ++ fbi->xres = inf->xres; ++ fbi->yres = inf->yres; ++ ++ return 0; ++} ++ ++static int faradayfb_set_var(struct fb_info *info, struct faradayfb_mach_info *inf, unsigned long type) ++{ ++ struct faradayfb_info *fbi = info->par; ++ unsigned long t_smode = 0; ++ ++ if (!type) ++ t_smode = fbi->smode; ++ else ++ t_smode = type; ++ ++ if (ffb_get_mach_param(inf, info, t_smode)) { ++ ++ DEBUG(1, 1, "Not support mode(%lx)\n", t_smode); ++ return -1; ++ } ++ ++ info->var.xres = fbi->xres; ++ info->var.xres_virtual = fbi->xres; ++ info->var.yres = fbi->yres; ++ info->var.yres_virtual = fbi->yres; ++ ++ info->var.upper_margin = FARADAY_LCDTIME1_GET_VBP(fbi->time1); ++ info->var.lower_margin = FARADAY_LCDTIME1_GET_VFP(fbi->time1); ++ info->var.vsync_len = FARADAY_LCDTIME1_GET_VW(fbi->time1); ++ info->var.left_margin = FARADAY_LCDTIME0_GET_HFP(fbi->time0); ++ info->var.right_margin = FARADAY_LCDTIME0_GET_HBP(fbi->time0); ++ info->var.hsync_len = FARADAY_LCDTIME0_GET_HW(fbi->time0); ++ ++ fbi->int_mask = 0; ++ ++ if (t_smode & FFB_MODE_YUV420) ++ fbi->frame420_size = (((info->var.xres * info->var.yres + 0xffff) & 0xffff0000) >> 16) << 2; ++ else ++ fbi->frame420_size = 0; ++ ++ return 0; ++} ++ ++static struct fb_ops faradayfb_ops = { ++ ++ .owner = THIS_MODULE, ++ .fb_check_var = faradayfb_check_var, ++ .fb_set_par = faradayfb_set_par, ++ .fb_setcolreg = faradayfb_setcolreg, ++ .fb_fillrect = cfb_fillrect, ++ .fb_copyarea = cfb_copyarea, ++ .fb_imageblit = cfb_imageblit, ++ .fb_blank = faradayfb_blank, ++ .fb_mmap = faradayfb_mmap, ++ .fb_ioctl = faradayfb_ioctl, ++}; ++ ++static struct fb_info * __init faradayfb_init_fbinfo(struct device *dev) ++{ ++ struct faradayfb_mach_info *inf; ++ struct fb_info *info; ++ struct faradayfb_info *fbi; ++ ++ if (!(info = framebuffer_alloc(sizeof(struct faradayfb_info), dev))) ++ return NULL; ++ ++ fbi = info->par; ++ fbi->info = info; ++ ++ fbi->io_base = LCD_FTLCDC100_0_VA_BASE; ++ strcpy(info->fix.id, FARADAYFB_MODULE_NAME); ++ ++ info->fix.type = FB_TYPE_PACKED_PIXELS; ++ 
info->fix.type_aux = 0; ++ info->fix.xpanstep = 0; ++ info->fix.ypanstep = 0; ++ info->fix.ywrapstep = 0; ++ info->fix.accel = FB_ACCEL_NONE; ++ ++ info->var.nonstd = 0; ++ info->var.activate = FB_ACTIVATE_NOW; ++ info->var.height = -1; ++ info->var.width = -1; ++ info->var.accel_flags = 0; ++ info->var.vmode = FB_VMODE_NONINTERLACED; ++ ++ info->fbops = &faradayfb_ops; ++ info->flags = FBINFO_DEFAULT; ++ info->monspecs = monspecs; ++ info->pseudo_palette = &faradayfb_pseudo_palette; ++ ++ fbi->rgb[RGB_8] = &def_rgb_8; ++ fbi->rgb[RGB_16] = &def_rgb_16; ++ fbi->rgb[RGB_24] = &def_rgb_24; ++ ++ inf = (struct faradayfb_mach_info*)dev->platform_data; ++ ++ /* ++ * People just don't seem to get this. We don't support ++ * anything but correct entries now, so panic if someone ++ * does something stupid. ++ */ ++ ++ fbi->xres = inf->xres; ++ fbi->yres = inf->yres; ++ fbi->max_bpp = 32; ++#if defined(CONFIG_FFB_MODE_8BPP) ++ info->var.bits_per_pixel = min((u32)8, inf->max_bpp); ++#elif defined(CONFIG_FFB_MODE_16BPP) ++ info->var.bits_per_pixel = min((u32)16, inf->max_bpp); ++#elif defined(CONFIG_FFB_MODE_24BPP) ++ info->var.bits_per_pixel = min((u32)32, inf->max_bpp); ++ info->var.bits_per_pixel = 32; ++#endif ++ ++ info->var.pixclock = inf->pixclock; ++ info->var.sync = inf->sync; ++ info->var.grayscale = inf->cmap_greyscale; ++ fbi->cmap_inverse = inf->cmap_inverse; ++ fbi->cmap_static = inf->cmap_static; ++ ++ fbi->smode = FFB_DEFAULT_MODE; ++ ++ if (faradayfb_set_var(info, inf, 0)) ++ goto err; ++ ++ fbi->state = C_STARTUP; ++ fbi->task_state = (unsigned char) - 1; ++ info->fix.smem_len = faradayfb_cal_frame_buf_size(fbi); ++ ++ init_waitqueue_head(&fbi->ctrlr_wait); ++ INIT_WORK(&fbi->task, faradayfb_task); ++ init_MUTEX(&fbi->ctrlr_sem); ++ ++ return info; ++err: ++ kfree(fbi); ++ kfree(info); ++ ++ return NULL; ++} ++ ++static int faradayfb_probe(struct platform_device *pdev) ++{ ++ struct fb_info *info; ++ struct faradayfb_info *fbi; ++ int irq; ++ int ret = 0; ++ ++ ret = -ENOMEM; ++ ++ if (!(info = faradayfb_init_fbinfo(&pdev->dev))) { ++ ++ DEBUG(1, 1, "unable to allocate memory for device info\n"); ++ goto err_exit; ++ } ++ ++ fbi = info->par; ++ ++ /* Initialize video memory */ ++ if ((ret = faradayfb_map_video_memory(info)) < 0) ++ goto err_free_mem; ++ ++ ret = -EINVAL; ++ ++ if ((irq = platform_get_irq(pdev, 0)) <= 0) ++ goto err_free_map; ++ ++ if (request_irq(irq, faradayfb_handle_irq, IRQF_DISABLED, "LCD", info)) { ++ ++ DEBUG(1, 1, "request_irq failed: %d\n", ret); ++ goto err_free_map; ++ } ++ ++ /* ++ * This makes sure that our colour bitfield ++ * descriptors are correctly initialised. 
++ */ ++ faradayfb_check_var(&info->var, info); ++ faradayfb_set_par(info); ++ ++ dev_set_drvdata(&pdev->dev, info); ++ ++// <============================================ ++ if ((ret = register_framebuffer(info)) < 0) { ++ ++ DEBUG(1, 1, "register_framebuffer failed\n"); ++ goto err_free_irq; ++ } ++// <============================================ ++ ++ faradayfb_clean_screen(info); ++ ++ return 0; ++ ++err_free_irq: ++ dev_set_drvdata(&pdev->dev, NULL); ++ free_irq(irq, info); ++err_free_map: ++ faradayfb_unmap_video_memory(info); ++err_free_mem: ++ kfree(info->par); ++ kfree(info); ++err_exit: ++ return ret; ++} ++ ++/* Called when the device is being detached from the driver */ ++static int faradayfb_remove(struct platform_device *pdev) ++{ ++ struct fb_info *info = platform_get_drvdata(pdev); ++ int irq; ++ ++ set_ctrlr_state(info, C_DISABLE); ++ ++ irq = platform_get_irq(pdev, 0); ++ free_irq(irq, info); ++ ++ unregister_framebuffer(info); ++ faradayfb_unmap_video_memory(info); ++ dev_set_drvdata(&pdev->dev, NULL); ++ kfree(info->par); ++ kfree(info); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++ ++/* suspend and resume support for the lcd controller */ ++static int faradayfb_suspend(struct platform_device *pdev, pm_message_t mesg) ++{ ++ struct fb_info *info = platform_get_drvdata(pdev); ++ ++ // if (level == SUSPEND_DISABLE || level == SUSPEND_POWER_DOWN) ++ if (mesg.event == PM_EVENT_PRETHAW || mesg.event & PM_EVENT_SLEEP) ++ set_ctrlr_state(info, C_DISABLE_PM); ++ ++ return 0; ++} ++ ++static int faradayfb_resume(struct platform_device *pdev) ++{ ++ struct fb_info *info = platform_get_drvdata(pdev); ++ ++ // need modify ++ // if (level == RESUME_ENABLE) ++ set_ctrlr_state(info, C_ENABLE_PM); ++ ++ return 0; ++} ++ ++#else ++#define faradayfb_suspend NULL ++#define faradayfb_resume NULL ++#endif ++ ++static struct resource faradayfb_resource[] = { ++ ++ { ++ .start = LCD_FTLCDC100_0_PA_BASE, ++ .end = LCD_FTLCDC100_0_PA_LIMIT, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = LCD_FTLCDC100_0_IRQ, ++ .end = LCD_FTLCDC100_0_IRQ, ++ .flags = IORESOURCE_IRQ, ++ } ++}; ++ ++static u64 faradayfb_dmamask = ~(u32)0; ++ ++static struct platform_device faradayfb_device = { ++ ++ .name = "faraday-lcd", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(faradayfb_resource), ++ .resource = faradayfb_resource, ++ .dev = { ++ ++ .platform_data = &ffb_mach_info, ++ .dma_mask = &faradayfb_dmamask, ++ .coherent_dma_mask = 0xffffffff, ++ } ++}; ++ ++static struct platform_driver faradayfb_driver = { ++ .probe = faradayfb_probe, ++ .remove = faradayfb_remove, ++ .suspend = faradayfb_suspend, ++ .resume = faradayfb_resume, ++ .driver = { ++ .name = "faraday-lcd", ++ }, ++}; ++ ++/* ++rick note faraday driver ++*/ ++ ++/* Register both the driver and the device */ ++int __init faradayfb_init(void) ++{ ++ int ret = 0; ++ /* Register the device with LDM */ ++ ++#if 1 ++ if (platform_device_register(&faradayfb_device)) { ++ ++ DEBUG(1, 1, "failed to register faradayfb device\n"); ++ ret = -ENODEV; ++ goto exit; ++ } ++#endif ++ /* Register the driver with LDM */ ++ if (platform_driver_register(&faradayfb_driver)) { ++ ++ DEBUG(1, 1, "failed to register faradayfb driver\n"); ++ platform_device_unregister(&faradayfb_device); ++ ret = -ENODEV; ++ goto exit; ++ } ++exit: ++ return ret; ++} ++ ++static void __exit faradayfb_cleanup(void) ++{ ++ platform_driver_unregister(&faradayfb_driver); ++ platform_device_unregister(&faradayfb_device); ++} ++ ++module_init(faradayfb_init); ++module_exit(faradayfb_cleanup); 
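[Editor's sketch, not part of the patch] faradayfb_init() above registers a static platform_device and then the matching platform_driver from the same module, which is how this board code publishes the FTLCDC100 without device-tree support; faradayfb_cleanup() tears the pair down in reverse order. A minimal sketch of that register-device-then-driver pattern, using hypothetical example_* names rather than anything defined in this patch, could look like:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
    	return 0;			/* claim the device; nothing to set up */
    }

    static int example_remove(struct platform_device *pdev)
    {
    	return 0;
    }

    static struct platform_device example_device = {
    	.name = "example-dev",		/* must match the driver name below to bind */
    	.id   = -1,
    };

    static struct platform_driver example_driver = {
    	.probe  = example_probe,
    	.remove = example_remove,
    	.driver = { .name = "example-dev" },
    };

    static int __init example_init(void)
    {
    	int ret = platform_device_register(&example_device);

    	if (ret)
    		return ret;

    	ret = platform_driver_register(&example_driver);
    	if (ret)
    		platform_device_unregister(&example_device);	/* unwind on failure */
    	return ret;
    }

    static void __exit example_exit(void)
    {
    	/* tear down in the reverse order of registration */
    	platform_driver_unregister(&example_driver);
    	platform_device_unregister(&example_device);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The failure path mirrors what faradayfb_init() in this patch does when platform_driver_register() fails: the already-registered device is unregistered before returning the error.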
++MODULE_DESCRIPTION("Faraday LCD driver"); ++MODULE_AUTHOR("Francis Huang "); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/Kconfig linux-3.4.110/drivers/video/FTLCDC100/Kconfig +--- linux-3.4.110.orig/drivers/video/FTLCDC100/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/Kconfig 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,74 @@ ++config FB_FTLCDC100 ++ tristate "Faraday FTLCDC100 driver" ++ depends on FB && NDS32 ++ select FB_CFB_FILLRECT ++ select FB_CFB_COPYAREA ++ select FB_CFB_IMAGEBLIT ++ ++ choice ++ prompt "Default LCD Panel" ++ depends on FB_FTLCDC100 ++ default PANEL_AUA036QN01 ++ help ++ This option select a default panel setting for the LCD controller ++ ++ config PANEL_AUA036QN01 ++ bool "AU 3.5 inch LCD Panel" ++ ++ config PANEL_CH7013A ++ bool "Chrontel Digital PC to TV Encoder" ++ select I2C ++ select I2C_FARADAY ++ select CH7013A ++ ++ config PANEL_AUA070VW04 ++ bool "AU 7.0 inch LCD Panel" ++ ++ config PANEL_LW500AC9601 ++ bool "CHIMEI 5.0 inch LCD panel" ++ ++ endchoice ++ ++ # config FTLCD_OSD ++ # bool "Enable OSD (On Screen Display)" ++ # depends on FB_FTLCDC100 ++ # default n ++ # ---help--- ++ # This enables access to the OSD (On Screen Display) for Faraday ++ # FTLCDC100 LCD control. Disabling OSD will reduce the size of ++ # the kernel by approximately 6kb. ++ # ++ ++ choice ++ prompt "Default Color Mode" ++ depends on FB_FTLCDC100 ++ default FFB_MODE_RGB ++ help ++ This option select default color mode ++ ++ config FFB_MODE_RGB ++ bool "RGB Mode" ++ config FFB_MODE_YUV422 ++ bool "YUV422 Mode" ++ config FFB_MODE_YUV420 ++ bool "YUV420 Mode" ++ endchoice ++ ++ choice ++ prompt "Default BPP" ++ depends on FB_FTLCDC100 ++ default FFB_MODE_16BPP ++ help ++ This option select default BPP (bits-per-pixel) ++ ++ config FFB_MODE_8BPP ++ depends on FFB_MODE_RGB || FFB_MODE_YUV420 ++ bool "8 bits-per-pixel" ++ config FFB_MODE_16BPP ++ depends on FFB_MODE_RGB || FFB_MODE_YUV422 ++ bool "16 bits-per-pixel" ++ config FFB_MODE_24BPP ++ depends on FFB_MODE_RGB ++ bool "24 bits-per-pixel" ++ endchoice ++ +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/lcd-info.c linux-3.4.110/drivers/video/FTLCDC100/lcd-info.c +--- linux-3.4.110.orig/drivers/video/FTLCDC100/lcd-info.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/lcd-info.c 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,264 @@ ++/* ++ * HBP : Horizontal Back Porch ++ * HFP : Horizontal Front Porch ++ * HSPW: Horizontal Sync. Pulse Width ++ * PPL : Pixels-per-line = 16(PPL+1) ++ */ ++#define ENC_PARAM_TIME0(HBP, HFP, HSPW, PPL) \ ++ ((((HBP) - 1) << 24) | \ ++ (((HFP) - 1) << 16) | \ ++ (((HSPW) - 1) << 8 ) | \ ++ ((((PPL) >> 4) - 1) << 2 )) ++ ++/* ++ * HBP : Vertical Back Porch ++ * HFP : Vertical Front Porch ++ * HSPW: Vertical Sync. Pulse Width ++ * LPP : Lines-per-panel = LPP + 1 ++ */ ++#define ENC_PARAM_TIME1(VBP, VFP, VSPW, LPP) \ ++ ((((VBP) ) << 24) | \ ++ (((VFP) ) << 16) | \ ++ (((VSPW) - 1) << 10) | \ ++ (((LPP) - 1) )) ++ ++/* ++ * PRA : Pixel Rate Adaptive ++ * IOE : Invert Panel Output Enable ++ * IPC : Invert Panel Clock (Test Chip Testing) ++ * IHS : Invert Horisontal Sync. ++ * IVS : Invert Versical Sync. 
++ * PCD : Panel Clock Divisor ++ */ ++#define ENC_PARAM_TIME2(PRA, IOE, IPC, IHS, IVS, PCD) \ ++ (((PRA) << 15) | \ ++ ((IOE) << 14) | \ ++ ((IPC) << 13) | \ ++ ((IHS) << 12) | \ ++ ((IVS) << 11) | \ ++ (((PCD) - 1) )) ++ ++/* ++ * Enable YCbCr ++ * Enable YCbCr420 ++ * FIFO threadhold ++ * Panel type, 0-6bit, 1-8bit ++ * LcdVComp, when to generate interrupt, 1: start of back_porch ++ * Power Enable ++ * Big Endian Pixel/Byte Ordering ++ * BGR ++ * TFT ++ * LCD bits per pixel ++ * Controller Enable ++ */ ++ ++#define ENC_PARAM_CTRL(ENYUV, ENYUV420, FIFOTH, PTYPE, VCOMP, LCD_ON, ENDIAN, BGR, TFT, BPP, LCD_EN) \ ++ ((ENYUV << 18) | \ ++ (ENYUV420 << 17) | \ ++ (FIFOTH << 16) | \ ++ (PTYPE << 15) | \ ++ (VCOMP << 12) | \ ++ (LCD_ON << 11) | \ ++ (ENDIAN << 9) | \ ++ (BGR << 8) | \ ++ (TFT << 5) | \ ++ (BPP << 1) | \ ++ (LCD_EN)) ++ ++static struct lcd_param control[] = { ++ ++ { ++ .value = ENC_PARAM_CTRL(0, 0, 1, 1, 0x3, 1, 0x0, 1, 1, 0x3, 1), ++ .flags = FFB_MODE_RGB | FFB_MODE_8BPP, ++ }, ++ { ++ .value = ENC_PARAM_CTRL(1, 1, 1, 1, 0x3, 1, 0x0, 0, 1, 0x3, 1), ++ .flags = FFB_MODE_YUV420 | FFB_MODE_8BPP, ++ }, ++ { ++ .value = ENC_PARAM_CTRL(1, 0, 1, 1, 0x3, 1, 0x0, 0, 1, 0x4, 1), ++ .flags = FFB_MODE_YUV422 | FFB_MODE_16BPP, ++ }, ++ { ++ .value = ENC_PARAM_CTRL(0, 0, 1, 1, 0x3, 1, 0x0, 1, 1, 0x4, 1), ++ .flags = FFB_MODE_RGB | FFB_MODE_16BPP, ++ }, ++ { ++ .value = ENC_PARAM_CTRL(0, 0, 1, 1, 0x3, 1, 0x0, 1, 1, 0x5, 1), ++ .flags = FFB_MODE_RGB | FFB_MODE_24BPP, ++ }, ++}; ++ ++#ifdef CONFIG_PANEL_AUA036QN01 ++ ++static struct lcd_param time0[] = { ++ ++ { ++ .value = ENC_PARAM_TIME0(7, 6, 1, 320), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time1[] = { ++ ++ { ++ .value = ENC_PARAM_TIME1(1, 1, 1, 240), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time2[] = { ++ ++ { ++ .value = ENC_PARAM_TIME2(0, 0, 1, 1, 1, 0x7), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct faradayfb_mach_info ffb_mach_info = { ++ ++ .pixclock = 171521, ++ .xres = 320, ++ .yres = 240, ++ .max_bpp = 24, ++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, ++ .num_time0 = ARRAY_SIZE(time0), ++ .time0 = time0, ++ .num_time1 = ARRAY_SIZE(time1), ++ .time1 = time1, ++ .num_time2 = ARRAY_SIZE(time2), ++ .time2 = time2, ++ .num_control = ARRAY_SIZE(control), ++ .control = control, ++}; ++#endif ++ ++#ifdef CONFIG_PANEL_AUA070VW04 ++static struct lcd_param time0[] = { ++ ++ { ++ .value = ENC_PARAM_TIME0(88, 40, 128, 800), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time1[] = { ++ ++ { ++ .value = ENC_PARAM_TIME1(21, 1, 3, 480), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time2[] = { ++ ++ { ++ .value = ENC_PARAM_TIME2(0, 1, 1, 1, 1, 0x5), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct faradayfb_mach_info ffb_mach_info = { ++ ++ .pixclock = 171521, ++ .xres = 800, ++ .yres = 480, ++ .max_bpp = 24, ++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, ++ .num_time0 = ARRAY_SIZE(time0), ++ .time0 = time0, ++ .num_time1 = ARRAY_SIZE(time1), ++ .time1 = time1, ++ .num_time2 = ARRAY_SIZE(time2), ++ .time2 = time2, ++ .num_control = ARRAY_SIZE(control), ++ .control = control, ++}; ++ ++#endif ++ ++#ifdef CONFIG_PANEL_LW500AC9601 ++static struct lcd_param time0[] = { ++ ++ { ++ .value = ENC_PARAM_TIME0(88, 40, 
128, 800), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time1[] = { ++ ++ { ++ .value = ENC_PARAM_TIME1(21, 1, 3, 480), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct lcd_param time2[] = { ++ ++ { ++ .value = ENC_PARAM_TIME2( 0, 0, 1, 1, 1, 0x3), ++ .flags = FFB_MODE_RGB | FFB_MODE_YUV420 | FFB_MODE_YUV422, ++ }, ++}; ++ ++static struct faradayfb_mach_info ffb_mach_info = { ++ ++ .pixclock = 171521, ++ .xres = 800, ++ .yres = 480, ++ .max_bpp = 24, ++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, ++ .num_time0 = ARRAY_SIZE(time0), ++ .time0 = time0, ++ .num_time1 = ARRAY_SIZE(time1), ++ .time1 = time1, ++ .num_time2 = ARRAY_SIZE(time2), ++ .time2 = time2, ++ .num_control = ARRAY_SIZE(control), ++ .control = control, ++}; ++#endif ++ ++#ifdef CONFIG_CH7013A ++static struct lcd_param time0[] = { ++ ++ { ++ .value = ENC_PARAM_TIME0(42, 10, 96, 640), ++ .flags = FFB_MODE_RGB, ++ }, ++}; ++ ++static struct lcd_param time1[] = { ++ ++ { ++ .value = ENC_PARAM_TIME1(28, 5, 2, 480), ++ .flags = FFB_MODE_RGB, ++ }, ++}; ++static struct lcd_param time2[] = { ++ ++ { ++ .value = ENC_PARAM_TIME2(0, 1, 1, 0, 0, 0x3), ++ .flags = FFB_MODE_RGB, ++ }, ++}; ++ ++static struct faradayfb_mach_info ffb_mach_info = { ++ ++ .pixclock = 37910, ++ .xres = 640, ++ .yres = 480, ++ .max_bpp = 24, ++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, ++ .num_time0 = ARRAY_SIZE(time0), ++ .time0 = time0, ++ .num_time1 = ARRAY_SIZE(time1), ++ .time1 = time1, ++ .num_time2 = ARRAY_SIZE(time2), ++ .time2 = time2, ++ .num_control = ARRAY_SIZE(control), ++ .control = control, ++}; ++ ++#endif /* CONFIG_CH7013A */ +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/Makefile linux-3.4.110/drivers/video/FTLCDC100/Makefile +--- linux-3.4.110.orig/drivers/video/FTLCDC100/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/Makefile 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1 @@ ++obj-$(CONFIG_FB_FTLCDC100) += faradayfb-main.o +diff -Nur linux-3.4.110.orig/drivers/video/FTLCDC100/pingpong-module.c linux-3.4.110/drivers/video/FTLCDC100/pingpong-module.c +--- linux-3.4.110.orig/drivers/video/FTLCDC100/pingpong-module.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/video/FTLCDC100/pingpong-module.c 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,592 @@ ++void *fmem_alloc(size_t size, dma_addr_t *dma_handle, unsigned long flags) ++{ ++ struct page *page; ++ void *cpu_addr = NULL; ++ ++ size = PAGE_ALIGN(size); ++ ++ if (!(page = alloc_pages(GFP_DMA, get_order(size)))) { ++ ++ DEBUG(1, 1, "alloc_pages fail! (requested %#x)", size); ++ goto no_page; ++ } ++ ++ *dma_handle = page_to_phys(page); ++ ++ if ((cpu_addr = __ioremap(*dma_handle, size, flags, 1))) { ++ ++ do { ++ SetPageReserved(page); ++ page++; ++ } while (size -= PAGE_SIZE); ++ } ++ else { ++ __free_pages(page, get_order(size)); ++ DEBUG(1, 1, "__ioremap fail! 
(phy %#x)", *dma_handle); ++ } ++no_page: ++ return cpu_addr; ++} ++ ++void fmem_free(size_t size, void *cpu_addr, dma_addr_t handle) ++{ ++ struct page *page = pfn_to_page(handle >> PAGE_SHIFT); ++ ++ __iounmap(cpu_addr); ++ size = PAGE_ALIGN(size); ++ ++ do { ++ ClearPageReserved(page); ++ __free_page(page); ++ page++; ++ ++ } while (size -= PAGE_SIZE); ++} ++ ++static int faradayfb_mmap(struct fb_info *info, struct vm_area_struct *vma) ++{ ++ struct faradayfb_info *fbi = info->par; ++ unsigned long off = vma->vm_pgoff << PAGE_SHIFT; ++ int ret = -EINVAL; ++ ++ DEBUG(0, 1, "Enter\n"); ++ ++ if (off < info->fix.smem_len) { ++ ++ off += fbi->screen_dma & PAGE_MASK; ++ vma->vm_pgoff = off >> PAGE_SHIFT; ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ++ vma->vm_flags |= VM_RESERVED; ++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot); ++ } ++ else { ++ DEBUG(1, 1, "buffer mapping error !!\n"); ++ } ++ ++ DEBUG(0, 1, "Leave\n"); ++ ++ return ret; ++} ++ ++/* ++ * Allocates the DRAM memory for the frame buffer. This buffer is ++ * remapped into a non-cached, non-buffered, memory region to ++ * allow palette and pixel writes to occur without flushing the ++ * cache. Once this area is remapped, all virtual memory ++ * access to the video memory should occur at the new region. ++ */ ++static int __init faradayfb_map_video_memory(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ ++ /* ++ * We reserve one page for the palette, plus the size ++ * of the framebuffer. ++ */ ++ fbi->map_size = PAGE_ALIGN(info->fix.smem_len + PAGE_SIZE); ++ ++ fbi->map_cpu = fmem_alloc(fbi->map_size, &fbi->map_dma, pgprot_writecombine(PAGE_KERNEL)); ++ // fbi->map_cpu = dma_alloc_writecombine(fbi->info->dev, fbi->map_size, &fbi->map_dma, GFP_KERNEL); ++ ++ if (fbi->map_cpu) { ++ ++ memset(fbi->map_cpu, 0x1d, fbi->map_size); ++ info->screen_base = fbi->map_cpu + PAGE_SIZE; ++ fbi->screen_dma = fbi->map_dma + PAGE_SIZE; ++ info->fix.smem_start = fbi->screen_dma; ++ } ++ ++ return fbi->map_cpu ? 
0 : -ENOMEM; ++} ++ ++static inline void faradayfb_clean_screen(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ int size = info->var.xres * info->var.yres; ++ ++ if (fbi->smode & FFB_MODE_YUV422) { ++ ++ memset(fbi->map_cpu, 16, size * info->var.bits_per_pixel / 8); ++ } ++ else if (fbi->smode & FFB_MODE_YUV420) { ++ ++ memset(fbi->map_cpu, 16, size); ++ memset(fbi->map_cpu + PAGE_SIZE + ((size + 0xffff) & 0xffff0000), 128, size / 4); ++ memset(fbi->map_cpu + PAGE_SIZE + ((size + 0xffff) & 0xffff0000) * 5 / 4, 128, size / 4); ++ } ++} ++ ++static inline void faradayfb_unmap_video_memory(struct fb_info *info) ++{ ++ struct faradayfb_info *fbi = info->par; ++ fmem_free(fbi->map_size, fbi->map_cpu, fbi->map_dma); ++} ++ ++#define FRAME_SIZE_RGB(xres, yres, mbpp) ((xres) * (yres) * (mbpp) / 8) ++#define FRAME_SIZE_YUV422(xres, yres, mbpp) (((xres) * (yres) * (mbpp) / 8) * 2) ++#define FRAME_SIZE_YUV420(xres, yres, mbpp) (((((xres) * (yres) * (mbpp) / 8) + 0xffff) & 0xffff0000) * 3 / 2) ++ ++static inline u32 faradayfb_cal_frame_buf_size(struct faradayfb_info *fbi) ++{ ++ u32 size_rgb = FRAME_SIZE_RGB(fbi->xres, fbi->yres, fbi->max_bpp); ++ u32 size_yuv422 = FRAME_SIZE_YUV422(fbi->xres, fbi->yres, 8); ++ u32 size_yuv420 = FRAME_SIZE_YUV420(fbi->xres, fbi->yres, 8); ++ ++ return max(size_rgb, max(size_yuv422, size_yuv420)); ++} ++ ++#ifdef CONFIG_FTLCD_OSD ++/************************************ ++ * OSD ++ ***********************************/ ++#define FOSD_SETPOS 0x46e1 ++#define FOSD_SETDIM 0x46e2 ++#define FOSD_SETSCAL 0x46e3 ++#define FLCD_SET_TRANSPARENT 0x46e4 ++#define FLCD_SET_STRING 0x46e5 ++#define FOSD_ON 0x46e6 ++#define FOSD_OFF 0x46e7 ++#define FRREG 0x46e8 ++#define FWREG 0x46e9 ++ ++#define dig_alpha (16 * 10) + (16 * 26) + (16 * 3) ++ ++struct fosd_string { ++ ++ unsigned int Str_row; ++ unsigned int display_mode; ++ unsigned int fg_color; ++ unsigned int bg_color; ++ unsigned char Str_OSD[30]; ++}; ++ ++struct fosd_data { ++ ++ unsigned int HPos; ++ unsigned int VPos; ++ unsigned int HDim; ++ unsigned int VDim; ++ unsigned int transparent_level; ++ unsigned int HScal; ++ unsigned int VScal; ++ struct fosd_string Str_Data[10]; ++}; ++ ++unsigned int OSD_Font[] = { ++ ++ /* 0 */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x67, 0x6f, 0x7b, ++ 0x73, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 1 */ ++ 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x0c, 0x0c, ++ 0x0c, 0x0c, 0x0c, 0x3f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 2 */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x03, 0x06, 0x0c, ++ 0x18, 0x30, 0x63, 0x7f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 3 */ ++ 0x00, 0x00, 0x00, 0x3E, 0x63, 0x03, 0x03, 0x1e, ++ 0x03, 0x03, 0x63, 0x3E, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 4 */ ++ 0x00, 0x00, 0x00, 0x06, 0x0e, 0x1e, 0x36, 0x66, ++ 0x7f, 0x06, 0x06, 0x0f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 5 */ ++ 0x00, 0x00, 0x00, 0x7f, 0x60, 0x60, 0x60, 0x7e, ++ 0x03, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 6 */ ++ 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7e, ++ 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 7 */ ++ 0x00, 0x00, 0x00, 0x7f, 0x63, 0x03, 0x06, 0x0c, ++ 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 8 */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x3e, ++ 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* 9 */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x3f, ++ 0x03, 0x03, 0x06, 0x3c, 0x00, 0x00, 0x00, 0x00, ++ ++ /* A */ ++ 0x00, 0x00, 0x00, 0x08, 0x1c, 0x36, 0x63, 0x63, ++ 0x7f, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* B */ ++ 0x00, 0x00, 0x00, 
0x7E, 0x33, 0x33, 0x33, 0x3E, ++ 0x33, 0x33, 0x33, 0x7E, 0x00, 0x00, 0x00, 0x00, ++ ++ /* C */ ++ 0x00, 0x00, 0x00, 0x1E, 0x33, 0x61, 0x60, 0x60, ++ 0x60, 0x61, 0x33, 0x1E, 0x00, 0x00, 0x00, 0x00, ++ ++ /* D */ ++ 0x00, 0x00, 0x00, 0x7c, 0x36, 0x33, 0x33, 0x33, ++ 0x33, 0x33, 0x36, 0x7C, 0x00, 0x00, 0x00, 0x00, ++ ++ /* E */ ++ 0x00, 0x00, 0x00, 0x7f, 0x33, 0x31, 0x34, 0x3c, ++ 0x34, 0x31, 0x33, 0x7f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* F */ ++ 0x00, 0x00, 0x00, 0x7f, 0x33, 0x31, 0x34, 0x3c, ++ 0x34, 0x30, 0x30, 0x78, 0x00, 0x00, 0x00, 0x00, ++ ++ /* G */ ++ 0x00, 0x00, 0x00, 0x1E, 0x33, 0x61, 0x60, 0x60, ++ 0x6F, 0x63, 0x36, 0x7C, 0x00, 0x00, 0x00, 0x00, ++ ++ /* H */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x64, 0x7f, ++ 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* I */ ++ 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, ++ 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, ++ ++ /* J */ ++ 0x00, 0x00, 0x00, 0x0f, 0x06, 0x06, 0x06, 0x06, ++ 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, ++ ++ /* K */ ++ 0x00, 0x00, 0x00, 0x73, 0x33, 0x36, 0x36, 0x3c, ++ 0x36, 0x36, 0x33, 0x73, 0x00, 0x00, 0x00, 0x00, ++ ++ /* L */ ++ 0x00, 0x00, 0x00, 0x78, 0x30, 0x30, 0x30, 0x30, ++ 0x30, 0x31, 0x33, 0x7f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* M */ ++ 0x00, 0x00, 0x00, 0x63, 0x77, 0x7f, 0x6b, 0x63, ++ 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* N */ ++ 0x00, 0x00, 0x00, 0x63, 0x73, 0x7b, 0x7f, 0x6f, ++ 0x67, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* O */ ++ 0x00, 0x00, 0x00, 0x1c, 0x36, 0x63, 0x63, 0x63, ++ 0x63, 0x63, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, ++ ++ /* P */ ++ 0x00, 0x00, 0x00, 0x7e, 0x33, 0x33, 0x33, 0x3e, ++ 0x30, 0x30, 0x30, 0x78, 0x00, 0x00, 0x00, 0x00, ++ ++ /* Q */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, ++ 0x6b, 0x6f, 0x3e, 0x06, 0x07, 0x00, 0x00, 0x00, ++ ++ /* R */ ++ 0x00, 0x00, 0x00, 0x7e, 0x33, 0x33, 0x33, 0x3e, ++ 0x36, 0x33, 0x33, 0x73, 0x00, 0x00, 0x00, 0x00, ++ ++ /* S */ ++ 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x30, 0x1c, ++ 0x06, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* T */ ++ 0x00, 0x00, 0x00, 0xFF, 0x99, 0x18, 0x18, 0x18, ++ 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, ++ ++ /* U */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, ++ 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* V */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, ++ 0x63, 0x36, 0x1c, 0x08, 0x00, 0x00, 0x00, 0x00, ++ ++ /* W */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, ++ 0x6b, 0x7f, 0x77, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* X */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x36, 0x1c, ++ 0x36, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, ++ ++ /* Y */ ++ 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x36, 0x1c, ++ 0x1c, 0x1c, 0x1c, 0x3e, 0x00, 0x00, 0x00, 0x00, ++ ++ /* Z */ ++ 0x00, 0x00, 0x00, 0x7f, 0x63, 0x46, 0x0c, 0x18, ++ 0x30, 0x61, 0x63, 0x7f, 0x00, 0x00, 0x00, 0x00, ++ ++ /* space */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ ++ /* = */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7E, 0x00, ++ 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ ++ /* , */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, ++}; ++ ++void OSD_On(struct faradayfb_info *fbi) ++{ ++ REG32(fbi->io_base + 0x34) &= 0xfffffffe; ++ REG32(fbi->io_base + 0x34) |= 1; ++} ++ ++void OSD_Off(struct faradayfb_info *fbi) ++{ ++ REG32(fbi->io_base + 0x34) &= 0xfffffffe; ++} ++ ++void OSD_Pos(struct faradayfb_info *fbi, int HPos, int VPos) ++{ ++ REG32(fbi->io_base + 0x38) 
= (HPos << 10) | VPos; ++} ++ ++void OSD_Dim(struct faradayfb_info *fbi, int HDim, int VDim) ++{ ++ REG32(fbi->io_base + 0x34) &= 0x0000001f; ++ REG32(fbi->io_base + 0x34) |= ((HDim << 10) | (VDim << 5)); ++} ++ ++void OSD_transparent(struct faradayfb_info *fbi, int level) ++{ ++ REG32(fbi->io_base + 0x40) &= 0xffffff00; ++ REG32(fbi->io_base + 0x40) |= (level << 6); ++} ++ ++void OSD_fg_color(struct faradayfb_info *fbi, int pal0, int pal1, int pal2, int pal3) ++{ ++ REG32(fbi->io_base + 0x3c) = (pal0) | (pal1 << 8) | (pal2 << 16) | (pal3<< 24); ++} ++ ++void OSD_bg_color(struct faradayfb_info *fbi, int pal1, int pal2, int pal3) ++{ ++ REG32(fbi->io_base + 0x40) &= 0x000000ff; ++ REG32(fbi->io_base + 0x40) |= (pal1 << 8) | (pal2 << 16) | (pal3 << 24); ++} ++ ++void OSD_Scal(struct faradayfb_info *fbi, int HScal, int VScal) ++{ ++ REG32(fbi->io_base + 0x34) &= 0xffffffe1; ++ REG32(fbi->io_base + 0x34) |= (HScal << 3) | (VScal << 1); ++} ++ ++void OSD_putc(struct faradayfb_info *fbi, char c, int position, unsigned int value) ++{ ++ if (c >= '0' && c <= '9') ++ REG32(fbi->io_base + 0xc000 + position * 4) = ((c -'0') << 4) | value; ++ ++ else if (c>= 'A' && c <= 'Z') ++ REG32(fbi->io_base + 0xc000 + position * 4) = ((c - 'A' + 10) << 4) | value; ++ ++ if (c == ' ') ++ REG32(fbi->io_base + 0xc000 + position * 4) = (('Z' - 'A' + 10 + 1) << 4) | value; ++ ++ if (c == '=') ++ REG32(fbi->io_base + 0xc000 + position * 4) = (('Z' - 'A' + 10 + 2) << 4) | value; ++ ++ if (c == ',') ++ REG32(fbi->io_base + 0xc000 + position * 4) = (('Z' - 'A' + 10 + 3) << 4) | value; ++} ++ ++void OSD_puts(struct faradayfb_info *fbi, char *str, int position, unsigned int value) ++{ ++ int i; ++ ++ for (i = 0; i < strlen(str); i++) ++ OSD_putc(fbi, *(str + i), position + i, value); ++} ++ ++void OSD_String(struct faradayfb_info *fbi, int row, int mode, char *str, int fg_color, int bg_color) ++{ ++ int i, j, x, y; ++ unsigned int value = fg_color | bg_color; ++ ++ /* 10 digit & 26 alphabet & ' ' & '=' & ',' */ ++ for (i = 0; i < dig_alpha; i++) { ++ ++ x = 0; ++ y = OSD_Font[i]; ++ ++ for (j = 0; j < 12; j ++) { /* reorder */ ++ if (y & 1) ++ x |= 1; ++ y >>= 1; ++ x <<= 1; ++ } ++ ++ x >>= 1; ++ REG32(fbi->io_base + 0x8000 + i * 4) = x; ++ } ++ ++ OSD_puts(fbi, str, row, value); ++ ++ if (mode == 2) { /* YCbCr */ ++ ++ OSD_fg_color(fbi, 0x57, 0x88, 0x3B, 0xFF); ++ OSD_bg_color(fbi, 0x57, 0x88, 0x3B); ++ } ++ else { ++ OSD_fg_color(fbi, 0x07, 0x38, 0xC0, 0xFF); ++ OSD_bg_color(fbi, 0x07, 0x38, 0xc0); ++ } ++} ++#endif /* CONFIG_FTLCD_OSD */ ++ ++struct andesIO { ++ ++ unsigned long Regaddr; ++ unsigned long Regoffset; ++ unsigned long Regvalue; ++}; ++ ++static int faradayfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) ++{ ++ int ret = 0; ++#ifdef CONFIG_FTLCD_OSD ++ struct faradayfb_info *fbi = info->par; ++ int i; ++ struct fosd_data fosd; ++ ++ struct andesIO IOAccess; ++ unsigned long *Regaccess; ++#endif ++ ++ DEBUG(0, 1, "Enter\n"); ++ ++ switch (cmd) { ++#ifdef CONFIG_FTLCD_OSD ++ case FRREG: ++ ++ if (copy_from_user(&IOAccess, (struct andesIO *)arg, sizeof(struct andesIO))) { ++ ++ ret = -EFAULT; ++ break; ++ } ++ ++ Regaccess = (unsigned long *)(((IOAccess.Regaddr >> 4) | (unsigned long)0xF0000000) + IOAccess.Regoffset); ++ ++ IOAccess.Regvalue = *(Regaccess); ++ ++ if (copy_to_user((struct andesIO *)arg, &IOAccess, sizeof(struct andesIO))) { ++ ret = -EFAULT; ++ break; ++ } ++ ++ break; ++ ++ case FWREG: ++ ++ if (copy_from_user(&IOAccess, (struct andesIO *)arg, sizeof(struct andesIO))) { ++ 
ret = -EFAULT; ++ break; ++ } ++ ++ Regaccess = (unsigned long *)(((IOAccess.Regaddr >> 4) | (unsigned long)0xF0000000) + IOAccess.Regoffset); ++ *(Regaccess) = IOAccess.Regvalue; ++ ++ break; ++ ++ case FOSD_ON: ++ ++ DEBUG(1, 1, "FOSD_ON:\n"); ++ OSD_On(fbi); ++ break; ++ ++ case FOSD_OFF: ++ ++ DEBUG(1, 1, "FOSD_OFF:\n"); ++ OSD_Off(fbi); ++ break; ++ ++ case FOSD_SETPOS: ++ ++ DEBUG(1, 1, "FOSD_SETPOS:\n"); ++ ++ if (copy_from_user(&fosd, (unsigned int *)arg, 2 * sizeof(unsigned int))) { ++ ++ ret = -EFAULT; ++ break; ++ } ++ ++ OSD_Pos(fbi, fosd.HPos, fosd.VPos); ++ DEBUG(1, 1, "OSD_Pos = %d %d\n", fosd.HPos, fosd.VPos); ++ break; ++ ++ case FOSD_SETDIM: ++ ++ DEBUG(1, 1, "FOSD_SETDIM:\n"); ++ ++ if (copy_from_user(&fosd, (unsigned int *)arg, 4 * sizeof(unsigned int))) { ++ ++ ret = -EFAULT; ++ break; ++ } ++ ++ OSD_Dim(fbi, fosd.HDim, fosd.VDim); ++ DEBUG(1, 1, "OSD_Dim = %d %d\n", fosd.HDim, fosd.VDim); ++ break; ++ ++ case FOSD_SETSCAL: ++ ++ DEBUG(1, 1, "FOSD_SETSCAL:\n"); ++ ++ if (copy_from_user(&fosd, (unsigned int *)arg, 7 * sizeof(unsigned int))) { ++ ret = -EFAULT; ++ break; ++ } ++ ++ OSD_Scal(fbi, fosd.HScal, fosd.VScal); ++ break; ++ ++ case FLCD_SET_TRANSPARENT: ++ ++ DEBUG(1, 1, "FLCD_SET_TRANSPARENT:\n"); ++ ++ if (copy_from_user(&fosd, (unsigned int *)arg, 5 * sizeof(unsigned int))) { ++ ++ ret = -EFAULT; ++ break; ++ } ++ ++ OSD_transparent(fbi, fosd.transparent_level); ++ DEBUG(1, 1, "OSD_transparent = %d\n", fosd.transparent_level); ++ break; ++ ++ case FLCD_SET_STRING: ++ ++ DEBUG(1, 1, "FLCD_SET_STRING:\n"); ++ ++ if (copy_from_user(&fosd, (unsigned int *)arg, sizeof(struct fosd_data))) { ++ ++ ret = -EFAULT; ++ break; ++ } ++ ++ for (i = 0; i < fosd.VDim; i++) ++ ++ OSD_String(fbi, fosd.Str_Data[i].Str_row, ++ fosd.Str_Data[i].display_mode, ++ fosd.Str_Data[i].Str_OSD, ++ fosd.Str_Data[i].fg_color, ++ fosd.Str_Data[i].bg_color); ++ break; ++#endif /* CONFIG_FTLCD_OSD */ ++ ++ default: ++ ++ DEBUG(1, 1, "IOCTL CMD(0x%08u) no define!\n", cmd); ++ ret = -EFAULT; ++ break; ++ } ++ ++ DEBUG(0, 1, "Leave\n"); ++ return ret; ++} ++ +diff -Nur linux-3.4.110.orig/drivers/video/Kconfig linux-3.4.110/drivers/video/Kconfig +--- linux-3.4.110.orig/drivers/video/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/video/Kconfig 2016-04-07 10:20:51.058085512 +0200 +@@ -2166,6 +2166,8 @@ + ---help--- + Say Y here to enable support for PNX4008 RGB Framebuffer + ++source "drivers/video/FTLCDC100/Kconfig" ++ + config FB_IBM_GXT4500 + tristate "Framebuffer support for IBM GXT4500P adaptor" + depends on FB && PPC +diff -Nur linux-3.4.110.orig/drivers/video/Makefile linux-3.4.110/drivers/video/Makefile +--- linux-3.4.110.orig/drivers/video/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/video/Makefile 2016-04-07 10:20:51.058085512 +0200 +@@ -143,6 +143,7 @@ + obj-$(CONFIG_FB_CARMINE) += carminefb.o + obj-$(CONFIG_FB_MB862XX) += mb862xx/ + obj-$(CONFIG_FB_MSM) += msm/ ++obj-$(CONFIG_FB_FTLCDC100) += FTLCDC100/ + obj-$(CONFIG_FB_NUC900) += nuc900fb.o + obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o + obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o +diff -Nur linux-3.4.110.orig/drivers/watchdog/ftwdt010_wdt.c linux-3.4.110/drivers/watchdog/ftwdt010_wdt.c +--- linux-3.4.110.orig/drivers/watchdog/ftwdt010_wdt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/drivers/watchdog/ftwdt010_wdt.c 2016-04-07 10:20:51.058085512 +0200 +@@ -0,0 +1,127 @@ ++/* ++ * Watchdog driver for the FTWDT010 Watch Dog Driver ++ * ++ * (c) Copyright 2004 Faraday Technology 
Corp. (www.faraday-tech.com) ++ * Based on sa1100_wdt.c by Oleg Drokin ++ * Based on SoftDog driver by Alan Cox ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ * ++ * 27/11/2004 Initial release ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEBUG( str, ...) \ ++ do{ \ ++ if( debug) \ ++ printk( str, ##__VA_ARGS__); \ ++ } while(0) ++ ++#define wdcounter (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x00)) ++#define wdload (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x04)) ++#define wdrestart (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x08)) ++#define wdcr (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x0C)) ++#define wdstatus (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x10)) ++#define wdclear (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x14)) ++#define wdintrcter (*( volatile unsigned long *)( WDT_FTWDT010_VA_BASE + 0x18)) ++ ++#define TIMER_MARGIN 60 /* (secs) Default is 1 minute */ ++#define RESTART_MAGIC 0x5AB9 ++#define PCLK (AHB_CLK_IN / 2) ++ ++static int debug = 0; ++static int timeout = TIMER_MARGIN; /* in seconds */ ++ ++module_param(debug, int, 0); ++module_param(timeout, int, 0); ++ ++static int ftwdt010_dog_open(struct inode *inode, struct file *file){ ++ ++#if 0 ++ /* Allow only one person to hold it open */ ++ if( test_and_set_bit( 1, &ftwdt010_wdt_users)) ++ return -EBUSY; ++ ++ ftwdt010_wdt_users = 1; ++#endif ++ DEBUG("Activating WDT..\n"); ++ ++ wdcr = 0; ++ wdload = PCLK * timeout; ++ wdrestart = RESTART_MAGIC; /* Magic number */ ++ wdcr = 0x03; /* Enable WDT */ ++ ++ return 0; ++} ++ ++static int ftwdt010_dog_release(struct inode *inode, struct file *file){ ++ ++#ifndef CONFIG_WATCHDOG_NOWAYOUT ++ /* ++ * Shut off the timer. ++ * Lock it in if it's a module and we defined ...NOWAYOUT ++ */ ++ wdcr = 0; ++ DEBUG( "Deactivating WDT..\n"); ++#endif ++ return 0; ++} ++ ++static ssize_t ftwdt010_dog_write(struct file *file, const char *data, size_t len, loff_t *ppos){ ++ ++ if(len){ ++ ++ wdrestart = RESTART_MAGIC; ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static struct file_operations ftwdt010_dog_fops = { ++ ++ .owner = THIS_MODULE, ++ .llseek = no_llseek, ++ .write = ftwdt010_dog_write, ++ .open = ftwdt010_dog_open, ++ .release = ftwdt010_dog_release, ++}; ++ ++static struct miscdevice ftwdt010_dog_miscdev = { ++ ++ WATCHDOG_MINOR, ++ "FTWDT010 watchdog", ++ &ftwdt010_dog_fops ++}; ++ ++static int __init ftwdt010_dog_init( void){ ++ ++ int ret; ++ ++ ret = misc_register(&ftwdt010_dog_miscdev); ++ ++ if( ret) ++ return ret; ++ ++ DEBUG("FTWDT010 watchdog timer: timer timeout %d sec\n", timeout); ++ ++ return 0; ++} ++ ++static void __exit ftwdt010_dog_exit( void){ ++ ++ misc_deregister(&ftwdt010_dog_miscdev); ++} ++ ++module_init(ftwdt010_dog_init); ++module_exit(ftwdt010_dog_exit); ++MODULE_AUTHOR("Faraday Corp."); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/drivers/watchdog/Kconfig linux-3.4.110/drivers/watchdog/Kconfig +--- linux-3.4.110.orig/drivers/watchdog/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/watchdog/Kconfig 2016-04-07 10:20:51.058085512 +0200 +@@ -64,6 +64,12 @@ + To compile this driver as a module, choose M here: the + module will be called softdog. 
+ ++config FTWDT010_WATCHDOG ++ tristate "FTWDT010_WATCHDOG" ++ help ++ Support for Faraday ftwdt010 watchdog. When the watchdog triigers the ++ system will be reset. ++ + config WM831X_WATCHDOG + tristate "WM831x watchdog" + depends on MFD_WM831X +diff -Nur linux-3.4.110.orig/drivers/watchdog/Makefile linux-3.4.110/drivers/watchdog/Makefile +--- linux-3.4.110.orig/drivers/watchdog/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/drivers/watchdog/Makefile 2016-04-07 10:20:51.058085512 +0200 +@@ -163,6 +163,7 @@ + obj-$(CONFIG_XEN_WDT) += xen_wdt.o + + # Architecture Independent ++obj-$(CONFIG_FTWDT010_WATCHDOG) += ftwdt010_wdt.o + obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o + obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o + obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o +diff -Nur linux-3.4.110.orig/include/linux/mm.h linux-3.4.110/include/linux/mm.h +--- linux-3.4.110.orig/include/linux/mm.h 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/include/linux/mm.h 2016-04-07 10:20:51.058085512 +0200 +@@ -157,6 +157,7 @@ + #define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ + #define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ + #define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */ ++#define FAULT_FLAG_TRIED 0x40 /* second try */ + + /* + * This interface is used by x86 PAT code to identify a pfn mapping that is +diff -Nur linux-3.4.110.orig/include/linux/semaphore.h linux-3.4.110/include/linux/semaphore.h +--- linux-3.4.110.orig/include/linux/semaphore.h 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/include/linux/semaphore.h 2016-04-07 10:20:51.062085666 +0200 +@@ -36,6 +36,9 @@ + lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); + } + ++#define init_MUTEX(sem) sema_init(sem, 1) ++#define init_MUTEX_LOCKED(sem) sema_init(sem, 0) ++ + extern void down(struct semaphore *sem); + extern int __must_check down_interruptible(struct semaphore *sem); + extern int __must_check down_killable(struct semaphore *sem); +diff -Nur linux-3.4.110.orig/init/Kconfig linux-3.4.110/init/Kconfig +--- linux-3.4.110.orig/init/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/init/Kconfig 2016-04-07 10:20:51.062085666 +0200 +@@ -966,7 +966,7 @@ + + config UID16 + bool "Enable 16-bit UID system calls" if EXPERT +- depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) ++ depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || NDS32 || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) + default y + help + This enables the legacy 16-bit UID syscall wrappers. 
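[Editor's sketch, not part of the patch] For context on the FTWDT010_WATCHDOG option wired up above: the ftwdt010_wdt.c driver added earlier in this patch follows the legacy watchdog character-device contract — open() loads the counter (PCLK * timeout, 60 seconds by default) and enables the timer, any non-empty write() rewrites the magic restart value to kick it, and release() disables it unless CONFIG_WATCHDOG_NOWAYOUT is set. A minimal user-space keep-alive loop against that interface could look like the following; the /dev/watchdog path assumes the usual node for WATCHDOG_MINOR has been created, and the 20-second kick interval is only an illustrative choice well inside the default timeout:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/dev/watchdog", O_WRONLY);

    	if (fd < 0) {
    		perror("open /dev/watchdog");
    		return 1;
    	}

    	for (;;) {
    		/* any non-empty write restarts the countdown (ftwdt010_dog_write) */
    		if (write(fd, "k", 1) != 1)
    			perror("watchdog kick");
    		sleep(20);	/* kick well before the 60 s default timeout expires */
    	}

    	/* not reached: closing fd would stop the timer unless NOWAYOUT is configured */
    	return 0;
    }

If the keep-alive process dies, no further writes arrive and the counter runs down, at which point the FTWDT010 resets the system — which is the behaviour the Kconfig help text above describes.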
+diff -Nur linux-3.4.110.orig/kernel/Makefile linux-3.4.110/kernel/Makefile +--- linux-3.4.110.orig/kernel/Makefile 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/kernel/Makefile 2016-04-07 10:20:51.062085666 +0200 +@@ -20,6 +20,7 @@ + CFLAGS_REMOVE_rtmutex-debug.o = -pg + CFLAGS_REMOVE_cgroup-debug.o = -pg + CFLAGS_REMOVE_irq_work.o = -pg ++CFLAGS_REMOVE_kallsyms.o = -pg + endif + + obj-y += sched/ +diff -Nur linux-3.4.110.orig/kernel/trace/Kconfig linux-3.4.110/kernel/trace/Kconfig +--- linux-3.4.110.orig/kernel/trace/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/kernel/trace/Kconfig 2016-04-07 10:20:51.062085666 +0200 +@@ -141,7 +141,7 @@ + config FUNCTION_TRACER + bool "Kernel Function Tracer" + depends on HAVE_FUNCTION_TRACER +- select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE ++ select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE && !NDS32 + select KALLSYMS + select GENERIC_TRACER + select CONTEXT_SWITCH_TRACER +diff -Nur linux-3.4.110.orig/lib/Kconfig.debug linux-3.4.110/lib/Kconfig.debug +--- linux-3.4.110.orig/lib/Kconfig.debug 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/lib/Kconfig.debug 2016-04-07 10:20:51.062085666 +0200 +@@ -615,7 +615,7 @@ + bool + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT + select STACKTRACE +- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE ++ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !NDS32 + select KALLSYMS + select KALLSYMS_ALL + +@@ -1122,7 +1122,7 @@ + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT + depends on !X86_64 + select STACKTRACE +- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND ++ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !NDS32 + help + Provide stacktrace filter for fault-injection capabilities + +@@ -1132,7 +1132,7 @@ + depends on DEBUG_KERNEL + depends on STACKTRACE_SUPPORT + depends on PROC_FS +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !NDS32 + select KALLSYMS + select KALLSYMS_ALL + select STACKTRACE +diff -Nur linux-3.4.110.orig/sound/Kconfig linux-3.4.110/sound/Kconfig +--- linux-3.4.110.orig/sound/Kconfig 2015-10-22 03:20:09.000000000 +0200 ++++ linux-3.4.110/sound/Kconfig 2016-04-07 10:20:51.062085666 +0200 +@@ -93,6 +93,8 @@ + + source "sound/sh/Kconfig" + ++source "sound/nds32/Kconfig" ++ + # the following will depend on the order of config. 
+ # here assuming USB is defined before ALSA
+ source "sound/usb/Kconfig"
+diff -Nur linux-3.4.110.orig/sound/Makefile linux-3.4.110/sound/Makefile
+--- linux-3.4.110.orig/sound/Makefile 2015-10-22 03:20:09.000000000 +0200
++++ linux-3.4.110/sound/Makefile 2016-04-07 10:20:51.062085666 +0200
+@@ -6,7 +6,7 @@
+ obj-$(CONFIG_SOUND_PRIME) += oss/
+ obj-$(CONFIG_DMASOUND) += oss/
+ obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
+- firewire/ sparc/ spi/ parisc/ pcmcia/ mips/ soc/ atmel/
++ firewire/ sparc/ spi/ parisc/ pcmcia/ mips/ soc/ atmel/ nds32/
+ obj-$(CONFIG_SND_AOA) += aoa/
+
+ # This one must be compilable even if sound is configured out
+diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_ALSA.c linux-3.4.110/sound/nds32/FTSSP010_ALSA.c
+--- linux-3.4.110.orig/sound/nds32/FTSSP010_ALSA.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4.110/sound/nds32/FTSSP010_ALSA.c 2016-04-07 10:20:51.062085666 +0200
+@@ -0,0 +1,2020 @@
++/* FTSSP010 - UDA1345TS module:
++ *
++ * $log$
++ *
++ * 2006/02/23: I-Jui Sung: OSS emulation half-duplex
++ * playback/capture at 48K, 44.1K, 8K
++ * with mono/stereo 16bit/8bit
++ *
++ * 2006/02/22: I-Jui Sung: OSS emulation playback at 44.1KHz
++ * 16-bit mono completed. Relying on ALSA to
++ * resample
++ * 2009/02/24: dma upgrade checking list:
++ * - ac97 mode playback ................. ok
++ * - ac97 mode capture .................. ok
++ * - i2s mode playback .................. ok
++ * - i2s mode capture ................... ok
++ * - mixer support (snd_ctl_add, ...) ... todo
++ * - debug /proc entry .................. ok
++ */
++
++
++#include
++#include
++#include
++#include
++//#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++#include
++#include
++#include
++
++#include "FTSSP010_UDA1345TS.h"
++
++//ADD by river 2011.06.02
++struct alc5630_data;
++//End ADD by river 2011.06.02
++
++void init_hw(unsigned int cardno,unsigned int ac97, struct i2c_client *client);
++
++#if (!defined(CONFIG_PLATFORM_AHBDMA) && !defined(CONFIG_PLATFORM_APBDMA))
++#warning needs ahb/apb dma to work
++#endif
++
++/* ---------------------------------------------------------------------------
++ * Define the debug level of FTSSP_DEBUG
++ */
++#define FTSSP_DEBUG 0
++#define FTSSP_DEBUG_VERBOSE 0
++#define FTSSP_PROC_FS 0
++
++#undef VVDBG
++#if (FTSSP_DEBUG_VERBOSE)
++#define VVDBG(vvar...) (void)0
++//#define VVDBG(vvar...) printk(KERN_INFO vvar)
++#else
++#define VVDBG(vvar...) (void)0
++#endif
++
++#undef ERR
++#define ERR(vvar...) printk(KERN_ERR vvar)
++
++#undef INFO
++#define INFO(vvar...) printk(KERN_INFO vvar)
++
++#if (FTSSP_DEBUG)
++#undef DBG
++#define DBG(vvar...) printk(KERN_INFO vvar)
++#else
++#define DBG(vvar...) (void)0
++#endif
++
++#if (FTSSP_DEBUG_VERBOSE)
++#undef VDBG
++#define VDBG(vvar...) printk(KERN_INFO vvar)
++#else
++#define VDBG(vvar...)
(void)0 ++#endif ++ ++#ifdef CONFIG_SND_FTSSP010_I2S ++//ADD by river 2011.02.11 ++static struct i2c_client *g_i2c_client; ++ ++////////// ADD by river 2011.01.26 ++// Each client has this additional data ++struct alc5630_data { ++ struct i2c_client *client; ++ struct delayed_work work; ++ unsigned long gpio2_value; ++ struct mutex mtx; ++}; ++ ++static int alc5630_i2c_attach(struct i2c_adapter *adapter); ++static int alc5630_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); ++static int alc5630_i2c_suspend(struct i2c_client *i2c_client, pm_message_t mesg); ++static int alc5630_i2c_resume(struct i2c_client *i2c_client); ++ ++static int alc5630_i2c_remove(struct i2c_client *client); ++static int ftssp_alsa_init(struct i2c_client *client); ++ ++ ++static int i2s_alc5630_read(unsigned int raddr, char *data, struct i2c_client *client) ++{ ++ struct i2c_adapter *adap = client->adapter; ++ struct i2c_msg msg; ++ int ret, i2c_value; ++ ++ //Reading ALC5630 register ++ msg.addr = raddr; ++ msg.flags = (client->flags & I2C_M_TEN) | I2C_M_RD; ++ msg.len = 1; ++ msg.buf = (char *)data; ++ ++ //ret = i2c_transfer(adap, &msg, 1); ++ #ifndef CONFIG_SND_FTSSP010_AC97 ++ ret = i2c_transfer(adap, &msg, 1); ++ #endif ++ ++ if (ret != 0) { ++ printk("i2c read failed\n"); ++ return -1; ++ } ++ else ++ { ++ i2c_value = (data[0]&0xff) << 8 | (data[1]&0xff); ++ return i2c_value; ++ } ++ ++} ++ ++static void i2s_alc5630_write(unsigned int addr, unsigned int data, struct i2c_client *client) ++{ ++ ++ struct i2c_adapter *adap = client->adapter; ++ struct i2c_msg msg; ++ int ret, i2c_value; ++ char buf[3]; ++ ++ //Writing ALC5630 register ++ i2c_value = 0x0; ++ msg.addr = addr; ++ msg.flags = (client->flags & I2C_M_TEN) | ~I2C_M_RD; ++ msg.len = 1; ++ ++ buf[0] = (data >> 8) & 0xff; ++ buf[1] = data & 0xff; ++ msg.buf = (char *)buf; ++ ++ //ret = i2c_transfer(adap, &msg, 1); ++ #ifndef CONFIG_SND_FTSSP010_AC97 ++ ret = i2c_transfer(adap, &msg, 1); ++ #endif ++ ++ if (ret != 0) { ++ printk("i2c write failed\n"); ++ } ++ ++} ++ ++static void i2s_alc5630_read_test(struct i2c_client *client) ++{ ++ char data[3]; ++ //printk(">>>>> : i2s_alc5630_read_test().....\n"); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0, i2s_alc5630_read(0x0, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x02, i2s_alc5630_read(0x02, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x04, i2s_alc5630_read(0x04, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x06, i2s_alc5630_read(0x06, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x08, i2s_alc5630_read(0x08, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0a, i2s_alc5630_read(0x0a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0c, i2s_alc5630_read(0x0c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0e, i2s_alc5630_read(0x0e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x10, i2s_alc5630_read(0x10, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x12, i2s_alc5630_read(0x12, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x14, i2s_alc5630_read(0x14, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x16, i2s_alc5630_read(0x16, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x18, i2s_alc5630_read(0x18, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1a, i2s_alc5630_read(0x1a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1c, i2s_alc5630_read(0x1c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1e, i2s_alc5630_read(0x1e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x20, i2s_alc5630_read(0x20, data,client)); ++ 
printk("Reg 0x%02x = 0x%08x\n", 0x22, i2s_alc5630_read(0x22, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x24, i2s_alc5630_read(0x24, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x26, i2s_alc5630_read(0x26, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x28, i2s_alc5630_read(0x28, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2a, i2s_alc5630_read(0x2a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2c, i2s_alc5630_read(0x2c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2e, i2s_alc5630_read(0x2e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x30, i2s_alc5630_read(0x30, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x32, i2s_alc5630_read(0x32, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x34, i2s_alc5630_read(0x34, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x36, i2s_alc5630_read(0x36, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x38, i2s_alc5630_read(0x38, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3a, i2s_alc5630_read(0x3a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3c, i2s_alc5630_read(0x3c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3e, i2s_alc5630_read(0x3e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x40, i2s_alc5630_read(0x40, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x42, i2s_alc5630_read(0x42, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x44, i2s_alc5630_read(0x44, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x46, i2s_alc5630_read(0x46, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x48, i2s_alc5630_read(0x48, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4a, i2s_alc5630_read(0x4a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4c, i2s_alc5630_read(0x4c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4e, i2s_alc5630_read(0x4e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x50, i2s_alc5630_read(0x50, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x52, i2s_alc5630_read(0x52, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x54, i2s_alc5630_read(0x54, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x56, i2s_alc5630_read(0x56, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x58, i2s_alc5630_read(0x58, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5a, i2s_alc5630_read(0x5a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5c, i2s_alc5630_read(0x5c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5e, i2s_alc5630_read(0x5e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x60, i2s_alc5630_read(0x60, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x62, i2s_alc5630_read(0x62, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x64, i2s_alc5630_read(0x64, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x66, i2s_alc5630_read(0x66, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x68, i2s_alc5630_read(0x68, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6a, i2s_alc5630_read(0x6a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6c, i2s_alc5630_read(0x6c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6e, i2s_alc5630_read(0x6e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x70, i2s_alc5630_read(0x70, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x72, i2s_alc5630_read(0x72, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x74, i2s_alc5630_read(0x74, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x76, i2s_alc5630_read(0x76, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x78, i2s_alc5630_read(0x78, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7a, i2s_alc5630_read(0x7a, 
data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7c, i2s_alc5630_read(0x7c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7e, i2s_alc5630_read(0x7e, data,client)); ++ ++} ++ ++static int alc5630_i2c_attach(struct i2c_adapter *adapter){ ++ ++ struct i2c_board_info info; ++ struct i2c_client *client; ++ ++ int ret=0; ++ ++ //i2c_dbg( 1, "alc5630_i2c_attach() is called.\n"); ++ //printk(">>>>>>>>>> (2) alc5630_i2c_attach() is called.\n"); ++ ++ //ret = i2c_probe(adapter, &addr_data, alc5630_detect); ++ memset(&info, 0, sizeof(struct i2c_board_info)); ++ strlcpy(info.type, "alc5630_codec", I2C_NAME_SIZE); ++ //info.addr = 0x3e; ++ info.addr = 0x66; ++ //info.platform_data = node; ++ ++ //client = i2c_new_device(adapter, &info); ++ #ifndef CONFIG_SND_FTSSP010_AC97 ++ client = i2c_new_device(adapter, &info); ++ #endif ++ if (!client) { ++ printk("$$$$$????? : i2c_new_device() failed.....\n"); ++ return -ENODEV; ++ } ++ /* ++ * We know the driver is already loaded, so the device should be ++ * already bound. If not it means binding failed, and then there ++ * is no point in keeping the device instantiated. ++ */ ++ if (!client->driver) { ++ //i2c_unregister_device(client); ++ #ifndef CONFIG_SND_FTSSP010_AC97 ++ i2c_unregister_device(client); ++ #endif ++ return -ENODEV; ++ } ++ ++ /* ++ * Let i2c-core delete that device on driver removal. ++ * This is safe because i2c-core holds the core_lock mutex for us. ++ */ ++ list_add_tail(&client->detected, &client->driver->clients); ++ ++ return ret; ++} ++ ++static irqreturn_t gpio2_irq(int irq, void *dev_id) ++{ ++ ++ struct i2c_client *client = (struct i2c_client *)dev_id; ++ struct alc5630_data *alc5630 = i2c_get_clientdata(client); ++ ++ unsigned long org_mask, org_dir; ++ //printk("@@@@@#####$$$$$!!!!! : gpio2_irq is detected ???....\n"); ++ ++ //Mask GPIO interrupt ++ //REG32(AMIC_VA_BASE + 0x80) = REG32(AMIC_VA_BASE + 0x80) & ~(1 << 13); //GPIO interrupt disable ++ ++ //Dump AMIC contents and ir~ ++ unsigned long ir0,ir1,ir2, ir3, ir4, ir5; ++ unsigned long ir6, ir7, ir8, ir9, ir10; ++ unsigned long ir11, ir12, ir13, ir14, ir15; ++ unsigned long gpio2_value, value; ++ char data[3]; ++ ++ ++ //__asm__ volatile ("setgie.d\n\t"); ++ //REG32(AMIC_VA_BASE + 0x80) = 0; ++ ++ ++ //Get original gpio direction ++ org_dir = REG32(GPIO_VA_BASE + 0x08); ++ ++ //Get original gpio interrupt mask ++ org_mask = REG32(GPIO_VA_BASE + 0x2c); ++ ++ //Mask all gpio interrupt ++ REG32(GPIO_VA_BASE + 0x2c) = 0x0000FFFF; ++ //Clear all gpio interrupt ++ REG32(GPIO_VA_BASE + 0x30) = 0x0000FFFF; ++ ++ //Get gpio pin value ++ REG32(GPIO_VA_BASE + 0x08) = REG32(GPIO_VA_BASE + 0x08) & ~(0x1UL << 2); //Set gpio2 direction => input ++ gpio2_value = (REG32(GPIO_VA_BASE + 0x04) >> 2 & 1); ++ //printk("The gpio2 value is 0x%08x\n",gpio2_value); ++ ++ ++ alc5630->gpio2_value = gpio2_value; ++ schedule_delayed_work(&alc5630->work, 1); ++ ++ /*if (gpio3_value==0x1) { ++ i2s_alc5630_write(0x02, 0x0000, g_i2c_client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0040 , g_i2c_client); ++ printk("External speaker is plugged in... and Try to mute internal speaker......\n"); ++ } ++ else { ++ i2s_alc5630_write(0x02, 0x5F5F, g_i2c_client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0440 , g_i2c_client); ++ printk("External speaker is pulled ... 
and Try to unmute internal speaker......\n"); ++ }*/ ++ ++ ++ //Set gpio3 pin value to 0 ++ //REG32(GPIO_VA_BASE + 0x08) = REG32(GPIO_VA_BASE + 0x08) | (0x1UL << 3); //Set gpio3 direction => output ++ //value = REG32(GPIO_VA_BASE + 0x00) & ~(0x1UL << 3); ++ //REG32(GPIO_VA_BASE + 0x00) = value; ++ ++ //Get gpio pin value again ++ //REG32(GPIO_VA_BASE + 0x08) = REG32(GPIO_VA_BASE + 0x08) & ~(0x1UL << 3); //Set gpio3 direction => input ++ //gpio3_value = (REG32(GPIO_VA_BASE + 0x04) >> 3 & 1); ++ //printk("The gpio3 value is 0x%08x\n",gpio3_value); ++ ++ //restore original gpio interrupt mask ++ REG32(GPIO_VA_BASE + 0x2c) = org_mask; ++ ++ //restore original gpio direction ++ REG32(GPIO_VA_BASE + 0x08) = org_dir; ++ ++ //unmask GPIO interrupt ++ //REG32(AMIC_VA_BASE + 0x80) = REG32(AMIC_VA_BASE + 0x80) | (1 << 13); //GPIO interrupt enable ++ REG32(AMIC_VA_BASE + 0x84) = REG32(AMIC_VA_BASE + 0x84) | (1 << 13); //Clear GPIO interrupt ++ REG32(GPIO_VA_BASE + 0x30) = 0x0000FFFF; //Clear all gpio interrupt ++ ++ //printk("@@@@@#####$$$$$!!!!! : gpio2_irq() is exited....\n"); ++ ++ return IRQ_HANDLED; ++} ++ ++static void iic_work(struct work_struct *work) ++{ ++ char data[3]; ++ struct alc5630_data *alc5630 = ++ container_of(to_delayed_work(work), struct alc5630_data, work); ++ ++ ++ if (alc5630->gpio2_value==0x0) { ++ //i2s_alc5630_write(0x02, 0x0000, alc5630->client); ++ i2s_alc5630_write(0x02, 0x5F5F, alc5630->client); ++ i2s_alc5630_write(0x3A, (i2s_alc5630_read(0x3A, data,alc5630->client) & 0xFBFF) | 0x0040 , alc5630->client); ++ //printk("External speaker is plugged in... and Try to mute internal speaker......\n"); ++ //i2s_alc5630_read_test(alc5630->client); ++ } ++ else { ++ //i2s_alc5630_write(0x02, 0x5F5F, alc5630->client); ++ i2s_alc5630_write(0x02, 0x0000, alc5630->client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,alc5630->client)|0x0440 , alc5630->client); ++ //printk("External speaker is pulled ... 
and Try to unmute internal speaker......\n"); ++ //i2s_alc5630_read_test(alc5630->client); ++ } ++ //ftsdc_enable_irq(host, false); ++ ++ ++ //ftsdc_enable_irq(host, true); ++} ++ ++static int alc5630_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) ++{ ++ ++ struct alc5630_data *alc5630; ++ struct i2c_adapter *adap = client->adapter; ++ //unsigned char ret1, ret2; ++ struct i2c_msg msg; ++ char rbuf[3], wbuf[3]; ++ int ret, i2c_rvalue, i2c_wvalue; ++ unsigned int gpio2_value; ++ char data[3]; ++ ++ //i2c_dbg( 1, "alc5630_i2c_probe() is called.\n"); ++ //printk(">>>>>>>>>> (3) alc5630_i2c_probe() is called.\n"); ++ ++ alc5630 = kzalloc(sizeof(struct alc5630_data), GFP_KERNEL); ++ ++ if (!alc5630) ++ return -ENOMEM; ++ ++ mutex_init(&alc5630->mtx); ++ client-> flags = 0; ++ alc5630->client = client; ++ i2c_set_clientdata(client, alc5630); ++ ++ //ADD by river 2011.05.18 for GPIO2 interrupt => edge trigger and rising edge ++ REG32(GPIO_VA_BASE + 0x20) = 0x00000000; ++ REG32(GPIO_VA_BASE + 0x2c) = 0x00000000; ++ REG32(GPIO_VA_BASE + 0x2c) = 0x00000000; ++ REG32(GPIO_VA_BASE + 0x30) = 0x0000FFFF; ++ REG32(GPIO_VA_BASE + 0x38) = 0x0000FFFF; ++ REG32(GPIO_VA_BASE + 0x40) = 0x0000FFFF; ++ ++ ++ REG32(GPIO_VA_BASE + 0x08) = REG32(GPIO_VA_BASE + 0x08) & ~(1 << 2); //GPIO2 as input ++ REG32(GPIO_VA_BASE + 0x34) = REG32(GPIO_VA_BASE + 0x34) & ~(1 << 2); //GPIO2 edge trigger ++ //REG32(GPIO_VA_BASE + 0x38) = REG32(GPIO_VA_BASE + 0x38) & ~(1 << 2); //GPIO2 single edge ++ REG32(GPIO_VA_BASE + 0x38) = REG32(GPIO_VA_BASE + 0x38) | (1 << 2); //GPIO2 both edge ++ REG32(GPIO_VA_BASE + 0x3c) = REG32(GPIO_VA_BASE + 0x3c) & ~(1 << 2); //GPIO2 rising edge ++ REG32(GPIO_VA_BASE + 0x20) = REG32(GPIO_VA_BASE + 0x20) | (1 << 2); //GPIO2 pin interrupt enable ++ ++ REG32(AMIC_VA_BASE + 0x20) = REG32(AMIC_VA_BASE + 0x20) | (1 << 13); //Interrupt Trigger Mode (edge trigger) ++ REG32(AMIC_VA_BASE + 0x24) = REG32(AMIC_VA_BASE + 0x24) & ~(1 << 13); //Interrupt Trigger edge(rising edge) ++ REG32(AMIC_VA_BASE + 0x80) = REG32(AMIC_VA_BASE + 0x80) | (1 << 13); //GPIO interrupt enabled ++ //REG32(AMIC_VA_BASE + 0x80) = 0; //GPIO interrupt enabled ++ ++ if (request_irq(GPIO_FTGPIO010_IRQ, gpio2_irq, IRQF_SHARED, "gpio2", client)) { ++ printk("Failed to request GPIO2 interrupt.\n"); ++ goto fail; ++ } ++ ++ //ADD by river 2011.07.11 ++ //REG32(GPIO_VA_BASE + 0x08) = REG32(GPIO_VA_BASE + 0x08) & ~(0x1UL << 3); //Set gpio3 direction => input ++ gpio2_value = (REG32(GPIO_VA_BASE + 0x04) >> 2 & 1); ++ //printk("The gpio2 value is 0x%08x\n",gpio2_value); ++ alc5630->gpio2_value = gpio2_value; ++ ++ if (gpio2_value==0x0) { ++ i2s_alc5630_write(0x02, 0x5F5F, alc5630->client); ++ i2s_alc5630_write(0x3A, (i2s_alc5630_read(0x3A, data,alc5630->client) & 0xFBFF) | 0x0040 , alc5630->client); ++ //printk("@@@@@ alc5630_i2c_probe : External speaker is plugged in... and Try to mute internal speaker......\n"); ++ //i2s_alc5630_read_test(alc5630->client); ++ } ++ else { ++ i2s_alc5630_write(0x02, 0x0000, alc5630->client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,alc5630->client)|0x0440 , alc5630->client); ++ //printk("@@@@@ alc5630_i2c_probe : External speaker is pulled ... 
and Try to unmute internal speaker......\n"); ++ //i2s_alc5630_read_test(alc5630->client); ++ } ++ ++ //End ADD by river 2011.07.11 ++ ++ //ADD by river 2011.06.02 ++ INIT_DELAYED_WORK(&alc5630->work, iic_work); ++ //End ADD by river 2011.06.02 ++ //End ADD by river 2011.05.18 ++ ++ ftssp_alsa_init(client); ++ ++ return 0; ++ fail: ++ mutex_destroy(&alc5630->mtx); ++ kfree(alc5630); ++ return -EINVAL; ++ ++} ++ ++////////End ADD by river 2011.01.26 ++#endif ++ ++/* --------------------------------------------------------------------------- ++ * Preserved size of memory space for audio DMA ring ++ */ ++#define FTSSP_HW_DMA_SIZE (512 * 1024) ++ ++/* Buffer sizes reported to ALSA layer - AC97 mode */ ++ ++/* ring size, exported to application */ ++#define AC97_HW_BUFFER_BYTES_MAX (42 * 1024) ++/* should not exceed AC97_HW_PERIOD_BYTES_MAX */ ++#define AC97_HW_PERIOD_BYTES_MIN (2 * 1024) ++/* AC97_HW_PERIOD_BYTES_MAX * AC97_HW_PERIODS_MAX <= AC97_HW_BUFFER_SIZE */ ++#define AC97_HW_PERIOD_BYTES_MAX (8 * 1024) ++/* 3 <= AC97_HW_PERIODS_MIN <= AC97_HW_PERIODS_MAX */ ++#define AC97_HW_PERIODS_MIN 3 ++/* AC97_HW_PERIOD_BYTES_MAX * AC97_HW_PERIODS_MAX <= AC97_HW_BUFFER_SIZE */ ++#define AC97_HW_PERIODS_MAX 5 ++ ++/* Driver internal dma buffer size, x2 for S16_LE(16-bits) to AC97 (20-bits), ++ * x6 for sampling rate converion from minimum 8k to AC97 48k. ++ * ++ * Note that AC97 mode cannot do playback and recording simultaneouly. So we ++ * use up all FTSSP_HW_DMA_SIZE of memory. ++ */ ++#define AC97_HW_DMA_SIZE (AC97_HW_BUFFER_BYTES_MAX * 2 * 6) ++ ++/* Buffer sizes reported to ALSA layer - I2S mode */ ++ ++/* ring size, exported to application */ ++#define I2S_HW_BUFFER_BYTES_MAX (256 * 1024) ++/* should not exceed I2S_HW_PERIOD_BYTES_MAX */ ++#define I2S_HW_PERIOD_BYTES_MIN (2 * 1024) ++/* I2S_HW_PERIOD_BYTES_MAX * I2S_HW_PERIODS_MAX <= I2S_HW_BUFFER_SIZE */ ++#define I2S_HW_PERIOD_BYTES_MAX (32 * 1024) ++/* 3 <= I2S_HW_PERIODS_MIN <= I2S_HW_PERIODS_MAX */ ++#define I2S_HW_PERIODS_MIN 3 ++/* I2S_HW_PERIOD_BYTES_MAX * I2S_HW_PERIODS_MAX <= I2S_HW_BUFFER_SIZE */ ++#define I2S_HW_PERIODS_MAX 8 ++ ++/* Page-in size for playback and capture each. 
Note that I2S mode can do ++ * playback and recording simultaneouly, so this size should be less than or ++ * equal to FTSSP_HW_DMA_SIZE/2 ++ */ ++#define I2S_HW_DMA_SIZE (I2S_HW_BUFFER_BYTES_MAX) ++ ++/* --------------------------------------------------------------------------- ++ * Audio formats ++ */ ++#define AC97_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) ++#define AC97_CODEC_SAMPLE_RATES (SNDRV_PCM_RATE_48000 | \ ++ SNDRV_PCM_RATE_16000 | \ ++ SNDRV_PCM_RATE_8000) ++#define AC97_CODEC_SAMPLE_RATE_MIN (8000) ++#define AC97_CODEC_SAMPLE_RATE_MAX (48000) ++ ++#define I2S_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) ++#define I2S_CODEC_SAMPLE_RATES (SNDRV_PCM_RATE_48000 | \ ++ SNDRV_PCM_RATE_44100 | \ ++ SNDRV_PCM_RATE_32000 | \ ++ SNDRV_PCM_RATE_22050 | \ ++ SNDRV_PCM_RATE_16000 | \ ++ SNDRV_PCM_RATE_11025 | \ ++ SNDRV_PCM_RATE_8000) ++#define I2S_CODEC_SAMPLE_RATE_MIN (8000) ++#define I2S_CODEC_SAMPLE_RATE_MAX (48000) ++ ++ ++/* --------------------------------------------------------------------------- ++ * Configuration ++ */ ++#if (CONFIG_PROC_FS == 0) ++#undef FTSSP_PROC_FS ++#define FTSSP_PROC_FS 0 ++#else ++#if (FTSSP_PROC_FS) ++#include ++#endif /* FTSSP_PROC_FS */ ++#endif /* CONFIG_PROC_FS */ ++ ++#define FTSSP_CARD_ID "ftssp010" ++#define FTSSP_DRIVER_NAME "ftssp" ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Faraday Technology Corp."); ++MODULE_DESCRIPTION("FTSSP010 Linux 2.6 Driver"); ++ ++static int cardno = 0; ++//static const unsigned int SSP_FTSSP010_pa_base[SSP_FTSSP010_IRQ_COUNT] = ++// { SSP_FTSSP010_PA_BASE }; ++ ++/* Driver mode */ ++#ifdef CONFIG_SND_FTSSP010_AC97 ++static int ac97 = 1; ++#else ++static int ac97 = 0; ++#endif ++ ++// ---------------------------------------------- ++module_param(cardno, int, 0); ++MODULE_PARM_DESC(cardno, "FTSSP No."); ++ ++module_param(ac97, int, 0); ++MODULE_PARM_DESC(ac97, "AC97 mode"); ++// ---------------------------------------------- ++ ++/* --------------------------------------------------------------------------- ++ * Structures ++ */ ++ ++/* private data for card */ ++typedef struct { ++ struct snd_card *card; ++ struct snd_pcm *pcm; ++ struct snd_pcm_substream *substream_tx; ++ struct snd_pcm_substream *substream_rx; ++#if (FTSSP_PROC_FS) ++ struct snd_info_entry *info_buf_max; ++ struct snd_info_entry *info_period_min; ++ struct snd_info_entry *info_period_max; ++ struct snd_info_entry *info_periods_min; ++ struct snd_info_entry *info_periods_max; ++#endif ++} ftssp_chip; ++ ++/* dma request descriptors */ ++dmad_chreq dma_chreq_tx = { ++ .channel = -1, ++ .drq = NULL, ++}; ++ ++dmad_chreq dma_chreq_rx = { ++ .channel = -1, ++ .drq = NULL, ++}; ++ ++/* Holds ALSA card instance pointers */ ++struct snd_card *ftssp_cards[SSP_FTSSP010_COUNT]; ++ ++/* snd_pcm_hardware */ ++static struct snd_pcm_hardware snd_ftssp_pcm_hw = ++{ ++ .info = SNDRV_PCM_INFO_INTERLEAVED, ++ .formats = I2S_CODEC_FORMATS, ++ .rates = I2S_CODEC_SAMPLE_RATES, ++ .rate_min = I2S_CODEC_SAMPLE_RATE_MIN, ++ .rate_max = I2S_CODEC_SAMPLE_RATE_MAX, ++ .channels_min = 1, ++ .channels_max = 2, ++ .buffer_bytes_max = I2S_HW_BUFFER_BYTES_MAX, ++ .period_bytes_min = I2S_HW_PERIOD_BYTES_MIN, ++ .period_bytes_max = I2S_HW_PERIOD_BYTES_MAX, ++ .periods_min = I2S_HW_PERIODS_MIN, ++ .periods_max = I2S_HW_PERIODS_MAX, ++}; ++ ++/* private data for a substream (playback or capture) */ ++/* function pointer for set up AHBDMA for this substream */ ++typedef void (*start_t)(int cardno, unsigned use_dma); ++typedef void (*pmu_set_clocking_t)(unsigned int); ++typedef void 
(*ftssp010_config_t)(int cardno, unsigned is_stereo, ++ unsigned speed, int use8bit); ++ ++typedef struct { ++ u32 busy; ++ spinlock_t dma_lock; ++ unsigned dma_area_va; ++ int dma_width; ++ unsigned int tx_period; ++ unsigned int rx_period; ++ ++ start_t start; ++ pmu_set_clocking_t pmu_set_clocking; ++ ftssp010_config_t hw_config; ++} ftssp_substream; ++ ++static ftssp_substream ftssp010_substreams[2] = { ++ /* Playback substream */ ++ { ++ busy : 0, ++ start : ftssp010_start_tx, ++ pmu_set_clocking : pmu_set_i2s_clocking, ++ hw_config : ftssp010_config_tx, ++ }, ++ /* Capture substream */ ++ { ++ busy : 0, ++ start : ftssp010_start_rx, ++ pmu_set_clocking : pmu_set_i2s_clocking, ++ hw_config : ftssp010_config_rx, ++ } ++}; ++ ++/* (AC97 only) Convert 16 bits PCM data in user buffer to/from 20 bits PCM data ++ * (32 bits actaully in dma buffer) for AC97 codec. ++ */ ++static int snd_ftssp_playback_copy(struct snd_pcm_substream *substream, ++ int channel, snd_pcm_uframes_t pos, void *usr_buf, ++ snd_pcm_uframes_t count) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *) runtime->private_data; ++ ++ ++ //printk("~~~~~ : snd_ftssp_playback_copy() is invoked....\n"); ++ u32 *dma_va = NULL; ++ u16 *usr_va = usr_buf; ++ int copy_words; ++ int pcm_data; ++ ++ dmad_chreq *dma_chreq; ++ ++ /* frames_to_bytes(runtime, pos + count) * 2(bytes/per pcm) / ++ * 4(bytes per dma unit) */ ++ u32 sw_ptr = (u32)frames_to_bytes(runtime, pos + count) >> 1; ++ ++ switch (runtime->rate) { ++ case 8000: ++ sw_ptr *= 6; ++ ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos * 6) * 2); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + ++ (u32)2 * frames_to_bytes(runtime, count * 6)); ++ ++ if (runtime->channels == 1) { ++ while (count--) { ++ dma_va[0] = (u32)(*usr_va++) << 4; ++ dma_va[1] = dma_va[2] = dma_va[3] = ++ dma_va[4] = dma_va[5] = dma_va[0]; ++ //memcpy(&dma_va[1], &dma_va[0], 5 * 4 * 1); ++ dma_va += 6; ++ } ++ } else { // assume 2 channels ++ while (count--) { ++ dma_va[0] = (u32)(*usr_va++) << 4; ++ dma_va[1] = (u32)(*usr_va++) << 4; ++ dma_va[2] = dma_va[4] = dma_va[6] = ++ dma_va[8] = dma_va[10] = dma_va[0]; ++ dma_va[3] = dma_va[5] = dma_va[7] = ++ dma_va[9] = dma_va[11] = dma_va[0]; ++ //memcpy(&dma_va[2], &dma_va[0], 5 * 4 * 2); ++ dma_va += 12; ++ } ++ } ++ break; ++ case 16000: ++ sw_ptr *= 3; ++ ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos * 3) * 2); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + ++ (u32)2 * frames_to_bytes(runtime, count * 3)); ++ ++ if (runtime->channels == 1) { ++ while (count--) { ++ dma_va[0] = (u32)(*usr_va++) << 4; ++ dma_va[1] = dma_va[2] = dma_va[0]; ++ //memcpy(&dma_va[1], &dma_va[0], 2 * 4 * 1); ++ dma_va += 3; ++ } ++ } else { // assume 2 channels ++ while (count--) { ++ dma_va[0] = (u32)(*usr_va++) << 4; ++ dma_va[1] = (u32)(*usr_va++) << 4; ++ dma_va[2] = dma_va[4] = dma_va[0]; ++ dma_va[3] = dma_va[5] = dma_va[1]; ++ //memcpy(&dma_va[2], &dma_va[0], 2 * 4 * 2); ++ dma_va += 6; ++ } ++ } 
++ break; ++ case 48000: ++ default: ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos) * 2); ++ copy_words = 2 * frames_to_bytes(runtime, count) / sizeof(u32); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + (u32)copy_words*4); ++ ++ while (copy_words--) { ++ pcm_data = (*usr_va++); ++ *dma_va++= pcm_data << 4; ++ } ++ break; ++ } ++ ++ dma_chreq = &dma_chreq_tx; ++ ++ if (dmad_update_ring_sw_ptr(dma_chreq, sw_ptr, ++ (runtime->status->state == SNDRV_PCM_STATE_RUNNING) ? 1:0) != 0) ++ { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static int snd_ftssp_capture_copy(struct snd_pcm_substream *substream, ++ int channel, snd_pcm_uframes_t pos, void *usr_buf, ++ snd_pcm_uframes_t count) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *) runtime->private_data; ++ ++ //printk("~~~~~ : snd_ftssp_capture_copy() is invoked....\n"); ++ //printk(">>>>>>>>>> : snd_ftssp_capture_copy() for recording....\n"); ++ u32 *dma_va = NULL; ++ u16 *usr_va = usr_buf; ++ ++ switch (runtime->rate) { ++ case 8000: ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos * 6) * 2); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + ++ (u32)2 * frames_to_bytes(runtime, count * 6)); ++ ++ if (runtime->channels == 1) { ++ while (count--) { ++ *usr_va++ = (u16)(dma_va[0] >> 4); ++ dma_va += 6; ++ } ++ } else { ++ while (count--) { ++ usr_va[0] = (u16)(dma_va[0] >> 4); ++ ++ /* [hw-limit] only slot-3 has valid data in ++ * recording mode -- check TAG_DATA_MONO ++ * defined in "FTSSP010_lib.c". Mask out ++ * one channel to avoid hi-freq noise. ++ */ ++ usr_va[1] = usr_va[0]; ++ usr_va += 2; ++ dma_va += 12; ++ } ++ } ++ break; ++ case 16000: ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos * 3) * 2); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + ++ (u32)2 * frames_to_bytes(runtime, count * 3)); ++ ++ if (runtime->channels == 1) { ++ while (count--) { ++ *usr_va++ = (u16)(dma_va[0] >> 4); ++ dma_va += 3; ++ } ++ } else { ++ while (count--) { ++ usr_va[0] = (u16)(dma_va[0] >> 4); ++ ++ /* [hw-limit] only slot-3 has valid data in ++ * recording mode -- check TAG_DATA_MONO ++ * defined in "FTSSP010_lib.c". Mask out ++ * one channel to avoid hi-freq noise. 
++ */ ++ usr_va[1] = usr_va[0]; ++ usr_va += 2; ++ dma_va += 6; ++ } ++ } ++ break; ++ case 48000: ++ default: ++ dma_va = (unsigned *)(ftssp010_substream->dma_area_va + ++ frames_to_bytes(runtime, pos) * 2); ++ ++ VVDBG("%s: pos(0x%08x) count(0x%08x) next_pos(0x%08x)\n", ++ __func__, (u32)pos, (u32)count, (u32)(pos + count)); ++ VVDBG("%s: va base(0x%08x) range (0x%08x ~ 0x%08x)\n", ++ __func__, (u32)ftssp010_substream->dma_area_va, ++ (u32)dma_va, ++ (u32)dma_va + ++ (u32)2 * frames_to_bytes(runtime, count)); ++ ++ if (runtime->channels == 1) { ++ while (count--) { ++ *usr_va++ = (u16)(*dma_va++ >> 4); ++ } ++ } else { ++ while (count--) { ++ usr_va[0] = (u16)(dma_va[0] >> 4); ++ ++ /* [hw-limit] only slot-3 has valid data in ++ * recording mode -- check TAG_DATA_MONO ++ * defined in "FTSSP010_lib.c". Mask out ++ * one channel to avoid hi-freq noise. ++ */ ++ usr_va[1] = usr_va[0]; ++ usr_va += 2; ++ dma_va += 2; ++ } ++ } ++ break; ++ } ++ ++ return 0; ++} ++ ++/** ++ * These dma callbacks are called in interrupt context. ++ * @data: pointer to the chip-wide structure. ++ * TODO: use stream-specifc data ++ */ ++__attribute__((__unused__)) ++static void ftssp_dma_callback_tx(int ch, u16 int_status, void *data) ++{ ++ ftssp_chip *chip = (ftssp_chip *)data; ++ ++ //printk("~~~~~ : ftssp_dma_callback_tx() is invoked....\n"); ++ if (!ac97) { ++ /* in i2s mode, no indication to driver for user data length. ++ * For simplicity, just go ahead by one period */ ++ ++ struct snd_pcm_runtime *runtime = chip->substream_tx->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ u32 sw_ptr; ++ u32 tx_period = ftssp010_substream->tx_period + 1; ++ ++ if (tx_period == runtime->periods) ++ sw_ptr = runtime->buffer_size; ++ else ++ sw_ptr = tx_period * runtime->period_size; ++ ++ sw_ptr = (u32)frames_to_bytes(runtime, sw_ptr) >> 1; ++ ++ if (dmad_update_ring_sw_ptr(&dma_chreq_tx, (u32)sw_ptr, 0)) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ } ++ ++ ftssp010_substream->tx_period = tx_period % runtime->periods; ++ } ++ ++ snd_pcm_period_elapsed(chip->substream_tx); ++} ++ ++__attribute__((__unused__)) ++static void ftssp_dma_callback_rx(int ch, u16 int_status, void *data) ++{ ++ ftssp_chip *chip = (ftssp_chip *)data; ++ struct snd_pcm_runtime *runtime = chip->substream_rx->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ ++ //printk("~~~~~ : ftssp_dma_callback_rx() is invoked....\n"); ++ //printk(">>>>>>>>>> : ftssp_dma_callback_rx() for recording....\n"); ++ ++ u32 sw_ptr; ++ u32 rx_period = ftssp010_substream->rx_period + 1; ++ ++ if (rx_period == runtime->periods) ++ sw_ptr = runtime->buffer_size; ++ else ++ sw_ptr = rx_period * runtime->period_size; ++ ++ if (ac97) { ++ switch (runtime->rate) { ++ case 8000: ++ sw_ptr = sw_ptr * 6; ++ break; ++ case 16000: ++ sw_ptr = sw_ptr * 3; ++ break; ++ case 48000: ++ default: ++ break; ++ } ++ } ++ sw_ptr = (u32)frames_to_bytes(runtime, sw_ptr) >> 1; ++ ++ if (dmad_update_ring_sw_ptr(&dma_chreq_rx, (u32)sw_ptr, 0) != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ } ++ ++ ftssp010_substream->rx_period = rx_period % runtime->periods; ++ ++ snd_pcm_period_elapsed(chip->substream_rx); ++} ++ ++static inline int snd_ftssp_dma_ch_alloc(struct snd_pcm_substream *substream) ++{ ++ dmad_chreq *ch_req __attribute__((__unused__)) = 0; ++ ++ //printk("~~~~~ WATCH : snd_ftssp_dma_ch_alloc() is invoked....\n"); ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++ if 
(substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ ch_req = &dma_chreq_tx; ++ ch_req->completion_cb = ftssp_dma_callback_tx; ++ ch_req->apb_req.tx_dir = DMAD_DIR_A0_TO_A1; ++ /*for amerald ac97 ssp2 */ ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID) ++ { ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97TX_AMERALD; ++ } ++ else ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97TX; ++ } else { ++ ch_req = &dma_chreq_rx; ++ ch_req->completion_cb = ftssp_dma_callback_rx; ++ ch_req->apb_req.tx_dir = DMAD_DIR_A1_TO_A0; ++ /*for amerald ac97 ssp2 */ ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID) ++ { ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97RX_AMERALD; ++ } ++ else ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97RX; ++ } ++ ++ ch_req->controller = DMAD_DMAC_APB_CORE; ++ ch_req->flags = DMAD_FLAGS_RING_MODE; ++ ch_req->ring_base = 0; ++ ch_req->dev_addr = (dma_addr_t)FTSSP010_DATA_PA(cardno); ++ ch_req->periods = 0; ++ ch_req->period_size = 0; ++ ++ if (ac97) { ++ ch_req->apb_req.ring_ctrl = APBBR_ADDRINC_I4X; ++ ch_req->apb_req.ring_reqn = APBBR_REQN_NONE; ++ ch_req->apb_req.dev_ctrl = APBBR_ADDRINC_FIXED; ++ ch_req->apb_req.burst_mode = 0; ++ ch_req->apb_req.data_width = APBBR_DATAWIDTH_4; ++ } else { ++ ch_req->apb_req.ring_ctrl = APBBR_ADDRINC_I2X; ++ ch_req->apb_req.ring_reqn = APBBR_REQN_NONE; ++ ch_req->apb_req.dev_ctrl = APBBR_ADDRINC_FIXED; ++ ch_req->apb_req.burst_mode = 0; ++ ch_req->apb_req.data_width = APBBR_DATAWIDTH_2; ++ } ++ ++ ch_req->completion_data = (void *)snd_pcm_substream_chip(substream); ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ ERR("%s: APBDMA channel allocation failed\n",__func__); ++ goto _try_ahb; ++ } ++ ++ DBG("%s: APBDMA channel allocated (ch: %d) ring_mode\n", ++ __func__, ch_req->channel); ++ ++ return 0; ++ ++_try_ahb: ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ ch_req = &dma_chreq_tx; ++ ch_req->completion_cb = ftssp_dma_callback_tx; ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A0_TO_A1; ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID) ++ { ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97TX_AMERALD; ++ } ++ else ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97TX; ++ } else { ++ ch_req = &dma_chreq_rx; ++ ch_req->completion_cb = ftssp_dma_callback_rx; ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A1_TO_A0; ++ if((inl(PMU_BASE) & AMERALD_MASK) == AMERALD_PRODUCT_ID) ++ { ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97RX_AMERALD; ++ } ++ else ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97RX; ++ } ++ ++ ch_req->controller = DMAD_DMAC_AHB_CORE; ++ ch_req->flags = DMAD_FLAGS_RING_MODE; ++ ch_req->ring_base = 0; ++ ch_req->dev_addr = (dma_addr_t)FTSSP010_DATA_PA(cardno); ++ ch_req->periods = 0; ++ ch_req->period_size = 0; ++ ++ ch_req->ahb_req.sync = 1; ++ ch_req->ahb_req.priority = DMAC_CSR_CHPRI_2; ++ ch_req->ahb_req.hw_handshake = 1; ++ ch_req->ahb_req.burst_size = DMAC_CSR_SIZE_1; ++ ++ if (ac97) { ++ ch_req->ahb_req.ring_width = DMAC_CSR_WIDTH_32; ++ ch_req->ahb_req.ring_ctrl = DMAC_CSR_AD_INC; ++ ch_req->ahb_req.ring_reqn = DMAC_REQN_NONE; ++ ch_req->ahb_req.dev_width = DMAC_CSR_WIDTH_32; ++ ch_req->ahb_req.dev_ctrl = DMAC_CSR_AD_FIX; ++ } else { ++ ch_req->ahb_req.ring_width = DMAC_CSR_WIDTH_16; ++ ch_req->ahb_req.ring_ctrl = DMAC_CSR_AD_INC; ++ ch_req->ahb_req.ring_reqn = DMAC_REQN_NONE; ++ ch_req->ahb_req.dev_width = DMAC_CSR_WIDTH_16; ++ ch_req->ahb_req.dev_ctrl = DMAC_CSR_AD_FIX; ++ } ++ ++ ch_req->completion_data = (void 
*)snd_pcm_substream_chip(substream); ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ ERR("%s: AHBDMA channel allocation failed\n", __func__); ++ goto _err_exit; ++ } ++ ++ DBG("%s: AHBDMA channel allocated (ch: %d) ring_mode\n", ++ __func__, ch_req->channel); ++ ++ return 0; ++ ++_err_exit: ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++ return -ENODEV; ++} ++ ++static inline ftssp_substream *ftssp010_substream_new(int stream_id) ++{ ++ ftssp_substream *s = NULL; ++ ++ //printk("~~~~~ : ftssp010_substream_new() is invoked....\n"); ++ ++ switch (stream_id) { ++ case SNDRV_PCM_STREAM_PLAYBACK: ++ s = &ftssp010_substreams[0]; ++ break; ++ case SNDRV_PCM_STREAM_CAPTURE: ++ s = &ftssp010_substreams[1]; ++ break; ++ default: ++ ERR("%s: wrong stream type (%d)\n", __func__, stream_id); ++ return NULL; ++ } ++ ++ if (s->busy) { ++ ERR("%s: device busy!\n", __func__); ++ return NULL; ++ } ++ s->busy = 1; ++ ++ spin_lock_init(&s->dma_lock); ++ ++ return s; ++} ++ ++static int snd_ftssp_pcm_open(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ int stream_id = substream->pstr->stream; ++ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ //printk("~~~~~ : snd_ftssp_pcm_open() is invoked....\n"); ++ ++ /* Both playback and capture share a hardware description */ ++ runtime->hw = snd_ftssp_pcm_hw; ++ ++ /* Allocate & Initialize stream-specific data */ ++ runtime->private_data = ftssp010_substream_new(stream_id); ++ ++ if (runtime->private_data) { ++ //printk("~~~~~ YAYAYA @@@@@ : Calling snd_ftssp_dma_ch_alloc().\n"); ++ return snd_ftssp_dma_ch_alloc(substream); ++ } ++ else ++ return -EBUSY; ++} ++ ++static int snd_ftssp_pcm_close(struct snd_pcm_substream *substream) ++{ ++ int stream_id = substream->pstr->stream; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)substream->runtime->private_data; ++ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ //printk("~~~~~ : snd_ftssp_pcm_close() is invoked....\n"); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ++ dmad_channel_free(&dma_chreq_tx); ++ else ++ dmad_channel_free(&dma_chreq_rx); ++ ++ ftssp010_substream->busy = 0; ++ return 0; ++} ++ ++static int snd_ftssp_pcm_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *hw_params) ++{ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ //printk("~~~~~ : snd_ftssp_pcm_hw_params() is invoked....\n"); ++ ++ if (ac97) ++ return snd_pcm_lib_malloc_pages(substream, AC97_HW_DMA_SIZE); ++ else ++ return snd_pcm_lib_malloc_pages(substream, I2S_HW_DMA_SIZE); ++} ++ ++static int snd_ftssp_pcm_hw_free(struct snd_pcm_substream *substream) ++{ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
++ "playback" : "capture"); ++ //printk("~~~~~ : snd_ftssp_pcm_hw_free() is invoked....\n"); ++ ++ if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ dmad_drain_requests(&dma_chreq_tx, 1); ++ else ++ dmad_drain_requests(&dma_chreq_rx, 1); ++ ++ return snd_pcm_lib_free_pages(substream); ++} ++ ++/* Prepare FTSSP010 AHBDMA for playback & capture */ ++static int snd_ftssp_pcm_prepare(struct snd_pcm_substream *substream) ++{ ++ ftssp_chip *chip = snd_pcm_substream_chip(substream); ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ ++ //printk("~~~~~ : snd_ftssp_pcm_prepare() is invoked....\n"); ++ //printk("@@@@@ >>>>> : snd_ftssp_pcm_prepare() is called.\n"); ++ ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ ++ int stream_id = substream->pstr->stream; ++ dmad_chreq *dma_chreq; ++ unsigned period_size, buffer_size; ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ++ dma_chreq = &dma_chreq_tx; ++ else ++ dma_chreq = &dma_chreq_rx; ++ ++ period_size = frames_to_bytes(runtime, runtime->period_size); ++ buffer_size = frames_to_bytes(runtime, runtime->buffer_size); ++ ++ if (runtime->format != SNDRV_PCM_FORMAT_S16_LE) ++ return -ENODEV; ++ ++ if (ac97) { ++ switch (runtime->rate) { ++ case 8000: ++ period_size *= 12; ++ buffer_size *= 12; ++ break; ++ case 16000: ++ period_size *= 6; ++ buffer_size *= 6; ++ break; ++ case 48000: ++ default: ++ period_size *= 2; ++ buffer_size *= 2; ++ break; ++ } ++ ++ ftssp010_substream->dma_width = 4; ++ } else { ++ ftssp010_substream->dma_width = 2; ++ } ++ ++ dmad_drain_requests(dma_chreq, 1); ++ ++ dma_chreq->ring_base = (dma_addr_t)runtime->dma_addr; ++ dma_chreq->periods = (dma_addr_t)runtime->periods; ++ if (ac97) { ++ dma_chreq->period_size = (dma_addr_t)(period_size >> 2); ++ dma_chreq->ring_size = (dma_addr_t)(buffer_size >> 2); ++ } else { ++ dma_chreq->period_size = (dma_addr_t)(period_size >> 1); ++ dma_chreq->ring_size = (dma_addr_t)(buffer_size >> 1); ++ } ++ dmad_update_ring(dma_chreq); ++ ++ /* Set PMU, FTSSP010, and DMA */ ++ spin_lock(&ftssp010_substream->dma_lock); ++ ++ /* keep DMA buffer VA for copy() callback */ ++ // todo: support playback/capture simultaneously ++ ftssp010_substream->dma_area_va = (u32)runtime->dma_area; ++ ++ if (ac97) { ++ ftssp010_substream->pmu_set_clocking(48000); ++ ftssp010_substream->hw_config(cardno, ++ runtime->channels > 1 ? 1 : 0, /* 1: stereo, 0: mono */ ++ 48000, ftssp010_substream->dma_width); ++ } else { ++ ++ ftssp010_substream->pmu_set_clocking(runtime->rate); ++ ftssp010_substream->hw_config(cardno, ++ runtime->channels > 1 ? 
1 : 0, /* 1: stereo, 0: mono */ ++ runtime->rate, ftssp010_substream->dma_width); ++ } ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ ftssp010_substream->tx_period = 0; ++ chip->substream_tx = substream; ++ } else { ++ ftssp010_substream->rx_period = 0; ++ chip->substream_rx = substream; ++ } ++ ++ spin_unlock(&ftssp010_substream->dma_lock); ++ ++ VVDBG("%s <<\n", __func__); ++ ++ return 0; ++} ++ ++static inline int snd_ftssp_start_play(ftssp_substream *ftssp010_substream, ++ struct snd_pcm_runtime *runtime) ++{ ++ int err = 0; ++ ++ //printk("~~~~~ : snd_ftssp_start_play() is invoked....\n"); ++ ++ if (ac97) { ++ /* in ac97 mode, user data was fed to dma buffer through ++ * driver-provided copy callback */ ++ err = dmad_kickoff_requests(&dma_chreq_tx); ++ if (err != 0) { ++ ERR("%s: failed to kickoff dma!\n", __func__); ++ return err; ++ } ++ } else { ++ /* in i2s mode, no indication to driver for user data length ++ * (except start threshold). For simplicity at start, just go ++ * ahead by one cycle */ ++ ++ u32 sw_ptr = ++ (u32)frames_to_bytes(runtime, runtime->buffer_size) >>1; ++ ++ err = dmad_update_ring_sw_ptr(&dma_chreq_tx, sw_ptr, 0); ++ if (err != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ return err; ++ } ++ ++ err = dmad_kickoff_requests(&dma_chreq_tx); ++ if (err != 0) { ++ ERR("%s: failed to kickoff dma!\n", __func__); ++ return err; ++ } ++ } ++ ++ ftssp010_substream->start(cardno, 1); ++ ++ //ADD by river 2011.03.08 ++ //i2s_alc5630_write(0x02, 0x0000, g_i2c_client); ++ //i2s_alc5630_write(0x04, 0x0000, g_i2c_client); ++ //i2s_alc5630_write(0x0c, 0x1010, g_i2c_client); ++ //i2s_alc5630_write(0x10, 0xff03, g_i2c_client); ++ //End ADD by river 2011.03.08 ++ ++ return 0; ++} ++ ++static inline int snd_ftssp_start_record(ftssp_substream *ftssp010_substream, ++ struct snd_pcm_runtime *runtime) ++{ ++ int err = 0; ++ ++ //printk("~~~~~ : snd_ftssp_start_record() is invoked....\n"); ++ ++ u32 sw_ptr = (u32)frames_to_bytes(runtime, runtime->buffer_size); ++ ++ if (ac97) { ++ switch (runtime->rate) { ++ case 8000: ++ sw_ptr = (sw_ptr * 3); ++ break; ++ case 16000: ++ sw_ptr = (sw_ptr * 3) >> 1; ++ break; ++ case 48000: ++ default: ++ sw_ptr = sw_ptr >> 1; ++ break; ++ } ++ } else { ++ ++ //printk(">>>>>>>>>> : snd_ftssp_start_record() for recording....\n"); ++ sw_ptr = sw_ptr >> 1; ++ } ++ ++ err = dmad_update_ring_sw_ptr(&dma_chreq_rx, sw_ptr, 0); ++ if (err != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ return err; ++ } ++ ++ err = dmad_kickoff_requests(&dma_chreq_rx); ++ if (err != 0) { ++ ERR("%s: failed to kickoff dma!\n", __func__); ++ return err; ++ } ++ ++ ftssp010_substream->start(cardno, 1); ++ ++ return 0; ++} ++ ++/* Triggers AHBDMA for playback & capture */ ++static int snd_ftssp_pcm_trigger(struct snd_pcm_substream * substream, int cmd) ++{ ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)substream->runtime->private_data; ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ int err = 0; ++ int stream_id = substream->pstr->stream; ++ ++ //printk("~~~~~ : snd_ftssp_pcm_trigger() is invoked....\n"); ++ ++ /* note local interrupts are already disabled in the midlevel code */ ++ spin_lock(&ftssp010_substream->dma_lock); ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ ++ VDBG("%s: SNDRV_PCM_TRIGGER_START state(0x%08x)\n", ++ __func__, (u32)runtime->status->state); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ err = snd_ftssp_start_play(ftssp010_substream, runtime); ++ } else { ++ err = 
snd_ftssp_start_record(ftssp010_substream, ++ runtime); ++ } ++ break; ++ ++ case SNDRV_PCM_TRIGGER_STOP: ++ ++ VDBG("%s: SNDRV_PCM_TRIGGER_STOP state(0x%08x)\n", ++ __func__, (u32)substream->runtime->status->state); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ ftssp010_stop_tx(cardno); ++ dmad_drain_requests(&dma_chreq_tx, 1); ++ } else { ++ ftssp010_stop_rx(cardno); ++ dmad_drain_requests(&dma_chreq_rx, 1); ++ } ++ break; ++ default: ++ err = -EINVAL; ++ break; ++ } ++ ++ spin_unlock(&ftssp010_substream->dma_lock); ++ return err; ++} ++ ++// pcm middle-layer call this function within irq (snd_pcm_period_elapsed) or ++// with local irq disabled (snd_pcm_lib_write1) ++static snd_pcm_uframes_t snd_ftssp_pcm_pointer( ++ struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ u32 hw_ptr; ++ snd_pcm_uframes_t ret; ++ int stream_id = substream->pstr->stream; ++ ++ //printk("~~~~~ : snd_ftssp_pcm_pointer() is invoked....\n"); ++ ++ ++ /* Fetch DMA pointer, with spin lock */ ++ //spin_lock_irqsave(&ftssp010_substream->dma_lock, flags); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ hw_ptr = dmad_probe_ring_hw_ptr(&dma_chreq_tx); ++ } else { ++ hw_ptr = dmad_probe_ring_hw_ptr(&dma_chreq_rx); ++ } ++ ++ //spin_unlock_irqrestore(&ftssp010_substream->dma_lock, flags); ++ ++ if (ac97) { ++ ret = bytes_to_frames(runtime, hw_ptr << 1); ++ ++ switch (runtime->rate) { ++ case 8000: ++ ret = ret / 6; ++ break; ++ case 16000: ++ ret = ret / 3; ++ break; ++ case 48000: ++ default: ++ break; ++ } ++ } else { ++ ret = bytes_to_frames(runtime, hw_ptr << 1); ++ } ++ ++ ++ VVDBG("%s: hw_ptr(0x%08x) ret(0x%08x)\n", ++ (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ? "p" : "c", ++ (u32)hw_ptr, (u32)ret); ++ ++ /* ALSA requires return value 0 <= ret < buffer_size */ ++ if (ret >= runtime->buffer_size) ++ return 0; ++ return ret; ++} ++ ++/* For FTSSP010 driver, operations are shared among playback & capture */ ++static struct snd_pcm_ops snd_ftssp_playback_ops = { ++ .open = snd_ftssp_pcm_open, ++ .close = snd_ftssp_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = snd_ftssp_pcm_hw_params, ++ .hw_free = snd_ftssp_pcm_hw_free, ++ .prepare = snd_ftssp_pcm_prepare, ++ .trigger = snd_ftssp_pcm_trigger, ++ .pointer = snd_ftssp_pcm_pointer, ++ .copy = NULL, ++}; ++ ++static struct snd_pcm_ops snd_ftssp_capture_ops = { ++ .open = snd_ftssp_pcm_open, ++ .close = snd_ftssp_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = snd_ftssp_pcm_hw_params, ++ .hw_free = snd_ftssp_pcm_hw_free, ++ .prepare = snd_ftssp_pcm_prepare, ++ .trigger = snd_ftssp_pcm_trigger, ++ .pointer = snd_ftssp_pcm_pointer, ++ .copy = NULL, ++}; ++ ++/* ALSA PCM constructor */ ++static int snd_ftssp_new_pcm(ftssp_chip *chip) ++{ ++ struct snd_pcm *pcm; ++ int err; ++ ++ //printk("~~~~~ : snd_ftssp_new_pcm() is invoked....\n"); ++ ++ /* PCM device #0 with 1 playback and 1 capture */ ++ if ((err = snd_pcm_new(chip->card, "ftssp_pcm", 0, 1, 1, &pcm)) < 0) ++ return err; ++ ++ pcm->private_data = chip; ++ strcpy(pcm->name, "ftssp_pcm device"); ++ chip->pcm = pcm; ++ ++ /* set operators for playback and capture*/ ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ++ &snd_ftssp_playback_ops); ++ ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ++ &snd_ftssp_capture_ops); ++ ++ /* Pre-allocate buffer, as suggested by ALSA driver document */ ++ // todo: support playback/capture simultaneously ++ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, ++ NULL, FTSSP_HW_DMA_SIZE, 
FTSSP_HW_DMA_SIZE); ++ ++ /* Force half-duplex (on A320D, or AC97 mode) */ ++ if (ac97) ++ pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX; ++ ++ return 0; ++} ++ ++#if (FTSSP_PROC_FS) ++static void snd_ftssp_buf_max_read(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ snd_iprintf(buffer, "%d\n", snd_ftssp_pcm_hw.buffer_bytes_max); ++} ++ ++static void snd_ftssp_buf_max_write(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ char tmp[128]; ++ char *ptr_e; ++ u32 val; ++ ++ if (buffer->size == 0) ++ return; ++ ++ memset(tmp, 0, 128); ++ snd_info_get_str(tmp, buffer->buffer, 127); ++ ++ val = simple_strtoul(tmp, &ptr_e, 10); ++ if (*ptr_e == 'k') ++ val *= 1024; ++ else if (*ptr_e == 'm') ++ val *= 1024 * 1024; ++ ++ if (ac97) { ++ if (val > AC97_HW_BUFFER_BYTES_MAX) ++ val = AC97_HW_BUFFER_BYTES_MAX; ++ } else { ++ if (val > I2S_HW_BUFFER_BYTES_MAX) ++ val = I2S_HW_BUFFER_BYTES_MAX; ++ } ++ ++ snd_ftssp_pcm_hw.buffer_bytes_max = (size_t)val; ++} ++ ++static void snd_ftssp_period_min_read(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ snd_iprintf(buffer, "%d\n", snd_ftssp_pcm_hw.period_bytes_min); ++} ++ ++static void snd_ftssp_period_min_write(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ char tmp[128]; ++ char *ptr_e; ++ u32 val; ++ ++ if (buffer->size == 0) ++ return; ++ ++ memset(tmp, 0, 128); ++ snd_info_get_str(tmp, buffer->buffer, 127); ++ ++ val = simple_strtoul(tmp, &ptr_e, 10); ++ if (*ptr_e == 'k') ++ val *= 1024; ++ else if (*ptr_e == 'm') ++ val *= 1024 * 1024; ++ ++ snd_ftssp_pcm_hw.period_bytes_min = (size_t)val; ++ ++ if ((val * snd_ftssp_pcm_hw.periods_max) > ++ snd_ftssp_pcm_hw.buffer_bytes_max) { ++ INFO("\nWarning: period_bytes(%d) * periods(%d) exceeds " ++ "hw_buffer_size(%d).\n", ++ snd_ftssp_pcm_hw.period_bytes_min, ++ snd_ftssp_pcm_hw.periods_max, ++ snd_ftssp_pcm_hw.buffer_bytes_max); ++ INFO(" Unexpected access violation may occur!\n"); ++ } ++} ++ ++static void snd_ftssp_period_max_read(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ snd_iprintf(buffer, "%d\n", snd_ftssp_pcm_hw.period_bytes_max); ++} ++ ++static void snd_ftssp_period_max_write(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ char tmp[128]; ++ char *ptr_e; ++ u32 val; ++ ++ if (buffer->size == 0) ++ return; ++ ++ memset(tmp, 0, 128); ++ snd_info_get_str(tmp, buffer->buffer, 127); ++ ++ val = simple_strtoul(tmp, &ptr_e, 10); ++ if (*ptr_e == 'k') ++ val *= 1024; ++ else if (*ptr_e == 'm') ++ val *= 1024 * 1024; ++ ++ snd_ftssp_pcm_hw.period_bytes_max = (size_t)val; ++ ++ if ((val * snd_ftssp_pcm_hw.periods_max) > ++ snd_ftssp_pcm_hw.buffer_bytes_max) { ++ INFO("\nWarning: period_bytes(%d) * periods(%d) exceeds " ++ "hw_buffer_size(%d).\n", ++ snd_ftssp_pcm_hw.period_bytes_max, ++ snd_ftssp_pcm_hw.periods_max, ++ snd_ftssp_pcm_hw.buffer_bytes_max); ++ INFO(" Unexpected access violation may occur!\n"); ++ } ++} ++ ++static void snd_ftssp_periods_min_read(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ snd_iprintf(buffer, "%d\n", snd_ftssp_pcm_hw.periods_min); ++} ++ ++static void snd_ftssp_periods_min_write(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ char tmp[128]; ++ char *ptr_e; ++ u32 val; ++ ++ if (buffer->size == 0) ++ return; ++ ++ memset(tmp, 0, 128); ++ snd_info_get_str(tmp, buffer->buffer, 127); ++ ++ val = simple_strtoul(tmp, &ptr_e, 10); ++ if (*ptr_e == 'k') ++ val *= 1024; ++ else if (*ptr_e == 
'm') ++ val *= 1024 * 1024; ++ ++ snd_ftssp_pcm_hw.periods_min = (size_t)val; ++ ++ if ((val * snd_ftssp_pcm_hw.period_bytes_max) > ++ snd_ftssp_pcm_hw.buffer_bytes_max) { ++ INFO("\nWarning: period_bytes(%d) * periods(%d) exceeds " ++ "hw_buffer_size(%d).\n", ++ snd_ftssp_pcm_hw.period_bytes_max, ++ snd_ftssp_pcm_hw.periods_min, ++ snd_ftssp_pcm_hw.buffer_bytes_max); ++ INFO(" Unexpected access violation may occur!\n"); ++ } ++} ++ ++static void snd_ftssp_periods_max_read(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ snd_iprintf(buffer, "%d\n", snd_ftssp_pcm_hw.periods_max); ++} ++ ++static void snd_ftssp_periods_max_write(struct snd_info_entry *entry, ++ struct snd_info_buffer *buffer) ++{ ++ char tmp[128]; ++ char *ptr_e; ++ u32 val; ++ ++ if (buffer->size == 0) ++ return; ++ ++ memset(tmp, 0, 128); ++ snd_info_get_str(tmp, buffer->buffer, 127); ++ ++ val = simple_strtoul(tmp, &ptr_e, 10); ++ if (*ptr_e == 'k') ++ val *= 1024; ++ else if (*ptr_e == 'm') ++ val *= 1024 * 1024; ++ ++ snd_ftssp_pcm_hw.periods_max = (size_t)val; ++ ++ if ((val * snd_ftssp_pcm_hw.period_bytes_max) > ++ snd_ftssp_pcm_hw.buffer_bytes_max) { ++ INFO("\nWarning: period_bytes(%d) * periods(%d) exceeds " ++ "hw_buffer_size(%d).\n", ++ snd_ftssp_pcm_hw.period_bytes_max, ++ snd_ftssp_pcm_hw.periods_max, ++ snd_ftssp_pcm_hw.buffer_bytes_max); ++ INFO(" Unexpected access violation may occur!\n"); ++ } ++} ++#endif //FTSSP_PROC_FS ++ ++static inline void ftssp_ac97_init(void) ++{ ++ //driver_name = AC97_DRIVER_NAME; ++ //codec_info = AC97_CODEC_NAME; ++ ++ /* Change codec-dependent callbacks to AC97 */ ++ ftssp010_substreams[0].pmu_set_clocking = pmu_set_ac97_clocking; ++ ftssp010_substreams[0].hw_config = ftssp010_config_ac97_play; ++ ftssp010_substreams[1].pmu_set_clocking = pmu_set_ac97_clocking; ++ ftssp010_substreams[1].hw_config = ftssp010_config_ac97_rec; ++ ++ snd_ftssp_playback_ops.copy = snd_ftssp_playback_copy; ++ snd_ftssp_capture_ops.copy = snd_ftssp_capture_copy; ++ ++ snd_ftssp_pcm_hw.rates = AC97_CODEC_SAMPLE_RATES; ++ snd_ftssp_pcm_hw.rate_min = AC97_CODEC_SAMPLE_RATE_MIN; ++ snd_ftssp_pcm_hw.rate_max = AC97_CODEC_SAMPLE_RATE_MAX; ++ snd_ftssp_pcm_hw.formats = AC97_CODEC_FORMATS; ++ snd_ftssp_pcm_hw.buffer_bytes_max = AC97_HW_BUFFER_BYTES_MAX; ++ snd_ftssp_pcm_hw.period_bytes_min = AC97_HW_PERIOD_BYTES_MIN; ++ snd_ftssp_pcm_hw.period_bytes_max = AC97_HW_PERIOD_BYTES_MAX; ++ snd_ftssp_pcm_hw.periods_min = AC97_HW_PERIODS_MIN; ++ snd_ftssp_pcm_hw.periods_max = AC97_HW_PERIODS_MAX; ++} ++ ++static int ftssp_alsa_init(struct i2c_client *client) ++{ ++ ftssp_chip *chip; ++ int err; ++ ++ //ADD by river 2011.03.08 ++ //g_i2c_client = client; ++ //End ADD by river 2011.03.08 ++ //printk(">>>>>>>>> (4) ftssp_alsa_init().\n"); ++ init_hw(cardno, ac97, client); ++ ++ if (ac97) ++ ftssp_ac97_init(); ++ ++ DBG("%s: FTSSP010 #%d (Physical Addr=0x%08X), mode: %s\n", ++ __func__, ++ cardno, SSP_FTSSP010_pa_base[cardno], ++ ac97 ? 
"ac97" : "i2s"); ++ ++ err = snd_card_create(cardno, FTSSP_CARD_ID, THIS_MODULE, ++ sizeof(ftssp_chip), &ftssp_cards[cardno]); ++ if (err < 0) ++ return err; ++ ++ if (ac97) { ++ sprintf(ftssp_cards[cardno]->driver, FTSSP_DRIVER_NAME); ++ sprintf(ftssp_cards[cardno]->shortname, ++ FTSSP_DRIVER_NAME "_ac97"); ++ sprintf(ftssp_cards[cardno]->longname, ++ FTSSP_DRIVER_NAME "_ac97 controller"); ++ } else { ++ sprintf(ftssp_cards[cardno]->driver, FTSSP_DRIVER_NAME); ++ sprintf(ftssp_cards[cardno]->shortname, ++ FTSSP_DRIVER_NAME "_i2s"); ++ sprintf(ftssp_cards[cardno]->longname, ++ FTSSP_DRIVER_NAME "_i2s controller"); ++ } ++ ++ // PCM ++ chip = (ftssp_chip *)(ftssp_cards[cardno]->private_data); ++ chip->card = ftssp_cards[cardno]; ++ ++ if ((err = snd_ftssp_new_pcm(chip))) { ++ ERR("%s, Can't new PCM devices\n",__func__); ++ return -ENODEV; ++ } ++ ++#if (FTSSP_PROC_FS) ++ // new a proc entries subordinate to card->proc_root for debugging ++ // /proc/card#/buf_max ++ snd_card_proc_new(chip->card, "buf_max", &chip->info_buf_max); ++ if (chip->info_buf_max) { ++ chip->info_buf_max->c.text.read = snd_ftssp_buf_max_read; ++ chip->info_buf_max->c.text.write = snd_ftssp_buf_max_write; ++ } ++ // /proc/card#/period_min ++ snd_card_proc_new(chip->card, "period_size_min", ++ &chip->info_period_min); ++ if (chip->info_period_min) { ++ chip->info_period_min->c.text.read = snd_ftssp_period_min_read; ++ chip->info_period_min->c.text.write = ++ snd_ftssp_period_min_write; ++ } ++ // /proc/card#/period_max ++ snd_card_proc_new(chip->card, "period_size_max", ++ &chip->info_period_max); ++ if (chip->info_period_max) { ++ chip->info_period_max->c.text.read = snd_ftssp_period_max_read; ++ chip->info_period_max->c.text.write = ++ snd_ftssp_period_max_write; ++ } ++ // /proc/card#/periods_min ++ snd_card_proc_new(chip->card, "periods_min", &chip->info_periods_min); ++ if (chip->info_periods_min) { ++ chip->info_periods_min->c.text.read = ++ snd_ftssp_periods_min_read; ++ chip->info_periods_min->c.text.write = ++ snd_ftssp_periods_min_write; ++ } ++ // /proc/card#/periods_max ++ snd_card_proc_new(chip->card, "periods_max", &chip->info_periods_max); ++ if (chip->info_periods_max) { ++ chip->info_periods_max->c.text.read = ++ snd_ftssp_periods_max_read; ++ chip->info_periods_max->c.text.write = ++ snd_ftssp_periods_max_write; ++ } ++#endif ++ ++ // Register the card to ALSA ++ if ((err = snd_card_register(chip->card)) == 0) { ++ INFO("%s card registered!\n", FTSSP_CARD_ID); ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_SND_FTSSP010_I2S ++//ADD by river 2011.01.26 ++static int alc5630_i2c_remove(struct i2c_client *client) ++{ ++ struct alc5630_data *alc5630 = i2c_get_clientdata(client); ++ ++ // power down codec chip ++ //tas_write_reg(tas, TAS_REG_ACR, 1, &tmp); ++ ++ mutex_destroy(&alc5630->mtx); ++ kfree(alc5630); ++ return 0; ++} ++ ++static const struct i2c_device_id alc5630_i2c_id[] = { ++ { "alc5630_codec", 0 }, ++ { } ++}; ++ ++// This is the driver that will be inserted ++static struct i2c_driver alc5630_driver = { ++ .driver = { ++ .name = "alc5630_codec", ++ .owner = THIS_MODULE, ++ }, ++ .attach_adapter = alc5630_i2c_attach, ++ .probe = alc5630_i2c_probe, ++ .remove = alc5630_i2c_remove, ++ .suspend = alc5630_i2c_suspend, ++ .resume = alc5630_i2c_resume, ++ .id_table = alc5630_i2c_id, ++}; ++ ++static int alc5630_i2c_suspend(struct i2c_client *i2c_client, pm_message_t mesg) ++{ ++ //printk("@@@@@ TRACE by river 2011.05.10 : alc5630_i2c_suspend() is invoked.\n"); ++ ++ //i2c_del_driver(&alc5630_driver); ++ 
//#ifndef CONFIG_SND_FTSSP010_AC97 ++ // i2c_del_driver(&alc5630_driver); ++ //#endif ++ //printk("@@@@@ TRACE by river 2011.05.10 : alc5630_i2c_suspend() -2 is invoked.\n"); ++ dmad_channel_free(&dma_chreq_tx); ++ dmad_channel_free(&dma_chreq_rx); ++ //printk("@@@@@ TRACE by river 2011.05.10 : alc5630_i2c_suspend() -3 is invoked.\n"); ++ //snd_card_free(ftssp_cards[cardno]); ++ ++ return 0; ++ ++} ++ ++static int alc5630_i2c_resume(struct i2c_client *i2c_client) ++{ ++ ++ //printk("@@@@@ TRACE by river 2011.05.10 : alc5630_i2c_resume() is invoked.\n"); ++ //#ifdef CONFIG_SND_FTSSP010_AC97 ++ // ftssp_alsa_init(NULL); ++ //#else ++ // return i2c_add_driver(&alc5630_driver); ++ //#endif ++ ++ return 0; ++ ++} ++#endif ++ ++static __init int ftssp_alsa_i2c_i2s_init(void) ++{ ++ ++ //printk(">>>>>>>>>> (1) ftssp_alsa_i2c_i2s_init().\n"); ++ //return i2c_add_driver(&alc5630_driver); ++ #ifdef CONFIG_SND_FTSSP010_AC97 ++ return ftssp_alsa_init(NULL); ++ #else ++ return i2c_add_driver(&alc5630_driver); ++ #endif ++} ++//End ADD by river 2011.01.26 ++ ++static __exit void ftssp_alsa_i2c_i2s_exit(void) ++{ ++ DBG("%s, cleaning up\n",__func__); ++ ++ //i2c_del_driver(&alc5630_driver); ++ #ifndef CONFIG_SND_FTSSP010_AC97 ++ i2c_del_driver(&alc5630_driver); ++ #endif ++ ++ dmad_channel_free(&dma_chreq_tx); ++ dmad_channel_free(&dma_chreq_rx); ++ ++ snd_card_free(ftssp_cards[cardno]); ++} ++ ++/*static __exit void ftssp_alsa_exit(void) ++{ ++ DBG("%s, cleaning up\n",__func__); ++ ++ dmad_channel_free(&dma_chreq_tx); ++ dmad_channel_free(&dma_chreq_rx); ++ ++ snd_card_free(ftssp_cards[cardno]); ++}*/ ++ ++//MOD by river 2011.01.26 ++//module_init(ftssp_alsa_init); ++//module_exit(ftssp_alsa_exit); ++module_init(ftssp_alsa_i2c_i2s_init); ++module_exit(ftssp_alsa_i2c_i2s_exit); ++//End MOD by river 2011.01.26 +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_HDA.c linux-3.4.110/sound/nds32/FTSSP010_HDA.c +--- linux-3.4.110.orig/sound/nds32/FTSSP010_HDA.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_HDA.c 2016-04-07 10:20:51.062085666 +0200 +@@ -0,0 +1,745 @@ ++/* FTSSP010 - UDA1345TS module: ++ * ++ * $log$ ++ * ++ * 2006/02/23: I-Jui Sung: OSS emulation half-duplex ++ * playback/capture at 48K, 44.1K, 8K ++ * with mono/stereo 16bit/8bit ++ * ++ * 2006/02/22: I-Jui Sung: OSS emulation playback at 44.1KHz ++ * 16-bit mono completed. Relying ALSA to ++ * resample ++ * 2009/02/24: dma upgrade checking list: ++ * - ac97 mode playback ................. ok ++ * - ac97 mode capture .................. ok ++ * - i2s mode playback .................. ok ++ * - i2s mode capture ................... ok ++ * - mixer support (snd_ctl_add, ...) ... todo ++ * - debug /proc entry .................. ok ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "FTSSP010_HDA.h" ++void init_hw(unsigned int cardno); ++ ++#if (!defined(CONFIG_PLATFORM_AHBDMA) && !defined(CONFIG_PLATFORM_APBDMA)) ++#warning needs ahb/apb dma to wrok ++#endif ++ ++/* --------------------------------------------------------------------------- ++ * Define the debug level of FTSSP_DEBUG ++ */ ++#define FTSSP_DEBUG 0 ++#define FTSSP_DEBUG_VERBOSE 0 ++#define FTSSP_PROC_FS 1 ++ ++#undef VVDBG ++#if (FTSSP_DEBUG_VERBOSE) ++//#define VVDBG(vvar...) (void)0 ++#define VVDBG(vvar...) printk(KERN_INFO vvar) ++#else ++#define VVDBG(vvar...) (void)0 ++#endif ++ ++#undef ERR ++#define ERR(vvar...) 
printk(KERN_ERR vvar) ++ ++#undef INFO ++#define INFO(vvar...) printk(KERN_INFO vvar) ++ ++#if (FTSSP_DEBUG) ++#undef DBG ++#define DBG(vvar...) printk(KERN_INFO vvar) ++#else ++#define DBG(vvar...) (void)0 ++#endif ++ ++#if (FTSSP_DEBUG_VERBOSE) ++#undef VDBG ++#define VDBG(vvar...) printk(KERN_INFO vvar) ++#else ++#define VDBG(vvar...) (void)0 ++#endif ++ ++/* --------------------------------------------------------------------------- ++ * Preserved size of memory space for audio DMA ring ++ */ ++#define FTSSP_HW_DMA_SIZE (512 * 1024) ++ ++ ++/* HDA HW configuration*/ ++/* ring size, exported to application */ ++#define HDA_HW_BUFFER_BYTES_MAX (256 * 1024) ++#define HDA_HW_PERIOD_BYTES_MIN (2 * 1024) ++#define HDA_HW_PERIOD_BYTES_MAX (32 * 1024) ++#define HDA_HW_PERIODS_MIN 3 ++#define HDA_HW_PERIODS_MAX 8 ++ ++#define HDA_HW_DMA_SIZE (HDA_HW_BUFFER_BYTES_MAX) ++ ++ ++/* --------------------------------------------------------------------------- ++ * Audio formats ++ */ ++ ++/* HDA formats */ ++#define HDA_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) ++#define HDA_CODEC_SAMPLE_RATES (SNDRV_PCM_RATE_192000 | \ ++ SNDRV_PCM_RATE_176400| \ ++ SNDRV_PCM_RATE_96000 | \ ++ SNDRV_PCM_RATE_88200 | \ ++ SNDRV_PCM_RATE_48000 | \ ++ SNDRV_PCM_RATE_44100 | \ ++ SNDRV_PCM_RATE_32000 | \ ++ SNDRV_PCM_RATE_22050 | \ ++ SNDRV_PCM_RATE_16000 | \ ++ SNDRV_PCM_RATE_11025 | \ ++ SNDRV_PCM_RATE_8000) ++#define HDA_CODEC_SAMPLE_RATE_MIN (8000) ++#define HDA_CODEC_SAMPLE_RATE_MAX (192000) ++ ++/* --------------------------------------------------------------------------- ++ * Configuration ++ */ ++#if (CONFIG_PROC_FS == 0) ++#undef FTSSP_PROC_FS ++#define FTSSP_PROC_FS 0 ++#else ++#if (FTSSP_PROC_FS) ++#include ++#endif /* FTSSP_PROC_FS */ ++#endif /* CONFIG_PROC_FS */ ++ ++#define FTSSP_CARD_ID "ftssp010" ++#define FTSSP_DRIVER_NAME "ftssp" ++ ++MODULE_LICENSE("Faraday License"); ++MODULE_AUTHOR("Faraday Technology Corp."); ++MODULE_DESCRIPTION("FTSSP010 Linux 2.6 Driver"); ++ ++static int cardno = 0; ++static const unsigned int SSP_FTSSP010_pa_base[SSP_FTSSP010_IRQ_COUNT] = ++ { SSP_FTSSP010_PA_BASE }; ++ ++/* Driver mode */ ++ ++// ---------------------------------------------- ++module_param(cardno, int, 0); ++MODULE_PARM_DESC(cardno, "FTSSP No."); ++ ++// ---------------------------------------------- ++ ++/* --------------------------------------------------------------------------- ++ * Structures ++ */ ++ ++/* private data for card */ ++typedef struct { ++ struct snd_card *card; ++ struct snd_pcm *pcm; ++ struct snd_pcm_substream *substream_tx; ++ struct snd_pcm_substream *substream_rx; ++} ftssp_chip; ++ ++/* dma request descriptors */ ++dmad_chreq dma_chreq_tx = { ++ .channel = -1, ++ .drq = NULL, ++}; ++ ++dmad_chreq dma_chreq_rx = { ++ .channel = -1, ++ .drq = NULL, ++}; ++ ++/* Holds ALSA card instance pointers */ ++struct snd_card *ftssp_cards[SSP_FTSSP010_COUNT]; ++ ++/* snd_pcm_hardware */ ++static struct snd_pcm_hardware snd_ftssp_pcm_hw = ++{ ++ .info = SNDRV_PCM_INFO_INTERLEAVED, ++ .formats = HDA_CODEC_FORMATS, ++ .rates = HDA_CODEC_SAMPLE_RATES, ++ .rate_min = HDA_CODEC_SAMPLE_RATE_MIN, ++ .rate_max = HDA_CODEC_SAMPLE_RATE_MAX, ++ .channels_min = 1, ++ .channels_max = 2, ++ .buffer_bytes_max = HDA_HW_BUFFER_BYTES_MAX, ++ .period_bytes_min = HDA_HW_PERIOD_BYTES_MIN, ++ .period_bytes_max = HDA_HW_PERIOD_BYTES_MAX, ++ .periods_min = HDA_HW_PERIODS_MIN, ++ .periods_max = HDA_HW_PERIODS_MAX, ++}; ++ ++/* private data for a substream (playback or capture) */ ++/* function pointer for set up AHBDMA 
for this substream */ ++typedef void (*start_t)(int cardno, unsigned use_dma); ++typedef void (*ftssp010_config_t)(int cardno, unsigned is_stereo, ++ unsigned speed, int use8bit); ++ ++typedef struct { ++ u32 busy; ++ spinlock_t dma_lock; ++ unsigned dma_area_va; ++ int dma_width; ++ unsigned int tx_period; ++ unsigned int rx_period; ++ ++ start_t start; ++ ftssp010_config_t hw_config; ++} ftssp_substream; ++ ++static ftssp_substream ftssp010_substreams[2] = { ++ /* Playback substream */ ++ { ++ busy : 0, ++ hw_config : ftssp010_config_hda_play, ++ }, ++ /* Capture substream */ ++ { ++ busy : 0, ++ hw_config : ftssp010_config_hda_rec, ++ } ++}; ++ ++ ++/** ++ * These dma callbacks are called in interrupt context. ++ * @data: pointer to the chip-wide structure. ++ * TODO: use stream-specifc data ++ */ ++__attribute__((__unused__)) ++static void ftssp_dma_callback_tx(int ch, u16 int_status, void *data) ++{ ++ ftssp_chip *chip = (ftssp_chip *)data; ++ struct snd_pcm_runtime *runtime = chip->substream_tx->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ u32 sw_ptr; ++ u32 tx_period = ftssp010_substream->tx_period + 1; ++ ++ if (tx_period == runtime->periods) ++ sw_ptr = runtime->buffer_size; ++ else ++ sw_ptr = tx_period * runtime->period_size; ++ ++ sw_ptr = (u32)frames_to_bytes(runtime, sw_ptr) >> 1; ++ ++ if (dmad_update_ring_sw_ptr(&dma_chreq_tx, (u32)sw_ptr, 0)) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ } ++ ++ ftssp010_substream->tx_period = tx_period % runtime->periods; ++ snd_pcm_period_elapsed(chip->substream_tx); ++} ++ ++__attribute__((__unused__)) ++static void ftssp_dma_callback_rx(int ch, u16 int_status, void *data) ++{ ++ ftssp_chip *chip = (ftssp_chip *)data; ++ struct snd_pcm_runtime *runtime = chip->substream_rx->runtime; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ u32 sw_ptr; ++ u32 rx_period = ftssp010_substream->rx_period + 1; ++ ++ if (rx_period == runtime->periods) ++ sw_ptr = runtime->buffer_size; ++ else ++ sw_ptr = rx_period * runtime->period_size; ++ sw_ptr = (u32)frames_to_bytes(runtime, sw_ptr) >> 1; ++ ++ if (dmad_update_ring_sw_ptr(&dma_chreq_rx, (u32)sw_ptr, 0) != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ } ++ ++ ftssp010_substream->rx_period = rx_period % runtime->periods; ++ ++ snd_pcm_period_elapsed(chip->substream_rx); ++} ++ ++static inline int snd_ftssp_dma_ch_alloc(struct snd_pcm_substream *substream) ++{ ++ dmad_chreq *ch_req __attribute__((__unused__)) = 0; ++ ++#ifdef CONFIG_PLATFORM_APBDMA ++ ++ if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ ch_req = &dma_chreq_tx; ++ ch_req->completion_cb = ftssp_dma_callback_tx; ++ ch_req->apb_req.tx_dir = DMAD_DIR_A0_TO_A1; ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97TX; ++ } else { ++ ch_req = &dma_chreq_rx; ++ ch_req->completion_cb = ftssp_dma_callback_rx; ++ ch_req->apb_req.tx_dir = DMAD_DIR_A1_TO_A0; ++ ch_req->apb_req.dev_reqn = APBBR_REQN_I2SAC97RX; ++ } ++ ++ ch_req->controller = DMAD_DMAC_APB_CORE; ++ ch_req->flags = DMAD_FLAGS_RING_MODE; ++ ch_req->ring_base = 0; ++ ch_req->dev_addr = (dma_addr_t)FTSSP010_DATA_PA(cardno); ++ ch_req->periods = 0; ++ ch_req->period_size = 0; ++ ++ ch_req->apb_req.ring_ctrl = APBBR_ADDRINC_I2X; ++ ch_req->apb_req.ring_reqn = APBBR_REQN_NONE; ++ ch_req->apb_req.dev_ctrl = APBBR_ADDRINC_FIXED; ++ ch_req->apb_req.burst_mode = 0; ++ ch_req->apb_req.data_width = APBBR_DATAWIDTH_2; ++ ch_req->completion_data = (void 
*)snd_pcm_substream_chip(substream); ++ ++ ch_req->completion_data = (void *)snd_pcm_substream_chip(substream); ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ ERR("%s: APBDMA channel allocation failed\n",__func__); ++ goto _try_ahb; ++ } ++ ++ DBG("%s: APBDMA channel allocated (ch: %d) ring_mode\n", ++ __func__, ch_req->channel); ++ ++ return 0; ++ ++_try_ahb: ++ ++#endif /* CONFIG_PLATFORM_APBDMA */ ++ ++#ifdef CONFIG_PLATFORM_AHBDMA ++ ++ if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ ch_req = &dma_chreq_tx; ++ ch_req->completion_cb = ftssp_dma_callback_tx; ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A0_TO_A1; ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97TX; ++ } else { ++ ch_req = &dma_chreq_rx; ++ ch_req->completion_cb = ftssp_dma_callback_rx; ++ ch_req->ahb_req.tx_dir = DMAD_DIR_A1_TO_A0; ++ ch_req->ahb_req.dev_reqn = DMAC_REQN_I2SAC97RX; ++ } ++ ++ ch_req->controller = DMAD_DMAC_AHB_CORE; ++ ch_req->flags = DMAD_FLAGS_RING_MODE; ++ ch_req->ring_base = 0; ++ ch_req->dev_addr = (dma_addr_t)FTSSP010_DATA_PA(cardno); ++ ch_req->periods = 0; ++ ch_req->period_size = 0; ++ ++ ch_req->ahb_req.sync = 1; ++ ch_req->ahb_req.priority = DMAC_CSR_CHPRI_2; ++ ch_req->ahb_req.hw_handshake = 1; ++ ch_req->ahb_req.burst_size = DMAC_CSR_SIZE_1; ++ ++ ch_req->ahb_req.ring_width = DMAC_CSR_WIDTH_32; ++ ch_req->ahb_req.ring_ctrl = DMAC_CSR_AD_INC; ++ ch_req->ahb_req.ring_reqn = DMAC_REQN_NONE; ++ ch_req->ahb_req.dev_width = DMAC_CSR_WIDTH_32; ++ ch_req->ahb_req.dev_ctrl = DMAC_CSR_AD_FIX; ++ ++ ch_req->completion_data = (void *)snd_pcm_substream_chip(substream); ++ ++ if (dmad_channel_alloc(ch_req) != 0) { ++ ERR("%s: AHBDMA channel allocation failed\n", __func__); ++ goto _err_exit; ++ } ++ ++ DBG("%s: AHBDMA channel allocated (ch: %d) ring_mode\n", ++ __func__, ch_req->channel); ++ ++ return 0; ++ ++_err_exit: ++ ++#endif /* CONFIG_PLATFORM_AHBDMA */ ++ ++ return -ENODEV; ++} ++ ++static inline ftssp_substream *ftssp010_substream_new(int stream_id) ++{ ++ ftssp_substream *s = NULL; ++ ++ switch (stream_id) { ++ case SNDRV_PCM_STREAM_PLAYBACK: ++ s = &ftssp010_substreams[0]; ++ break; ++ case SNDRV_PCM_STREAM_CAPTURE: ++ s = &ftssp010_substreams[1]; ++ break; ++ default: ++ ERR("%s: wrong stream type (%d)\n", __func__, stream_id); ++ return NULL; ++ } ++ ++ if (s->busy) { ++ ERR("%s: device busy!\n", __func__); ++ return NULL; ++ } ++ s->busy = 1; ++ ++ spin_lock_init(&s->dma_lock); ++ ++ return s; ++} ++ ++static int snd_ftssp_pcm_open(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ int stream_id = substream->pstr->stream; ++ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ /* Both playback and capture share a hardware description */ ++ runtime->hw = snd_ftssp_pcm_hw; ++ ++ /* Allocate & Initialize stream-specific data */ ++ runtime->private_data = ftssp010_substream_new(stream_id); ++ ++ if (runtime->private_data) ++ return snd_ftssp_dma_ch_alloc(substream); ++ else ++ return -EBUSY; ++} ++ ++static int snd_ftssp_pcm_close(struct snd_pcm_substream *substream) ++{ ++ int stream_id = substream->pstr->stream; ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)substream->runtime->private_data; ++ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
++ "playback" : "capture"); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ++ dmad_channel_free(&dma_chreq_tx); ++ else ++ dmad_channel_free(&dma_chreq_rx); ++ ++ ftssp010_substream->busy = 0; ++ return 0; ++} ++ ++static int snd_ftssp_pcm_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *hw_params) ++{ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ return snd_pcm_lib_malloc_pages(substream, HDA_HW_DMA_SIZE); ++} ++ ++static int snd_ftssp_pcm_hw_free(struct snd_pcm_substream *substream) ++{ ++ VDBG("%s, %s\n", __func__, ++ (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ? ++ "playback" : "capture"); ++ ++ if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ dmad_drain_requests(&dma_chreq_tx, 1); ++ else ++ dmad_drain_requests(&dma_chreq_rx, 1); ++ ++ return snd_pcm_lib_free_pages(substream); ++} ++ ++/* Prepare FTSSP010 AHBDMA for playback & capture */ ++static int snd_ftssp_pcm_prepare(struct snd_pcm_substream *substream) ++{ ++ ftssp_chip *chip = snd_pcm_substream_chip(substream); ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)runtime->private_data; ++ ++ int stream_id = substream->pstr->stream; ++ dmad_chreq *dma_chreq; ++ unsigned period_size, buffer_size; ++ VVDBG("%s before spin_lock<<\n", __func__); ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ++ dma_chreq = &dma_chreq_tx; ++ else ++ dma_chreq = &dma_chreq_rx; ++ ++ period_size = frames_to_bytes(runtime, runtime->period_size); ++ buffer_size = frames_to_bytes(runtime, runtime->buffer_size); ++ ++ if (runtime->format != SNDRV_PCM_FORMAT_S16_LE) ++ return -ENODEV; ++ ++ ftssp010_substream->dma_width = 4; ++ ++ dmad_drain_requests(dma_chreq, 1); ++ ++ dma_chreq->ring_base = (dma_addr_t)runtime->dma_addr; ++ dma_chreq->periods = (dma_addr_t)runtime->periods; ++ dma_chreq->period_size = (dma_addr_t)(period_size >> 1); ++ dma_chreq->ring_size = (dma_addr_t)(buffer_size >> 1); ++ dmad_update_ring(dma_chreq); ++ ++ /* Set PMU, FTSSP010, and DMA */ ++ spin_lock(&ftssp010_substream->dma_lock); ++ ++ /* keep DMA buffer VA for copy() callback */ ++ // todo: support playback/capture simultaneously ++ ftssp010_substream->dma_area_va = (u32)runtime->dma_area; ++ VVDBG("%s before hw_config<<\n", __func__); ++ ftssp010_substream->hw_config(cardno, ++ runtime->channels > 1 ? 
1 : 0, /* 1: stereo, 0: mono */ ++ runtime->rate, ftssp010_substream->dma_width); ++ VVDBG("%s after hw_config<<\n", __func__); ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ ftssp010_substream->tx_period = 0; ++ chip->substream_tx = substream; ++ } else { ++ ftssp010_substream->rx_period = 0; ++ chip->substream_rx = substream; ++ } ++ ++ spin_unlock(&ftssp010_substream->dma_lock); ++ VVDBG("%s after spin_unlock <<\n", __func__); ++ ++ return 0; ++} ++ ++static inline int snd_ftssp_start_play(ftssp_substream *ftssp010_substream, ++ struct snd_pcm_runtime *runtime) ++{ ++ int err = 0; ++ u32 sw_ptr = ++ (u32)frames_to_bytes(runtime, runtime->buffer_size) >> 1; ++ ++ err = dmad_update_ring_sw_ptr(&dma_chreq_tx, sw_ptr, 0); ++ if (err != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ return err; ++ } ++ err = dmad_kickoff_requests(&dma_chreq_tx); ++ if (err != 0) { ++ ERR("%s: failed to kickoff dma!\n", __func__); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static inline int snd_ftssp_start_record(ftssp_substream *ftssp010_substream, ++ struct snd_pcm_runtime *runtime) ++{ ++ int err = 0; ++ u32 sw_ptr = (u32)frames_to_bytes(runtime, runtime->buffer_size); ++ ++ sw_ptr = sw_ptr >> 1; ++ printk(">>>>>>>>>> : snd_ftssp_start_record() for recording....\n"); ++ err = dmad_update_ring_sw_ptr(&dma_chreq_rx, sw_ptr, 0); ++ if (err != 0) { ++ ERR("%s: failed to update sw-pointer!\n", __func__); ++ return err; ++ } ++ ++ err = dmad_kickoff_requests(&dma_chreq_rx); ++ if (err != 0) { ++ ERR("%s: failed to kickoff dma!\n", __func__); ++ return err; ++ } ++ ++ ++ return 0; ++} ++ ++/* Triggers AHBDMA for playback & capture */ ++static int snd_ftssp_pcm_trigger(struct snd_pcm_substream * substream, int cmd) ++{ ++ ftssp_substream *ftssp010_substream = ++ (ftssp_substream *)substream->runtime->private_data; ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ int err = 0; ++ int stream_id = substream->pstr->stream; ++ ++ /* note local interrupts are already disabled in the midlevel code */ ++ spin_lock(&ftssp010_substream->dma_lock); ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ ++ VDBG("%s: SNDRV_PCM_TRIGGER_START state(0x%08x)\n", ++ __func__, (u32)runtime->status->state); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ err = snd_ftssp_start_play(ftssp010_substream, runtime); ++ } else { ++ err = snd_ftssp_start_record(ftssp010_substream, ++ runtime); ++ } ++ break; ++ ++ case SNDRV_PCM_TRIGGER_STOP: ++ ++ VDBG("%s: SNDRV_PCM_TRIGGER_STOP state(0x%08x)\n", ++ __func__, (u32)substream->runtime->status->state); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ ftssp010_stop_tx(cardno); ++ dmad_drain_requests(&dma_chreq_tx, 1); ++ } else { ++ ftssp010_stop_rx(cardno); ++ dmad_drain_requests(&dma_chreq_rx, 1); ++ } ++ break; ++ default: ++ err = -EINVAL; ++ break; ++ } ++ ++ spin_unlock(&ftssp010_substream->dma_lock); ++ return err; ++} ++ ++// pcm middle-layer call this function within irq (snd_pcm_period_elapsed) or ++// with local irq disabled (snd_pcm_lib_write1) ++static snd_pcm_uframes_t snd_ftssp_pcm_pointer( ++ struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ u32 hw_ptr; ++ snd_pcm_uframes_t ret; ++ int stream_id = substream->pstr->stream; ++ ++ /* Fetch DMA pointer, with spin lock */ ++ //spin_lock_irqsave(&ftssp010_substream->dma_lock, flags); ++ ++ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) { ++ hw_ptr = dmad_probe_ring_hw_ptr(&dma_chreq_tx); ++ } else { ++ hw_ptr = 
dmad_probe_ring_hw_ptr(&dma_chreq_rx); ++ } ++ ret = bytes_to_frames(runtime, hw_ptr << 1); ++ //spin_unlock_irqrestore(&ftssp010_substream->dma_lock, flags); ++ VVDBG("%s: hw_ptr(0x%08x) ret(0x%08x)\n", ++ (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ? "p" : "c", ++ (u32)hw_ptr, (u32)ret); ++ ++ /* ALSA requires return value 0 <= ret < buffer_size */ ++ if (ret >= runtime->buffer_size) ++ return 0; ++ return ret; ++} ++ ++/* For FTSSP010 driver, operations are shared among playback & capture */ ++static struct snd_pcm_ops snd_ftssp_playback_ops = { ++ .open = snd_ftssp_pcm_open, ++ .close = snd_ftssp_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = snd_ftssp_pcm_hw_params, ++ .hw_free = snd_ftssp_pcm_hw_free, ++ .prepare = snd_ftssp_pcm_prepare, ++ .trigger = snd_ftssp_pcm_trigger, ++ .pointer = snd_ftssp_pcm_pointer, ++ .copy = NULL, ++}; ++ ++static struct snd_pcm_ops snd_ftssp_capture_ops = { ++ .open = snd_ftssp_pcm_open, ++ .close = snd_ftssp_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = snd_ftssp_pcm_hw_params, ++ .hw_free = snd_ftssp_pcm_hw_free, ++ .prepare = snd_ftssp_pcm_prepare, ++ .trigger = snd_ftssp_pcm_trigger, ++ .pointer = snd_ftssp_pcm_pointer, ++ .copy = NULL, ++}; ++ ++/* ALSA PCM constructor */ ++static int snd_ftssp_new_pcm(ftssp_chip *chip) ++{ ++ struct snd_pcm *pcm; ++ int err; ++ ++ /* PCM device #0 with 1 playback and 1 capture */ ++ if ((err = snd_pcm_new(chip->card, "ftssp_pcm", 0, 1, 1, &pcm)) < 0) ++ return err; ++ ++ pcm->private_data = chip; ++ strcpy(pcm->name, "ftssp_pcm device"); ++ chip->pcm = pcm; ++ ++ /* set operators for playback and capture*/ ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ++ &snd_ftssp_playback_ops); ++ ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ++ &snd_ftssp_capture_ops); ++ ++ /* Pre-allocate buffer, as suggested by ALSA driver document */ ++ // todo: support playback/capture simultaneously ++ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, ++ NULL, FTSSP_HW_DMA_SIZE, FTSSP_HW_DMA_SIZE); ++ ++ return 0; ++} ++ ++static __init int ftssp_alsa_init(void) ++{ ++ ftssp_chip *chip; ++ int err; ++ init_hw(cardno); ++ INFO("After init_hw!\n"); ++ err = snd_card_create(cardno, FTSSP_CARD_ID, THIS_MODULE, ++ sizeof(ftssp_chip), &ftssp_cards[cardno]); ++ INFO("After snd_card_create!\n"); ++ if (err < 0) ++ return err; ++ sprintf(ftssp_cards[cardno]->driver, FTSSP_DRIVER_NAME); ++ sprintf(ftssp_cards[cardno]->shortname, ++ FTSSP_DRIVER_NAME "_HDA"); ++ sprintf(ftssp_cards[cardno]->longname, ++ FTSSP_DRIVER_NAME "_HDA controller"); ++ /* PCM */ ++ chip = (ftssp_chip *)(ftssp_cards[cardno]->private_data); ++ chip->card = ftssp_cards[cardno]; ++ ++ if ((err = snd_ftssp_new_pcm(chip))) { ++ ERR("%s, Can't new PCM devices\n",__func__); ++ return -ENODEV; ++ } ++ ++ ++ /* Register the card to ALSA */ ++ if ((err = snd_card_register(chip->card)) == 0) { ++ INFO("%s card registered!\n", FTSSP_CARD_ID); ++ } ++ ++ return 0; ++} ++ ++static __exit void ftssp_alsa_exit(void) ++{ ++ DBG("%s, cleaning up\n",__func__); ++ ++ dmad_channel_free(&dma_chreq_tx); ++ dmad_channel_free(&dma_chreq_rx); ++ ++ snd_card_free(ftssp_cards[cardno]); ++} ++ ++module_init(ftssp_alsa_init); ++module_exit(ftssp_alsa_exit); +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_HDA.h linux-3.4.110/sound/nds32/FTSSP010_HDA.h +--- linux-3.4.110.orig/sound/nds32/FTSSP010_HDA.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_HDA.h 2016-04-07 10:20:51.062085666 +0200 +@@ -0,0 +1,34 @@ ++/* FTSSP010 - HDA 
supporting library header */ ++/* ++ * ++ * $log$ ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#define FTSSP010_DATA(x) (SSP_FTSSP010_va_base[(x)]+0x18) ++#define FTSSP010_DATA_PA(x) (SSP_FTSSP010_pa_base[(x)]+0x18) ++ ++ ++/* Returns FTSSP010 status */ ++extern void ftssp010_set_int_control(int cardno, unsigned val); ++extern int ftssp010_get_status(int cardno); ++extern unsigned ftssp010_get_int_status(int cardno); ++/* Polls FIFO full register */ ++extern int ftssp010_tx_fifo_not_full(int cardno); ++ ++/* Configure FTSSP010 to a given sampling rate and channel number */ ++extern void ftssp010_config_hda_play(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++ ++extern void ftssp010_config_hda_rec(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++ ++extern void ftssp010_stop_tx(int cardno); ++extern void ftssp010_stop_rx(int cardno); ++ ++ +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_HDA_lib.c linux-3.4.110/sound/nds32/FTSSP010_HDA_lib.c +--- linux-3.4.110.orig/sound/nds32/FTSSP010_HDA_lib.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_HDA_lib.c 2016-04-07 10:20:51.062085666 +0200 +@@ -0,0 +1,477 @@ ++/* FTSSP010 - UDA1345TS module ++ * ++ * $log$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "hda.h" ++ ++#if 0 ++MODULE_LICENSE("Faraday License"); ++MODULE_AUTHOR("Faraday Technology Corp."); ++MODULE_DESCRIPTION("FTSSP010 - UDA1345TS Linux 2.6 Library"); ++#endif ++#undef ERR ++#define ERR(vvar...) printk(KERN_ERR vvar) ++#define DBG(vvar...) printk(KERN_INFO vvar) ++ ++#define SSP_TXFCLR 0x8 ++#define SSP_RXFCLR 0x4 ++#define SSP_RFIEN 0x4 ++#define SSP_TFIEN 0x8 ++#define SSP_SSPEN 0x1 ++#define SSP_TXDOE 0x2 ++#define SSP_RXDMAEN 0x10 ++#define SSP_TXDMAEN 0x20 ++#define SSP_RFURIEN 0x1 ++#define SSP_TFURIEN 0x2 ++#define HDA_STRNUM_RX 1 ++#define HDA_STRNUM_TX 2 ++#define HDA_CHANUM 0 ++#define HDA_CRST_MASK 0x20 ++/* Initialize FTSSP010 to output to UDA1345TS via I2S */ ++#define FTSSP010_CONTROL0(x) (SSP_FTSSP010_va_base[(x)]+0x0) ++#define FTSSP010_CONTROL0_OPM_STEREO 0xC ++#define FTSSP010_CONTROL0_OPM_MONO 0x8 ++ ++#define FTSSP010_CONTROL1(x) (SSP_FTSSP010_va_base[(x)]+0x4) ++#define FTSSP010_CONTROL2(x) (SSP_FTSSP010_va_base[(x)]+0x8) ++ ++#define FTSSP010_INT_CONTROL(x) (SSP_FTSSP010_va_base[(x)]+0x10) ++#define FTSSP010_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0xC) ++#define FTSSP010_INT_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0x14) ++#define FTSSP010_DATA(x) (SSP_FTSSP010_va_base[(x)]+0x18) ++#define FTSSP010_INFO(x) (SSP_FTSSP010_va_base[(x)]+0x1C) ++#define FTSSP010_AC_COMMAND(x) (SSP_FTSSP010_va_base[(x)]+0x28) ++#define FTSSP010_IRSPR(x) (SSP_FTSSP010_va_base[(x)]+0x2C) ++#define FTSSP010_ICMDST(x) (SSP_FTSSP010_va_base[(x)]+0x30) ++#define HDA_REG_OSDC(x) (SSP_FTSSP010_va_base[(x)]+0x50) ++#define HDA_REG_ISDC(x) (SSP_FTSSP010_va_base[(x)]+0x54) ++ ++static const unsigned int SSP_FTSSP010_va_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_VA_BASE }; ++static const unsigned int SSP_FTSSP010_pa_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_PA_BASE }; ++ ++void SetSSP_Enable(int cardno, int enable) ++{ ++ volatile unsigned int ctrl = 0; ++ ++ ctrl = inl(FTSSP010_CONTROL2(cardno)); ++ if(enable) ++ ctrl |= SSP_SSPEN + SSP_TXDOE; ++ else ++ ctrl &= ~(SSP_SSPEN + SSP_TXDOE); ++ ++ outl(ctrl, FTSSP010_CONTROL2(cardno)); ++} ++void SetSSP_Enable_rx(int cardno, int enable) ++{ ++ volatile unsigned int ctrl = 0; ++ ++ ctrl = 
inl(FTSSP010_CONTROL2(cardno)); ++ if(enable) ++ ctrl |= SSP_SSPEN ; ++ else ++ ctrl &= ~(SSP_SSPEN); ++ ++ outl(ctrl, FTSSP010_CONTROL2(cardno)); ++} ++ ++void SetSSP_FIFO_Threshold(int cardno, unsigned int trans_len,unsigned int rec_len) ++{ ++ volatile unsigned int ctrl = 0; ++ ctrl = inl(FTSSP010_INT_CONTROL(cardno)); ++ ++ ctrl &= ~0x0000FF00; ++ ctrl |= ((trans_len << 12) + (rec_len << 8)) & 0x0000FF00; ++ ++ outl(ctrl, FTSSP010_INT_CONTROL(cardno)); ++} ++void SetSSP_IntMask(int cardno,int Mask) ++{ ++ volatile unsigned int ctrl = 0; ++ ctrl = inl(FTSSP010_INT_CONTROL(cardno)); ++ ctrl &= ~0x3F; ++ ctrl |= Mask; ++ outw(ctrl, FTSSP010_INT_CONTROL(cardno)); ++} ++void SetSSP_TXFIFO(int cardno, unsigned int threshold,unsigned int underrun) ++{ ++ volatile unsigned int data = 0; ++ ++ data = inl(FTSSP010_INT_CONTROL(cardno)); ++ ++ if (threshold) ++ data |= SSP_TFIEN; ++ else ++ data &= ~SSP_TFIEN; //Howard@2007-4-13 ++ ++ if (underrun) ++ data |= SSP_TFURIEN; ++ else ++ data &= ~SSP_TFURIEN; //Howard@2007-4-13 ++ ++ outl(data, FTSSP010_INT_CONTROL(cardno)); ++} ++void SetSSP_RXFIFO(int cardno,unsigned int threshold,unsigned int underrun) ++{ ++ volatile unsigned int data = 0; ++ ++ data = inl(FTSSP010_INT_CONTROL(cardno)); ++ ++ if (threshold) ++ data |= SSP_RFIEN; ++ else ++ data &= ~SSP_RFIEN; //Howard@2007-4-13 ++ ++ if (underrun) ++ data |= SSP_RFURIEN; ++ else ++ data &= ~SSP_RFURIEN; //Howard@2007-4-13 ++ ++ outl(data, FTSSP010_INT_CONTROL(cardno)); ++} ++void SetSSP_DMA(int cardno, unsigned int trans,unsigned int rec) ++{ ++ volatile unsigned int data = 0; ++ ++ data = inl(FTSSP010_INT_CONTROL(cardno)); ++ ++ if (trans) ++ data |= SSP_TXDMAEN; ++ else ++ data &= ~SSP_TXDMAEN; ++ ++ if (rec) ++ data |= SSP_RXDMAEN; ++ else ++ data &= ~SSP_RXDMAEN; ++ ++ outl(data, FTSSP010_INT_CONTROL(cardno)); ++} ++void SSPClearTxFIFO(int cardno) ++{ ++ volatile unsigned int data = 0; ++ ++ data = inl(FTSSP010_CONTROL2(cardno)); ++ data |= SSP_TXFCLR; ++ outl(data, FTSSP010_CONTROL2(cardno)); ++} ++ ++ ++void SSPClearRxFIFO(int cardno) ++{ ++ volatile unsigned int data = 0; ++ ++ data = inl(FTSSP010_CONTROL2(cardno)); ++ data |= SSP_RXFCLR; ++ outl(data, FTSSP010_CONTROL2(cardno)); ++} ++ ++void ftssp010_set_int_control(int cardno, unsigned val) ++{ ++ outl(val, FTSSP010_INT_CONTROL(cardno)); ++} ++ ++unsigned ftssp010_get_int_status(int cardno) ++{ ++ return (inl(FTSSP010_INT_STATUS(cardno))); ++} ++ ++int ftssp010_get_status(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))); ++} ++ ++int ftssp010_tx_fifo_not_full(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))&0x2)==0x2; ++} ++ ++int ftssp010_tx_fifo_vaild_entries(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))>>12) & 0x1f; ++} ++ ++/* Configure FTSSP010 to a given sampling rate and channel number ++ * for HDA mode in playback mode ++ */ ++void init_hw(unsigned int cardno) ++{ ++ /* Step 1: Set HDA Mode & HDA Format */ ++ outl(HDA_MODE | 0x4000, FTSSP010_CONTROL0(cardno)); /* set FTSSP010 to HDA mode */ ++ mdelay(50); ++ outl(HDA_CRST_CLR | (5 << HDA_RST_FCNT_OFS), FTSSP010_CONTROL2(cardno)); /* Cold Reset AC-Link */ ++ mdelay(50); ++ while((inl(FTSSP010_CONTROL2(cardno)) & HDA_CRST_MASK) != HDA_CRST_CLR); ++ SetSSP_IntMask(cardno, 0); ++ ++} ++void SSPClearFIFO(int cardno, unsigned int tx, unsigned int rx) ++{ ++ unsigned int data; ++ ++ if (tx == 1) { ++ //clear TX ++ data = inl(FTSSP010_CONTROL2(cardno)); ++ //data = REG32(SSP_REG_CTRL2); ++ data |= SSP_TXFCLR; ++ outl(data, FTSSP010_CONTROL2(cardno)); ++ 
//REG32(SSP_REG_CTRL2) = data; ++ } ++ ++ if (rx == 1) { ++ //clear RX ++ data = inl(FTSSP010_CONTROL2(cardno)); ++ //data = REG32(SSP_REG_CTRL2); ++ data |= SSP_RXFCLR; ++ //REG32(SSP_REG_CTRL2) = data; ++ outl(data, FTSSP010_CONTROL2(cardno)); ++ } ++} ++void hda_cmd_rsp(int cardno, unsigned int node_num, unsigned int verb_num, unsigned int func_num) ++{ ++ //unsigned int HDA_REG_ICMDST; ++ volatile unsigned int hda_cmd = HDA_COD_DEVADDR | (node_num << 20) | (verb_num << 8) | func_num; ++ //Wait CMD Bus Not Busy ++ while((inl(FTSSP010_ICMDST(cardno)) & HDA_ICB_MASK) != 0); ++ //Write CMD into ICW ++ outl(hda_cmd, FTSSP010_AC_COMMAND(cardno)); ++ mdelay(50); ++ //Wait new resp is latched into IRR ++ while((inl(FTSSP010_ICMDST(cardno)) & HDA_IRV_MASK) == 0); ++ //Get Resp From IRR & Compare with Expected Resp ++ //clear IRV ++ outl(inl(FTSSP010_ICMDST(cardno)) | (1 << HDA_IRV_OFFSET), FTSSP010_ICMDST(cardno)); ++ mdelay(50); ++} ++void hda_cmdrsp_proc(int cardno, unsigned int node_num, unsigned int issue_cmd, unsigned int expect_rsp) ++{ ++ unsigned int hda_cmd = HDA_COD_DEVADDR | node_num | issue_cmd; ++ unsigned int HDA_REG_ICMDST; ++ //Wait CMD Bus Not Busy ++ while((inl(FTSSP010_ICMDST(cardno)) & HDA_ICB_MASK) != 0); ++ ++ //Write CMD into ICW ++ //REG32(HDA_REG_ICMDW) = hda_cmd; ++ outl(hda_cmd, FTSSP010_AC_COMMAND(cardno)); ++ mdelay(50); ++ //Wait new resp is latched into IRR ++ while((inl(FTSSP010_ICMDST(cardno)) & HDA_IRV_MASK) == 0); ++ //Get Resp From IRR & Compare with Expected Resp ++ if (inl(FTSSP010_IRSPR(cardno)) != expect_rsp) { ++ ERR("%s: unexpected rsp!",__func__); ++ } ++ else { ++ //clear IRV ++ outl(inl(FTSSP010_ICMDST(cardno)) | (1 << HDA_IRV_OFFSET), FTSSP010_ICMDST(cardno)); ++ } ++} ++unsigned int hda_covfmt_setup(unsigned int type, unsigned int base, unsigned int mult, unsigned int div, unsigned int bits, unsigned int chnum) ++{ ++ unsigned int hda_fmt = type << HDA_FMT_TYPE_OFS | ++ base << HDA_FMT_BASE_OFS | ++ mult << HDA_FMT_MULT_OFS | ++ div << HDA_FMT_DIV_OFS | ++ bits << HDA_FMT_BITS_OFS | ++ chnum << HDA_FMT_CHNUM_OFS; ++ return hda_fmt; ++} ++unsigned int hda_covstr_setup(unsigned int stream, unsigned int channel) ++{ ++ unsigned int hda_str = stream << HDA_STR_STR_OFS | ++ channel << HDA_STR_CHA_OFS ; ++ return hda_str; ++} ++unsigned int hda_converter_setup(int cardno, unsigned int mode, unsigned int type, unsigned int base, unsigned mult, unsigned int div, unsigned int bits, unsigned int chnum, unsigned int stream, unsigned int channel) ++{ ++ unsigned int hda_fmt = 0; ++ unsigned int hda_str = 0; ++ unsigned int exp_rsp = 0; ++ unsigned int node_num = 0; ++ ++ if (mode == HDA_OUT_STR) { ++ node_num = HDA_OUTCOV_NODE; ++ } ++ else { ++ node_num = HDA_INCOV_NODE; ++ } ++ //a: Set Converter Format to Converter ++ hda_fmt = hda_covfmt_setup(type, base, mult, div, bits, chnum); ++ exp_rsp = HDA_RESP_ZERO_VAL; ++ hda_cmdrsp_proc(cardno, node_num, (HDA_CMD_SETCVTFMT | hda_fmt), exp_rsp); ++ //b: Get Converter Format from Converter ++ exp_rsp = hda_fmt; ++ hda_cmdrsp_proc(cardno, node_num, HDA_CMD_GETCVTFMT, exp_rsp); ++ //c: Set Converter Stream to Converter ++ hda_str = hda_covstr_setup(stream, channel); ++ exp_rsp = HDA_RESP_ZERO_VAL; ++ hda_cmdrsp_proc(cardno, node_num, (HDA_CMD_SETCVTSTR | hda_str), exp_rsp); ++ //d: Get Converter Stream from Converter ++ exp_rsp = hda_str; ++ hda_cmdrsp_proc(cardno, node_num, HDA_CMD_GETCVTSTR, exp_rsp); ++ ++ return hda_fmt; ++} ++void hda_iosdc_setup(int cardno, unsigned int io_sel, unsigned int ctrl, unsigned int 
stream_num, unsigned int hda_fmt) ++{ ++ unsigned int hda_reg_base = (io_sel == 1)? HDA_REG_OSDC(cardno) : HDA_REG_ISDC(cardno); ++ if (ctrl == 1) {//run ++ outl((stream_num << HDA_SDC_STNUM_OFFSET) | hda_fmt, hda_reg_base); ++ outl((inl(hda_reg_base) | (1 << HDA_SDC_SRUN_OFFSET)),hda_reg_base); ++ } ++ ++} ++static void _ftssp010_config_hda(int cardno, unsigned is_stereo, unsigned speed, int is_rec) ++{ ++ unsigned int hda_fmt = 0; ++ unsigned int div = 0,mult = 0, base = 0; ++ switch (speed) { ++ case 44100: ++ base = 1; ++ div = 0; ++ break; ++ case 48000: ++ div = 0; ++ break; ++ case 96000: ++ mult = 1; ++ break; ++ case 192000: ++ mult = 3; ++ break; ++ } ++ ++ if (is_rec) { /* Recording */ ++ /* ------------------------------------------------ */ ++ /* Codec initialization */ ++ /* ------------------------------------------------ */ ++ /* Step 3: Codec Evaluation */ ++ /* root_node -> node 1(Audio Func Group) */ ++ /* -> node 2(Output Converter) */ ++ /* -> node 3(Input Converter) */ ++ /* ------------------------------------------------ */ ++ //input ++ hda_cmd_rsp(cardno, 0x1a, 0x707, 0x20); //pin complex => input enable, port-c LINE1 ++ hda_cmd_rsp(cardno, 0x08, 0x3f0, 0x3f);//pin amp => max LINE ADC amplifier gain ++ hda_cmd_rsp(cardno, 0x1a, 0x3f0, 0x00);//pin complex => no mute ++ hda_cmd_rsp(cardno, 0x23, 0x3f2, 0x3f);//pin complex => no mute ++ /* ------------------------------ */ ++ /* Step 4: Output Converter Setup */ ++ /* ------------------------------ */ ++ /* Set Converter Format & Stream to Node 2(Output Converter) */ ++ /* type = PCM, base = 48K, mult = xN, div = /M, bits = 8~32bits/sample, chnum = 1 */ ++ /* stream = 1, lowest channel = 0 */ ++ ++ hda_fmt = hda_converter_setup(cardno, HDA_IN_STR, //MODE ++ 0, base, mult, div, 1, 1, //FORMAT ++ HDA_STRNUM_RX, HDA_CHANUM); //STREAM ++ /* ---------------------------- */ ++ /* Step 5: Enable Output Stream */ ++ /* ---------------------------- */ ++ hda_iosdc_setup(cardno, HDA_IN_STR, HDA_STR_RUN, HDA_STRNUM_RX, hda_fmt); ++ SetSSP_Enable_rx(cardno, 1); ++ SetSSP_RXFIFO(cardno, 0, 0); ++ SetSSP_TXFIFO(cardno, 0, 0); ++ SetSSP_FIFO_Threshold(cardno,12,12); ++ SetSSP_DMA(cardno, 0, 1); ++ ++ SSPClearTxFIFO(cardno); ++ SSPClearRxFIFO(cardno); ++ ++ //SetSSP_Enable_rx(cardno, 0); ++ ++ } else { /* Playback */ ++ /* ------------------------------------------------ */ ++ /* Codec initialization */ ++ /* ------------------------------------------------ */ ++ /* Step 3: Codec Evaluation */ ++ /* root_node -> node 1(Audio Func Group) */ ++ /* -> node 2(Output Converter) */ ++ /* -> node 3(Input Converter) */ ++ /* ------------------------------------------------ */ ++ //DBG("%s before_cmd_rsp<<",__func__); ++ hda_cmd_rsp(cardno, 0x14, 0x707, 0x40);//pin complex => output ++ //DBG("%s after hda_cmd_rsp< output>>",__func__); ++ hda_cmd_rsp(cardno, 0x0c, 0x3f0, 0x3f);//pin amp => 0x3f ++ hda_cmd_rsp(cardno, 0x14, 0x3f0, 0x00);//pin complex => no mute ++ //DBG("%s after hda_cmd_rsp",__func__); ++ //msleep_interruptible(10); ++ /* ------------------------------ */ ++ /* Step 4: Output Converter Setup */ ++ /* ------------------------------ */ ++ /* Set Converter Format & Stream to Node 2(Output Converter) */ ++ /* type = PCM, base = 48K, mult = xN, div = /M, bits = 8~32bits/sample, chnum = 1 */ ++ /* stream = 1, lowest channel = 0 */ ++ hda_fmt = hda_converter_setup(cardno, HDA_OUT_STR, //MODE ++ 0, base, mult, div, 1, 1, //FORMAT ++ HDA_STRNUM_TX, HDA_CHANUM); //STREAM ++ //DBG("%s after hda_converter_setup",__func__); ++ /* 
---------------------------- */ ++ /* Step 5: Enable Output Stream */ ++ /* ---------------------------- */ ++ hda_iosdc_setup(cardno, HDA_OUT_STR, HDA_STR_RUN, HDA_STRNUM_TX, hda_fmt); ++ SetSSP_Enable(cardno, 1); ++ SetSSP_RXFIFO(cardno, 0, 0); ++ SetSSP_TXFIFO(cardno, 0, 0); ++ //outl(0xC, FTSSP010_CONTROL2(cardno)); /* Disable FTSSP010, clear RX/TX Fifo. */ ++ SetSSP_FIFO_Threshold(cardno,12,12); ++ SetSSP_DMA(cardno,1,0); ++ ++ SSPClearTxFIFO(cardno); ++ SSPClearRxFIFO(cardno); ++ } ++ ++ while(inl(FTSSP010_INT_STATUS(cardno))&0x3); ++} ++/* for HDA */ ++void ftssp010_config_hda_play(int cardno, unsigned is_stereo, unsigned speed, int use8bit) ++{ ++ _ftssp010_config_hda(cardno, is_stereo, speed, 0); ++} ++ ++void ftssp010_config_hda_rec(int cardno, unsigned is_stereo, unsigned speed, int use8bit) ++{ ++ _ftssp010_config_hda(cardno, is_stereo, speed, 1); ++} ++ ++ ++ ++void ftssp010_stop_tx(int cardno) ++{ ++ unsigned int hda_reg_base = HDA_REG_OSDC(cardno); ++ SetSSP_Enable(cardno,0); ++ SetSSP_DMA(cardno,0,0); ++ ++ /* turn off output node */ ++ hda_cmd_rsp(cardno,0x14, 0x707, 0); //pin complex => input enable, port-c LINE1 ++ hda_cmd_rsp(cardno,0x0c, 0x3f0, 0x80); //pin amp => max LINE ADC amplifier gain ++ hda_cmd_rsp(cardno,0x14, 0x3f0, 0x80); //pin complex => no mute ++ ++ outl(0, hda_reg_base); ++ SSPClearFIFO(cardno, 1, 0); ++ //outl(inl(FTSSP010_INT_CONTROL(cardno)) & (~0x22), FTSSP010_INT_CONTROL(cardno)); ++} ++ ++void ftssp010_stop_rx(int cardno) ++{ ++ unsigned int hda_reg_base = HDA_REG_ISDC(cardno); ++ SetSSP_Enable_rx(cardno, 0); ++ SetSSP_DMA(cardno, 0, 0); ++ ++ /* turn off output node */ ++ hda_cmd_rsp(cardno,0x1a, 0x707, 0); //pin complex => input enable, port-c LINE1 ++ hda_cmd_rsp(cardno,0x08, 0x3f0, 0x80);//pin amp => LINE ADC amplifier gain ++ hda_cmd_rsp(cardno,0x1a, 0x3f0, 0x80);//pin complex => mute ++ hda_cmd_rsp(cardno,0x23, 0x3f2, 0x80);//pin complex => no mute ++ ++ outl(0, hda_reg_base); ++ SSPClearFIFO(cardno, 0, 1); ++ //outl(inl(FTSSP010_INT_CONTROL(cardno)) & (~0x11), FTSSP010_INT_CONTROL(cardno)); ++} ++ +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_lib.c linux-3.4.110/sound/nds32/FTSSP010_lib.c +--- linux-3.4.110.orig/sound/nds32/FTSSP010_lib.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_lib.c 2016-04-07 10:20:51.062085666 +0200 +@@ -0,0 +1,795 @@ ++/* FTSSP010 - UDA1345TS module ++ * ++ * $log$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "FTSSP010_UDA1345TS.h" ++ ++#if 0 ++MODULE_LICENSE("Faraday License"); ++MODULE_AUTHOR("Faraday Technology Corp."); ++MODULE_DESCRIPTION("FTSSP010 - UDA1345TS Linux 2.6 Library"); ++#endif ++ ++#define PMU_PDLLCR1 (PMU_FTPMU010_VA_BASE+0x34) ++#define PMU_MFPSR (PMU_FTPMU010_VA_BASE+0x28) ++#define PMU_I2SAC97_REQACKCFG (PMU_FTPMU010_VA_BASE+0xbc) ++#define PMU_C4 (PMU_FTPMU010_VA_BASE+0xc4) ++ ++#define SSPCLK_TO_SCLKDIV(sspclk_div2,bps) ((sspclk_div2)/(bps)-1) ++ ++// Each client has this additional data ++struct alc5630_data { ++ struct i2c_client *client; ++ struct delayed_work work; ++ unsigned long gpio2_value; ++ struct mutex mtx; ++}; ++ ++//ADD by river 2011.01.26 ++static int i2s_alc5630_read(unsigned int raddr, char *data, struct i2c_client *client) ++{ ++#ifndef CONFIG_SND_FTSSP010_AC97 ++ struct i2c_adapter *adap = client->adapter; ++ int ret; ++#endif ++ struct i2c_msg msg; ++ int i2c_value; ++ ++ //Reading ALC5630 register ++ msg.addr = raddr; ++ msg.flags = (client->flags & I2C_M_TEN) | I2C_M_RD; 
++ msg.len = 1; ++ msg.buf = (char *)data; ++ ++ //ret = i2c_transfer(adap, &msg, 1); ++#ifndef CONFIG_SND_FTSSP010_AC97 ++ ret = i2c_transfer(adap, &msg, 1); ++ if (ret != 0) { ++ printk("i2c read failed\n"); ++ return -1; ++ } ++ else ++#endif ++ { ++ i2c_value = (data[0]&0xff) << 8 | (data[1]&0xff); ++ return i2c_value; ++ } ++} ++ ++static void i2s_alc5630_write(unsigned int raddr, unsigned int data, struct i2c_client *client) ++{ ++#ifndef CONFIG_SND_FTSSP010_AC97 ++ struct i2c_adapter *adap = client->adapter; ++ int ret; ++#endif ++ struct i2c_msg msg; ++ char buf[3]; ++ ++ //Writing ALC5630 register ++ msg.addr = raddr; ++ msg.flags = (client->flags & I2C_M_TEN) | ~I2C_M_RD; ++ msg.len = 1; ++ ++ buf[0] = (data >> 8) & 0xff; ++ buf[1] = data & 0xff; ++ msg.buf = (char *)buf; ++ ++ //ret = i2c_transfer(adap, &msg, 1); ++#ifndef CONFIG_SND_FTSSP010_AC97 ++ ret = i2c_transfer(adap, &msg, 1); ++ if (ret != 0) ++ { ++ printk("i2c write failed\n"); ++ } ++#endif ++} ++ ++/* ++static void i2s_alc5630_master_stereo_mode(struct i2c_client *client) ++{ ++ //printk(">>>>>>>>> (7) i2s_alc5630_master_stereo_mode() is called.\n"); ++ i2s_alc5630_write(0x02, 0x5f5f, client); ++ mdelay(50); ++ i2s_alc5630_write(0x04, 0x5f5f, client); ++ mdelay(50); ++ i2s_alc5630_write(0x26, 0x000f, client); ++ mdelay(50); ++ i2s_alc5630_write(0x34, 0x0000, client); // codec master mode ++ //i2s_alc5630_write(0x34, 0x8000, client); // codec slave mode ++ mdelay(50); ++ i2s_alc5630_write(0x3a, 0x0801, client); ++ mdelay(50); ++ i2s_alc5630_write(0x3c, 0xffff, client); ++ mdelay(50); ++ i2s_alc5630_write(0x3e, 0xffff, client); ++ mdelay(50); ++ i2s_alc5630_write(0x60, 0x3075, client); // codec master mode. divider. ++ mdelay(50); ++ i2s_alc5630_write(0x62, 0x1010, client); // codec master mode. divider. 
++ ++} ++*/ ++ ++/* ++static void i2s_alc5630_read_test(struct i2c_client *client) ++{ ++ char data[3]; ++ printk(">>>>> : i2s_alc5630_read_test().....\n"); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0, i2s_alc5630_read(0x0, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x02, i2s_alc5630_read(0x02, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x04, i2s_alc5630_read(0x04, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x06, i2s_alc5630_read(0x06, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x08, i2s_alc5630_read(0x08, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0a, i2s_alc5630_read(0x0a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0c, i2s_alc5630_read(0x0c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x0e, i2s_alc5630_read(0x0e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x10, i2s_alc5630_read(0x10, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x12, i2s_alc5630_read(0x12, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x14, i2s_alc5630_read(0x14, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x16, i2s_alc5630_read(0x16, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x18, i2s_alc5630_read(0x18, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1a, i2s_alc5630_read(0x1a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1c, i2s_alc5630_read(0x1c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x1e, i2s_alc5630_read(0x1e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x20, i2s_alc5630_read(0x20, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x22, i2s_alc5630_read(0x22, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x24, i2s_alc5630_read(0x24, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x26, i2s_alc5630_read(0x26, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x28, i2s_alc5630_read(0x28, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2a, i2s_alc5630_read(0x2a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2c, i2s_alc5630_read(0x2c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x2e, i2s_alc5630_read(0x2e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x30, i2s_alc5630_read(0x30, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x32, i2s_alc5630_read(0x32, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x34, i2s_alc5630_read(0x34, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x36, i2s_alc5630_read(0x36, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x38, i2s_alc5630_read(0x38, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3a, i2s_alc5630_read(0x3a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3c, i2s_alc5630_read(0x3c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x3e, i2s_alc5630_read(0x3e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x40, i2s_alc5630_read(0x40, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x42, i2s_alc5630_read(0x42, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x44, i2s_alc5630_read(0x44, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x46, i2s_alc5630_read(0x46, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x48, i2s_alc5630_read(0x48, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4a, i2s_alc5630_read(0x4a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4c, i2s_alc5630_read(0x4c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x4e, i2s_alc5630_read(0x4e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x50, i2s_alc5630_read(0x50, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x52, i2s_alc5630_read(0x52, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x54, i2s_alc5630_read(0x54, 
data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x56, i2s_alc5630_read(0x56, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x58, i2s_alc5630_read(0x58, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5a, i2s_alc5630_read(0x5a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5c, i2s_alc5630_read(0x5c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x5e, i2s_alc5630_read(0x5e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x60, i2s_alc5630_read(0x60, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x62, i2s_alc5630_read(0x62, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x64, i2s_alc5630_read(0x64, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x66, i2s_alc5630_read(0x66, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x68, i2s_alc5630_read(0x68, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6a, i2s_alc5630_read(0x6a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6c, i2s_alc5630_read(0x6c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x6e, i2s_alc5630_read(0x6e, data,client)); ++ ++ printk("Reg 0x%02x = 0x%08x\n", 0x70, i2s_alc5630_read(0x70, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x72, i2s_alc5630_read(0x72, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x74, i2s_alc5630_read(0x74, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x76, i2s_alc5630_read(0x76, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x78, i2s_alc5630_read(0x78, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7a, i2s_alc5630_read(0x7a, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7c, i2s_alc5630_read(0x7c, data,client)); ++ printk("Reg 0x%02x = 0x%08x\n", 0x7e, i2s_alc5630_read(0x7e, data,client)); ++ ++} ++*/ ++ ++static void i2s_al5630_slave_stereo_mode(struct i2c_client *client) ++{ ++ ++ ++ i2s_alc5630_write(0x34, 0x8000, client); // codec slave mode ++ i2s_alc5630_write(0x0c, 0x1010, client); ++ i2s_alc5630_write(0x10, 0xee03, client); ++ i2s_alc5630_write(0x1c, 0x0748, client); ++ //i2s_alc5630_write(0x02, 0x8080, client); ++ //i2s_alc5630_write(0x04, 0x8888, client); ++ i2s_alc5630_write(0x62, 0x0000, client); ++ ++} ++ ++//End ADD by river 2011.01.26 ++ ++ ++/* Drive PMU to generate I2S main clocking signal. Also configures PMU to set correct DMA REQ/ACK pair */ ++void pmu_set_i2s_clocking(unsigned int speed) ++{ ++ unsigned int pmu_pdllcr1; /* PLL/DLL Control Register 1 */ ++ /* Configure PMU to generate I2S main clock */ ++ #ifdef CONFIG_PLAT_AG101 ++ pmu_pdllcr1 = inl(PMU_PDLLCR1)&0xfff0ffff; /* Bit 19-16 are relevent */ ++ #endif ++ ++ switch (speed) { ++ case 8000: ++ pmu_pdllcr1 |= 0x00000000; /* 2.048MHz x2 */ ++ break; ++ case 11025: ++ pmu_pdllcr1 |= 0x00010000; /* 2.8224MHz x2 */ ++ break; ++ case 16000: ++ pmu_pdllcr1 |= 0x00020000; /* 4.096MHz x2 */ ++ break; ++ case 22050: ++ pmu_pdllcr1 |= 0x00030000; /* 5.6448MHz x2 */ ++ break; ++ case 32000: ++ pmu_pdllcr1 |= 0x00040000; /* 8.192MHz x2 */ ++ break; ++ case 44100: ++ pmu_pdllcr1 |= 0x00050000; /* 11.2896Mhz x2 */ ++ break; ++ case 48000: ++ pmu_pdllcr1 |= 0x00060000; /* 12.2880MHz x2 */ ++ break; ++ default: ++ printk("%s: Unknown i2s speed %d\n",__func__,speed); ++ }; ++ ++ #ifdef CONFIG_PLAT_AG101 ++ outl(pmu_pdllcr1, PMU_PDLLCR1); ++ /* Configure PMU to select I2S output (instead of AC97) */ ++ outl(inl(PMU_MFPSR)&(~(1<<3)), PMU_MFPSR); /* clear bit 3 of MFPSR*/ ++ #endif ++} ++ ++/* Drive PMU to generate AC97 main clocking signal. 
Also configures PMU to set correct DMA REQ/ACK pair */ ++void pmu_set_ac97_clocking(unsigned int speed) ++{ ++ /* Configure PMU to select AC97 output (instead of I2S) */ ++ /* Set GPIO[26] to AC97 clock, use 49.152MHz main clock (AC97 CLK1) */ ++ //outl(inl(PMU_MFPSR)|((1<<13)|(1<<3)), PMU_MFPSR); /* Set bit 13 & 3 of MFPSR*/ ++ #ifndef CONFIG_PLAT_AG102 ++ outl(inl(PMU_MFPSR)|((1<<13)|(1<<3)), PMU_MFPSR); /* Set bit 13 & 3 of MFPSR*/ ++ #endif ++} ++ ++/* Programs PMU to set I2S/AC97 DMA Channel, ch=0-7 */ ++void pmu_set_i2s_dma_channel(unsigned ch) ++{ ++ #ifdef CONFIG_PLAT_AG101 ++ ch&=0x7; ++ //outl((inl(PMU_I2SAC97_REQACKCFG)&(~0x7))|ch, PMU_I2SAC97_REQACKCFG); ++ outl(0xa, PMU_I2SAC97_REQACKCFG); ++ outl(0xb, PMU_C4); ++ #endif ++} ++ ++/* Initialize FTSSP010 to output to UDA1345TS via I2S */ ++#define FTSSP010_CONTROL0(x) (SSP_FTSSP010_va_base[(x)]+0x0) ++#define FTSSP010_CONTROL0_OPM_STEREO 0xC ++#define FTSSP010_CONTROL0_OPM_MONO 0x8 ++ ++#define FTSSP010_CONTROL1(x) (SSP_FTSSP010_va_base[(x)]+0x4) ++#define FTSSP010_CONTROL2(x) (SSP_FTSSP010_va_base[(x)]+0x8) ++ ++#define FTSSP010_INT_CONTROL(x) (SSP_FTSSP010_va_base[(x)]+0x10) ++#define FTSSP010_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0xC) ++#define FTSSP010_INT_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0x14) ++#define FTSSP010_DATA(x) (SSP_FTSSP010_va_base[(x)]+0x18) ++#define FTSSP010_INFO(x) (SSP_FTSSP010_va_base[(x)]+0x1C) ++ ++//static const unsigned int SSP_FTSSP010_va_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_VA_BASE }; ++//static const unsigned int SSP_FTSSP010_pa_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_PA_BASE }; ++ ++//ADD by river 2011.02.11 ++static struct i2c_client *g_i2c_client; ++ ++ ++#define FTSSP010_ACLINK_SLOT_VALID(x) (SSP_FTSSP010_va_base[(x)]+0x20) ++ ++void ftssp010_set_int_control(int cardno, unsigned val) ++{ ++ outl(val, FTSSP010_INT_CONTROL(cardno)); ++} ++ ++unsigned ftssp010_get_int_status(int cardno) ++{ ++ return (inl(FTSSP010_INT_STATUS(cardno))); ++} ++ ++int ftssp010_get_status(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))); ++} ++ ++int ftssp010_tx_fifo_not_full(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))&0x2)==0x2; ++} ++ ++int ftssp010_tx_fifo_vaild_entries(int cardno) ++{ ++ return (inl(FTSSP010_STATUS(cardno))>>12) & 0x1f; ++} ++ ++#include "FTSSP010_W83972D.h" ++ ++// AC97 codec tags ++#define TAG_COMMAND 0xe000 ++#define TAG_DATA 0x9800 /* Slot 3/4 */ ++#define TAG_DATA_MONO 0x9000 /* Slot 3 */ ++//#define TAG_DATA_LINE_IN 0x9000 /* Slot 3 */ ++ ++void ftssp010_ac97_write_codec_start(unsigned int cardno) ++{ ++ outl(0x0, FTSSP010_INT_CONTROL(cardno));/*Disable interrupts & DMA req */ ++ outl(0xC, FTSSP010_CONTROL2(cardno)); /* Disable FTSSP010, clear RX/TX Fifo. 
*/ ++ outl(TAG_COMMAND, FTSSP010_ACLINK_SLOT_VALID(cardno)); ++} ++ ++void ftssp010_ac97_write_codec(unsigned int cardno,unsigned int reg,unsigned int data) ++{ ++ outl(reg << 12, FTSSP010_DATA(cardno)); ++ mdelay(50); ++ outl(data << 4, FTSSP010_DATA(cardno)); ++ mdelay(50); ++} ++ ++void ftssp010_ac97_write_codec_commit(unsigned int cardno) ++{ ++ while((inl(FTSSP010_CONTROL2(cardno))&0x1)==0) { ++ outl(0x3 , FTSSP010_CONTROL2(cardno)); /* SSPEN + TXDOE */ ++ } ++ while(ftssp010_tx_fifo_vaild_entries(cardno)) ++ ; ++ /* Wait for frame completion */ ++ while((inl(FTSSP010_INT_STATUS(cardno))&0x10)==0) ++ ; ++ outl(0x0, FTSSP010_CONTROL2(cardno)); ++} ++ ++/* Configure FTSSP010 to a given sampling rate and channel number ++ * for AC97 mode in playback mode ++ */ ++void init_hw(unsigned int cardno,unsigned int ac97, struct i2c_client *client) ++{ ++ ++ //printk(">>>>>>>>>> (5) init_hw() is called.\n"); ++ g_i2c_client = client; ++ ++ if(ac97) ++ { ++ //pmu_set_ac97_clocking(48000); ++ #ifndef CONFIG_PLAT_AG102 ++ pmu_set_ac97_clocking(48000); ++ #endif ++ outl(0x400c, FTSSP010_CONTROL0(cardno)); /* set FTSSP010 to AC97 mode */ ++ mdelay(50); ++ outl(0xc400, FTSSP010_INT_CONTROL(cardno)); ++ mdelay(50); ++ outl(0x20, FTSSP010_CONTROL2(cardno)); /* Cold Reset AC-Link */ ++ mdelay(50); ++ while(inl(FTSSP010_CONTROL2(cardno))) ++ mdelay(50); ++ outl(0x40, FTSSP010_CONTROL2(cardno)); /* Reset AC-Link */ ++ mdelay(1500); ++ } ++ else ++ { ++ ++ //printk(">>>>>>>>> (6) I2S mode selected....YAYAYA......\n"); ++ #ifdef CONFIG_PLAT_AG101 ++ outl(inl(PMU_MFPSR)&(~(1<<3)), PMU_MFPSR); /* clear bit 3 of MFPSR*/ ++ outl(0xa, PMU_I2SAC97_REQACKCFG); ++ outl(0xb, PMU_C4); ++ #endif ++ ++ //MOD by river 2011.01.26 ++ //i2s_alc5630_master_stereo_mode(client); ++ i2s_al5630_slave_stereo_mode(client); ++ //ssp_slave_stereo_mode(); ++ //End MOD by river 2011.01.26 ++ outl(0x311c, FTSSP010_CONTROL0(cardno)); /* I2S Master */ ++ outl(0, FTSSP010_CONTROL1(cardno)); /* I2S Master */ ++ outl(0xc400, FTSSP010_INT_CONTROL(cardno)); /* I2S Master */ ++ outl(0x40, FTSSP010_CONTROL2(cardno)); /* Reset AC-Link */ ++ ++ //i2s_alc5630_read_test(client); ++ } ++} ++static void _ftssp010_config_ac97(int cardno, unsigned is_stereo, unsigned speed, int is_rec) ++{ ++ /* Codec initialization */ ++ ftssp010_ac97_write_codec_start(cardno); ++ ftssp010_ac97_write_codec(cardno, W83972D_RESET, 0); ++ ftssp010_ac97_write_codec_commit(cardno); ++ ++ msleep_interruptible(10); ++ ftssp010_ac97_write_codec_start(cardno); ++ ++ if (is_rec) { /* Recording */ ++ /* Mute output */ ++ //ftssp010_ac97_write_codec(cardno, W83972D_STEREO_OUTPUT_CONTROL, 0x8000); ++ /* Mute PCM */ ++ //ftssp010_ac97_write_codec(cardno, W83972D_PCM_OUTPUT_CONTROL, 0x8000); ++ ++ /* Register 0x10, Line-In/Mic Gain */ ++ ftssp010_ac97_write_codec(cardno, W83972D_LINE_IN_VOLUME, 0x808); ++ //ftssp010_ac97_write_codec(cardno, W83972D_AUX_INPUT_CONTROL, 0x808); ++ ftssp010_ac97_write_codec(cardno, W83972D_MIC_VOLUME, 0x8); ++ /* FIXME: REC from line-in only */ ++ ++ /* Register 0x1A, Record Select=StereoMix */ ++ ftssp010_ac97_write_codec(cardno, W83972D_RECORD_SELECT, 0x505 /*404*/); ++ /* Register 0x1C, Record Gain=0db */ ++ ftssp010_ac97_write_codec(cardno, W83972D_RECORD_GAIN, 0x808); ++ ftssp010_ac97_write_codec(cardno, W83972D_RECORD_GAIN_MIC, 0x8); ++ } else { /* Playback */ ++ /* Register 0x10, Mute Line-In/Mic Gain */ ++ ftssp010_ac97_write_codec(cardno, W83972D_LINE_IN_VOLUME, 0x8000); ++ ftssp010_ac97_write_codec(cardno, W83972D_MIC_VOLUME, 0x8000); ++ ++ /* 
Register 0x1A, Mute Record Gains */ ++ ftssp010_ac97_write_codec(cardno, W83972D_RECORD_GAIN, 0x8000); ++ ftssp010_ac97_write_codec(cardno, W83972D_RECORD_GAIN_MIC, 0x8000); ++ ++ /* Output */ ++ ftssp010_ac97_write_codec(cardno, W83972D_STEREO_OUTPUT_CONTROL, 0); ++ ftssp010_ac97_write_codec(cardno, W83972D_PCM_OUTPUT_CONTROL, 0x808); ++ } ++ ++#if 0 ++ ftssp010_ac97_write_codec(cardno, W83972D_EXT_AUDIO_CONTROL, 0x1); ++ ftssp010_ac97_write_codec(cardno, W83972D_DAC_SAMPLE_RATE_CONTROL, speed); ++#endif ++ ++ ftssp010_ac97_write_codec_commit(cardno); ++ msleep_interruptible(10); ++ ++ /* Start data transfer */ ++// if(is_rec) { ++// outl(TAG_DATA_LINE_IN, FTSSP010_ACLINK_SLOT_VALID(cardno)); ++// } else { ++ if(is_stereo) ++ outl(TAG_DATA, FTSSP010_ACLINK_SLOT_VALID(cardno)); ++ else ++ outl(TAG_DATA_MONO, FTSSP010_ACLINK_SLOT_VALID(cardno)); ++// } ++ while(inl(FTSSP010_INT_STATUS(cardno))&0x3); ++// msleep_interruptible(10); ++} ++ ++void ftssp010_config_ac97_play(int cardno, unsigned is_stereo, unsigned speed, int use8bit) ++{ ++ _ftssp010_config_ac97(cardno, is_stereo, speed, 0); ++} ++ ++void ftssp010_config_ac97_rec(int cardno, unsigned is_stereo, unsigned speed, int use8bit) ++{ ++ _ftssp010_config_ac97(cardno, is_stereo, speed, 1); ++} ++/* ++ * Configure FTSSP010 to a given sampling rate and channel number ++ * for I2S mode ++ */ ++void ftssp010_config(int cardno, unsigned is_stereo, unsigned speed, int width, int is_rec) ++{ ++ int use8bit = (width == 1 ? 1 : 0); ++ unsigned opm, bps = 2 * (use8bit ? 8 : 16); /* bits per 1 second audio data. */ ++ unsigned fpclkdiv = 0; ++ ++ //ADD by river 2011.06.02 ++ struct alc5630_data *alc5630; ++ char data[3]; ++ opm = is_stereo ? FTSSP010_CONTROL0_OPM_STEREO : FTSSP010_CONTROL0_OPM_MONO; ++ //MOD by river 2011.01.27 ++ outl(0x3100 | opm, FTSSP010_CONTROL0(cardno)); /* I2S Master */ ++ //End MOD by river 2011.01.27 ++ ++#if 0 ++ printk("%s: use %dHz %d-bit %s \n",__func__, ++ speed, ++ use8bit?8:16, ++ is_stereo?"stereo":"mono" ++ ); ++#endif ++ ++ /* configures CONTROL1 to use suitable clock divider. ++ the I2S clock is generated from PMU. */ ++ bps *= speed; ++ switch(speed) { ++ case 8000: /* SCLK : 256KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); //? ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ //i2s_alc5630_write(0x44, 0x6a0, g_i2c_client); //? ++ //i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ //i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0xBB; ++ ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(2048000, bps); ++ break; ++ case 11025: /* SCLK : 352.8KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); //? ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x88; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(2822400, bps); ++ break; ++ case 16000: /* SCLK : 512KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); //? 
++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x5f; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(4096000, bps); ++ break; ++ case 22050: /* SCLK : 705.6KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x45; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(5644800, bps); ++ break; ++ case 24000: /* SCLK : 768KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x3e; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(5644800, bps); ++ break; ++ case 32000: /* SCLK : 1024KHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ //fpclkdiv = 0x2e; ++ fpclkdiv = 0x2f; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(8192000, bps); ++ break; ++ case 44100: /* SCLK : 1.4112 MHZ */ /* 96 MHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x22; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(11289600, bps); ++ break; ++ case 48000: /* SCLK : 1.536 MHZ */ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x60, 0x3174, g_i2c_client); ++ i2s_alc5630_write(0x62, 0x1010, g_i2c_client); ++ fpclkdiv = 0x1f; ++ //fpclkdiv=SSPCLK_TO_SCLKDIV(12288000, bps); ++ ++ break; ++ default: ++ printk("%s: unsupported speed %d\n", __func__,speed); ++ return; ++ }; ++ ++ ++ if(!use8bit) { ++ outl(0xf0000|fpclkdiv, FTSSP010_CONTROL1(cardno)); /* 16bits */ ++ //outl(0xf0000|0x10, FTSSP010_CONTROL1(cardno)); /* 16bits */ ++ //outl(0xf0000|0x22, FTSSP010_CONTROL1(cardno)); /* 16bits */ ++ } else { ++ outl(0x70000|fpclkdiv, FTSSP010_CONTROL1(cardno)); /* 8bits */ ++ } ++ ++ //printk("#####$$$$$ : bps = %d\n", bps); ++ //printk("#####$$$$$ : speed = %d\n", speed); ++ //printk("#####$$$$$ : fpclkdiv = 0x%08x\n", fpclkdiv); ++ //printk("#####$$$$$ : FTSSP010_CONTROL1(cardno) = 0x%08x\n", inl(FTSSP010_CONTROL1(cardno))); ++ ++ if(is_rec) ++ outl(inl(FTSSP010_INT_CONTROL(cardno))&(~0x0f15), FTSSP010_INT_CONTROL(cardno)); /* Disable all interrupts */ ++ else ++ outl(inl(FTSSP010_INT_CONTROL(cardno))&(~0xf02a) , FTSSP010_INT_CONTROL(cardno)); /* Disable all interrupts */ ++ ++ outl(0xc, FTSSP010_CONTROL2(cardno)); /* clear FIFOs */ ++ ++ //ADD by river 2011.06.02 ++ alc5630 = i2c_get_clientdata(g_i2c_client); ++ //End ++ ++ if(is_rec) { ++ printk("ftssp010_config() for I2S mode in record.\n"); ++ //ADD by river 2011.03.21 ++ //TEST by river 2011.03.22 for recording => workable ++ //i2s_alc5630_write(0x0e, 0x8888, g_i2c_client); ++ i2s_alc5630_write(0x0e, 0x0808, g_i2c_client); ++ i2s_alc5630_write(0x10, 0xee03, g_i2c_client); ++ i2s_alc5630_write(0x22, 0x0500, g_i2c_client); ++ i2s_alc5630_write(0x1c, 0x0748, g_i2c_client); ++ i2s_alc5630_write(0x14, 0x1f1f, g_i2c_client); ++ i2s_alc5630_write(0x12, 0xdfdf, g_i2c_client); ++ ++ i2s_alc5630_write(0x26, 0x000f, g_i2c_client); ++ i2s_alc5630_write(0x3a, 0xffff, g_i2c_client); ++ i2s_alc5630_write(0x3c, 0xffff, g_i2c_client); ++ //i2s_alc5630_write(0x3e, 0xffff, g_i2c_client); ++ i2s_alc5630_write(0x3e, 0x80cf, g_i2c_client); ++ ++ i2s_alc5630_write(0x44, 0x3ea0, g_i2c_client); ++ i2s_alc5630_write(0x42, 0x2000, g_i2c_client); ++ i2s_alc5630_write(0x40, 0x8c0a, g_i2c_client); ++ ++ //i2s_alc5630_write(0x02, 0x0000, 
g_i2c_client); ++ i2s_alc5630_write(0x02, 0x8080, g_i2c_client); ++ i2s_alc5630_write(0x04, 0x0000, g_i2c_client); ++ //End TEST by river 2011.03.22 ++ ++ //printk("ftssp010_config() for I2S mode (Recording) ===> Dump register.\n"); ++ //i2s_alc5630_read_test(g_i2c_client); ++ ++ } ++ else { ++ //printk("ftssp010_config() for I2S mode in playback.\n"); ++ //ADD by river 2011.03.24 for record and playback case ++ i2s_alc5630_write(0x0e, 0x0808, g_i2c_client); ++ i2s_alc5630_write(0x12, 0xcbcb, g_i2c_client); ++ i2s_alc5630_write(0x14, 0x7f7f, g_i2c_client); ++ i2s_alc5630_write(0x22, 0x0000, g_i2c_client); ++ i2s_alc5630_write(0x3e, 0x8000, g_i2c_client); ++ i2s_alc5630_write(0x40, 0x0c0a, g_i2c_client); ++ i2s_alc5630_write(0x42, 0x0000, g_i2c_client); ++ //End ADD by river 2011.03.24 for record and playback case ++ ++ //TEST by river 2011.03.23 ++ i2s_alc5630_write(0x26, 0x0000, g_i2c_client); ++ i2s_alc5630_write(0x3c, 0x2000, g_i2c_client); ++ i2s_alc5630_write(0x3a, 0x0002, g_i2c_client); ++ i2s_alc5630_write(0x3c, 0xa330, g_i2c_client); ++ i2s_alc5630_write(0x3a, 0xc843, g_i2c_client); ++ //End TEST by river 2011.03.23 ++ ++ //ADD by river 2011.03.23 for HP Out De-pop ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0002 , g_i2c_client); ++ i2s_alc5630_write(0x04, i2s_alc5630_read(0x04, data,g_i2c_client)|0x8080 , g_i2c_client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0040 , g_i2c_client); ++ i2s_alc5630_write(0x3c, i2s_alc5630_read(0x3C, data,g_i2c_client)|0x2000 , g_i2c_client); ++ i2s_alc5630_write(0x3E, i2s_alc5630_read(0x3E, data,g_i2c_client)|0xfC00 , g_i2c_client); ++ i2s_alc5630_write(0x5E, i2s_alc5630_read(0x5E, data,g_i2c_client)|0x0100 , g_i2c_client); ++ mdelay(500); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0200 , g_i2c_client); ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0100 , g_i2c_client); ++ i2s_alc5630_write(0x5E, i2s_alc5630_read(0x5E, data,g_i2c_client)& 0xfeff ,g_i2c_client); ++ //End ADD by river 2011.03.23 for HP Out De-pop ++ ++ ++ //TEST by river 2011.03.22 ++ i2s_alc5630_write(0x1c, 0x0748, g_i2c_client); ++ //End TEST by river 2011.03.22 ++ ++ ++ //TEST by river 2011.03.16 ++ //i2s_alc5630_write(0x02, 0x8080, g_i2c_client); ++ i2s_alc5630_write(0x26, 0x000f, g_i2c_client); ++ //End TEST by river 2011.03.16 ++ ++ //ADD by river 2011.03.23 ++ if (alc5630->gpio2_value==0x0) ++ i2s_alc5630_write(0x3A, (i2s_alc5630_read(0x3A, data,g_i2c_client) & 0xFBFF)|0x0040 , g_i2c_client); ++ else ++ i2s_alc5630_write(0x3A, i2s_alc5630_read(0x3A, data,g_i2c_client)|0x0440 , g_i2c_client); ++ ++ ++ i2s_alc5630_write(0x5E, i2s_alc5630_read(0x5E, data,g_i2c_client)|0x0020 , g_i2c_client); ++ i2s_alc5630_write(0x5E, i2s_alc5630_read(0x5E, data,g_i2c_client)|0x00c0 , g_i2c_client); ++ i2s_alc5630_write(0x04, i2s_alc5630_read(0x04, data,g_i2c_client)& 0x7f7f , g_i2c_client); ++ mdelay(30); ++ ++ if (alc5630->gpio2_value==0x0) { ++ //printk(">>>>>>>>>>>> Turn off internal speaker.....\n"); ++ i2s_alc5630_write(0x02, 0x5F5F, g_i2c_client); ++ //i2s_alc5630_write(0x02, 0x0000, g_i2c_client); ++ } ++ else { ++ //printk(">>>>>>>>>>>> Turn on internal speaker.....\n"); ++ i2s_alc5630_write(0x02, 0x0000, g_i2c_client); ++ //i2s_alc5630_write(0x02, 0x5F5F, g_i2c_client); ++ } ++ //End ADD by river 2011.03.23 ++ ++ ++ //printk("ftssp010_config() for I2S mode (Playback) ===> Dump register.\n"); ++ //i2s_alc5630_read_test(g_i2c_client); ++ } ++ ++#if 0 ++ /* Stuff TX fifo */ ++ 
while(ftssp010_tx_fifo_not_full(cardno)) { ++ outl(0x0, FTSSP010_DATA(cardno)); ++ } ++#endif ++} ++ ++void ftssp010_config_tx(int cardno, unsigned is_stereo, unsigned speed, int width) ++{ ++ return ftssp010_config(cardno, is_stereo, speed, width, 0); ++} ++ ++void ftssp010_config_rx(int cardno, unsigned is_stereo, unsigned speed, int width) ++{ ++ return ftssp010_config(cardno, is_stereo, speed, width, 1); ++} ++ ++/* Configures FTSSP010 to start TX. If use_dma being nonzero, ++ * FTSSP010 will use hardware handshake for DMA */ ++void ftssp010_start_tx(int cardno, unsigned use_dma) ++{ ++ unsigned bogus=0x800*3; ++ if(use_dma) { ++ /* Enable H/W DMA Request and set TX DMA threshold to 12*/ ++// outl(0xC022, FTSSP010_INT_CONTROL(cardno)); ++ outl(inl(FTSSP010_INT_CONTROL(cardno)) | 0xc422, FTSSP010_INT_CONTROL(cardno)); ++#if 0 ++ printk("%s: enable DMA request\n", __func__); ++#endif ++ } ++// outl(0x3, FTSSP010_CONTROL2(cardno)); ++ outl(inl(FTSSP010_CONTROL2(cardno)) | 0x3, FTSSP010_CONTROL2(cardno)); ++// printk("%s\n",__func__); ++// printk("int_control 0x%x\n",inl(FTSSP010_INT_CONTROL(cardno))); ++// printk("control2 0x%x\n",inl(FTSSP010_CONTROL2(cardno))); ++ if(!use_dma) { ++ while(bogus>0) { ++ while(!ftssp010_tx_fifo_not_full(cardno)) ++ udelay(50); ++ outl(0, FTSSP010_DATA(cardno)); ++ bogus--; ++ } ++ } ++} ++ ++/* Configures FTSSP010 to start RX. If use_dma being nonzero, ++ * FTSSP010 will use hardware handshake for DMA */ ++void ftssp010_start_rx(int cardno, unsigned use_dma) ++{ ++ if(use_dma) { ++ /* Enable H/W DMA Request and set RX DMA threshold to 2*/ ++// outl(0x0111, FTSSP010_INT_CONTROL(cardno)); ++ outl(inl(FTSSP010_INT_CONTROL(cardno)) | 0xc411, FTSSP010_INT_CONTROL(cardno)); ++#if 0 ++ printk("%s: enable DMA request\n", __func__); ++#endif ++ } ++// outl(0x3, FTSSP010_CONTROL2(cardno)); ++ outl(inl(FTSSP010_CONTROL2(cardno)) | 0x3, FTSSP010_CONTROL2(cardno)); ++// printk("%s\n",__func__); ++// printk("int_control 0x%x\n",inl(FTSSP010_INT_CONTROL(cardno))); ++// printk("control2 0x%x\n",inl(FTSSP010_CONTROL2(cardno))); ++} ++ ++void ftssp010_stop_tx(int cardno) ++{ ++// outl(0, FTSSP010_CONTROL2(cardno)); ++ outl(inl(FTSSP010_INT_CONTROL(cardno)) & (~0x22), FTSSP010_INT_CONTROL(cardno)); ++// printk("%s\n",__func__); ++// printk("int_control 0x%x\n",inl(FTSSP010_INT_CONTROL(cardno))); ++// printk("control2 0x%x\n",inl(FTSSP010_CONTROL2(cardno))); ++} ++ ++void ftssp010_stop_rx(int cardno) ++{ ++ //outl(0, FTSSP010_CONTROL2(cardno)); ++ outl(inl(FTSSP010_INT_CONTROL(cardno)) & (~0x11), FTSSP010_INT_CONTROL(cardno)); ++ //printk("%s\n",__func__); ++ //printk("int_control 0x%x\n",inl(FTSSP010_INT_CONTROL(cardno))); ++ //printk("control2 0x%x\n",inl(FTSSP010_CONTROL2(cardno))); ++} ++ +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_UDA1345TS.h linux-3.4.110/sound/nds32/FTSSP010_UDA1345TS.h +--- linux-3.4.110.orig/sound/nds32/FTSSP010_UDA1345TS.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_UDA1345TS.h 2016-04-07 10:20:51.066085821 +0200 +@@ -0,0 +1,81 @@ ++/* FTSSP010 - UDA1345TS supporting library header */ ++/* ++ * ++ * $log$ ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Programming sequence: ++ * Suppose your playback format is 44.1KHz, 16 bit stereo ++ * PIO mode: ++ * pmu_set_i2s_clocking(44100); ++ * ftssp010_config(1, 44100, 0); ++ * ftssp010_start_tx(0); ++ * while(ftssp010_tx_fifo_not_full()) { ++ * Poke_your_PCM_data_to_FTSSP_data_port ++ * ++ * DMA mode: ++ * 
pmu_set_i2s_clocking(44100); ++ * ftssp010_config(1, 44100); ++ * ++ * pmu_set_i2s_dma_channel(ch); ++ * ftssp010_start_tx(1); ++ * ++ * ftssp010_stop_tx(); ++ */ ++#define FTSSP010_DATA(x) (SSP_FTSSP010_va_base[(x)]+0x18) ++#define FTSSP010_DATA_PA(x) (SSP_FTSSP010_pa_base[(x)]+0x18) ++ ++/* Initialize FTSSP010 to output to UDA1345TS via I2S */ ++#define FTSSP010_CONTROL0(x) (SSP_FTSSP010_va_base[(x)]+0x0) ++#define FTSSP010_CONTROL0_OPM_STEREO 0xC ++#define FTSSP010_CONTROL0_OPM_MONO 0x8 ++ ++#define FTSSP010_CONTROL1(x) (SSP_FTSSP010_va_base[(x)]+0x4) ++#define FTSSP010_CONTROL2(x) (SSP_FTSSP010_va_base[(x)]+0x8) ++ ++#define FTSSP010_INT_CONTROL(x) (SSP_FTSSP010_va_base[(x)]+0x10) ++#define FTSSP010_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0xC) ++#define FTSSP010_INT_STATUS(x) (SSP_FTSSP010_va_base[(x)]+0x14) ++#define FTSSP010_DATA(x) (SSP_FTSSP010_va_base[(x)]+0x18) ++#define FTSSP010_INFO(x) (SSP_FTSSP010_va_base[(x)]+0x1C) ++ ++static const unsigned int SSP_FTSSP010_va_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_VA_BASE }; ++static const unsigned int SSP_FTSSP010_pa_base[SSP_FTSSP010_IRQ_COUNT] = { SSP_FTSSP010_PA_BASE }; ++ ++/* Drive PMU to generate I2S main clocking signal. Also configures PMU to set correct DMA REQ/ACK pair */ ++extern void pmu_set_i2s_clocking(unsigned int speed); ++/* Programs PMU to set I2S/AC97 DMA Channel, ch=0-7 */ ++extern void pmu_set_i2s_dma_channel(unsigned ch); ++ ++/* Drive PMU to generate AC97 main clocking signal. Also configures PMU to set correct DMA REQ/ACK pair */ ++extern void pmu_set_ac97_clocking(unsigned int speed); ++ ++/* Returns FTSSP010 status */ ++extern void ftssp010_set_int_control(int cardno, unsigned val); ++extern int ftssp010_get_status(int cardno); ++extern unsigned ftssp010_get_int_status(int cardno); ++/* Polls FIFO full register */ ++extern int ftssp010_tx_fifo_not_full(int cardno); ++/* Configure FTSSP010 to a given sampling rate and channel number */ ++extern void ftssp010_config_tx(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++extern void ftssp010_config_rx(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++ ++/* Configure FTSSP010 to a given sampling rate and channel number */ ++extern void ftssp010_config_ac97_play(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++ ++extern void ftssp010_config_ac97_rec(int cardno, unsigned is_stereo, unsigned speed, int use8bit); ++ ++/* Initialize FTSSP010 to output to UDA1345TS via I2S */ ++extern void ftssp010_start_tx(int cardno, unsigned use_dma); ++extern void ftssp010_start_rx(int cardno, unsigned use_dma); ++extern void ftssp010_stop_tx(int cardno); ++extern void ftssp010_stop_rx(int cardno); ++ ++ +diff -Nur linux-3.4.110.orig/sound/nds32/FTSSP010_W83972D.h linux-3.4.110/sound/nds32/FTSSP010_W83972D.h +--- linux-3.4.110.orig/sound/nds32/FTSSP010_W83972D.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/FTSSP010_W83972D.h 2016-04-07 10:20:51.066085821 +0200 +@@ -0,0 +1,17 @@ ++/* AC97 Codec related */ ++ ++ ++/* Register Index for Winbond W83972D AC97 Codec */ ++#define W83972D_RESET 0x0 ++#define W83972D_STEREO_OUTPUT_CONTROL 0x2 ++#define W83972D_MIC_VOLUME 0xE ++#define W83972D_LINE_IN_VOLUME 0x10 ++#define W83972D_AUX_INPUT_CONTROL 0x16 ++#define W83972D_PCM_OUTPUT_CONTROL 0x18 ++#define W83972D_RECORD_SELECT 0x1A ++#define W83972D_RECORD_GAIN 0x1C ++#define W83972D_RECORD_GAIN_MIC 0x1E ++#define W83972D_EXT_AUDIO_CONTROL 0x2A ++#define W83972D_DAC_SAMPLE_RATE_CONTROL 0x2C ++#define W83972D_VER1 0x7C ++#define 
W83972D_VER2 0x7E +diff -Nur linux-3.4.110.orig/sound/nds32/hda.h linux-3.4.110/sound/nds32/hda.h +--- linux-3.4.110.orig/sound/nds32/hda.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/hda.h 2016-04-07 10:20:51.066085821 +0200 +@@ -0,0 +1,106 @@ ++// HDA Mode definition ++ #define HDA_MODE 0x80000000 ++ ++ // HDA CTRL definition ++ #define HDA_CRST_SET 0x00 ++ #define HDA_CRST_CLR 0x20 ++ #define HDA_CRST_MASK 0x20 ++ #define HDA_RST_FCNT_OFS 8 ++ ++ // HDA LNKST definition ++ #define HDA_COD_ALIVE 0x20000000 ++ ++ // HDA INTC definition ++ #define HDA_INTC_USOLEN 0x40000 ++ #define HDA_INTC_SDIWEN 0x20000 ++ #define HDA_INTC_CRINTE 0x10000 ++ ++ // HDA INTST definition ++ #define HDA_INTST_USOLEN 0x80 ++ #define HDA_INTST_SDIWEN 0x40 ++ #define HDA_INTST_CRINTE 0x20 ++ ++ // HDA ICMDST mask & offset ++ #define HDA_IRRADD_MASK 0xf0 ++ #define HDA_IRRUNSOL_MASK 0x08 ++ #define HDA_IRV_MASK 0x02 ++ #define HDA_ICB_MASK 0x01 ++ #define HDA_IRRADD_OFFSET 4 ++ #define HDA_IRRUNSOL_OFFSET 3 ++ #define HDA_IRV_OFFSET 1 ++ #define HDA_ICB_OFFSET 0 ++ //HDA OSDC/ISDC bit offset ++ #define HDA_SDC_STNUM_OFFSET 28 ++ #define HDA_SDC_SRUN_OFFSET 15 ++ #define HDA_SDC_BASE_OFFSET 14 ++ #define HDA_SDC_MULT_OFFSET 11 ++ #define HDA_SDC_DIV_OFFSET 8 ++ #define HDA_SDC_BITS_OFFSET 4 ++ #define HDA_SDC_CHAN_OFFSET 0 ++ ++ // HDA Codec CMD ++ #define HDA_COD_DEVADDR 0x00000000 ++ #define HDA_ROOT_NODE 0x00000000 ++ #define HDA_AUDIO_NODE 0x00100000 ++ #define HDA_OUTCOV_NODE 0x00200000 ++ //#define HDA_INCOV_NODE 0x0FF00000 ++ #define HDA_INCOV_NODE 0x00800000 //kane, for real CODEC node mapping ++ #define HDA_CMD_FGRST 0x0007FF00 ++ #define HDA_CMD_GETVID 0x000F0000 ++ #define HDA_CMD_SUBNCNT 0x000F0004 ++ #define HDA_CMD_FGTYPE 0x000F0005 ++ #define HDA_CMD_AUDIOWCAP 0x000F0009 ++ #define HDA_CMD_SETCVTSTR 0x00070600 ++ #define HDA_CMD_GETCVTSTR 0x000F0600 ++ #define HDA_CMD_SETCVTFMT 0x00020000 ++ #define HDA_CMD_GETCVTFMT 0x000A0000 ++ #define HDA_CMD_SETPROCST 0x00070300 ++ #define HDA_CMD_TRIGUNSOL 0x00070400 ++ //HDA Codec Resp ++ #define HDA_RESP_ZERO_VAL 0x00000000 ++ #define HDA_RESP_EXP_VID 0x10ec0888 ++ #define HDA_RSP_NODNUM_MSK 0xff0000 ++ #define HDA_RSP_NODCNT_MSK 0x0000ff ++ #define HDA_RSP_NODNUM_OFS 16 ++ #define HDA_RSP_NODCNT_OFS 0 ++ #define HDA_RSP_FGNTYPE_MSK 0xff ++ #define HDA_RSP_FGNUSCAP_MSK 0x100 ++ #define HDA_RSP_FGNTYPE_OFS 0 ++ #define HDA_RSP_FGNUSCAP_OFS 8 ++ #define HDA_RSP_AWCTYPE_MSK 0xf00000 ++ #define HDA_RSP_AWCUSCAP_MSK 0x000080 ++ #define HDA_RSP_AWCSTE_MSK 0x000001 ++ #define HDA_RSP_AWCTYPE_OFS 20 ++ #define HDA_RSP_AWCUSCAP_OFS 7 ++ #define HDA_RSP_AWCSTE_OFS 0 ++ #define HDA_RSP_CVTCHA_MSK 0x00 ++ #define HDA_RSP_CVTSTR_MSK 0xf0 ++ #define HDA_RSP_FMTTYPE_MSK 0x8000 ++ #define HDA_RSP_FMTBASE_MSK 0x4000 ++ #define HDA_RSP_FMTMULT_MSK 0x3800 ++ #define HDA_RSP_FMTDIV_MSK 0x0700 ++ #define HDA_RSP_FMTBITS_MSK 0x0070 ++ #define HDA_RSP_FMTCHNUM_MSK 0x000f ++ //HDA Func Group definition ++ #define HDA_FG_AUDIO 0x1 ++ // HDA Audio Widiget definition ++ #define HDA_AWC_INPUT 0x1 ++ #define HDA_AWC_OUTPUT 0x0 ++ #define HDA_AWC_MONO 0x0 ++ #define HDA_AWC_STEREO 0x1 ++ // HDA Converter Format definition ++ #define HDA_FMT_TYPE_OFS 15 ++ #define HDA_FMT_BASE_OFS 14 ++ #define HDA_FMT_MULT_OFS 11 ++ #define HDA_FMT_DIV_OFS 8 ++ #define HDA_FMT_BITS_OFS 4 ++ #define HDA_FMT_CHNUM_OFS 0 ++ // HDA Coverter Stream definition ++ #define HDA_STR_STR_OFS 4 ++ #define HDA_STR_CHA_OFS 0 ++ // HDA IOSDC definition ++ #define HDA_IN_STR 0 ++ #define 
HDA_OUT_STR 1 ++ #define HDA_STR_STOP 0 ++ #define HDA_STR_RUN 1 ++ +diff -Nur linux-3.4.110.orig/sound/nds32/Kconfig linux-3.4.110/sound/nds32/Kconfig +--- linux-3.4.110.orig/sound/nds32/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/Kconfig 2016-04-07 10:20:51.066085821 +0200 +@@ -0,0 +1,22 @@ ++menu "ALSA NDS32 devices" ++ depends on SND!=n && NDS32 ++ ++config SND_FTSSP010 ++ tristate "Faraday FTSSP010 audio Driver" ++ depends on SND && NDS32 ++ select SND_PCM ++# select SND_AC97_CODEC ++ ++choice ++ prompt "AC97/I2S/HDA selection" ++ depends on SND_FTSSP010 ++ default SND_FTSSP010_AC97 ++config SND_FTSSP010_AC97 ++ bool "AC97" ++config SND_FTSSP010_I2S ++ bool "I2S" ++config SND_FTSSP010_HDA ++ bool "HDA" ++endchoice ++endmenu ++ +diff -Nur linux-3.4.110.orig/sound/nds32/Makefile linux-3.4.110/sound/nds32/Makefile +--- linux-3.4.110.orig/sound/nds32/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/sound/nds32/Makefile 2016-04-07 10:20:51.066085821 +0200 +@@ -0,0 +1,10 @@ ++ifeq ($(CONFIG_SND_FTSSP010_AC97),y) ++snd-ftssp010-objs := FTSSP010_ALSA.o FTSSP010_lib.o ++endif ++ifeq ($(CONFIG_SND_FTSSP010_I2S),y) ++snd-ftssp010-objs := FTSSP010_ALSA.o FTSSP010_lib.o ++endif ++ifeq ($(CONFIG_SND_FTSSP010_HDA),y) ++snd-ftssp010-objs := FTSSP010_HDA.o FTSSP010_HDA_lib.o ++endif ++obj-$(CONFIG_SND_FTSSP010) += snd-ftssp010.o diff --git a/target/linux/patches/3.4.113/non-static.patch b/target/linux/patches/3.4.113/non-static.patch new file mode 100644 index 000000000..a967703d0 --- /dev/null +++ b/target/linux/patches/3.4.113/non-static.patch @@ -0,0 +1,33 @@ +diff -Nur linux-2.6.39-rc6.orig/fs/namei.c linux-2.6.39-rc6/fs/namei.c +--- linux-2.6.39-rc6.orig/fs/namei.c 2011-05-04 04:59:13.000000000 +0200 ++++ linux-2.6.39-rc6/fs/namei.c 2011-05-05 11:30:14.000000000 +0200 +@@ -1769,7 +1769,7 @@ + * needs parent already locked. Doesn't follow mounts. + * SMP-safe. + */ +-static struct dentry *lookup_hash(struct nameidata *nd) ++struct dentry *lookup_hash(struct nameidata *nd) + { + return __lookup_hash(&nd->last, nd->path.dentry, nd); + } +diff -Nur linux-2.6.39-rc6.orig/fs/splice.c linux-2.6.39-rc6/fs/splice.c +--- linux-2.6.39-rc6.orig/fs/splice.c 2011-05-04 04:59:13.000000000 +0200 ++++ linux-2.6.39-rc6/fs/splice.c 2011-05-05 11:31:04.000000000 +0200 +@@ -1081,7 +1081,7 @@ + /* + * Attempt to initiate a splice from pipe to file. + */ +-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, ++long do_splice_from(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags) + { + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, +@@ -1109,7 +1109,7 @@ + /* + * Attempt to initiate a splice from a file to a pipe. 
+ */ +-static long do_splice_to(struct file *in, loff_t *ppos, ++long do_splice_to(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) + { diff --git a/target/linux/patches/3.4.113/relocs.patch b/target/linux/patches/3.4.113/relocs.patch new file mode 100644 index 000000000..43c5bb580 --- /dev/null +++ b/target/linux/patches/3.4.113/relocs.patch @@ -0,0 +1,3131 @@ +diff -Nur linux-3.4.4.orig/arch/x86/tools/relocs.c linux-3.4.4/arch/x86/tools/relocs.c +--- linux-3.4.4.orig/arch/x86/tools/relocs.c 2012-06-22 20:37:50.000000000 +0200 ++++ linux-3.4.4/arch/x86/tools/relocs.c 2012-07-03 09:19:18.000000000 +0200 +@@ -5,12 +5,3123 @@ + #include + #include + #include +-#include +-#include ++//#include ++//#include + #define USE_BSD +-#include ++//#include + #include +-#include ++//#include ++ ++static inline void __put_unaligned_le16(int val, int *p) ++{ ++ *p++ = val; ++ *p++ = val >> 8; ++} ++ ++static inline void __put_unaligned_le32(int val, int *p) ++{ ++ __put_unaligned_le16(val >> 16, p + 2); ++ __put_unaligned_le16(val, p); ++} ++ ++static inline void __put_unaligned_le64(int val, int *p) ++{ ++ __put_unaligned_le32(val >> 32, p + 4); ++ __put_unaligned_le32(val, p); ++} ++ ++static inline void put_unaligned_le16(int val, void *p) ++{ ++ __put_unaligned_le16(val, p); ++} ++ ++static inline void put_unaligned_le32(int val, void *p) ++{ ++ __put_unaligned_le32(val, p); ++} ++ ++static inline void put_unaligned_le64(int val, void *p) ++{ ++ __put_unaligned_le64(val, p); ++} ++ ++/* Type for a 16-bit quantity. */ ++typedef uint16_t Elf32_Half; ++typedef uint16_t Elf64_Half; ++ ++/* Types for signed and unsigned 32-bit quantities. */ ++typedef uint32_t Elf32_Word; ++typedef int32_t Elf32_Sword; ++typedef uint32_t Elf64_Word; ++typedef int32_t Elf64_Sword; ++ ++/* Types for signed and unsigned 64-bit quantities. */ ++typedef uint64_t Elf32_Xword; ++typedef int64_t Elf32_Sxword; ++typedef uint64_t Elf64_Xword; ++typedef int64_t Elf64_Sxword; ++ ++/* Type of addresses. */ ++typedef uint32_t Elf32_Addr; ++typedef uint64_t Elf64_Addr; ++ ++/* Type of file offsets. */ ++typedef uint32_t Elf32_Off; ++typedef uint64_t Elf64_Off; ++ ++/* Type for section indices, which are 16-bit quantities. */ ++typedef uint16_t Elf32_Section; ++typedef uint16_t Elf64_Section; ++ ++/* Type for version symbol information. */ ++typedef Elf32_Half Elf32_Versym; ++typedef Elf64_Half Elf64_Versym; ++ ++ ++/* The ELF file header. This appears at the start of every ELF file. 
*/ ++ ++#define EI_NIDENT (16) ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf32_Half e_type; /* Object file type */ ++ Elf32_Half e_machine; /* Architecture */ ++ Elf32_Word e_version; /* Object file version */ ++ Elf32_Addr e_entry; /* Entry point virtual address */ ++ Elf32_Off e_phoff; /* Program header table file offset */ ++ Elf32_Off e_shoff; /* Section header table file offset */ ++ Elf32_Word e_flags; /* Processor-specific flags */ ++ Elf32_Half e_ehsize; /* ELF header size in bytes */ ++ Elf32_Half e_phentsize; /* Program header table entry size */ ++ Elf32_Half e_phnum; /* Program header table entry count */ ++ Elf32_Half e_shentsize; /* Section header table entry size */ ++ Elf32_Half e_shnum; /* Section header table entry count */ ++ Elf32_Half e_shstrndx; /* Section header string table index */ ++} Elf32_Ehdr; ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf64_Half e_type; /* Object file type */ ++ Elf64_Half e_machine; /* Architecture */ ++ Elf64_Word e_version; /* Object file version */ ++ Elf64_Addr e_entry; /* Entry point virtual address */ ++ Elf64_Off e_phoff; /* Program header table file offset */ ++ Elf64_Off e_shoff; /* Section header table file offset */ ++ Elf64_Word e_flags; /* Processor-specific flags */ ++ Elf64_Half e_ehsize; /* ELF header size in bytes */ ++ Elf64_Half e_phentsize; /* Program header table entry size */ ++ Elf64_Half e_phnum; /* Program header table entry count */ ++ Elf64_Half e_shentsize; /* Section header table entry size */ ++ Elf64_Half e_shnum; /* Section header table entry count */ ++ Elf64_Half e_shstrndx; /* Section header string table index */ ++} Elf64_Ehdr; ++ ++/* Fields in the e_ident array. The EI_* macros are indices into the ++ array. The macros under each EI_* macro are the values the byte ++ may have. */ ++ ++#define EI_MAG0 0 /* File identification byte 0 index */ ++#define ELFMAG0 0x7f /* Magic number byte 0 */ ++ ++#define EI_MAG1 1 /* File identification byte 1 index */ ++#define ELFMAG1 'E' /* Magic number byte 1 */ ++ ++#define EI_MAG2 2 /* File identification byte 2 index */ ++#define ELFMAG2 'L' /* Magic number byte 2 */ ++ ++#define EI_MAG3 3 /* File identification byte 3 index */ ++#define ELFMAG3 'F' /* Magic number byte 3 */ ++ ++/* Conglomeration of the identification bytes, for easy testing as a word. */ ++#define ELFMAG "\177ELF" ++#define SELFMAG 4 ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++# define ELFMAG_U32 ((uint32_t)(ELFMAG0 + 0x100 * (ELFMAG1 + (0x100 * (ELFMAG2 + 0x100 * ELFMAG3))))) ++#elif __BYTE_ORDER == __BIG_ENDIAN ++# define ELFMAG_U32 ((uint32_t)((((ELFMAG0 * 0x100) + ELFMAG1) * 0x100 + ELFMAG2) * 0x100 + ELFMAG3)) ++#endif ++ ++#define EI_CLASS 4 /* File class byte index */ ++#define ELFCLASSNONE 0 /* Invalid class */ ++#define ELFCLASS32 1 /* 32-bit objects */ ++#define ELFCLASS64 2 /* 64-bit objects */ ++#define ELFCLASSNUM 3 ++ ++#define EI_DATA 5 /* Data encoding byte index */ ++#define ELFDATANONE 0 /* Invalid data encoding */ ++#define ELFDATA2LSB 1 /* 2's complement, little endian */ ++#define ELFDATA2MSB 2 /* 2's complement, big endian */ ++#define ELFDATANUM 3 ++ ++#define EI_VERSION 6 /* File version byte index */ ++ /* Value must be EV_CURRENT */ ++ ++#define EI_OSABI 7 /* OS ABI identification */ ++#define ELFOSABI_NONE 0 /* UNIX System V ABI */ ++#define ELFOSABI_SYSV 0 /* Alias. */ ++#define ELFOSABI_HPUX 1 /* HP-UX */ ++#define ELFOSABI_NETBSD 2 /* NetBSD. 
*/ ++#define ELFOSABI_LINUX 3 /* Linux. */ ++#define ELFOSABI_HURD 4 /* GNU/Hurd */ ++#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */ ++#define ELFOSABI_AIX 7 /* IBM AIX. */ ++#define ELFOSABI_IRIX 8 /* SGI Irix. */ ++#define ELFOSABI_FREEBSD 9 /* FreeBSD. */ ++#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */ ++#define ELFOSABI_MODESTO 11 /* Novell Modesto. */ ++#define ELFOSABI_OPENBSD 12 /* OpenBSD. */ ++#define ELFOSABI_OPENVMS 13 /* OpenVMS */ ++#define ELFOSABI_NSK 14 /* Hewlett-Packard Non-Stop Kernel */ ++#define ELFOSABI_AROS 15 /* Amiga Research OS */ ++#define ELFOSABI_ARM 97 /* ARM */ ++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ ++ ++#define EI_ABIVERSION 8 /* ABI version */ ++ ++#define EI_PAD 9 /* Byte index of padding bytes */ ++ ++/* Legal values for e_type (object file type). */ ++ ++#define ET_NONE 0 /* No file type */ ++#define ET_REL 1 /* Relocatable file */ ++#define ET_EXEC 2 /* Executable file */ ++#define ET_DYN 3 /* Shared object file */ ++#define ET_CORE 4 /* Core file */ ++#define ET_NUM 5 /* Number of defined types */ ++#define ET_LOOS 0xfe00 /* OS-specific range start */ ++#define ET_HIOS 0xfeff /* OS-specific range end */ ++#define ET_LOPROC 0xff00 /* Processor-specific range start */ ++#define ET_HIPROC 0xffff /* Processor-specific range end */ ++ ++/* Legal values for e_machine (architecture). */ ++ ++#define EM_NONE 0 /* No machine */ ++#define EM_M32 1 /* AT&T WE 32100 */ ++#define EM_SPARC 2 /* SUN SPARC */ ++#define EM_386 3 /* Intel 80386 */ ++#define EM_68K 4 /* Motorola m68k family */ ++#define EM_88K 5 /* Motorola m88k family */ ++#define EM_486 6 /* Intel 80486 *//* Reserved for future use */ ++#define EM_860 7 /* Intel 80860 */ ++#define EM_MIPS 8 /* MIPS R3000 big-endian */ ++#define EM_S370 9 /* IBM System/370 */ ++#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */ ++ ++#define EM_PARISC 15 /* HPPA */ ++#define EM_VPP500 17 /* Fujitsu VPP500 */ ++#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ ++#define EM_960 19 /* Intel 80960 */ ++#define EM_PPC 20 /* PowerPC */ ++#define EM_PPC64 21 /* PowerPC 64-bit */ ++#define EM_S390 22 /* IBM S390 */ ++ ++#define EM_V800 36 /* NEC V800 series */ ++#define EM_FR20 37 /* Fujitsu FR20 */ ++#define EM_RH32 38 /* TRW RH-32 */ ++#define EM_MCORE 39 /* Motorola M*Core */ /* May also be taken by Fujitsu MMA */ ++#define EM_RCE 39 /* Old name for MCore */ ++#define EM_ARM 40 /* ARM */ ++#define EM_FAKE_ALPHA 41 /* Digital Alpha */ ++#define EM_SH 42 /* Renesas SH */ ++#define EM_SPARCV9 43 /* SPARC v9 64-bit */ ++#define EM_TRICORE 44 /* Siemens Tricore */ ++#define EM_ARC 45 /* Argonaut RISC Core */ ++#define EM_H8_300 46 /* Renesas H8/300 */ ++#define EM_H8_300H 47 /* Renesas H8/300H */ ++#define EM_H8S 48 /* Renesas H8S */ ++#define EM_H8_500 49 /* Renesas H8/500 */ ++#define EM_IA_64 50 /* Intel Merced */ ++#define EM_MIPS_X 51 /* Stanford MIPS-X */ ++#define EM_COLDFIRE 52 /* Motorola Coldfire */ ++#define EM_68HC12 53 /* Motorola M68HC12 */ ++#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/ ++#define EM_PCP 55 /* Siemens PCP */ ++#define EM_NCPU 56 /* Sony nCPU embeeded RISC */ ++#define EM_NDR1 57 /* Denso NDR1 microprocessor */ ++#define EM_STARCORE 58 /* Motorola Start*Core processor */ ++#define EM_ME16 59 /* Toyota ME16 processor */ ++#define EM_ST100 60 /* STMicroelectronic ST100 processor */ ++#define EM_TINYJ 61 /* Advanced Logic Corp. 
Tinyj emb.fam*/ ++#define EM_X86_64 62 /* AMD x86-64 architecture */ ++#define EM_PDSP 63 /* Sony DSP Processor */ ++ ++#define EM_FX66 66 /* Siemens FX66 microcontroller */ ++#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */ ++#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */ ++#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */ ++#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */ ++#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */ ++#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */ ++#define EM_SVX 73 /* Silicon Graphics SVx */ ++#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */ ++#define EM_VAX 75 /* Digital VAX */ ++#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ ++#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */ ++#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */ ++#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */ ++#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */ ++#define EM_HUANY 81 /* Harvard University machine-independent object files */ ++#define EM_PRISM 82 /* SiTera Prism */ ++#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */ ++#define EM_FR30 84 /* Fujitsu FR30 */ ++#define EM_D10V 85 /* Mitsubishi D10V */ ++#define EM_D30V 86 /* Mitsubishi D30V */ ++#define EM_V850 87 /* NEC v850 */ ++#define EM_M32R 88 /* Renesas M32R */ ++#define EM_MN10300 89 /* Matsushita MN10300 */ ++#define EM_MN10200 90 /* Matsushita MN10200 */ ++#define EM_PJ 91 /* picoJava */ ++#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ ++#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */ ++#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */ ++#define EM_IP2K 101 /* Ubicom IP2022 micro controller */ ++#define EM_CR 103 /* National Semiconductor CompactRISC */ ++#define EM_MSP430 105 /* TI msp430 micro controller */ ++#define EM_BLACKFIN 106 /* Analog Devices Blackfin */ ++#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ ++#define EM_CRX 114 /* National Semiconductor CRX */ ++#define EM_NUM 95 ++#define EM_TI_C6000 140 ++ ++/* If it is necessary to assign new unofficial EM_* values, please pick large ++ random numbers (0x8523, 0xa7f2, etc.) to minimize the chances of collision ++ with official or non-GNU unofficial values. ++ ++ NOTE: Do not just increment the most recent number by one. ++ Somebody else somewhere will do exactly the same thing, and you ++ will have a collision. Instead, pick a random number. ++ ++ Normally, each entity or maintainer responsible for a machine with an ++ unofficial e_machine number should eventually ask registry@caldera.com for ++ an officially blessed number to be added to the list above. */ ++ ++/* picoJava */ ++#define EM_PJ_OLD 99 ++ ++/* Cygnus PowerPC ELF backend. Written in the absence of an ABI. */ ++#define EM_CYGNUS_POWERPC 0x9025 ++ ++/* Old version of Sparc v9, from before the ABI; this should be ++ removed shortly. */ ++#define EM_OLD_SPARCV9 11 ++ ++/* Old version of PowerPC, this should be removed shortly. */ ++#define EM_PPC_OLD 17 ++ ++/* (Deprecated) Temporary number for the OpenRISC processor. */ ++#define EM_OR32 0x8472 ++ ++/* Renesas M32C and M16C. */ ++#define EM_M32C 0xFEB0 ++ ++/* Cygnus M32R ELF backend. Written in the absence of an ABI. */ ++#define EM_CYGNUS_M32R 0x9041 ++ ++/* old S/390 backend magic number. Written in the absence of an ABI. */ ++#define EM_S390_OLD 0xa390 ++ ++/* D10V backend magic number. Written in the absence of an ABI. 
*/ ++#define EM_CYGNUS_D10V 0x7650 ++ ++/* D30V backend magic number. Written in the absence of an ABI. */ ++#define EM_CYGNUS_D30V 0x7676 ++ ++/* V850 backend magic number. Written in the absense of an ABI. */ ++#define EM_CYGNUS_V850 0x9080 ++ ++/* mn10200 and mn10300 backend magic numbers. ++ Written in the absense of an ABI. */ ++#define EM_CYGNUS_MN10200 0xdead ++#define EM_CYGNUS_MN10300 0xbeef ++ ++/* FR30 magic number - no EABI available. */ ++#define EM_CYGNUS_FR30 0x3330 ++ ++/* AVR magic number ++ Written in the absense of an ABI. */ ++#define EM_AVR_OLD 0x1057 ++ ++/* OpenRISC magic number ++ Written in the absense of an ABI. */ ++#define EM_OPENRISC_OLD 0x3426 ++ ++/* DLX magic number ++ Written in the absense of an ABI. */ ++#define EM_DLX 0x5aa5 ++ ++#define EM_XSTORMY16 0xad45 ++ ++/* FRV magic number - no EABI available??. */ ++#define EM_CYGNUS_FRV 0x5441 ++ ++/* Ubicom IP2xxx; no ABI */ ++#define EM_IP2K_OLD 0x8217 ++ ++#define EM_MT 0x2530 /* Morpho MT; no ABI */ ++ ++/* MSP430 magic number ++ Written in the absense everything. */ ++#define EM_MSP430_OLD 0x1059 ++ ++/* Vitesse IQ2000. */ ++#define EM_IQ2000 0xFEBA ++ ++/* Old, unofficial value for Xtensa. */ ++#define EM_XTENSA_OLD 0xabc7 ++ ++/* Alpha backend magic number. Written in the absence of an ABI. */ ++#define EM_ALPHA 0x9026 ++ ++/* NIOS magic number - no EABI available. */ ++#define EM_NIOS32 0xFEBB ++ ++/* AVR32 magic number from ATMEL */ ++#define EM_AVR32 0x18ad ++ ++/* V850 backend magic number. Written in the absense of an ABI. */ ++#define EM_CYGNUS_V850 0x9080 ++ ++/* Legal values for e_version (version). */ ++ ++#define EV_NONE 0 /* Invalid ELF version */ ++#define EV_CURRENT 1 /* Current version */ ++#define EV_NUM 2 ++ ++/* Section header. */ ++ ++typedef struct ++{ ++ Elf32_Word sh_name; /* Section name (string tbl index) */ ++ Elf32_Word sh_type; /* Section type */ ++ Elf32_Word sh_flags; /* Section flags */ ++ Elf32_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf32_Off sh_offset; /* Section file offset */ ++ Elf32_Word sh_size; /* Section size in bytes */ ++ Elf32_Word sh_link; /* Link to another section */ ++ Elf32_Word sh_info; /* Additional section information */ ++ Elf32_Word sh_addralign; /* Section alignment */ ++ Elf32_Word sh_entsize; /* Entry size if section holds table */ ++} Elf32_Shdr; ++ ++typedef struct ++{ ++ Elf64_Word sh_name; /* Section name (string tbl index) */ ++ Elf64_Word sh_type; /* Section type */ ++ Elf64_Xword sh_flags; /* Section flags */ ++ Elf64_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf64_Off sh_offset; /* Section file offset */ ++ Elf64_Xword sh_size; /* Section size in bytes */ ++ Elf64_Word sh_link; /* Link to another section */ ++ Elf64_Word sh_info; /* Additional section information */ ++ Elf64_Xword sh_addralign; /* Section alignment */ ++ Elf64_Xword sh_entsize; /* Entry size if section holds table */ ++} Elf64_Shdr; ++ ++/* Special section indices. */ ++ ++#define SHN_UNDEF 0 /* Undefined section */ ++#define SHN_LORESERVE 0xff00 /* Start of reserved indices */ ++#define SHN_LOPROC 0xff00 /* Start of processor-specific */ ++#define SHN_BEFORE 0xff00 /* Order section before all others ++ (Solaris). */ ++#define SHN_AFTER 0xff01 /* Order section after all others ++ (Solaris). 
*/ ++#define SHN_HIPROC 0xff1f /* End of processor-specific */ ++#define SHN_LOOS 0xff20 /* Start of OS-specific */ ++#define SHN_HIOS 0xff3f /* End of OS-specific */ ++#define SHN_ABS 0xfff1 /* Associated symbol is absolute */ ++#define SHN_COMMON 0xfff2 /* Associated symbol is common */ ++#define SHN_XINDEX 0xffff /* Index is in extra table. */ ++#define SHN_HIRESERVE 0xffff /* End of reserved indices */ ++ ++/* Legal values for sh_type (section type). */ ++ ++#define SHT_NULL 0 /* Section header table entry unused */ ++#define SHT_PROGBITS 1 /* Program data */ ++#define SHT_SYMTAB 2 /* Symbol table */ ++#define SHT_STRTAB 3 /* String table */ ++#define SHT_RELA 4 /* Relocation entries with addends */ ++#define SHT_HASH 5 /* Symbol hash table */ ++#define SHT_DYNAMIC 6 /* Dynamic linking information */ ++#define SHT_NOTE 7 /* Notes */ ++#define SHT_NOBITS 8 /* Program space with no data (bss) */ ++#define SHT_REL 9 /* Relocation entries, no addends */ ++#define SHT_SHLIB 10 /* Reserved */ ++#define SHT_DYNSYM 11 /* Dynamic linker symbol table */ ++#define SHT_INIT_ARRAY 14 /* Array of constructors */ ++#define SHT_FINI_ARRAY 15 /* Array of destructors */ ++#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */ ++#define SHT_GROUP 17 /* Section group */ ++#define SHT_SYMTAB_SHNDX 18 /* Extended section indeces */ ++#define SHT_NUM 19 /* Number of defined types. */ ++#define SHT_LOOS 0x60000000 /* Start OS-specific */ ++#define SHT_GNU_HASH 0x6ffffff6 /* GNU-style hash table. */ ++#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */ ++#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */ ++#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */ ++#define SHT_SUNW_move 0x6ffffffa ++#define SHT_SUNW_COMDAT 0x6ffffffb ++#define SHT_SUNW_syminfo 0x6ffffffc ++#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */ ++#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */ ++#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */ ++#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */ ++#define SHT_HIOS 0x6fffffff /* End OS-specific type */ ++#define SHT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define SHT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define SHT_LOUSER 0x80000000 /* Start of application-specific */ ++#define SHT_HIUSER 0x8fffffff /* End of application-specific */ ++ ++/* Legal values for sh_flags (section flags). */ ++ ++#define SHF_WRITE (1 << 0) /* Writable */ ++#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */ ++#define SHF_EXECINSTR (1 << 2) /* Executable */ ++#define SHF_MERGE (1 << 4) /* Might be merged */ ++#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */ ++#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */ ++#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */ ++#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling ++ required */ ++#define SHF_GROUP (1 << 9) /* Section is member of a group. */ ++#define SHF_TLS (1 << 10) /* Section hold thread-local data. */ ++#define SHF_MASKOS 0x0ff00000 /* OS-specific. */ ++#define SHF_MASKPROC 0xf0000000 /* Processor-specific */ ++#define SHF_ORDERED (1 << 30) /* Special ordering requirement ++ (Solaris). */ ++#define SHF_EXCLUDE (1 << 31) /* Section is excluded unless ++ referenced or allocated (Solaris).*/ ++ ++/* Section group handling. */ ++#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */ ++ ++/* Symbol table entry. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word st_name; /* Symbol name (string tbl index) */ ++ Elf32_Addr st_value; /* Symbol value */ ++ Elf32_Word st_size; /* Symbol size */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf32_Section st_shndx; /* Section index */ ++} Elf32_Sym; ++ ++typedef struct ++{ ++ Elf64_Word st_name; /* Symbol name (string tbl index) */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf64_Section st_shndx; /* Section index */ ++ Elf64_Addr st_value; /* Symbol value */ ++ Elf64_Xword st_size; /* Symbol size */ ++} Elf64_Sym; ++ ++/* The syminfo section if available contains additional information about ++ every dynamic symbol. */ ++ ++typedef struct ++{ ++ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf32_Half si_flags; /* Per symbol flags */ ++} Elf32_Syminfo; ++ ++typedef struct ++{ ++ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf64_Half si_flags; /* Per symbol flags */ ++} Elf64_Syminfo; ++ ++/* Possible values for si_boundto. */ ++#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */ ++#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */ ++#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */ ++ ++/* Possible bitmasks for si_flags. */ ++#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */ ++#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */ ++#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */ ++#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy ++ loaded */ ++/* Syminfo version values. */ ++#define SYMINFO_NONE 0 ++#define SYMINFO_CURRENT 1 ++#define SYMINFO_NUM 2 ++ ++ ++/* How to extract and insert information held in the st_info field. */ ++ ++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4) ++#define ELF32_ST_TYPE(val) ((val) & 0xf) ++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf)) ++ ++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */ ++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val) ++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val) ++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type)) ++ ++/* Legal values for ST_BIND subfield of st_info (symbol binding). */ ++ ++#define STB_LOCAL 0 /* Local symbol */ ++#define STB_GLOBAL 1 /* Global symbol */ ++#define STB_WEAK 2 /* Weak symbol */ ++#define STB_NUM 3 /* Number of defined types. */ ++#define STB_LOOS 10 /* Start of OS-specific */ ++#define STB_HIOS 12 /* End of OS-specific */ ++#define STB_LOPROC 13 /* Start of processor-specific */ ++#define STB_HIPROC 15 /* End of processor-specific */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_NOTYPE 0 /* Symbol type is unspecified */ ++#define STT_OBJECT 1 /* Symbol is a data object */ ++#define STT_FUNC 2 /* Symbol is a code object */ ++#define STT_SECTION 3 /* Symbol associated with a section */ ++#define STT_FILE 4 /* Symbol's name is file name */ ++#define STT_COMMON 5 /* Symbol is a common data object */ ++#define STT_TLS 6 /* Symbol is thread-local data object*/ ++#define STT_NUM 7 /* Number of defined types. 
*/ ++#define STT_LOOS 10 /* Start of OS-specific */ ++#define STT_HIOS 12 /* End of OS-specific */ ++#define STT_LOPROC 13 /* Start of processor-specific */ ++#define STT_HIPROC 15 /* End of processor-specific */ ++ ++ ++/* Symbol table indices are found in the hash buckets and chain table ++ of a symbol hash table section. This special index value indicates ++ the end of a chain, meaning no further symbols are found in that bucket. */ ++ ++#define STN_UNDEF 0 /* End of a chain. */ ++ ++ ++/* How to extract and insert information held in the st_other field. */ ++ ++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03) ++ ++/* For ELF64 the definitions are the same. */ ++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) ++ ++/* Symbol visibility specification encoded in the st_other field. */ ++#define STV_DEFAULT 0 /* Default symbol visibility rules */ ++#define STV_INTERNAL 1 /* Processor specific hidden class */ ++#define STV_HIDDEN 2 /* Sym unavailable in other modules */ ++#define STV_PROTECTED 3 /* Not preemptible, not exported */ ++ ++ ++/* Relocation table entry without addend (in section of type SHT_REL). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++} Elf32_Rel; ++ ++/* I have seen two different definitions of the Elf64_Rel and ++ Elf64_Rela structures, so we'll leave them out until Novell (or ++ whoever) gets their act together. */ ++/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */ ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++} Elf64_Rel; ++ ++/* Relocation table entry with addend (in section of type SHT_RELA). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++ Elf32_Sword r_addend; /* Addend */ ++} Elf32_Rela; ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++ Elf64_Sxword r_addend; /* Addend */ ++} Elf64_Rela; ++ ++/* How to extract and insert information held in the r_info field. */ ++ ++#define ELF32_R_SYM(val) ((val) >> 8) ++#define ELF32_R_TYPE(val) ((val) & 0xff) ++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff)) ++ ++#define ELF64_R_SYM(i) ((i) >> 32) ++#define ELF64_R_TYPE(i) ((i) & 0xffffffff) ++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type)) ++ ++/* Program segment header. */ ++ ++typedef struct ++{ ++ Elf32_Word p_type; /* Segment type */ ++ Elf32_Off p_offset; /* Segment file offset */ ++ Elf32_Addr p_vaddr; /* Segment virtual address */ ++ Elf32_Addr p_paddr; /* Segment physical address */ ++ Elf32_Word p_filesz; /* Segment size in file */ ++ Elf32_Word p_memsz; /* Segment size in memory */ ++ Elf32_Word p_flags; /* Segment flags */ ++ Elf32_Word p_align; /* Segment alignment */ ++} Elf32_Phdr; ++ ++typedef struct ++{ ++ Elf64_Word p_type; /* Segment type */ ++ Elf64_Word p_flags; /* Segment flags */ ++ Elf64_Off p_offset; /* Segment file offset */ ++ Elf64_Addr p_vaddr; /* Segment virtual address */ ++ Elf64_Addr p_paddr; /* Segment physical address */ ++ Elf64_Xword p_filesz; /* Segment size in file */ ++ Elf64_Xword p_memsz; /* Segment size in memory */ ++ Elf64_Xword p_align; /* Segment alignment */ ++} Elf64_Phdr; ++ ++/* Legal values for p_type (segment type). 
*/ ++ ++#define PT_NULL 0 /* Program header table entry unused */ ++#define PT_LOAD 1 /* Loadable program segment */ ++#define PT_DYNAMIC 2 /* Dynamic linking information */ ++#define PT_INTERP 3 /* Program interpreter */ ++#define PT_NOTE 4 /* Auxiliary information */ ++#define PT_SHLIB 5 /* Reserved */ ++#define PT_PHDR 6 /* Entry for header table itself */ ++#define PT_TLS 7 /* Thread-local storage segment */ ++#define PT_NUM 8 /* Number of defined types */ ++#define PT_LOOS 0x60000000 /* Start of OS-specific */ ++#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */ ++#define PT_GNU_STACK 0x6474e551 /* Indicates stack executability */ ++#define PT_GNU_RELRO 0x6474e552 /* Read-only after relocation */ ++#define PT_PAX_FLAGS 0x65041580 /* Indicates PaX flag markings */ ++#define PT_LOSUNW 0x6ffffffa ++#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */ ++#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */ ++#define PT_HISUNW 0x6fffffff ++#define PT_HIOS 0x6fffffff /* End of OS-specific */ ++#define PT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define PT_HIPROC 0x7fffffff /* End of processor-specific */ ++ ++/* Legal values for p_flags (segment flags). */ ++ ++#define PF_X (1 << 0) /* Segment is executable */ ++#define PF_W (1 << 1) /* Segment is writable */ ++#define PF_R (1 << 2) /* Segment is readable */ ++#define PF_PAGEEXEC (1 << 4) /* Enable PAGEEXEC */ ++#define PF_NOPAGEEXEC (1 << 5) /* Disable PAGEEXEC */ ++#define PF_SEGMEXEC (1 << 6) /* Enable SEGMEXEC */ ++#define PF_NOSEGMEXEC (1 << 7) /* Disable SEGMEXEC */ ++#define PF_MPROTECT (1 << 8) /* Enable MPROTECT */ ++#define PF_NOMPROTECT (1 << 9) /* Disable MPROTECT */ ++#define PF_RANDEXEC (1 << 10) /* Enable RANDEXEC */ ++#define PF_NORANDEXEC (1 << 11) /* Disable RANDEXEC */ ++#define PF_EMUTRAMP (1 << 12) /* Enable EMUTRAMP */ ++#define PF_NOEMUTRAMP (1 << 13) /* Disable EMUTRAMP */ ++#define PF_RANDMMAP (1 << 14) /* Enable RANDMMAP */ ++#define PF_NORANDMMAP (1 << 15) /* Disable RANDMMAP */ ++#define PF_MASKOS 0x0ff00000 /* OS-specific */ ++#define PF_MASKPROC 0xf0000000 /* Processor-specific */ ++ ++/* Legal values for note segment descriptor types for core files. */ ++ ++#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ ++#define NT_FPREGSET 2 /* Contains copy of fpregset struct */ ++#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ ++#define NT_PRXREG 4 /* Contains copy of prxregset struct */ ++#define NT_TASKSTRUCT 4 /* Contains copy of task structure */ ++#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */ ++#define NT_AUXV 6 /* Contains copy of auxv array */ ++#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */ ++#define NT_ASRS 8 /* Contains copy of asrset struct */ ++#define NT_PSTATUS 10 /* Contains copy of pstatus struct */ ++#define NT_PSINFO 13 /* Contains copy of psinfo struct */ ++#define NT_PRCRED 14 /* Contains copy of prcred struct */ ++#define NT_UTSNAME 15 /* Contains copy of utsname struct */ ++#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */ ++#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */ ++#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct*/ ++ ++/* Legal values for the note segment descriptor types for object files. */ ++ ++#define NT_VERSION 1 /* Contains a version string. */ ++ ++ ++/* Dynamic section entry. 
*/ ++ ++typedef struct ++{ ++ Elf32_Sword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf32_Word d_val; /* Integer value */ ++ Elf32_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf32_Dyn; ++ ++typedef struct ++{ ++ Elf64_Sxword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf64_Xword d_val; /* Integer value */ ++ Elf64_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf64_Dyn; ++ ++/* Legal values for d_tag (dynamic entry type). */ ++ ++#define DT_NULL 0 /* Marks end of dynamic section */ ++#define DT_NEEDED 1 /* Name of needed library */ ++#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */ ++#define DT_PLTGOT 3 /* Processor defined value */ ++#define DT_HASH 4 /* Address of symbol hash table */ ++#define DT_STRTAB 5 /* Address of string table */ ++#define DT_SYMTAB 6 /* Address of symbol table */ ++#define DT_RELA 7 /* Address of Rela relocs */ ++#define DT_RELASZ 8 /* Total size of Rela relocs */ ++#define DT_RELAENT 9 /* Size of one Rela reloc */ ++#define DT_STRSZ 10 /* Size of string table */ ++#define DT_SYMENT 11 /* Size of one symbol table entry */ ++#define DT_INIT 12 /* Address of init function */ ++#define DT_FINI 13 /* Address of termination function */ ++#define DT_SONAME 14 /* Name of shared object */ ++#define DT_RPATH 15 /* Library search path (deprecated) */ ++#define DT_SYMBOLIC 16 /* Start symbol search here */ ++#define DT_REL 17 /* Address of Rel relocs */ ++#define DT_RELSZ 18 /* Total size of Rel relocs */ ++#define DT_RELENT 19 /* Size of one Rel reloc */ ++#define DT_PLTREL 20 /* Type of reloc in PLT */ ++#define DT_DEBUG 21 /* For debugging; unspecified */ ++#define DT_TEXTREL 22 /* Reloc might modify .text */ ++#define DT_JMPREL 23 /* Address of PLT relocs */ ++#define DT_BIND_NOW 24 /* Process relocations of object */ ++#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */ ++#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */ ++#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */ ++#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */ ++#define DT_RUNPATH 29 /* Library search path */ ++#define DT_FLAGS 30 /* Flags for the object being loaded */ ++#define DT_ENCODING 32 /* Start of encoded range */ ++#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/ ++#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */ ++#define DT_NUM 34 /* Number used */ ++#define DT_LOOS 0x6000000d /* Start of OS-specific */ ++#define DT_HIOS 0x6ffff000 /* End of OS-specific */ ++#define DT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define DT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */ ++ ++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the ++ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's ++ approach. */ ++#define DT_VALRNGLO 0x6ffffd00 ++#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */ ++#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */ ++#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */ ++#define DT_CHECKSUM 0x6ffffdf8 ++#define DT_PLTPADSZ 0x6ffffdf9 ++#define DT_MOVEENT 0x6ffffdfa ++#define DT_MOVESZ 0x6ffffdfb ++#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */ ++#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, effecting ++ the following DT_* entry. 
*/ ++#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */ ++#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */ ++#define DT_VALRNGHI 0x6ffffdff ++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */ ++#define DT_VALNUM 12 ++ ++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the ++ Dyn.d_un.d_ptr field of the Elf*_Dyn structure. ++ ++ If any adjustment is made to the ELF object after it has been ++ built these entries will need to be adjusted. */ ++#define DT_ADDRRNGLO 0x6ffffe00 ++#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table. */ ++#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */ ++#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */ ++#define DT_CONFIG 0x6ffffefa /* Configuration information. */ ++#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */ ++#define DT_AUDIT 0x6ffffefc /* Object auditing. */ ++#define DT_PLTPAD 0x6ffffefd /* PLT padding. */ ++#define DT_MOVETAB 0x6ffffefe /* Move table. */ ++#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */ ++#define DT_ADDRRNGHI 0x6ffffeff ++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */ ++#define DT_ADDRNUM 10 ++ ++/* The versioning entry types. The next are defined as part of the ++ GNU extension. */ ++#define DT_VERSYM 0x6ffffff0 ++ ++#define DT_RELACOUNT 0x6ffffff9 ++#define DT_RELCOUNT 0x6ffffffa ++ ++/* These were chosen by Sun. */ ++#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */ ++#define DT_VERDEF 0x6ffffffc /* Address of version definition ++ table */ ++#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */ ++#define DT_VERNEED 0x6ffffffe /* Address of table with needed ++ versions */ ++#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */ ++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */ ++#define DT_VERSIONTAGNUM 16 ++ ++/* Sun added these machine-independent extensions in the "processor-specific" ++ range. Be compatible. */ ++#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */ ++#define DT_FILTER 0x7fffffff /* Shared object to get values from */ ++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1) ++#define DT_EXTRANUM 3 ++ ++/* Values of `d_un.d_val' in the DT_FLAGS entry. */ ++#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */ ++#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */ ++#define DF_TEXTREL 0x00000004 /* Object contains text relocations */ ++#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */ ++#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */ ++ ++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 ++ entry in the dynamic section. */ ++#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */ ++#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */ ++#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */ ++#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/ ++#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/ ++#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/ ++#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */ ++#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */ ++#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */ ++#define DF_1_TRANS 0x00000200 ++#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. 
*/ ++#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */ ++#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */ ++#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/ ++#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */ ++#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */ ++#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */ ++ ++/* Flags for the feature selection in DT_FEATURE_1. */ ++#define DTF_1_PARINIT 0x00000001 ++#define DTF_1_CONFEXP 0x00000002 ++ ++/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */ ++#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */ ++#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not ++ generally available. */ ++ ++/* Version definition sections. */ ++ ++typedef struct ++{ ++ Elf32_Half vd_version; /* Version revision */ ++ Elf32_Half vd_flags; /* Version information */ ++ Elf32_Half vd_ndx; /* Version Index */ ++ Elf32_Half vd_cnt; /* Number of associated aux entries */ ++ Elf32_Word vd_hash; /* Version name hash value */ ++ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf32_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf32_Verdef; ++ ++typedef struct ++{ ++ Elf64_Half vd_version; /* Version revision */ ++ Elf64_Half vd_flags; /* Version information */ ++ Elf64_Half vd_ndx; /* Version Index */ ++ Elf64_Half vd_cnt; /* Number of associated aux entries */ ++ Elf64_Word vd_hash; /* Version name hash value */ ++ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf64_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf64_Verdef; ++ ++ ++/* Legal values for vd_version (version revision). */ ++#define VER_DEF_NONE 0 /* No version */ ++#define VER_DEF_CURRENT 1 /* Current version */ ++#define VER_DEF_NUM 2 /* Given version number */ ++ ++/* Legal values for vd_flags (version information flags). */ ++#define VER_FLG_BASE 0x1 /* Version definition of file itself */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++/* Versym symbol index values. */ ++#define VER_NDX_LOCAL 0 /* Symbol is local. */ ++#define VER_NDX_GLOBAL 1 /* Symbol is global. */ ++#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */ ++#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */ ++ ++/* Auxialiary version information. */ ++ ++typedef struct ++{ ++ Elf32_Word vda_name; /* Version or dependency names */ ++ Elf32_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf32_Verdaux; ++ ++typedef struct ++{ ++ Elf64_Word vda_name; /* Version or dependency names */ ++ Elf64_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf64_Verdaux; ++ ++ ++/* Version dependency section. 
*/ ++ ++typedef struct ++{ ++ Elf32_Half vn_version; /* Version of structure */ ++ Elf32_Half vn_cnt; /* Number of associated aux entries */ ++ Elf32_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf32_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf32_Verneed; ++ ++typedef struct ++{ ++ Elf64_Half vn_version; /* Version of structure */ ++ Elf64_Half vn_cnt; /* Number of associated aux entries */ ++ Elf64_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf64_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf64_Verneed; ++ ++ ++/* Legal values for vn_version (version revision). */ ++#define VER_NEED_NONE 0 /* No version */ ++#define VER_NEED_CURRENT 1 /* Current version */ ++#define VER_NEED_NUM 2 /* Given version number */ ++ ++/* Auxiliary needed version information. */ ++ ++typedef struct ++{ ++ Elf32_Word vna_hash; /* Hash value of dependency name */ ++ Elf32_Half vna_flags; /* Dependency specific information */ ++ Elf32_Half vna_other; /* Unused */ ++ Elf32_Word vna_name; /* Dependency name string offset */ ++ Elf32_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf32_Vernaux; ++ ++typedef struct ++{ ++ Elf64_Word vna_hash; /* Hash value of dependency name */ ++ Elf64_Half vna_flags; /* Dependency specific information */ ++ Elf64_Half vna_other; /* Unused */ ++ Elf64_Word vna_name; /* Dependency name string offset */ ++ Elf64_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf64_Vernaux; ++ ++ ++/* Legal values for vna_flags. */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++ ++/* Auxiliary vector. */ ++ ++/* This vector is normally only used by the program interpreter. The ++ usual definition in an ABI supplement uses the name auxv_t. The ++ vector is not usually defined in a standard file, but it ++ can't hurt. We rename it to avoid conflicts. The sizes of these ++ types are an arrangement between the exec server and the program ++ interpreter, so we don't fully specify them here. */ ++ ++typedef struct ++{ ++ uint32_t a_type; /* Entry type */ ++ union ++ { ++ uint32_t a_val; /* Integer value */ ++ /* We use to have pointer elements added here. We cannot do that, ++ though, since it does not work when using 32-bit definitions ++ on 64-bit platforms and vice versa. */ ++ } a_un; ++} Elf32_auxv_t; ++ ++typedef struct ++{ ++ uint64_t a_type; /* Entry type */ ++ union ++ { ++ uint64_t a_val; /* Integer value */ ++ /* We use to have pointer elements added here. We cannot do that, ++ though, since it does not work when using 32-bit definitions ++ on 64-bit platforms and vice versa. */ ++ } a_un; ++} Elf64_auxv_t; ++ ++/* Legal values for a_type (entry type). 
*/ ++ ++#define AT_NULL 0 /* End of vector */ ++#define AT_IGNORE 1 /* Entry should be ignored */ ++#define AT_EXECFD 2 /* File descriptor of program */ ++#define AT_PHDR 3 /* Program headers for program */ ++#define AT_PHENT 4 /* Size of program header entry */ ++#define AT_PHNUM 5 /* Number of program headers */ ++#define AT_PAGESZ 6 /* System page size */ ++#define AT_BASE 7 /* Base address of interpreter */ ++#define AT_FLAGS 8 /* Flags */ ++#define AT_ENTRY 9 /* Entry point of program */ ++#define AT_NOTELF 10 /* Program is not ELF */ ++#define AT_UID 11 /* Real uid */ ++#define AT_EUID 12 /* Effective uid */ ++#define AT_GID 13 /* Real gid */ ++#define AT_EGID 14 /* Effective gid */ ++#define AT_CLKTCK 17 /* Frequency of times() */ ++ ++/* Some more special a_type values describing the hardware. */ ++#define AT_PLATFORM 15 /* String identifying platform. */ ++#define AT_HWCAP 16 /* Machine dependent hints about ++ processor capabilities. */ ++ ++/* This entry gives some information about the FPU initialization ++ performed by the kernel. */ ++#define AT_FPUCW 18 /* Used FPU control word. */ ++ ++/* Cache block sizes. */ ++#define AT_DCACHEBSIZE 19 /* Data cache block size. */ ++#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */ ++#define AT_UCACHEBSIZE 21 /* Unified cache block size. */ ++ ++/* A special ignored value for PPC, used by the kernel to control the ++ interpretation of the AUXV. Must be > 16. */ ++#define AT_IGNOREPPC 22 /* Entry should be ignored. */ ++ ++#define AT_SECURE 23 /* Boolean, was exec setuid-like? */ ++ ++/* Pointer to the global system page used for system calls and other ++ nice things. */ ++#define AT_SYSINFO 32 ++#define AT_SYSINFO_EHDR 33 ++ ++/* Shapes of the caches. Bits 0-3 contains associativity; bits 4-7 contains ++ log2 of line size; mask those to get cache size. */ ++#define AT_L1I_CACHESHAPE 34 ++#define AT_L1D_CACHESHAPE 35 ++#define AT_L2_CACHESHAPE 36 ++#define AT_L3_CACHESHAPE 37 ++ ++/* Note section contents. Each entry in the note section begins with ++ a header of a fixed form. */ ++ ++typedef struct ++{ ++ Elf32_Word n_namesz; /* Length of the note's name. */ ++ Elf32_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf32_Word n_type; /* Type of the note. */ ++} Elf32_Nhdr; ++ ++typedef struct ++{ ++ Elf64_Word n_namesz; /* Length of the note's name. */ ++ Elf64_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf64_Word n_type; /* Type of the note. */ ++} Elf64_Nhdr; ++ ++/* Known names of notes. */ ++ ++/* Solaris entries in the note section have this name. */ ++#define ELF_NOTE_SOLARIS "SUNW Solaris" ++ ++/* Note entries for GNU systems have this name. */ ++#define ELF_NOTE_GNU "GNU" ++ ++ ++/* Defined types of notes for Solaris. */ ++ ++/* Value of descriptor (one word) is desired pagesize for the binary. */ ++#define ELF_NOTE_PAGESIZE_HINT 1 ++ ++ ++/* Defined note types for GNU systems. */ ++ ++/* ABI information. The descriptor consists of words: ++ word 0: OS descriptor ++ word 1: major version of the ABI ++ word 2: minor version of the ABI ++ word 3: subminor version of the ABI ++*/ ++#define ELF_NOTE_ABI 1 ++ ++/* Known OSes. These value can appear in word 0 of an ELF_NOTE_ABI ++ note section entry. */ ++#define ELF_NOTE_OS_LINUX 0 ++#define ELF_NOTE_OS_GNU 1 ++#define ELF_NOTE_OS_SOLARIS2 2 ++#define ELF_NOTE_OS_FREEBSD 3 ++ ++ ++/* Move records. */ ++typedef struct ++{ ++ Elf32_Xword m_value; /* Symbol value. */ ++ Elf32_Word m_info; /* Size and index. */ ++ Elf32_Word m_poffset; /* Symbol offset. 
*/ ++ Elf32_Half m_repeat; /* Repeat count. */ ++ Elf32_Half m_stride; /* Stride info. */ ++} Elf32_Move; ++ ++typedef struct ++{ ++ Elf64_Xword m_value; /* Symbol value. */ ++ Elf64_Xword m_info; /* Size and index. */ ++ Elf64_Xword m_poffset; /* Symbol offset. */ ++ Elf64_Half m_repeat; /* Repeat count. */ ++ Elf64_Half m_stride; /* Stride info. */ ++} Elf64_Move; ++ ++/* Macro to construct move records. */ ++#define ELF32_M_SYM(info) ((info) >> 8) ++#define ELF32_M_SIZE(info) ((unsigned char) (info)) ++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size)) ++ ++#define ELF64_M_SYM(info) ELF32_M_SYM (info) ++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info) ++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size) ++ ++ ++/* Motorola 68k specific definitions. */ ++ ++/* Values for Elf32_Ehdr.e_flags. */ ++#define EF_CPU32 0x00810000 ++ ++/* m68k relocs. */ ++ ++#define R_68K_NONE 0 /* No reloc */ ++#define R_68K_32 1 /* Direct 32 bit */ ++#define R_68K_16 2 /* Direct 16 bit */ ++#define R_68K_8 3 /* Direct 8 bit */ ++#define R_68K_PC32 4 /* PC relative 32 bit */ ++#define R_68K_PC16 5 /* PC relative 16 bit */ ++#define R_68K_PC8 6 /* PC relative 8 bit */ ++#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */ ++#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */ ++#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */ ++#define R_68K_GOT32O 10 /* 32 bit GOT offset */ ++#define R_68K_GOT16O 11 /* 16 bit GOT offset */ ++#define R_68K_GOT8O 12 /* 8 bit GOT offset */ ++#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */ ++#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */ ++#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */ ++#define R_68K_PLT32O 16 /* 32 bit PLT offset */ ++#define R_68K_PLT16O 17 /* 16 bit PLT offset */ ++#define R_68K_PLT8O 18 /* 8 bit PLT offset */ ++#define R_68K_COPY 19 /* Copy symbol at runtime */ ++#define R_68K_GLOB_DAT 20 /* Create GOT entry */ ++#define R_68K_JMP_SLOT 21 /* Create PLT entry */ ++#define R_68K_RELATIVE 22 /* Adjust by program base */ ++/* Keep this the last entry. */ ++#define R_68K_NUM 23 ++ ++/* Intel 80386 specific definitions. */ ++ ++/* i386 relocs. 
*/ ++ ++#define R_386_NONE 0 /* No reloc */ ++#define R_386_32 1 /* Direct 32 bit */ ++#define R_386_PC32 2 /* PC relative 32 bit */ ++#define R_386_GOT32 3 /* 32 bit GOT entry */ ++#define R_386_PLT32 4 /* 32 bit PLT address */ ++#define R_386_COPY 5 /* Copy symbol at runtime */ ++#define R_386_GLOB_DAT 6 /* Create GOT entry */ ++#define R_386_JMP_SLOT 7 /* Create PLT entry */ ++#define R_386_RELATIVE 8 /* Adjust by program base */ ++#define R_386_GOTOFF 9 /* 32 bit offset to GOT */ ++#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */ ++#define R_386_32PLT 11 ++#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */ ++#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS ++ block offset */ ++#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block ++ offset */ ++#define R_386_TLS_LE 17 /* Offset relative to static TLS ++ block */ ++#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of ++ general dynamic thread local data */ ++#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of ++ local dynamic thread local data ++ in LE code */ ++#define R_386_16 20 ++#define R_386_PC16 21 ++#define R_386_8 22 ++#define R_386_PC8 23 ++#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic ++ thread local data */ ++#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */ ++#define R_386_TLS_GD_CALL 26 /* Relocation for call to ++ __tls_get_addr() */ ++#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */ ++#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic ++ thread local data in LE code */ ++#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */ ++#define R_386_TLS_LDM_CALL 30 /* Relocation for call to ++ __tls_get_addr() in LDM code */ ++#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */ ++#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */ ++#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS ++ block offset */ ++#define R_386_TLS_LE_32 34 /* Negated offset relative to static ++ TLS block */ ++#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */ ++#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */ ++#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */ ++/* Keep this the last entry. */ ++#define R_386_NUM 38 ++ ++/* Blackfin specific definitions. 
*/ ++#define R_BFIN_UNUSED0 0x00 ++#define R_BFIN_PCREL5M2 0x01 ++#define R_BFIN_UNUSED1 0x02 ++#define R_BFIN_PCREL10 0x03 ++#define R_BFIN_PCREL12_JUMP 0x04 ++#define R_BFIN_RIMM16 0x05 ++#define R_BFIN_LUIMM16 0x06 ++#define R_BFIN_HUIMM16 0x07 ++#define R_BFIN_PCREL12_JUMP_S 0x08 ++#define R_BFIN_PCREL24_JUMP_X 0x09 ++#define R_BFIN_PCREL24 0x0a ++#define R_BFIN_UNUSEDB 0x0b ++#define R_BFIN_UNUSEDC 0x0c ++#define R_BFIN_PCREL24_JUMP_L 0x0d ++#define R_BFIN_PCREL24_CALL_X 0x0e ++#define R_BFIN_var_eq_symb 0x0f ++#define R_BFIN_BYTE_DATA 0x10 ++#define R_BFIN_BYTE2_DATA 0x11 ++#define R_BFIN_BYTE4_DATA 0x12 ++#define R_BFIN_PCREL11 0x13 ++ ++#define R_BFIN_GOT17M4 0x14 ++#define R_BFIN_GOTHI 0x15 ++#define R_BFIN_GOTLO 0x16 ++#define R_BFIN_FUNCDESC 0x17 ++#define R_BFIN_FUNCDESC_GOT17M4 0x18 ++#define R_BFIN_FUNCDESC_GOTHI 0x19 ++#define R_BFIN_FUNCDESC_GOTLO 0x1a ++#define R_BFIN_FUNCDESC_VALUE 0x1b ++#define R_BFIN_FUNCDESC_GOTOFF17M4 0x1c ++#define R_BFIN_FUNCDESC_GOTOFFHI 0x1d ++#define R_BFIN_FUNCDESC_GOTOFFLO 0x1e ++#define R_BFIN_GOTOFF17M4 0x1f ++#define R_BFIN_GOTOFFHI 0x20 ++#define R_BFIN_GOTOFFLO 0x21 ++ ++#define EF_BFIN_PIC 0x00000001 /* -fpic */ ++#define EF_BFIN_FDPIC 0x00000002 /* -mfdpic */ ++#define EF_BFIN_CODE_IN_L1 0x00000010 /* --code-in-l1 */ ++#define EF_BFIN_DATA_IN_L1 0x00000020 /* --data-in-l1 */ ++ ++/* FR-V specific definitions. */ ++#define R_FRV_NONE 0 /* No reloc. */ ++#define R_FRV_32 1 /* Direct 32 bit. */ ++/* Canonical function descriptor address. */ ++#define R_FRV_FUNCDESC 14 ++/* Private function descriptor initialization. */ ++#define R_FRV_FUNCDESC_VALUE 18 ++ ++ /* gpr support */ ++#define EF_FRV_GPR_MASK 0x00000003 /* mask for # of gprs */ ++#define EF_FRV_GPR_32 0x00000001 /* -mgpr-32 */ ++#define EF_FRV_GPR_64 0x00000002 /* -mgpr-64 */ ++ ++ /* fpr support */ ++#define EF_FRV_FPR_MASK 0x0000000c /* mask for # of fprs */ ++#define EF_FRV_FPR_32 0x00000004 /* -mfpr-32 */ ++#define EF_FRV_FPR_64 0x00000008 /* -mfpr-64 */ ++#define EF_FRV_FPR_NONE 0x0000000c /* -msoft-float */ ++ ++#define EF_FRV_PIC 0x00000100 ++#define EF_FRV_FDPIC 0x00008000 ++ ++/* SUN SPARC specific definitions. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_SPARC_REGISTER 13 /* Global register reserved to app. */ ++ ++/* Values for Elf64_Ehdr.e_flags. */ ++ ++#define EF_SPARCV9_MM 3 ++#define EF_SPARCV9_TSO 0 ++#define EF_SPARCV9_PSO 1 ++#define EF_SPARCV9_RMO 2 ++#define EF_SPARC_LEDATA 0x800000 /* little endian data */ ++#define EF_SPARC_EXT_MASK 0xFFFF00 ++#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */ ++#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */ ++#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */ ++#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */ ++ ++/* SPARC relocs. 
*/ ++ ++#define R_SPARC_NONE 0 /* No reloc */ ++#define R_SPARC_8 1 /* Direct 8 bit */ ++#define R_SPARC_16 2 /* Direct 16 bit */ ++#define R_SPARC_32 3 /* Direct 32 bit */ ++#define R_SPARC_DISP8 4 /* PC relative 8 bit */ ++#define R_SPARC_DISP16 5 /* PC relative 16 bit */ ++#define R_SPARC_DISP32 6 /* PC relative 32 bit */ ++#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */ ++#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */ ++#define R_SPARC_HI22 9 /* High 22 bit */ ++#define R_SPARC_22 10 /* Direct 22 bit */ ++#define R_SPARC_13 11 /* Direct 13 bit */ ++#define R_SPARC_LO10 12 /* Truncated 10 bit */ ++#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */ ++#define R_SPARC_GOT13 14 /* 13 bit GOT entry */ ++#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */ ++#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */ ++#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */ ++#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */ ++#define R_SPARC_COPY 19 /* Copy symbol at runtime */ ++#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */ ++#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */ ++#define R_SPARC_RELATIVE 22 /* Adjust by program base */ ++#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */ ++ ++/* Additional Sparc64 relocs. */ ++ ++#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */ ++#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */ ++#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */ ++#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */ ++#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */ ++#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */ ++#define R_SPARC_10 30 /* Direct 10 bit */ ++#define R_SPARC_11 31 /* Direct 11 bit */ ++#define R_SPARC_64 32 /* Direct 64 bit */ ++#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */ ++#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */ ++#define R_SPARC_HM10 35 /* High middle 10 bits of ... */ ++#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */ ++#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */ ++#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */ ++#define R_SPARC_PC_LM22 39 /* Low miggle 22 bits of ... 
*/ ++#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */ ++#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */ ++#define R_SPARC_7 43 /* Direct 7 bit */ ++#define R_SPARC_5 44 /* Direct 5 bit */ ++#define R_SPARC_6 45 /* Direct 6 bit */ ++#define R_SPARC_DISP64 46 /* PC relative 64 bit */ ++#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */ ++#define R_SPARC_HIX22 48 /* High 22 bit complemented */ ++#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */ ++#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */ ++#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */ ++#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */ ++#define R_SPARC_REGISTER 53 /* Global register usage */ ++#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */ ++#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */ ++#define R_SPARC_TLS_GD_HI22 56 ++#define R_SPARC_TLS_GD_LO10 57 ++#define R_SPARC_TLS_GD_ADD 58 ++#define R_SPARC_TLS_GD_CALL 59 ++#define R_SPARC_TLS_LDM_HI22 60 ++#define R_SPARC_TLS_LDM_LO10 61 ++#define R_SPARC_TLS_LDM_ADD 62 ++#define R_SPARC_TLS_LDM_CALL 63 ++#define R_SPARC_TLS_LDO_HIX22 64 ++#define R_SPARC_TLS_LDO_LOX10 65 ++#define R_SPARC_TLS_LDO_ADD 66 ++#define R_SPARC_TLS_IE_HI22 67 ++#define R_SPARC_TLS_IE_LO10 68 ++#define R_SPARC_TLS_IE_LD 69 ++#define R_SPARC_TLS_IE_LDX 70 ++#define R_SPARC_TLS_IE_ADD 71 ++#define R_SPARC_TLS_LE_HIX22 72 ++#define R_SPARC_TLS_LE_LOX10 73 ++#define R_SPARC_TLS_DTPMOD32 74 ++#define R_SPARC_TLS_DTPMOD64 75 ++#define R_SPARC_TLS_DTPOFF32 76 ++#define R_SPARC_TLS_DTPOFF64 77 ++#define R_SPARC_TLS_TPOFF32 78 ++#define R_SPARC_TLS_TPOFF64 79 ++/* Keep this the last entry. */ ++#define R_SPARC_NUM 80 ++ ++/* For Sparc64, legal values for d_tag of Elf64_Dyn. */ ++ ++#define DT_SPARC_REGISTER 0x70000001 ++#define DT_SPARC_NUM 2 ++ ++/* Bits present in AT_HWCAP, primarily for Sparc32. */ ++ ++#define HWCAP_SPARC_FLUSH 1 /* The cpu supports flush insn. */ ++#define HWCAP_SPARC_STBAR 2 ++#define HWCAP_SPARC_SWAP 4 ++#define HWCAP_SPARC_MULDIV 8 ++#define HWCAP_SPARC_V9 16 /* The cpu is v9, so v8plus is ok. */ ++#define HWCAP_SPARC_ULTRA3 32 ++ ++/* MIPS R3000 specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */ ++#define EF_MIPS_PIC 2 /* Contains PIC code */ ++#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */ ++#define EF_MIPS_XGOT 8 ++#define EF_MIPS_64BIT_WHIRL 16 ++#define EF_MIPS_ABI2 32 ++#define EF_MIPS_ABI_ON32 64 ++#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */ ++ ++/* Legal values for MIPS architecture level. */ ++ ++#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ ++#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* The following are non-official names and should not be used. */ ++ ++#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ ++#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* Special section indices. 
*/ ++ ++#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */ ++#define SHN_MIPS_TEXT 0xff01 /* Allocated test symbols. */ ++#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */ ++#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */ ++#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */ ++#define SHT_MIPS_MSYM 0x70000001 ++#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */ ++#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */ ++#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */ ++#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/ ++#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */ ++#define SHT_MIPS_PACKAGE 0x70000007 ++#define SHT_MIPS_PACKSYM 0x70000008 ++#define SHT_MIPS_RELD 0x70000009 ++#define SHT_MIPS_IFACE 0x7000000b ++#define SHT_MIPS_CONTENT 0x7000000c ++#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */ ++#define SHT_MIPS_SHDR 0x70000010 ++#define SHT_MIPS_FDESC 0x70000011 ++#define SHT_MIPS_EXTSYM 0x70000012 ++#define SHT_MIPS_DENSE 0x70000013 ++#define SHT_MIPS_PDESC 0x70000014 ++#define SHT_MIPS_LOCSYM 0x70000015 ++#define SHT_MIPS_AUXSYM 0x70000016 ++#define SHT_MIPS_OPTSYM 0x70000017 ++#define SHT_MIPS_LOCSTR 0x70000018 ++#define SHT_MIPS_LINE 0x70000019 ++#define SHT_MIPS_RFDESC 0x7000001a ++#define SHT_MIPS_DELTASYM 0x7000001b ++#define SHT_MIPS_DELTAINST 0x7000001c ++#define SHT_MIPS_DELTACLASS 0x7000001d ++#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */ ++#define SHT_MIPS_DELTADECL 0x7000001f ++#define SHT_MIPS_SYMBOL_LIB 0x70000020 ++#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */ ++#define SHT_MIPS_TRANSLATE 0x70000022 ++#define SHT_MIPS_PIXIE 0x70000023 ++#define SHT_MIPS_XLATE 0x70000024 ++#define SHT_MIPS_XLATE_DEBUG 0x70000025 ++#define SHT_MIPS_WHIRL 0x70000026 ++#define SHT_MIPS_EH_REGION 0x70000027 ++#define SHT_MIPS_XLATE_OLD 0x70000028 ++#define SHT_MIPS_PDR_EXCEPTION 0x70000029 ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */ ++#define SHF_MIPS_MERGE 0x20000000 ++#define SHF_MIPS_ADDR 0x40000000 ++#define SHF_MIPS_STRINGS 0x80000000 ++#define SHF_MIPS_NOSTRIP 0x08000000 ++#define SHF_MIPS_LOCAL 0x04000000 ++#define SHF_MIPS_NAMES 0x02000000 ++#define SHF_MIPS_NODUPE 0x01000000 ++ ++ ++/* Symbol tables. */ ++ ++/* MIPS specific values for `st_other'. */ ++#define STO_MIPS_DEFAULT 0x0 ++#define STO_MIPS_INTERNAL 0x1 ++#define STO_MIPS_HIDDEN 0x2 ++#define STO_MIPS_PROTECTED 0x3 ++#define STO_MIPS_PLT 0x8 ++#define STO_MIPS_SC_ALIGN_UNUSED 0xff ++ ++/* MIPS specific values for `st_info'. */ ++#define STB_MIPS_SPLIT_COMMON 13 ++ ++/* Entries found in sections of type SHT_MIPS_GPTAB. */ ++ ++typedef union ++{ ++ struct ++ { ++ Elf32_Word gt_current_g_value; /* -G value used for compilation */ ++ Elf32_Word gt_unused; /* Not used */ ++ } gt_header; /* First entry in section */ ++ struct ++ { ++ Elf32_Word gt_g_value; /* If this value were used for -G */ ++ Elf32_Word gt_bytes; /* This many bytes would be used */ ++ } gt_entry; /* Subsequent entries in section */ ++} Elf32_gptab; ++ ++/* Entry found in sections of type SHT_MIPS_REGINFO. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word ri_gprmask; /* General registers used */ ++ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */ ++ Elf32_Sword ri_gp_value; /* $gp register value */ ++} Elf32_RegInfo; ++ ++/* Entries found in sections of type SHT_MIPS_OPTIONS. */ ++ ++typedef struct ++{ ++ unsigned char kind; /* Determines interpretation of the ++ variable part of descriptor. */ ++ unsigned char size; /* Size of descriptor, including header. */ ++ Elf32_Section section; /* Section header index of section affected, ++ 0 for global options. */ ++ Elf32_Word info; /* Kind-specific information. */ ++} Elf_Options; ++ ++/* Values for `kind' field in Elf_Options. */ ++ ++#define ODK_NULL 0 /* Undefined. */ ++#define ODK_REGINFO 1 /* Register usage information. */ ++#define ODK_EXCEPTIONS 2 /* Exception processing options. */ ++#define ODK_PAD 3 /* Section padding options. */ ++#define ODK_HWPATCH 4 /* Hardware workarounds performed */ ++#define ODK_FILL 5 /* record the fill value used by the linker. */ ++#define ODK_TAGS 6 /* reserve space for desktop tools to write. */ ++#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */ ++#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */ ++ ++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */ ++ ++#define OEX_FPU_MIN 0x1f /* FPE's which MUST be enabled. */ ++#define OEX_FPU_MAX 0x1f00 /* FPE's which MAY be enabled. */ ++#define OEX_PAGE0 0x10000 /* page zero must be mapped. */ ++#define OEX_SMM 0x20000 /* Force sequential memory mode? */ ++#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */ ++#define OEX_PRECISEFP OEX_FPDBUG ++#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */ ++ ++#define OEX_FPU_INVAL 0x10 ++#define OEX_FPU_DIV0 0x08 ++#define OEX_FPU_OFLO 0x04 ++#define OEX_FPU_UFLO 0x02 ++#define OEX_FPU_INEX 0x01 ++ ++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */ ++ ++#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */ ++#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */ ++#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */ ++#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */ ++ ++#define OPAD_PREFIX 0x1 ++#define OPAD_POSTFIX 0x2 ++#define OPAD_SYMBOL 0x4 ++ ++/* Entry found in `.options' section. */ ++ ++typedef struct ++{ ++ Elf32_Word hwp_flags1; /* Extra flags. */ ++ Elf32_Word hwp_flags2; /* Extra flags. */ ++} Elf_Options_Hw; ++ ++/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries. */ ++ ++#define OHWA0_R4KEOP_CHECKED 0x00000001 ++#define OHWA1_R4KEOP_CLEAN 0x00000002 ++ ++/* MIPS relocs. 
*/ ++ ++#define R_MIPS_NONE 0 /* No reloc */ ++#define R_MIPS_16 1 /* Direct 16 bit */ ++#define R_MIPS_32 2 /* Direct 32 bit */ ++#define R_MIPS_REL32 3 /* PC relative 32 bit */ ++#define R_MIPS_26 4 /* Direct 26 bit shifted */ ++#define R_MIPS_HI16 5 /* High 16 bit */ ++#define R_MIPS_LO16 6 /* Low 16 bit */ ++#define R_MIPS_GPREL16 7 /* GP relative 16 bit */ ++#define R_MIPS_LITERAL 8 /* 16 bit literal entry */ ++#define R_MIPS_GOT16 9 /* 16 bit GOT entry */ ++#define R_MIPS_PC16 10 /* PC relative 16 bit */ ++#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */ ++#define R_MIPS_GPREL32 12 /* GP relative 32 bit */ ++ ++#define R_MIPS_SHIFT5 16 ++#define R_MIPS_SHIFT6 17 ++#define R_MIPS_64 18 ++#define R_MIPS_GOT_DISP 19 ++#define R_MIPS_GOT_PAGE 20 ++#define R_MIPS_GOT_OFST 21 ++#define R_MIPS_GOT_HI16 22 ++#define R_MIPS_GOT_LO16 23 ++#define R_MIPS_SUB 24 ++#define R_MIPS_INSERT_A 25 ++#define R_MIPS_INSERT_B 26 ++#define R_MIPS_DELETE 27 ++#define R_MIPS_HIGHER 28 ++#define R_MIPS_HIGHEST 29 ++#define R_MIPS_CALL_HI16 30 ++#define R_MIPS_CALL_LO16 31 ++#define R_MIPS_SCN_DISP 32 ++#define R_MIPS_REL16 33 ++#define R_MIPS_ADD_IMMEDIATE 34 ++#define R_MIPS_PJUMP 35 ++#define R_MIPS_RELGOT 36 ++#define R_MIPS_JALR 37 ++#define R_MIPS_TLS_DTPMOD32 38 /* Module number 32 bit */ ++#define R_MIPS_TLS_DTPREL32 39 /* Module-relative offset 32 bit */ ++#define R_MIPS_TLS_DTPMOD64 40 /* Module number 64 bit */ ++#define R_MIPS_TLS_DTPREL64 41 /* Module-relative offset 64 bit */ ++#define R_MIPS_TLS_GD 42 /* 16 bit GOT offset for GD */ ++#define R_MIPS_TLS_LDM 43 /* 16 bit GOT offset for LDM */ ++#define R_MIPS_TLS_DTPREL_HI16 44 /* Module-relative offset, high 16 bits */ ++#define R_MIPS_TLS_DTPREL_LO16 45 /* Module-relative offset, low 16 bits */ ++#define R_MIPS_TLS_GOTTPREL 46 /* 16 bit GOT offset for IE */ ++#define R_MIPS_TLS_TPREL32 47 /* TP-relative offset, 32 bit */ ++#define R_MIPS_TLS_TPREL64 48 /* TP-relative offset, 64 bit */ ++#define R_MIPS_TLS_TPREL_HI16 49 /* TP-relative offset, high 16 bits */ ++#define R_MIPS_TLS_TPREL_LO16 50 /* TP-relative offset, low 16 bits */ ++#define R_MIPS_GLOB_DAT 51 ++#define R_MIPS_COPY 126 ++#define R_MIPS_JUMP_SLOT 127 ++/* Keep this the last entry. */ ++#define R_MIPS_NUM 128 ++ ++/* Legal values for p_type field of Elf32_Phdr. */ ++ ++#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ ++#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */ ++#define PT_MIPS_OPTIONS 0x70000002 ++ ++/* Special program header types. */ ++ ++#define PF_MIPS_LOCAL 0x10000000 ++ ++/* Legal values for d_tag field of Elf32_Dyn. 
*/ ++ ++#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */ ++#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */ ++#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */ ++#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */ ++#define DT_MIPS_FLAGS 0x70000005 /* Flags */ ++#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */ ++#define DT_MIPS_MSYM 0x70000007 ++#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */ ++#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */ ++#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */ ++#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */ ++#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */ ++#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */ ++#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */ ++#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */ ++#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */ ++#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */ ++#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */ ++#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in ++ DT_MIPS_DELTA_CLASS. */ ++#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */ ++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in ++ DT_MIPS_DELTA_INSTANCE. */ ++#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */ ++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in ++ DT_MIPS_DELTA_RELOC. */ ++#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta ++ relocations refer to. */ ++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in ++ DT_MIPS_DELTA_SYM. */ ++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the ++ class declaration. */ ++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in ++ DT_MIPS_DELTA_CLASSSYM. */ ++#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */ ++#define DT_MIPS_PIXIE_INIT 0x70000023 ++#define DT_MIPS_SYMBOL_LIB 0x70000024 ++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025 ++#define DT_MIPS_LOCAL_GOTIDX 0x70000026 ++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027 ++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028 ++#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */ ++#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */ ++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b ++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */ ++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_rsolve ++ function stored in GOT. */ ++#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added ++ by rld on dlopen() calls. */ ++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */ ++#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */ ++#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */ ++/* The address of .got.plt in an executable using the new non-PIC ABI. */ ++#define DT_MIPS_PLTGOT 0x70000032 ++/* The base of the PLT in an executable using the new non-PIC ABI if that ++ PLT is writable. For a non-writable PLT, this is omitted or has a zero ++ value. */ ++#define DT_MIPS_RWPLT 0x70000034 ++#define DT_MIPS_NUM 0x35 ++ ++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. 
*/ ++ ++#define RHF_NONE 0 /* No flags */ ++#define RHF_QUICKSTART (1 << 0) /* Use quickstart */ ++#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */ ++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */ ++#define RHF_NO_MOVE (1 << 3) ++#define RHF_SGI_ONLY (1 << 4) ++#define RHF_GUARANTEE_INIT (1 << 5) ++#define RHF_DELTA_C_PLUS_PLUS (1 << 6) ++#define RHF_GUARANTEE_START_INIT (1 << 7) ++#define RHF_PIXIE (1 << 8) ++#define RHF_DEFAULT_DELAY_LOAD (1 << 9) ++#define RHF_REQUICKSTART (1 << 10) ++#define RHF_REQUICKSTARTED (1 << 11) ++#define RHF_CORD (1 << 12) ++#define RHF_NO_UNRES_UNDEF (1 << 13) ++#define RHF_RLD_ORDER_SAFE (1 << 14) ++ ++/* Entries found in sections of type SHT_MIPS_LIBLIST. */ ++ ++typedef struct ++{ ++ Elf32_Word l_name; /* Name (string table index) */ ++ Elf32_Word l_time_stamp; /* Timestamp */ ++ Elf32_Word l_checksum; /* Checksum */ ++ Elf32_Word l_version; /* Interface version */ ++ Elf32_Word l_flags; /* Flags */ ++} Elf32_Lib; ++ ++typedef struct ++{ ++ Elf64_Word l_name; /* Name (string table index) */ ++ Elf64_Word l_time_stamp; /* Timestamp */ ++ Elf64_Word l_checksum; /* Checksum */ ++ Elf64_Word l_version; /* Interface version */ ++ Elf64_Word l_flags; /* Flags */ ++} Elf64_Lib; ++ ++ ++/* Legal values for l_flags. */ ++ ++#define LL_NONE 0 ++#define LL_EXACT_MATCH (1 << 0) /* Require exact match */ ++#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */ ++#define LL_REQUIRE_MINOR (1 << 2) ++#define LL_EXPORTS (1 << 3) ++#define LL_DELAY_LOAD (1 << 4) ++#define LL_DELTA (1 << 5) ++ ++/* Entries found in sections of type SHT_MIPS_CONFLICT. */ ++ ++typedef Elf32_Addr Elf32_Conflict; ++ ++ ++/* HPPA specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ ++#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ ++#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ ++#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ ++#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch ++ prediction. */ ++#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ ++#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ ++ ++/* Defined values for `e_flags & EF_PARISC_ARCH' are: */ ++ ++#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ ++#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ ++#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ ++ ++/* Additional section indeces. */ ++ ++#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared ++ symbols in ANSI C. */ ++#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ ++#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ ++#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ ++#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ ++#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ ++ ++#define STT_HP_OPAQUE (STT_LOOS + 0x1) ++#define STT_HP_STUB (STT_LOOS + 0x2) ++ ++/* HPPA relocs. 
*/ ++ ++#define R_PARISC_NONE 0 /* No reloc. */ ++#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ ++#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ ++#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ ++#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ ++#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ ++#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ ++#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ ++#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ ++#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ ++#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ ++#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ ++#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ ++#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ ++#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ ++#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ ++#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ ++#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ ++#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ ++#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ ++#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ ++#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ ++#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ ++#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ ++#define R_PARISC_FPTR64 64 /* 64 bits function address. */ ++#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ ++#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ ++#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ ++#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ ++#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ ++#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ ++#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ ++#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ ++#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ ++#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ ++#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ ++#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ ++#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ ++#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ ++#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. 
address, right 14 bits. */ ++#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ ++#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ ++#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LORESERVE 128 ++#define R_PARISC_COPY 128 /* Copy relocation. */ ++#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ ++#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ ++#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ ++#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ ++#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ ++#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ ++#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ ++#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ ++#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_HIRESERVE 255 ++ ++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PT_HP_TLS (PT_LOOS + 0x0) ++#define PT_HP_CORE_NONE (PT_LOOS + 0x1) ++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) ++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) ++#define PT_HP_CORE_COMM (PT_LOOS + 0x4) ++#define PT_HP_CORE_PROC (PT_LOOS + 0x5) ++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) ++#define PT_HP_CORE_STACK (PT_LOOS + 0x7) ++#define PT_HP_CORE_SHM (PT_LOOS + 0x8) ++#define PT_HP_CORE_MMF (PT_LOOS + 0x9) ++#define PT_HP_PARALLEL (PT_LOOS + 0x10) ++#define PT_HP_FASTBIND (PT_LOOS + 0x11) ++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) ++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) ++#define PT_HP_STACK (PT_LOOS + 0x14) ++ ++#define PT_PARISC_ARCHEXT 0x70000000 ++#define PT_PARISC_UNWIND 0x70000001 ++ ++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PF_PARISC_SBP 0x08000000 ++ ++#define PF_HP_PAGE_SIZE 0x00100000 ++#define PF_HP_FAR_SHARED 0x00200000 ++#define PF_HP_NEAR_SHARED 0x00400000 ++#define PF_HP_CODE 0x01000000 ++#define PF_HP_MODIFY 0x02000000 ++#define PF_HP_LAZYSWAP 0x04000000 ++#define PF_HP_SBP 0x08000000 ++ ++ ++/* Alpha specific definitions. */ ++ ++/* Legal values for e_flags field of Elf64_Ehdr. */ ++ ++#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. 
*/ ++#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */ ++ ++/* Legal values for sh_type field of Elf64_Shdr. */ ++ ++/* These two are primerily concerned with ECOFF debugging info. */ ++#define SHT_ALPHA_DEBUG 0x70000001 ++#define SHT_ALPHA_REGINFO 0x70000002 ++ ++/* Legal values for sh_flags field of Elf64_Shdr. */ ++ ++#define SHF_ALPHA_GPREL 0x10000000 ++ ++/* Legal values for st_other field of Elf64_Sym. */ ++#define STO_ALPHA_NOPV 0x80 /* No PV required. */ ++#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */ ++ ++/* Alpha relocs. */ ++ ++#define R_ALPHA_NONE 0 /* No reloc */ ++#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ ++#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ ++#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ ++#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ ++#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ ++#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ ++#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ ++#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ ++#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ ++#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ ++#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ ++#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ ++#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ ++#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ ++#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ ++#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ ++#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ ++#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ ++#define R_ALPHA_TLS_GD_HI 28 ++#define R_ALPHA_TLSGD 29 ++#define R_ALPHA_TLS_LDM 30 ++#define R_ALPHA_DTPMOD64 31 ++#define R_ALPHA_GOTDTPREL 32 ++#define R_ALPHA_DTPREL64 33 ++#define R_ALPHA_DTPRELHI 34 ++#define R_ALPHA_DTPRELLO 35 ++#define R_ALPHA_DTPREL16 36 ++#define R_ALPHA_GOTTPREL 37 ++#define R_ALPHA_TPREL64 38 ++#define R_ALPHA_TPRELHI 39 ++#define R_ALPHA_TPRELLO 40 ++#define R_ALPHA_TPREL16 41 ++/* Keep this the last entry. */ ++#define R_ALPHA_NUM 46 ++ ++/* Magic values of the LITUSE relocation addend. */ ++#define LITUSE_ALPHA_ADDR 0 ++#define LITUSE_ALPHA_BASE 1 ++#define LITUSE_ALPHA_BYTOFF 2 ++#define LITUSE_ALPHA_JSR 3 ++#define LITUSE_ALPHA_TLS_GD 4 ++#define LITUSE_ALPHA_TLS_LDM 5 ++ ++/* Legal values for d_tag of Elf64_Dyn. */ ++#define DT_ALPHA_PLTRO (DT_LOPROC + 0) ++#define DT_ALPHA_NUM 1 ++ ++/* PowerPC specific declarations */ ++ ++/* Values for Elf32/64_Ehdr.e_flags. */ ++#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */ ++ ++/* Cygnus local bits below */ ++#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/ ++#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib ++ flag */ ++ ++/* PowerPC relocations defined by the ABIs */ ++#define R_PPC_NONE 0 ++#define R_PPC_ADDR32 1 /* 32bit absolute address */ ++#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ ++#define R_PPC_ADDR16 3 /* 16bit absolute address */ ++#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ ++#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ ++#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ ++#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ ++#define R_PPC_ADDR14_BRTAKEN 8 ++#define R_PPC_ADDR14_BRNTAKEN 9 ++#define R_PPC_REL24 10 /* PC relative 26 bit */ ++#define R_PPC_REL14 11 /* PC relative 16 bit */ ++#define R_PPC_REL14_BRTAKEN 12 ++#define R_PPC_REL14_BRNTAKEN 13 ++#define R_PPC_GOT16 14 ++#define R_PPC_GOT16_LO 15 ++#define R_PPC_GOT16_HI 16 ++#define R_PPC_GOT16_HA 17 ++#define R_PPC_PLTREL24 18 ++#define R_PPC_COPY 19 ++#define R_PPC_GLOB_DAT 20 ++#define R_PPC_JMP_SLOT 21 ++#define R_PPC_RELATIVE 22 ++#define R_PPC_LOCAL24PC 23 ++#define R_PPC_UADDR32 24 ++#define R_PPC_UADDR16 25 ++#define R_PPC_REL32 26 ++#define R_PPC_PLT32 27 ++#define R_PPC_PLTREL32 28 ++#define R_PPC_PLT16_LO 29 ++#define R_PPC_PLT16_HI 30 ++#define R_PPC_PLT16_HA 31 ++#define R_PPC_SDAREL16 32 ++#define R_PPC_SECTOFF 33 ++#define R_PPC_SECTOFF_LO 34 ++#define R_PPC_SECTOFF_HI 35 ++#define R_PPC_SECTOFF_HA 36 ++ ++/* PowerPC relocations defined for the TLS access ABI. */ ++#define R_PPC_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */ ++#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */ ++#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */ ++#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */ ++#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */ ++#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */ ++#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */ ++#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ ++#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ ++ ++/* Keep this the last entry. */ ++#define R_PPC_NUM 95 ++ ++/* The remaining relocs are from the Embedded ELF ABI, and are not ++ in the SVR4 ELF ABI. 
*/ ++#define R_PPC_EMB_NADDR32 101 ++#define R_PPC_EMB_NADDR16 102 ++#define R_PPC_EMB_NADDR16_LO 103 ++#define R_PPC_EMB_NADDR16_HI 104 ++#define R_PPC_EMB_NADDR16_HA 105 ++#define R_PPC_EMB_SDAI16 106 ++#define R_PPC_EMB_SDA2I16 107 ++#define R_PPC_EMB_SDA2REL 108 ++#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */ ++#define R_PPC_EMB_MRKREF 110 ++#define R_PPC_EMB_RELSEC16 111 ++#define R_PPC_EMB_RELST_LO 112 ++#define R_PPC_EMB_RELST_HI 113 ++#define R_PPC_EMB_RELST_HA 114 ++#define R_PPC_EMB_BIT_FLD 115 ++#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */ ++ ++/* Diab tool relocations. */ ++#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */ ++#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */ ++#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */ ++#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */ ++#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */ ++#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */ ++ ++/* GNU relocs used in PIC code sequences. */ ++#define R_PPC_REL16 249 /* word32 (sym+add-.) */ ++#define R_PPC_REL16_LO 250 /* half16 (sym+add-.)@l */ ++#define R_PPC_REL16_HI 251 /* half16 (sym+add-.)@h */ ++#define R_PPC_REL16_HA 252 /* half16 (sym+add-.)@ha */ ++ ++/* This is a phony reloc to handle any old fashioned TOC16 references ++ that may still be in object files. */ ++#define R_PPC_TOC16 255 ++ ++/* PowerPC specific values for the Dyn d_tag field. */ ++#define DT_PPC_GOT (DT_LOPROC + 0) ++#define DT_PPC_NUM 1 ++ ++/* PowerPC64 relocations defined by the ABIs */ ++#define R_PPC64_NONE R_PPC_NONE ++#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */ ++#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */ ++#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */ ++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */ ++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */ ++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */ ++#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */ ++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN ++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN ++#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 
26 bit, word aligned */ ++#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */ ++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN ++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN ++#define R_PPC64_GOT16 R_PPC_GOT16 ++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO ++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI ++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA ++ ++#define R_PPC64_COPY R_PPC_COPY ++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT ++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT ++#define R_PPC64_RELATIVE R_PPC_RELATIVE ++ ++#define R_PPC64_UADDR32 R_PPC_UADDR32 ++#define R_PPC64_UADDR16 R_PPC_UADDR16 ++#define R_PPC64_REL32 R_PPC_REL32 ++#define R_PPC64_PLT32 R_PPC_PLT32 ++#define R_PPC64_PLTREL32 R_PPC_PLTREL32 ++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO ++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI ++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA ++ ++#define R_PPC64_SECTOFF R_PPC_SECTOFF ++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO ++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI ++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA ++#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */ ++#define R_PPC64_ADDR64 38 /* doubleword64 S + A */ ++#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */ ++#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */ ++#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */ ++#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */ ++#define R_PPC64_UADDR64 43 /* doubleword64 S + A */ ++#define R_PPC64_REL64 44 /* doubleword64 S + A - P */ ++#define R_PPC64_PLT64 45 /* doubleword64 L + A */ ++#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */ ++#define R_PPC64_TOC16 47 /* half16* S + A - .TOC */ ++#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */ ++#define R_PPC64_TOC 51 /* doubleword64 .TOC */ ++#define R_PPC64_PLTGOT16 52 /* half16* M + A */ ++#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */ ++#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */ ++#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */ ++ ++#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */ ++#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */ ++#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */ ++#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */ ++#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */ ++#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */ ++#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */ ++#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */ ++#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2 */ ++#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */ ++#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */ ++ ++/* PowerPC64 relocations defined for the TLS access ABI. 
*/ ++#define R_PPC64_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */ ++#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */ ++#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */ ++#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */ ++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */ ++#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */ ++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */ ++#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */ ++#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */ ++#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */ ++#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */ ++#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */ ++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */ ++#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */ ++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ ++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ ++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ ++ ++/* Keep this the last entry. */ ++#define R_PPC64_NUM 107 ++ ++/* PowerPC64 specific values for the Dyn d_tag field. */ ++#define DT_PPC64_GLINK (DT_LOPROC + 0) ++#define DT_PPC64_OPD (DT_LOPROC + 1) ++#define DT_PPC64_OPDSZ (DT_LOPROC + 2) ++#define DT_PPC64_NUM 3 ++ ++ ++/* ARM specific declarations */ ++ ++/* Processor specific flags for the ELF header e_flags field. */ ++#define EF_ARM_RELEXEC 0x01 ++#define EF_ARM_HASENTRY 0x02 ++#define EF_ARM_INTERWORK 0x04 ++#define EF_ARM_APCS_26 0x08 ++#define EF_ARM_APCS_FLOAT 0x10 ++#define EF_ARM_PIC 0x20 ++#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */ ++#define EF_ARM_NEW_ABI 0x80 ++#define EF_ARM_OLD_ABI 0x100 ++ ++/* Other constants defined in the ARM ELF spec. version B-01. 
*/ ++/* NB. These conflict with values defined above. */ ++#define EF_ARM_SYMSARESORTED 0x04 ++#define EF_ARM_DYNSYMSUSESEGIDX 0x08 ++#define EF_ARM_MAPSYMSFIRST 0x10 ++#define EF_ARM_EABIMASK 0XFF000000 ++ ++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK) ++#define EF_ARM_EABI_UNKNOWN 0x00000000 ++#define EF_ARM_EABI_VER1 0x01000000 ++#define EF_ARM_EABI_VER2 0x02000000 ++ ++/* Additional symbol types for Thumb */ ++#define STT_ARM_TFUNC 0xd ++ ++/* ARM-specific values for sh_flags */ ++#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ ++#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined ++ in the input to a link step */ ++ ++/* ARM-specific program header flags */ ++#define PF_ARM_SB 0x10000000 /* Segment contains the location ++ addressed by the static base */ ++ ++/* Processor specific values for the Phdr p_type field. */ ++#define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */ ++ ++/* ARM relocs. */ ++ ++#define R_ARM_NONE 0 /* No reloc */ ++#define R_ARM_PC24 1 /* PC relative 26 bit branch */ ++#define R_ARM_ABS32 2 /* Direct 32 bit */ ++#define R_ARM_REL32 3 /* PC relative 32 bit */ ++#define R_ARM_PC13 4 ++#define R_ARM_ABS16 5 /* Direct 16 bit */ ++#define R_ARM_ABS12 6 /* Direct 12 bit */ ++#define R_ARM_THM_ABS5 7 ++#define R_ARM_ABS8 8 /* Direct 8 bit */ ++#define R_ARM_SBREL32 9 ++#define R_ARM_THM_PC22 10 ++#define R_ARM_THM_PC8 11 ++#define R_ARM_AMP_VCALL9 12 ++#define R_ARM_SWI24 13 ++#define R_ARM_THM_SWI8 14 ++#define R_ARM_XPC25 15 ++#define R_ARM_THM_XPC22 16 ++#define R_ARM_TLS_DTPMOD32 17 ++#define R_ARM_TLS_DTPOFF32 18 ++#define R_ARM_TLS_TPOFF32 19 ++#define R_ARM_COPY 20 /* Copy symbol at runtime */ ++#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ ++#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ ++#define R_ARM_RELATIVE 23 /* Adjust by program base */ ++#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ ++#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ ++#define R_ARM_GOT32 26 /* 32 bit GOT entry */ ++#define R_ARM_PLT32 27 /* 32 bit PLT address */ ++#define R_ARM_ALU_PCREL_7_0 32 ++#define R_ARM_ALU_PCREL_15_8 33 ++#define R_ARM_ALU_PCREL_23_15 34 ++#define R_ARM_LDR_SBREL_11_0 35 ++#define R_ARM_ALU_SBREL_19_12 36 ++#define R_ARM_ALU_SBREL_27_20 37 ++#define R_ARM_GNU_VTENTRY 100 ++#define R_ARM_GNU_VTINHERIT 101 ++#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ ++#define R_ARM_THM_PC9 103 /* thumb conditional branch */ ++#define R_ARM_TLS_GD32 104 ++#define R_ARM_TLS_LDM32 105 ++#define R_ARM_TLS_LDO32 106 ++#define R_ARM_TLS_IE32 107 ++#define R_ARM_TLS_LE32 108 ++#define R_ARM_TLS_LDO12 109 ++#define R_ARM_TLS_LE12 110 ++#define R_ARM_TLS_IE12GP 111 ++#define R_ARM_RXPC25 249 ++#define R_ARM_RSBREL32 250 ++#define R_ARM_THM_RPC22 251 ++#define R_ARM_RREL32 252 ++#define R_ARM_RABS22 253 ++#define R_ARM_RPC24 254 ++#define R_ARM_RBASE 255 ++/* Keep this the last entry. */ ++#define R_ARM_NUM 256 ++ ++/* IA-64 specific declarations. */ ++ ++/* Processor specific flags for the Ehdr e_flags field. */ ++#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ ++#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ ++#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ ++ ++/* Processor specific values for the Phdr p_type field. 
*/ ++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ ++#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ ++#define PT_IA_64_HP_OPT_ANOT (PT_LOOS + 0x12) ++#define PT_IA_64_HP_HSL_ANOT (PT_LOOS + 0x13) ++#define PT_IA_64_HP_STACK (PT_LOOS + 0x14) ++ ++/* Processor specific flags for the Phdr p_flags field. */ ++#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Shdr sh_type field. */ ++#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ ++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ ++ ++/* Processor specific flags for the Shdr sh_flags field. */ ++#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ ++#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Dyn d_tag field. */ ++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) ++#define DT_IA_64_NUM 1 ++ ++/* IA-64 relocations. */ ++#define R_IA64_NONE 0x00 /* none */ ++#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ ++#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ ++#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ ++#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ ++#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ ++#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ ++#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ ++#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ ++#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ ++#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ ++#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ ++#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ ++#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ ++#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ ++#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ ++#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ ++#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ ++#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ ++#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ ++#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ ++#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ ++#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ ++#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ ++#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ ++#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ ++#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ ++#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ ++#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ ++#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ ++#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ ++#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ ++#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ ++#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ ++#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ ++#define R_IA64_SEGREL32MSB 
0x5c /* @segrel(sym + add), data4 MSB */ ++#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ ++#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ ++#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ ++#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ ++#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ ++#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ ++#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ ++#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ ++#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ ++#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ ++#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ ++#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ ++#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ ++#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ ++#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ ++#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ ++#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ ++#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ ++#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ ++#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ ++#define R_IA64_COPY 0x84 /* copy relocation */ ++#define R_IA64_SUB 0x85 /* Addend and symbol difference */ ++#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ ++#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ ++#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ ++#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ ++#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ ++#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ ++#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ ++#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ ++#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ ++#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ ++#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ ++#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ ++#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ ++#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ ++#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ ++#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ ++ ++/* SH specific declarations */ ++ ++/* SH specific values for `st_other'. */ ++ ++/* If set, this is a symbol pointing to SHmedia code, which will be branched ++ to, so need to add 1 to the symbol value. */ ++#define STO_SH5_ISA32 (1 << 2) ++ ++/* SH relocs. 
*/ ++#define R_SH_NONE 0 ++#define R_SH_DIR32 1 ++#define R_SH_REL32 2 ++#define R_SH_DIR8WPN 3 ++#define R_SH_IND12W 4 ++#define R_SH_DIR8WPL 5 ++#define R_SH_DIR8WPZ 6 ++#define R_SH_DIR8BP 7 ++#define R_SH_DIR8W 8 ++#define R_SH_DIR8L 9 ++#define R_SH_SWITCH16 25 ++#define R_SH_SWITCH32 26 ++#define R_SH_USES 27 ++#define R_SH_COUNT 28 ++#define R_SH_ALIGN 29 ++#define R_SH_CODE 30 ++#define R_SH_DATA 31 ++#define R_SH_LABEL 32 ++#define R_SH_SWITCH8 33 ++#define R_SH_GNU_VTINHERIT 34 ++#define R_SH_GNU_VTENTRY 35 ++#define R_SH_TLS_GD_32 144 ++#define R_SH_TLS_LD_32 145 ++#define R_SH_TLS_LDO_32 146 ++#define R_SH_TLS_IE_32 147 ++#define R_SH_TLS_LE_32 148 ++#define R_SH_TLS_DTPMOD32 149 ++#define R_SH_TLS_DTPOFF32 150 ++#define R_SH_TLS_TPOFF32 151 ++#define R_SH_GOT32 160 ++#define R_SH_PLT32 161 ++#define R_SH_COPY 162 ++#define R_SH_GLOB_DAT 163 ++#define R_SH_JMP_SLOT 164 ++#define R_SH_RELATIVE 165 ++#define R_SH_GOTOFF 166 ++#define R_SH_GOTPC 167 ++#define R_SH_RELATIVE_LOW16 197 ++#define R_SH_RELATIVE_MEDLOW16 198 ++#define R_SH_IMM_LOW16 246 ++#define R_SH_IMM_LOW16_PCREL 247 ++#define R_SH_IMM_MEDLOW16 248 ++#define R_SH_IMM_MEDLOW16_PCREL 249 ++ ++/* Keep this the last entry. */ ++#define R_SH_NUM 256 ++ ++/* Additional s390 relocs */ ++ ++#define R_390_NONE 0 /* No reloc. */ ++#define R_390_8 1 /* Direct 8 bit. */ ++#define R_390_12 2 /* Direct 12 bit. */ ++#define R_390_16 3 /* Direct 16 bit. */ ++#define R_390_32 4 /* Direct 32 bit. */ ++#define R_390_PC32 5 /* PC relative 32 bit. */ ++#define R_390_GOT12 6 /* 12 bit GOT offset. */ ++#define R_390_GOT32 7 /* 32 bit GOT offset. */ ++#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ ++#define R_390_COPY 9 /* Copy symbol at runtime. */ ++#define R_390_GLOB_DAT 10 /* Create GOT entry. */ ++#define R_390_JMP_SLOT 11 /* Create PLT entry. */ ++#define R_390_RELATIVE 12 /* Adjust by program base. */ ++#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ ++#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */ ++#define R_390_GOT16 15 /* 16 bit GOT offset. */ ++#define R_390_PC16 16 /* PC relative 16 bit. */ ++#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ ++#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ ++#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ ++#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ ++#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ ++#define R_390_64 22 /* Direct 64 bit. */ ++#define R_390_PC64 23 /* PC relative 64 bit. */ ++#define R_390_GOT64 24 /* 64 bit GOT offset. */ ++#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ ++#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ ++#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ ++#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ ++#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ ++#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ ++#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ ++#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ ++#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ ++#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */ ++#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ ++#define R_390_TLS_GDCALL 38 /* Tag for function call in general ++ dynamic TLS code. 
*/ ++#define R_390_TLS_LDCALL 39 /* Tag for function call in local ++ dynamic TLS code. */ ++#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to ++ static TLS block. */ ++#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to ++ static TLS block. */ ++#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS ++ block. */ ++#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS ++ block. */ ++#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ ++#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ ++#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS ++ block. */ ++#define R_390_20 57 /* Direct 20 bit. */ ++#define R_390_GOT20 58 /* 20 bit GOT offset. */ ++#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */ ++#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS ++ block offset. */ ++/* Keep this the last entry. */ ++#define R_390_NUM 61 ++ ++ ++/* CRIS flags. */ ++#define EF_CRIS_VARIANT_MASK 0x0000000e ++#define EF_CRIS_VARIANT_ANY_V0_V10 0x00000000 ++#define EF_CRIS_VARIANT_V32 0x00000002 ++#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004 ++ ++/* CRIS relocations. */ ++#define R_CRIS_NONE 0 ++#define R_CRIS_8 1 ++#define R_CRIS_16 2 ++#define R_CRIS_32 3 ++#define R_CRIS_8_PCREL 4 ++#define R_CRIS_16_PCREL 5 ++#define R_CRIS_32_PCREL 6 ++#define R_CRIS_GNU_VTINHERIT 7 ++#define R_CRIS_GNU_VTENTRY 8 ++#define R_CRIS_COPY 9 ++#define R_CRIS_GLOB_DAT 10 ++#define R_CRIS_JUMP_SLOT 11 ++#define R_CRIS_RELATIVE 12 ++#define R_CRIS_16_GOT 13 ++#define R_CRIS_32_GOT 14 ++#define R_CRIS_16_GOTPLT 15 ++#define R_CRIS_32_GOTPLT 16 ++#define R_CRIS_32_GOTREL 17 ++#define R_CRIS_32_PLT_GOTREL 18 ++#define R_CRIS_32_PLT_PCREL 19 ++ ++/* Keep this the last entry. */ ++#define R_CRIS_NUM 20 ++ ++ ++/* AMD x86-64 relocations. 
*/ ++#define R_X86_64_NONE 0 /* No reloc */ ++#define R_X86_64_64 1 /* Direct 64 bit */ ++#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ ++#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ ++#define R_X86_64_PLT32 4 /* 32 bit PLT address */ ++#define R_X86_64_COPY 5 /* Copy symbol at runtime */ ++#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ ++#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ ++#define R_X86_64_RELATIVE 8 /* Adjust by program base */ ++#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative ++ offset to GOT */ ++#define R_X86_64_32 10 /* Direct 32 bit zero extended */ ++#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ ++#define R_X86_64_16 12 /* Direct 16 bit zero extended */ ++#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ ++#define R_X86_64_8 14 /* Direct 8 bit sign extended */ ++#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ ++#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */ ++#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */ ++#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */ ++#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset ++ to two GOT entries for GD symbol */ ++#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset ++ to two GOT entries for LD symbol */ ++#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */ ++#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset ++ to GOT entry for IE symbol */ ++#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */ ++ ++#define R_X86_64_NUM 24 ++ ++ ++/* AM33 relocations. */ ++#define R_MN10300_NONE 0 /* No reloc. */ ++#define R_MN10300_32 1 /* Direct 32 bit. */ ++#define R_MN10300_16 2 /* Direct 16 bit. */ ++#define R_MN10300_8 3 /* Direct 8 bit. */ ++#define R_MN10300_PCREL32 4 /* PC-relative 32-bit. */ ++#define R_MN10300_PCREL16 5 /* PC-relative 16-bit signed. */ ++#define R_MN10300_PCREL8 6 /* PC-relative 8-bit signed. */ ++#define R_MN10300_GNU_VTINHERIT 7 /* Ancient C++ vtable garbage... */ ++#define R_MN10300_GNU_VTENTRY 8 /* ... collection annotation. */ ++#define R_MN10300_24 9 /* Direct 24 bit. */ ++#define R_MN10300_GOTPC32 10 /* 32-bit PCrel offset to GOT. */ ++#define R_MN10300_GOTPC16 11 /* 16-bit PCrel offset to GOT. */ ++#define R_MN10300_GOTOFF32 12 /* 32-bit offset from GOT. */ ++#define R_MN10300_GOTOFF24 13 /* 24-bit offset from GOT. */ ++#define R_MN10300_GOTOFF16 14 /* 16-bit offset from GOT. */ ++#define R_MN10300_PLT32 15 /* 32-bit PCrel to PLT entry. */ ++#define R_MN10300_PLT16 16 /* 16-bit PCrel to PLT entry. */ ++#define R_MN10300_GOT32 17 /* 32-bit offset to GOT entry. */ ++#define R_MN10300_GOT24 18 /* 24-bit offset to GOT entry. */ ++#define R_MN10300_GOT16 19 /* 16-bit offset to GOT entry. */ ++#define R_MN10300_COPY 20 /* Copy symbol at runtime. */ ++#define R_MN10300_GLOB_DAT 21 /* Create GOT entry. */ ++#define R_MN10300_JMP_SLOT 22 /* Create PLT entry. */ ++#define R_MN10300_RELATIVE 23 /* Adjust by program base. */ ++ ++#define R_MN10300_NUM 24 ++ ++ ++/* M32R relocs. */ ++#define R_M32R_NONE 0 /* No reloc. */ ++#define R_M32R_16 1 /* Direct 16 bit. */ ++#define R_M32R_32 2 /* Direct 32 bit. */ ++#define R_M32R_24 3 /* Direct 24 bit. */ ++#define R_M32R_10_PCREL 4 /* PC relative 10 bit shifted. */ ++#define R_M32R_18_PCREL 5 /* PC relative 18 bit shifted. */ ++#define R_M32R_26_PCREL 6 /* PC relative 26 bit shifted. */ ++#define R_M32R_HI16_ULO 7 /* High 16 bit with unsigned low. */ ++#define R_M32R_HI16_SLO 8 /* High 16 bit with signed low. 
*/ ++#define R_M32R_LO16 9 /* Low 16 bit. */ ++#define R_M32R_SDA16 10 /* 16 bit offset in SDA. */ ++#define R_M32R_GNU_VTINHERIT 11 ++#define R_M32R_GNU_VTENTRY 12 ++/* M32R relocs use SHT_RELA. */ ++#define R_M32R_16_RELA 33 /* Direct 16 bit. */ ++#define R_M32R_32_RELA 34 /* Direct 32 bit. */ ++#define R_M32R_24_RELA 35 /* Direct 24 bit. */ ++#define R_M32R_10_PCREL_RELA 36 /* PC relative 10 bit shifted. */ ++#define R_M32R_18_PCREL_RELA 37 /* PC relative 18 bit shifted. */ ++#define R_M32R_26_PCREL_RELA 38 /* PC relative 26 bit shifted. */ ++#define R_M32R_HI16_ULO_RELA 39 /* High 16 bit with unsigned low */ ++#define R_M32R_HI16_SLO_RELA 40 /* High 16 bit with signed low */ ++#define R_M32R_LO16_RELA 41 /* Low 16 bit */ ++#define R_M32R_SDA16_RELA 42 /* 16 bit offset in SDA */ ++#define R_M32R_RELA_GNU_VTINHERIT 43 ++#define R_M32R_RELA_GNU_VTENTRY 44 ++ ++#define R_M32R_GOT24 48 /* 24 bit GOT entry */ ++#define R_M32R_26_PLTREL 49 /* 26 bit PC relative to PLT shifted */ ++#define R_M32R_COPY 50 /* Copy symbol at runtime */ ++#define R_M32R_GLOB_DAT 51 /* Create GOT entry */ ++#define R_M32R_JMP_SLOT 52 /* Create PLT entry */ ++#define R_M32R_RELATIVE 53 /* Adjust by program base */ ++#define R_M32R_GOTOFF 54 /* 24 bit offset to GOT */ ++#define R_M32R_GOTPC24 55 /* 24 bit PC relative offset to GOT */ ++#define R_M32R_GOT16_HI_ULO 56 /* High 16 bit GOT entry with unsigned ++ low */ ++#define R_M32R_GOT16_HI_SLO 57 /* High 16 bit GOT entry with signed ++ low */ ++#define R_M32R_GOT16_LO 58 /* Low 16 bit GOT entry */ ++#define R_M32R_GOTPC_HI_ULO 59 /* High 16 bit PC relative offset to ++ GOT with unsigned low */ ++#define R_M32R_GOTPC_HI_SLO 60 /* High 16 bit PC relative offset to ++ GOT with signed low */ ++#define R_M32R_GOTPC_LO 61 /* Low 16 bit PC relative offset to ++ GOT */ ++#define R_M32R_GOTOFF_HI_ULO 62 /* High 16 bit offset to GOT ++ with unsigned low */ ++#define R_M32R_GOTOFF_HI_SLO 63 /* High 16 bit offset to GOT ++ with signed low */ ++#define R_M32R_GOTOFF_LO 64 /* Low 16 bit offset to GOT */ ++#define R_M32R_NUM 256 /* Keep this the last entry. */ ++ ++/* i960 Relocations */ ++#define R_960_NONE 0 ++#define R_960_12 1 ++#define R_960_32 2 ++#define R_960_IP24 3 ++#define R_960_SUB 4 ++#define R_960_OPTCALL 5 ++#define R_960_OPTCALLX 6 ++#define R_960_OPTCALLXA 7 ++/* Keep this the last entry. */ ++#define R_960_NUM 8 ++ ++ ++/* v850 relocations. 
*/ ++#define R_V850_NONE 0 ++#define R_V850_9_PCREL 1 ++#define R_V850_22_PCREL 2 ++#define R_V850_HI16_S 3 ++#define R_V850_HI16 4 ++#define R_V850_LO16 5 ++#define R_V850_32 6 ++#define R_V850_16 7 ++#define R_V850_8 8 ++#define R_V850_SDA_16_16_OFFSET 9 /* For ld.b, st.b, set1, clr1, ++ not1, tst1, movea, movhi */ ++#define R_V850_SDA_15_16_OFFSET 10 /* For ld.w, ld.h, ld.hu, st.w, st.h */ ++#define R_V850_ZDA_16_16_OFFSET 11 /* For ld.b, st.b, set1, clr1, ++ not1, tst1, movea, movhi */ ++#define R_V850_ZDA_15_16_OFFSET 12 /* For ld.w, ld.h, ld.hu, st.w, st.h */ ++#define R_V850_TDA_6_8_OFFSET 13 /* For sst.w, sld.w */ ++#define R_V850_TDA_7_8_OFFSET 14 /* For sst.h, sld.h */ ++#define R_V850_TDA_7_7_OFFSET 15 /* For sst.b, sld.b */ ++#define R_V850_TDA_16_16_OFFSET 16 /* For set1, clr1, not1, tst1, ++ movea, movhi */ ++/* CYGNUS LOCAL v850e */ ++#define R_V850_TDA_4_5_OFFSET 17 /* For sld.hu */ ++#define R_V850_TDA_4_4_OFFSET 18 /* For sld.bu */ ++#define R_V850_SDA_16_16_SPLIT_OFFSET 19 /* For ld.bu */ ++#define R_V850_ZDA_16_16_SPLIT_OFFSET 20 /* For ld.bu */ ++#define R_V850_CALLT_6_7_OFFSET 21 /* For callt */ ++#define R_V850_CALLT_16_16_OFFSET 22 /* For callt */ ++/* END CYGNUS LOCAL */ ++#define R_V850_GNU_VTINHERIT 23 ++#define R_V850_GNU_VTENTRY 24 ++/* Keep this the last entry. */ ++#define R_V850_NUM 25 ++ ++/* Atmel AVR32 relocations. */ ++#define R_AVR32_NONE 0 ++#define R_AVR32_32 1 ++#define R_AVR32_16 2 ++#define R_AVR32_8 3 ++#define R_AVR32_32_PCREL 4 ++#define R_AVR32_16_PCREL 5 ++#define R_AVR32_8_PCREL 6 ++#define R_AVR32_DIFF32 7 ++#define R_AVR32_DIFF16 8 ++#define R_AVR32_DIFF8 9 ++#define R_AVR32_GOT32 10 ++#define R_AVR32_GOT16 11 ++#define R_AVR32_GOT8 12 ++#define R_AVR32_21S 13 ++#define R_AVR32_16U 14 ++#define R_AVR32_16S 15 ++#define R_AVR32_8S 16 ++#define R_AVR32_8S_EXT 17 ++#define R_AVR32_22H_PCREL 18 ++#define R_AVR32_18W_PCREL 19 ++#define R_AVR32_16B_PCREL 20 ++#define R_AVR32_16N_PCREL 21 ++#define R_AVR32_14UW_PCREL 22 ++#define R_AVR32_11H_PCREL 23 ++#define R_AVR32_10UW_PCREL 24 ++#define R_AVR32_9H_PCREL 25 ++#define R_AVR32_9UW_PCREL 26 ++#define R_AVR32_HI16 27 ++#define R_AVR32_LO16 28 ++#define R_AVR32_GOTPC 29 ++#define R_AVR32_GOTCALL 30 ++#define R_AVR32_LDA_GOT 31 ++#define R_AVR32_GOT21S 32 ++#define R_AVR32_GOT18SW 33 ++#define R_AVR32_GOT16S 34 ++#define R_AVR32_GOT7UW 35 ++#define R_AVR32_32_CPENT 36 ++#define R_AVR32_CPCALL 37 ++#define R_AVR32_16_CP 38 ++#define R_AVR32_9W_CP 39 ++#define R_AVR32_RELATIVE 40 ++#define R_AVR32_GLOB_DAT 41 ++#define R_AVR32_JMP_SLOT 42 ++#define R_AVR32_ALIGN 43 ++#define R_AVR32_NUM 44 ++ ++/* AVR32 dynamic tags */ ++#define DT_AVR32_GOTSZ 0x70000001 /* Total size of GOT in bytes */ ++ ++/* Renesas H8/300 Relocations */ ++#define R_H8_NONE 0 ++#define R_H8_DIR32 1 ++#define R_H8_DIR32_28 2 ++#define R_H8_DIR32_24 3 ++#define R_H8_DIR32_16 4 ++#define R_H8_DIR32U 6 ++#define R_H8_DIR32U_28 7 ++#define R_H8_DIR32U_24 8 ++#define R_H8_DIR32U_20 9 ++#define R_H8_DIR32U_16 10 ++#define R_H8_DIR24 11 ++#define R_H8_DIR24_20 12 ++#define R_H8_DIR24_16 13 ++#define R_H8_DIR24U 14 ++#define R_H8_DIR24U_20 15 ++#define R_H8_DIR24U_16 16 ++#define R_H8_DIR16 17 ++#define R_H8_DIR16U 18 ++#define R_H8_DIR16S_32 19 ++#define R_H8_DIR16S_28 20 ++#define R_H8_DIR16S_24 21 ++#define R_H8_DIR16S_20 22 ++#define R_H8_DIR16S 23 ++#define R_H8_DIR8 24 ++#define R_H8_DIR8U 25 ++#define R_H8_DIR8Z_32 26 ++#define R_H8_DIR8Z_28 27 ++#define R_H8_DIR8Z_24 28 ++#define R_H8_DIR8Z_20 29 ++#define R_H8_DIR8Z_16 30 ++#define 
R_H8_PCREL16 31 ++#define R_H8_PCREL8 32 ++#define R_H8_BPOS 33 ++#define R_H8_PCREL32 34 ++#define R_H8_GOT32O 35 ++#define R_H8_GOT16O 36 ++#define R_H8_DIR16A8 59 ++#define R_H8_DIR16R8 60 ++#define R_H8_DIR24A8 61 ++#define R_H8_DIR24R8 62 ++#define R_H8_DIR32A16 63 ++#define R_H8_ABS32 65 ++#define R_H8_ABS32A16 127 ++#define R_H8_NUM 128 ++ ++/* NIOS relocations. */ ++#define R_NIOS_NONE 0 ++#define R_NIOS_32 1 /* A 32 bit absolute relocation.*/ ++#define R_NIOS_LO16_LO5 2 /* A LO-16 5 bit absolute relocation. */ ++#define R_NIOS_LO16_HI11 3 /* A LO-16 top 11 bit absolute relocation. */ ++#define R_NIOS_HI16_LO5 4 /* A HI-16 5 bit absolute relocation. */ ++#define R_NIOS_HI16_HI11 5 /* A HI-16 top 11 bit absolute relocation. */ ++#define R_NIOS_PCREL6 6 /* A 6 bit relative relocation. */ ++#define R_NIOS_PCREL8 7 /* An 8 bit relative relocation. */ ++#define R_NIOS_PCREL11 8 /* An 11 bit relative relocation. */ ++#define R_NIOS_16 9 /* A 16 bit absolute relocation. */ ++#define R_NIOS_H_LO5 10 /* Low 5-bits of absolute relocation in halfwords. */ ++#define R_NIOS_H_HI11 11 /* Top 11 bits of 16-bit absolute relocation in halfwords. */ ++#define R_NIOS_H_XLO5 12 /* Low 5 bits of top 16-bits of 32-bit absolute relocation in halfwords. */ ++#define R_NIOS_H_XHI11 13 /* Top 11 bits of top 16-bits of 32-bit absolute relocation in halfwords. */ ++#define R_NIOS_H_16 14 /* Half-word @h value */ ++#define R_NIOS_H_32 15 /* Word @h value */ ++#define R_NIOS_GNU_VTINHERIT 200 /* GNU extension to record C++ vtable hierarchy */ ++#define R_NIOS_GNU_VTENTRY 201 /* GNU extension to record C++ vtable member usage */ ++/* Keep this the last entry. */ ++#define R_NIOS_NUM 202 ++ ++/* NIOS II relocations */ ++#define R_NIOS2_NONE 0 ++#define R_NIOS2_S16 1 ++#define R_NIOS2_U16 2 ++#define R_NIOS2_PCREL16 3 ++#define R_NIOS2_CALL26 4 ++#define R_NIOS2_IMM5 5 ++#define R_NIOS2_CACHE_OPX 6 ++#define R_NIOS2_IMM6 7 ++#define R_NIOS2_IMM8 8 ++#define R_NIOS2_HI16 9 ++#define R_NIOS2_LO16 10 ++#define R_NIOS2_HIADJ16 11 ++#define R_NIOS2_BFD_RELOC_32 12 ++#define R_NIOS2_BFD_RELOC_16 13 ++#define R_NIOS2_BFD_RELOC_8 14 ++#define R_NIOS2_GPREL 15 ++#define R_NIOS2_GNU_VTINHERIT 16 ++#define R_NIOS2_GNU_VTENTRY 17 ++#define R_NIOS2_UJMP 18 ++#define R_NIOS2_CJMP 19 ++#define R_NIOS2_CALLR 20 ++#define R_NIOS2_ALIGN 21 ++/* Keep this the last entry. */ ++#define R_NIOS2_NUM 22 ++ ++/* Xtensa-specific declarations */ ++ ++/* Xtensa values for the Dyn d_tag field. */ ++#define DT_XTENSA_GOT_LOC_OFF (DT_LOPROC + 0) ++#define DT_XTENSA_GOT_LOC_SZ (DT_LOPROC + 1) ++#define DT_XTENSA_NUM 2 ++ ++/* Xtensa relocations. 
*/ ++#define R_XTENSA_NONE 0 ++#define R_XTENSA_32 1 ++#define R_XTENSA_RTLD 2 ++#define R_XTENSA_GLOB_DAT 3 ++#define R_XTENSA_JMP_SLOT 4 ++#define R_XTENSA_RELATIVE 5 ++#define R_XTENSA_PLT 6 ++#define R_XTENSA_OP0 8 ++#define R_XTENSA_OP1 9 ++#define R_XTENSA_OP2 10 ++#define R_XTENSA_ASM_EXPAND 11 ++#define R_XTENSA_ASM_SIMPLIFY 12 ++#define R_XTENSA_GNU_VTINHERIT 15 ++#define R_XTENSA_GNU_VTENTRY 16 ++#define R_XTENSA_DIFF8 17 ++#define R_XTENSA_DIFF16 18 ++#define R_XTENSA_DIFF32 19 ++#define R_XTENSA_SLOT0_OP 20 ++#define R_XTENSA_SLOT1_OP 21 ++#define R_XTENSA_SLOT2_OP 22 ++#define R_XTENSA_SLOT3_OP 23 ++#define R_XTENSA_SLOT4_OP 24 ++#define R_XTENSA_SLOT5_OP 25 ++#define R_XTENSA_SLOT6_OP 26 ++#define R_XTENSA_SLOT7_OP 27 ++#define R_XTENSA_SLOT8_OP 28 ++#define R_XTENSA_SLOT9_OP 29 ++#define R_XTENSA_SLOT10_OP 30 ++#define R_XTENSA_SLOT11_OP 31 ++#define R_XTENSA_SLOT12_OP 32 ++#define R_XTENSA_SLOT13_OP 33 ++#define R_XTENSA_SLOT14_OP 34 ++#define R_XTENSA_SLOT0_ALT 35 ++#define R_XTENSA_SLOT1_ALT 36 ++#define R_XTENSA_SLOT2_ALT 37 ++#define R_XTENSA_SLOT3_ALT 38 ++#define R_XTENSA_SLOT4_ALT 39 ++#define R_XTENSA_SLOT5_ALT 40 ++#define R_XTENSA_SLOT6_ALT 41 ++#define R_XTENSA_SLOT7_ALT 42 ++#define R_XTENSA_SLOT8_ALT 43 ++#define R_XTENSA_SLOT9_ALT 44 ++#define R_XTENSA_SLOT10_ALT 45 ++#define R_XTENSA_SLOT11_ALT 46 ++#define R_XTENSA_SLOT12_ALT 47 ++#define R_XTENSA_SLOT13_ALT 48 ++#define R_XTENSA_SLOT14_ALT 49 ++/* Keep this the last entry. */ ++#define R_XTENSA_NUM 50 ++ ++/* C6X specific relocs */ ++#define R_C6000_NONE 0 ++#define R_C6000_ABS32 1 ++#define R_C6000_ABS16 2 ++#define R_C6000_ABS8 3 ++#define R_C6000_PCR_S21 4 ++#define R_C6000_PCR_S12 5 ++#define R_C6000_PCR_S10 6 ++#define R_C6000_PCR_S7 7 ++#define R_C6000_ABS_S16 8 ++#define R_C6000_ABS_L16 9 ++#define R_C6000_ABS_H16 10 ++#define R_C6000_SBR_U15_B 11 ++#define R_C6000_SBR_U15_H 12 ++#define R_C6000_SBR_U15_W 13 ++#define R_C6000_SBR_S16 14 ++#define R_C6000_SBR_L16_B 15 ++#define R_C6000_SBR_L16_H 16 ++#define R_C6000_SBR_L16_W 17 ++#define R_C6000_SBR_H16_B 18 ++#define R_C6000_SBR_H16_H 19 ++#define R_C6000_SBR_H16_W 20 ++#define R_C6000_SBR_GOT_U15_W 21 ++#define R_C6000_SBR_GOT_L16_W 22 ++#define R_C6000_SBR_GOT_H16_W 23 ++#define R_C6000_DSBT_INDEX 24 ++#define R_C6000_PREL31 25 ++#define R_C6000_COPY 26 ++#define R_C6000_JUMP_SLOT 27 ++#define R_C6000_SBR_GOT32 28 ++#define R_C6000_PCR_H16 29 ++#define R_C6000_PCR_L16 30 ++#define R_C6000_ALIGN 253 ++#define R_C6000_FPHEAD 254 ++#define R_C6000_NOCMP 255 ++ ++/* C6x specific values for the Dyn d_tag field. 
*/ ++#define DT_C6000_DSBT_BASE (DT_LOPROC + 0) ++#define DT_C6000_DSBT_SIZE (DT_LOPROC + 1) ++#define DT_C6000_PREEMPTMAP (DT_LOPROC + 2) ++#define DT_C6000_DSBT_INDEX (DT_LOPROC + 3) ++ ++#define DT_C6000_NUM 4 + + static void die(char *fmt, ...); + diff --git a/target/linux/patches/3.4.113/sparc-aout.patch b/target/linux/patches/3.4.113/sparc-aout.patch new file mode 100644 index 000000000..5cd74c2e7 --- /dev/null +++ b/target/linux/patches/3.4.113/sparc-aout.patch @@ -0,0 +1,24 @@ +diff -Nur linux-3.4.103.orig/arch/sparc/boot/Makefile linux-3.4.103/arch/sparc/boot/Makefile +--- linux-3.4.103.orig/arch/sparc/boot/Makefile 2014-08-13 20:07:43.000000000 -0500 ++++ linux-3.4.103/arch/sparc/boot/Makefile 2015-02-16 00:26:16.934618259 -0600 +@@ -74,9 +74,6 @@ + $(call if_changed,gzip) + @echo ' kernel: $@ is ready' + +-$(obj)/vmlinux.aout: vmlinux FORCE +- $(call if_changed,elftoaout) +- @echo ' kernel: $@ is ready' + else + + # The following lines make a readable image for U-Boot. +@@ -107,6 +104,10 @@ + + endif + ++$(obj)/vmlinux.aout: $(obj)/image FORCE ++ $(call if_changed,elftoaout) ++ @echo ' kernel: $@ is ready' ++ + $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE + $(call if_changed,elftoaout) + $(call if_changed,piggy) diff --git a/target/linux/patches/3.4.113/sparc-include.patch b/target/linux/patches/3.4.113/sparc-include.patch new file mode 100644 index 000000000..2f8ffd061 --- /dev/null +++ b/target/linux/patches/3.4.113/sparc-include.patch @@ -0,0 +1,11 @@ +diff -Nur linux-2.6.39-rc7.orig/arch/sparc/boot/btfixupprep.c linux-2.6.39-rc7/arch/sparc/boot/btfixupprep.c +--- linux-2.6.39-rc7.orig/arch/sparc/boot/btfixupprep.c 2011-05-10 04:33:54.000000000 +0200 ++++ linux-2.6.39-rc7/arch/sparc/boot/btfixupprep.c 2011-05-21 13:34:40.000000000 +0200 +@@ -25,7 +25,6 @@ + #include + #include + #include +-#include + + #define MAXSYMS 1024 + diff --git a/target/linux/patches/3.4.113/startup.patch b/target/linux/patches/3.4.113/startup.patch new file mode 100644 index 000000000..c26430bcb --- /dev/null +++ b/target/linux/patches/3.4.113/startup.patch @@ -0,0 +1,34 @@ +diff -Nur linux-3.4.82.orig/init/initramfs.c linux-3.4.82/init/initramfs.c +--- linux-3.4.82.orig/init/initramfs.c 2014-02-22 19:33:35.000000000 +0100 ++++ linux-3.4.82/init/initramfs.c 2014-03-15 18:46:22.674928245 +0100 +@@ -606,6 +606,9 @@ + free_initrd(); + #endif + } ++#ifdef CONFIG_DEVTMPFS_MOUNT ++ devtmpfs_mount("dev"); ++#endif + return 0; + } + rootfs_initcall(populate_rootfs); +diff -Nur linux-3.4.82.orig/init/main.c linux-3.4.82/init/main.c +--- linux-3.4.82.orig/init/main.c 2014-02-22 19:33:35.000000000 +0100 ++++ linux-3.4.82/init/main.c 2014-03-15 18:46:22.674928245 +0100 +@@ -873,6 +873,8 @@ + if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) + printk(KERN_WARNING "Warning: unable to open an initial console.\n"); + ++ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n"); ++ + (void) sys_dup(0); + (void) sys_dup(0); + /* +@@ -881,7 +883,7 @@ + */ + + if (!ramdisk_execute_command) +- ramdisk_execute_command = "/init"; ++ ramdisk_execute_command = "/sbin/init"; + + if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { + ramdisk_execute_command = NULL; diff --git a/target/linux/patches/3.4.113/usb-defaults-off.patch b/target/linux/patches/3.4.113/usb-defaults-off.patch new file mode 100644 index 000000000..31367108a --- /dev/null +++ b/target/linux/patches/3.4.113/usb-defaults-off.patch @@ -0,0 +1,32 @@ +diff -Nur 
linux-2.6.37.orig//drivers/usb/core/Kconfig linux-2.6.37/drivers/usb/core/Kconfig +--- linux-2.6.37.orig//drivers/usb/core/Kconfig 2011-01-05 01:50:19.000000000 +0100 ++++ linux-2.6.37/drivers/usb/core/Kconfig 2011-04-12 19:04:23.000000000 +0200 +@@ -59,7 +59,7 @@ + config USB_DEVICE_CLASS + bool "USB device class-devices (DEPRECATED)" + depends on USB +- default y ++ default n + ---help--- + Userspace access to USB devices is granted by device-nodes exported + directly from the usbdev in sysfs. Old versions of the driver +diff -Nur linux-2.6.37.orig//drivers/usb/host/Kconfig linux-2.6.37/drivers/usb/host/Kconfig +--- linux-2.6.37.orig//drivers/usb/host/Kconfig 2011-01-05 01:50:19.000000000 +0100 ++++ linux-2.6.37/drivers/usb/host/Kconfig 2011-04-12 19:04:48.000000000 +0200 +@@ -62,6 +62,7 @@ + config USB_EHCI_ROOT_HUB_TT + bool "Root Hub Transaction Translators" + depends on USB_EHCI_HCD ++ default n + ---help--- + Some EHCI chips have vendor-specific extensions to integrate + transaction translators, so that no OHCI or UHCI companion +@@ -74,7 +75,7 @@ + config USB_EHCI_TT_NEWSCHED + bool "Improved Transaction Translator scheduling" + depends on USB_EHCI_HCD +- default y ++ default n + ---help--- + This changes the periodic scheduling code to fill more of the low + and full speed bandwidth available from the Transaction Translator diff --git a/target/linux/patches/3.4.113/vga-cons-default-off.patch b/target/linux/patches/3.4.113/vga-cons-default-off.patch new file mode 100644 index 000000000..178aeeeb9 --- /dev/null +++ b/target/linux/patches/3.4.113/vga-cons-default-off.patch @@ -0,0 +1,12 @@ +diff -Nur linux-2.6.37.orig//drivers/video/console/Kconfig linux-2.6.37/drivers/video/console/Kconfig +--- linux-2.6.37.orig//drivers/video/console/Kconfig 2011-01-05 01:50:19.000000000 +0100 ++++ linux-2.6.37/drivers/video/console/Kconfig 2011-04-12 16:29:34.000000000 +0200 +@@ -7,7 +7,7 @@ + config VGA_CONSOLE + bool "VGA text console" if EMBEDDED || !X86 + depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) +- default y ++ default n + help + Saying Y here will allow you to use Linux in text mode through a + display that complies with the generic VGA standard. 
Virtually diff --git a/target/linux/patches/3.4.113/wlan-cf.patch b/target/linux/patches/3.4.113/wlan-cf.patch new file mode 100644 index 000000000..fc20759e2 --- /dev/null +++ b/target/linux/patches/3.4.113/wlan-cf.patch @@ -0,0 +1,11 @@ +diff -Nur linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c +--- linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c 2011-05-19 06:06:34.000000000 +0200 ++++ linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c 2011-09-12 02:46:26.987984145 +0200 +@@ -623,6 +623,7 @@ + static struct pcmcia_device_id hostap_cs_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), + PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), ++ PCMCIA_DEVICE_MANF_CARD(0x0004, 0x2003), + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), + PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), + PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), diff --git a/target/linux/patches/3.4.113/yaffs2.patch b/target/linux/patches/3.4.113/yaffs2.patch new file mode 100644 index 000000000..44c95915f --- /dev/null +++ b/target/linux/patches/3.4.113/yaffs2.patch @@ -0,0 +1,16550 @@ +diff -Nur linux-3.4.90.orig/fs/Kconfig linux-3.4.90/fs/Kconfig +--- linux-3.4.90.orig/fs/Kconfig 2014-05-13 14:11:45.000000000 +0200 ++++ linux-3.4.90/fs/Kconfig 2014-05-17 15:08:09.000000000 +0200 +@@ -203,6 +203,7 @@ + source "fs/befs/Kconfig" + source "fs/bfs/Kconfig" + source "fs/efs/Kconfig" ++source "fs/yaffs2/Kconfig" + source "fs/jffs2/Kconfig" + # UBIFS File system configuration + source "fs/ubifs/Kconfig" +diff -Nur linux-3.4.90.orig/fs/Makefile linux-3.4.90/fs/Makefile +--- linux-3.4.90.orig/fs/Makefile 2014-05-13 14:11:45.000000000 +0200 ++++ linux-3.4.90/fs/Makefile 2014-05-17 15:09:05.000000000 +0200 +@@ -97,6 +97,7 @@ + obj-$(CONFIG_UFS_FS) += ufs/ + obj-$(CONFIG_EFS_FS) += efs/ + obj-$(CONFIG_JFFS2_FS) += jffs2/ ++obj-$(CONFIG_YAFFS_FS) += yaffs2/ + obj-$(CONFIG_LOGFS) += logfs/ + obj-$(CONFIG_UBIFS_FS) += ubifs/ + obj-$(CONFIG_AFFS_FS) += affs/ +diff -Nur linux-3.4.90.orig/fs/yaffs2/Kconfig linux-3.4.90/fs/yaffs2/Kconfig +--- linux-3.4.90.orig/fs/yaffs2/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/Kconfig 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,171 @@ ++# ++# yaffs file system configurations ++# ++ ++config YAFFS_FS ++ tristate "yaffs2 file system support" ++ default n ++ depends on MTD_BLOCK ++ select YAFFS_YAFFS1 ++ select YAFFS_YAFFS2 ++ help ++ yaffs2, or Yet Another Flash File System, is a file system ++ optimised for NAND Flash chips. ++ ++ To compile the yaffs2 file system support as a module, choose M ++ here: the module will be called yaffs2. ++ ++ If unsure, say N. ++ ++ Further information on yaffs2 is available at ++ . ++ ++config YAFFS_YAFFS1 ++ bool "512 byte / page devices" ++ depends on YAFFS_FS ++ default y ++ help ++ Enable yaffs1 support -- yaffs for 512 byte / page devices ++ ++ Not needed for 2K-page devices. ++ ++ If unsure, say Y. ++ ++config YAFFS_9BYTE_TAGS ++ bool "Use older-style on-NAND data format with pageStatus byte" ++ depends on YAFFS_YAFFS1 ++ default n ++ help ++ ++ Older-style on-NAND data format has a "pageStatus" byte to record ++ chunk/page state. This byte is zero when the page is discarded. ++ Choose this option if you have existing on-NAND data using this ++ format that you need to continue to support. New data written ++ also uses the older-style format. Note: Use of this option ++ generally requires that MTD's oob layout be adjusted to use the ++ older-style format. 
See notes on tags formats and MTD versions ++ in yaffs_mtdif1.c. ++ ++ If unsure, say N. ++ ++config YAFFS_DOES_ECC ++ bool "Lets yaffs do its own ECC" ++ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS ++ default n ++ help ++ This enables yaffs to use its own ECC functions instead of using ++ the ones from the generic MTD-NAND driver. ++ ++ If unsure, say N. ++ ++config YAFFS_ECC_WRONG_ORDER ++ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c" ++ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS ++ default n ++ help ++ This makes yaffs_ecc.c use the same ecc byte order as Steven ++ Hill's nand_ecc.c. If not set, then you get the same ecc byte ++ order as SmartMedia. ++ ++ If unsure, say N. ++ ++config YAFFS_YAFFS2 ++ bool "2048 byte (or larger) / page devices" ++ depends on YAFFS_FS ++ default y ++ help ++ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices ++ ++ If unsure, say Y. ++ ++config YAFFS_AUTO_YAFFS2 ++ bool "Autoselect yaffs2 format" ++ depends on YAFFS_YAFFS2 ++ default y ++ help ++ Without this, you need to explicitely use yaffs2 as the file ++ system type. With this, you can say "yaffs" and yaffs or yaffs2 ++ will be used depending on the device page size (yaffs on ++ 512-byte page devices, yaffs2 on 2K page devices). ++ ++ If unsure, say Y. ++ ++config YAFFS_DISABLE_TAGS_ECC ++ bool "Disable yaffs from doing ECC on tags by default" ++ depends on YAFFS_FS && YAFFS_YAFFS2 ++ default n ++ help ++ This defaults yaffs to using its own ECC calculations on tags instead of ++ just relying on the MTD. ++ This behavior can also be overridden with tags_ecc_on and ++ tags_ecc_off mount options. ++ ++ If unsure, say N. ++ ++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED ++ bool "Force chunk erase check" ++ depends on YAFFS_FS ++ default n ++ help ++ Normally yaffs only checks chunks before writing until an erased ++ chunk is found. This helps to detect any partially written ++ chunks that might have happened due to power loss. ++ ++ Enabling this forces on the test that chunks are erased in flash ++ before writing to them. This takes more time but is potentially ++ a bit more secure. ++ ++ Suggest setting Y during development and ironing out driver ++ issues etc. Suggest setting to N if you want faster writing. ++ ++ If unsure, say Y. ++ ++config YAFFS_EMPTY_LOST_AND_FOUND ++ bool "Empty lost and found on boot" ++ depends on YAFFS_FS ++ default n ++ help ++ If this is enabled then the contents of lost and found is ++ automatically dumped at mount. ++ ++ If unsure, say N. ++ ++config YAFFS_DISABLE_BLOCK_REFRESHING ++ bool "Disable yaffs2 block refreshing" ++ depends on YAFFS_FS ++ default n ++ help ++ If this is set, then block refreshing is disabled. ++ Block refreshing infrequently refreshes the oldest block in ++ a yaffs2 file system. This mechanism helps to refresh flash to ++ mitigate against data loss. This is particularly useful for MLC. ++ ++ If unsure, say N. ++ ++config YAFFS_DISABLE_BACKGROUND ++ bool "Disable yaffs2 background processing" ++ depends on YAFFS_FS ++ default n ++ help ++ If this is set, then background processing is disabled. ++ Background processing makes many foreground activities faster. ++ ++ If unsure, say N. ++ ++config YAFFS_DISABLE_BAD_BLOCK_MARKING ++ bool "Disable yaffs2 bad block marking" ++ depends on YAFFS_FS ++ default n ++ help ++ Useful during early flash bring up to prevent problems causing ++ lots of bad block marking. ++ ++ If unsure, say N. 
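For reference, a minimal .config fragment selecting the yaffs2 support added by the Kconfig entries above might look like this (an illustrative sketch only, using the CONFIG_ symbols defined in this hunk and the usual .config conventions; CONFIG_MTD is assumed as the standard prerequisite of CONFIG_MTD_BLOCK, which YAFFS_FS depends on):

    CONFIG_MTD=y
    CONFIG_MTD_BLOCK=y
    CONFIG_YAFFS_FS=y
    CONFIG_YAFFS_YAFFS1=y
    CONFIG_YAFFS_YAFFS2=y
    CONFIG_YAFFS_AUTO_YAFFS2=y
    # CONFIG_YAFFS_DISABLE_BACKGROUND is not set

With such a configuration a NAND MTD partition can be mounted as "yaffs2", or as "yaffs" on 512-byte-page devices, as the YAFFS_AUTO_YAFFS2 help text above describes.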
++ ++config YAFFS_XATTR ++ bool "Enable yaffs2 xattr support" ++ depends on YAFFS_FS ++ default y ++ help ++ If this is set then yaffs2 will provide xattr support. ++ If unsure, say Y. +diff -Nur linux-3.4.90.orig/fs/yaffs2/Makefile linux-3.4.90/fs/yaffs2/Makefile +--- linux-3.4.90.orig/fs/yaffs2/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/Makefile 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,18 @@ ++# ++# Makefile for the linux YAFFS filesystem routines. ++# ++ ++obj-$(CONFIG_YAFFS_FS) += yaffs.o ++ ++yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o ++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o ++yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o ++yaffs-y += yaffs_mtdif.o ++yaffs-y += yaffs_nameval.o yaffs_attribs.o ++yaffs-y += yaffs_allocator.o ++yaffs-y += yaffs_yaffs1.o ++yaffs-y += yaffs_yaffs2.o ++yaffs-y += yaffs_bitmap.o ++yaffs-y += yaffs_summary.o ++yaffs-y += yaffs_verify.o ++ +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_allocator.c linux-3.4.90/fs/yaffs2/yaffs_allocator.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_allocator.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_allocator.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,357 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_allocator.h" ++#include "yaffs_guts.h" ++#include "yaffs_trace.h" ++#include "yportenv.h" ++ ++/* ++ * Each entry in yaffs_tnode_list and yaffs_obj_list hold blocks ++ * of approx 100 objects that are themn allocated singly. ++ * This is basically a simplified slab allocator. ++ * ++ * We don't use the Linux slab allocator because slab does not allow ++ * us to dump all the objects in one hit when we do a umount and tear ++ * down all the tnodes and objects. slab requires that we first free ++ * the individual objects. ++ * ++ * Once yaffs has been mainlined I shall try to motivate for a change ++ * to slab to provide the extra features we need here. 
++ */ ++ ++struct yaffs_tnode_list { ++ struct yaffs_tnode_list *next; ++ struct yaffs_tnode *tnodes; ++}; ++ ++struct yaffs_obj_list { ++ struct yaffs_obj_list *next; ++ struct yaffs_obj *objects; ++}; ++ ++struct yaffs_allocator { ++ int n_tnodes_created; ++ struct yaffs_tnode *free_tnodes; ++ int n_free_tnodes; ++ struct yaffs_tnode_list *alloc_tnode_list; ++ ++ int n_obj_created; ++ struct list_head free_objs; ++ int n_free_objects; ++ ++ struct yaffs_obj_list *allocated_obj_list; ++}; ++ ++static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator = ++ (struct yaffs_allocator *)dev->allocator; ++ struct yaffs_tnode_list *tmp; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ while (allocator->alloc_tnode_list) { ++ tmp = allocator->alloc_tnode_list->next; ++ ++ kfree(allocator->alloc_tnode_list->tnodes); ++ kfree(allocator->alloc_tnode_list); ++ allocator->alloc_tnode_list = tmp; ++ } ++ ++ allocator->free_tnodes = NULL; ++ allocator->n_free_tnodes = 0; ++ allocator->n_tnodes_created = 0; ++} ++ ++static void yaffs_init_raw_tnodes(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator = dev->allocator; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ allocator->alloc_tnode_list = NULL; ++ allocator->free_tnodes = NULL; ++ allocator->n_free_tnodes = 0; ++ allocator->n_tnodes_created = 0; ++} ++ ++static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes) ++{ ++ struct yaffs_allocator *allocator = ++ (struct yaffs_allocator *)dev->allocator; ++ int i; ++ struct yaffs_tnode *new_tnodes; ++ u8 *mem; ++ struct yaffs_tnode *curr; ++ struct yaffs_tnode *next; ++ struct yaffs_tnode_list *tnl; ++ ++ if (!allocator) { ++ BUG(); ++ return YAFFS_FAIL; ++ } ++ ++ if (n_tnodes < 1) ++ return YAFFS_OK; ++ ++ /* make these things */ ++ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS); ++ mem = (u8 *) new_tnodes; ++ ++ if (!new_tnodes) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs: Could not allocate Tnodes"); ++ return YAFFS_FAIL; ++ } ++ ++ /* New hookup for wide tnodes */ ++ for (i = 0; i < n_tnodes - 1; i++) { ++ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size]; ++ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size]; ++ curr->internal[0] = next; ++ } ++ ++ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size]; ++ curr->internal[0] = allocator->free_tnodes; ++ allocator->free_tnodes = (struct yaffs_tnode *)mem; ++ ++ allocator->n_free_tnodes += n_tnodes; ++ allocator->n_tnodes_created += n_tnodes; ++ ++ /* Now add this bunch of tnodes to a list for freeing up. ++ * NB If we can't add this to the management list it isn't fatal ++ * but it just means we can't free this bunch of tnodes later. 
++ */ ++ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS); ++ if (!tnl) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "Could not add tnodes to management list"); ++ return YAFFS_FAIL; ++ } else { ++ tnl->tnodes = new_tnodes; ++ tnl->next = allocator->alloc_tnode_list; ++ allocator->alloc_tnode_list = tnl; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added"); ++ ++ return YAFFS_OK; ++} ++ ++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator = ++ (struct yaffs_allocator *)dev->allocator; ++ struct yaffs_tnode *tn = NULL; ++ ++ if (!allocator) { ++ BUG(); ++ return NULL; ++ } ++ ++ /* If there are none left make more */ ++ if (!allocator->free_tnodes) ++ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES); ++ ++ if (allocator->free_tnodes) { ++ tn = allocator->free_tnodes; ++ allocator->free_tnodes = allocator->free_tnodes->internal[0]; ++ allocator->n_free_tnodes--; ++ } ++ ++ return tn; ++} ++ ++/* FreeTnode frees up a tnode and puts it back on the free list */ ++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn) ++{ ++ struct yaffs_allocator *allocator = dev->allocator; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ if (tn) { ++ tn->internal[0] = allocator->free_tnodes; ++ allocator->free_tnodes = tn; ++ allocator->n_free_tnodes++; ++ } ++ dev->checkpoint_blocks_required = 0; /* force recalculation */ ++} ++ ++/*--------------- yaffs_obj alloaction ------------------------ ++ * ++ * Free yaffs_objs are stored in a list using obj->siblings. ++ * The blocks of allocated objects are stored in a linked list. ++ */ ++ ++static void yaffs_init_raw_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator = dev->allocator; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ allocator->allocated_obj_list = NULL; ++ INIT_LIST_HEAD(&allocator->free_objs); ++ allocator->n_free_objects = 0; ++} ++ ++static void yaffs_deinit_raw_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator = dev->allocator; ++ struct yaffs_obj_list *tmp; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ while (allocator->allocated_obj_list) { ++ tmp = allocator->allocated_obj_list->next; ++ kfree(allocator->allocated_obj_list->objects); ++ kfree(allocator->allocated_obj_list); ++ allocator->allocated_obj_list = tmp; ++ } ++ ++ INIT_LIST_HEAD(&allocator->free_objs); ++ allocator->n_free_objects = 0; ++ allocator->n_obj_created = 0; ++} ++ ++static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj) ++{ ++ struct yaffs_allocator *allocator = dev->allocator; ++ int i; ++ struct yaffs_obj *new_objs; ++ struct yaffs_obj_list *list; ++ ++ if (!allocator) { ++ BUG(); ++ return YAFFS_FAIL; ++ } ++ ++ if (n_obj < 1) ++ return YAFFS_OK; ++ ++ /* make these things */ ++ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS); ++ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS); ++ ++ if (!new_objs || !list) { ++ kfree(new_objs); ++ new_objs = NULL; ++ kfree(list); ++ list = NULL; ++ yaffs_trace(YAFFS_TRACE_ALLOCATE, ++ "Could not allocate more objects"); ++ return YAFFS_FAIL; ++ } ++ ++ /* Hook them into the free list */ ++ for (i = 0; i < n_obj; i++) ++ list_add(&new_objs[i].siblings, &allocator->free_objs); ++ ++ allocator->n_free_objects += n_obj; ++ allocator->n_obj_created += n_obj; ++ ++ /* Now add this bunch of Objects to a list for freeing up. 
*/ ++ ++ list->objects = new_objs; ++ list->next = allocator->allocated_obj_list; ++ allocator->allocated_obj_list = list; ++ ++ return YAFFS_OK; ++} ++ ++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj = NULL; ++ struct list_head *lh; ++ struct yaffs_allocator *allocator = dev->allocator; ++ ++ if (!allocator) { ++ BUG(); ++ return obj; ++ } ++ ++ /* If there are none left make more */ ++ if (list_empty(&allocator->free_objs)) ++ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS); ++ ++ if (!list_empty(&allocator->free_objs)) { ++ lh = allocator->free_objs.next; ++ obj = list_entry(lh, struct yaffs_obj, siblings); ++ list_del_init(lh); ++ allocator->n_free_objects--; ++ } ++ ++ return obj; ++} ++ ++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj) ++{ ++ ++ struct yaffs_allocator *allocator = dev->allocator; ++ ++ if (!allocator) { ++ BUG(); ++ return; ++ } ++ ++ /* Link into the free list. */ ++ list_add(&obj->siblings, &allocator->free_objs); ++ allocator->n_free_objects++; ++} ++ ++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev) ++{ ++ ++ if (!dev->allocator) { ++ BUG(); ++ return; ++ } ++ ++ yaffs_deinit_raw_tnodes(dev); ++ yaffs_deinit_raw_objs(dev); ++ kfree(dev->allocator); ++ dev->allocator = NULL; ++} ++ ++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_allocator *allocator; ++ ++ if (dev->allocator) { ++ BUG(); ++ return; ++ } ++ ++ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS); ++ if (allocator) { ++ dev->allocator = allocator; ++ yaffs_init_raw_tnodes(dev); ++ yaffs_init_raw_objs(dev); ++ } ++} ++ +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_allocator.h linux-3.4.90/fs/yaffs2/yaffs_allocator.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_allocator.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_allocator.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,30 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_ALLOCATOR_H__ ++#define __YAFFS_ALLOCATOR_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev); ++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev); ++ ++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev); ++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn); ++ ++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev); ++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_attribs.c linux-3.4.90/fs/yaffs2/yaffs_attribs.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_attribs.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_attribs.c 2014-05-17 16:37:59.000000000 +0200 +@@ -0,0 +1,166 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. 
++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_guts.h" ++#include "yaffs_attribs.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) ++static inline uid_t ia_uid_read(const struct iattr *iattr) ++{ ++ return from_kuid(&init_user_ns, iattr->ia_uid); ++} ++ ++static inline gid_t ia_gid_read(const struct iattr *iattr) ++{ ++ return from_kgid(&init_user_ns, iattr->ia_gid); ++} ++ ++static inline void ia_uid_write(struct iattr *iattr, uid_t uid) ++{ ++ iattr->ia_uid = make_kuid(&init_user_ns, uid); ++} ++ ++static inline void ia_gid_write(struct iattr *iattr, gid_t gid) ++{ ++ iattr->ia_gid = make_kgid(&init_user_ns, gid); ++} ++#else ++static inline uid_t ia_uid_read(const struct iattr *iattr) ++{ ++ return iattr->ia_uid; ++} ++ ++static inline gid_t ia_gid_read(const struct iattr *iattr) ++{ ++ return iattr->ia_gid; ++} ++ ++static inline void ia_uid_write(struct iattr *iattr, uid_t uid) ++{ ++ iattr->ia_uid = uid; ++} ++ ++static inline void ia_gid_write(struct iattr *iattr, gid_t gid) ++{ ++ iattr->ia_gid = gid; ++} ++#endif ++ ++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh) ++{ ++ obj->yst_uid = oh->yst_uid; ++ obj->yst_gid = oh->yst_gid; ++ obj->yst_atime = oh->yst_atime; ++ obj->yst_mtime = oh->yst_mtime; ++ obj->yst_ctime = oh->yst_ctime; ++ obj->yst_rdev = oh->yst_rdev; ++} ++ ++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj) ++{ ++ oh->yst_uid = obj->yst_uid; ++ oh->yst_gid = obj->yst_gid; ++ oh->yst_atime = obj->yst_atime; ++ oh->yst_mtime = obj->yst_mtime; ++ oh->yst_ctime = obj->yst_ctime; ++ oh->yst_rdev = obj->yst_rdev; ++ ++} ++ ++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c) ++{ ++ obj->yst_mtime = Y_CURRENT_TIME; ++ if (do_a) ++ obj->yst_atime = obj->yst_mtime; ++ if (do_c) ++ obj->yst_ctime = obj->yst_mtime; ++} ++ ++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev) ++{ ++ yaffs_load_current_time(obj, 1, 1); ++ obj->yst_rdev = rdev; ++ obj->yst_uid = uid; ++ obj->yst_gid = gid; ++} ++ ++static loff_t yaffs_get_file_size(struct yaffs_obj *obj) ++{ ++ YCHAR *alias = NULL; ++ obj = yaffs_get_equivalent_obj(obj); ++ ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ return obj->variant.file_variant.file_size; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ alias = obj->variant.symlink_variant.alias; ++ if (!alias) ++ return 0; ++ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH); ++ default: ++ return 0; ++ } ++} ++ ++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr) ++{ ++ unsigned int valid = attr->ia_valid; ++ ++ if (valid & ATTR_MODE) ++ obj->yst_mode = attr->ia_mode; ++ if (valid & ATTR_UID) ++ obj->yst_uid = ia_uid_read(attr); ++ if (valid & ATTR_GID) ++ obj->yst_gid = ia_gid_read(attr); ++ ++ if (valid & ATTR_ATIME) ++ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime); ++ if (valid & ATTR_CTIME) ++ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime); ++ if (valid & ATTR_MTIME) ++ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime); ++ ++ if (valid & ATTR_SIZE) ++ yaffs_resize_file(obj, attr->ia_size); ++ ++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL); ++ ++ return YAFFS_OK; ++ ++} ++ ++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr) ++{ ++ unsigned int valid = 0; ++ ++ 
attr->ia_mode = obj->yst_mode; ++ valid |= ATTR_MODE; ++ ia_uid_write(attr, obj->yst_uid); ++ valid |= ATTR_UID; ++ ia_gid_write(attr, obj->yst_gid); ++ valid |= ATTR_GID; ++ ++ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime; ++ valid |= ATTR_ATIME; ++ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime; ++ valid |= ATTR_CTIME; ++ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime; ++ valid |= ATTR_MTIME; ++ ++ attr->ia_size = yaffs_get_file_size(obj); ++ valid |= ATTR_SIZE; ++ ++ attr->ia_valid = valid; ++ ++ return YAFFS_OK; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_attribs.h linux-3.4.90/fs/yaffs2/yaffs_attribs.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_attribs.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_attribs.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,28 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_ATTRIBS_H__ ++#define __YAFFS_ATTRIBS_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh); ++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj); ++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev); ++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c); ++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr); ++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_bitmap.c linux-3.4.90/fs/yaffs2/yaffs_bitmap.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_bitmap.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_bitmap.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,97 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
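yaffs_set_attribs() above copies only the fields whose bits are set in ia_valid, and yaffs_get_attribs() builds that mask as it fills the struct in. A small stand-alone sketch of the mask-guarded copy; the DEMO_* flag values and the demo structs are made up for illustration and are not the kernel's ATTR_* constants:

    #include <stdio.h>

    #define DEMO_MODE  0x01     /* illustrative flag values */
    #define DEMO_UID   0x02
    #define DEMO_SIZE  0x04

    struct demo_attr {
        unsigned int valid;     /* which fields below are meaningful */
        unsigned int mode;
        unsigned int uid;
        long long size;
    };

    struct demo_obj {
        unsigned int mode;
        unsigned int uid;
        long long size;
    };

    /* Apply only the flagged fields, as yaffs_set_attribs() does. */
    static void demo_set_attribs(struct demo_obj *obj, const struct demo_attr *attr)
    {
        if (attr->valid & DEMO_MODE)
            obj->mode = attr->mode;
        if (attr->valid & DEMO_UID)
            obj->uid = attr->uid;
        if (attr->valid & DEMO_SIZE)
            obj->size = attr->size;
    }

    int main(void)
    {
        struct demo_obj obj = { .mode = 0644, .uid = 0, .size = 100 };
        struct demo_attr attr = { .valid = DEMO_UID, .uid = 1000 };

        demo_set_attribs(&obj, &attr);
        /* mode and size are untouched because their bits were not set */
        printf("mode=%o uid=%u size=%lld\n", obj.mode, obj.uid, obj.size);
        return 0;
    }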
++ */ ++ ++#include "yaffs_bitmap.h" ++#include "yaffs_trace.h" ++/* ++ * Chunk bitmap manipulations ++ */ ++ ++static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk) ++{ ++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "BlockBits block %d is not valid", ++ blk); ++ BUG(); ++ } ++ return dev->chunk_bits + ++ (dev->chunk_bit_stride * (blk - dev->internal_start_block)); ++} ++ ++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk) ++{ ++ if (blk < dev->internal_start_block || blk > dev->internal_end_block || ++ chunk < 0 || chunk >= dev->param.chunks_per_block) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "Chunk Id (%d:%d) invalid", ++ blk, chunk); ++ BUG(); ++ } ++} ++ ++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ ++ memset(blk_bits, 0, dev->chunk_bit_stride); ++} ++ ++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ ++ yaffs_verify_chunk_bit_id(dev, blk, chunk); ++ blk_bits[chunk / 8] &= ~(1 << (chunk & 7)); ++} ++ ++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ ++ yaffs_verify_chunk_bit_id(dev, blk, chunk); ++ blk_bits[chunk / 8] |= (1 << (chunk & 7)); ++} ++ ++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ ++ yaffs_verify_chunk_bit_id(dev, blk, chunk); ++ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0; ++} ++ ++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ int i; ++ ++ for (i = 0; i < dev->chunk_bit_stride; i++) { ++ if (*blk_bits) ++ return 1; ++ blk_bits++; ++ } ++ return 0; ++} ++ ++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk) ++{ ++ u8 *blk_bits = yaffs_block_bits(dev, blk); ++ int i; ++ int n = 0; ++ ++ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++) ++ n += hweight8(*blk_bits); ++ ++ return n; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_bitmap.h linux-3.4.90/fs/yaffs2/yaffs_bitmap.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_bitmap.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_bitmap.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,33 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. 
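The bitmap code above packs one bit per chunk, chunk_bit_stride bytes per block. A self-contained sketch of the same set/clear/test/count arithmetic, with illustrative geometry constants and hweight8 replaced by a plain popcount loop:

    #include <stdio.h>

    #define CHUNKS_PER_BLOCK 64
    #define N_BLOCKS         4
    #define BIT_STRIDE       ((CHUNKS_PER_BLOCK + 7) / 8)  /* bytes per block */

    static unsigned char chunk_bits[N_BLOCKS * BIT_STRIDE];

    static unsigned char *block_bits(int blk)
    {
        return chunk_bits + BIT_STRIDE * blk;
    }

    static void set_chunk_bit(int blk, int chunk)
    {
        block_bits(blk)[chunk / 8] |= 1 << (chunk & 7);
    }

    static void clear_chunk_bit(int blk, int chunk)
    {
        block_bits(blk)[chunk / 8] &= ~(1 << (chunk & 7));
    }

    static int check_chunk_bit(int blk, int chunk)
    {
        return (block_bits(blk)[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
    }

    /* Count bits set in one block's slice, like yaffs_count_chunk_bits(). */
    static int count_chunk_bits(int blk)
    {
        unsigned char *bits = block_bits(blk);
        int i, n = 0;

        for (i = 0; i < BIT_STRIDE; i++) {
            unsigned char b = bits[i];

            while (b) {
                n += b & 1;
                b >>= 1;
            }
        }
        return n;
    }

    int main(void)
    {
        set_chunk_bit(2, 0);
        set_chunk_bit(2, 63);
        clear_chunk_bit(2, 0);
        printf("bit(2,63)=%d, in-use chunks in block 2: %d\n",
               check_chunk_bit(2, 63), count_chunk_bits(2));
        return 0;
    }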
++ */ ++ ++/* ++ * Chunk bitmap manipulations ++ */ ++ ++#ifndef __YAFFS_BITMAP_H__ ++#define __YAFFS_BITMAP_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk); ++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk); ++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk); ++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk); ++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk); ++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk); ++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_checkptrw.c linux-3.4.90/fs/yaffs2/yaffs_checkptrw.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_checkptrw.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_checkptrw.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,474 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_checkptrw.h" ++#include "yaffs_getblockinfo.h" ++ ++struct yaffs_checkpt_chunk_hdr { ++ int version; ++ int seq; ++ u32 sum; ++ u32 xor; ++} ; ++ ++ ++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk) ++{ ++ return chunk - dev->chunk_offset; ++} ++ ++static int apply_block_offset(struct yaffs_dev *dev, int block) ++{ ++ return block - dev->block_offset; ++} ++ ++static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev) ++{ ++ struct yaffs_checkpt_chunk_hdr hdr; ++ ++ hdr.version = YAFFS_CHECKPOINT_VERSION; ++ hdr.seq = dev->checkpt_page_seq; ++ hdr.sum = dev->checkpt_sum; ++ hdr.xor = dev->checkpt_xor; ++ ++ dev->checkpt_byte_offs = sizeof(hdr); ++ ++ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr)); ++} ++ ++static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev) ++{ ++ struct yaffs_checkpt_chunk_hdr hdr; ++ ++ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr)); ++ ++ dev->checkpt_byte_offs = sizeof(hdr); ++ ++ return hdr.version == YAFFS_CHECKPOINT_VERSION && ++ hdr.seq == dev->checkpt_page_seq && ++ hdr.sum == dev->checkpt_sum && ++ hdr.xor == dev->checkpt_xor; ++} ++ ++static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev) ++{ ++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "checkpt blocks_avail = %d", blocks_avail); ++ ++ return (blocks_avail <= 0) ? 
0 : 1; ++} ++ ++static int yaffs_checkpt_erase(struct yaffs_dev *dev) ++{ ++ int i; ++ ++ if (!dev->drv.drv_erase_fn) ++ return 0; ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "checking blocks %d to %d", ++ dev->internal_start_block, dev->internal_end_block); ++ ++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i); ++ int offset_i = apply_block_offset(dev, i); ++ int result; ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "erasing checkpt block %d", i); ++ ++ dev->n_erasures++; ++ ++ result = dev->drv.drv_erase_fn(dev, offset_i); ++ if(result) { ++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY; ++ dev->n_erased_blocks++; ++ dev->n_free_chunks += ++ dev->param.chunks_per_block; ++ } else { ++ dev->drv.drv_mark_bad_fn(dev, offset_i); ++ bi->block_state = YAFFS_BLOCK_STATE_DEAD; ++ } ++ } ++ } ++ ++ dev->blocks_in_checkpt = 0; ++ ++ return 1; ++} ++ ++static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev) ++{ ++ int i; ++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "allocating checkpt block: erased %d reserved %d avail %d next %d ", ++ dev->n_erased_blocks, dev->param.n_reserved_blocks, ++ blocks_avail, dev->checkpt_next_block); ++ ++ if (dev->checkpt_next_block >= 0 && ++ dev->checkpt_next_block <= dev->internal_end_block && ++ blocks_avail > 0) { ++ ++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; ++ i++) { ++ struct yaffs_block_info *bi; ++ ++ bi = yaffs_get_block_info(dev, i); ++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) { ++ dev->checkpt_next_block = i + 1; ++ dev->checkpt_cur_block = i; ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "allocating checkpt block %d", i); ++ return; ++ } ++ } ++ } ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks"); ++ ++ dev->checkpt_next_block = -1; ++ dev->checkpt_cur_block = -1; ++} ++ ++static void yaffs2_checkpt_find_block(struct yaffs_dev *dev) ++{ ++ int i; ++ struct yaffs_ext_tags tags; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "find next checkpt block: start: blocks %d next %d", ++ dev->blocks_in_checkpt, dev->checkpt_next_block); ++ ++ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks) ++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; ++ i++) { ++ int chunk = i * dev->param.chunks_per_block; ++ enum yaffs_block_state state; ++ u32 seq; ++ ++ dev->tagger.read_chunk_tags_fn(dev, ++ apply_chunk_offset(dev, chunk), ++ NULL, &tags); ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "find next checkpt block: search: block %d state %d oid %d seq %d eccr %d", ++ i, (int) state, ++ tags.obj_id, tags.seq_number, ++ tags.ecc_result); ++ ++ if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) ++ continue; ++ ++ dev->tagger.query_block_fn(dev, ++ apply_block_offset(dev, i), ++ &state, &seq); ++ if (state == YAFFS_BLOCK_STATE_DEAD) ++ continue; ++ ++ /* Right kind of block */ ++ dev->checkpt_next_block = tags.obj_id; ++ dev->checkpt_cur_block = i; ++ dev->checkpt_block_list[dev->blocks_in_checkpt] = i; ++ dev->blocks_in_checkpt++; ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "found checkpt block %d", i); ++ return; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks"); ++ ++ dev->checkpt_next_block = -1; ++ dev->checkpt_cur_block = -1; ++} ++ ++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing) ++{ ++ int i; ++ ++ dev->checkpt_open_write = writing; ++ ++ /* Got the 
functions we need? */ ++ if (!dev->tagger.write_chunk_tags_fn || ++ !dev->tagger.read_chunk_tags_fn || ++ !dev->drv.drv_erase_fn || ++ !dev->drv.drv_mark_bad_fn) ++ return 0; ++ ++ if (writing && !yaffs2_checkpt_space_ok(dev)) ++ return 0; ++ ++ if (!dev->checkpt_buffer) ++ dev->checkpt_buffer = ++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); ++ if (!dev->checkpt_buffer) ++ return 0; ++ ++ dev->checkpt_page_seq = 0; ++ dev->checkpt_byte_count = 0; ++ dev->checkpt_sum = 0; ++ dev->checkpt_xor = 0; ++ dev->checkpt_cur_block = -1; ++ dev->checkpt_cur_chunk = -1; ++ dev->checkpt_next_block = dev->internal_start_block; ++ ++ if (writing) { ++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk); ++ yaffs2_checkpt_init_chunk_hdr(dev); ++ return yaffs_checkpt_erase(dev); ++ } ++ ++ /* Opening for a read */ ++ /* Set to a value that will kick off a read */ ++ dev->checkpt_byte_offs = dev->data_bytes_per_chunk; ++ /* A checkpoint block list of 1 checkpoint block per 16 block is ++ * (hopefully) going to be way more than we need */ ++ dev->blocks_in_checkpt = 0; ++ dev->checkpt_max_blocks = ++ (dev->internal_end_block - dev->internal_start_block) / 16 + 2; ++ dev->checkpt_block_list = ++ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS); ++ ++ if (!dev->checkpt_block_list) ++ return 0; ++ ++ for (i = 0; i < dev->checkpt_max_blocks; i++) ++ dev->checkpt_block_list[i] = -1; ++ ++ return 1; ++} ++ ++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum) ++{ ++ u32 composite_sum; ++ ++ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff); ++ *sum = composite_sum; ++ return 1; ++} ++ ++static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev) ++{ ++ int chunk; ++ int offset_chunk; ++ struct yaffs_ext_tags tags; ++ ++ if (dev->checkpt_cur_block < 0) { ++ yaffs2_checkpt_find_erased_block(dev); ++ dev->checkpt_cur_chunk = 0; ++ } ++ ++ if (dev->checkpt_cur_block < 0) ++ return 0; ++ ++ tags.is_deleted = 0; ++ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */ ++ tags.chunk_id = dev->checkpt_page_seq + 1; ++ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA; ++ tags.n_bytes = dev->data_bytes_per_chunk; ++ if (dev->checkpt_cur_chunk == 0) { ++ /* First chunk we write for the block? 
Set block state to ++ checkpoint */ ++ struct yaffs_block_info *bi = ++ yaffs_get_block_info(dev, dev->checkpt_cur_block); ++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT; ++ dev->blocks_in_checkpt++; ++ } ++ ++ chunk = ++ dev->checkpt_cur_block * dev->param.chunks_per_block + ++ dev->checkpt_cur_chunk; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d", ++ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk, ++ tags.obj_id, tags.chunk_id); ++ ++ offset_chunk = apply_chunk_offset(dev, chunk); ++ ++ dev->n_page_writes++; ++ ++ dev->tagger.write_chunk_tags_fn(dev, offset_chunk, ++ dev->checkpt_buffer, &tags); ++ dev->checkpt_page_seq++; ++ dev->checkpt_cur_chunk++; ++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) { ++ dev->checkpt_cur_chunk = 0; ++ dev->checkpt_cur_block = -1; ++ } ++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk); ++ ++ yaffs2_checkpt_init_chunk_hdr(dev); ++ ++ ++ return 1; ++} ++ ++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes) ++{ ++ int i = 0; ++ int ok = 1; ++ u8 *data_bytes = (u8 *) data; ++ ++ if (!dev->checkpt_buffer) ++ return 0; ++ ++ if (!dev->checkpt_open_write) ++ return -1; ++ ++ while (i < n_bytes && ok) { ++ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes; ++ dev->checkpt_sum += *data_bytes; ++ dev->checkpt_xor ^= *data_bytes; ++ ++ dev->checkpt_byte_offs++; ++ i++; ++ data_bytes++; ++ dev->checkpt_byte_count++; ++ ++ if (dev->checkpt_byte_offs < 0 || ++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) ++ ok = yaffs2_checkpt_flush_buffer(dev); ++ } ++ ++ return i; ++} ++ ++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes) ++{ ++ int i = 0; ++ int ok = 1; ++ struct yaffs_ext_tags tags; ++ int chunk; ++ int offset_chunk; ++ u8 *data_bytes = (u8 *) data; ++ ++ if (!dev->checkpt_buffer) ++ return 0; ++ ++ if (dev->checkpt_open_write) ++ return -1; ++ ++ while (i < n_bytes && ok) { ++ ++ if (dev->checkpt_byte_offs < 0 || ++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) { ++ ++ if (dev->checkpt_cur_block < 0) { ++ yaffs2_checkpt_find_block(dev); ++ dev->checkpt_cur_chunk = 0; ++ } ++ ++ if (dev->checkpt_cur_block < 0) { ++ ok = 0; ++ break; ++ } ++ ++ chunk = dev->checkpt_cur_block * ++ dev->param.chunks_per_block + ++ dev->checkpt_cur_chunk; ++ ++ offset_chunk = apply_chunk_offset(dev, chunk); ++ dev->n_page_reads++; ++ ++ /* read in the next chunk */ ++ dev->tagger.read_chunk_tags_fn(dev, ++ offset_chunk, ++ dev->checkpt_buffer, ++ &tags); ++ ++ if (tags.chunk_id != (dev->checkpt_page_seq + 1) || ++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED || ++ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) { ++ ok = 0; ++ break; ++ } ++ if(!yaffs2_checkpt_check_chunk_hdr(dev)) { ++ ok = 0; ++ break; ++ } ++ ++ dev->checkpt_page_seq++; ++ dev->checkpt_cur_chunk++; ++ ++ if (dev->checkpt_cur_chunk >= ++ dev->param.chunks_per_block) ++ dev->checkpt_cur_block = -1; ++ ++ } ++ ++ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs]; ++ dev->checkpt_sum += *data_bytes; ++ dev->checkpt_xor ^= *data_bytes; ++ dev->checkpt_byte_offs++; ++ i++; ++ data_bytes++; ++ dev->checkpt_byte_count++; ++ } ++ ++ return i; ++} ++ ++int yaffs_checkpt_close(struct yaffs_dev *dev) ++{ ++ int i; ++ ++ if (dev->checkpt_open_write) { ++ if (dev->checkpt_byte_offs != ++ sizeof(sizeof(struct yaffs_checkpt_chunk_hdr))) ++ yaffs2_checkpt_flush_buffer(dev); ++ } else if (dev->checkpt_block_list) { ++ for (i = 0; ++ i < dev->blocks_in_checkpt && ++ 
dev->checkpt_block_list[i] >= 0; i++) { ++ int blk = dev->checkpt_block_list[i]; ++ struct yaffs_block_info *bi = NULL; ++ ++ if (dev->internal_start_block <= blk && ++ blk <= dev->internal_end_block) ++ bi = yaffs_get_block_info(dev, blk); ++ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY) ++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT; ++ } ++ kfree(dev->checkpt_block_list); ++ dev->checkpt_block_list = NULL; ++ } ++ ++ dev->n_free_chunks -= ++ dev->blocks_in_checkpt * dev->param.chunks_per_block; ++ dev->n_erased_blocks -= dev->blocks_in_checkpt; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d", ++ dev->checkpt_byte_count); ++ ++ if (dev->checkpt_buffer) { ++ /* free the buffer */ ++ kfree(dev->checkpt_buffer); ++ dev->checkpt_buffer = NULL; ++ return 1; ++ } else { ++ return 0; ++ } ++} ++ ++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev) ++{ ++ /* Erase the checkpoint data */ ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "checkpoint invalidate of %d blocks", ++ dev->blocks_in_checkpt); ++ ++ return yaffs_checkpt_erase(dev); ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_checkptrw.h linux-3.4.90/fs/yaffs2/yaffs_checkptrw.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_checkptrw.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_checkptrw.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,33 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_CHECKPTRW_H__ ++#define __YAFFS_CHECKPTRW_H__ ++ ++#include "yaffs_guts.h" ++ ++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing); ++ ++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes); ++ ++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes); ++ ++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum); ++ ++int yaffs_checkpt_close(struct yaffs_dev *dev); ++ ++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_ecc.c linux-3.4.90/fs/yaffs2/yaffs_ecc.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_ecc.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_ecc.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,281 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++/* ++ * This code implements the ECC algorithm used in SmartMedia. ++ * ++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. ++ * The two unused bit are set to 1. ++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two ++ * such ECC blocks are used on a 512-byte NAND page. 
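Each checkpoint chunk above begins with a yaffs_checkpt_chunk_hdr that carries the page sequence number plus the running sum and xor of every byte streamed so far, and the reader rejects a chunk whose header does not match its own counters. A stand-alone sketch of that round trip for one chunk; CHUNK_SIZE and the field names with the _acc suffix are illustrative stand-ins:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define CHUNK_SIZE 64              /* stand-in for data_bytes_per_chunk */

    struct chunk_hdr {                 /* mirrors yaffs_checkpt_chunk_hdr */
        int version;
        int seq;
        uint32_t sum;
        uint32_t xor_acc;
    };

    /* Writer: store the counters as they stand in the chunk header, then
     * append payload bytes while updating them, as yaffs2_checkpt_wr() does. */
    static void write_chunk(uint8_t *chunk, const uint8_t *data, size_t n,
                            int seq, uint32_t *sum, uint32_t *xor_acc)
    {
        struct chunk_hdr hdr = { 1, seq, *sum, *xor_acc };
        size_t i;

        memset(chunk, 0, CHUNK_SIZE);
        memcpy(chunk, &hdr, sizeof(hdr));
        for (i = 0; i < n && sizeof(hdr) + i < CHUNK_SIZE; i++) {
            chunk[sizeof(hdr) + i] = data[i];
            *sum += data[i];
            *xor_acc ^= data[i];
        }
    }

    /* Reader: accept the chunk only if its header matches the reader's own
     * counters, mirroring yaffs2_checkpt_check_chunk_hdr(). */
    static int check_chunk(const uint8_t *chunk, int seq,
                           uint32_t sum, uint32_t xor_acc)
    {
        struct chunk_hdr hdr;

        memcpy(&hdr, chunk, sizeof(hdr));
        return hdr.version == 1 && hdr.seq == seq &&
               hdr.sum == sum && hdr.xor_acc == xor_acc;
    }

    int main(void)
    {
        uint8_t chunk[CHUNK_SIZE];
        const uint8_t payload[] = "checkpoint payload";
        uint32_t sum = 0, xor_acc = 0;

        write_chunk(chunk, payload, sizeof(payload), 0, &sum, &xor_acc);
        printf("chunk 0 header valid: %d\n", check_chunk(chunk, 0, 0, 0));
        /* a following chunk would carry seq 1 plus the accumulated sum/xor */
        return 0;
    }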
++ * ++ */ ++ ++#include "yportenv.h" ++ ++#include "yaffs_ecc.h" ++ ++/* Table generated by gen-ecc.c ++ * Using a table means we do not have to calculate p1..p4 and p1'..p4' ++ * for each byte of data. These are instead provided in a table in bits7..2. ++ * Bit 0 of each entry indicates whether the entry has an odd or even parity, ++ * and therefore this bytes influence on the line parity. ++ */ ++ ++static const unsigned char column_parity_table[] = { ++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69, ++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00, ++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc, ++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95, ++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0, ++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99, ++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65, ++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c, ++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc, ++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5, ++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59, ++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30, ++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55, ++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c, ++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0, ++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9, ++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0, ++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9, ++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55, ++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c, ++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59, ++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30, ++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc, ++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5, ++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65, ++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c, ++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0, ++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99, ++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc, ++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95, ++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69, ++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00, ++}; ++ ++ ++/* Calculate the ECC for a 256-byte block of data */ ++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc) ++{ ++ unsigned int i; ++ unsigned char col_parity = 0; ++ unsigned char line_parity = 0; ++ unsigned char line_parity_prime = 0; ++ unsigned char t; ++ unsigned char b; ++ ++ for (i = 0; i < 256; i++) { ++ b = column_parity_table[*data++]; ++ col_parity ^= b; ++ ++ if (b & 0x01) { /* odd number of bits in the byte */ ++ line_parity ^= i; ++ line_parity_prime ^= ~i; ++ } ++ } ++ ++ ecc[2] = (~col_parity) | 0x03; ++ ++ t = 0; ++ if (line_parity & 0x80) ++ t |= 0x80; ++ if (line_parity_prime & 0x80) ++ t |= 0x40; ++ if (line_parity & 0x40) ++ t |= 0x20; ++ if (line_parity_prime & 0x40) ++ t |= 0x10; ++ if (line_parity & 0x20) ++ t |= 0x08; ++ if (line_parity_prime & 0x20) ++ t |= 0x04; ++ if (line_parity & 0x10) ++ t |= 0x02; ++ if (line_parity_prime & 0x10) ++ t |= 0x01; ++ ecc[1] = ~t; ++ ++ t = 0; ++ if (line_parity & 0x08) ++ t |= 0x80; ++ if (line_parity_prime & 0x08) ++ t |= 0x40; ++ if (line_parity & 0x04) ++ t |= 0x20; ++ if (line_parity_prime & 0x04) ++ t |= 0x10; ++ if (line_parity & 0x02) ++ t |= 0x08; ++ if (line_parity_prime & 0x02) ++ t |= 0x04; ++ if (line_parity & 0x01) ++ t |= 0x02; ++ if (line_parity_prime & 0x01) ++ t |= 0x01; ++ ecc[0] = ~t; ++ ++} ++ ++/* Correct the ECC on a 256 byte block of data */ ++ ++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc, ++ const 
unsigned char *test_ecc) ++{ ++ unsigned char d0, d1, d2; /* deltas */ ++ ++ d0 = read_ecc[0] ^ test_ecc[0]; ++ d1 = read_ecc[1] ^ test_ecc[1]; ++ d2 = read_ecc[2] ^ test_ecc[2]; ++ ++ if ((d0 | d1 | d2) == 0) ++ return 0; /* no error */ ++ ++ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 && ++ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 && ++ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) { ++ /* Single bit (recoverable) error in data */ ++ ++ unsigned byte; ++ unsigned bit; ++ ++ bit = byte = 0; ++ ++ if (d1 & 0x80) ++ byte |= 0x80; ++ if (d1 & 0x20) ++ byte |= 0x40; ++ if (d1 & 0x08) ++ byte |= 0x20; ++ if (d1 & 0x02) ++ byte |= 0x10; ++ if (d0 & 0x80) ++ byte |= 0x08; ++ if (d0 & 0x20) ++ byte |= 0x04; ++ if (d0 & 0x08) ++ byte |= 0x02; ++ if (d0 & 0x02) ++ byte |= 0x01; ++ ++ if (d2 & 0x80) ++ bit |= 0x04; ++ if (d2 & 0x20) ++ bit |= 0x02; ++ if (d2 & 0x08) ++ bit |= 0x01; ++ ++ data[byte] ^= (1 << bit); ++ ++ return 1; /* Corrected the error */ ++ } ++ ++ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) { ++ /* Reccoverable error in ecc */ ++ ++ read_ecc[0] = test_ecc[0]; ++ read_ecc[1] = test_ecc[1]; ++ read_ecc[2] = test_ecc[2]; ++ ++ return 1; /* Corrected the error */ ++ } ++ ++ /* Unrecoverable error */ ++ ++ return -1; ++ ++} ++ ++/* ++ * ECCxxxOther does ECC calcs on arbitrary n bytes of data ++ */ ++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes, ++ struct yaffs_ecc_other *ecc_other) ++{ ++ unsigned int i; ++ unsigned char col_parity = 0; ++ unsigned line_parity = 0; ++ unsigned line_parity_prime = 0; ++ unsigned char b; ++ ++ for (i = 0; i < n_bytes; i++) { ++ b = column_parity_table[*data++]; ++ col_parity ^= b; ++ ++ if (b & 0x01) { ++ /* odd number of bits in the byte */ ++ line_parity ^= i; ++ line_parity_prime ^= ~i; ++ } ++ ++ } ++ ++ ecc_other->col_parity = (col_parity >> 2) & 0x3f; ++ ecc_other->line_parity = line_parity; ++ ecc_other->line_parity_prime = line_parity_prime; ++} ++ ++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes, ++ struct yaffs_ecc_other *read_ecc, ++ const struct yaffs_ecc_other *test_ecc) ++{ ++ unsigned char delta_col; /* column parity delta */ ++ unsigned delta_line; /* line parity delta */ ++ unsigned delta_line_prime; /* line parity delta */ ++ unsigned bit; ++ ++ delta_col = read_ecc->col_parity ^ test_ecc->col_parity; ++ delta_line = read_ecc->line_parity ^ test_ecc->line_parity; ++ delta_line_prime = ++ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime; ++ ++ if ((delta_col | delta_line | delta_line_prime) == 0) ++ return 0; /* no error */ ++ ++ if (delta_line == ~delta_line_prime && ++ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) { ++ /* Single bit (recoverable) error in data */ ++ ++ bit = 0; ++ ++ if (delta_col & 0x20) ++ bit |= 0x04; ++ if (delta_col & 0x08) ++ bit |= 0x02; ++ if (delta_col & 0x02) ++ bit |= 0x01; ++ ++ if (delta_line >= n_bytes) ++ return -1; ++ ++ data[delta_line] ^= (1 << bit); ++ ++ return 1; /* corrected */ ++ } ++ ++ if ((hweight32(delta_line) + ++ hweight32(delta_line_prime) + ++ hweight8(delta_col)) == 1) { ++ /* Reccoverable error in ecc */ ++ ++ *read_ecc = *test_ecc; ++ return 1; /* corrected */ ++ } ++ ++ /* Unrecoverable error */ ++ ++ return -1; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_ecc.h linux-3.4.90/fs/yaffs2/yaffs_ecc.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_ecc.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_ecc.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,44 @@ ++/* ++ * YAFFS: Yet another Flash File System . 
A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++/* ++ * This code implements the ECC algorithm used in SmartMedia. ++ * ++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. ++ * The two unused bit are set to 1. ++ * The ECC can correct single bit errors in a 256-byte page of data. ++ * Thus, two such ECC blocks are used on a 512-byte NAND page. ++ * ++ */ ++ ++#ifndef __YAFFS_ECC_H__ ++#define __YAFFS_ECC_H__ ++ ++struct yaffs_ecc_other { ++ unsigned char col_parity; ++ unsigned line_parity; ++ unsigned line_parity_prime; ++}; ++ ++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc); ++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc, ++ const unsigned char *test_ecc); ++ ++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes, ++ struct yaffs_ecc_other *ecc); ++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes, ++ struct yaffs_ecc_other *read_ecc, ++ const struct yaffs_ecc_other *test_ecc); ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_getblockinfo.h linux-3.4.90/fs/yaffs2/yaffs_getblockinfo.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_getblockinfo.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_getblockinfo.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_GETBLOCKINFO_H__ ++#define __YAFFS_GETBLOCKINFO_H__ ++ ++#include "yaffs_guts.h" ++#include "yaffs_trace.h" ++ ++/* Function to manipulate block info */ ++static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev ++ *dev, int blk) ++{ ++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>> yaffs: get_block_info block %d is not valid", ++ blk); ++ BUG(); ++ } ++ return &dev->block_info[blk - dev->internal_start_block]; ++} ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_guts.c linux-3.4.90/fs/yaffs2/yaffs_guts.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_guts.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_guts.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,5146 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
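The ECC interface above computes a 3-byte code per 256-byte page and can repair any single-bit error in it. A usage sketch of the read path, assuming it is compiled and linked together with yaffs_ecc.c above; the data values are arbitrary:

    #include <stdio.h>

    /* Prototypes as declared in yaffs_ecc.h above. */
    void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
    int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
                          const unsigned char *test_ecc);

    int main(void)
    {
        unsigned char page[256];
        unsigned char stored_ecc[3];   /* ECC written alongside the page */
        unsigned char new_ecc[3];      /* ECC recomputed after reading back */
        int i, ret;

        for (i = 0; i < 256; i++)
            page[i] = (unsigned char)i;

        yaffs_ecc_calc(page, stored_ecc);   /* as done before writing to NAND */

        page[100] ^= 0x08;                  /* simulate a single-bit read error */

        yaffs_ecc_calc(page, new_ecc);      /* as done after reading back */
        ret = yaffs_ecc_correct(page, stored_ecc, new_ecc);

        /* ret: 0 = clean, 1 = corrected, -1 = unrecoverable */
        printf("correct() returned %d, byte 100 is now 0x%02x\n", ret, page[100]);
        return 0;
    }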
++ */ ++ ++#include "yportenv.h" ++#include "yaffs_trace.h" ++ ++#include "yaffs_guts.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_tagscompat.h" ++#include "yaffs_tagsmarshall.h" ++#include "yaffs_nand.h" ++#include "yaffs_yaffs1.h" ++#include "yaffs_yaffs2.h" ++#include "yaffs_bitmap.h" ++#include "yaffs_verify.h" ++#include "yaffs_nand.h" ++#include "yaffs_packedtags2.h" ++#include "yaffs_nameval.h" ++#include "yaffs_allocator.h" ++#include "yaffs_attribs.h" ++#include "yaffs_summary.h" ++ ++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */ ++#define YAFFS_GC_GOOD_ENOUGH 2 ++#define YAFFS_GC_PASSIVE_THRESHOLD 4 ++ ++#include "yaffs_ecc.h" ++ ++/* Forward declarations */ ++ ++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, ++ const u8 *buffer, int n_bytes, int use_reserve); ++ ++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name, ++ int buffer_size); ++ ++/* Function to calculate chunk and offset */ ++ ++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr, ++ int *chunk_out, u32 *offset_out) ++{ ++ int chunk; ++ u32 offset; ++ ++ chunk = (u32) (addr >> dev->chunk_shift); ++ ++ if (dev->chunk_div == 1) { ++ /* easy power of 2 case */ ++ offset = (u32) (addr & dev->chunk_mask); ++ } else { ++ /* Non power-of-2 case */ ++ ++ loff_t chunk_base; ++ ++ chunk /= dev->chunk_div; ++ ++ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk; ++ offset = (u32) (addr - chunk_base); ++ } ++ ++ *chunk_out = chunk; ++ *offset_out = offset; ++} ++ ++/* Function to return the number of shifts for a power of 2 greater than or ++ * equal to the given number ++ * Note we don't try to cater for all possible numbers and this does not have to ++ * be hellishly efficient. ++ */ ++ ++static inline u32 calc_shifts_ceiling(u32 x) ++{ ++ int extra_bits; ++ int shifts; ++ ++ shifts = extra_bits = 0; ++ ++ while (x > 1) { ++ if (x & 1) ++ extra_bits++; ++ x >>= 1; ++ shifts++; ++ } ++ ++ if (extra_bits) ++ shifts++; ++ ++ return shifts; ++} ++ ++/* Function to return the number of shifts to get a 1 in bit 0 ++ */ ++ ++static inline u32 calc_shifts(u32 x) ++{ ++ u32 shifts; ++ ++ shifts = 0; ++ ++ if (!x) ++ return 0; ++ ++ while (!(x & 1)) { ++ x >>= 1; ++ shifts++; ++ } ++ ++ return shifts; ++} ++ ++/* ++ * Temporary buffer manipulations. ++ */ ++ ++static int yaffs_init_tmp_buffers(struct yaffs_dev *dev) ++{ ++ int i; ++ u8 *buf = (u8 *) 1; ++ ++ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer)); ++ ++ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) { ++ dev->temp_buffer[i].in_use = 0; ++ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); ++ dev->temp_buffer[i].buffer = buf; ++ } ++ ++ return buf ? YAFFS_OK : YAFFS_FAIL; ++} ++ ++u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev) ++{ ++ int i; ++ ++ dev->temp_in_use++; ++ if (dev->temp_in_use > dev->max_temp) ++ dev->max_temp = dev->temp_in_use; ++ ++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { ++ if (dev->temp_buffer[i].in_use == 0) { ++ dev->temp_buffer[i].in_use = 1; ++ return dev->temp_buffer[i].buffer; ++ } ++ } ++ ++ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers"); ++ /* ++ * If we got here then we have to allocate an unmanaged one ++ * This is not good. 
++ */ ++ ++ dev->unmanaged_buffer_allocs++; ++ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS); ++ ++} ++ ++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer) ++{ ++ int i; ++ ++ dev->temp_in_use--; ++ ++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { ++ if (dev->temp_buffer[i].buffer == buffer) { ++ dev->temp_buffer[i].in_use = 0; ++ return; ++ } ++ } ++ ++ if (buffer) { ++ /* assume it is an unmanaged one. */ ++ yaffs_trace(YAFFS_TRACE_BUFFERS, ++ "Releasing unmanaged temp buffer"); ++ kfree(buffer); ++ dev->unmanaged_buffer_deallocs++; ++ } ++ ++} ++ ++/* ++ * Functions for robustisizing TODO ++ * ++ */ ++ ++static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk, ++ const u8 *data, ++ const struct yaffs_ext_tags *tags) ++{ ++ (void) dev; ++ (void) nand_chunk; ++ (void) data; ++ (void) tags; ++} ++ ++static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk, ++ const struct yaffs_ext_tags *tags) ++{ ++ (void) dev; ++ (void) nand_chunk; ++ (void) tags; ++} ++ ++void yaffs_handle_chunk_error(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi) ++{ ++ if (!bi->gc_prioritise) { ++ bi->gc_prioritise = 1; ++ dev->has_pending_prioritised_gc = 1; ++ bi->chunk_error_strikes++; ++ ++ if (bi->chunk_error_strikes > 3) { ++ bi->needs_retiring = 1; /* Too many stikes, so retire */ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: Block struck out"); ++ ++ } ++ } ++} ++ ++static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk, ++ int erased_ok) ++{ ++ int flash_block = nand_chunk / dev->param.chunks_per_block; ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block); ++ ++ yaffs_handle_chunk_error(dev, bi); ++ ++ if (erased_ok) { ++ /* Was an actual write failure, ++ * so mark the block for retirement.*/ ++ bi->needs_retiring = 1; ++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, ++ "**>> Block %d needs retiring", flash_block); ++ } ++ ++ /* Delete the chunk */ ++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); ++ yaffs_skip_rest_of_block(dev); ++} ++ ++/* ++ * Verification code ++ */ ++ ++/* ++ * Simple hash function. Needs to have a reasonable spread ++ */ ++ ++static inline int yaffs_hash_fn(int n) ++{ ++ if (n < 0) ++ n = -n; ++ return n % YAFFS_NOBJECT_BUCKETS; ++} ++ ++/* ++ * Access functions to useful fake objects. ++ * Note that root might have a presence in NAND if permissions are set. 
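yaffs_get_temp_buffer() and yaffs_release_temp_buffer() above hand out chunk-sized buffers from a small fixed pool and fall back to an unmanaged allocation only when the pool is exhausted. A user-space sketch of that pool; the pool size and buffer size are illustrative stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    #define N_TEMP_BUFFERS 6        /* stand-in for YAFFS_N_TEMP_BUFFERS */
    #define BUF_SIZE       2048

    struct temp_buf {
        unsigned char *buffer;
        int in_use;
    };

    static struct temp_buf pool[N_TEMP_BUFFERS];

    static void init_pool(void)
    {
        int i;

        for (i = 0; i < N_TEMP_BUFFERS; i++) {
            pool[i].buffer = malloc(BUF_SIZE);
            pool[i].in_use = 0;
        }
    }

    /* Take a pooled buffer if one is free, otherwise an "unmanaged" one. */
    static unsigned char *get_temp_buffer(void)
    {
        int i;

        for (i = 0; i < N_TEMP_BUFFERS; i++) {
            if (!pool[i].in_use) {
                pool[i].in_use = 1;
                return pool[i].buffer;
            }
        }
        return malloc(BUF_SIZE);    /* pool exhausted */
    }

    /* Return a buffer: clear its in_use flag if pooled, else free it. */
    static void release_temp_buffer(unsigned char *buf)
    {
        int i;

        for (i = 0; i < N_TEMP_BUFFERS; i++) {
            if (pool[i].buffer == buf) {
                pool[i].in_use = 0;
                return;
            }
        }
        free(buf);                  /* was an unmanaged allocation */
    }

    int main(void)
    {
        unsigned char *b;

        init_pool();
        b = get_temp_buffer();
        release_temp_buffer(b);
        printf("pooled buffer cycled through get/release\n");
        return 0;
    }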
++ */ ++ ++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev) ++{ ++ return dev->root_dir; ++} ++ ++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev) ++{ ++ return dev->lost_n_found; ++} ++ ++/* ++ * Erased NAND checking functions ++ */ ++ ++int yaffs_check_ff(u8 *buffer, int n_bytes) ++{ ++ /* Horrible, slow implementation */ ++ while (n_bytes--) { ++ if (*buffer != 0xff) ++ return 0; ++ buffer++; ++ } ++ return 1; ++} ++ ++static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk) ++{ ++ int retval = YAFFS_OK; ++ u8 *data = yaffs_get_temp_buffer(dev); ++ struct yaffs_ext_tags tags; ++ int result; ++ ++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags); ++ ++ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR) ++ retval = YAFFS_FAIL; ++ ++ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) || ++ tags.chunk_used) { ++ yaffs_trace(YAFFS_TRACE_NANDACCESS, ++ "Chunk %d not erased", nand_chunk); ++ retval = YAFFS_FAIL; ++ } ++ ++ yaffs_release_temp_buffer(dev, data); ++ ++ return retval; ++ ++} ++ ++static int yaffs_verify_chunk_written(struct yaffs_dev *dev, ++ int nand_chunk, ++ const u8 *data, ++ struct yaffs_ext_tags *tags) ++{ ++ int retval = YAFFS_OK; ++ struct yaffs_ext_tags temp_tags; ++ u8 *buffer = yaffs_get_temp_buffer(dev); ++ int result; ++ ++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags); ++ if (memcmp(buffer, data, dev->data_bytes_per_chunk) || ++ temp_tags.obj_id != tags->obj_id || ++ temp_tags.chunk_id != tags->chunk_id || ++ temp_tags.n_bytes != tags->n_bytes) ++ retval = YAFFS_FAIL; ++ ++ yaffs_release_temp_buffer(dev, buffer); ++ ++ return retval; ++} ++ ++ ++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks) ++{ ++ int reserved_chunks; ++ int reserved_blocks = dev->param.n_reserved_blocks; ++ int checkpt_blocks; ++ ++ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev); ++ ++ reserved_chunks = ++ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block; ++ ++ return (dev->n_free_chunks > (reserved_chunks + n_chunks)); ++} ++ ++static int yaffs_find_alloc_block(struct yaffs_dev *dev) ++{ ++ int i; ++ struct yaffs_block_info *bi; ++ ++ if (dev->n_erased_blocks < 1) { ++ /* Hoosterman we've got a problem. ++ * Can't get space to gc ++ */ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: no more erased blocks"); ++ ++ return -1; ++ } ++ ++ /* Find an empty block. 
*/ ++ ++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ dev->alloc_block_finder++; ++ if (dev->alloc_block_finder < dev->internal_start_block ++ || dev->alloc_block_finder > dev->internal_end_block) { ++ dev->alloc_block_finder = dev->internal_start_block; ++ } ++ ++ bi = yaffs_get_block_info(dev, dev->alloc_block_finder); ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) { ++ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING; ++ dev->seq_number++; ++ bi->seq_number = dev->seq_number; ++ dev->n_erased_blocks--; ++ yaffs_trace(YAFFS_TRACE_ALLOCATE, ++ "Allocated block %d, seq %d, %d left" , ++ dev->alloc_block_finder, dev->seq_number, ++ dev->n_erased_blocks); ++ return dev->alloc_block_finder; ++ } ++ } ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs tragedy: no more erased blocks, but there should have been %d", ++ dev->n_erased_blocks); ++ ++ return -1; ++} ++ ++static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver, ++ struct yaffs_block_info **block_ptr) ++{ ++ int ret_val; ++ struct yaffs_block_info *bi; ++ ++ if (dev->alloc_block < 0) { ++ /* Get next block to allocate off */ ++ dev->alloc_block = yaffs_find_alloc_block(dev); ++ dev->alloc_page = 0; ++ } ++ ++ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) { ++ /* No space unless we're allowed to use the reserve. */ ++ return -1; ++ } ++ ++ if (dev->n_erased_blocks < dev->param.n_reserved_blocks ++ && dev->alloc_page == 0) ++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve"); ++ ++ /* Next page please.... */ ++ if (dev->alloc_block >= 0) { ++ bi = yaffs_get_block_info(dev, dev->alloc_block); ++ ++ ret_val = (dev->alloc_block * dev->param.chunks_per_block) + ++ dev->alloc_page; ++ bi->pages_in_use++; ++ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page); ++ ++ dev->alloc_page++; ++ ++ dev->n_free_chunks--; ++ ++ /* If the block is full set the state to full */ ++ if (dev->alloc_page >= dev->param.chunks_per_block) { ++ bi->block_state = YAFFS_BLOCK_STATE_FULL; ++ dev->alloc_block = -1; ++ } ++ ++ if (block_ptr) ++ *block_ptr = bi; ++ ++ return ret_val; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!"); ++ ++ return -1; ++} ++ ++static int yaffs_get_erased_chunks(struct yaffs_dev *dev) ++{ ++ int n; ++ ++ n = dev->n_erased_blocks * dev->param.chunks_per_block; ++ ++ if (dev->alloc_block > 0) ++ n += (dev->param.chunks_per_block - dev->alloc_page); ++ ++ return n; ++ ++} ++ ++/* ++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block ++ * if we don't want to write to it. ++ */ ++void yaffs_skip_rest_of_block(struct yaffs_dev *dev) ++{ ++ struct yaffs_block_info *bi; ++ ++ if (dev->alloc_block > 0) { ++ bi = yaffs_get_block_info(dev, dev->alloc_block); ++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) { ++ bi->block_state = YAFFS_BLOCK_STATE_FULL; ++ dev->alloc_block = -1; ++ } ++ } ++} ++ ++static int yaffs_write_new_chunk(struct yaffs_dev *dev, ++ const u8 *data, ++ struct yaffs_ext_tags *tags, int use_reserver) ++{ ++ int attempts = 0; ++ int write_ok = 0; ++ int chunk; ++ ++ yaffs2_checkpt_invalidate(dev); ++ ++ do { ++ struct yaffs_block_info *bi = 0; ++ int erased_ok = 0; ++ ++ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi); ++ if (chunk < 0) { ++ /* no space */ ++ break; ++ } ++ ++ /* First check this chunk is erased, if it needs ++ * checking. The checking policy (unless forced ++ * always on) is as follows: ++ * ++ * Check the first page we try to write in a block. 
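yaffs_find_alloc_block() above advances alloc_block_finder with wrap-around, so allocation cycles through the whole device instead of hammering the low-numbered blocks. A small sketch of that circular scan over an array of block states; the block count, states and starting cursor are made up:

    #include <stdio.h>

    #define N_BLOCKS 8

    enum blk_state { EMPTY, FULL };

    static enum blk_state state[N_BLOCKS] = {
        FULL, FULL, EMPTY, FULL, EMPTY, FULL, FULL, FULL
    };
    static int finder = 4;  /* persists between calls, like alloc_block_finder */

    /* Return the next EMPTY block after the cursor, wrapping around once. */
    static int find_alloc_block(void)
    {
        int i;

        for (i = 0; i < N_BLOCKS; i++) {
            finder++;
            if (finder >= N_BLOCKS)
                finder = 0;
            if (state[finder] == EMPTY)
                return finder;
        }
        return -1;          /* no erased blocks left */
    }

    int main(void)
    {
        printf("first pick: block %d\n", find_alloc_block());  /* wraps to 2 */
        printf("next pick:  block %d\n", find_alloc_block());  /* finds 4 */
        return 0;
    }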
++ * If the check passes then we don't need to check any ++ * more. If the check fails, we check again... ++ * If the block has been erased, we don't need to check. ++ * ++ * However, if the block has been prioritised for gc, ++ * then we think there might be something odd about ++ * this block and stop using it. ++ * ++ * Rationale: We should only ever see chunks that have ++ * not been erased if there was a partially written ++ * chunk due to power loss. This checking policy should ++ * catch that case with very few checks and thus save a ++ * lot of checks that are most likely not needed. ++ * ++ * Mods to the above ++ * If an erase check fails or the write fails we skip the ++ * rest of the block. ++ */ ++ ++ /* let's give it a try */ ++ attempts++; ++ ++ if (dev->param.always_check_erased) ++ bi->skip_erased_check = 0; ++ ++ if (!bi->skip_erased_check) { ++ erased_ok = yaffs_check_chunk_erased(dev, chunk); ++ if (erased_ok != YAFFS_OK) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>> yaffs chunk %d was not erased", ++ chunk); ++ ++ /* If not erased, delete this one, ++ * skip rest of block and ++ * try another chunk */ ++ yaffs_chunk_del(dev, chunk, 1, __LINE__); ++ yaffs_skip_rest_of_block(dev); ++ continue; ++ } ++ } ++ ++ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags); ++ ++ if (!bi->skip_erased_check) ++ write_ok = ++ yaffs_verify_chunk_written(dev, chunk, data, tags); ++ ++ if (write_ok != YAFFS_OK) { ++ /* Clean up aborted write, skip to next block and ++ * try another chunk */ ++ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok); ++ continue; ++ } ++ ++ bi->skip_erased_check = 1; ++ ++ /* Copy the data into the robustification buffer */ ++ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags); ++ ++ } while (write_ok != YAFFS_OK && ++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts)); ++ ++ if (!write_ok) ++ chunk = -1; ++ ++ if (attempts > 1) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>> yaffs write required %d attempts", ++ attempts); ++ dev->n_retried_writes += (attempts - 1); ++ } ++ ++ return chunk; ++} ++ ++/* ++ * Block retiring for handling a broken block. 
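yaffs_write_new_chunk() above allocates a chunk, optionally checks that it really is erased, writes, verifies the write, and on any failure moves on and tries another chunk up to a retry limit. A condensed sketch of that control flow with stubbed NAND operations; the stubs are fabricated so that the second write "succeeds" and the retry path is exercised:

    #include <stdio.h>

    #define MAX_ATTEMPTS 3

    /* Stubs standing in for the real NAND operations. */
    static int alloc_chunk(void)           { static int next = 100; return next++; }
    static int chunk_is_erased(int chunk)  { (void)chunk; return 1; }
    static int write_and_verify(int chunk) { static int calls; (void)chunk; return ++calls >= 2; }
    static void discard_chunk(int chunk)   { printf("skipping chunk %d\n", chunk); }

    /* Allocate, check erased, write, verify; on failure move on and retry,
     * roughly following yaffs_write_new_chunk(). */
    static int write_new_chunk(void)
    {
        int attempts = 0;
        int chunk;

        do {
            chunk = alloc_chunk();
            if (chunk < 0)
                return -1;              /* no space */
            attempts++;

            if (!chunk_is_erased(chunk)) {
                discard_chunk(chunk);   /* not erased: skip it */
                continue;
            }
            if (write_and_verify(chunk))
                return chunk;           /* success */

            discard_chunk(chunk);       /* failed write: try another */
        } while (attempts < MAX_ATTEMPTS);

        return -1;
    }

    int main(void)
    {
        printf("data landed in chunk %d\n", write_new_chunk());
        return 0;
    }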
++ */ ++ ++static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block) ++{ ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block); ++ ++ yaffs2_checkpt_invalidate(dev); ++ ++ yaffs2_clear_oldest_dirty_seq(dev, bi); ++ ++ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) { ++ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: Failed to mark bad and erase block %d", ++ flash_block); ++ } else { ++ struct yaffs_ext_tags tags; ++ int chunk_id = ++ flash_block * dev->param.chunks_per_block; ++ ++ u8 *buffer = yaffs_get_temp_buffer(dev); ++ ++ memset(buffer, 0xff, dev->data_bytes_per_chunk); ++ memset(&tags, 0, sizeof(tags)); ++ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK; ++ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id - ++ dev->chunk_offset, ++ buffer, ++ &tags) != YAFFS_OK) ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: Failed to write bad block marker to block %d", ++ flash_block); ++ ++ yaffs_release_temp_buffer(dev, buffer); ++ } ++ } ++ ++ bi->block_state = YAFFS_BLOCK_STATE_DEAD; ++ bi->gc_prioritise = 0; ++ bi->needs_retiring = 0; ++ ++ dev->n_retired_blocks++; ++} ++ ++/*---------------- Name handling functions ------------*/ ++ ++static u16 yaffs_calc_name_sum(const YCHAR *name) ++{ ++ u16 sum = 0; ++ u16 i = 1; ++ ++ if (!name) ++ return 0; ++ ++ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) { ++ ++ /* 0x1f mask is case insensitive */ ++ sum += ((*name) & 0x1f) * i; ++ i++; ++ name++; ++ } ++ return sum; ++} ++ ++ ++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name) ++{ ++ memset(obj->short_name, 0, sizeof(obj->short_name)); ++ ++ if (name && !name[0]) { ++ yaffs_fix_null_name(obj, obj->short_name, ++ YAFFS_SHORT_NAME_LENGTH); ++ name = obj->short_name; ++ } else if (name && ++ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <= ++ YAFFS_SHORT_NAME_LENGTH) { ++ strcpy(obj->short_name, name); ++ } ++ ++ obj->sum = yaffs_calc_name_sum(name); ++} ++ ++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj, ++ const struct yaffs_obj_hdr *oh) ++{ ++#ifdef CONFIG_YAFFS_AUTO_UNICODE ++ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1]; ++ memset(tmp_name, 0, sizeof(tmp_name)); ++ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name, ++ YAFFS_MAX_NAME_LENGTH + 1); ++ yaffs_set_obj_name(obj, tmp_name); ++#else ++ yaffs_set_obj_name(obj, oh->name); ++#endif ++} ++ ++loff_t yaffs_max_file_size(struct yaffs_dev *dev) ++{ ++ if(sizeof(loff_t) < 8) ++ return YAFFS_MAX_FILE_SIZE_32; ++ else ++ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk; ++} ++ ++/*-------------------- TNODES ------------------- ++ ++ * List of spare tnodes ++ * The list is hooked together using the first pointer ++ * in the tnode. 
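yaffs_calc_name_sum() above folds each character, masked with 0x1f so that case is ignored, into a 16-bit sum weighted by position; directory lookups use it to skip non-matching names cheaply. The same calculation lifted into a stand-alone program, with a stand-in for YAFFS_MAX_NAME_LENGTH:

    #include <stdio.h>

    #define MAX_NAME_LENGTH 255     /* stand-in for YAFFS_MAX_NAME_LENGTH */

    static unsigned short calc_name_sum(const char *name)
    {
        unsigned short sum = 0;
        unsigned short i = 1;

        if (!name)
            return 0;

        while (*name && i < (MAX_NAME_LENGTH / 2)) {
            sum += (*name & 0x1f) * i;  /* 0x1f mask folds upper/lower case */
            i++;
            name++;
        }
        return sum;
    }

    int main(void)
    {
        /* Same sum for both spellings: the mask strips the case bit. */
        printf("readme.txt -> %u\n", calc_name_sum("readme.txt"));
        printf("README.TXT -> %u\n", calc_name_sum("README.TXT"));
        printf("other.txt  -> %u\n", calc_name_sum("other.txt"));
        return 0;
    }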
++ */ ++ ++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev) ++{ ++ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev); ++ ++ if (tn) { ++ memset(tn, 0, dev->tnode_size); ++ dev->n_tnodes++; ++ } ++ ++ dev->checkpoint_blocks_required = 0; /* force recalculation */ ++ ++ return tn; ++} ++ ++/* FreeTnode frees up a tnode and puts it back on the free list */ ++static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn) ++{ ++ yaffs_free_raw_tnode(dev, tn); ++ dev->n_tnodes--; ++ dev->checkpoint_blocks_required = 0; /* force recalculation */ ++} ++ ++static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev) ++{ ++ yaffs_deinit_raw_tnodes_and_objs(dev); ++ dev->n_obj = 0; ++ dev->n_tnodes = 0; ++} ++ ++static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn, ++ unsigned pos, unsigned val) ++{ ++ u32 *map = (u32 *) tn; ++ u32 bit_in_map; ++ u32 bit_in_word; ++ u32 word_in_map; ++ u32 mask; ++ ++ pos &= YAFFS_TNODES_LEVEL0_MASK; ++ val >>= dev->chunk_grp_bits; ++ ++ bit_in_map = pos * dev->tnode_width; ++ word_in_map = bit_in_map / 32; ++ bit_in_word = bit_in_map & (32 - 1); ++ ++ mask = dev->tnode_mask << bit_in_word; ++ ++ map[word_in_map] &= ~mask; ++ map[word_in_map] |= (mask & (val << bit_in_word)); ++ ++ if (dev->tnode_width > (32 - bit_in_word)) { ++ bit_in_word = (32 - bit_in_word); ++ word_in_map++; ++ mask = ++ dev->tnode_mask >> bit_in_word; ++ map[word_in_map] &= ~mask; ++ map[word_in_map] |= (mask & (val >> bit_in_word)); ++ } ++} ++ ++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn, ++ unsigned pos) ++{ ++ u32 *map = (u32 *) tn; ++ u32 bit_in_map; ++ u32 bit_in_word; ++ u32 word_in_map; ++ u32 val; ++ ++ pos &= YAFFS_TNODES_LEVEL0_MASK; ++ ++ bit_in_map = pos * dev->tnode_width; ++ word_in_map = bit_in_map / 32; ++ bit_in_word = bit_in_map & (32 - 1); ++ ++ val = map[word_in_map] >> bit_in_word; ++ ++ if (dev->tnode_width > (32 - bit_in_word)) { ++ bit_in_word = (32 - bit_in_word); ++ word_in_map++; ++ val |= (map[word_in_map] << bit_in_word); ++ } ++ ++ val &= dev->tnode_mask; ++ val <<= dev->chunk_grp_bits; ++ ++ return val; ++} ++ ++/* ------------------- End of individual tnode manipulation -----------------*/ ++ ++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------ ++ * The look up tree is represented by the top tnode and the number of top_level ++ * in the tree. 0 means only the level 0 tnode is in the tree. ++ */ ++ ++/* FindLevel0Tnode finds the level 0 tnode, if one exists. 
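yaffs_load_tnode_0() and yaffs_get_group_base() above pack tnode_width-bit chunk numbers into an array of 32-bit words, handling entries that straddle a word boundary. A stand-alone sketch of the same packing arithmetic with an illustrative 20-bit field width:

    #include <stdio.h>
    #include <stdint.h>

    #define TNODE_WIDTH 20      /* illustrative field width in bits */
    #define N_ENTRIES   16
    #define MAP_WORDS   ((N_ENTRIES * TNODE_WIDTH + 31) / 32)

    static const uint32_t tnode_mask = (1u << TNODE_WIDTH) - 1;

    /* Store a TNODE_WIDTH-bit value at position pos, possibly spanning two
     * 32-bit words, as yaffs_load_tnode_0() does. */
    static void put_entry(uint32_t *map, unsigned pos, uint32_t val)
    {
        unsigned bit_in_map = pos * TNODE_WIDTH;
        unsigned word = bit_in_map / 32;
        unsigned bit = bit_in_map & 31;
        uint32_t mask = tnode_mask << bit;

        val &= tnode_mask;
        map[word] = (map[word] & ~mask) | ((val << bit) & mask);

        if (TNODE_WIDTH > 32 - bit) {       /* spills into the next word */
            unsigned done = 32 - bit;

            mask = tnode_mask >> done;
            map[word + 1] = (map[word + 1] & ~mask) | ((val >> done) & mask);
        }
    }

    /* Read the value back, as yaffs_get_group_base() does. */
    static uint32_t get_entry(const uint32_t *map, unsigned pos)
    {
        unsigned bit_in_map = pos * TNODE_WIDTH;
        unsigned word = bit_in_map / 32;
        unsigned bit = bit_in_map & 31;
        uint32_t val = map[word] >> bit;

        if (TNODE_WIDTH > 32 - bit)
            val |= map[word + 1] << (32 - bit);

        return val & tnode_mask;
    }

    int main(void)
    {
        uint32_t map[MAP_WORDS] = { 0 };

        put_entry(map, 1, 0x12345);     /* entry 1 straddles words 0 and 1 */
        put_entry(map, 2, 0x0abcd);
        printf("entry 1 = 0x%05x, entry 2 = 0x%05x\n",
               get_entry(map, 1), get_entry(map, 2));
        return 0;
    }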
*/ ++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev, ++ struct yaffs_file_var *file_struct, ++ u32 chunk_id) ++{ ++ struct yaffs_tnode *tn = file_struct->top; ++ u32 i; ++ int required_depth; ++ int level = file_struct->top_level; ++ ++ (void) dev; ++ ++ /* Check sane level and chunk Id */ ++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) ++ return NULL; ++ ++ if (chunk_id > YAFFS_MAX_CHUNK_ID) ++ return NULL; ++ ++ /* First check we're tall enough (ie enough top_level) */ ++ ++ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; ++ required_depth = 0; ++ while (i) { ++ i >>= YAFFS_TNODES_INTERNAL_BITS; ++ required_depth++; ++ } ++ ++ if (required_depth > file_struct->top_level) ++ return NULL; /* Not tall enough, so we can't find it */ ++ ++ /* Traverse down to level 0 */ ++ while (level > 0 && tn) { ++ tn = tn->internal[(chunk_id >> ++ (YAFFS_TNODES_LEVEL0_BITS + ++ (level - 1) * ++ YAFFS_TNODES_INTERNAL_BITS)) & ++ YAFFS_TNODES_INTERNAL_MASK]; ++ level--; ++ } ++ ++ return tn; ++} ++ ++/* add_find_tnode_0 finds the level 0 tnode if it exists, ++ * otherwise first expands the tree. ++ * This happens in two steps: ++ * 1. If the tree isn't tall enough, then make it taller. ++ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required. ++ * ++ * Used when modifying the tree. ++ * ++ * If the tn argument is NULL, then a fresh tnode will be added otherwise the ++ * specified tn will be plugged into the ttree. ++ */ ++ ++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev, ++ struct yaffs_file_var *file_struct, ++ u32 chunk_id, ++ struct yaffs_tnode *passed_tn) ++{ ++ int required_depth; ++ int i; ++ int l; ++ struct yaffs_tnode *tn; ++ u32 x; ++ ++ /* Check sane level and page Id */ ++ if (file_struct->top_level < 0 || ++ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL) ++ return NULL; ++ ++ if (chunk_id > YAFFS_MAX_CHUNK_ID) ++ return NULL; ++ ++ /* First check we're tall enough (ie enough top_level) */ ++ ++ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; ++ required_depth = 0; ++ while (x) { ++ x >>= YAFFS_TNODES_INTERNAL_BITS; ++ required_depth++; ++ } ++ ++ if (required_depth > file_struct->top_level) { ++ /* Not tall enough, gotta make the tree taller */ ++ for (i = file_struct->top_level; i < required_depth; i++) { ++ ++ tn = yaffs_get_tnode(dev); ++ ++ if (tn) { ++ tn->internal[0] = file_struct->top; ++ file_struct->top = tn; ++ file_struct->top_level++; ++ } else { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs: no more tnodes"); ++ return NULL; ++ } ++ } ++ } ++ ++ /* Traverse down to level 0, adding anything we need */ ++ ++ l = file_struct->top_level; ++ tn = file_struct->top; ++ ++ if (l > 0) { ++ while (l > 0 && tn) { ++ x = (chunk_id >> ++ (YAFFS_TNODES_LEVEL0_BITS + ++ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) & ++ YAFFS_TNODES_INTERNAL_MASK; ++ ++ if ((l > 1) && !tn->internal[x]) { ++ /* Add missing non-level-zero tnode */ ++ tn->internal[x] = yaffs_get_tnode(dev); ++ if (!tn->internal[x]) ++ return NULL; ++ } else if (l == 1) { ++ /* Looking from level 1 at level 0 */ ++ if (passed_tn) { ++ /* If we already have one, release it */ ++ if (tn->internal[x]) ++ yaffs_free_tnode(dev, ++ tn->internal[x]); ++ tn->internal[x] = passed_tn; ++ ++ } else if (!tn->internal[x]) { ++ /* Don't have one, none passed in */ ++ tn->internal[x] = yaffs_get_tnode(dev); ++ if (!tn->internal[x]) ++ return NULL; ++ } ++ } ++ ++ tn = tn->internal[x]; ++ l--; ++ } ++ } else { ++ /* top is level 0 */ ++ if (passed_tn) { ++ memcpy(tn, passed_tn, ++ (dev->tnode_width * 
YAFFS_NTNODES_LEVEL0) / 8); ++ yaffs_free_tnode(dev, passed_tn); ++ } ++ } ++ ++ return tn; ++} ++ ++static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id, ++ int chunk_obj) ++{ ++ return (tags->chunk_id == chunk_obj && ++ tags->obj_id == obj_id && ++ !tags->is_deleted) ? 1 : 0; ++ ++} ++ ++static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk, ++ struct yaffs_ext_tags *tags, int obj_id, ++ int inode_chunk) ++{ ++ int j; ++ ++ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) { ++ if (yaffs_check_chunk_bit ++ (dev, the_chunk / dev->param.chunks_per_block, ++ the_chunk % dev->param.chunks_per_block)) { ++ ++ if (dev->chunk_grp_size == 1) ++ return the_chunk; ++ else { ++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL, ++ tags); ++ if (yaffs_tags_match(tags, ++ obj_id, inode_chunk)) { ++ /* found it; */ ++ return the_chunk; ++ } ++ } ++ } ++ the_chunk++; ++ } ++ return -1; ++} ++ ++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk, ++ struct yaffs_ext_tags *tags) ++{ ++ /*Get the Tnode, then get the level 0 offset chunk offset */ ++ struct yaffs_tnode *tn; ++ int the_chunk = -1; ++ struct yaffs_ext_tags local_tags; ++ int ret_val = -1; ++ struct yaffs_dev *dev = in->my_dev; ++ ++ if (!tags) { ++ /* Passed a NULL, so use our own tags space */ ++ tags = &local_tags; ++ } ++ ++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); ++ ++ if (!tn) ++ return ret_val; ++ ++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); ++ ++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, ++ inode_chunk); ++ return ret_val; ++} ++ ++static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk, ++ struct yaffs_ext_tags *tags) ++{ ++ /* Get the Tnode, then get the level 0 offset chunk offset */ ++ struct yaffs_tnode *tn; ++ int the_chunk = -1; ++ struct yaffs_ext_tags local_tags; ++ struct yaffs_dev *dev = in->my_dev; ++ int ret_val = -1; ++ ++ if (!tags) { ++ /* Passed a NULL, so use our own tags space */ ++ tags = &local_tags; ++ } ++ ++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); ++ ++ if (!tn) ++ return ret_val; ++ ++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); ++ ++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, ++ inode_chunk); ++ ++ /* Delete the entry in the filestructure (if found) */ ++ if (ret_val != -1) ++ yaffs_load_tnode_0(dev, tn, inode_chunk, 0); ++ ++ return ret_val; ++} ++ ++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk, ++ int nand_chunk, int in_scan) ++{ ++ /* NB in_scan is zero unless scanning. ++ * For forward scanning, in_scan is > 0; ++ * for backward scanning in_scan is < 0 ++ * ++ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there. ++ */ ++ ++ struct yaffs_tnode *tn; ++ struct yaffs_dev *dev = in->my_dev; ++ int existing_cunk; ++ struct yaffs_ext_tags existing_tags; ++ struct yaffs_ext_tags new_tags; ++ unsigned existing_serial, new_serial; ++ ++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) { ++ /* Just ignore an attempt at putting a chunk into a non-file ++ * during scanning. ++ * If it is not during Scanning then something went wrong! 
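Both yaffs_find_tnode_0() and yaffs_add_find_tnode_0() above work out how many internal levels the tree needs by shifting the chunk id right until nothing is left. A sketch of that depth calculation; the bit widths are illustrative stand-ins for YAFFS_TNODES_LEVEL0_BITS and YAFFS_TNODES_INTERNAL_BITS:

    #include <stdio.h>

    #define LEVEL0_BITS   4     /* illustrative */
    #define INTERNAL_BITS 3     /* illustrative */

    /* How many internal tnode levels are needed so that chunk_id is reachable
     * from the top of the tree; 0 means the level-0 tnode alone is enough. */
    static int required_depth(unsigned chunk_id)
    {
        unsigned x = chunk_id >> LEVEL0_BITS;
        int depth = 0;

        while (x) {
            x >>= INTERNAL_BITS;
            depth++;
        }
        return depth;
    }

    int main(void)
    {
        unsigned ids[] = { 0, 15, 16, 1000, 1000000 };
        unsigned i;

        for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            printf("chunk %7u -> depth %d\n", ids[i], required_depth(ids[i]));
        return 0;
    }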
++ */ ++ if (!in_scan) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy:attempt to put data chunk into a non-file" ++ ); ++ BUG(); ++ } ++ ++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); ++ return YAFFS_OK; ++ } ++ ++ tn = yaffs_add_find_tnode_0(dev, ++ &in->variant.file_variant, ++ inode_chunk, NULL); ++ if (!tn) ++ return YAFFS_FAIL; ++ ++ if (!nand_chunk) ++ /* Dummy insert, bail now */ ++ return YAFFS_OK; ++ ++ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk); ++ ++ if (in_scan != 0) { ++ /* If we're scanning then we need to test for duplicates ++ * NB This does not need to be efficient since it should only ++ * happen when the power fails during a write, then only one ++ * chunk should ever be affected. ++ * ++ * Correction for YAFFS2: This could happen quite a lot and we ++ * need to think about efficiency! TODO ++ * Update: For backward scanning we don't need to re-read tags ++ * so this is quite cheap. ++ */ ++ ++ if (existing_cunk > 0) { ++ /* NB Right now existing chunk will not be real ++ * chunk_id if the chunk group size > 1 ++ * thus we have to do a FindChunkInFile to get the ++ * real chunk id. ++ * ++ * We have a duplicate now we need to decide which ++ * one to use: ++ * ++ * Backwards scanning YAFFS2: The old one is what ++ * we use, dump the new one. ++ * YAFFS1: Get both sets of tags and compare serial ++ * numbers. ++ */ ++ ++ if (in_scan > 0) { ++ /* Only do this for forward scanning */ ++ yaffs_rd_chunk_tags_nand(dev, ++ nand_chunk, ++ NULL, &new_tags); ++ ++ /* Do a proper find */ ++ existing_cunk = ++ yaffs_find_chunk_in_file(in, inode_chunk, ++ &existing_tags); ++ } ++ ++ if (existing_cunk <= 0) { ++ /*Hoosterman - how did this happen? */ ++ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: existing chunk < 0 in scan" ++ ); ++ ++ } ++ ++ /* NB The deleted flags should be false, otherwise ++ * the chunks will not be loaded during a scan ++ */ ++ ++ if (in_scan > 0) { ++ new_serial = new_tags.serial_number; ++ existing_serial = existing_tags.serial_number; ++ } ++ ++ if ((in_scan > 0) && ++ (existing_cunk <= 0 || ++ ((existing_serial + 1) & 3) == new_serial)) { ++ /* Forward scanning. ++ * Use new ++ * Delete the old one and drop through to ++ * update the tnode ++ */ ++ yaffs_chunk_del(dev, existing_cunk, 1, ++ __LINE__); ++ } else { ++ /* Backward scanning or we want to use the ++ * existing one ++ * Delete the new one and return early so that ++ * the tnode isn't changed ++ */ ++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); ++ return YAFFS_OK; ++ } ++ } ++ ++ } ++ ++ if (existing_cunk == 0) ++ in->n_data_chunks++; ++ ++ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk); ++ ++ return YAFFS_OK; ++} ++ ++static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk) ++{ ++ struct yaffs_block_info *the_block; ++ unsigned block_no; ++ ++ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk); ++ ++ block_no = chunk / dev->param.chunks_per_block; ++ the_block = yaffs_get_block_info(dev, block_no); ++ if (the_block) { ++ the_block->soft_del_pages++; ++ dev->n_free_chunks++; ++ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block); ++ } ++} ++ ++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all ++ * the chunks in the file. ++ * All soft deleting does is increment the block's softdelete count and pulls ++ * the chunk out of the tnode. ++ * Thus, essentially this is the same as DeleteWorker except that the chunks ++ * are soft deleted. 
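++ */
++
++/* Editor's illustrative sketch, not part of the upstream yaffs2 code in this
++ * patch: the tnode addressing that yaffs_find_tnode_0(), yaffs_add_find_tnode_0()
++ * and the tree workers below all rely on.  A chunk index is consumed
++ * YAFFS_TNODES_LEVEL0_BITS at level 0 and YAFFS_TNODES_INTERNAL_BITS per
++ * internal level, so the depth needed for a given chunk_id is just a count
++ * of how many internal-level shifts are required.  The yaffs_example_* name
++ * is invented for illustration only.
++ */
++static inline int yaffs_example_required_depth(u32 chunk_id)
++{
++	u32 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++	int depth = 0;
++
++	/* Each remaining non-zero group of bits needs one more internal level. */
++	while (x) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		depth++;
++	}
++	return depth;	/* 0 means a single level 0 tnode covers the chunk */
++}
++
++/*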
++ */ ++ ++static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn, ++ u32 level, int chunk_offset) ++{ ++ int i; ++ int the_chunk; ++ int all_done = 1; ++ struct yaffs_dev *dev = in->my_dev; ++ ++ if (!tn) ++ return 1; ++ ++ if (level > 0) { ++ for (i = YAFFS_NTNODES_INTERNAL - 1; ++ all_done && i >= 0; ++ i--) { ++ if (tn->internal[i]) { ++ all_done = ++ yaffs_soft_del_worker(in, ++ tn->internal[i], ++ level - 1, ++ (chunk_offset << ++ YAFFS_TNODES_INTERNAL_BITS) ++ + i); ++ if (all_done) { ++ yaffs_free_tnode(dev, ++ tn->internal[i]); ++ tn->internal[i] = NULL; ++ } else { ++ /* Can this happen? */ ++ } ++ } ++ } ++ return (all_done) ? 1 : 0; ++ } ++ ++ /* level 0 */ ++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) { ++ the_chunk = yaffs_get_group_base(dev, tn, i); ++ if (the_chunk) { ++ yaffs_soft_del_chunk(dev, the_chunk); ++ yaffs_load_tnode_0(dev, tn, i, 0); ++ } ++ } ++ return 1; ++} ++ ++static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev = obj->my_dev; ++ struct yaffs_obj *parent; ++ ++ yaffs_verify_obj_in_dir(obj); ++ parent = obj->parent; ++ ++ yaffs_verify_dir(parent); ++ ++ if (dev && dev->param.remove_obj_fn) ++ dev->param.remove_obj_fn(obj); ++ ++ list_del_init(&obj->siblings); ++ obj->parent = NULL; ++ ++ yaffs_verify_dir(parent); ++} ++ ++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj) ++{ ++ if (!directory) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "tragedy: Trying to add an object to a null pointer directory" ++ ); ++ BUG(); ++ return; ++ } ++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "tragedy: Trying to add an object to a non-directory" ++ ); ++ BUG(); ++ } ++ ++ if (obj->siblings.prev == NULL) { ++ /* Not initialised */ ++ BUG(); ++ } ++ ++ yaffs_verify_dir(directory); ++ ++ yaffs_remove_obj_from_dir(obj); ++ ++ /* Now add it */ ++ list_add(&obj->siblings, &directory->variant.dir_variant.children); ++ obj->parent = directory; ++ ++ if (directory == obj->my_dev->unlinked_dir ++ || directory == obj->my_dev->del_dir) { ++ obj->unlinked = 1; ++ obj->my_dev->n_unlinked_files++; ++ obj->rename_allowed = 0; ++ } ++ ++ yaffs_verify_dir(directory); ++ yaffs_verify_obj_in_dir(obj); ++} ++ ++static int yaffs_change_obj_name(struct yaffs_obj *obj, ++ struct yaffs_obj *new_dir, ++ const YCHAR *new_name, int force, int shadows) ++{ ++ int unlink_op; ++ int del_op; ++ struct yaffs_obj *existing_target; ++ ++ if (new_dir == NULL) ++ new_dir = obj->parent; /* use the old directory */ ++ ++ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "tragedy: yaffs_change_obj_name: new_dir is not a directory" ++ ); ++ BUG(); ++ } ++ ++ unlink_op = (new_dir == obj->my_dev->unlinked_dir); ++ del_op = (new_dir == obj->my_dev->del_dir); ++ ++ existing_target = yaffs_find_by_name(new_dir, new_name); ++ ++ /* If the object is a file going into the unlinked directory, ++ * then it is OK to just stuff it in since duplicate names are OK. ++ * else only proceed if the new name does not exist and we're putting ++ * it into a directory. 
++ */ ++ if (!(unlink_op || del_op || force || ++ shadows > 0 || !existing_target) || ++ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) ++ return YAFFS_FAIL; ++ ++ yaffs_set_obj_name(obj, new_name); ++ obj->dirty = 1; ++ yaffs_add_obj_to_dir(new_dir, obj); ++ ++ if (unlink_op) ++ obj->unlinked = 1; ++ ++ /* If it is a deletion then we mark it as a shrink for gc */ ++ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0) ++ return YAFFS_OK; ++ ++ return YAFFS_FAIL; ++} ++ ++/*------------------------ Short Operations Cache ------------------------------ ++ * In many situations where there is no high level buffering a lot of ++ * reads might be short sequential reads, and a lot of writes may be short ++ * sequential writes. eg. scanning/writing a jpeg file. ++ * In these cases, a short read/write cache can provide a huge perfomance ++ * benefit with dumb-as-a-rock code. ++ * In Linux, the page cache provides read buffering and the short op cache ++ * provides write buffering. ++ * ++ * There are a small number (~10) of cache chunks per device so that we don't ++ * need a very intelligent search. ++ */ ++ ++static int yaffs_obj_cache_dirty(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev = obj->my_dev; ++ int i; ++ struct yaffs_cache *cache; ++ int n_caches = obj->my_dev->param.n_caches; ++ ++ for (i = 0; i < n_caches; i++) { ++ cache = &dev->cache[i]; ++ if (cache->object == obj && cache->dirty) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void yaffs_flush_file_cache(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev = obj->my_dev; ++ int lowest = -99; /* Stop compiler whining. */ ++ int i; ++ struct yaffs_cache *cache; ++ int chunk_written = 0; ++ int n_caches = obj->my_dev->param.n_caches; ++ ++ if (n_caches < 1) ++ return; ++ do { ++ cache = NULL; ++ ++ /* Find the lowest dirty chunk for this object */ ++ for (i = 0; i < n_caches; i++) { ++ if (dev->cache[i].object == obj && ++ dev->cache[i].dirty) { ++ if (!cache || ++ dev->cache[i].chunk_id < lowest) { ++ cache = &dev->cache[i]; ++ lowest = cache->chunk_id; ++ } ++ } ++ } ++ ++ if (cache && !cache->locked) { ++ /* Write it out and free it up */ ++ chunk_written = ++ yaffs_wr_data_obj(cache->object, ++ cache->chunk_id, ++ cache->data, ++ cache->n_bytes, 1); ++ cache->dirty = 0; ++ cache->object = NULL; ++ } ++ } while (cache && chunk_written > 0); ++ ++ if (cache) ++ /* Hoosterman, disk full while writing cache out. */ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: no space during cache write"); ++} ++ ++/*yaffs_flush_whole_cache(dev) ++ * ++ * ++ */ ++ ++void yaffs_flush_whole_cache(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj; ++ int n_caches = dev->param.n_caches; ++ int i; ++ ++ /* Find a dirty object in the cache and flush it... ++ * until there are no further dirty objects. ++ */ ++ do { ++ obj = NULL; ++ for (i = 0; i < n_caches && !obj; i++) { ++ if (dev->cache[i].object && dev->cache[i].dirty) ++ obj = dev->cache[i].object; ++ } ++ if (obj) ++ yaffs_flush_file_cache(obj); ++ } while (obj); ++ ++} ++ ++/* Grab us a cache chunk for use. ++ * First look for an empty one. ++ * Then look for the least recently used non-dirty one. ++ * Then look for the least recently used dirty one...., flush and look again. 
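++ */
++
++/* Editor's illustrative sketch, not part of the upstream yaffs2 code in this
++ * patch: the selection order described above, shown against a toy cache
++ * array so the policy is easy to see in isolation.  struct yaffs_example_cache
++ * and yaffs_example_pick_cache() are invented names; the real code below
++ * works on dev->cache[] and dev->param.n_caches.
++ */
++struct yaffs_example_cache {
++	int in_use;
++	int dirty;
++	unsigned last_use;
++};
++
++static int yaffs_example_pick_cache(struct yaffs_example_cache *c, int n)
++{
++	int i;
++	int best = -1;
++
++	/* 1. An empty slot wins outright. */
++	for (i = 0; i < n; i++)
++		if (!c[i].in_use)
++			return i;
++
++	/* 2. Otherwise take the least recently used clean slot. */
++	for (i = 0; i < n; i++)
++		if (!c[i].dirty &&
++		    (best < 0 || c[i].last_use < c[best].last_use))
++			best = i;
++
++	/* 3. -1 means everything is dirty: flush one and try again. */
++	return best;
++}
++
++/*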
++ */ ++static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev) ++{ ++ int i; ++ ++ if (dev->param.n_caches > 0) { ++ for (i = 0; i < dev->param.n_caches; i++) { ++ if (!dev->cache[i].object) ++ return &dev->cache[i]; ++ } ++ } ++ return NULL; ++} ++ ++static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev) ++{ ++ struct yaffs_cache *cache; ++ struct yaffs_obj *the_obj; ++ int usage; ++ int i; ++ int pushout; ++ ++ if (dev->param.n_caches < 1) ++ return NULL; ++ ++ /* Try find a non-dirty one... */ ++ ++ cache = yaffs_grab_chunk_worker(dev); ++ ++ if (!cache) { ++ /* They were all dirty, find the LRU object and flush ++ * its cache, then find again. ++ * NB what's here is not very accurate, ++ * we actually flush the object with the LRU chunk. ++ */ ++ ++ /* With locking we can't assume we can use entry zero, ++ * Set the_obj to a valid pointer for Coverity. */ ++ the_obj = dev->cache[0].object; ++ usage = -1; ++ cache = NULL; ++ pushout = -1; ++ ++ for (i = 0; i < dev->param.n_caches; i++) { ++ if (dev->cache[i].object && ++ !dev->cache[i].locked && ++ (dev->cache[i].last_use < usage || ++ !cache)) { ++ usage = dev->cache[i].last_use; ++ the_obj = dev->cache[i].object; ++ cache = &dev->cache[i]; ++ pushout = i; ++ } ++ } ++ ++ if (!cache || cache->dirty) { ++ /* Flush and try again */ ++ yaffs_flush_file_cache(the_obj); ++ cache = yaffs_grab_chunk_worker(dev); ++ } ++ } ++ return cache; ++} ++ ++/* Find a cached chunk */ ++static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj, ++ int chunk_id) ++{ ++ struct yaffs_dev *dev = obj->my_dev; ++ int i; ++ ++ if (dev->param.n_caches < 1) ++ return NULL; ++ ++ for (i = 0; i < dev->param.n_caches; i++) { ++ if (dev->cache[i].object == obj && ++ dev->cache[i].chunk_id == chunk_id) { ++ dev->cache_hits++; ++ ++ return &dev->cache[i]; ++ } ++ } ++ return NULL; ++} ++ ++/* Mark the chunk for the least recently used algorithym */ ++static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache, ++ int is_write) ++{ ++ int i; ++ ++ if (dev->param.n_caches < 1) ++ return; ++ ++ if (dev->cache_last_use < 0 || ++ dev->cache_last_use > 100000000) { ++ /* Reset the cache usages */ ++ for (i = 1; i < dev->param.n_caches; i++) ++ dev->cache[i].last_use = 0; ++ ++ dev->cache_last_use = 0; ++ } ++ dev->cache_last_use++; ++ cache->last_use = dev->cache_last_use; ++ ++ if (is_write) ++ cache->dirty = 1; ++} ++ ++/* Invalidate a single cache page. ++ * Do this when a whole page gets written, ++ * ie the short cache for this page is no longer valid. ++ */ ++static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id) ++{ ++ struct yaffs_cache *cache; ++ ++ if (object->my_dev->param.n_caches > 0) { ++ cache = yaffs_find_chunk_cache(object, chunk_id); ++ ++ if (cache) ++ cache->object = NULL; ++ } ++} ++ ++/* Invalidate all the cache pages associated with this object ++ * Do this whenever ther file is deleted or resized. ++ */ ++static void yaffs_invalidate_whole_cache(struct yaffs_obj *in) ++{ ++ int i; ++ struct yaffs_dev *dev = in->my_dev; ++ ++ if (dev->param.n_caches > 0) { ++ /* Invalidate it. 
*/ ++ for (i = 0; i < dev->param.n_caches; i++) { ++ if (dev->cache[i].object == in) ++ dev->cache[i].object = NULL; ++ } ++ } ++} ++ ++static void yaffs_unhash_obj(struct yaffs_obj *obj) ++{ ++ int bucket; ++ struct yaffs_dev *dev = obj->my_dev; ++ ++ /* If it is still linked into the bucket list, free from the list */ ++ if (!list_empty(&obj->hash_link)) { ++ list_del_init(&obj->hash_link); ++ bucket = yaffs_hash_fn(obj->obj_id); ++ dev->obj_bucket[bucket].count--; ++ } ++} ++ ++/* FreeObject frees up a Object and puts it back on the free list */ ++static void yaffs_free_obj(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev; ++ ++ if (!obj) { ++ BUG(); ++ return; ++ } ++ dev = obj->my_dev; ++ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p", ++ obj, obj->my_inode); ++ if (obj->parent) ++ BUG(); ++ if (!list_empty(&obj->siblings)) ++ BUG(); ++ ++ if (obj->my_inode) { ++ /* We're still hooked up to a cached inode. ++ * Don't delete now, but mark for later deletion ++ */ ++ obj->defered_free = 1; ++ return; ++ } ++ ++ yaffs_unhash_obj(obj); ++ ++ yaffs_free_raw_obj(dev, obj); ++ dev->n_obj--; ++ dev->checkpoint_blocks_required = 0; /* force recalculation */ ++} ++ ++void yaffs_handle_defered_free(struct yaffs_obj *obj) ++{ ++ if (obj->defered_free) ++ yaffs_free_obj(obj); ++} ++ ++static int yaffs_generic_obj_del(struct yaffs_obj *in) ++{ ++ /* Iinvalidate the file's data in the cache, without flushing. */ ++ yaffs_invalidate_whole_cache(in); ++ ++ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) { ++ /* Move to unlinked directory so we have a deletion record */ ++ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0, ++ 0); ++ } ++ ++ yaffs_remove_obj_from_dir(in); ++ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__); ++ in->hdr_chunk = 0; ++ ++ yaffs_free_obj(in); ++ return YAFFS_OK; ++ ++} ++ ++static void yaffs_soft_del_file(struct yaffs_obj *obj) ++{ ++ if (!obj->deleted || ++ obj->variant_type != YAFFS_OBJECT_TYPE_FILE || ++ obj->soft_del) ++ return; ++ ++ if (obj->n_data_chunks <= 0) { ++ /* Empty file with no duplicate object headers, ++ * just delete it immediately */ ++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top); ++ obj->variant.file_variant.top = NULL; ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "yaffs: Deleting empty file %d", ++ obj->obj_id); ++ yaffs_generic_obj_del(obj); ++ } else { ++ yaffs_soft_del_worker(obj, ++ obj->variant.file_variant.top, ++ obj->variant. ++ file_variant.top_level, 0); ++ obj->soft_del = 1; ++ } ++} ++ ++/* Pruning removes any part of the file structure tree that is beyond the ++ * bounds of the file (ie that does not point to chunks). ++ * ++ * A file should only get pruned when its size is reduced. ++ * ++ * Before pruning, the chunks must be pulled from the tree and the ++ * level 0 tnode entries must be zeroed out. ++ * Could also use this for file deletion, but that's probably better handled ++ * by a special case. ++ * ++ * This function is recursive. For levels > 0 the function is called again on ++ * any sub-tree. For level == 0 we just check if the sub-tree has data. ++ * If there is no data in a subtree then it is pruned. 
++ */ ++ ++static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev, ++ struct yaffs_tnode *tn, u32 level, ++ int del0) ++{ ++ int i; ++ int has_data; ++ ++ if (!tn) ++ return tn; ++ ++ has_data = 0; ++ ++ if (level > 0) { ++ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) { ++ if (tn->internal[i]) { ++ tn->internal[i] = ++ yaffs_prune_worker(dev, ++ tn->internal[i], ++ level - 1, ++ (i == 0) ? del0 : 1); ++ } ++ ++ if (tn->internal[i]) ++ has_data++; ++ } ++ } else { ++ int tnode_size_u32 = dev->tnode_size / sizeof(u32); ++ u32 *map = (u32 *) tn; ++ ++ for (i = 0; !has_data && i < tnode_size_u32; i++) { ++ if (map[i]) ++ has_data++; ++ } ++ } ++ ++ if (has_data == 0 && del0) { ++ /* Free and return NULL */ ++ yaffs_free_tnode(dev, tn); ++ tn = NULL; ++ } ++ return tn; ++} ++ ++static int yaffs_prune_tree(struct yaffs_dev *dev, ++ struct yaffs_file_var *file_struct) ++{ ++ int i; ++ int has_data; ++ int done = 0; ++ struct yaffs_tnode *tn; ++ ++ if (file_struct->top_level < 1) ++ return YAFFS_OK; ++ ++ file_struct->top = ++ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0); ++ ++ /* Now we have a tree with all the non-zero branches NULL but ++ * the height is the same as it was. ++ * Let's see if we can trim internal tnodes to shorten the tree. ++ * We can do this if only the 0th element in the tnode is in use ++ * (ie all the non-zero are NULL) ++ */ ++ ++ while (file_struct->top_level && !done) { ++ tn = file_struct->top; ++ ++ has_data = 0; ++ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) { ++ if (tn->internal[i]) ++ has_data++; ++ } ++ ++ if (!has_data) { ++ file_struct->top = tn->internal[0]; ++ file_struct->top_level--; ++ yaffs_free_tnode(dev, tn); ++ } else { ++ done = 1; ++ } ++ } ++ ++ return YAFFS_OK; ++} ++ ++/*-------------------- End of File Structure functions.-------------------*/ ++ ++/* alloc_empty_obj gets us a clean Object.*/ ++static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev); ++ ++ if (!obj) ++ return obj; ++ ++ dev->n_obj++; ++ ++ /* Now sweeten it up... */ ++ ++ memset(obj, 0, sizeof(struct yaffs_obj)); ++ obj->being_created = 1; ++ ++ obj->my_dev = dev; ++ obj->hdr_chunk = 0; ++ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN; ++ INIT_LIST_HEAD(&(obj->hard_links)); ++ INIT_LIST_HEAD(&(obj->hash_link)); ++ INIT_LIST_HEAD(&obj->siblings); ++ ++ /* Now make the directory sane */ ++ if (dev->root_dir) { ++ obj->parent = dev->root_dir; ++ list_add(&(obj->siblings), ++ &dev->root_dir->variant.dir_variant.children); ++ } ++ ++ /* Add it to the lost and found directory. ++ * NB Can't put root or lost-n-found in lost-n-found so ++ * check if lost-n-found exists first ++ */ ++ if (dev->lost_n_found) ++ yaffs_add_obj_to_dir(dev->lost_n_found, obj); ++ ++ obj->being_created = 0; ++ ++ dev->checkpoint_blocks_required = 0; /* force recalculation */ ++ ++ return obj; ++} ++ ++static int yaffs_find_nice_bucket(struct yaffs_dev *dev) ++{ ++ int i; ++ int l = 999; ++ int lowest = 999999; ++ ++ /* Search for the shortest list or one that ++ * isn't too long. 
++ */ ++ ++ for (i = 0; i < 10 && lowest > 4; i++) { ++ dev->bucket_finder++; ++ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS; ++ if (dev->obj_bucket[dev->bucket_finder].count < lowest) { ++ lowest = dev->obj_bucket[dev->bucket_finder].count; ++ l = dev->bucket_finder; ++ } ++ } ++ ++ return l; ++} ++ ++static int yaffs_new_obj_id(struct yaffs_dev *dev) ++{ ++ int bucket = yaffs_find_nice_bucket(dev); ++ int found = 0; ++ struct list_head *i; ++ u32 n = (u32) bucket; ++ ++ /* Now find an object value that has not already been taken ++ * by scanning the list. ++ */ ++ ++ while (!found) { ++ found = 1; ++ n += YAFFS_NOBJECT_BUCKETS; ++ if (1 || dev->obj_bucket[bucket].count > 0) { ++ list_for_each(i, &dev->obj_bucket[bucket].list) { ++ /* If there is already one in the list */ ++ if (i && list_entry(i, struct yaffs_obj, ++ hash_link)->obj_id == n) { ++ found = 0; ++ } ++ } ++ } ++ } ++ return n; ++} ++ ++static void yaffs_hash_obj(struct yaffs_obj *in) ++{ ++ int bucket = yaffs_hash_fn(in->obj_id); ++ struct yaffs_dev *dev = in->my_dev; ++ ++ list_add(&in->hash_link, &dev->obj_bucket[bucket].list); ++ dev->obj_bucket[bucket].count++; ++} ++ ++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number) ++{ ++ int bucket = yaffs_hash_fn(number); ++ struct list_head *i; ++ struct yaffs_obj *in; ++ ++ list_for_each(i, &dev->obj_bucket[bucket].list) { ++ /* Look if it is in the list */ ++ in = list_entry(i, struct yaffs_obj, hash_link); ++ if (in->obj_id == number) { ++ /* Don't show if it is defered free */ ++ if (in->defered_free) ++ return NULL; ++ return in; ++ } ++ } ++ ++ return NULL; ++} ++ ++static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number, ++ enum yaffs_obj_type type) ++{ ++ struct yaffs_obj *the_obj = NULL; ++ struct yaffs_tnode *tn = NULL; ++ ++ if (number < 0) ++ number = yaffs_new_obj_id(dev); ++ ++ if (type == YAFFS_OBJECT_TYPE_FILE) { ++ tn = yaffs_get_tnode(dev); ++ if (!tn) ++ return NULL; ++ } ++ ++ the_obj = yaffs_alloc_empty_obj(dev); ++ if (!the_obj) { ++ if (tn) ++ yaffs_free_tnode(dev, tn); ++ return NULL; ++ } ++ ++ the_obj->fake = 0; ++ the_obj->rename_allowed = 1; ++ the_obj->unlink_allowed = 1; ++ the_obj->obj_id = number; ++ yaffs_hash_obj(the_obj); ++ the_obj->variant_type = type; ++ yaffs_load_current_time(the_obj, 1, 1); ++ ++ switch (type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ the_obj->variant.file_variant.file_size = 0; ++ the_obj->variant.file_variant.scanned_size = 0; ++ the_obj->variant.file_variant.shrink_size = ++ yaffs_max_file_size(dev); ++ the_obj->variant.file_variant.top_level = 0; ++ the_obj->variant.file_variant.top = tn; ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children); ++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty); ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ /* No action required */ ++ break; ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ /* todo this should not happen */ ++ break; ++ } ++ return the_obj; ++} ++ ++static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev, ++ int number, u32 mode) ++{ ++ ++ struct yaffs_obj *obj = ++ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY); ++ ++ if (!obj) ++ return NULL; ++ ++ obj->fake = 1; /* it is fake so it might not use NAND */ ++ obj->rename_allowed = 0; ++ obj->unlink_allowed = 0; ++ obj->deleted = 0; ++ obj->unlinked = 0; ++ obj->yst_mode = mode; ++ obj->my_dev = dev; ++ obj->hdr_chunk = 0; /* Not a valid chunk. 
*/ ++ return obj; ++ ++} ++ ++ ++static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev) ++{ ++ int i; ++ ++ dev->n_obj = 0; ++ dev->n_tnodes = 0; ++ yaffs_init_raw_tnodes_and_objs(dev); ++ ++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { ++ INIT_LIST_HEAD(&dev->obj_bucket[i].list); ++ dev->obj_bucket[i].count = 0; ++ } ++} ++ ++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev, ++ int number, ++ enum yaffs_obj_type type) ++{ ++ struct yaffs_obj *the_obj = NULL; ++ ++ if (number > 0) ++ the_obj = yaffs_find_by_number(dev, number); ++ ++ if (!the_obj) ++ the_obj = yaffs_new_obj(dev, number, type); ++ ++ return the_obj; ++ ++} ++ ++YCHAR *yaffs_clone_str(const YCHAR *str) ++{ ++ YCHAR *new_str = NULL; ++ int len; ++ ++ if (!str) ++ str = _Y(""); ++ ++ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH); ++ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS); ++ if (new_str) { ++ strncpy(new_str, str, len); ++ new_str[len] = 0; ++ } ++ return new_str; ++ ++} ++/* ++ *yaffs_update_parent() handles fixing a directories mtime and ctime when a new ++ * link (ie. name) is created or deleted in the directory. ++ * ++ * ie. ++ * create dir/a : update dir's mtime/ctime ++ * rm dir/a: update dir's mtime/ctime ++ * modify dir/a: don't update dir's mtimme/ctime ++ * ++ * This can be handled immediately or defered. Defering helps reduce the number ++ * of updates when many files in a directory are changed within a brief period. ++ * ++ * If the directory updating is defered then yaffs_update_dirty_dirs must be ++ * called periodically. ++ */ ++ ++static void yaffs_update_parent(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev; ++ ++ if (!obj) ++ return; ++ dev = obj->my_dev; ++ obj->dirty = 1; ++ yaffs_load_current_time(obj, 0, 1); ++ if (dev->param.defered_dir_update) { ++ struct list_head *link = &obj->variant.dir_variant.dirty; ++ ++ if (list_empty(link)) { ++ list_add(link, &dev->dirty_dirs); ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, ++ "Added object %d to dirty directories", ++ obj->obj_id); ++ } ++ ++ } else { ++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL); ++ } ++} ++ ++void yaffs_update_dirty_dirs(struct yaffs_dev *dev) ++{ ++ struct list_head *link; ++ struct yaffs_obj *obj; ++ struct yaffs_dir_var *d_s; ++ union yaffs_obj_var *o_v; ++ ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories"); ++ ++ while (!list_empty(&dev->dirty_dirs)) { ++ link = dev->dirty_dirs.next; ++ list_del_init(link); ++ ++ d_s = list_entry(link, struct yaffs_dir_var, dirty); ++ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant); ++ obj = list_entry(o_v, struct yaffs_obj, variant); ++ ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d", ++ obj->obj_id); ++ ++ if (obj->dirty) ++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL); ++ } ++} ++ ++/* ++ * Mknod (create) a new object. ++ * equiv_obj only has meaning for a hard link; ++ * alias_str only has meaning for a symlink. ++ * rdev only has meaning for devices (a subset of special objects) ++ */ ++ ++static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type, ++ struct yaffs_obj *parent, ++ const YCHAR *name, ++ u32 mode, ++ u32 uid, ++ u32 gid, ++ struct yaffs_obj *equiv_obj, ++ const YCHAR *alias_str, u32 rdev) ++{ ++ struct yaffs_obj *in; ++ YCHAR *str = NULL; ++ struct yaffs_dev *dev = parent->my_dev; ++ ++ /* Check if the entry exists. ++ * If it does then fail the call since we don't want a dup. 
*/ ++ if (yaffs_find_by_name(parent, name)) ++ return NULL; ++ ++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) { ++ str = yaffs_clone_str(alias_str); ++ if (!str) ++ return NULL; ++ } ++ ++ in = yaffs_new_obj(dev, -1, type); ++ ++ if (!in) { ++ kfree(str); ++ return NULL; ++ } ++ ++ in->hdr_chunk = 0; ++ in->valid = 1; ++ in->variant_type = type; ++ ++ in->yst_mode = mode; ++ ++ yaffs_attribs_init(in, gid, uid, rdev); ++ ++ in->n_data_chunks = 0; ++ ++ yaffs_set_obj_name(in, name); ++ in->dirty = 1; ++ ++ yaffs_add_obj_to_dir(parent, in); ++ ++ in->my_dev = parent->my_dev; ++ ++ switch (type) { ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ in->variant.symlink_variant.alias = str; ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ in->variant.hardlink_variant.equiv_obj = equiv_obj; ++ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id; ++ list_add(&in->hard_links, &equiv_obj->hard_links); ++ break; ++ case YAFFS_OBJECT_TYPE_FILE: ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ /* do nothing */ ++ break; ++ } ++ ++ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) { ++ /* Could not create the object header, fail */ ++ yaffs_del_obj(in); ++ in = NULL; ++ } ++ ++ if (in) ++ yaffs_update_parent(parent); ++ ++ return in; ++} ++ ++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid) ++{ ++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode, ++ uid, gid, NULL, NULL, 0); ++} ++ ++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name, ++ u32 mode, u32 uid, u32 gid) ++{ ++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name, ++ mode, uid, gid, NULL, NULL, 0); ++} ++ ++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid, u32 rdev) ++{ ++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode, ++ uid, gid, NULL, NULL, rdev); ++} ++ ++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid, const YCHAR *alias) ++{ ++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode, ++ uid, gid, NULL, alias, 0); ++} ++ ++/* yaffs_link_obj returns the object id of the equivalent object.*/ ++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name, ++ struct yaffs_obj *equiv_obj) ++{ ++ /* Get the real object in case we were fed a hard link obj */ ++ equiv_obj = yaffs_get_equivalent_obj(equiv_obj); ++ ++ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK, ++ parent, name, 0, 0, 0, ++ equiv_obj, NULL, 0)) ++ return equiv_obj; ++ ++ return NULL; ++ ++} ++ ++ ++ ++/*---------------------- Block Management and Page Allocation -------------*/ ++ ++static void yaffs_deinit_blocks(struct yaffs_dev *dev) ++{ ++ if (dev->block_info_alt && dev->block_info) ++ vfree(dev->block_info); ++ else ++ kfree(dev->block_info); ++ ++ dev->block_info_alt = 0; ++ ++ dev->block_info = NULL; ++ ++ if (dev->chunk_bits_alt && dev->chunk_bits) ++ vfree(dev->chunk_bits); ++ else ++ kfree(dev->chunk_bits); ++ dev->chunk_bits_alt = 0; ++ dev->chunk_bits = NULL; ++} ++ ++static int yaffs_init_blocks(struct yaffs_dev *dev) ++{ ++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1; ++ ++ dev->block_info = NULL; ++ dev->chunk_bits = NULL; ++ dev->alloc_block = -1; /* force it to get a new one */ ++ ++ /* If the first allocation strategy fails, thry the alternate one */ ++ dev->block_info = ++ 
kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS); ++ if (!dev->block_info) { ++ dev->block_info = ++ vmalloc(n_blocks * sizeof(struct yaffs_block_info)); ++ dev->block_info_alt = 1; ++ } else { ++ dev->block_info_alt = 0; ++ } ++ ++ if (!dev->block_info) ++ goto alloc_error; ++ ++ /* Set up dynamic blockinfo stuff. Round up bytes. */ ++ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; ++ dev->chunk_bits = ++ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS); ++ if (!dev->chunk_bits) { ++ dev->chunk_bits = ++ vmalloc(dev->chunk_bit_stride * n_blocks); ++ dev->chunk_bits_alt = 1; ++ } else { ++ dev->chunk_bits_alt = 0; ++ } ++ if (!dev->chunk_bits) ++ goto alloc_error; ++ ++ ++ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info)); ++ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks); ++ return YAFFS_OK; ++ ++alloc_error: ++ yaffs_deinit_blocks(dev); ++ return YAFFS_FAIL; ++} ++ ++ ++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no) ++{ ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no); ++ int erased_ok = 0; ++ int i; ++ ++ /* If the block is still healthy erase it and mark as clean. ++ * If the block has had a data failure, then retire it. ++ */ ++ ++ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, ++ "yaffs_block_became_dirty block %d state %d %s", ++ block_no, bi->block_state, ++ (bi->needs_retiring) ? "needs retiring" : ""); ++ ++ yaffs2_clear_oldest_dirty_seq(dev, bi); ++ ++ bi->block_state = YAFFS_BLOCK_STATE_DIRTY; ++ ++ /* If this is the block being garbage collected then stop gc'ing */ ++ if (block_no == dev->gc_block) ++ dev->gc_block = 0; ++ ++ /* If this block is currently the best candidate for gc ++ * then drop as a candidate */ ++ if (block_no == dev->gc_dirtiest) { ++ dev->gc_dirtiest = 0; ++ dev->gc_pages_in_use = 0; ++ } ++ ++ if (!bi->needs_retiring) { ++ yaffs2_checkpt_invalidate(dev); ++ erased_ok = yaffs_erase_block(dev, block_no); ++ if (!erased_ok) { ++ dev->n_erase_failures++; ++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, ++ "**>> Erasure failed %d", block_no); ++ } ++ } ++ ++ /* Verify erasure if needed */ ++ if (erased_ok && ++ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || ++ !yaffs_skip_verification(dev))) { ++ for (i = 0; i < dev->param.chunks_per_block; i++) { ++ if (!yaffs_check_chunk_erased(dev, ++ block_no * dev->param.chunks_per_block + i)) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ ">>Block %d erasure supposedly OK, but chunk %d not erased", ++ block_no, i); ++ } ++ } ++ } ++ ++ if (!erased_ok) { ++ /* We lost a block of free space */ ++ dev->n_free_chunks -= dev->param.chunks_per_block; ++ yaffs_retire_block(dev, block_no); ++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, ++ "**>> Block %d retired", block_no); ++ return; ++ } ++ ++ /* Clean it up... 
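++	 * The erase succeeded, so reset the block's bookkeeping and return
++	 * it to the erased pool.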
*/ ++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY; ++ bi->seq_number = 0; ++ dev->n_erased_blocks++; ++ bi->pages_in_use = 0; ++ bi->soft_del_pages = 0; ++ bi->has_shrink_hdr = 0; ++ bi->skip_erased_check = 1; /* Clean, so no need to check */ ++ bi->gc_prioritise = 0; ++ bi->has_summary = 0; ++ ++ yaffs_clear_chunk_bits(dev, block_no); ++ ++ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no); ++} ++ ++static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi, ++ int old_chunk, u8 *buffer) ++{ ++ int new_chunk; ++ int mark_flash = 1; ++ struct yaffs_ext_tags tags; ++ struct yaffs_obj *object; ++ int matching_chunk; ++ int ret_val = YAFFS_OK; ++ ++ memset(&tags, 0, sizeof(tags)); ++ yaffs_rd_chunk_tags_nand(dev, old_chunk, ++ buffer, &tags); ++ object = yaffs_find_by_number(dev, tags.obj_id); ++ ++ yaffs_trace(YAFFS_TRACE_GC_DETAIL, ++ "Collecting chunk in block %d, %d %d %d ", ++ dev->gc_chunk, tags.obj_id, ++ tags.chunk_id, tags.n_bytes); ++ ++ if (object && !yaffs_skip_verification(dev)) { ++ if (tags.chunk_id == 0) ++ matching_chunk = ++ object->hdr_chunk; ++ else if (object->soft_del) ++ /* Defeat the test */ ++ matching_chunk = old_chunk; ++ else ++ matching_chunk = ++ yaffs_find_chunk_in_file ++ (object, tags.chunk_id, ++ NULL); ++ ++ if (old_chunk != matching_chunk) ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "gc: page in gc mismatch: %d %d %d %d", ++ old_chunk, ++ matching_chunk, ++ tags.obj_id, ++ tags.chunk_id); ++ } ++ ++ if (!object) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "page %d in gc has no object: %d %d %d ", ++ old_chunk, ++ tags.obj_id, tags.chunk_id, ++ tags.n_bytes); ++ } ++ ++ if (object && ++ object->deleted && ++ object->soft_del && tags.chunk_id != 0) { ++ /* Data chunk in a soft deleted file, ++ * throw it away. ++ * It's a soft deleted data chunk, ++ * No need to copy this, just forget ++ * about it and fix up the object. ++ */ ++ ++ /* Free chunks already includes ++ * softdeleted chunks, how ever this ++ * chunk is going to soon be really ++ * deleted which will increment free ++ * chunks. We have to decrement free ++ * chunks so this works out properly. ++ */ ++ dev->n_free_chunks--; ++ bi->soft_del_pages--; ++ ++ object->n_data_chunks--; ++ if (object->n_data_chunks <= 0) { ++ /* remeber to clean up obj */ ++ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id; ++ dev->n_clean_ups++; ++ } ++ mark_flash = 0; ++ } else if (object) { ++ /* It's either a data chunk in a live ++ * file or an ObjectHeader, so we're ++ * interested in it. ++ * NB Need to keep the ObjectHeaders of ++ * deleted files until the whole file ++ * has been deleted off ++ */ ++ tags.serial_number++; ++ dev->n_gc_copies++; ++ ++ if (tags.chunk_id == 0) { ++ /* It is an object Id, ++ * We need to nuke the ++ * shrinkheader flags since its ++ * work is done. ++ * Also need to clean up ++ * shadowing. 
++ */ ++ struct yaffs_obj_hdr *oh; ++ oh = (struct yaffs_obj_hdr *) buffer; ++ ++ oh->is_shrink = 0; ++ tags.extra_is_shrink = 0; ++ oh->shadows_obj = 0; ++ oh->inband_shadowed_obj_id = 0; ++ tags.extra_shadows = 0; ++ ++ /* Update file size */ ++ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) { ++ yaffs_oh_size_load(oh, ++ object->variant.file_variant.file_size); ++ tags.extra_file_size = ++ object->variant.file_variant.file_size; ++ } ++ ++ yaffs_verify_oh(object, oh, &tags, 1); ++ new_chunk = ++ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1); ++ } else { ++ new_chunk = ++ yaffs_write_new_chunk(dev, buffer, &tags, 1); ++ } ++ ++ if (new_chunk < 0) { ++ ret_val = YAFFS_FAIL; ++ } else { ++ ++ /* Now fix up the Tnodes etc. */ ++ ++ if (tags.chunk_id == 0) { ++ /* It's a header */ ++ object->hdr_chunk = new_chunk; ++ object->serial = tags.serial_number; ++ } else { ++ /* It's a data chunk */ ++ yaffs_put_chunk_in_file(object, tags.chunk_id, ++ new_chunk, 0); ++ } ++ } ++ } ++ if (ret_val == YAFFS_OK) ++ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__); ++ return ret_val; ++} ++ ++static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block) ++{ ++ int old_chunk; ++ int ret_val = YAFFS_OK; ++ int i; ++ int is_checkpt_block; ++ int max_copies; ++ int chunks_before = yaffs_get_erased_chunks(dev); ++ int chunks_after; ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block); ++ ++ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT); ++ ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "Collecting block %d, in use %d, shrink %d, whole_block %d", ++ block, bi->pages_in_use, bi->has_shrink_hdr, ++ whole_block); ++ ++ /*yaffs_verify_free_chunks(dev); */ ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) ++ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING; ++ ++ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */ ++ ++ dev->gc_disable = 1; ++ ++ yaffs_summary_gc(dev, block); ++ ++ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) { ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "Collecting block %d that has no chunks in use", ++ block); ++ yaffs_block_became_dirty(dev, block); ++ } else { ++ ++ u8 *buffer = yaffs_get_temp_buffer(dev); ++ ++ yaffs_verify_blk(dev, bi, block); ++ ++ max_copies = (whole_block) ? dev->param.chunks_per_block : 5; ++ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk; ++ ++ for (/* init already done */ ; ++ ret_val == YAFFS_OK && ++ dev->gc_chunk < dev->param.chunks_per_block && ++ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) && ++ max_copies > 0; ++ dev->gc_chunk++, old_chunk++) { ++ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) { ++ /* Page is in use and might need to be copied */ ++ max_copies--; ++ ret_val = yaffs_gc_process_chunk(dev, bi, ++ old_chunk, buffer); ++ } ++ } ++ yaffs_release_temp_buffer(dev, buffer); ++ } ++ ++ yaffs_verify_collected_blk(dev, bi, block); ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { ++ /* ++ * The gc did not complete. Set block state back to FULL ++ * because checkpointing does not restore gc. ++ */ ++ bi->block_state = YAFFS_BLOCK_STATE_FULL; ++ } else { ++ /* The gc completed. 
*/ ++ /* Do any required cleanups */ ++ for (i = 0; i < dev->n_clean_ups; i++) { ++ /* Time to delete the file too */ ++ struct yaffs_obj *object = ++ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]); ++ if (object) { ++ yaffs_free_tnode(dev, ++ object->variant.file_variant.top); ++ object->variant.file_variant.top = NULL; ++ yaffs_trace(YAFFS_TRACE_GC, ++ "yaffs: About to finally delete object %d", ++ object->obj_id); ++ yaffs_generic_obj_del(object); ++ object->my_dev->n_deleted_files--; ++ } ++ ++ } ++ chunks_after = yaffs_get_erased_chunks(dev); ++ if (chunks_before >= chunks_after) ++ yaffs_trace(YAFFS_TRACE_GC, ++ "gc did not increase free chunks before %d after %d", ++ chunks_before, chunks_after); ++ dev->gc_block = 0; ++ dev->gc_chunk = 0; ++ dev->n_clean_ups = 0; ++ } ++ ++ dev->gc_disable = 0; ++ ++ return ret_val; ++} ++ ++/* ++ * find_gc_block() selects the dirtiest block (or close enough) ++ * for garbage collection. ++ */ ++ ++static unsigned yaffs_find_gc_block(struct yaffs_dev *dev, ++ int aggressive, int background) ++{ ++ int i; ++ int iterations; ++ unsigned selected = 0; ++ int prioritised = 0; ++ int prioritised_exist = 0; ++ struct yaffs_block_info *bi; ++ int threshold; ++ ++ /* First let's see if we need to grab a prioritised block */ ++ if (dev->has_pending_prioritised_gc && !aggressive) { ++ dev->gc_dirtiest = 0; ++ bi = dev->block_info; ++ for (i = dev->internal_start_block; ++ i <= dev->internal_end_block && !selected; i++) { ++ ++ if (bi->gc_prioritise) { ++ prioritised_exist = 1; ++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL && ++ yaffs_block_ok_for_gc(dev, bi)) { ++ selected = i; ++ prioritised = 1; ++ } ++ } ++ bi++; ++ } ++ ++ /* ++ * If there is a prioritised block and none was selected then ++ * this happened because there is at least one old dirty block ++ * gumming up the works. Let's gc the oldest dirty block. ++ */ ++ ++ if (prioritised_exist && ++ !selected && dev->oldest_dirty_block > 0) ++ selected = dev->oldest_dirty_block; ++ ++ if (!prioritised_exist) /* None found, so we can clear this */ ++ dev->has_pending_prioritised_gc = 0; ++ } ++ ++ /* If we're doing aggressive GC then we are happy to take a less-dirty ++ * block, and search harder. ++ * else (leasurely gc), then we only bother to do this if the ++ * block has only a few pages in use. ++ */ ++ ++ if (!selected) { ++ int pages_used; ++ int n_blocks = ++ dev->internal_end_block - dev->internal_start_block + 1; ++ if (aggressive) { ++ threshold = dev->param.chunks_per_block; ++ iterations = n_blocks; ++ } else { ++ int max_threshold; ++ ++ if (background) ++ max_threshold = dev->param.chunks_per_block / 2; ++ else ++ max_threshold = dev->param.chunks_per_block / 8; ++ ++ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD) ++ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD; ++ ++ threshold = background ? 
(dev->gc_not_done + 2) * 2 : 0; ++ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD) ++ threshold = YAFFS_GC_PASSIVE_THRESHOLD; ++ if (threshold > max_threshold) ++ threshold = max_threshold; ++ ++ iterations = n_blocks / 16 + 1; ++ if (iterations > 100) ++ iterations = 100; ++ } ++ ++ for (i = 0; ++ i < iterations && ++ (dev->gc_dirtiest < 1 || ++ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); ++ i++) { ++ dev->gc_block_finder++; ++ if (dev->gc_block_finder < dev->internal_start_block || ++ dev->gc_block_finder > dev->internal_end_block) ++ dev->gc_block_finder = ++ dev->internal_start_block; ++ ++ bi = yaffs_get_block_info(dev, dev->gc_block_finder); ++ ++ pages_used = bi->pages_in_use - bi->soft_del_pages; ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL && ++ pages_used < dev->param.chunks_per_block && ++ (dev->gc_dirtiest < 1 || ++ pages_used < dev->gc_pages_in_use) && ++ yaffs_block_ok_for_gc(dev, bi)) { ++ dev->gc_dirtiest = dev->gc_block_finder; ++ dev->gc_pages_in_use = pages_used; ++ } ++ } ++ ++ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold) ++ selected = dev->gc_dirtiest; ++ } ++ ++ /* ++ * If nothing has been selected for a while, try the oldest dirty ++ * because that's gumming up the works. ++ */ ++ ++ if (!selected && dev->param.is_yaffs2 && ++ dev->gc_not_done >= (background ? 10 : 20)) { ++ yaffs2_find_oldest_dirty_seq(dev); ++ if (dev->oldest_dirty_block > 0) { ++ selected = dev->oldest_dirty_block; ++ dev->gc_dirtiest = selected; ++ dev->oldest_dirty_gc_count++; ++ bi = yaffs_get_block_info(dev, selected); ++ dev->gc_pages_in_use = ++ bi->pages_in_use - bi->soft_del_pages; ++ } else { ++ dev->gc_not_done = 0; ++ } ++ } ++ ++ if (selected) { ++ yaffs_trace(YAFFS_TRACE_GC, ++ "GC Selected block %d with %d free, prioritised:%d", ++ selected, ++ dev->param.chunks_per_block - dev->gc_pages_in_use, ++ prioritised); ++ ++ dev->n_gc_blocks++; ++ if (background) ++ dev->bg_gcs++; ++ ++ dev->gc_dirtiest = 0; ++ dev->gc_pages_in_use = 0; ++ dev->gc_not_done = 0; ++ if (dev->refresh_skip > 0) ++ dev->refresh_skip--; ++ } else { ++ dev->gc_not_done++; ++ yaffs_trace(YAFFS_TRACE_GC, ++ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s", ++ dev->gc_block_finder, dev->gc_not_done, threshold, ++ dev->gc_dirtiest, dev->gc_pages_in_use, ++ dev->oldest_dirty_block, background ? " bg" : ""); ++ } ++ ++ return selected; ++} ++ ++/* New garbage collector ++ * If we're very low on erased blocks then we do aggressive garbage collection ++ * otherwise we do "leasurely" garbage collection. ++ * Aggressive gc looks further (whole array) and will accept less dirty blocks. ++ * Passive gc only inspects smaller areas and only accepts more dirty blocks. ++ * ++ * The idea is to help clear out space in a more spread-out manner. ++ * Dunno if it really does anything useful. ++ */ ++static int yaffs_check_gc(struct yaffs_dev *dev, int background) ++{ ++ int aggressive = 0; ++ int gc_ok = YAFFS_OK; ++ int max_tries = 0; ++ int min_erased; ++ int erased_chunks; ++ int checkpt_block_adjust; ++ ++ if (dev->param.gc_control_fn && ++ (dev->param.gc_control_fn(dev) & 1) == 0) ++ return YAFFS_OK; ++ ++ if (dev->gc_disable) ++ /* Bail out so we don't get recursive gc */ ++ return YAFFS_OK; ++ ++ /* This loop should pass the first time. ++ * Only loops here if the collection does not increase space. 
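++	 * max_tries caps this at two passes, so a block that cannot be
++	 * reclaimed does not stall the caller indefinitely.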
++ */ ++ ++ do { ++ max_tries++; ++ ++ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev); ++ ++ min_erased = ++ dev->param.n_reserved_blocks + checkpt_block_adjust + 1; ++ erased_chunks = ++ dev->n_erased_blocks * dev->param.chunks_per_block; ++ ++ /* If we need a block soon then do aggressive gc. */ ++ if (dev->n_erased_blocks < min_erased) ++ aggressive = 1; ++ else { ++ if (!background ++ && erased_chunks > (dev->n_free_chunks / 4)) ++ break; ++ ++ if (dev->gc_skip > 20) ++ dev->gc_skip = 20; ++ if (erased_chunks < dev->n_free_chunks / 2 || ++ dev->gc_skip < 1 || background) ++ aggressive = 0; ++ else { ++ dev->gc_skip--; ++ break; ++ } ++ } ++ ++ dev->gc_skip = 5; ++ ++ /* If we don't already have a block being gc'd then see if we ++ * should start another */ ++ ++ if (dev->gc_block < 1 && !aggressive) { ++ dev->gc_block = yaffs2_find_refresh_block(dev); ++ dev->gc_chunk = 0; ++ dev->n_clean_ups = 0; ++ } ++ if (dev->gc_block < 1) { ++ dev->gc_block = ++ yaffs_find_gc_block(dev, aggressive, background); ++ dev->gc_chunk = 0; ++ dev->n_clean_ups = 0; ++ } ++ ++ if (dev->gc_block > 0) { ++ dev->all_gcs++; ++ if (!aggressive) ++ dev->passive_gc_count++; ++ ++ yaffs_trace(YAFFS_TRACE_GC, ++ "yaffs: GC n_erased_blocks %d aggressive %d", ++ dev->n_erased_blocks, aggressive); ++ ++ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive); ++ } ++ ++ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && ++ dev->gc_block > 0) { ++ yaffs_trace(YAFFS_TRACE_GC, ++ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d", ++ dev->n_erased_blocks, max_tries, ++ dev->gc_block); ++ } ++ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) && ++ (dev->gc_block > 0) && (max_tries < 2)); ++ ++ return aggressive ? gc_ok : YAFFS_OK; ++} ++ ++/* ++ * yaffs_bg_gc() ++ * Garbage collects. Intended to be called from a background thread. ++ * Returns non-zero if at least half the free chunks are erased. 
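++ */
++
++/* Editor's illustrative sketch, not part of the upstream yaffs2 code in this
++ * patch: the "is at least half the free space already erased?" test that
++ * yaffs_bg_gc() below returns to its caller, written as a stand-alone
++ * predicate.  yaffs_example_half_free_erased() is an invented name.
++ */
++static inline int yaffs_example_half_free_erased(struct yaffs_dev *dev)
++{
++	int erased_chunks =
++		dev->n_erased_blocks * dev->param.chunks_per_block;
++
++	return erased_chunks > dev->n_free_chunks / 2;
++}
++
++/*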
++ */ ++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency) ++{ ++ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; ++ ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency); ++ ++ yaffs_check_gc(dev, 1); ++ return erased_chunks > dev->n_free_chunks / 2; ++} ++ ++/*-------------------- Data file manipulation -----------------*/ ++ ++static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer) ++{ ++ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL); ++ ++ if (nand_chunk >= 0) ++ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk, ++ buffer, NULL); ++ else { ++ yaffs_trace(YAFFS_TRACE_NANDACCESS, ++ "Chunk %d not found zero instead", ++ nand_chunk); ++ /* get sane (zero) data if you read a hole */ ++ memset(buffer, 0, in->my_dev->data_bytes_per_chunk); ++ return 0; ++ } ++ ++} ++ ++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash, ++ int lyn) ++{ ++ int block; ++ int page; ++ struct yaffs_ext_tags tags; ++ struct yaffs_block_info *bi; ++ ++ if (chunk_id <= 0) ++ return; ++ ++ dev->n_deletions++; ++ block = chunk_id / dev->param.chunks_per_block; ++ page = chunk_id % dev->param.chunks_per_block; ++ ++ if (!yaffs_check_chunk_bit(dev, block, page)) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Deleting invalid chunk %d", chunk_id); ++ ++ bi = yaffs_get_block_info(dev, block); ++ ++ yaffs2_update_oldest_dirty_seq(dev, block, bi); ++ ++ yaffs_trace(YAFFS_TRACE_DELETION, ++ "line %d delete of chunk %d", ++ lyn, chunk_id); ++ ++ if (!dev->param.is_yaffs2 && mark_flash && ++ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) { ++ ++ memset(&tags, 0, sizeof(tags)); ++ tags.is_deleted = 1; ++ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags); ++ yaffs_handle_chunk_update(dev, chunk_id, &tags); ++ } else { ++ dev->n_unmarked_deletions++; ++ } ++ ++ /* Pull out of the management area. ++ * If the whole block became dirty, this will kick off an erasure. ++ */ ++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING || ++ bi->block_state == YAFFS_BLOCK_STATE_FULL || ++ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || ++ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { ++ dev->n_free_chunks++; ++ yaffs_clear_chunk_bit(dev, block, page); ++ bi->pages_in_use--; ++ ++ if (bi->pages_in_use == 0 && ++ !bi->has_shrink_hdr && ++ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING && ++ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) { ++ yaffs_block_became_dirty(dev, block); ++ } ++ } ++} ++ ++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, ++ const u8 *buffer, int n_bytes, int use_reserve) ++{ ++ /* Find old chunk Need to do this to get serial number ++ * Write new one and patch into tree. ++ * Invalidate old tags. ++ */ ++ ++ int prev_chunk_id; ++ struct yaffs_ext_tags prev_tags; ++ int new_chunk_id; ++ struct yaffs_ext_tags new_tags; ++ struct yaffs_dev *dev = in->my_dev; ++ ++ yaffs_check_gc(dev, 0); ++ ++ /* Get the previous chunk at this location in the file if it exists. ++ * If it does not exist then put a zero into the tree. This creates ++ * the tnode now, rather than later when it is harder to clean up. ++ */ ++ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags); ++ if (prev_chunk_id < 1 && ++ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0)) ++ return 0; ++ ++ /* Set up new tags */ ++ memset(&new_tags, 0, sizeof(new_tags)); ++ ++ new_tags.chunk_id = inode_chunk; ++ new_tags.obj_id = in->obj_id; ++ new_tags.serial_number = ++ (prev_chunk_id > 0) ? 
prev_tags.serial_number + 1 : 1; ++ new_tags.n_bytes = n_bytes; ++ ++ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "Writing %d bytes to chunk!!!!!!!!!", ++ n_bytes); ++ BUG(); ++ } ++ ++ new_chunk_id = ++ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve); ++ ++ if (new_chunk_id > 0) { ++ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0); ++ ++ if (prev_chunk_id > 0) ++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); ++ ++ yaffs_verify_file_sane(in); ++ } ++ return new_chunk_id; ++ ++} ++ ++ ++ ++static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set, ++ const YCHAR *name, const void *value, int size, ++ int flags) ++{ ++ struct yaffs_xattr_mod xmod; ++ int result; ++ ++ xmod.set = set; ++ xmod.name = name; ++ xmod.data = value; ++ xmod.size = size; ++ xmod.flags = flags; ++ xmod.result = -ENOSPC; ++ ++ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod); ++ ++ if (result > 0) ++ return xmod.result; ++ else ++ return -ENOSPC; ++} ++ ++static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer, ++ struct yaffs_xattr_mod *xmod) ++{ ++ int retval = 0; ++ int x_offs = sizeof(struct yaffs_obj_hdr); ++ struct yaffs_dev *dev = obj->my_dev; ++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); ++ char *x_buffer = buffer + x_offs; ++ ++ if (xmod->set) ++ retval = ++ nval_set(x_buffer, x_size, xmod->name, xmod->data, ++ xmod->size, xmod->flags); ++ else ++ retval = nval_del(x_buffer, x_size, xmod->name); ++ ++ obj->has_xattr = nval_hasvalues(x_buffer, x_size); ++ obj->xattr_known = 1; ++ xmod->result = retval; ++ ++ return retval; ++} ++ ++static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name, ++ void *value, int size) ++{ ++ char *buffer = NULL; ++ int result; ++ struct yaffs_ext_tags tags; ++ struct yaffs_dev *dev = obj->my_dev; ++ int x_offs = sizeof(struct yaffs_obj_hdr); ++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); ++ char *x_buffer; ++ int retval = 0; ++ ++ if (obj->hdr_chunk < 1) ++ return -ENODATA; ++ ++ /* If we know that the object has no xattribs then don't do all the ++ * reading and parsing. 
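++	 * has_xattr and xattr_known are cached the first time the object
++	 * header chunk is parsed, so the common no-xattr case skips the
++	 * NAND read entirely.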
++ */ ++ if (obj->xattr_known && !obj->has_xattr) { ++ if (name) ++ return -ENODATA; ++ else ++ return 0; ++ } ++ ++ buffer = (char *)yaffs_get_temp_buffer(dev); ++ if (!buffer) ++ return -ENOMEM; ++ ++ result = ++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags); ++ ++ if (result != YAFFS_OK) ++ retval = -ENOENT; ++ else { ++ x_buffer = buffer + x_offs; ++ ++ if (!obj->xattr_known) { ++ obj->has_xattr = nval_hasvalues(x_buffer, x_size); ++ obj->xattr_known = 1; ++ } ++ ++ if (name) ++ retval = nval_get(x_buffer, x_size, name, value, size); ++ else ++ retval = nval_list(x_buffer, x_size, value, size); ++ } ++ yaffs_release_temp_buffer(dev, (u8 *) buffer); ++ return retval; ++} ++ ++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name, ++ const void *value, int size, int flags) ++{ ++ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); ++} ++ ++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name) ++{ ++ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); ++} ++ ++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value, ++ int size) ++{ ++ return yaffs_do_xattrib_fetch(obj, name, value, size); ++} ++ ++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size) ++{ ++ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); ++} ++ ++static void yaffs_check_obj_details_loaded(struct yaffs_obj *in) ++{ ++ u8 *buf; ++ struct yaffs_obj_hdr *oh; ++ struct yaffs_dev *dev; ++ struct yaffs_ext_tags tags; ++ int result; ++ int alloc_failed = 0; ++ ++ if (!in || !in->lazy_loaded || in->hdr_chunk < 1) ++ return; ++ ++ dev = in->my_dev; ++ in->lazy_loaded = 0; ++ buf = yaffs_get_temp_buffer(dev); ++ ++ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags); ++ oh = (struct yaffs_obj_hdr *)buf; ++ ++ in->yst_mode = oh->yst_mode; ++ yaffs_load_attribs(in, oh); ++ yaffs_set_obj_name_from_oh(in, oh); ++ ++ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { ++ in->variant.symlink_variant.alias = ++ yaffs_clone_str(oh->alias); ++ if (!in->variant.symlink_variant.alias) ++ alloc_failed = 1; /* Not returned */ ++ } ++ yaffs_release_temp_buffer(dev, buf); ++} ++ ++static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name, ++ const YCHAR *oh_name, int buff_size) ++{ ++#ifdef CONFIG_YAFFS_AUTO_UNICODE ++ if (dev->param.auto_unicode) { ++ if (*oh_name) { ++ /* It is an ASCII name, do an ASCII to ++ * unicode conversion */ ++ const char *ascii_oh_name = (const char *)oh_name; ++ int n = buff_size - 1; ++ while (n > 0 && *ascii_oh_name) { ++ *name = *ascii_oh_name; ++ name++; ++ ascii_oh_name++; ++ n--; ++ } ++ } else { ++ strncpy(name, oh_name + 1, buff_size - 1); ++ } ++ } else { ++#else ++ (void) dev; ++ { ++#endif ++ strncpy(name, oh_name, buff_size - 1); ++ } ++} ++ ++static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name, ++ const YCHAR *name) ++{ ++#ifdef CONFIG_YAFFS_AUTO_UNICODE ++ ++ int is_ascii; ++ YCHAR *w; ++ ++ if (dev->param.auto_unicode) { ++ ++ is_ascii = 1; ++ w = name; ++ ++ /* Figure out if the name will fit in ascii character set */ ++ while (is_ascii && *w) { ++ if ((*w) & 0xff00) ++ is_ascii = 0; ++ w++; ++ } ++ ++ if (is_ascii) { ++ /* It is an ASCII name, so convert unicode to ascii */ ++ char *ascii_oh_name = (char *)oh_name; ++ int n = YAFFS_MAX_NAME_LENGTH - 1; ++ while (n > 0 && *name) { ++ *ascii_oh_name = *name; ++ name++; ++ ascii_oh_name++; ++ n--; ++ } ++ } else { ++ /* Unicode name, so save starting at the second YCHAR */ ++ *oh_name = 0; ++ 
strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2); ++ } ++ } else { ++#else ++ dev = dev; ++ { ++#endif ++ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1); ++ } ++} ++ ++/* UpdateObjectHeader updates the header on NAND for an object. ++ * If name is not NULL, then that new name is used. ++ */ ++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force, ++ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod) ++{ ++ ++ struct yaffs_block_info *bi; ++ struct yaffs_dev *dev = in->my_dev; ++ int prev_chunk_id; ++ int ret_val = 0; ++ int result = 0; ++ int new_chunk_id; ++ struct yaffs_ext_tags new_tags; ++ struct yaffs_ext_tags old_tags; ++ const YCHAR *alias = NULL; ++ u8 *buffer = NULL; ++ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1]; ++ struct yaffs_obj_hdr *oh = NULL; ++ loff_t file_size = 0; ++ ++ strcpy(old_name, _Y("silly old name")); ++ ++ if (in->fake && in != dev->root_dir && !force && !xmod) ++ return ret_val; ++ ++ yaffs_check_gc(dev, 0); ++ yaffs_check_obj_details_loaded(in); ++ ++ buffer = yaffs_get_temp_buffer(in->my_dev); ++ oh = (struct yaffs_obj_hdr *)buffer; ++ ++ prev_chunk_id = in->hdr_chunk; ++ ++ if (prev_chunk_id > 0) { ++ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id, ++ buffer, &old_tags); ++ ++ yaffs_verify_oh(in, oh, &old_tags, 0); ++ memcpy(old_name, oh->name, sizeof(oh->name)); ++ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr)); ++ } else { ++ memset(buffer, 0xff, dev->data_bytes_per_chunk); ++ } ++ ++ oh->type = in->variant_type; ++ oh->yst_mode = in->yst_mode; ++ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows; ++ ++ yaffs_load_attribs_oh(oh, in); ++ ++ if (in->parent) ++ oh->parent_obj_id = in->parent->obj_id; ++ else ++ oh->parent_obj_id = 0; ++ ++ if (name && *name) { ++ memset(oh->name, 0, sizeof(oh->name)); ++ yaffs_load_oh_from_name(dev, oh->name, name); ++ } else if (prev_chunk_id > 0) { ++ memcpy(oh->name, old_name, sizeof(oh->name)); ++ } else { ++ memset(oh->name, 0, sizeof(oh->name)); ++ } ++ ++ oh->is_shrink = is_shrink; ++ ++ switch (in->variant_type) { ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ /* Should not happen */ ++ break; ++ case YAFFS_OBJECT_TYPE_FILE: ++ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED && ++ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED) ++ file_size = in->variant.file_variant.file_size; ++ yaffs_oh_size_load(oh, file_size); ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ oh->equiv_id = in->variant.hardlink_variant.equiv_id; ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ alias = in->variant.symlink_variant.alias; ++ if (!alias) ++ alias = _Y("no alias"); ++ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH); ++ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0; ++ break; ++ } ++ ++ /* process any xattrib modifications */ ++ if (xmod) ++ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod); ++ ++ /* Tags */ ++ memset(&new_tags, 0, sizeof(new_tags)); ++ in->serial++; ++ new_tags.chunk_id = 0; ++ new_tags.obj_id = in->obj_id; ++ new_tags.serial_number = in->serial; ++ ++ /* Add extra info for file header */ ++ new_tags.extra_available = 1; ++ new_tags.extra_parent_id = oh->parent_obj_id; ++ new_tags.extra_file_size = file_size; ++ new_tags.extra_is_shrink = oh->is_shrink; ++ new_tags.extra_equiv_id = oh->equiv_id; ++ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 
1 : 0; ++ new_tags.extra_obj_type = in->variant_type; ++ yaffs_verify_oh(in, oh, &new_tags, 1); ++ ++ /* Create new chunk in NAND */ ++ new_chunk_id = ++ yaffs_write_new_chunk(dev, buffer, &new_tags, ++ (prev_chunk_id > 0) ? 1 : 0); ++ ++ if (buffer) ++ yaffs_release_temp_buffer(dev, buffer); ++ ++ if (new_chunk_id < 0) ++ return new_chunk_id; ++ ++ in->hdr_chunk = new_chunk_id; ++ ++ if (prev_chunk_id > 0) ++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); ++ ++ if (!yaffs_obj_cache_dirty(in)) ++ in->dirty = 0; ++ ++ /* If this was a shrink, then mark the block ++ * that the chunk lives on */ ++ if (is_shrink) { ++ bi = yaffs_get_block_info(in->my_dev, ++ new_chunk_id / ++ in->my_dev->param.chunks_per_block); ++ bi->has_shrink_hdr = 1; ++ } ++ ++ ++ return new_chunk_id; ++} ++ ++/*--------------------- File read/write ------------------------ ++ * Read and write have very similar structures. ++ * In general the read/write has three parts to it ++ * An incomplete chunk to start with (if the read/write is not chunk-aligned) ++ * Some complete chunks ++ * An incomplete chunk to end off with ++ * ++ * Curve-balls: the first chunk might also be the last chunk. ++ */ ++ ++int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes) ++{ ++ int chunk; ++ u32 start; ++ int n_copy; ++ int n = n_bytes; ++ int n_done = 0; ++ struct yaffs_cache *cache; ++ struct yaffs_dev *dev; ++ ++ dev = in->my_dev; ++ ++ while (n > 0) { ++ yaffs_addr_to_chunk(dev, offset, &chunk, &start); ++ chunk++; ++ ++ /* OK now check for the curveball where the start and end are in ++ * the same chunk. ++ */ ++ if ((start + n) < dev->data_bytes_per_chunk) ++ n_copy = n; ++ else ++ n_copy = dev->data_bytes_per_chunk - start; ++ ++ cache = yaffs_find_chunk_cache(in, chunk); ++ ++ /* If the chunk is already in the cache or it is less than ++ * a whole chunk or we're using inband tags then use the cache ++ * (if there is caching) else bypass the cache. ++ */ ++ if (cache || n_copy != dev->data_bytes_per_chunk || ++ dev->param.inband_tags) { ++ if (dev->param.n_caches > 0) { ++ ++ /* If we can't find the data in the cache, ++ * then load it up. */ ++ ++ if (!cache) { ++ cache = ++ yaffs_grab_chunk_cache(in->my_dev); ++ cache->object = in; ++ cache->chunk_id = chunk; ++ cache->dirty = 0; ++ cache->locked = 0; ++ yaffs_rd_data_obj(in, chunk, ++ cache->data); ++ cache->n_bytes = 0; ++ } ++ ++ yaffs_use_cache(dev, cache, 0); ++ ++ cache->locked = 1; ++ ++ memcpy(buffer, &cache->data[start], n_copy); ++ ++ cache->locked = 0; ++ } else { ++ /* Read into the local buffer then copy.. */ ++ ++ u8 *local_buffer = ++ yaffs_get_temp_buffer(dev); ++ yaffs_rd_data_obj(in, chunk, local_buffer); ++ ++ memcpy(buffer, &local_buffer[start], n_copy); ++ ++ yaffs_release_temp_buffer(dev, local_buffer); ++ } ++ } else { ++ /* A full chunk. Read directly into the buffer. 
*/ ++ yaffs_rd_data_obj(in, chunk, buffer); ++ } ++ n -= n_copy; ++ offset += n_copy; ++ buffer += n_copy; ++ n_done += n_copy; ++ } ++ return n_done; ++} ++ ++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset, ++ int n_bytes, int write_through) ++{ ++ ++ int chunk; ++ u32 start; ++ int n_copy; ++ int n = n_bytes; ++ int n_done = 0; ++ int n_writeback; ++ loff_t start_write = offset; ++ int chunk_written = 0; ++ u32 n_bytes_read; ++ loff_t chunk_start; ++ struct yaffs_dev *dev; ++ ++ dev = in->my_dev; ++ ++ while (n > 0 && chunk_written >= 0) { ++ yaffs_addr_to_chunk(dev, offset, &chunk, &start); ++ ++ if (((loff_t)chunk) * ++ dev->data_bytes_per_chunk + start != offset || ++ start >= dev->data_bytes_per_chunk) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "AddrToChunk of offset %lld gives chunk %d start %d", ++ offset, chunk, start); ++ } ++ chunk++; /* File pos to chunk in file offset */ ++ ++ /* OK now check for the curveball where the start and end are in ++ * the same chunk. ++ */ ++ ++ if ((start + n) < dev->data_bytes_per_chunk) { ++ n_copy = n; ++ ++ /* Now calculate how many bytes to write back.... ++ * If we're overwriting and not writing to then end of ++ * file then we need to write back as much as was there ++ * before. ++ */ ++ ++ chunk_start = (((loff_t)(chunk - 1)) * ++ dev->data_bytes_per_chunk); ++ ++ if (chunk_start > in->variant.file_variant.file_size) ++ n_bytes_read = 0; /* Past end of file */ ++ else ++ n_bytes_read = ++ in->variant.file_variant.file_size - ++ chunk_start; ++ ++ if (n_bytes_read > dev->data_bytes_per_chunk) ++ n_bytes_read = dev->data_bytes_per_chunk; ++ ++ n_writeback = ++ (n_bytes_read > ++ (start + n)) ? n_bytes_read : (start + n); ++ ++ if (n_writeback < 0 || ++ n_writeback > dev->data_bytes_per_chunk) ++ BUG(); ++ ++ } else { ++ n_copy = dev->data_bytes_per_chunk - start; ++ n_writeback = dev->data_bytes_per_chunk; ++ } ++ ++ if (n_copy != dev->data_bytes_per_chunk || ++ !dev->param.cache_bypass_aligned || ++ dev->param.inband_tags) { ++ /* An incomplete start or end chunk (or maybe both ++ * start and end chunk), or we're using inband tags, ++ * or we're forcing writes through the cache, ++ * so we want to use the cache buffers. ++ */ ++ if (dev->param.n_caches > 0) { ++ struct yaffs_cache *cache; ++ ++ /* If we can't find the data in the cache, then ++ * load the cache */ ++ cache = yaffs_find_chunk_cache(in, chunk); ++ ++ if (!cache && ++ yaffs_check_alloc_available(dev, 1)) { ++ cache = yaffs_grab_chunk_cache(dev); ++ cache->object = in; ++ cache->chunk_id = chunk; ++ cache->dirty = 0; ++ cache->locked = 0; ++ yaffs_rd_data_obj(in, chunk, ++ cache->data); ++ } else if (cache && ++ !cache->dirty && ++ !yaffs_check_alloc_available(dev, ++ 1)) { ++ /* Drop the cache if it was a read cache ++ * item and no space check has been made ++ * for it. ++ */ ++ cache = NULL; ++ } ++ ++ if (cache) { ++ yaffs_use_cache(dev, cache, 1); ++ cache->locked = 1; ++ ++ memcpy(&cache->data[start], buffer, ++ n_copy); ++ ++ cache->locked = 0; ++ cache->n_bytes = n_writeback; ++ ++ if (write_through) { ++ chunk_written = ++ yaffs_wr_data_obj ++ (cache->object, ++ cache->chunk_id, ++ cache->data, ++ cache->n_bytes, 1); ++ cache->dirty = 0; ++ } ++ } else { ++ chunk_written = -1; /* fail write */ ++ } ++ } else { ++ /* An incomplete start or end chunk (or maybe ++ * both start and end chunk). Read into the ++ * local buffer then copy over and write back. 
++ */ ++ ++ u8 *local_buffer = yaffs_get_temp_buffer(dev); ++ ++ yaffs_rd_data_obj(in, chunk, local_buffer); ++ memcpy(&local_buffer[start], buffer, n_copy); ++ ++ chunk_written = ++ yaffs_wr_data_obj(in, chunk, ++ local_buffer, ++ n_writeback, 0); ++ ++ yaffs_release_temp_buffer(dev, local_buffer); ++ } ++ } else { ++ /* A full chunk. Write directly from the buffer. */ ++ ++ chunk_written = ++ yaffs_wr_data_obj(in, chunk, buffer, ++ dev->data_bytes_per_chunk, 0); ++ ++ /* Since we've overwritten the cached data, ++ * we better invalidate it. */ ++ yaffs_invalidate_chunk_cache(in, chunk); ++ } ++ ++ if (chunk_written >= 0) { ++ n -= n_copy; ++ offset += n_copy; ++ buffer += n_copy; ++ n_done += n_copy; ++ } ++ } ++ ++ /* Update file object */ ++ ++ if ((start_write + n_done) > in->variant.file_variant.file_size) ++ in->variant.file_variant.file_size = (start_write + n_done); ++ ++ in->dirty = 1; ++ return n_done; ++} ++ ++int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset, ++ int n_bytes, int write_through) ++{ ++ yaffs2_handle_hole(in, offset); ++ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through); ++} ++ ++/* ---------------------- File resizing stuff ------------------ */ ++ ++static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size) ++{ ++ ++ struct yaffs_dev *dev = in->my_dev; ++ loff_t old_size = in->variant.file_variant.file_size; ++ int i; ++ int chunk_id; ++ u32 dummy; ++ int last_del; ++ int start_del; ++ ++ if (old_size > 0) ++ yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy); ++ else ++ last_del = 0; ++ ++ yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1, ++ &start_del, &dummy); ++ last_del++; ++ start_del++; ++ ++ /* Delete backwards so that we don't end up with holes if ++ * power is lost part-way through the operation. ++ */ ++ for (i = last_del; i >= start_del; i--) { ++ /* NB this could be optimised somewhat, ++ * eg. 
could retrieve the tags and write them without ++ * using yaffs_chunk_del ++ */ ++ ++ chunk_id = yaffs_find_del_file_chunk(in, i, NULL); ++ ++ if (chunk_id < 1) ++ continue; ++ ++ if (chunk_id < ++ (dev->internal_start_block * dev->param.chunks_per_block) || ++ chunk_id >= ++ ((dev->internal_end_block + 1) * ++ dev->param.chunks_per_block)) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Found daft chunk_id %d for %d", ++ chunk_id, i); ++ } else { ++ in->n_data_chunks--; ++ yaffs_chunk_del(dev, chunk_id, 1, __LINE__); ++ } ++ } ++} ++ ++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size) ++{ ++ int new_full; ++ u32 new_partial; ++ struct yaffs_dev *dev = obj->my_dev; ++ ++ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial); ++ ++ yaffs_prune_chunks(obj, new_size); ++ ++ if (new_partial != 0) { ++ int last_chunk = 1 + new_full; ++ u8 *local_buffer = yaffs_get_temp_buffer(dev); ++ ++ /* Rewrite the last chunk with its new size and zero pad */ ++ yaffs_rd_data_obj(obj, last_chunk, local_buffer); ++ memset(local_buffer + new_partial, 0, ++ dev->data_bytes_per_chunk - new_partial); ++ ++ yaffs_wr_data_obj(obj, last_chunk, local_buffer, ++ new_partial, 1); ++ ++ yaffs_release_temp_buffer(dev, local_buffer); ++ } ++ ++ obj->variant.file_variant.file_size = new_size; ++ ++ yaffs_prune_tree(dev, &obj->variant.file_variant); ++} ++ ++int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size) ++{ ++ struct yaffs_dev *dev = in->my_dev; ++ loff_t old_size = in->variant.file_variant.file_size; ++ ++ yaffs_flush_file_cache(in); ++ yaffs_invalidate_whole_cache(in); ++ ++ yaffs_check_gc(dev, 0); ++ ++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) ++ return YAFFS_FAIL; ++ ++ if (new_size == old_size) ++ return YAFFS_OK; ++ ++ if (new_size > old_size) { ++ yaffs2_handle_hole(in, new_size); ++ in->variant.file_variant.file_size = new_size; ++ } else { ++ /* new_size < old_size */ ++ yaffs_resize_file_down(in, new_size); ++ } ++ ++ /* Write a new object header to reflect the resize. ++ * show we've shrunk the file, if need be ++ * Do this only if the file is not in the deleted directories ++ * and is not shadowed. ++ */ ++ if (in->parent && ++ !in->is_shadowed && ++ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED && ++ in->parent->obj_id != YAFFS_OBJECTID_DELETED) ++ yaffs_update_oh(in, NULL, 0, 0, 0, NULL); ++ ++ return YAFFS_OK; ++} ++ ++int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync) ++{ ++ if (!in->dirty) ++ return YAFFS_OK; ++ ++ yaffs_flush_file_cache(in); ++ ++ if (data_sync) ++ return YAFFS_OK; ++ ++ if (update_time) ++ yaffs_load_current_time(in, 0, 0); ++ ++ return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ? ++ YAFFS_OK : YAFFS_FAIL; ++} ++ ++ ++/* yaffs_del_file deletes the whole file data ++ * and the inode associated with the file. ++ * It does not delete the links associated with the file. 
++ */ ++static int yaffs_unlink_file_if_needed(struct yaffs_obj *in) ++{ ++ int ret_val; ++ int del_now = 0; ++ struct yaffs_dev *dev = in->my_dev; ++ ++ if (!in->my_inode) ++ del_now = 1; ++ ++ if (del_now) { ++ ret_val = ++ yaffs_change_obj_name(in, in->my_dev->del_dir, ++ _Y("deleted"), 0, 0); ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "yaffs: immediate deletion of file %d", ++ in->obj_id); ++ in->deleted = 1; ++ in->my_dev->n_deleted_files++; ++ if (dev->param.disable_soft_del || dev->param.is_yaffs2) ++ yaffs_resize_file(in, 0); ++ yaffs_soft_del_file(in); ++ } else { ++ ret_val = ++ yaffs_change_obj_name(in, in->my_dev->unlinked_dir, ++ _Y("unlinked"), 0, 0); ++ } ++ return ret_val; ++} ++ ++static int yaffs_del_file(struct yaffs_obj *in) ++{ ++ int ret_val = YAFFS_OK; ++ int deleted; /* Need to cache value on stack if in is freed */ ++ struct yaffs_dev *dev = in->my_dev; ++ ++ if (dev->param.disable_soft_del || dev->param.is_yaffs2) ++ yaffs_resize_file(in, 0); ++ ++ if (in->n_data_chunks > 0) { ++ /* Use soft deletion if there is data in the file. ++ * That won't be the case if it has been resized to zero. ++ */ ++ if (!in->unlinked) ++ ret_val = yaffs_unlink_file_if_needed(in); ++ ++ deleted = in->deleted; ++ ++ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) { ++ in->deleted = 1; ++ deleted = 1; ++ in->my_dev->n_deleted_files++; ++ yaffs_soft_del_file(in); ++ } ++ return deleted ? YAFFS_OK : YAFFS_FAIL; ++ } else { ++ /* The file has no data chunks so we toss it immediately */ ++ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top); ++ in->variant.file_variant.top = NULL; ++ yaffs_generic_obj_del(in); ++ ++ return YAFFS_OK; ++ } ++} ++ ++int yaffs_is_non_empty_dir(struct yaffs_obj *obj) ++{ ++ return (obj && ++ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) && ++ !(list_empty(&obj->variant.dir_variant.children)); ++} ++ ++static int yaffs_del_dir(struct yaffs_obj *obj) ++{ ++ /* First check that the directory is empty. */ ++ if (yaffs_is_non_empty_dir(obj)) ++ return YAFFS_FAIL; ++ ++ return yaffs_generic_obj_del(obj); ++} ++ ++static int yaffs_del_symlink(struct yaffs_obj *in) ++{ ++ kfree(in->variant.symlink_variant.alias); ++ in->variant.symlink_variant.alias = NULL; ++ ++ return yaffs_generic_obj_del(in); ++} ++ ++static int yaffs_del_link(struct yaffs_obj *in) ++{ ++ /* remove this hardlink from the list associated with the equivalent ++ * object ++ */ ++ list_del_init(&in->hard_links); ++ return yaffs_generic_obj_del(in); ++} ++ ++int yaffs_del_obj(struct yaffs_obj *obj) ++{ ++ int ret_val = -1; ++ ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ ret_val = yaffs_del_file(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ if (!list_empty(&obj->variant.dir_variant.dirty)) { ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, ++ "Remove object %d from dirty directories", ++ obj->obj_id); ++ list_del_init(&obj->variant.dir_variant.dirty); ++ } ++ return yaffs_del_dir(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ ret_val = yaffs_del_symlink(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ ret_val = yaffs_del_link(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ ret_val = yaffs_generic_obj_del(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ ret_val = 0; ++ break; /* should not happen. 
*/ ++ } ++ return ret_val; ++} ++ ++ ++static void yaffs_empty_dir_to_dir(struct yaffs_obj *from_dir, ++ struct yaffs_obj *to_dir) ++{ ++ struct yaffs_obj *obj; ++ struct list_head *lh; ++ struct list_head *n; ++ ++ list_for_each_safe(lh, n, &from_dir->variant.dir_variant.children) { ++ obj = list_entry(lh, struct yaffs_obj, siblings); ++ yaffs_add_obj_to_dir(to_dir, obj); ++ } ++} ++ ++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj, ++ enum yaffs_obj_type type) ++{ ++ /* Tear down the old variant */ ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ /* Nuke file data */ ++ yaffs_resize_file(obj, 0); ++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top); ++ obj->variant.file_variant.top = NULL; ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ /* Put the children in lost and found. */ ++ yaffs_empty_dir_to_dir(obj, obj->my_dev->lost_n_found); ++ if (!list_empty(&obj->variant.dir_variant.dirty)) ++ list_del_init(&obj->variant.dir_variant.dirty); ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ /* Nuke symplink data */ ++ kfree(obj->variant.symlink_variant.alias); ++ obj->variant.symlink_variant.alias = NULL; ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ list_del_init(&obj->hard_links); ++ break; ++ default: ++ break; ++ } ++ ++ memset(&obj->variant, 0, sizeof(obj->variant)); ++ ++ /*Set up new variant if the memset is not enough. */ ++ switch (type) { ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ INIT_LIST_HEAD(&obj->variant.dir_variant.children); ++ INIT_LIST_HEAD(&obj->variant.dir_variant.dirty); ++ break; ++ case YAFFS_OBJECT_TYPE_FILE: ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ default: ++ break; ++ } ++ ++ obj->variant_type = type; ++ ++ return obj; ++ ++} ++ ++static int yaffs_unlink_worker(struct yaffs_obj *obj) ++{ ++ int del_now = 0; ++ ++ if (!obj) ++ return YAFFS_FAIL; ++ ++ if (!obj->my_inode) ++ del_now = 1; ++ ++ yaffs_update_parent(obj->parent); ++ ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { ++ return yaffs_del_link(obj); ++ } else if (!list_empty(&obj->hard_links)) { ++ /* Curve ball: We're unlinking an object that has a hardlink. ++ * ++ * This problem arises because we are not strictly following ++ * The Linux link/inode model. ++ * ++ * We can't really delete the object. ++ * Instead, we do the following: ++ * - Select a hardlink. ++ * - Unhook it from the hard links ++ * - Move it from its parent directory so that the rename works. ++ * - Rename the object to the hardlink's name. 
++ * - Delete the hardlink ++ */ ++ ++ struct yaffs_obj *hl; ++ struct yaffs_obj *parent; ++ int ret_val; ++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; ++ ++ hl = list_entry(obj->hard_links.next, struct yaffs_obj, ++ hard_links); ++ ++ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1); ++ parent = hl->parent; ++ ++ list_del_init(&hl->hard_links); ++ ++ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl); ++ ++ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0); ++ ++ if (ret_val == YAFFS_OK) ++ ret_val = yaffs_generic_obj_del(hl); ++ ++ return ret_val; ++ ++ } else if (del_now) { ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ return yaffs_del_file(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ list_del_init(&obj->variant.dir_variant.dirty); ++ return yaffs_del_dir(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ return yaffs_del_symlink(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ return yaffs_generic_obj_del(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ default: ++ return YAFFS_FAIL; ++ } ++ } else if (yaffs_is_non_empty_dir(obj)) { ++ return YAFFS_FAIL; ++ } else { ++ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir, ++ _Y("unlinked"), 0, 0); ++ } ++} ++ ++static int yaffs_unlink_obj(struct yaffs_obj *obj) ++{ ++ if (obj && obj->unlink_allowed) ++ return yaffs_unlink_worker(obj); ++ ++ return YAFFS_FAIL; ++} ++ ++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name) ++{ ++ struct yaffs_obj *obj; ++ ++ obj = yaffs_find_by_name(dir, name); ++ return yaffs_unlink_obj(obj); ++} ++ ++/* Note: ++ * If old_name is NULL then we take old_dir as the object to be renamed. ++ */ ++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name, ++ struct yaffs_obj *new_dir, const YCHAR *new_name) ++{ ++ struct yaffs_obj *obj = NULL; ++ struct yaffs_obj *existing_target = NULL; ++ int force = 0; ++ int result; ++ struct yaffs_dev *dev; ++ ++ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ BUG(); ++ return YAFFS_FAIL; ++ } ++ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ BUG(); ++ return YAFFS_FAIL; ++ } ++ ++ dev = old_dir->my_dev; ++ ++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE ++ /* Special case for case insemsitive systems. ++ * While look-up is case insensitive, the name isn't. ++ * Therefore we might want to change x.txt to X.txt ++ */ ++ if (old_dir == new_dir && ++ old_name && new_name && ++ strcmp(old_name, new_name) == 0) ++ force = 1; ++#endif ++ ++ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) > ++ YAFFS_MAX_NAME_LENGTH) ++ /* ENAMETOOLONG */ ++ return YAFFS_FAIL; ++ ++ if (old_name) ++ obj = yaffs_find_by_name(old_dir, old_name); ++ else{ ++ obj = old_dir; ++ old_dir = obj->parent; ++ } ++ ++ if (obj && obj->rename_allowed) { ++ /* Now handle an existing target, if there is one */ ++ existing_target = yaffs_find_by_name(new_dir, new_name); ++ if (yaffs_is_non_empty_dir(existing_target)) { ++ return YAFFS_FAIL; /* ENOTEMPTY */ ++ } else if (existing_target && existing_target != obj) { ++ /* Nuke the target first, using shadowing, ++ * but only if it isn't the same object. ++ * ++ * Note we must disable gc here otherwise it can mess ++ * up the shadowing. 
++ * ++ */ ++ dev->gc_disable = 1; ++ yaffs_change_obj_name(obj, new_dir, new_name, force, ++ existing_target->obj_id); ++ existing_target->is_shadowed = 1; ++ yaffs_unlink_obj(existing_target); ++ dev->gc_disable = 0; ++ } ++ ++ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0); ++ ++ yaffs_update_parent(old_dir); ++ if (new_dir != old_dir) ++ yaffs_update_parent(new_dir); ++ ++ return result; ++ } ++ return YAFFS_FAIL; ++} ++ ++/*----------------------- Initialisation Scanning ---------------------- */ ++ ++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id, ++ int backward_scanning) ++{ ++ struct yaffs_obj *obj; ++ ++ if (backward_scanning) { ++ /* Handle YAFFS2 case (backward scanning) ++ * If the shadowed object exists then ignore. ++ */ ++ obj = yaffs_find_by_number(dev, obj_id); ++ if (obj) ++ return; ++ } ++ ++ /* Let's create it (if it does not exist) assuming it is a file so that ++ * it can do shrinking etc. ++ * We put it in unlinked dir to be cleaned up after the scanning ++ */ ++ obj = ++ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE); ++ if (!obj) ++ return; ++ obj->is_shadowed = 1; ++ yaffs_add_obj_to_dir(dev->unlinked_dir, obj); ++ obj->variant.file_variant.shrink_size = 0; ++ obj->valid = 1; /* So that we don't read any other info. */ ++} ++ ++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list) ++{ ++ struct list_head *lh; ++ struct list_head *save; ++ struct yaffs_obj *hl; ++ struct yaffs_obj *in; ++ ++ list_for_each_safe(lh, save, hard_list) { ++ hl = list_entry(lh, struct yaffs_obj, hard_links); ++ in = yaffs_find_by_number(dev, ++ hl->variant.hardlink_variant.equiv_id); ++ ++ if (in) { ++ /* Add the hardlink pointers */ ++ hl->variant.hardlink_variant.equiv_obj = in; ++ list_add(&hl->hard_links, &in->hard_links); ++ } else { ++ /* Todo Need to report/handle this better. ++ * Got a problem... hardlink to a non-existant object ++ */ ++ hl->variant.hardlink_variant.equiv_obj = NULL; ++ INIT_LIST_HEAD(&hl->hard_links); ++ } ++ } ++} ++ ++static void yaffs_strip_deleted_objs(struct yaffs_dev *dev) ++{ ++ /* ++ * Sort out state of unlinked and deleted objects after scanning. ++ */ ++ struct list_head *i; ++ struct list_head *n; ++ struct yaffs_obj *l; ++ ++ if (dev->read_only) ++ return; ++ ++ /* Soft delete all the unlinked files */ ++ list_for_each_safe(i, n, ++ &dev->unlinked_dir->variant.dir_variant.children) { ++ l = list_entry(i, struct yaffs_obj, siblings); ++ yaffs_del_obj(l); ++ } ++ ++ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) { ++ l = list_entry(i, struct yaffs_obj, siblings); ++ yaffs_del_obj(l); ++ } ++} ++ ++/* ++ * This code iterates through all the objects making sure that they are rooted. ++ * Any unrooted objects are re-rooted in lost+found. ++ * An object needs to be in one of: ++ * - Directly under deleted, unlinked ++ * - Directly or indirectly under root. ++ * ++ * Note: ++ * This code assumes that we don't ever change the current relationships ++ * between directories: ++ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL ++ * lost-n-found->parent == root_dir ++ * ++ * This fixes the problem where directories might have inadvertently been ++ * deleted leaving the object "hanging" without being rooted in the ++ * directory tree. 
++ */ ++ ++static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj) ++{ ++ return (obj == dev->del_dir || ++ obj == dev->unlinked_dir || obj == dev->root_dir); ++} ++ ++static void yaffs_fix_hanging_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_obj *parent; ++ int i; ++ struct list_head *lh; ++ struct list_head *n; ++ int depth_limit; ++ int hanging; ++ ++ if (dev->read_only) ++ return; ++ ++ /* Iterate through the objects in each hash entry, ++ * looking at each object. ++ * Make sure it is rooted. ++ */ ++ ++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { ++ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) { ++ obj = list_entry(lh, struct yaffs_obj, hash_link); ++ parent = obj->parent; ++ ++ if (yaffs_has_null_parent(dev, obj)) { ++ /* These directories are not hanging */ ++ hanging = 0; ++ } else if (!parent || ++ parent->variant_type != ++ YAFFS_OBJECT_TYPE_DIRECTORY) { ++ hanging = 1; ++ } else if (yaffs_has_null_parent(dev, parent)) { ++ hanging = 0; ++ } else { ++ /* ++ * Need to follow the parent chain to ++ * see if it is hanging. ++ */ ++ hanging = 0; ++ depth_limit = 100; ++ ++ while (parent != dev->root_dir && ++ parent->parent && ++ parent->parent->variant_type == ++ YAFFS_OBJECT_TYPE_DIRECTORY && ++ depth_limit > 0) { ++ parent = parent->parent; ++ depth_limit--; ++ } ++ if (parent != dev->root_dir) ++ hanging = 1; ++ } ++ if (hanging) { ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "Hanging object %d moved to lost and found", ++ obj->obj_id); ++ yaffs_add_obj_to_dir(dev->lost_n_found, obj); ++ } ++ } ++ } ++} ++ ++/* ++ * Delete directory contents for cleaning up lost and found. ++ */ ++static void yaffs_del_dir_contents(struct yaffs_obj *dir) ++{ ++ struct yaffs_obj *obj; ++ struct list_head *lh; ++ struct list_head *n; ++ ++ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) ++ BUG(); ++ ++ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) { ++ obj = list_entry(lh, struct yaffs_obj, siblings); ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) ++ yaffs_del_dir_contents(obj); ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "Deleting lost_found object %d", ++ obj->obj_id); ++ yaffs_unlink_obj(obj); ++ } ++} ++ ++static void yaffs_empty_l_n_f(struct yaffs_dev *dev) ++{ ++ yaffs_del_dir_contents(dev->lost_n_found); ++} ++ ++ ++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory, ++ const YCHAR *name) ++{ ++ int sum; ++ struct list_head *i; ++ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1]; ++ struct yaffs_obj *l; ++ ++ if (!name) ++ return NULL; ++ ++ if (!directory) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "tragedy: yaffs_find_by_name: null pointer directory" ++ ); ++ BUG(); ++ return NULL; ++ } ++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "tragedy: yaffs_find_by_name: non-directory" ++ ); ++ BUG(); ++ } ++ ++ sum = yaffs_calc_name_sum(name); ++ ++ list_for_each(i, &directory->variant.dir_variant.children) { ++ l = list_entry(i, struct yaffs_obj, siblings); ++ ++ if (l->parent != directory) ++ BUG(); ++ ++ yaffs_check_obj_details_loaded(l); ++ ++ /* Special case for lost-n-found */ ++ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { ++ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME)) ++ return l; ++ } else if (l->sum == sum || l->hdr_chunk <= 0) { ++ /* LostnFound chunk called Objxxx ++ * Do a real check ++ */ ++ yaffs_get_obj_name(l, buffer, ++ YAFFS_MAX_NAME_LENGTH + 1); ++ if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH)) ++ return l; ++ } ++ } ++ return NULL; ++} ++ 
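An aside on the lookup that just ended above: each object caches a 16-bit sum of its name (the sum field declared later in yaffs_guts.h), so the directory scan can reject most children by comparing sums and only fetches and compares the real name when the sums match, or when there is no header chunk to trust. The fragment below is a minimal, self-contained sketch of that pattern, not part of the patch itself; dir_entry, sum16() and find_by_name() are hypothetical stand-ins for illustration, and sum16() in particular is not the actual yaffs_calc_name_sum() algorithm.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative directory entry: a cached 16-bit name sum plus the name.
 * In YAFFS the sum lives in struct yaffs_obj and the name in the object
 * header on NAND; here both are plain fields for simplicity. */
struct dir_entry {
	uint16_t sum;		/* cheap fingerprint of the name */
	const char *name;	/* stands in for the stored name */
};

/* Stand-in checksum: any cheap, stable 16-bit hash of the name will do.
 * (Not the real yaffs_calc_name_sum().) */
static uint16_t sum16(const char *name)
{
	uint16_t sum = 0;

	while (*name)
		sum = (uint16_t)(sum * 31 + (uint8_t)*name++);
	return sum;
}

/* Checksum-accelerated lookup: compare the 16-bit sums first and only do
 * the relatively expensive full string compare when they match. */
static const struct dir_entry *find_by_name(const struct dir_entry *entries,
					    size_t n, const char *name)
{
	uint16_t want = sum16(name);
	size_t i;

	for (i = 0; i < n; i++) {
		if (entries[i].sum != want)
			continue;		/* fast reject, no name fetch */
		if (strcmp(entries[i].name, name) == 0)
			return &entries[i];	/* confirmed by full compare */
	}
	return NULL;
}

int main(void)
{
	struct dir_entry dir[] = {
		{ 0, "boot.cfg" }, { 0, "kernel.img" }, { 0, "rootfs.img" },
	};
	size_t i, n = sizeof(dir) / sizeof(dir[0]);
	const struct dir_entry *hit;

	for (i = 0; i < n; i++)		/* pre-compute sums, as create/rename would */
		dir[i].sum = sum16(dir[i].name);

	hit = find_by_name(dir, n, "kernel.img");
	printf("found: %s\n", hit ? hit->name : "(none)");
	return 0;
}

Because a 16-bit sum can collide, a matching sum is only a hint; that is why yaffs_find_by_name above still confirms the candidate with a full name comparison, and also falls back to the full compare when hdr_chunk <= 0, i.e. when no trusted stored name is available.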
++/* GetEquivalentObject dereferences any hard links to get to the ++ * actual object. ++ */ ++ ++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj) ++{ ++ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { ++ obj = obj->variant.hardlink_variant.equiv_obj; ++ yaffs_check_obj_details_loaded(obj); ++ } ++ return obj; ++} ++ ++/* ++ * A note or two on object names. ++ * * If the object name is missing, we then make one up in the form objnnn ++ * ++ * * ASCII names are stored in the object header's name field from byte zero ++ * * Unicode names are historically stored starting from byte zero. ++ * ++ * Then there are automatic Unicode names... ++ * The purpose of these is to save names in a way that can be read as ++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII ++ * system to share files. ++ * ++ * These automatic unicode are stored slightly differently... ++ * - If the name can fit in the ASCII character space then they are saved as ++ * ascii names as per above. ++ * - If the name needs Unicode then the name is saved in Unicode ++ * starting at oh->name[1]. ++ ++ */ ++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name, ++ int buffer_size) ++{ ++ /* Create an object name if we could not find one. */ ++ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) { ++ YCHAR local_name[20]; ++ YCHAR num_string[20]; ++ YCHAR *x = &num_string[19]; ++ unsigned v = obj->obj_id; ++ num_string[19] = 0; ++ while (v > 0) { ++ x--; ++ *x = '0' + (v % 10); ++ v /= 10; ++ } ++ /* make up a name */ ++ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX); ++ strcat(local_name, x); ++ strncpy(name, local_name, buffer_size - 1); ++ } ++} ++ ++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size) ++{ ++ memset(name, 0, buffer_size * sizeof(YCHAR)); ++ yaffs_check_obj_details_loaded(obj); ++ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { ++ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1); ++ } else if (obj->short_name[0]) { ++ strcpy(name, obj->short_name); ++ } else if (obj->hdr_chunk > 0) { ++ int result; ++ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev); ++ ++ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer; ++ ++ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk); ++ ++ if (obj->hdr_chunk > 0) { ++ result = yaffs_rd_chunk_tags_nand(obj->my_dev, ++ obj->hdr_chunk, ++ buffer, NULL); ++ } ++ yaffs_load_name_from_oh(obj->my_dev, name, oh->name, ++ buffer_size); ++ ++ yaffs_release_temp_buffer(obj->my_dev, buffer); ++ } ++ ++ yaffs_fix_null_name(obj, name, buffer_size); ++ ++ return strnlen(name, YAFFS_MAX_NAME_LENGTH); ++} ++ ++loff_t yaffs_get_obj_length(struct yaffs_obj *obj) ++{ ++ /* Dereference any hard linking */ ++ obj = yaffs_get_equivalent_obj(obj); ++ ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) ++ return obj->variant.file_variant.file_size; ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { ++ if (!obj->variant.symlink_variant.alias) ++ return 0; ++ return strnlen(obj->variant.symlink_variant.alias, ++ YAFFS_MAX_ALIAS_LENGTH); ++ } else { ++ /* Only a directory should drop through to here */ ++ return obj->my_dev->data_bytes_per_chunk; ++ } ++} ++ ++int yaffs_get_obj_link_count(struct yaffs_obj *obj) ++{ ++ int count = 0; ++ struct list_head *i; ++ ++ if (!obj->unlinked) ++ count++; /* the object itself */ ++ ++ list_for_each(i, &obj->hard_links) ++ count++; /* add the hard links; */ ++ ++ return count; ++} ++ ++int yaffs_get_obj_inode(struct yaffs_obj *obj) ++{ ++ obj = 
yaffs_get_equivalent_obj(obj); ++ ++ return obj->obj_id; ++} ++ ++unsigned yaffs_get_obj_type(struct yaffs_obj *obj) ++{ ++ obj = yaffs_get_equivalent_obj(obj); ++ ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ return DT_REG; ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ return DT_DIR; ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ return DT_LNK; ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ return DT_REG; ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ if (S_ISFIFO(obj->yst_mode)) ++ return DT_FIFO; ++ if (S_ISCHR(obj->yst_mode)) ++ return DT_CHR; ++ if (S_ISBLK(obj->yst_mode)) ++ return DT_BLK; ++ if (S_ISSOCK(obj->yst_mode)) ++ return DT_SOCK; ++ return DT_REG; ++ break; ++ default: ++ return DT_REG; ++ break; ++ } ++} ++ ++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj) ++{ ++ obj = yaffs_get_equivalent_obj(obj); ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) ++ return yaffs_clone_str(obj->variant.symlink_variant.alias); ++ else ++ return yaffs_clone_str(_Y("")); ++} ++ ++/*--------------------------- Initialisation code -------------------------- */ ++ ++static int yaffs_check_dev_fns(struct yaffs_dev *dev) ++{ ++ struct yaffs_driver *drv = &dev->drv; ++ struct yaffs_tags_handler *tagger = &dev->tagger; ++ ++ /* Common functions, gotta have */ ++ if (!drv->drv_read_chunk_fn || ++ !drv->drv_write_chunk_fn || ++ !drv->drv_erase_fn) ++ return 0; ++ ++ if (dev->param.is_yaffs2 && ++ (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn)) ++ return 0; ++ ++ /* Install the default tags marshalling functions if needed. */ ++ yaffs_tags_compat_install(dev); ++ yaffs_tags_marshall_install(dev); ++ ++ /* Check we now have the marshalling functions required. */ ++ if (!tagger->write_chunk_tags_fn || ++ !tagger->read_chunk_tags_fn || ++ !tagger->query_block_fn || ++ !tagger->mark_bad_fn) ++ return 0; ++ ++ return 1; ++} ++ ++static int yaffs_create_initial_dir(struct yaffs_dev *dev) ++{ ++ /* Initialise the unlinked, deleted, root and lost+found directories */ ++ dev->lost_n_found = dev->root_dir = NULL; ++ dev->unlinked_dir = dev->del_dir = NULL; ++ dev->unlinked_dir = ++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR); ++ dev->del_dir = ++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR); ++ dev->root_dir = ++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT, ++ YAFFS_ROOT_MODE | S_IFDIR); ++ dev->lost_n_found = ++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND, ++ YAFFS_LOSTNFOUND_MODE | S_IFDIR); ++ ++ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir ++ && dev->del_dir) { ++ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found); ++ return YAFFS_OK; ++ } ++ return YAFFS_FAIL; ++} ++ ++/* Low level init. ++ * Typically only used by yaffs_guts_initialise, but also used by the ++ * Low level yaffs driver tests. 
++ */ ++ ++int yaffs_guts_ll_init(struct yaffs_dev *dev) ++{ ++ ++ ++ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()"); ++ ++ if (!dev) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: Need a device" ++ ); ++ return YAFFS_FAIL; ++ } ++ ++ if (dev->ll_init) ++ return YAFFS_OK; ++ ++ dev->internal_start_block = dev->param.start_block; ++ dev->internal_end_block = dev->param.end_block; ++ dev->block_offset = 0; ++ dev->chunk_offset = 0; ++ dev->n_free_chunks = 0; ++ ++ dev->gc_block = 0; ++ ++ if (dev->param.start_block == 0) { ++ dev->internal_start_block = dev->param.start_block + 1; ++ dev->internal_end_block = dev->param.end_block + 1; ++ dev->block_offset = 1; ++ dev->chunk_offset = dev->param.chunks_per_block; ++ } ++ ++ /* Check geometry parameters. */ ++ ++ if ((!dev->param.inband_tags && dev->param.is_yaffs2 && ++ dev->param.total_bytes_per_chunk < 1024) || ++ (!dev->param.is_yaffs2 && ++ dev->param.total_bytes_per_chunk < 512) || ++ (dev->param.inband_tags && !dev->param.is_yaffs2) || ++ dev->param.chunks_per_block < 2 || ++ dev->param.n_reserved_blocks < 2 || ++ dev->internal_start_block <= 0 || ++ dev->internal_end_block <= 0 || ++ dev->internal_end_block <= ++ (dev->internal_start_block + dev->param.n_reserved_blocks + 2) ++ ) { ++ /* otherwise it is too small */ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ", ++ dev->param.total_bytes_per_chunk, ++ dev->param.is_yaffs2 ? "2" : "", ++ dev->param.inband_tags); ++ return YAFFS_FAIL; ++ } ++ ++ /* Sort out space for inband tags, if required */ ++ if (dev->param.inband_tags) ++ dev->data_bytes_per_chunk = ++ dev->param.total_bytes_per_chunk - ++ sizeof(struct yaffs_packed_tags2_tags_only); ++ else ++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk; ++ ++ /* Got the right mix of functions? */ ++ if (!yaffs_check_dev_fns(dev)) { ++ /* Function missing */ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "device function(s) missing or wrong"); ++ ++ return YAFFS_FAIL; ++ } ++ ++ if (yaffs_init_nand(dev) != YAFFS_OK) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed"); ++ return YAFFS_FAIL; ++ } ++ ++ return YAFFS_OK; ++} ++ ++ ++int yaffs_guts_format_dev(struct yaffs_dev *dev) ++{ ++ int i; ++ enum yaffs_block_state state; ++ u32 dummy; ++ ++ if(yaffs_guts_ll_init(dev) != YAFFS_OK) ++ return YAFFS_FAIL; ++ ++ if(dev->is_mounted) ++ return YAFFS_FAIL; ++ ++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ yaffs_query_init_block_state(dev, i, &state, &dummy); ++ if (state != YAFFS_BLOCK_STATE_DEAD) ++ yaffs_erase_block(dev, i); ++ } ++ ++ return YAFFS_OK; ++} ++ ++ ++int yaffs_guts_initialise(struct yaffs_dev *dev) ++{ ++ int init_failed = 0; ++ unsigned x; ++ int bits; ++ ++ if(yaffs_guts_ll_init(dev) != YAFFS_OK) ++ return YAFFS_FAIL; ++ ++ if (dev->is_mounted) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted"); ++ return YAFFS_FAIL; ++ } ++ ++ dev->is_mounted = 1; ++ ++ /* OK now calculate a few things for the device */ ++ ++ /* ++ * Calculate all the chunk size manipulation numbers: ++ */ ++ x = dev->data_bytes_per_chunk; ++ /* We always use dev->chunk_shift and dev->chunk_div */ ++ dev->chunk_shift = calc_shifts(x); ++ x >>= dev->chunk_shift; ++ dev->chunk_div = x; ++ /* We only use chunk mask if chunk_div is 1 */ ++ dev->chunk_mask = (1 << dev->chunk_shift) - 1; ++ ++ /* ++ * Calculate chunk_grp_bits. 
++ * We need to find the next power of 2 > than internal_end_block ++ */ ++ ++ x = dev->param.chunks_per_block * (dev->internal_end_block + 1); ++ ++ bits = calc_shifts_ceiling(x); ++ ++ /* Set up tnode width if wide tnodes are enabled. */ ++ if (!dev->param.wide_tnodes_disabled) { ++ /* bits must be even so that we end up with 32-bit words */ ++ if (bits & 1) ++ bits++; ++ if (bits < 16) ++ dev->tnode_width = 16; ++ else ++ dev->tnode_width = bits; ++ } else { ++ dev->tnode_width = 16; ++ } ++ ++ dev->tnode_mask = (1 << dev->tnode_width) - 1; ++ ++ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled), ++ * so if the bitwidth of the ++ * chunk range we're using is greater than 16 we need ++ * to figure out chunk shift and chunk_grp_size ++ */ ++ ++ if (bits <= dev->tnode_width) ++ dev->chunk_grp_bits = 0; ++ else ++ dev->chunk_grp_bits = bits - dev->tnode_width; ++ ++ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8; ++ if (dev->tnode_size < sizeof(struct yaffs_tnode)) ++ dev->tnode_size = sizeof(struct yaffs_tnode); ++ ++ dev->chunk_grp_size = 1 << dev->chunk_grp_bits; ++ ++ if (dev->param.chunks_per_block < dev->chunk_grp_size) { ++ /* We have a problem because the soft delete won't work if ++ * the chunk group size > chunks per block. ++ * This can be remedied by using larger "virtual blocks". ++ */ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large"); ++ ++ return YAFFS_FAIL; ++ } ++ ++ /* Finished verifying the device, continue with initialisation */ ++ ++ /* More device initialisation */ ++ dev->all_gcs = 0; ++ dev->passive_gc_count = 0; ++ dev->oldest_dirty_gc_count = 0; ++ dev->bg_gcs = 0; ++ dev->gc_block_finder = 0; ++ dev->buffered_block = -1; ++ dev->doing_buffered_block_rewrite = 0; ++ dev->n_deleted_files = 0; ++ dev->n_bg_deletions = 0; ++ dev->n_unlinked_files = 0; ++ dev->n_ecc_fixed = 0; ++ dev->n_ecc_unfixed = 0; ++ dev->n_tags_ecc_fixed = 0; ++ dev->n_tags_ecc_unfixed = 0; ++ dev->n_erase_failures = 0; ++ dev->n_erased_blocks = 0; ++ dev->gc_disable = 0; ++ dev->has_pending_prioritised_gc = 1; ++ /* Assume the worst for now, will get fixed on first GC */ ++ INIT_LIST_HEAD(&dev->dirty_dirs); ++ dev->oldest_dirty_seq = 0; ++ dev->oldest_dirty_block = 0; ++ ++ /* Initialise temporary buffers and caches. 
*/ ++ if (!yaffs_init_tmp_buffers(dev)) ++ init_failed = 1; ++ ++ dev->cache = NULL; ++ dev->gc_cleanup_list = NULL; ++ ++ if (!init_failed && dev->param.n_caches > 0) { ++ int i; ++ void *buf; ++ int cache_bytes = ++ dev->param.n_caches * sizeof(struct yaffs_cache); ++ ++ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES) ++ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES; ++ ++ dev->cache = kmalloc(cache_bytes, GFP_NOFS); ++ ++ buf = (u8 *) dev->cache; ++ ++ if (dev->cache) ++ memset(dev->cache, 0, cache_bytes); ++ ++ for (i = 0; i < dev->param.n_caches && buf; i++) { ++ dev->cache[i].object = NULL; ++ dev->cache[i].last_use = 0; ++ dev->cache[i].dirty = 0; ++ dev->cache[i].data = buf = ++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); ++ } ++ if (!buf) ++ init_failed = 1; ++ ++ dev->cache_last_use = 0; ++ } ++ ++ dev->cache_hits = 0; ++ ++ if (!init_failed) { ++ dev->gc_cleanup_list = ++ kmalloc(dev->param.chunks_per_block * sizeof(u32), ++ GFP_NOFS); ++ if (!dev->gc_cleanup_list) ++ init_failed = 1; ++ } ++ ++ if (dev->param.is_yaffs2) ++ dev->param.use_header_file_size = 1; ++ ++ if (!init_failed && !yaffs_init_blocks(dev)) ++ init_failed = 1; ++ ++ yaffs_init_tnodes_and_objs(dev); ++ ++ if (!init_failed && !yaffs_create_initial_dir(dev)) ++ init_failed = 1; ++ ++ if (!init_failed && dev->param.is_yaffs2 && ++ !dev->param.disable_summary && ++ !yaffs_summary_init(dev)) ++ init_failed = 1; ++ ++ if (!init_failed) { ++ /* Now scan the flash. */ ++ if (dev->param.is_yaffs2) { ++ if (yaffs2_checkpt_restore(dev)) { ++ yaffs_check_obj_details_loaded(dev->root_dir); ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT | ++ YAFFS_TRACE_MOUNT, ++ "yaffs: restored from checkpoint" ++ ); ++ } else { ++ ++ /* Clean up the mess caused by an aborted ++ * checkpoint load then scan backwards. 
++ */ ++ yaffs_deinit_blocks(dev); ++ ++ yaffs_deinit_tnodes_and_objs(dev); ++ ++ dev->n_erased_blocks = 0; ++ dev->n_free_chunks = 0; ++ dev->alloc_block = -1; ++ dev->alloc_page = -1; ++ dev->n_deleted_files = 0; ++ dev->n_unlinked_files = 0; ++ dev->n_bg_deletions = 0; ++ ++ if (!init_failed && !yaffs_init_blocks(dev)) ++ init_failed = 1; ++ ++ yaffs_init_tnodes_and_objs(dev); ++ ++ if (!init_failed ++ && !yaffs_create_initial_dir(dev)) ++ init_failed = 1; ++ ++ if (!init_failed && !yaffs2_scan_backwards(dev)) ++ init_failed = 1; ++ } ++ } else if (!yaffs1_scan(dev)) { ++ init_failed = 1; ++ } ++ ++ yaffs_strip_deleted_objs(dev); ++ yaffs_fix_hanging_objs(dev); ++ if (dev->param.empty_lost_n_found) ++ yaffs_empty_l_n_f(dev); ++ } ++ ++ if (init_failed) { ++ /* Clean up the mess */ ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "yaffs: yaffs_guts_initialise() aborted."); ++ ++ yaffs_deinitialise(dev); ++ return YAFFS_FAIL; ++ } ++ ++ /* Zero out stats */ ++ dev->n_page_reads = 0; ++ dev->n_page_writes = 0; ++ dev->n_erasures = 0; ++ dev->n_gc_copies = 0; ++ dev->n_retried_writes = 0; ++ ++ dev->n_retired_blocks = 0; ++ ++ yaffs_verify_free_chunks(dev); ++ yaffs_verify_blocks(dev); ++ ++ /* Clean up any aborted checkpoint data */ ++ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0) ++ yaffs2_checkpt_invalidate(dev); ++ ++ yaffs_trace(YAFFS_TRACE_TRACING, ++ "yaffs: yaffs_guts_initialise() done."); ++ return YAFFS_OK; ++} ++ ++void yaffs_deinitialise(struct yaffs_dev *dev) ++{ ++ if (dev->is_mounted) { ++ int i; ++ ++ yaffs_deinit_blocks(dev); ++ yaffs_deinit_tnodes_and_objs(dev); ++ yaffs_summary_deinit(dev); ++ ++ if (dev->param.n_caches > 0 && dev->cache) { ++ ++ for (i = 0; i < dev->param.n_caches; i++) { ++ kfree(dev->cache[i].data); ++ dev->cache[i].data = NULL; ++ } ++ ++ kfree(dev->cache); ++ dev->cache = NULL; ++ } ++ ++ kfree(dev->gc_cleanup_list); ++ ++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) ++ kfree(dev->temp_buffer[i].buffer); ++ ++ dev->is_mounted = 0; ++ ++ yaffs_deinit_nand(dev); ++ } ++} ++ ++int yaffs_count_free_chunks(struct yaffs_dev *dev) ++{ ++ int n_free = 0; ++ int b; ++ struct yaffs_block_info *blk; ++ ++ blk = dev->block_info; ++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) { ++ switch (blk->block_state) { ++ case YAFFS_BLOCK_STATE_EMPTY: ++ case YAFFS_BLOCK_STATE_ALLOCATING: ++ case YAFFS_BLOCK_STATE_COLLECTING: ++ case YAFFS_BLOCK_STATE_FULL: ++ n_free += ++ (dev->param.chunks_per_block - blk->pages_in_use + ++ blk->soft_del_pages); ++ break; ++ default: ++ break; ++ } ++ blk++; ++ } ++ return n_free; ++} ++ ++int yaffs_get_n_free_chunks(struct yaffs_dev *dev) ++{ ++ /* This is what we report to the outside world */ ++ int n_free; ++ int n_dirty_caches; ++ int blocks_for_checkpt; ++ int i; ++ ++ n_free = dev->n_free_chunks; ++ n_free += dev->n_deleted_files; ++ ++ /* Now count and subtract the number of dirty chunks in the cache. */ ++ ++ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) { ++ if (dev->cache[i].dirty) ++ n_dirty_caches++; ++ } ++ ++ n_free -= n_dirty_caches; ++ ++ n_free -= ++ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block); ++ ++ /* Now figure checkpoint space and report that... */ ++ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev); ++ ++ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block); ++ ++ if (n_free < 0) ++ n_free = 0; ++ ++ return n_free; ++} ++ ++ ++ ++/* ++ * Marshalling functions to get loff_t file sizes into and out of ++ * object headers. 
++ */ ++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize) ++{ ++ oh->file_size_low = (fsize & 0xFFFFFFFF); ++ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF); ++} ++ ++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh) ++{ ++ loff_t retval; ++ ++ if (sizeof(loff_t) >= 8 && ~(oh->file_size_high)) ++ retval = (((loff_t) oh->file_size_high) << 32) | ++ (((loff_t) oh->file_size_low) & 0xFFFFFFFF); ++ else ++ retval = (loff_t) oh->file_size_low; ++ ++ return retval; ++} ++ ++ ++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]) ++{ ++ int i; ++ struct yaffs_block_info *bi; ++ int s; ++ ++ for(i = 0; i < 10; i++) ++ bs[i] = 0; ++ ++ for(i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ bi = yaffs_get_block_info(dev, i); ++ s = bi->block_state; ++ if(s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN) ++ bs[0]++; ++ else ++ bs[s]++; ++ } ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_guts.h linux-3.4.90/fs/yaffs2/yaffs_guts.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_guts.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_guts.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,1007 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_GUTS_H__ ++#define __YAFFS_GUTS_H__ ++ ++#include "yportenv.h" ++ ++#define YAFFS_OK 1 ++#define YAFFS_FAIL 0 ++ ++/* Give us a Y=0x59, ++ * Give us an A=0x41, ++ * Give us an FF=0xff ++ * Give us an S=0x53 ++ * And what have we got... ++ */ ++#define YAFFS_MAGIC 0x5941ff53 ++ ++/* ++ * Tnodes form a tree with the tnodes in "levels" ++ * Levels greater than 0 hold 8 slots which point to other tnodes. ++ * Those at level 0 hold 16 slots which point to chunks in NAND. ++ * ++ * A maximum level of 8 thust supports files of size up to: ++ * ++ * 2^(3*MAX_LEVEL+4) ++ * ++ * Thus a max level of 8 supports files with up to 2^^28 chunks which gives ++ * a maximum file size of around 512Gbytees with 2k chunks. 
++ */ ++#define YAFFS_NTNODES_LEVEL0 16 ++#define YAFFS_TNODES_LEVEL0_BITS 4 ++#define YAFFS_TNODES_LEVEL0_MASK 0xf ++ ++#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2) ++#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1) ++#define YAFFS_TNODES_INTERNAL_MASK 0x7 ++#define YAFFS_TNODES_MAX_LEVEL 8 ++#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \ ++ YAFFS_TNODES_INTERNAL_BITS * \ ++ YAFFS_TNODES_MAX_LEVEL) ++#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1) ++ ++#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff ++ ++/* Constants for YAFFS1 mode */ ++#define YAFFS_BYTES_PER_SPARE 16 ++#define YAFFS_BYTES_PER_CHUNK 512 ++#define YAFFS_CHUNK_SIZE_SHIFT 9 ++#define YAFFS_CHUNKS_PER_BLOCK 32 ++#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK) ++ ++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024 ++#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32 ++ ++ ++ ++#define YAFFS_ALLOCATION_NOBJECTS 100 ++#define YAFFS_ALLOCATION_NTNODES 100 ++#define YAFFS_ALLOCATION_NLINKS 100 ++ ++#define YAFFS_NOBJECT_BUCKETS 256 ++ ++#define YAFFS_OBJECT_SPACE 0x40000 ++#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1) ++ ++/* Binary data version stamps */ ++#define YAFFS_SUMMARY_VERSION 1 ++#define YAFFS_CHECKPOINT_VERSION 7 ++ ++#ifdef CONFIG_YAFFS_UNICODE ++#define YAFFS_MAX_NAME_LENGTH 127 ++#define YAFFS_MAX_ALIAS_LENGTH 79 ++#else ++#define YAFFS_MAX_NAME_LENGTH 255 ++#define YAFFS_MAX_ALIAS_LENGTH 159 ++#endif ++ ++#define YAFFS_SHORT_NAME_LENGTH 15 ++ ++/* Some special object ids for pseudo objects */ ++#define YAFFS_OBJECTID_ROOT 1 ++#define YAFFS_OBJECTID_LOSTNFOUND 2 ++#define YAFFS_OBJECTID_UNLINKED 3 ++#define YAFFS_OBJECTID_DELETED 4 ++ ++/* Fake object Id for summary data */ ++#define YAFFS_OBJECTID_SUMMARY 0x10 ++ ++/* Pseudo object ids for checkpointing */ ++#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20 ++#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21 ++ ++#define YAFFS_MAX_SHORT_OP_CACHES 20 ++ ++#define YAFFS_N_TEMP_BUFFERS 6 ++ ++/* We limit the number attempts at sucessfully saving a chunk of data. ++ * Small-page devices have 32 pages per block; large-page devices have 64. ++ * Default to something in the order of 5 to 10 blocks worth of chunks. ++ */ ++#define YAFFS_WR_ATTEMPTS (5*64) ++ ++/* Sequence numbers are used in YAFFS2 to determine block allocation order. ++ * The range is limited slightly to help distinguish bad numbers from good. ++ * This also allows us to perhaps in the future use special numbers for ++ * special purposes. ++ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years, ++ * and is a larger number than the lifetime of a 2GB device. ++ */ ++#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000 ++#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00 ++ ++/* Special sequence number for bad block that failed to be marked bad */ ++#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000 ++ ++/* ChunkCache is used for short read/write operations.*/ ++struct yaffs_cache { ++ struct yaffs_obj *object; ++ int chunk_id; ++ int last_use; ++ int dirty; ++ int n_bytes; /* Only valid if the cache is dirty */ ++ int locked; /* Can't push out or flush while locked. */ ++ u8 *data; ++}; ++ ++/* yaffs1 tags structures in RAM ++ * NB This uses bitfield. Bitfields should not straddle a u32 boundary ++ * otherwise the structure size will get blown out. 
++ */ ++ ++struct yaffs_tags { ++ u32 chunk_id:20; ++ u32 serial_number:2; ++ u32 n_bytes_lsb:10; ++ u32 obj_id:18; ++ u32 ecc:12; ++ u32 n_bytes_msb:2; ++}; ++ ++union yaffs_tags_union { ++ struct yaffs_tags as_tags; ++ u8 as_bytes[8]; ++}; ++ ++ ++/* Stuff used for extended tags in YAFFS2 */ ++ ++enum yaffs_ecc_result { ++ YAFFS_ECC_RESULT_UNKNOWN, ++ YAFFS_ECC_RESULT_NO_ERROR, ++ YAFFS_ECC_RESULT_FIXED, ++ YAFFS_ECC_RESULT_UNFIXED ++}; ++ ++enum yaffs_obj_type { ++ YAFFS_OBJECT_TYPE_UNKNOWN, ++ YAFFS_OBJECT_TYPE_FILE, ++ YAFFS_OBJECT_TYPE_SYMLINK, ++ YAFFS_OBJECT_TYPE_DIRECTORY, ++ YAFFS_OBJECT_TYPE_HARDLINK, ++ YAFFS_OBJECT_TYPE_SPECIAL ++}; ++ ++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL ++ ++struct yaffs_ext_tags { ++ unsigned chunk_used; /* Status of the chunk: used or unused */ ++ unsigned obj_id; /* If 0 this is not used */ ++ unsigned chunk_id; /* If 0 this is a header, else a data chunk */ ++ unsigned n_bytes; /* Only valid for data chunks */ ++ ++ /* The following stuff only has meaning when we read */ ++ enum yaffs_ecc_result ecc_result; ++ unsigned block_bad; ++ ++ /* YAFFS 1 stuff */ ++ unsigned is_deleted; /* The chunk is marked deleted */ ++ unsigned serial_number; /* Yaffs1 2-bit serial number */ ++ ++ /* YAFFS2 stuff */ ++ unsigned seq_number; /* The sequence number of this block */ ++ ++ /* Extra info if this is an object header (YAFFS2 only) */ ++ ++ unsigned extra_available; /* Extra info available if not zero */ ++ unsigned extra_parent_id; /* The parent object */ ++ unsigned extra_is_shrink; /* Is it a shrink header? */ ++ unsigned extra_shadows; /* Does this shadow another object? */ ++ ++ enum yaffs_obj_type extra_obj_type; /* What object type? */ ++ ++ loff_t extra_file_size; /* Length if it is a file */ ++ unsigned extra_equiv_id; /* Equivalent object for a hard link */ ++}; ++ ++/* Spare structure for YAFFS1 */ ++struct yaffs_spare { ++ u8 tb0; ++ u8 tb1; ++ u8 tb2; ++ u8 tb3; ++ u8 page_status; /* set to 0 to delete the chunk */ ++ u8 block_status; ++ u8 tb4; ++ u8 tb5; ++ u8 ecc1[3]; ++ u8 tb6; ++ u8 tb7; ++ u8 ecc2[3]; ++}; ++ ++/*Special structure for passing through to mtd */ ++struct yaffs_nand_spare { ++ struct yaffs_spare spare; ++ int eccres1; ++ int eccres2; ++}; ++ ++/* Block data in RAM */ ++ ++enum yaffs_block_state { ++ YAFFS_BLOCK_STATE_UNKNOWN = 0, ++ ++ YAFFS_BLOCK_STATE_SCANNING, ++ /* Being scanned */ ++ ++ YAFFS_BLOCK_STATE_NEEDS_SCAN, ++ /* The block might have something on it (ie it is allocating or full, ++ * perhaps empty) but it needs to be scanned to determine its true ++ * state. ++ * This state is only valid during scanning. ++ * NB We tolerate empty because the pre-scanner might be incapable of ++ * deciding ++ * However, if this state is returned on a YAFFS2 device, ++ * then we expect a sequence number ++ */ ++ ++ YAFFS_BLOCK_STATE_EMPTY, ++ /* This block is empty */ ++ ++ YAFFS_BLOCK_STATE_ALLOCATING, ++ /* This block is partially allocated. ++ * At least one page holds valid data. ++ * This is the one currently being used for page ++ * allocation. Should never be more than one of these. ++ * If a block is only partially allocated at mount it is treated as ++ * full. ++ */ ++ ++ YAFFS_BLOCK_STATE_FULL, ++ /* All the pages in this block have been allocated. ++ * If a block was only partially allocated when mounted we treat ++ * it as fully allocated. ++ */ ++ ++ YAFFS_BLOCK_STATE_DIRTY, ++ /* The block was full and now all chunks have been deleted. ++ * Erase me, reuse me. 
++ */ ++ ++ YAFFS_BLOCK_STATE_CHECKPOINT, ++ /* This block is assigned to holding checkpoint data. */ ++ ++ YAFFS_BLOCK_STATE_COLLECTING, ++ /* This block is being garbage collected */ ++ ++ YAFFS_BLOCK_STATE_DEAD ++ /* This block has failed and is not in use */ ++}; ++ ++#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1) ++ ++struct yaffs_block_info { ++ ++ s32 soft_del_pages:10; /* number of soft deleted pages */ ++ s32 pages_in_use:10; /* number of pages in use */ ++ u32 block_state:4; /* One of the above block states. */ ++ /* NB use unsigned because enum is sometimes ++ * an int */ ++ u32 needs_retiring:1; /* Data has failed on this block, */ ++ /*need to get valid data off and retire*/ ++ u32 skip_erased_check:1;/* Skip the erased check on this block */ ++ u32 gc_prioritise:1; /* An ECC check or blank check has failed. ++ Block should be prioritised for GC */ ++ u32 chunk_error_strikes:3; /* How many times we've had ecc etc ++ failures on this block and tried to reuse it */ ++ u32 has_summary:1; /* The block has a summary */ ++ ++ u32 has_shrink_hdr:1; /* This block has at least one shrink header */ ++ u32 seq_number; /* block sequence number for yaffs2 */ ++ ++}; ++ ++/* -------------------------- Object structure -------------------------------*/ ++/* This is the object structure as stored on NAND */ ++ ++struct yaffs_obj_hdr { ++ enum yaffs_obj_type type; ++ ++ /* Apply to everything */ ++ int parent_obj_id; ++ u16 sum_no_longer_used; /* checksum of name. No longer used */ ++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; ++ ++ /* The following apply to all object types except for hard links */ ++ u32 yst_mode; /* protection */ ++ ++ u32 yst_uid; ++ u32 yst_gid; ++ u32 yst_atime; ++ u32 yst_mtime; ++ u32 yst_ctime; ++ ++ /* File size applies to files only */ ++ u32 file_size_low; ++ ++ /* Equivalent object id applies to hard links only. */ ++ int equiv_id; ++ ++ /* Alias is for symlinks only. */ ++ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1]; ++ ++ u32 yst_rdev; /* stuff for block and char devices (major/min) */ ++ ++ u32 win_ctime[2]; ++ u32 win_atime[2]; ++ u32 win_mtime[2]; ++ ++ u32 inband_shadowed_obj_id; ++ u32 inband_is_shrink; ++ ++ u32 file_size_high; ++ u32 reserved[1]; ++ int shadows_obj; /* This object header shadows the ++ specified object if > 0 */ ++ ++ /* is_shrink applies to object headers written when wemake a hole. */ ++ u32 is_shrink; ++ ++}; ++ ++/*--------------------------- Tnode -------------------------- */ ++ ++struct yaffs_tnode { ++ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL]; ++}; ++ ++/*------------------------ Object -----------------------------*/ ++/* An object can be one of: ++ * - a directory (no data, has children links ++ * - a regular file (data.... not prunes :->). ++ * - a symlink [symbolic link] (the alias). 
++ * - a hard link ++ */ ++ ++struct yaffs_file_var { ++ loff_t file_size; ++ loff_t scanned_size; ++ loff_t shrink_size; ++ int top_level; ++ struct yaffs_tnode *top; ++}; ++ ++struct yaffs_dir_var { ++ struct list_head children; /* list of child links */ ++ struct list_head dirty; /* Entry for list of dirty directories */ ++}; ++ ++struct yaffs_symlink_var { ++ YCHAR *alias; ++}; ++ ++struct yaffs_hardlink_var { ++ struct yaffs_obj *equiv_obj; ++ u32 equiv_id; ++}; ++ ++union yaffs_obj_var { ++ struct yaffs_file_var file_variant; ++ struct yaffs_dir_var dir_variant; ++ struct yaffs_symlink_var symlink_variant; ++ struct yaffs_hardlink_var hardlink_variant; ++}; ++ ++struct yaffs_obj { ++ u8 deleted:1; /* This should only apply to unlinked files. */ ++ u8 soft_del:1; /* it has also been soft deleted */ ++ u8 unlinked:1; /* An unlinked file.*/ ++ u8 fake:1; /* A fake object has no presence on NAND. */ ++ u8 rename_allowed:1; /* Some objects cannot be renamed. */ ++ u8 unlink_allowed:1; ++ u8 dirty:1; /* the object needs to be written to flash */ ++ u8 valid:1; /* When the file system is being loaded up, this ++ * object might be created before the data ++ * is available ++ * ie. file data chunks encountered before ++ * the header. ++ */ ++ u8 lazy_loaded:1; /* This object has been lazy loaded and ++ * is missing some detail */ ++ ++ u8 defered_free:1; /* Object is removed from NAND, but is ++ * still in the inode cache. ++ * Free of object is defered. ++ * until the inode is released. ++ */ ++ u8 being_created:1; /* This object is still being created ++ * so skip some verification checks. */ ++ u8 is_shadowed:1; /* This object is shadowed on the way ++ * to being renamed. */ ++ ++ u8 xattr_known:1; /* We know if this has object has xattribs ++ * or not. */ ++ u8 has_xattr:1; /* This object has xattribs. ++ * Only valid if xattr_known. */ ++ ++ u8 serial; /* serial number of chunk in NAND.*/ ++ u16 sum; /* sum of the name to speed searching */ ++ ++ struct yaffs_dev *my_dev; /* The device I'm on */ ++ ++ struct list_head hash_link; /* list of objects in hash bucket */ ++ ++ struct list_head hard_links; /* hard linked object chain*/ ++ ++ /* directory structure stuff */ ++ /* also used for linking up the free list */ ++ struct yaffs_obj *parent; ++ struct list_head siblings; ++ ++ /* Where's my object header in NAND? */ ++ int hdr_chunk; ++ ++ int n_data_chunks; /* Number of data chunks for this file. */ ++ ++ u32 obj_id; /* the object id value */ ++ ++ u32 yst_mode; ++ ++ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1]; ++ ++#ifdef CONFIG_YAFFS_WINCE ++ u32 win_ctime[2]; ++ u32 win_mtime[2]; ++ u32 win_atime[2]; ++#else ++ u32 yst_uid; ++ u32 yst_gid; ++ u32 yst_atime; ++ u32 yst_mtime; ++ u32 yst_ctime; ++#endif ++ ++ u32 yst_rdev; ++ ++ void *my_inode; ++ ++ enum yaffs_obj_type variant_type; ++ ++ union yaffs_obj_var variant; ++ ++}; ++ ++struct yaffs_obj_bucket { ++ struct list_head list; ++ int count; ++}; ++ ++/* yaffs_checkpt_obj holds the definition of an object as dumped ++ * by checkpointing. ++ */ ++ ++struct yaffs_checkpt_obj { ++ int struct_type; ++ u32 obj_id; ++ u32 parent_id; ++ int hdr_chunk; ++ enum yaffs_obj_type variant_type:3; ++ u8 deleted:1; ++ u8 soft_del:1; ++ u8 unlinked:1; ++ u8 fake:1; ++ u8 rename_allowed:1; ++ u8 unlink_allowed:1; ++ u8 serial; ++ int n_data_chunks; ++ loff_t size_or_equiv_obj; ++}; ++ ++/*--------------------- Temporary buffers ---------------- ++ * ++ * These are chunk-sized working buffers. Each device has a few. 
++ */ ++ ++struct yaffs_buffer { ++ u8 *buffer; ++ int in_use; ++}; ++ ++/*----------------- Device ---------------------------------*/ ++ ++struct yaffs_param { ++ const YCHAR *name; ++ ++ /* ++ * Entry parameters set up way early. Yaffs sets up the rest. ++ * The structure should be zeroed out before use so that unused ++ * and default values are zero. ++ */ ++ ++ int inband_tags; /* Use unband tags */ ++ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to ++ be a power of 2 */ ++ int chunks_per_block; /* does not need to be a power of 2 */ ++ int spare_bytes_per_chunk; /* spare area size */ ++ int start_block; /* Start block we're allowed to use */ ++ int end_block; /* End block we're allowed to use */ ++ int n_reserved_blocks; /* Tuneable so that we can reduce ++ * reserved blocks on NOR and RAM. */ ++ ++ int n_caches; /* If <= 0, then short op caching is disabled, ++ * else the number of short op caches. ++ */ ++ int cache_bypass_aligned; /* If non-zero then bypass the cache for ++ * aligned writes. ++ */ ++ ++ int use_nand_ecc; /* Flag to decide whether or not to use ++ * NAND driver ECC on data (yaffs1) */ ++ int tags_9bytes; /* Use 9 byte tags */ ++ int no_tags_ecc; /* Flag to decide whether or not to do ECC ++ * on packed tags (yaffs2) */ ++ ++ int is_yaffs2; /* Use yaffs2 mode on this device */ ++ ++ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */ ++ ++ int refresh_period; /* How often to check for a block refresh */ ++ ++ /* Checkpoint control. Can be set before or after initialisation */ ++ u8 skip_checkpt_rd; ++ u8 skip_checkpt_wr; ++ ++ int enable_xattr; /* Enable xattribs */ ++ ++ int max_objects; /* ++ * Set to limit the number of objects created. ++ * 0 = no limit. ++ */ ++ ++ /* The remove_obj_fn function must be supplied by OS flavours that ++ * need it. ++ * yaffs direct uses it to implement the faster readdir. ++ * Linux uses it to protect the directory during unlocking. ++ */ ++ void (*remove_obj_fn) (struct yaffs_obj *obj); ++ ++ /* Callback to mark the superblock dirty */ ++ void (*sb_dirty_fn) (struct yaffs_dev *dev); ++ ++ /* Callback to control garbage collection. */ ++ unsigned (*gc_control_fn) (struct yaffs_dev *dev); ++ ++ /* Debug control flags. Don't use unless you know what you're doing */ ++ int use_header_file_size; /* Flag to determine if we should use ++ * file sizes from the header */ ++ int disable_lazy_load; /* Disable lazy loading on this device */ ++ int wide_tnodes_disabled; /* Set to disable wide tnodes */ ++ int disable_soft_del; /* yaffs 1 only: Set to disable the use of ++ * softdeletion. 
*/ ++ ++ int defered_dir_update; /* Set to defer directory updates */ ++ ++#ifdef CONFIG_YAFFS_AUTO_UNICODE ++ int auto_unicode; ++#endif ++ int always_check_erased; /* Force chunk erased check always on */ ++ ++ int disable_summary; ++ int disable_bad_block_marking; ++ ++}; ++ ++struct yaffs_driver { ++ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk, ++ const u8 *data, int data_len, ++ const u8 *oob, int oob_len); ++ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk, ++ u8 *data, int data_len, ++ u8 *oob, int oob_len, ++ enum yaffs_ecc_result *ecc_result); ++ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no); ++ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no); ++ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no); ++ int (*drv_initialise_fn) (struct yaffs_dev *dev); ++ int (*drv_deinitialise_fn) (struct yaffs_dev *dev); ++}; ++ ++struct yaffs_tags_handler { ++ int (*write_chunk_tags_fn) (struct yaffs_dev *dev, ++ int nand_chunk, const u8 *data, ++ const struct yaffs_ext_tags *tags); ++ int (*read_chunk_tags_fn) (struct yaffs_dev *dev, ++ int nand_chunk, u8 *data, ++ struct yaffs_ext_tags *tags); ++ ++ int (*query_block_fn) (struct yaffs_dev *dev, int block_no, ++ enum yaffs_block_state *state, ++ u32 *seq_number); ++ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no); ++}; ++ ++struct yaffs_dev { ++ struct yaffs_param param; ++ struct yaffs_driver drv; ++ struct yaffs_tags_handler tagger; ++ ++ /* Context storage. Holds extra OS specific data for this device */ ++ ++ void *os_context; ++ void *driver_context; ++ ++ struct list_head dev_list; ++ ++ int ll_init; ++ /* Runtime parameters. Set up by YAFFS. */ ++ int data_bytes_per_chunk; ++ ++ /* Non-wide tnode stuff */ ++ u16 chunk_grp_bits; /* Number of bits that need to be resolved if ++ * the tnodes are not wide enough. ++ */ ++ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */ ++ ++ /* Stuff to support wide tnodes */ ++ u32 tnode_width; ++ u32 tnode_mask; ++ u32 tnode_size; ++ ++ /* Stuff for figuring out file offset to chunk conversions */ ++ u32 chunk_shift; /* Shift value */ ++ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */ ++ u32 chunk_mask; /* Mask to use for power-of-2 case */ ++ ++ int is_mounted; ++ int read_only; ++ int is_checkpointed; ++ ++ /* Stuff to support block offsetting to support start block zero */ ++ int internal_start_block; ++ int internal_end_block; ++ int block_offset; ++ int chunk_offset; ++ ++ /* Runtime checkpointing stuff */ ++ int checkpt_page_seq; /* running sequence number of checkpt pages */ ++ int checkpt_byte_count; ++ int checkpt_byte_offs; ++ u8 *checkpt_buffer; ++ int checkpt_open_write; ++ int blocks_in_checkpt; ++ int checkpt_cur_chunk; ++ int checkpt_cur_block; ++ int checkpt_next_block; ++ int *checkpt_block_list; ++ int checkpt_max_blocks; ++ u32 checkpt_sum; ++ u32 checkpt_xor; ++ ++ int checkpoint_blocks_required; /* Number of blocks needed to store ++ * current checkpoint set */ ++ ++ /* Block Info */ ++ struct yaffs_block_info *block_info; ++ u8 *chunk_bits; /* bitmap of chunks in use */ ++ u8 block_info_alt:1; /* allocated using alternative alloc */ ++ u8 chunk_bits_alt:1; /* allocated using alternative alloc */ ++ int chunk_bit_stride; /* Number of bytes of chunk_bits per block. ++ * Must be consistent with chunks_per_block. 
++ */ ++ ++ int n_erased_blocks; ++ int alloc_block; /* Current block being allocated off */ ++ u32 alloc_page; ++ int alloc_block_finder; /* Used to search for next allocation block */ ++ ++ /* Object and Tnode memory management */ ++ void *allocator; ++ int n_obj; ++ int n_tnodes; ++ ++ int n_hardlinks; ++ ++ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS]; ++ u32 bucket_finder; ++ ++ int n_free_chunks; ++ ++ /* Garbage collection control */ ++ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */ ++ u32 n_clean_ups; ++ ++ unsigned has_pending_prioritised_gc; /* We think this device might ++ have pending prioritised gcs */ ++ unsigned gc_disable; ++ unsigned gc_block_finder; ++ unsigned gc_dirtiest; ++ unsigned gc_pages_in_use; ++ unsigned gc_not_done; ++ unsigned gc_block; ++ unsigned gc_chunk; ++ unsigned gc_skip; ++ struct yaffs_summary_tags *gc_sum_tags; ++ ++ /* Special directories */ ++ struct yaffs_obj *root_dir; ++ struct yaffs_obj *lost_n_found; ++ ++ int buffered_block; /* Which block is buffered here? */ ++ int doing_buffered_block_rewrite; ++ ++ struct yaffs_cache *cache; ++ int cache_last_use; ++ ++ /* Stuff for background deletion and unlinked files. */ ++ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted ++ files live. */ ++ struct yaffs_obj *del_dir; /* Directory where deleted objects are ++ sent to disappear. */ ++ struct yaffs_obj *unlinked_deletion; /* Current file being ++ background deleted. */ ++ int n_deleted_files; /* Count of files awaiting deletion; */ ++ int n_unlinked_files; /* Count of unlinked files. */ ++ int n_bg_deletions; /* Count of background deletions. */ ++ ++ /* Temporary buffer management */ ++ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS]; ++ int max_temp; ++ int temp_in_use; ++ int unmanaged_buffer_allocs; ++ int unmanaged_buffer_deallocs; ++ ++ /* yaffs2 runtime stuff */ ++ unsigned seq_number; /* Sequence number of currently ++ allocating block */ ++ unsigned oldest_dirty_seq; ++ unsigned oldest_dirty_block; ++ ++ /* Block refreshing */ ++ int refresh_skip; /* A skip down counter. ++ * Refresh happens when this gets to zero. */ ++ ++ /* Dirty directory handling */ ++ struct list_head dirty_dirs; /* List of dirty directories */ ++ ++ /* Summary */ ++ int chunks_per_summary; ++ struct yaffs_summary_tags *sum_tags; ++ ++ /* Statistics */ ++ u32 n_page_writes; ++ u32 n_page_reads; ++ u32 n_erasures; ++ u32 n_bad_queries; ++ u32 n_bad_markings; ++ u32 n_erase_failures; ++ u32 n_gc_copies; ++ u32 all_gcs; ++ u32 passive_gc_count; ++ u32 oldest_dirty_gc_count; ++ u32 n_gc_blocks; ++ u32 bg_gcs; ++ u32 n_retried_writes; ++ u32 n_retired_blocks; ++ u32 n_ecc_fixed; ++ u32 n_ecc_unfixed; ++ u32 n_tags_ecc_fixed; ++ u32 n_tags_ecc_unfixed; ++ u32 n_deletions; ++ u32 n_unmarked_deletions; ++ u32 refresh_count; ++ u32 cache_hits; ++ u32 tags_used; ++ u32 summary_used; ++ ++}; ++ ++/* The CheckpointDevice structure holds the device information that changes ++ *at runtime and must be preserved over unmount/mount cycles. ++ */ ++struct yaffs_checkpt_dev { ++ int struct_type; ++ int n_erased_blocks; ++ int alloc_block; /* Current block being allocated off */ ++ u32 alloc_page; ++ int n_free_chunks; ++ ++ int n_deleted_files; /* Count of files awaiting deletion; */ ++ int n_unlinked_files; /* Count of unlinked files. */ ++ int n_bg_deletions; /* Count of background deletions. 
*/ ++ ++ /* yaffs2 runtime stuff */ ++ unsigned seq_number; /* Sequence number of currently ++ * allocating block */ ++ ++}; ++ ++struct yaffs_checkpt_validity { ++ int struct_type; ++ u32 magic; ++ u32 version; ++ u32 head; ++}; ++ ++struct yaffs_shadow_fixer { ++ int obj_id; ++ int shadowed_id; ++ struct yaffs_shadow_fixer *next; ++}; ++ ++/* Structure for doing xattr modifications */ ++struct yaffs_xattr_mod { ++ int set; /* If 0 then this is a deletion */ ++ const YCHAR *name; ++ const void *data; ++ int size; ++ int flags; ++ int result; ++}; ++ ++/*----------------------- YAFFS Functions -----------------------*/ ++ ++int yaffs_guts_initialise(struct yaffs_dev *dev); ++void yaffs_deinitialise(struct yaffs_dev *dev); ++ ++int yaffs_get_n_free_chunks(struct yaffs_dev *dev); ++ ++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name, ++ struct yaffs_obj *new_dir, const YCHAR * new_name); ++ ++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name); ++int yaffs_del_obj(struct yaffs_obj *obj); ++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj, ++ enum yaffs_obj_type type); ++ ++ ++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size); ++loff_t yaffs_get_obj_length(struct yaffs_obj *obj); ++int yaffs_get_obj_inode(struct yaffs_obj *obj); ++unsigned yaffs_get_obj_type(struct yaffs_obj *obj); ++int yaffs_get_obj_link_count(struct yaffs_obj *obj); ++ ++/* File operations */ ++int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset, ++ int n_bytes); ++int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset, ++ int n_bytes, int write_trhrough); ++int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size); ++ ++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid); ++ ++int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync); ++ ++/* Flushing and checkpointing */ ++void yaffs_flush_whole_cache(struct yaffs_dev *dev); ++ ++int yaffs_checkpoint_save(struct yaffs_dev *dev); ++int yaffs_checkpoint_restore(struct yaffs_dev *dev); ++ ++/* Directory operations */ ++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name, ++ u32 mode, u32 uid, u32 gid); ++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir, ++ const YCHAR *name); ++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number); ++ ++/* Link operations */ ++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name, ++ struct yaffs_obj *equiv_obj); ++ ++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj); ++ ++/* Symlink operations */ ++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid, const YCHAR *alias); ++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj); ++ ++/* Special inodes (fifos, sockets and devices) */ ++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent, ++ const YCHAR *name, u32 mode, u32 uid, ++ u32 gid, u32 rdev); ++ ++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name, ++ const void *value, int size, int flags); ++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value, ++ int size); ++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size); ++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name); ++ ++/* Special directories */ ++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev); ++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev 
*dev); ++ ++void yaffs_handle_defered_free(struct yaffs_obj *obj); ++ ++void yaffs_update_dirty_dirs(struct yaffs_dev *dev); ++ ++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency); ++ ++/* Debug dump */ ++int yaffs_dump_obj(struct yaffs_obj *obj); ++ ++void yaffs_guts_test(struct yaffs_dev *dev); ++int yaffs_guts_ll_init(struct yaffs_dev *dev); ++ ++ ++/* A few useful functions to be used within the core files*/ ++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash, ++ int lyn); ++int yaffs_check_ff(u8 *buffer, int n_bytes); ++void yaffs_handle_chunk_error(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi); ++ ++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev); ++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer); ++ ++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev, ++ int number, ++ enum yaffs_obj_type type); ++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk, ++ int nand_chunk, int in_scan); ++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name); ++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj, ++ const struct yaffs_obj_hdr *oh); ++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj); ++YCHAR *yaffs_clone_str(const YCHAR *str); ++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list); ++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no); ++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, ++ int force, int is_shrink, int shadows, ++ struct yaffs_xattr_mod *xop); ++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id, ++ int backward_scanning); ++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks); ++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev); ++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev, ++ struct yaffs_file_var *file_struct, ++ u32 chunk_id, ++ struct yaffs_tnode *passed_tn); ++ ++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset, ++ int n_bytes, int write_trhrough); ++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size); ++void yaffs_skip_rest_of_block(struct yaffs_dev *dev); ++ ++int yaffs_count_free_chunks(struct yaffs_dev *dev); ++ ++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev, ++ struct yaffs_file_var *file_struct, ++ u32 chunk_id); ++ ++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn, ++ unsigned pos); ++ ++int yaffs_is_non_empty_dir(struct yaffs_obj *obj); ++ ++int yaffs_guts_format_dev(struct yaffs_dev *dev); ++ ++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr, ++ int *chunk_out, u32 *offset_out); ++/* ++ * Marshalling functions to get loff_t file sizes into aand out of ++ * object headers. 
++ */ ++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize); ++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh); ++loff_t yaffs_max_file_size(struct yaffs_dev *dev); ++ ++/* ++ * Debug function to count number of blocks in each state ++ * NB Needs to be called with correct number of integers ++ */ ++ ++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]); ++ ++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk, ++ struct yaffs_ext_tags *tags); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_linux.h linux-3.4.90/fs/yaffs2/yaffs_linux.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_linux.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_linux.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,48 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_LINUX_H__ ++#define __YAFFS_LINUX_H__ ++ ++#include "yportenv.h" ++ ++struct yaffs_linux_context { ++ struct list_head context_list; /* List of these we have mounted */ ++ struct yaffs_dev *dev; ++ struct super_block *super; ++ struct task_struct *bg_thread; /* Background thread for this device */ ++ int bg_running; ++ struct mutex gross_lock; /* Gross locking mutex*/ ++ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size ++ * at compile time so we have to allocate it. ++ */ ++ struct list_head search_contexts; ++ struct task_struct *readdir_process; ++ unsigned mount_id; ++ int dirty; ++}; ++ ++#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context)) ++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context)) ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++#define WRITE_SIZE_STR "writesize" ++#define WRITE_SIZE(mtd) ((mtd)->writesize) ++#else ++#define WRITE_SIZE_STR "oobblock" ++#define WRITE_SIZE(mtd) ((mtd)->oobblock) ++#endif ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_mtdif.c linux-3.4.90/fs/yaffs2/yaffs_mtdif.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_mtdif.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_mtdif.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,309 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include "yportenv.h" ++ ++#include "yaffs_mtdif.h" ++ ++#include "linux/mtd/mtd.h" ++#include "linux/types.h" ++#include "linux/time.h" ++#include "linux/major.h" ++#include "linux/mtd/nand.h" ++#include "linux/kernel.h" ++#include "linux/version.h" ++#include "linux/types.h" ++ ++#include "yaffs_trace.h" ++#include "yaffs_guts.h" ++#include "yaffs_linux.h" ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) ++#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) ++#define mtd_erase(m, ei) (m)->erase(m, ei) ++#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops) ++#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops) ++#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs) ++#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs) ++#endif ++ ++ ++ ++int nandmtd_erase_block(struct yaffs_dev *dev, int block_no) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ u32 addr = ++ ((loff_t) block_no) * dev->param.total_bytes_per_chunk * ++ dev->param.chunks_per_block; ++ struct erase_info ei; ++ int retval = 0; ++ ++ ei.mtd = mtd; ++ ei.addr = addr; ++ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block; ++ ei.time = 1000; ++ ei.retries = 2; ++ ei.callback = NULL; ++ ei.priv = (u_long) dev; ++ ++ retval = mtd_erase(mtd, &ei); ++ ++ if (retval == 0) ++ return YAFFS_OK; ++ ++ return YAFFS_FAIL; ++} ++ ++ ++static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk, ++ const u8 *data, int data_len, ++ const u8 *oob, int oob_len) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ loff_t addr; ++ struct mtd_oob_ops ops; ++ int retval; ++ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n", ++ dev, nand_chunk, data, data_len, oob, oob_len); ++ ++ if (!data || !data_len) { ++ data = NULL; ++ data_len = 0; ++ } ++ ++ if (!oob || !oob_len) { ++ oob = NULL; ++ oob_len = 0; ++ } ++ ++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk; ++ memset(&ops, 0, sizeof(ops)); ++ ops.mode = MTD_OPS_AUTO_OOB; ++ ops.len = (data) ? data_len : 0; ++ ops.ooblen = oob_len; ++ ops.datbuf = (u8 *)data; ++ ops.oobbuf = (u8 *)oob; ++ ++ retval = mtd_write_oob(mtd, addr, &ops); ++ if (retval) { ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "write_oob failed, chunk %d, mtd error %d", ++ nand_chunk, retval); ++ } ++ return retval ? YAFFS_FAIL : YAFFS_OK; ++} ++ ++static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk, ++ u8 *data, int data_len, ++ u8 *oob, int oob_len, ++ enum yaffs_ecc_result *ecc_result) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ loff_t addr; ++ struct mtd_oob_ops ops; ++ int retval; ++ ++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk; ++ memset(&ops, 0, sizeof(ops)); ++ ops.mode = MTD_OPS_AUTO_OOB; ++ ops.len = (data) ? data_len : 0; ++ ops.ooblen = oob_len; ++ ops.datbuf = data; ++ ops.oobbuf = oob; ++ ++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20)) ++ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug; ++ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL. ++ */ ++ ops.len = (ops.datbuf) ? ops.len : ops.ooblen; ++#endif ++ /* Read page and oob using MTD. ++ * Check status and determine ECC result. 
++ */ ++ retval = mtd_read_oob(mtd, addr, &ops); ++ if (retval) ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "read_oob failed, chunk %d, mtd error %d", ++ nand_chunk, retval); ++ ++ switch (retval) { ++ case 0: ++ /* no error */ ++ if(ecc_result) ++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR; ++ break; ++ ++ case -EUCLEAN: ++ /* MTD's ECC fixed the data */ ++ if(ecc_result) ++ *ecc_result = YAFFS_ECC_RESULT_FIXED; ++ dev->n_ecc_fixed++; ++ break; ++ ++ case -EBADMSG: ++ default: ++ /* MTD's ECC could not fix the data */ ++ dev->n_ecc_unfixed++; ++ if(ecc_result) ++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED; ++ return YAFFS_FAIL; ++ } ++ ++ return YAFFS_OK; ++} ++ ++static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ ++ loff_t addr; ++ struct erase_info ei; ++ int retval = 0; ++ u32 block_size; ++ ++ block_size = dev->param.total_bytes_per_chunk * ++ dev->param.chunks_per_block; ++ addr = ((loff_t) block_no) * block_size; ++ ++ ei.mtd = mtd; ++ ei.addr = addr; ++ ei.len = block_size; ++ ei.time = 1000; ++ ei.retries = 2; ++ ei.callback = NULL; ++ ei.priv = (u_long) dev; ++ ++ retval = mtd_erase(mtd, &ei); ++ ++ if (retval == 0) ++ return YAFFS_OK; ++ ++ return YAFFS_FAIL; ++} ++ ++static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk; ++ int retval; ++ ++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no); ++ ++ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no); ++ return (retval) ? YAFFS_FAIL : YAFFS_OK; ++} ++ ++static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no) ++{ ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk; ++ int retval; ++ ++ yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no); ++ ++ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no); ++ return (retval) ? YAFFS_FAIL : YAFFS_OK; ++} ++ ++static int yaffs_mtd_initialise(struct yaffs_dev *dev) ++{ ++ return YAFFS_OK; ++} ++ ++static int yaffs_mtd_deinitialise(struct yaffs_dev *dev) ++{ ++ return YAFFS_OK; ++} ++ ++ ++void yaffs_mtd_drv_install(struct yaffs_dev *dev) ++{ ++ struct yaffs_driver *drv = &dev->drv; ++ ++ drv->drv_write_chunk_fn = yaffs_mtd_write; ++ drv->drv_read_chunk_fn = yaffs_mtd_read; ++ drv->drv_erase_fn = yaffs_mtd_erase; ++ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad; ++ drv->drv_check_bad_fn = yaffs_mtd_check_bad; ++ drv->drv_initialise_fn = yaffs_mtd_initialise; ++ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise; ++} ++ ++ ++struct mtd_info * yaffs_get_mtd_device(dev_t sdev) ++{ ++ struct mtd_info *mtd; ++ ++ mtd = yaffs_get_mtd_device(sdev); ++ ++ /* Check it's an mtd device..... 
*/ ++ if (MAJOR(sdev) != MTD_BLOCK_MAJOR) ++ return NULL; /* This isn't an mtd device */ ++ ++ /* Check it's NAND */ ++ if (mtd->type != MTD_NANDFLASH) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: MTD device is not NAND it's type %d", ++ mtd->type); ++ return NULL; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd)); ++ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize); ++ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) ++ yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size); ++#else ++ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size); ++#endif ++ ++ return mtd; ++} ++ ++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags) ++{ ++ if (yaffs_version == 2) { ++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE || ++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) && ++ !inband_tags) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "MTD device does not have the right page sizes" ++ ); ++ return -1; ++ } ++ } else { ++ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK || ++ mtd->oobsize != YAFFS_BYTES_PER_SPARE) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "MTD device does not support have the right page sizes" ++ ); ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++ ++void yaffs_put_mtd_device(struct mtd_info *mtd) ++{ ++ if(mtd) ++ put_mtd_device(mtd); ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_mtdif.h linux-3.4.90/fs/yaffs2/yaffs_mtdif.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_mtdif.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_mtdif.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,25 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_MTDIF_H__ ++#define __YAFFS_MTDIF_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_mtd_drv_install(struct yaffs_dev *dev); ++struct mtd_info * yaffs_get_mtd_device(dev_t sdev); ++void yaffs_put_mtd_device(struct mtd_info *mtd); ++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags); ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_nameval.c linux-3.4.90/fs/yaffs2/yaffs_nameval.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_nameval.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_nameval.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,208 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++/* ++ * This simple implementation of a name-value store assumes a small number of ++* values and fits into a small finite buffer. ++ * ++ * Each attribute is stored as a record: ++ * sizeof(int) bytes record size. ++ * strnlen+1 bytes name null terminated. ++ * nbytes value. 
++ * ---------- ++ * total size stored in record size ++ * ++ * This code has not been tested with unicode yet. ++ */ ++ ++#include "yaffs_nameval.h" ++ ++#include "yportenv.h" ++ ++static int nval_find(const char *xb, int xb_size, const YCHAR *name, ++ int *exist_size) ++{ ++ int pos = 0; ++ int size; ++ ++ memcpy(&size, xb, sizeof(int)); ++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) { ++ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)), ++ name, size)) { ++ if (exist_size) ++ *exist_size = size; ++ return pos; ++ } ++ pos += size; ++ if (pos < xb_size - sizeof(int)) ++ memcpy(&size, xb + pos, sizeof(int)); ++ else ++ size = 0; ++ } ++ if (exist_size) ++ *exist_size = 0; ++ return -ENODATA; ++} ++ ++static int nval_used(const char *xb, int xb_size) ++{ ++ int pos = 0; ++ int size; ++ ++ memcpy(&size, xb + pos, sizeof(int)); ++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) { ++ pos += size; ++ if (pos < xb_size - sizeof(int)) ++ memcpy(&size, xb + pos, sizeof(int)); ++ else ++ size = 0; ++ } ++ return pos; ++} ++ ++int nval_del(char *xb, int xb_size, const YCHAR *name) ++{ ++ int pos = nval_find(xb, xb_size, name, NULL); ++ int size; ++ ++ if (pos < 0 || pos >= xb_size) ++ return -ENODATA; ++ ++ /* Find size, shift rest over this record, ++ * then zero out the rest of buffer */ ++ memcpy(&size, xb + pos, sizeof(int)); ++ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size)); ++ memset(xb + (xb_size - size), 0, size); ++ return 0; ++} ++ ++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf, ++ int bsize, int flags) ++{ ++ int pos; ++ int namelen = strnlen(name, xb_size); ++ int reclen; ++ int size_exist = 0; ++ int space; ++ int start; ++ ++ pos = nval_find(xb, xb_size, name, &size_exist); ++ ++ if (flags & XATTR_CREATE && pos >= 0) ++ return -EEXIST; ++ if (flags & XATTR_REPLACE && pos < 0) ++ return -ENODATA; ++ ++ start = nval_used(xb, xb_size); ++ space = xb_size - start + size_exist; ++ ++ reclen = (sizeof(int) + namelen + 1 + bsize); ++ ++ if (reclen > space) ++ return -ENOSPC; ++ ++ if (pos >= 0) { ++ nval_del(xb, xb_size, name); ++ start = nval_used(xb, xb_size); ++ } ++ ++ pos = start; ++ ++ memcpy(xb + pos, &reclen, sizeof(int)); ++ pos += sizeof(int); ++ strncpy((YCHAR *) (xb + pos), name, reclen); ++ pos += (namelen + 1); ++ memcpy(xb + pos, buf, bsize); ++ return 0; ++} ++ ++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf, ++ int bsize) ++{ ++ int pos = nval_find(xb, xb_size, name, NULL); ++ int size; ++ ++ if (pos >= 0 && pos < xb_size) { ++ ++ memcpy(&size, xb + pos, sizeof(int)); ++ pos += sizeof(int); /* advance past record length */ ++ size -= sizeof(int); ++ ++ /* Advance over name string */ ++ while (xb[pos] && size > 0 && pos < xb_size) { ++ pos++; ++ size--; ++ } ++ /*Advance over NUL */ ++ pos++; ++ size--; ++ ++ /* If bsize is zero then this is a size query. ++ * Return the size, but don't copy. 
++ */ ++ if (!bsize) ++ return size; ++ ++ if (size <= bsize) { ++ memcpy(buf, xb + pos, size); ++ return size; ++ } ++ } ++ if (pos >= 0) ++ return -ERANGE; ++ ++ return -ENODATA; ++} ++ ++int nval_list(const char *xb, int xb_size, char *buf, int bsize) ++{ ++ int pos = 0; ++ int size; ++ int name_len; ++ int ncopied = 0; ++ int filled = 0; ++ ++ memcpy(&size, xb + pos, sizeof(int)); ++ while (size > sizeof(int) && ++ size <= xb_size && ++ (pos + size) < xb_size && ++ !filled) { ++ pos += sizeof(int); ++ size -= sizeof(int); ++ name_len = strnlen((YCHAR *) (xb + pos), size); ++ if (ncopied + name_len + 1 < bsize) { ++ memcpy(buf, xb + pos, name_len * sizeof(YCHAR)); ++ buf += name_len; ++ *buf = '\0'; ++ buf++; ++ if (sizeof(YCHAR) > 1) { ++ *buf = '\0'; ++ buf++; ++ } ++ ncopied += (name_len + 1); ++ } else { ++ filled = 1; ++ } ++ pos += size; ++ if (pos < xb_size - sizeof(int)) ++ memcpy(&size, xb + pos, sizeof(int)); ++ else ++ size = 0; ++ } ++ return ncopied; ++} ++ ++int nval_hasvalues(const char *xb, int xb_size) ++{ ++ return nval_used(xb, xb_size) > 0; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_nameval.h linux-3.4.90/fs/yaffs2/yaffs_nameval.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_nameval.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_nameval.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,28 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __NAMEVAL_H__ ++#define __NAMEVAL_H__ ++ ++#include "yportenv.h" ++ ++int nval_del(char *xb, int xb_size, const YCHAR * name); ++int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf, ++ int bsize, int flags); ++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf, ++ int bsize); ++int nval_list(const char *xb, int xb_size, char *buf, int bsize); ++int nval_hasvalues(const char *xb, int xb_size); ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_nand.c linux-3.4.90/fs/yaffs2/yaffs_nand.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_nand.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_nand.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,122 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include "yaffs_nand.h" ++#include "yaffs_tagscompat.h" ++ ++#include "yaffs_getblockinfo.h" ++#include "yaffs_summary.h" ++ ++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk) ++{ ++ return chunk - dev->chunk_offset; ++} ++ ++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk, ++ u8 *buffer, struct yaffs_ext_tags *tags) ++{ ++ int result; ++ struct yaffs_ext_tags local_tags; ++ int flash_chunk = apply_chunk_offset(dev, nand_chunk); ++ ++ dev->n_page_reads++; ++ ++ /* If there are no tags provided use local tags. */ ++ if (!tags) ++ tags = &local_tags; ++ ++ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags); ++ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) { ++ ++ struct yaffs_block_info *bi; ++ bi = yaffs_get_block_info(dev, ++ nand_chunk / ++ dev->param.chunks_per_block); ++ yaffs_handle_chunk_error(dev, bi); ++ } ++ return result; ++} ++ ++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev, ++ int nand_chunk, ++ const u8 *buffer, struct yaffs_ext_tags *tags) ++{ ++ int result; ++ int flash_chunk = apply_chunk_offset(dev, nand_chunk); ++ ++ dev->n_page_writes++; ++ ++ if (!tags) { ++ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags"); ++ BUG(); ++ return YAFFS_FAIL; ++ } ++ ++ tags->seq_number = dev->seq_number; ++ tags->chunk_used = 1; ++ yaffs_trace(YAFFS_TRACE_WRITE, ++ "Writing chunk %d tags %d %d", ++ nand_chunk, tags->obj_id, tags->chunk_id); ++ ++ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk, ++ buffer, tags); ++ ++ yaffs_summary_add(dev, tags, nand_chunk); ++ ++ return result; ++} ++ ++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no) ++{ ++ block_no -= dev->block_offset; ++ dev->n_bad_markings++; ++ ++ if (dev->param.disable_bad_block_marking) ++ return YAFFS_OK; ++ ++ return dev->tagger.mark_bad_fn(dev, block_no); ++} ++ ++ ++int yaffs_query_init_block_state(struct yaffs_dev *dev, ++ int block_no, ++ enum yaffs_block_state *state, ++ u32 *seq_number) ++{ ++ block_no -= dev->block_offset; ++ return dev->tagger.query_block_fn(dev, block_no, state, seq_number); ++} ++ ++int yaffs_erase_block(struct yaffs_dev *dev, int block_no) ++{ ++ int result; ++ ++ block_no -= dev->block_offset; ++ dev->n_erasures++; ++ result = dev->drv.drv_erase_fn(dev, block_no); ++ return result; ++} ++ ++int yaffs_init_nand(struct yaffs_dev *dev) ++{ ++ if (dev->drv.drv_initialise_fn) ++ return dev->drv.drv_initialise_fn(dev); ++ return YAFFS_OK; ++} ++ ++int yaffs_deinit_nand(struct yaffs_dev *dev) ++{ ++ if (dev->drv.drv_deinitialise_fn) ++ return dev->drv.drv_deinitialise_fn(dev); ++ return YAFFS_OK; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_nand.h linux-3.4.90/fs/yaffs2/yaffs_nand.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_nand.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_nand.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,39 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. 
++ */ ++ ++#ifndef __YAFFS_NAND_H__ ++#define __YAFFS_NAND_H__ ++#include "yaffs_guts.h" ++ ++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk, ++ u8 *buffer, struct yaffs_ext_tags *tags); ++ ++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev, ++ int nand_chunk, ++ const u8 *buffer, struct yaffs_ext_tags *tags); ++ ++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no); ++ ++int yaffs_query_init_block_state(struct yaffs_dev *dev, ++ int block_no, ++ enum yaffs_block_state *state, ++ unsigned *seq_number); ++ ++int yaffs_erase_block(struct yaffs_dev *dev, int flash_block); ++ ++int yaffs_init_nand(struct yaffs_dev *dev); ++int yaffs_deinit_nand(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags1.c linux-3.4.90/fs/yaffs2/yaffs_packedtags1.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags1.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_packedtags1.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,56 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_packedtags1.h" ++#include "yportenv.h" ++ ++static const u8 all_ff[20] = { ++ 0xff, 0xff, 0xff, 0xff, ++ 0xff, 0xff, 0xff, 0xff, ++ 0xff, 0xff, 0xff, 0xff, ++ 0xff, 0xff, 0xff, 0xff, ++ 0xff, 0xff, 0xff, 0xff ++}; ++ ++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt, ++ const struct yaffs_ext_tags *t) ++{ ++ pt->chunk_id = t->chunk_id; ++ pt->serial_number = t->serial_number; ++ pt->n_bytes = t->n_bytes; ++ pt->obj_id = t->obj_id; ++ pt->ecc = 0; ++ pt->deleted = (t->is_deleted) ? 0 : 1; ++ pt->unused_stuff = 0; ++ pt->should_be_ff = 0xffffffff; ++} ++ ++void yaffs_unpack_tags1(struct yaffs_ext_tags *t, ++ const struct yaffs_packed_tags1 *pt) ++{ ++ ++ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) { ++ t->block_bad = 0; ++ if (pt->should_be_ff != 0xffffffff) ++ t->block_bad = 1; ++ t->chunk_used = 1; ++ t->obj_id = pt->obj_id; ++ t->chunk_id = pt->chunk_id; ++ t->n_bytes = pt->n_bytes; ++ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR; ++ t->is_deleted = (pt->deleted) ? 0 : 1; ++ t->serial_number = pt->serial_number; ++ } else { ++ memset(t, 0, sizeof(struct yaffs_ext_tags)); ++ } ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags1.h linux-3.4.90/fs/yaffs2/yaffs_packedtags1.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags1.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_packedtags1.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,39 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. 
*/ ++ ++#ifndef __YAFFS_PACKEDTAGS1_H__ ++#define __YAFFS_PACKEDTAGS1_H__ ++ ++#include "yaffs_guts.h" ++ ++struct yaffs_packed_tags1 { ++ u32 chunk_id:20; ++ u32 serial_number:2; ++ u32 n_bytes:10; ++ u32 obj_id:18; ++ u32 ecc:12; ++ u32 deleted:1; ++ u32 unused_stuff:1; ++ unsigned should_be_ff; ++ ++}; ++ ++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt, ++ const struct yaffs_ext_tags *t); ++void yaffs_unpack_tags1(struct yaffs_ext_tags *t, ++ const struct yaffs_packed_tags1 *pt); ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags2.c linux-3.4.90/fs/yaffs2/yaffs_packedtags2.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags2.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_packedtags2.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,197 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_packedtags2.h" ++#include "yportenv.h" ++#include "yaffs_trace.h" ++ ++/* This code packs a set of extended tags into a binary structure for ++ * NAND storage ++ */ ++ ++/* Some of the information is "extra" struff which can be packed in to ++ * speed scanning ++ * This is defined by having the EXTRA_HEADER_INFO_FLAG set. ++ */ ++ ++/* Extra flags applied to chunk_id */ ++ ++#define EXTRA_HEADER_INFO_FLAG 0x80000000 ++#define EXTRA_SHRINK_FLAG 0x40000000 ++#define EXTRA_SHADOWS_FLAG 0x20000000 ++#define EXTRA_SPARE_FLAGS 0x10000000 ++ ++#define ALL_EXTRA_FLAGS 0xf0000000 ++ ++/* Also, the top 4 bits of the object Id are set to the object type. */ ++#define EXTRA_OBJECT_TYPE_SHIFT (28) ++#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT) ++ ++static void yaffs_dump_packed_tags2_tags_only( ++ const struct yaffs_packed_tags2_tags_only *ptt) ++{ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "packed tags obj %d chunk %d byte %d seq %d", ++ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number); ++} ++ ++static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt) ++{ ++ yaffs_dump_packed_tags2_tags_only(&pt->t); ++} ++ ++static void yaffs_dump_tags2(const struct yaffs_ext_tags *t) ++{ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d", ++ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id, ++ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number, ++ t->seq_number); ++ ++} ++ ++static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t) ++{ ++ if (t->chunk_id != 0 || !t->extra_available) ++ return 0; ++ ++ /* Check if the file size is too long to store */ ++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE && ++ (t->extra_file_size >> 31) != 0) ++ return 0; ++ return 1; ++} ++ ++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt, ++ const struct yaffs_ext_tags *t) ++{ ++ ptt->chunk_id = t->chunk_id; ++ ptt->seq_number = t->seq_number; ++ ptt->n_bytes = t->n_bytes; ++ ptt->obj_id = t->obj_id; ++ ++ /* Only store extra tags for object headers. ++ * If it is a file then only store if the file size is short\ ++ * enough to fit. 
++ */ ++ if (yaffs_check_tags_extra_packable(t)) { ++ /* Store the extra header info instead */ ++ /* We save the parent object in the chunk_id */ ++ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id; ++ if (t->extra_is_shrink) ++ ptt->chunk_id |= EXTRA_SHRINK_FLAG; ++ if (t->extra_shadows) ++ ptt->chunk_id |= EXTRA_SHADOWS_FLAG; ++ ++ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK; ++ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT); ++ ++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK) ++ ptt->n_bytes = t->extra_equiv_id; ++ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE) ++ ptt->n_bytes = (unsigned) t->extra_file_size; ++ else ++ ptt->n_bytes = 0; ++ } ++ ++ yaffs_dump_packed_tags2_tags_only(ptt); ++ yaffs_dump_tags2(t); ++} ++ ++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt, ++ const struct yaffs_ext_tags *t, int tags_ecc) ++{ ++ yaffs_pack_tags2_tags_only(&pt->t, t); ++ ++ if (tags_ecc) ++ yaffs_ecc_calc_other((unsigned char *)&pt->t, ++ sizeof(struct yaffs_packed_tags2_tags_only), ++ &pt->ecc); ++} ++ ++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t, ++ struct yaffs_packed_tags2_tags_only *ptt) ++{ ++ memset(t, 0, sizeof(struct yaffs_ext_tags)); ++ ++ if (ptt->seq_number == 0xffffffff) ++ return; ++ ++ t->block_bad = 0; ++ t->chunk_used = 1; ++ t->obj_id = ptt->obj_id; ++ t->chunk_id = ptt->chunk_id; ++ t->n_bytes = ptt->n_bytes; ++ t->is_deleted = 0; ++ t->serial_number = 0; ++ t->seq_number = ptt->seq_number; ++ ++ /* Do extra header info stuff */ ++ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) { ++ t->chunk_id = 0; ++ t->n_bytes = 0; ++ ++ t->extra_available = 1; ++ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS)); ++ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0; ++ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0; ++ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT; ++ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK; ++ ++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK) ++ t->extra_equiv_id = ptt->n_bytes; ++ else ++ t->extra_file_size = ptt->n_bytes; ++ } ++ yaffs_dump_packed_tags2_tags_only(ptt); ++ yaffs_dump_tags2(t); ++} ++ ++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt, ++ int tags_ecc) ++{ ++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR; ++ ++ if (pt->t.seq_number != 0xffffffff && tags_ecc) { ++ /* Chunk is in use and we need to do ECC */ ++ ++ struct yaffs_ecc_other ecc; ++ int result; ++ yaffs_ecc_calc_other((unsigned char *)&pt->t, ++ sizeof(struct yaffs_packed_tags2_tags_only), ++ &ecc); ++ result = ++ yaffs_ecc_correct_other((unsigned char *)&pt->t, ++ sizeof(struct yaffs_packed_tags2_tags_only), ++ &pt->ecc, &ecc); ++ switch (result) { ++ case 0: ++ ecc_result = YAFFS_ECC_RESULT_NO_ERROR; ++ break; ++ case 1: ++ ecc_result = YAFFS_ECC_RESULT_FIXED; ++ break; ++ case -1: ++ ecc_result = YAFFS_ECC_RESULT_UNFIXED; ++ break; ++ default: ++ ecc_result = YAFFS_ECC_RESULT_UNKNOWN; ++ } ++ } ++ yaffs_unpack_tags2_tags_only(t, &pt->t); ++ ++ t->ecc_result = ecc_result; ++ ++ yaffs_dump_packed_tags2(pt); ++ yaffs_dump_tags2(t); ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags2.h linux-3.4.90/fs/yaffs2/yaffs_packedtags2.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_packedtags2.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_packedtags2.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,47 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. 
++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++/* This is used to pack YAFFS2 tags, not YAFFS1tags. */ ++ ++#ifndef __YAFFS_PACKEDTAGS2_H__ ++#define __YAFFS_PACKEDTAGS2_H__ ++ ++#include "yaffs_guts.h" ++#include "yaffs_ecc.h" ++ ++struct yaffs_packed_tags2_tags_only { ++ unsigned seq_number; ++ unsigned obj_id; ++ unsigned chunk_id; ++ unsigned n_bytes; ++}; ++ ++struct yaffs_packed_tags2 { ++ struct yaffs_packed_tags2_tags_only t; ++ struct yaffs_ecc_other ecc; ++}; ++ ++/* Full packed tags with ECC, used for oob tags */ ++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt, ++ const struct yaffs_ext_tags *t, int tags_ecc); ++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt, ++ int tags_ecc); ++ ++/* Only the tags part (no ECC for use with inband tags */ ++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt, ++ const struct yaffs_ext_tags *t); ++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t, ++ struct yaffs_packed_tags2_tags_only *pt); ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_summary.c linux-3.4.90/fs/yaffs2/yaffs_summary.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_summary.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_summary.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,312 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++/* Summaries write the useful part of the tags for the chunks in a block into an ++ * an array which is written to the last n chunks of the block. ++ * Reading the summaries gives all the tags for the block in one read. Much ++ * faster. ++ * ++ * Chunks holding summaries are marked with tags making it look like ++ * they are part of a fake file. ++ * ++ * The summary could also be used during gc. ++ * ++ */ ++ ++#include "yaffs_summary.h" ++#include "yaffs_packedtags2.h" ++#include "yaffs_nand.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_bitmap.h" ++ ++/* ++ * The summary is built up in an array of summary tags. ++ * This gets written to the last one or two (maybe more) chunks in a block. ++ * A summary header is written as the first part of each chunk of summary data. ++ * The summary header must match or the summary is rejected. ++ */ ++ ++/* Summary tags don't need the sequence number because that is redundant. 
*/ ++struct yaffs_summary_tags { ++ unsigned obj_id; ++ unsigned chunk_id; ++ unsigned n_bytes; ++}; ++ ++/* Summary header */ ++struct yaffs_summary_header { ++ unsigned version; /* Must match current version */ ++ unsigned block; /* Must be this block */ ++ unsigned seq; /* Must be this sequence number */ ++ unsigned sum; /* Just add up all the bytes in the tags */ ++}; ++ ++ ++static void yaffs_summary_clear(struct yaffs_dev *dev) ++{ ++ if (!dev->sum_tags) ++ return; ++ memset(dev->sum_tags, 0, dev->chunks_per_summary * ++ sizeof(struct yaffs_summary_tags)); ++} ++ ++ ++void yaffs_summary_deinit(struct yaffs_dev *dev) ++{ ++ kfree(dev->sum_tags); ++ dev->sum_tags = NULL; ++ kfree(dev->gc_sum_tags); ++ dev->gc_sum_tags = NULL; ++ dev->chunks_per_summary = 0; ++} ++ ++int yaffs_summary_init(struct yaffs_dev *dev) ++{ ++ int sum_bytes; ++ int chunks_used; /* Number of chunks used by summary */ ++ int sum_tags_bytes; ++ ++ sum_bytes = dev->param.chunks_per_block * ++ sizeof(struct yaffs_summary_tags); ++ ++ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/ ++ (dev->data_bytes_per_chunk - ++ sizeof(struct yaffs_summary_header)); ++ ++ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used; ++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) * ++ dev->chunks_per_summary; ++ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS); ++ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS); ++ if (!dev->sum_tags || !dev->gc_sum_tags) { ++ yaffs_summary_deinit(dev); ++ return YAFFS_FAIL; ++ } ++ ++ yaffs_summary_clear(dev); ++ ++ return YAFFS_OK; ++} ++ ++static unsigned yaffs_summary_sum(struct yaffs_dev *dev) ++{ ++ u8 *sum_buffer = (u8 *)dev->sum_tags; ++ int i; ++ unsigned sum = 0; ++ ++ i = sizeof(struct yaffs_summary_tags) * ++ dev->chunks_per_summary; ++ while (i > 0) { ++ sum += *sum_buffer; ++ sum_buffer++; ++ i--; ++ } ++ ++ return sum; ++} ++ ++static int yaffs_summary_write(struct yaffs_dev *dev, int blk) ++{ ++ struct yaffs_ext_tags tags; ++ u8 *buffer; ++ u8 *sum_buffer = (u8 *)dev->sum_tags; ++ int n_bytes; ++ int chunk_in_nand; ++ int chunk_in_block; ++ int result; ++ int this_tx; ++ struct yaffs_summary_header hdr; ++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr); ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk); ++ ++ buffer = yaffs_get_temp_buffer(dev); ++ n_bytes = sizeof(struct yaffs_summary_tags) * ++ dev->chunks_per_summary; ++ memset(&tags, 0, sizeof(struct yaffs_ext_tags)); ++ tags.obj_id = YAFFS_OBJECTID_SUMMARY; ++ tags.chunk_id = 1; ++ chunk_in_block = dev->chunks_per_summary; ++ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block + ++ dev->chunks_per_summary; ++ hdr.version = YAFFS_SUMMARY_VERSION; ++ hdr.block = blk; ++ hdr.seq = bi->seq_number; ++ hdr.sum = yaffs_summary_sum(dev); ++ ++ do { ++ this_tx = n_bytes; ++ if (this_tx > sum_bytes_per_chunk) ++ this_tx = sum_bytes_per_chunk; ++ memcpy(buffer, &hdr, sizeof(hdr)); ++ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx); ++ tags.n_bytes = this_tx + sizeof(hdr); ++ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand, ++ buffer, &tags); ++ ++ if (result != YAFFS_OK) ++ break; ++ yaffs_set_chunk_bit(dev, blk, chunk_in_block); ++ bi->pages_in_use++; ++ dev->n_free_chunks--; ++ ++ n_bytes -= this_tx; ++ sum_buffer += this_tx; ++ chunk_in_nand++; ++ chunk_in_block++; ++ tags.chunk_id++; ++ } while (result == YAFFS_OK && n_bytes > 0); ++ yaffs_release_temp_buffer(dev, buffer); ++ ++ ++ if (result == YAFFS_OK) ++ bi->has_summary = 1; ++ ++ ++ return result; 
++} ++ ++int yaffs_summary_read(struct yaffs_dev *dev, ++ struct yaffs_summary_tags *st, ++ int blk) ++{ ++ struct yaffs_ext_tags tags; ++ u8 *buffer; ++ u8 *sum_buffer = (u8 *)st; ++ int n_bytes; ++ int chunk_id; ++ int chunk_in_nand; ++ int chunk_in_block; ++ int result; ++ int this_tx; ++ struct yaffs_summary_header hdr; ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk); ++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr); ++ int sum_tags_bytes; ++ ++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) * ++ dev->chunks_per_summary; ++ buffer = yaffs_get_temp_buffer(dev); ++ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary; ++ chunk_in_block = dev->chunks_per_summary; ++ chunk_in_nand = blk * dev->param.chunks_per_block + ++ dev->chunks_per_summary; ++ chunk_id = 1; ++ do { ++ this_tx = n_bytes; ++ if (this_tx > sum_bytes_per_chunk) ++ this_tx = sum_bytes_per_chunk; ++ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand, ++ buffer, &tags); ++ ++ if (tags.chunk_id != chunk_id || ++ tags.obj_id != YAFFS_OBJECTID_SUMMARY || ++ tags.chunk_used == 0 || ++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED || ++ tags.n_bytes != (this_tx + sizeof(hdr))) ++ result = YAFFS_FAIL; ++ if (result != YAFFS_OK) ++ break; ++ ++ if (st == dev->sum_tags) { ++ /* If we're scanning then update the block info */ ++ yaffs_set_chunk_bit(dev, blk, chunk_in_block); ++ bi->pages_in_use++; ++ } ++ memcpy(&hdr, buffer, sizeof(hdr)); ++ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx); ++ n_bytes -= this_tx; ++ sum_buffer += this_tx; ++ chunk_in_nand++; ++ chunk_in_block++; ++ chunk_id++; ++ } while (result == YAFFS_OK && n_bytes > 0); ++ yaffs_release_temp_buffer(dev, buffer); ++ ++ if (result == YAFFS_OK) { ++ /* Verify header */ ++ if (hdr.version != YAFFS_SUMMARY_VERSION || ++ hdr.seq != bi->seq_number || ++ hdr.sum != yaffs_summary_sum(dev)) ++ result = YAFFS_FAIL; ++ } ++ ++ if (st == dev->sum_tags && result == YAFFS_OK) ++ bi->has_summary = 1; ++ ++ return result; ++} ++ ++int yaffs_summary_add(struct yaffs_dev *dev, ++ struct yaffs_ext_tags *tags, ++ int chunk_in_nand) ++{ ++ struct yaffs_packed_tags2_tags_only tags_only; ++ struct yaffs_summary_tags *sum_tags; ++ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block; ++ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block; ++ ++ if (!dev->sum_tags) ++ return YAFFS_OK; ++ ++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) { ++ yaffs_pack_tags2_tags_only(&tags_only, tags); ++ sum_tags = &dev->sum_tags[chunk_in_block]; ++ sum_tags->chunk_id = tags_only.chunk_id; ++ sum_tags->n_bytes = tags_only.n_bytes; ++ sum_tags->obj_id = tags_only.obj_id; ++ ++ if (chunk_in_block == dev->chunks_per_summary - 1) { ++ /* Time to write out the summary */ ++ yaffs_summary_write(dev, block_in_nand); ++ yaffs_summary_clear(dev); ++ yaffs_skip_rest_of_block(dev); ++ } ++ } ++ return YAFFS_OK; ++} ++ ++int yaffs_summary_fetch(struct yaffs_dev *dev, ++ struct yaffs_ext_tags *tags, ++ int chunk_in_block) ++{ ++ struct yaffs_packed_tags2_tags_only tags_only; ++ struct yaffs_summary_tags *sum_tags; ++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) { ++ sum_tags = &dev->sum_tags[chunk_in_block]; ++ tags_only.chunk_id = sum_tags->chunk_id; ++ tags_only.n_bytes = sum_tags->n_bytes; ++ tags_only.obj_id = sum_tags->obj_id; ++ yaffs_unpack_tags2_tags_only(tags, &tags_only); ++ return YAFFS_OK; ++ } ++ return YAFFS_FAIL; ++} ++ ++void yaffs_summary_gc(struct yaffs_dev *dev, int blk) 
++{ ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk); ++ int i; ++ ++ if (!bi->has_summary) ++ return; ++ ++ for (i = dev->chunks_per_summary; ++ i < dev->param.chunks_per_block; ++ i++) { ++ if (yaffs_check_chunk_bit(dev, blk, i)) { ++ yaffs_clear_chunk_bit(dev, blk, i); ++ bi->pages_in_use--; ++ dev->n_free_chunks++; ++ } ++ } ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_summary.h linux-3.4.90/fs/yaffs2/yaffs_summary.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_summary.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_summary.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_SUMMARY_H__ ++#define __YAFFS_SUMMARY_H__ ++ ++#include "yaffs_packedtags2.h" ++ ++ ++int yaffs_summary_init(struct yaffs_dev *dev); ++void yaffs_summary_deinit(struct yaffs_dev *dev); ++ ++int yaffs_summary_add(struct yaffs_dev *dev, ++ struct yaffs_ext_tags *tags, ++ int chunk_in_block); ++int yaffs_summary_fetch(struct yaffs_dev *dev, ++ struct yaffs_ext_tags *tags, ++ int chunk_in_block); ++int yaffs_summary_read(struct yaffs_dev *dev, ++ struct yaffs_summary_tags *st, ++ int blk); ++void yaffs_summary_gc(struct yaffs_dev *dev, int blk); ++ ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_tagscompat.c linux-3.4.90/fs/yaffs2/yaffs_tagscompat.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_tagscompat.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_tagscompat.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,381 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_guts.h" ++#include "yaffs_tagscompat.h" ++#include "yaffs_ecc.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_trace.h" ++ ++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk); ++ ++ ++/********** Tags ECC calculations *********/ ++ ++ ++void yaffs_calc_tags_ecc(struct yaffs_tags *tags) ++{ ++ /* Calculate an ecc */ ++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes; ++ unsigned i, j; ++ unsigned ecc = 0; ++ unsigned bit = 0; ++ ++ tags->ecc = 0; ++ ++ for (i = 0; i < 8; i++) { ++ for (j = 1; j & 0xff; j <<= 1) { ++ bit++; ++ if (b[i] & j) ++ ecc ^= bit; ++ } ++ } ++ tags->ecc = ecc; ++} ++ ++int yaffs_check_tags_ecc(struct yaffs_tags *tags) ++{ ++ unsigned ecc = tags->ecc; ++ ++ yaffs_calc_tags_ecc(tags); ++ ++ ecc ^= tags->ecc; ++ ++ if (ecc && ecc <= 64) { ++ /* TODO: Handle the failure better. Retire? 
*/ ++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes; ++ ++ ecc--; ++ ++ b[ecc / 8] ^= (1 << (ecc & 7)); ++ ++ /* Now recvalc the ecc */ ++ yaffs_calc_tags_ecc(tags); ++ ++ return 1; /* recovered error */ ++ } else if (ecc) { ++ /* Wierd ecc failure value */ ++ /* TODO Need to do somethiong here */ ++ return -1; /* unrecovered error */ ++ } ++ return 0; ++} ++ ++/********** Tags **********/ ++ ++static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr, ++ struct yaffs_tags *tags_ptr) ++{ ++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr; ++ ++ yaffs_calc_tags_ecc(tags_ptr); ++ ++ spare_ptr->tb0 = tu->as_bytes[0]; ++ spare_ptr->tb1 = tu->as_bytes[1]; ++ spare_ptr->tb2 = tu->as_bytes[2]; ++ spare_ptr->tb3 = tu->as_bytes[3]; ++ spare_ptr->tb4 = tu->as_bytes[4]; ++ spare_ptr->tb5 = tu->as_bytes[5]; ++ spare_ptr->tb6 = tu->as_bytes[6]; ++ spare_ptr->tb7 = tu->as_bytes[7]; ++} ++ ++static void yaffs_get_tags_from_spare(struct yaffs_dev *dev, ++ struct yaffs_spare *spare_ptr, ++ struct yaffs_tags *tags_ptr) ++{ ++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr; ++ int result; ++ ++ tu->as_bytes[0] = spare_ptr->tb0; ++ tu->as_bytes[1] = spare_ptr->tb1; ++ tu->as_bytes[2] = spare_ptr->tb2; ++ tu->as_bytes[3] = spare_ptr->tb3; ++ tu->as_bytes[4] = spare_ptr->tb4; ++ tu->as_bytes[5] = spare_ptr->tb5; ++ tu->as_bytes[6] = spare_ptr->tb6; ++ tu->as_bytes[7] = spare_ptr->tb7; ++ ++ result = yaffs_check_tags_ecc(tags_ptr); ++ if (result > 0) ++ dev->n_tags_ecc_fixed++; ++ else if (result < 0) ++ dev->n_tags_ecc_unfixed++; ++} ++ ++static void yaffs_spare_init(struct yaffs_spare *spare) ++{ ++ memset(spare, 0xff, sizeof(struct yaffs_spare)); ++} ++ ++static int yaffs_wr_nand(struct yaffs_dev *dev, ++ int nand_chunk, const u8 *data, ++ struct yaffs_spare *spare) ++{ ++ int data_size = dev->data_bytes_per_chunk; ++ ++ return dev->drv.drv_write_chunk_fn(dev, nand_chunk, ++ data, data_size, ++ (u8 *) spare, sizeof(*spare)); ++} ++ ++static int yaffs_rd_chunk_nand(struct yaffs_dev *dev, ++ int nand_chunk, ++ u8 *data, ++ struct yaffs_spare *spare, ++ enum yaffs_ecc_result *ecc_result, ++ int correct_errors) ++{ ++ int ret_val; ++ struct yaffs_spare local_spare; ++ int data_size; ++ int spare_size; ++ int ecc_result1, ecc_result2; ++ u8 calc_ecc[3]; ++ ++ if (!spare) { ++ /* If we don't have a real spare, then we use a local one. */ ++ /* Need this for the calculation of the ecc */ ++ spare = &local_spare; ++ } ++ data_size = dev->data_bytes_per_chunk; ++ spare_size = sizeof(struct yaffs_spare); ++ ++ if (dev->param.use_nand_ecc) ++ return dev->drv.drv_read_chunk_fn(dev, nand_chunk, ++ data, data_size, ++ (u8 *) spare, spare_size, ++ ecc_result); ++ ++ ++ /* Handle the ECC at this level. */ ++ ++ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk, ++ data, data_size, ++ (u8 *)spare, spare_size, ++ NULL); ++ if (!data || !correct_errors) ++ return ret_val; ++ ++ /* Do ECC correction if needed. 
*/ ++ yaffs_ecc_calc(data, calc_ecc); ++ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc); ++ yaffs_ecc_calc(&data[256], calc_ecc); ++ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc); ++ ++ if (ecc_result1 > 0) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>>yaffs ecc error fix performed on chunk %d:0", ++ nand_chunk); ++ dev->n_ecc_fixed++; ++ } else if (ecc_result1 < 0) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>>yaffs ecc error unfixed on chunk %d:0", ++ nand_chunk); ++ dev->n_ecc_unfixed++; ++ } ++ ++ if (ecc_result2 > 0) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>>yaffs ecc error fix performed on chunk %d:1", ++ nand_chunk); ++ dev->n_ecc_fixed++; ++ } else if (ecc_result2 < 0) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "**>>yaffs ecc error unfixed on chunk %d:1", ++ nand_chunk); ++ dev->n_ecc_unfixed++; ++ } ++ ++ if (ecc_result1 || ecc_result2) { ++ /* We had a data problem on this page */ ++ yaffs_handle_rd_data_error(dev, nand_chunk); ++ } ++ ++ if (ecc_result1 < 0 || ecc_result2 < 0) ++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED; ++ else if (ecc_result1 > 0 || ecc_result2 > 0) ++ *ecc_result = YAFFS_ECC_RESULT_FIXED; ++ else ++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR; ++ ++ return ret_val; ++} ++ ++/* ++ * Functions for robustisizing ++ */ ++ ++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk) ++{ ++ int flash_block = nand_chunk / dev->param.chunks_per_block; ++ ++ /* Mark the block for retirement */ ++ yaffs_get_block_info(dev, flash_block + dev->block_offset)-> ++ needs_retiring = 1; ++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, ++ "**>>Block %d marked for retirement", ++ flash_block); ++ ++ /* TODO: ++ * Just do a garbage collection on the affected block ++ * then retire the block ++ * NB recursion ++ */ ++} ++ ++static int yaffs_tags_compat_wr(struct yaffs_dev *dev, ++ int nand_chunk, ++ const u8 *data, const struct yaffs_ext_tags *ext_tags) ++{ ++ struct yaffs_spare spare; ++ struct yaffs_tags tags; ++ ++ yaffs_spare_init(&spare); ++ ++ if (ext_tags->is_deleted) ++ spare.page_status = 0; ++ else { ++ tags.obj_id = ext_tags->obj_id; ++ tags.chunk_id = ext_tags->chunk_id; ++ ++ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1); ++ ++ if (dev->data_bytes_per_chunk >= 1024) ++ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3; ++ else ++ tags.n_bytes_msb = 3; ++ ++ tags.serial_number = ext_tags->serial_number; ++ ++ if (!dev->param.use_nand_ecc && data) { ++ yaffs_ecc_calc(data, spare.ecc1); ++ yaffs_ecc_calc(&data[256], spare.ecc2); ++ } ++ ++ yaffs_load_tags_to_spare(&spare, &tags); ++ } ++ return yaffs_wr_nand(dev, nand_chunk, data, &spare); ++} ++ ++static int yaffs_tags_compat_rd(struct yaffs_dev *dev, ++ int nand_chunk, ++ u8 *data, struct yaffs_ext_tags *ext_tags) ++{ ++ struct yaffs_spare spare; ++ struct yaffs_tags tags; ++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN; ++ static struct yaffs_spare spare_ff; ++ static int init; ++ int deleted; ++ ++ if (!init) { ++ memset(&spare_ff, 0xff, sizeof(spare_ff)); ++ init = 1; ++ } ++ ++ if (!yaffs_rd_chunk_nand(dev, nand_chunk, ++ data, &spare, &ecc_result, 1)) ++ return YAFFS_FAIL; ++ ++ /* ext_tags may be NULL */ ++ if (!ext_tags) ++ return YAFFS_OK; ++ ++ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0; ++ ++ ext_tags->is_deleted = deleted; ++ ext_tags->ecc_result = ecc_result; ++ ext_tags->block_bad = 0; /* We're reading it */ ++ /* therefore it is not a bad block */ ++ ext_tags->chunk_used = ++ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 
1 : 0; ++ ++ if (ext_tags->chunk_used) { ++ yaffs_get_tags_from_spare(dev, &spare, &tags); ++ ext_tags->obj_id = tags.obj_id; ++ ext_tags->chunk_id = tags.chunk_id; ++ ext_tags->n_bytes = tags.n_bytes_lsb; ++ ++ if (dev->data_bytes_per_chunk >= 1024) ++ ext_tags->n_bytes |= ++ (((unsigned)tags.n_bytes_msb) << 10); ++ ++ ext_tags->serial_number = tags.serial_number; ++ } ++ ++ return YAFFS_OK; ++} ++ ++static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block) ++{ ++ struct yaffs_spare spare; ++ ++ memset(&spare, 0xff, sizeof(struct yaffs_spare)); ++ ++ spare.block_status = 'Y'; ++ ++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL, ++ &spare); ++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1, ++ NULL, &spare); ++ ++ return YAFFS_OK; ++} ++ ++static int yaffs_tags_compat_query_block(struct yaffs_dev *dev, ++ int block_no, ++ enum yaffs_block_state *state, ++ u32 *seq_number) ++{ ++ struct yaffs_spare spare0, spare1; ++ static struct yaffs_spare spare_ff; ++ static int init; ++ enum yaffs_ecc_result dummy; ++ ++ if (!init) { ++ memset(&spare_ff, 0xff, sizeof(spare_ff)); ++ init = 1; ++ } ++ ++ *seq_number = 0; ++ ++ /* Look for bad block markers in the first two chunks */ ++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, ++ NULL, &spare0, &dummy, 0); ++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1, ++ NULL, &spare1, &dummy, 0); ++ ++ if (hweight8(spare0.block_status & spare1.block_status) < 7) ++ *state = YAFFS_BLOCK_STATE_DEAD; ++ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0) ++ *state = YAFFS_BLOCK_STATE_EMPTY; ++ else ++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN; ++ ++ return YAFFS_OK; ++} ++ ++void yaffs_tags_compat_install(struct yaffs_dev *dev) ++{ ++ if(dev->param.is_yaffs2) ++ return; ++ if(!dev->tagger.write_chunk_tags_fn) ++ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr; ++ if(!dev->tagger.read_chunk_tags_fn) ++ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd; ++ if(!dev->tagger.query_block_fn) ++ dev->tagger.query_block_fn = yaffs_tags_compat_query_block; ++ if(!dev->tagger.mark_bad_fn) ++ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_tagscompat.h linux-3.4.90/fs/yaffs2/yaffs_tagscompat.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_tagscompat.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_tagscompat.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,44 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. 
++ */ ++ ++#ifndef __YAFFS_TAGSCOMPAT_H__ ++#define __YAFFS_TAGSCOMPAT_H__ ++ ++ ++#include "yaffs_guts.h" ++ ++#if 0 ++ ++ ++int yaffs_tags_compat_wr(struct yaffs_dev *dev, ++ int nand_chunk, ++ const u8 *data, const struct yaffs_ext_tags *tags); ++int yaffs_tags_compat_rd(struct yaffs_dev *dev, ++ int nand_chunk, ++ u8 *data, struct yaffs_ext_tags *tags); ++int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no); ++int yaffs_tags_compat_query_block(struct yaffs_dev *dev, ++ int block_no, ++ enum yaffs_block_state *state, ++ u32 *seq_number); ++ ++#endif ++ ++ ++void yaffs_tags_compat_install(struct yaffs_dev *dev); ++void yaffs_calc_tags_ecc(struct yaffs_tags *tags); ++int yaffs_check_tags_ecc(struct yaffs_tags *tags); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_tagsmarshall.c linux-3.4.90/fs/yaffs2/yaffs_tagsmarshall.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_tagsmarshall.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_tagsmarshall.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,199 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_guts.h" ++#include "yaffs_trace.h" ++#include "yaffs_packedtags2.h" ++ ++static int yaffs_tags_marshall_write(struct yaffs_dev *dev, ++ int nand_chunk, const u8 *data, ++ const struct yaffs_ext_tags *tags) ++{ ++ struct yaffs_packed_tags2 pt; ++ int retval; ++ ++ int packed_tags_size = ++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt); ++ void *packed_tags_ptr = ++ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt; ++ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "yaffs_tags_marshall_write chunk %d data %p tags %p", ++ nand_chunk, data, tags); ++ ++ /* For yaffs2 writing there must be both data and tags. ++ * If we're using inband tags, then the tags are stuffed into ++ * the end of the data buffer. ++ */ ++ if (!data || !tags) ++ BUG(); ++ else if (dev->param.inband_tags) { ++ struct yaffs_packed_tags2_tags_only *pt2tp; ++ pt2tp = ++ (struct yaffs_packed_tags2_tags_only *)(data + ++ dev-> ++ data_bytes_per_chunk); ++ yaffs_pack_tags2_tags_only(pt2tp, tags); ++ } else { ++ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc); ++ } ++ ++ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk, ++ data, dev->param.total_bytes_per_chunk, ++ (dev->param.inband_tags) ? NULL : packed_tags_ptr, ++ (dev->param.inband_tags) ? 0 : packed_tags_size); ++ ++ return retval; ++} ++ ++static int yaffs_tags_marshall_read(struct yaffs_dev *dev, ++ int nand_chunk, u8 *data, ++ struct yaffs_ext_tags *tags) ++{ ++ int retval = 0; ++ int local_data = 0; ++ u8 spare_buffer[100]; ++ enum yaffs_ecc_result ecc_result; ++ ++ struct yaffs_packed_tags2 pt; ++ ++ int packed_tags_size = ++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt); ++ void *packed_tags_ptr = ++ dev->param.no_tags_ecc ? 
(void *)&pt.t : (void *)&pt; ++ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "yaffs_tags_marshall_read chunk %d data %p tags %p", ++ nand_chunk, data, tags); ++ ++ if (dev->param.inband_tags) { ++ if (!data) { ++ local_data = 1; ++ data = yaffs_get_temp_buffer(dev); ++ } ++ } ++ ++ if (dev->param.inband_tags || (data && !tags)) ++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk, ++ data, dev->param.total_bytes_per_chunk, ++ NULL, 0, ++ &ecc_result); ++ else if (tags) ++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk, ++ data, dev->param.total_bytes_per_chunk, ++ spare_buffer, packed_tags_size, ++ &ecc_result); ++ else ++ BUG(); ++ ++ ++ if (dev->param.inband_tags) { ++ if (tags) { ++ struct yaffs_packed_tags2_tags_only *pt2tp; ++ pt2tp = ++ (struct yaffs_packed_tags2_tags_only *) ++ &data[dev->data_bytes_per_chunk]; ++ yaffs_unpack_tags2_tags_only(tags, pt2tp); ++ } ++ } else if (tags) { ++ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size); ++ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc); ++ } ++ ++ if (local_data) ++ yaffs_release_temp_buffer(dev, data); ++ ++ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) { ++ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED; ++ dev->n_ecc_unfixed++; ++ } ++ ++ if (tags && ecc_result == -YAFFS_ECC_RESULT_FIXED) { ++ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR) ++ tags->ecc_result = YAFFS_ECC_RESULT_FIXED; ++ dev->n_ecc_fixed++; ++ } ++ ++ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED) ++ return YAFFS_OK; ++ else ++ return YAFFS_FAIL; ++} ++ ++static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no, ++ enum yaffs_block_state *state, ++ u32 *seq_number) ++{ ++ int retval; ++ ++ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d", ++ block_no); ++ ++ retval = dev->drv.drv_check_bad_fn(dev, block_no); ++ ++ if (retval== YAFFS_FAIL) { ++ yaffs_trace(YAFFS_TRACE_MTD, "block is bad"); ++ ++ *state = YAFFS_BLOCK_STATE_DEAD; ++ *seq_number = 0; ++ } else { ++ struct yaffs_ext_tags t; ++ ++ yaffs_tags_marshall_read(dev, ++ block_no * dev->param.chunks_per_block, ++ NULL, &t); ++ ++ if (t.chunk_used) { ++ *seq_number = t.seq_number; ++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN; ++ } else { ++ *seq_number = 0; ++ *state = YAFFS_BLOCK_STATE_EMPTY; ++ } ++ } ++ ++ yaffs_trace(YAFFS_TRACE_MTD, ++ "block query returns seq %d state %d", ++ *seq_number, *state); ++ ++ if (retval == 0) ++ return YAFFS_OK; ++ else ++ return YAFFS_FAIL; ++} ++ ++static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no) ++{ ++ return dev->drv.drv_mark_bad_fn(dev, block_no); ++ ++} ++ ++ ++void yaffs_tags_marshall_install(struct yaffs_dev *dev) ++{ ++ if (!dev->param.is_yaffs2) ++ return; ++ ++ if (!dev->tagger.write_chunk_tags_fn) ++ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write; ++ ++ if (!dev->tagger.read_chunk_tags_fn) ++ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read; ++ ++ if (!dev->tagger.query_block_fn) ++ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block; ++ ++ if (!dev->tagger.mark_bad_fn) ++ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad; ++ ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_tagsmarshall.h linux-3.4.90/fs/yaffs2/yaffs_tagsmarshall.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_tagsmarshall.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_tagsmarshall.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,22 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. 
++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_TAGSMARSHALL_H__ ++#define __YAFFS_TAGSMARSHALL_H__ ++ ++#include "yaffs_guts.h" ++void yaffs_tags_marshall_install(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_trace.h linux-3.4.90/fs/yaffs2/yaffs_trace.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_trace.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_trace.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,57 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YTRACE_H__ ++#define __YTRACE_H__ ++ ++extern unsigned int yaffs_trace_mask; ++extern unsigned int yaffs_wr_attempts; ++ ++/* ++ * Tracing flags. ++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced. ++ */ ++ ++#define YAFFS_TRACE_OS 0x00000002 ++#define YAFFS_TRACE_ALLOCATE 0x00000004 ++#define YAFFS_TRACE_SCAN 0x00000008 ++#define YAFFS_TRACE_BAD_BLOCKS 0x00000010 ++#define YAFFS_TRACE_ERASE 0x00000020 ++#define YAFFS_TRACE_GC 0x00000040 ++#define YAFFS_TRACE_WRITE 0x00000080 ++#define YAFFS_TRACE_TRACING 0x00000100 ++#define YAFFS_TRACE_DELETION 0x00000200 ++#define YAFFS_TRACE_BUFFERS 0x00000400 ++#define YAFFS_TRACE_NANDACCESS 0x00000800 ++#define YAFFS_TRACE_GC_DETAIL 0x00001000 ++#define YAFFS_TRACE_SCAN_DEBUG 0x00002000 ++#define YAFFS_TRACE_MTD 0x00004000 ++#define YAFFS_TRACE_CHECKPOINT 0x00008000 ++ ++#define YAFFS_TRACE_VERIFY 0x00010000 ++#define YAFFS_TRACE_VERIFY_NAND 0x00020000 ++#define YAFFS_TRACE_VERIFY_FULL 0x00040000 ++#define YAFFS_TRACE_VERIFY_ALL 0x000f0000 ++ ++#define YAFFS_TRACE_SYNC 0x00100000 ++#define YAFFS_TRACE_BACKGROUND 0x00200000 ++#define YAFFS_TRACE_LOCK 0x00400000 ++#define YAFFS_TRACE_MOUNT 0x00800000 ++ ++#define YAFFS_TRACE_ERROR 0x40000000 ++#define YAFFS_TRACE_BUG 0x80000000 ++#define YAFFS_TRACE_ALWAYS 0xf0000000 ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_verify.c linux-3.4.90/fs/yaffs2/yaffs_verify.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_verify.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_verify.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,529 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include "yaffs_verify.h" ++#include "yaffs_trace.h" ++#include "yaffs_bitmap.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_nand.h" ++ ++int yaffs_skip_verification(struct yaffs_dev *dev) ++{ ++ (void) dev; ++ return !(yaffs_trace_mask & ++ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL)); ++} ++ ++static int yaffs_skip_full_verification(struct yaffs_dev *dev) ++{ ++ (void) dev; ++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL)); ++} ++ ++static int yaffs_skip_nand_verification(struct yaffs_dev *dev) ++{ ++ (void) dev; ++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND)); ++} ++ ++static const char * const block_state_name[] = { ++ "Unknown", ++ "Needs scan", ++ "Scanning", ++ "Empty", ++ "Allocating", ++ "Full", ++ "Dirty", ++ "Checkpoint", ++ "Collecting", ++ "Dead" ++}; ++ ++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n) ++{ ++ int actually_used; ++ int in_use; ++ ++ if (yaffs_skip_verification(dev)) ++ return; ++ ++ /* Report illegal runtime states */ ++ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Block %d has undefined state %d", ++ n, bi->block_state); ++ ++ switch (bi->block_state) { ++ case YAFFS_BLOCK_STATE_UNKNOWN: ++ case YAFFS_BLOCK_STATE_SCANNING: ++ case YAFFS_BLOCK_STATE_NEEDS_SCAN: ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Block %d has bad run-state %s", ++ n, block_state_name[bi->block_state]); ++ } ++ ++ /* Check pages in use and soft deletions are legal */ ++ ++ actually_used = bi->pages_in_use - bi->soft_del_pages; ++ ++ if (bi->pages_in_use < 0 || ++ bi->pages_in_use > dev->param.chunks_per_block || ++ bi->soft_del_pages < 0 || ++ bi->soft_del_pages > dev->param.chunks_per_block || ++ actually_used < 0 || actually_used > dev->param.chunks_per_block) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Block %d has illegal values pages_in_used %d soft_del_pages %d", ++ n, bi->pages_in_use, bi->soft_del_pages); ++ ++ /* Check chunk bitmap legal */ ++ in_use = yaffs_count_chunk_bits(dev, n); ++ if (in_use != bi->pages_in_use) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d", ++ n, bi->pages_in_use, in_use); ++} ++ ++void yaffs_verify_collected_blk(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi, int n) ++{ ++ yaffs_verify_blk(dev, bi, n); ++ ++ /* After collection the block should be in the erased state */ ++ ++ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING && ++ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "Block %d is in state %d after gc, should be erased", ++ n, bi->block_state); ++ } ++} ++ ++void yaffs_verify_blocks(struct yaffs_dev *dev) ++{ ++ int i; ++ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES]; ++ int illegal_states = 0; ++ ++ if (yaffs_skip_verification(dev)) ++ return; ++ ++ memset(state_count, 0, sizeof(state_count)); ++ ++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i); ++ yaffs_verify_blk(dev, bi, i); ++ ++ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES) ++ state_count[bi->block_state]++; ++ else ++ illegal_states++; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary"); ++ ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "%d blocks have illegal states", ++ illegal_states); ++ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Too many allocating blocks"); ++ ++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++) ++ 
yaffs_trace(YAFFS_TRACE_VERIFY, ++ "%s %d blocks", ++ block_state_name[i], state_count[i]); ++ ++ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT]) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Checkpoint block count wrong dev %d count %d", ++ dev->blocks_in_checkpt, ++ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]); ++ ++ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY]) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Erased block count wrong dev %d count %d", ++ dev->n_erased_blocks, ++ state_count[YAFFS_BLOCK_STATE_EMPTY]); ++ ++ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Too many collecting blocks %d (max is 1)", ++ state_count[YAFFS_BLOCK_STATE_COLLECTING]); ++} ++ ++/* ++ * Verify the object header. oh must be valid, but obj and tags may be NULL in ++ * which case those tests will not be performed. ++ */ ++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh, ++ struct yaffs_ext_tags *tags, int parent_check) ++{ ++ if (obj && yaffs_skip_verification(obj->my_dev)) ++ return; ++ ++ if (!(tags && obj && oh)) { ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Verifying object header tags %p obj %p oh %p", ++ tags, obj, oh); ++ return; ++ } ++ ++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN || ++ oh->type > YAFFS_OBJECT_TYPE_MAX) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header type is illegal value 0x%x", ++ tags->obj_id, oh->type); ++ ++ if (tags->obj_id != obj->obj_id) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header mismatch obj_id %d", ++ tags->obj_id, obj->obj_id); ++ ++ /* ++ * Check that the object's parent ids match if parent_check requested. ++ * ++ * Tests do not apply to the root object. ++ */ ++ ++ if (parent_check && tags->obj_id > 1 && !obj->parent) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header mismatch parent_id %d obj->parent is NULL", ++ tags->obj_id, oh->parent_obj_id); ++ ++ if (parent_check && obj->parent && ++ oh->parent_obj_id != obj->parent->obj_id && ++ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED || ++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED)) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header mismatch parent_id %d parent_obj_id %d", ++ tags->obj_id, oh->parent_obj_id, ++ obj->parent->obj_id); ++ ++ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */ ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header name is NULL", ++ obj->obj_id); ++ ++ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */ ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d header name is 0xff", ++ obj->obj_id); ++} ++ ++void yaffs_verify_file(struct yaffs_obj *obj) ++{ ++ u32 x; ++ int required_depth; ++ int actual_depth; ++ int last_chunk; ++ u32 offset_in_chunk; ++ u32 the_chunk; ++ ++ u32 i; ++ struct yaffs_dev *dev; ++ struct yaffs_ext_tags tags; ++ struct yaffs_tnode *tn; ++ u32 obj_id; ++ ++ if (!obj) ++ return; ++ ++ if (yaffs_skip_verification(obj->my_dev)) ++ return; ++ ++ dev = obj->my_dev; ++ obj_id = obj->obj_id; ++ ++ ++ /* Check file size is consistent with tnode depth */ ++ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size, ++ &last_chunk, &offset_in_chunk); ++ last_chunk++; ++ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS; ++ required_depth = 0; ++ while (x > 0) { ++ x >>= YAFFS_TNODES_INTERNAL_BITS; ++ required_depth++; ++ } ++ ++ actual_depth = obj->variant.file_variant.top_level; ++ ++ /* Check that the chunks in the tnode tree are all correct. ++ * We do this by scanning through the tnode tree and ++ * checking the tags for every chunk match. 
++ */ ++ ++ if (yaffs_skip_nand_verification(dev)) ++ return; ++ ++ for (i = 1; i <= last_chunk; i++) { ++ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i); ++ ++ if (!tn) ++ continue; ++ ++ the_chunk = yaffs_get_group_base(dev, tn, i); ++ if (the_chunk > 0) { ++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL, ++ &tags); ++ if (tags.obj_id != obj_id || tags.chunk_id != i) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)", ++ obj_id, i, the_chunk, ++ tags.obj_id, tags.chunk_id); ++ } ++ } ++} ++ ++void yaffs_verify_link(struct yaffs_obj *obj) ++{ ++ if (obj && yaffs_skip_verification(obj->my_dev)) ++ return; ++ ++ /* Verify sane equivalent object */ ++} ++ ++void yaffs_verify_symlink(struct yaffs_obj *obj) ++{ ++ if (obj && yaffs_skip_verification(obj->my_dev)) ++ return; ++ ++ /* Verify symlink string */ ++} ++ ++void yaffs_verify_special(struct yaffs_obj *obj) ++{ ++ if (obj && yaffs_skip_verification(obj->my_dev)) ++ return; ++} ++ ++void yaffs_verify_obj(struct yaffs_obj *obj) ++{ ++ struct yaffs_dev *dev; ++ u32 chunk_min; ++ u32 chunk_max; ++ u32 chunk_id_ok; ++ u32 chunk_in_range; ++ u32 chunk_wrongly_deleted; ++ u32 chunk_valid; ++ ++ if (!obj) ++ return; ++ ++ if (obj->being_created) ++ return; ++ ++ dev = obj->my_dev; ++ ++ if (yaffs_skip_verification(dev)) ++ return; ++ ++ /* Check sane object header chunk */ ++ ++ chunk_min = dev->internal_start_block * dev->param.chunks_per_block; ++ chunk_max = ++ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1; ++ ++ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min && ++ ((unsigned)(obj->hdr_chunk)) <= chunk_max); ++ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0); ++ chunk_valid = chunk_in_range && ++ yaffs_check_chunk_bit(dev, ++ obj->hdr_chunk / dev->param.chunks_per_block, ++ obj->hdr_chunk % dev->param.chunks_per_block); ++ chunk_wrongly_deleted = chunk_in_range && !chunk_valid; ++ ++ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted)) ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d has chunk_id %d %s %s", ++ obj->obj_id, obj->hdr_chunk, ++ chunk_id_ok ? "" : ",out of range", ++ chunk_wrongly_deleted ? 
",marked as deleted" : ""); ++ ++ if (chunk_valid && !yaffs_skip_nand_verification(dev)) { ++ struct yaffs_ext_tags tags; ++ struct yaffs_obj_hdr *oh; ++ u8 *buffer = yaffs_get_temp_buffer(dev); ++ ++ oh = (struct yaffs_obj_hdr *)buffer; ++ ++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags); ++ ++ yaffs_verify_oh(obj, oh, &tags, 1); ++ ++ yaffs_release_temp_buffer(dev, buffer); ++ } ++ ++ /* Verify it has a parent */ ++ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) { ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d has parent pointer %p which does not look like an object", ++ obj->obj_id, obj->parent); ++ } ++ ++ /* Verify parent is a directory */ ++ if (obj->parent && ++ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d's parent is not a directory (type %d)", ++ obj->obj_id, obj->parent->variant_type); ++ } ++ ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ yaffs_verify_file(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ yaffs_verify_symlink(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ yaffs_verify_dir(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ yaffs_verify_link(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ yaffs_verify_special(obj); ++ break; ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ default: ++ yaffs_trace(YAFFS_TRACE_VERIFY, ++ "Obj %d has illegaltype %d", ++ obj->obj_id, obj->variant_type); ++ break; ++ } ++} ++ ++void yaffs_verify_objects(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj; ++ int i; ++ struct list_head *lh; ++ ++ if (yaffs_skip_verification(dev)) ++ return; ++ ++ /* Iterate through the objects in each hash entry */ ++ ++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { ++ list_for_each(lh, &dev->obj_bucket[i].list) { ++ obj = list_entry(lh, struct yaffs_obj, hash_link); ++ yaffs_verify_obj(obj); ++ } ++ } ++} ++ ++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj) ++{ ++ struct list_head *lh; ++ struct yaffs_obj *list_obj; ++ int count = 0; ++ ++ if (!obj) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify"); ++ BUG(); ++ return; ++ } ++ ++ if (yaffs_skip_verification(obj->my_dev)) ++ return; ++ ++ if (!obj->parent) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent"); ++ BUG(); ++ return; ++ } ++ ++ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory"); ++ BUG(); ++ } ++ ++ /* Iterate through the objects in each hash entry */ ++ ++ list_for_each(lh, &obj->parent->variant.dir_variant.children) { ++ list_obj = list_entry(lh, struct yaffs_obj, siblings); ++ yaffs_verify_obj(list_obj); ++ if (obj == list_obj) ++ count++; ++ } ++ ++ if (count != 1) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Object in directory %d times", ++ count); ++ BUG(); ++ } ++} ++ ++void yaffs_verify_dir(struct yaffs_obj *directory) ++{ ++ struct list_head *lh; ++ struct yaffs_obj *list_obj; ++ ++ if (!directory) { ++ BUG(); ++ return; ++ } ++ ++ if (yaffs_skip_full_verification(directory->my_dev)) ++ return; ++ ++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Directory has wrong type: %d", ++ directory->variant_type); ++ BUG(); ++ } ++ ++ /* Iterate through the objects in each hash entry */ ++ ++ list_for_each(lh, &directory->variant.dir_variant.children) { ++ list_obj = list_entry(lh, struct yaffs_obj, siblings); ++ if (list_obj->parent != directory) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Object in directory 
list has wrong parent %p", ++ list_obj->parent); ++ BUG(); ++ } ++ yaffs_verify_obj_in_dir(list_obj); ++ } ++} ++ ++static int yaffs_free_verification_failures; ++ ++void yaffs_verify_free_chunks(struct yaffs_dev *dev) ++{ ++ int counted; ++ int difference; ++ ++ if (yaffs_skip_verification(dev)) ++ return; ++ ++ counted = yaffs_count_free_chunks(dev); ++ ++ difference = dev->n_free_chunks - counted; ++ ++ if (difference) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Freechunks verification failure %d %d %d", ++ dev->n_free_chunks, counted, difference); ++ yaffs_free_verification_failures++; ++ } ++} ++ ++int yaffs_verify_file_sane(struct yaffs_obj *in) ++{ ++ (void) in; ++ return YAFFS_OK; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_verify.h linux-3.4.90/fs/yaffs2/yaffs_verify.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_verify.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_verify.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,43 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_VERIFY_H__ ++#define __YAFFS_VERIFY_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, ++ int n); ++void yaffs_verify_collected_blk(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi, int n); ++void yaffs_verify_blocks(struct yaffs_dev *dev); ++ ++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh, ++ struct yaffs_ext_tags *tags, int parent_check); ++void yaffs_verify_file(struct yaffs_obj *obj); ++void yaffs_verify_link(struct yaffs_obj *obj); ++void yaffs_verify_symlink(struct yaffs_obj *obj); ++void yaffs_verify_special(struct yaffs_obj *obj); ++void yaffs_verify_obj(struct yaffs_obj *obj); ++void yaffs_verify_objects(struct yaffs_dev *dev); ++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj); ++void yaffs_verify_dir(struct yaffs_obj *directory); ++void yaffs_verify_free_chunks(struct yaffs_dev *dev); ++ ++int yaffs_verify_file_sane(struct yaffs_obj *obj); ++ ++int yaffs_skip_verification(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_vfs.c linux-3.4.90/fs/yaffs2/yaffs_vfs.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_vfs.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_vfs.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,3600 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * Acknowledgements: ++ * Luc van OostenRyck for numerous patches. ++ * Nick Bane for numerous patches. ++ * Nick Bane for 2.5/2.6 integration. ++ * Andras Toth for mknod rdev issue. ++ * Michael Fischer for finding the problem with inode inconsistency. ++ * Some code bodily lifted from JFFS ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++/* ++ * ++ * This is the file system front-end to YAFFS that hooks it up to ++ * the VFS. ++ * ++ * Special notes: ++ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with ++ * this superblock ++ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this ++ * superblock ++ * >> inode->u.generic_ip points to the associated struct yaffs_obj. ++ */ ++ ++/* ++ * There are two variants of the VFS glue code. This variant should compile ++ * for any version of Linux. ++ */ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)) ++#define YAFFS_COMPILE_BACKGROUND ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)) ++#define YAFFS_COMPILE_FREEZER ++#endif ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) ++#define YAFFS_COMPILE_EXPORTFS ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) ++#define YAFFS_USE_SETATTR_COPY ++#define YAFFS_USE_TRUNCATE_SETSIZE ++#endif ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)) ++#define YAFFS_HAS_EVICT_INODE ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) ++#define YAFFS_NEW_FOLLOW_LINK 1 ++#else ++#define YAFFS_NEW_FOLLOW_LINK 0 ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) ++#define YAFFS_HAS_WRITE_SUPER ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++ ++#if (YAFFS_NEW_FOLLOW_LINK == 1) ++#include ++#endif ++ ++#ifdef YAFFS_COMPILE_EXPORTFS ++#include ++#endif ++ ++#ifdef YAFFS_COMPILE_BACKGROUND ++#include ++#include ++#endif ++#ifdef YAFFS_COMPILE_FREEZER ++#include ++#endif ++ ++#include ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ ++#include ++ ++#define UnlockPage(p) unlock_page(p) ++#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags) ++ ++/* FIXME: use sb->s_id instead ? 
*/ ++#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf) ++ ++#else ++ ++#include ++#define BDEVNAME_SIZE 0 ++#define yaffs_devname(sb, buf) kdevname(sb->s_dev) ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)) ++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */ ++#define __user ++#endif ++ ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) ++#define YPROC_ROOT (&proc_root) ++#else ++#define YPROC_ROOT NULL ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) ++#define Y_INIT_TIMER(a) init_timer(a) ++#else ++#define Y_INIT_TIMER(a) init_timer_on_stack(a) ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)) ++#define YAFFS_USE_WRITE_BEGIN_END 1 ++#else ++#define YAFFS_USE_WRITE_BEGIN_END 0 ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) ++#define YAFFS_SUPER_HAS_DIRTY ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) ++#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0) ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)) ++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size) ++{ ++ uint64_t result = partition_size; ++ do_div(result, block_size); ++ return (uint32_t) result; ++} ++#else ++#define YCALCBLOCKS(s, b) ((s)/(b)) ++#endif ++ ++#include ++#include ++ ++#include "yportenv.h" ++#include "yaffs_trace.h" ++#include "yaffs_guts.h" ++#include "yaffs_attribs.h" ++ ++#include "yaffs_linux.h" ++ ++#include "yaffs_mtdif.h" ++#include "yaffs_packedtags2.h" ++#include "yaffs_getblockinfo.h" ++ ++unsigned int yaffs_trace_mask = ++ YAFFS_TRACE_BAD_BLOCKS | ++ YAFFS_TRACE_ALWAYS | ++ 0; ++ ++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS; ++unsigned int yaffs_auto_checkpoint = 1; ++unsigned int yaffs_gc_control = 1; ++unsigned int yaffs_bg_enable = 1; ++unsigned int yaffs_auto_select = 1; ++/* Module Parameters */ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++module_param(yaffs_trace_mask, uint, 0644); ++module_param(yaffs_wr_attempts, uint, 0644); ++module_param(yaffs_auto_checkpoint, uint, 0644); ++module_param(yaffs_gc_control, uint, 0644); ++module_param(yaffs_bg_enable, uint, 0644); ++#else ++MODULE_PARM(yaffs_trace_mask, "i"); ++MODULE_PARM(yaffs_wr_attempts, "i"); ++MODULE_PARM(yaffs_auto_checkpoint, "i"); ++MODULE_PARM(yaffs_gc_control, "i"); ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) ++/* use iget and read_inode */ ++#define Y_IGET(sb, inum) iget((sb), (inum)) ++ ++#else ++/* Call local equivalent */ ++#define YAFFS_USE_OWN_IGET ++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum)) ++ ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) ++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private) ++#else ++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip) ++#endif ++ ++#define yaffs_inode_to_obj(iptr) \ ++ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr))) ++#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode) ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info) ++#else ++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp) ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) ++#define Y_CLEAR_INODE(i) clear_inode(i) ++#else ++#define Y_CLEAR_INODE(i) end_writeback(i) ++#endif ++ ++ ++#define update_dir_time(dir) do {\ ++ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \ ++ } while (0) ++ ++static void yaffs_fill_inode_from_obj(struct inode *inode, ++ struct yaffs_obj 
*obj); ++ ++ ++static void yaffs_gross_lock(struct yaffs_dev *dev) ++{ ++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current); ++ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock)); ++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current); ++} ++ ++static void yaffs_gross_unlock(struct yaffs_dev *dev) ++{ ++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current); ++ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock)); ++} ++ ++ ++static int yaffs_readpage_nolock(struct file *f, struct page *pg) ++{ ++ /* Lifted from jffs2 */ ++ ++ struct yaffs_obj *obj; ++ unsigned char *pg_buf; ++ int ret; ++ loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT; ++ struct yaffs_dev *dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readpage_nolock at %lld, size %08x", ++ (long long)pos, ++ (unsigned)PAGE_CACHE_SIZE); ++ ++ obj = yaffs_dentry_to_obj(f->f_dentry); ++ ++ dev = obj->my_dev; ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ BUG_ON(!PageLocked(pg)); ++#else ++ if (!PageLocked(pg)) ++ PAGE_BUG(pg); ++#endif ++ ++ pg_buf = kmap(pg); ++ /* FIXME: Can kmap fail? */ ++ ++ yaffs_gross_lock(dev); ++ ++ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE); ++ ++ yaffs_gross_unlock(dev); ++ ++ if (ret >= 0) ++ ret = 0; ++ ++ if (ret) { ++ ClearPageUptodate(pg); ++ SetPageError(pg); ++ } else { ++ SetPageUptodate(pg); ++ ClearPageError(pg); ++ } ++ ++ flush_dcache_page(pg); ++ kunmap(pg); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done"); ++ return ret; ++} ++ ++static int yaffs_readpage_unlock(struct file *f, struct page *pg) ++{ ++ int ret = yaffs_readpage_nolock(f, pg); ++ UnlockPage(pg); ++ return ret; ++} ++ ++static int yaffs_readpage(struct file *f, struct page *pg) ++{ ++ int ret; ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage"); ++ ret = yaffs_readpage_unlock(f, pg); ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done"); ++ return ret; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) ++#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid()) ++#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid()) ++#else ++#define YCRED_FSUID() YCRED(current)->fsuid ++#define YCRED_FSGID() YCRED(current)->fsgid ++ ++static inline uid_t i_uid_read(const struct inode *inode) ++{ ++ return inode->i_uid; ++} ++ ++static inline gid_t i_gid_read(const struct inode *inode) ++{ ++ return inode->i_gid; ++} ++ ++static inline void i_uid_write(struct inode *inode, uid_t uid) ++{ ++ inode->i_uid = uid; ++} ++ ++static inline void i_gid_write(struct inode *inode, gid_t gid) ++{ ++ inode->i_gid = gid; ++} ++#endif ++ ++static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val) ++{ ++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev); ++ ++ if (lc) ++ lc->dirty = val; ++ ++# ifdef YAFFS_SUPER_HAS_DIRTY ++ { ++ struct super_block *sb = lc->super; ++ ++ if (sb) ++ sb->s_dirt = val; ++ } ++#endif ++ ++} ++ ++static void yaffs_set_super_dirty(struct yaffs_dev *dev) ++{ ++ yaffs_set_super_dirty_val(dev, 1); ++} ++ ++static void yaffs_clear_super_dirty(struct yaffs_dev *dev) ++{ ++ yaffs_set_super_dirty_val(dev, 0); ++} ++ ++static int yaffs_check_super_dirty(struct yaffs_dev *dev) ++{ ++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev); ++ ++ if (lc && lc->dirty) ++ return 1; ++ ++# ifdef YAFFS_SUPER_HAS_DIRTY ++ { ++ struct super_block *sb = lc->super; ++ ++ if (sb && sb->s_dirt) ++ return 1; ++ } ++#endif ++ return 0; ++ ++} ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs_writepage(struct page *page, struct 
writeback_control *wbc) ++#else ++static int yaffs_writepage(struct page *page) ++#endif ++{ ++ struct yaffs_dev *dev; ++ struct address_space *mapping = page->mapping; ++ struct inode *inode; ++ unsigned long end_index; ++ char *buffer; ++ struct yaffs_obj *obj; ++ int n_written = 0; ++ unsigned n_bytes; ++ loff_t i_size; ++ ++ if (!mapping) ++ BUG(); ++ inode = mapping->host; ++ if (!inode) ++ BUG(); ++ i_size = i_size_read(inode); ++ ++ end_index = i_size >> PAGE_CACHE_SHIFT; ++ ++ if (page->index < end_index) ++ n_bytes = PAGE_CACHE_SIZE; ++ else { ++ n_bytes = i_size & (PAGE_CACHE_SIZE - 1); ++ ++ if (page->index > end_index || !n_bytes) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_writepage at %lld, inode size = %lld!!", ++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, ++ inode->i_size); ++ yaffs_trace(YAFFS_TRACE_OS, ++ " -> don't care!!"); ++ ++ zero_user_segment(page, 0, PAGE_CACHE_SIZE); ++ set_page_writeback(page); ++ unlock_page(page); ++ end_page_writeback(page); ++ return 0; ++ } ++ } ++ ++ if (n_bytes != PAGE_CACHE_SIZE) ++ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE); ++ ++ get_page(page); ++ ++ buffer = kmap(page); ++ ++ obj = yaffs_inode_to_obj(inode); ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_writepage at %lld, size %08x", ++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "writepag0: obj = %lld, ino = %lld", ++ obj->variant.file_variant.file_size, inode->i_size); ++ ++ n_written = yaffs_wr_file(obj, buffer, ++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0); ++ ++ yaffs_set_super_dirty(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "writepag1: obj = %lld, ino = %lld", ++ obj->variant.file_variant.file_size, inode->i_size); ++ ++ yaffs_gross_unlock(dev); ++ ++ kunmap(page); ++ set_page_writeback(page); ++ unlock_page(page); ++ end_page_writeback(page); ++ put_page(page); ++ ++ return (n_written == n_bytes) ? 0 : -ENOSPC; ++} ++ ++/* Space holding and freeing is done to ensure we have space available for write_begin/end */ ++/* For now we just assume few parallel writes and check against a small number. */ ++/* Todo: need to do this with a counter to handle parallel reads better */ ++ ++static ssize_t yaffs_hold_space(struct file *f) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ ++ int n_free_chunks; ++ ++ obj = yaffs_dentry_to_obj(f->f_dentry); ++ ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ n_free_chunks = yaffs_get_n_free_chunks(dev); ++ ++ yaffs_gross_unlock(dev); ++ ++ return (n_free_chunks > 20) ? 
1 : 0; ++} ++ ++static void yaffs_release_space(struct file *f) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ ++ obj = yaffs_dentry_to_obj(f->f_dentry); ++ ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ yaffs_gross_unlock(dev); ++} ++ ++#if (YAFFS_USE_WRITE_BEGIN_END > 0) ++static int yaffs_write_begin(struct file *filp, struct address_space *mapping, ++ loff_t pos, unsigned len, unsigned flags, ++ struct page **pagep, void **fsdata) ++{ ++ struct page *pg = NULL; ++ pgoff_t index = pos >> PAGE_CACHE_SHIFT; ++ ++ int ret = 0; ++ int space_held = 0; ++ ++ /* Get a page */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++ pg = grab_cache_page_write_begin(mapping, index, flags); ++#else ++ pg = __grab_cache_page(mapping, index); ++#endif ++ ++ *pagep = pg; ++ if (!pg) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ yaffs_trace(YAFFS_TRACE_OS, ++ "start yaffs_write_begin index %d(%x) uptodate %d", ++ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0); ++ ++ /* Get fs space */ ++ space_held = yaffs_hold_space(filp); ++ ++ if (!space_held) { ++ ret = -ENOSPC; ++ goto out; ++ } ++ ++ /* Update page if required */ ++ ++ if (!Page_Uptodate(pg)) ++ ret = yaffs_readpage_nolock(filp, pg); ++ ++ if (ret) ++ goto out; ++ ++ /* Happy path return */ ++ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok"); ++ ++ return 0; ++ ++out: ++ yaffs_trace(YAFFS_TRACE_OS, ++ "end yaffs_write_begin fail returning %d", ret); ++ if (space_held) ++ yaffs_release_space(filp); ++ if (pg) { ++ unlock_page(pg); ++ page_cache_release(pg); ++ } ++ return ret; ++} ++ ++#else ++ ++static int yaffs_prepare_write(struct file *f, struct page *pg, ++ unsigned offset, unsigned to) ++{ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepair_write"); ++ ++ if (!Page_Uptodate(pg)) ++ return yaffs_readpage_nolock(f, pg); ++ return 0; ++} ++#endif ++ ++ ++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n, ++ loff_t * pos) ++{ ++ struct yaffs_obj *obj; ++ int n_written; ++ loff_t ipos; ++ struct inode *inode; ++ struct yaffs_dev *dev; ++ ++ obj = yaffs_dentry_to_obj(f->f_dentry); ++ ++ if (!obj) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_file_write: hey obj is null!"); ++ return -EINVAL; ++ } ++ ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ inode = f->f_dentry->d_inode; ++ ++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) ++ ipos = inode->i_size; ++ else ++ ipos = *pos; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld", ++ (unsigned)n, (unsigned)n, obj->obj_id, ipos); ++ ++ n_written = yaffs_wr_file(obj, buf, ipos, n, 0); ++ ++ yaffs_set_super_dirty(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_file_write: %d(%x) bytes written", ++ (unsigned)n, (unsigned)n); ++ ++ if (n_written > 0) { ++ ipos += n_written; ++ *pos = ipos; ++ if (ipos > inode->i_size) { ++ inode->i_size = ipos; ++ inode->i_blocks = (ipos + 511) >> 9; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_file_write size updated to %lld bytes, %d blocks", ++ ipos, (int)(inode->i_blocks)); ++ } ++ ++ } ++ yaffs_gross_unlock(dev); ++ return (n_written == 0) && (n > 0) ? 
-ENOSPC : n_written; ++} ++ ++ ++#if (YAFFS_USE_WRITE_BEGIN_END > 0) ++static int yaffs_write_end(struct file *filp, struct address_space *mapping, ++ loff_t pos, unsigned len, unsigned copied, ++ struct page *pg, void *fsdadata) ++{ ++ int ret = 0; ++ void *addr, *kva; ++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1); ++ ++ kva = kmap(pg); ++ addr = kva + offset_into_page; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_write_end addr %p pos %lld n_bytes %d", ++ addr, pos, copied); ++ ++ ret = yaffs_file_write(filp, addr, copied, &pos); ++ ++ if (ret != copied) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_write_end not same size ret %d copied %d", ++ ret, copied); ++ SetPageError(pg); ++ } ++ ++ kunmap(pg); ++ ++ yaffs_release_space(filp); ++ unlock_page(pg); ++ page_cache_release(pg); ++ return ret; ++} ++#else ++ ++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset, ++ unsigned to) ++{ ++ void *addr, *kva; ++ ++ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset; ++ int n_bytes = to - offset; ++ int n_written; ++ ++ kva = kmap(pg); ++ addr = kva + offset; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_commit_write addr %p pos %lld n_bytes %d", ++ addr, pos, n_bytes); ++ ++ n_written = yaffs_file_write(f, addr, n_bytes, &pos); ++ ++ if (n_written != n_bytes) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_commit_write not same size n_written %d n_bytes %d", ++ n_written, n_bytes); ++ SetPageError(pg); ++ } ++ kunmap(pg); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_commit_write returning %d", ++ n_written == n_bytes ? 0 : n_written); ++ ++ return n_written == n_bytes ? 0 : n_written; ++} ++#endif ++ ++static struct address_space_operations yaffs_file_address_operations = { ++ .readpage = yaffs_readpage, ++ .writepage = yaffs_writepage, ++#if (YAFFS_USE_WRITE_BEGIN_END > 0) ++ .write_begin = yaffs_write_begin, ++ .write_end = yaffs_write_end, ++#else ++ .prepare_write = yaffs_prepare_write, ++ .commit_write = yaffs_commit_write, ++#endif ++}; ++ ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static int yaffs_file_flush(struct file *file, fl_owner_t id) ++#else ++static int yaffs_file_flush(struct file *file) ++#endif ++{ ++ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry); ++ ++ struct yaffs_dev *dev = obj->my_dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_file_flush object %d (%s)", ++ obj->obj_id, ++ obj->dirty ? 
"dirty" : "clean"); ++ ++ yaffs_gross_lock(dev); ++ ++ yaffs_flush_file(obj, 1, 0); ++ ++ yaffs_gross_unlock(dev); ++ ++ return 0; ++} ++ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) ++static int yaffs_sync_object(struct file *file, int datasync) ++#else ++static int yaffs_sync_object(struct file *file, struct dentry *dentry, ++ int datasync) ++#endif ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) ++ struct dentry *dentry = file->f_path.dentry; ++#endif ++ ++ obj = yaffs_dentry_to_obj(dentry); ++ ++ dev = obj->my_dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, ++ "yaffs_sync_object"); ++ yaffs_gross_lock(dev); ++ yaffs_flush_file(obj, 1, datasync); ++ yaffs_gross_unlock(dev); ++ return 0; ++} ++ ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)) ++static const struct file_operations yaffs_file_operations = { ++ .read = do_sync_read, ++ .write = do_sync_write, ++ .aio_read = generic_file_aio_read, ++ .aio_write = generic_file_aio_write, ++ .mmap = generic_file_mmap, ++ .flush = yaffs_file_flush, ++ .fsync = yaffs_sync_object, ++ .splice_read = generic_file_splice_read, ++ .splice_write = generic_file_splice_write, ++ .llseek = generic_file_llseek, ++}; ++ ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) ++ ++static const struct file_operations yaffs_file_operations = { ++ .read = do_sync_read, ++ .write = do_sync_write, ++ .aio_read = generic_file_aio_read, ++ .aio_write = generic_file_aio_write, ++ .mmap = generic_file_mmap, ++ .flush = yaffs_file_flush, ++ .fsync = yaffs_sync_object, ++ .sendfile = generic_file_sendfile, ++}; ++ ++#else ++ ++static const struct file_operations yaffs_file_operations = { ++ .read = generic_file_read, ++ .write = generic_file_write, ++ .mmap = generic_file_mmap, ++ .flush = yaffs_file_flush, ++ .fsync = yaffs_sync_object, ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ .sendfile = generic_file_sendfile, ++#endif ++}; ++#endif ++ ++ ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) ++static void zero_user_segment(struct page *page, unsigned start, unsigned end) ++{ ++ void *kaddr = kmap_atomic(page, KM_USER0); ++ memset(kaddr + start, 0, end - start); ++ kunmap_atomic(kaddr, KM_USER0); ++ flush_dcache_page(page); ++} ++#endif ++ ++ ++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize) ++{ ++#ifdef YAFFS_USE_TRUNCATE_SETSIZE ++ truncate_setsize(inode, newsize); ++ return 0; ++#else ++ truncate_inode_pages(&inode->i_data, newsize); ++ return 0; ++#endif ++ ++} ++ ++ ++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr) ++{ ++#ifdef YAFFS_USE_SETATTR_COPY ++ setattr_copy(inode, attr); ++ return 0; ++#else ++ return inode_setattr(inode, attr); ++#endif ++ ++} ++ ++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr) ++{ ++ struct inode *inode = dentry->d_inode; ++ int error = 0; ++ struct yaffs_dev *dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_setattr of object %d", ++ yaffs_inode_to_obj(inode)->obj_id); ++#if 0 ++ /* Fail if a requested resize >= 2GB */ ++ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31)) ++ error = -EINVAL; ++#endif ++ ++ if (error == 0) ++ error = inode_change_ok(inode, attr); ++ if (error == 0) { ++ int result; ++ if (!error) { ++ error = yaffs_vfs_setattr(inode, attr); ++ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called"); ++ 
if (attr->ia_valid & ATTR_SIZE) { ++ yaffs_vfs_setsize(inode, attr->ia_size); ++ inode->i_blocks = (inode->i_size + 511) >> 9; ++ } ++ } ++ dev = yaffs_inode_to_obj(inode)->my_dev; ++ if (attr->ia_valid & ATTR_SIZE) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "resize to %d(%x)", ++ (int)(attr->ia_size), ++ (int)(attr->ia_size)); ++ } ++ yaffs_gross_lock(dev); ++ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr); ++ if (result == YAFFS_OK) { ++ error = 0; ++ } else { ++ error = -EPERM; ++ } ++ yaffs_gross_unlock(dev); ++ ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error); ++ ++ return error; ++} ++ ++static int yaffs_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ struct inode *inode = dentry->d_inode; ++ int error = 0; ++ struct yaffs_dev *dev; ++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id); ++ ++ if (error == 0) { ++ int result; ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ result = yaffs_set_xattrib(obj, name, value, size, flags); ++ if (result == YAFFS_OK) ++ error = 0; ++ else if (result < 0) ++ error = result; ++ yaffs_gross_unlock(dev); ++ ++ } ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error); ++ ++ return error; ++} ++ ++static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, ++ void *buff, size_t size) ++{ ++ struct inode *inode = dentry->d_inode; ++ int error = 0; ++ struct yaffs_dev *dev; ++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_getxattr \"%s\" from object %d", ++ name, obj->obj_id); ++ ++ if (error == 0) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ error = yaffs_get_xattrib(obj, name, buff, size); ++ yaffs_gross_unlock(dev); ++ ++ } ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error); ++ ++ return error; ++} ++ ++static int yaffs_removexattr(struct dentry *dentry, const char *name) ++{ ++ struct inode *inode = dentry->d_inode; ++ int error = 0; ++ struct yaffs_dev *dev; ++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_removexattr of object %d", obj->obj_id); ++ ++ if (error == 0) { ++ int result; ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ result = yaffs_remove_xattrib(obj, name); ++ if (result == YAFFS_OK) ++ error = 0; ++ else if (result < 0) ++ error = result; ++ yaffs_gross_unlock(dev); ++ ++ } ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_removexattr done returning %d", error); ++ ++ return error; ++} ++ ++static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size) ++{ ++ struct inode *inode = dentry->d_inode; ++ int error = 0; ++ struct yaffs_dev *dev; ++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_listxattr of object %d", obj->obj_id); ++ ++ if (error == 0) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ error = yaffs_list_xattrib(obj, buff, size); ++ yaffs_gross_unlock(dev); ++ ++ } ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_listxattr done returning %d", error); ++ ++ return error; ++} ++ ++ ++static const struct inode_operations yaffs_file_inode_operations = { ++ .setattr = yaffs_setattr, ++ .setxattr = yaffs_setxattr, ++ .getxattr = yaffs_getxattr, ++ .listxattr = yaffs_listxattr, ++ .removexattr = yaffs_removexattr, ++}; ++ ++ ++static int yaffs_readlink(struct dentry *dentry, char __user * buffer, ++ int buflen) ++{ ++ unsigned char *alias; ++ 
int ret; ++ ++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry)); ++ ++ yaffs_gross_unlock(dev); ++ ++ if (!alias) ++ return -ENOMEM; ++ ++ ret = vfs_readlink(dentry, buffer, buflen, alias); ++ kfree(alias); ++ return ret; ++} ++ ++#if (YAFFS_NEW_FOLLOW_LINK == 1) ++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ void *ret; ++#else ++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ int ret ++#endif ++ unsigned char *alias; ++ int ret_int = 0; ++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry)); ++ yaffs_gross_unlock(dev); ++ ++ if (!alias) { ++ ret_int = -ENOMEM; ++ goto out; ++ } ++#if (YAFFS_NEW_FOLLOW_LINK == 1) ++ nd_set_link(nd, alias); ++ ret = alias; ++out: ++ if (ret_int) ++ ret = ERR_PTR(ret_int); ++ return ret; ++#else ++ ret = vfs_follow_link(nd, alias); ++ kfree(alias); ++out: ++ if (ret_int) ++ ret = ret_int; ++ return ret; ++#endif ++} ++ ++ ++#ifdef YAFFS_HAS_PUT_INODE ++ ++/* For now put inode is just for debugging ++ * Put inode is called when the inode **structure** is put. ++ */ ++static void yaffs_put_inode(struct inode *inode) ++{ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_put_inode: ino %d, count %d"), ++ (int)inode->i_ino, atomic_read(&inode->i_count); ++ ++} ++#endif ++ ++#if (YAFFS_NEW_FOLLOW_LINK == 1) ++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias) ++{ ++ kfree(alias); ++} ++#endif ++ ++static const struct inode_operations yaffs_symlink_inode_operations = { ++ .readlink = yaffs_readlink, ++ .follow_link = yaffs_follow_link, ++#if (YAFFS_NEW_FOLLOW_LINK == 1) ++ .put_link = yaffs_put_link, ++#endif ++ .setattr = yaffs_setattr, ++ .setxattr = yaffs_setxattr, ++ .getxattr = yaffs_getxattr, ++ .listxattr = yaffs_listxattr, ++ .removexattr = yaffs_removexattr, ++}; ++ ++#ifdef YAFFS_USE_OWN_IGET ++ ++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino) ++{ ++ struct inode *inode; ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino); ++ ++ inode = iget_locked(sb, ino); ++ if (!inode) ++ return ERR_PTR(-ENOMEM); ++ if (!(inode->i_state & I_NEW)) ++ return inode; ++ ++ /* NB This is called as a side effect of other functions, but ++ * we had to release the lock to prevent deadlocks, so ++ * need to lock again. ++ */ ++ ++ yaffs_gross_lock(dev); ++ ++ obj = yaffs_find_by_number(dev, inode->i_ino); ++ ++ yaffs_fill_inode_from_obj(inode, obj); ++ ++ yaffs_gross_unlock(dev); ++ ++ unlock_new_inode(inode); ++ return inode; ++} ++ ++#else ++ ++static void yaffs_read_inode(struct inode *inode) ++{ ++ /* NB This is called as a side effect of other functions, but ++ * we had to release the lock to prevent deadlocks, so ++ * need to lock again. 
++ */ ++ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_read_inode for %d", (int)inode->i_ino); ++ ++ if (current != yaffs_dev_to_lc(dev)->readdir_process) ++ yaffs_gross_lock(dev); ++ ++ obj = yaffs_find_by_number(dev, inode->i_ino); ++ ++ yaffs_fill_inode_from_obj(inode, obj); ++ ++ if (current != yaffs_dev_to_lc(dev)->readdir_process) ++ yaffs_gross_unlock(dev); ++} ++ ++#endif ++ ++ ++ ++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev, ++ struct yaffs_obj *obj) ++{ ++ struct inode *inode; ++ ++ if (!sb) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_get_inode for NULL super_block!!"); ++ return NULL; ++ ++ } ++ ++ if (!obj) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_get_inode for NULL object!!"); ++ return NULL; ++ ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_get_inode for object %d", obj->obj_id); ++ ++ inode = Y_IGET(sb, obj->obj_id); ++ if (IS_ERR(inode)) ++ return NULL; ++ ++ /* NB Side effect: iget calls back to yaffs_read_inode(). */ ++ /* iget also increments the inode's i_count */ ++ /* NB You can't be holding gross_lock or deadlock will happen! */ ++ ++ return inode; ++} ++ ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) ++#define YCRED(x) x ++#else ++#define YCRED(x) (x->cred) ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, ++ dev_t rdev) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, ++ dev_t rdev) ++#else ++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, ++ int rdev) ++#endif ++{ ++ struct inode *inode; ++ ++ struct yaffs_obj *obj = NULL; ++ struct yaffs_dev *dev; ++ ++ struct yaffs_obj *parent = yaffs_inode_to_obj(dir); ++ ++ int error = -ENOSPC; ++ uid_t uid = YCRED_FSUID(); ++ gid_t gid = ++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID(); ++ ++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode)) ++ mode |= S_ISGID; ++ ++ if (parent) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_mknod: parent object %d type %d", ++ parent->obj_id, parent->variant_type); ++ } else { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_mknod: could not get parent object"); ++ return -EPERM; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_mknod: making oject for %s, mode %x dev %x", ++ dentry->d_name.name, mode, rdev); ++ ++ dev = parent->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ switch (mode & S_IFMT) { ++ default: ++ /* Special (socket, fifo, device...) */ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special"); ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ obj = ++ yaffs_create_special(parent, dentry->d_name.name, mode, uid, ++ gid, old_encode_dev(rdev)); ++#else ++ obj = ++ yaffs_create_special(parent, dentry->d_name.name, mode, uid, ++ gid, rdev); ++#endif ++ break; ++ case S_IFREG: /* file */ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file"); ++ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid, ++ gid); ++ break; ++ case S_IFDIR: /* directory */ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory"); ++ obj = yaffs_create_dir(parent, dentry->d_name.name, mode, ++ uid, gid); ++ break; ++ case S_IFLNK: /* symlink */ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink"); ++ obj = NULL; /* Do we ever get here? 
*/ ++ break; ++ } ++ ++ /* Can not call yaffs_get_inode() with gross lock held */ ++ yaffs_gross_unlock(dev); ++ ++ if (obj) { ++ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj); ++ d_instantiate(dentry, inode); ++ update_dir_time(dir); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_mknod created object %d count = %d", ++ obj->obj_id, atomic_read(&inode->i_count)); ++ error = 0; ++ yaffs_fill_inode_from_obj(dir, parent); ++ } else { ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object"); ++ error = -ENOMEM; ++ } ++ ++ return error; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ++#else ++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ++#endif ++{ ++ int ret_val; ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir"); ++ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0); ++ return ret_val; ++} ++ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ bool dummy) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ struct nameidata *n) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, ++ struct nameidata *n) ++#else ++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode) ++#endif ++{ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create"); ++ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int dummy) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, ++ struct nameidata *n) ++#else ++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry) ++#endif ++{ ++ struct yaffs_obj *obj; ++ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */ ++ ++ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev; ++ ++ if (current != yaffs_dev_to_lc(dev)->readdir_process) ++ yaffs_gross_lock(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s", ++ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name); ++ ++ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name); ++ ++ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */ ++ ++ /* Can't hold gross lock when calling yaffs_get_inode() */ ++ if (current != yaffs_dev_to_lc(dev)->readdir_process) ++ yaffs_gross_unlock(dev); ++ ++ if (obj) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_lookup found %d", obj->obj_id); ++ ++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); ++ } else { ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found"); ++ ++ } ++ ++/* added NCB for 2.5/6 compatability - forces add even if inode is ++ * NULL which creates dentry hash */ ++ d_add(dentry, inode); ++ ++ return NULL; ++} ++ ++/* ++ * Create a link... 
++ */ ++static int yaffs_link(struct dentry *old_dentry, struct inode *dir, ++ struct dentry *dentry) ++{ ++ struct inode *inode = old_dentry->d_inode; ++ struct yaffs_obj *obj = NULL; ++ struct yaffs_obj *link = NULL; ++ struct yaffs_dev *dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link"); ++ ++ obj = yaffs_inode_to_obj(inode); ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */ ++ link = ++ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name, ++ obj); ++ ++ if (link) { ++ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj)); ++ d_instantiate(dentry, old_dentry->d_inode); ++ atomic_inc(&old_dentry->d_inode->i_count); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_link link count %d i_count %d", ++ old_dentry->d_inode->i_nlink, ++ atomic_read(&old_dentry->d_inode->i_count)); ++ } ++ ++ yaffs_gross_unlock(dev); ++ ++ if (link) { ++ update_dir_time(dir); ++ return 0; ++ } ++ ++ return -EPERM; ++} ++ ++static int yaffs_symlink(struct inode *dir, struct dentry *dentry, ++ const char *symname) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ uid_t uid = YCRED_FSUID(); ++ gid_t gid = ++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID(); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink"); ++ ++ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) > ++ YAFFS_MAX_NAME_LENGTH) ++ return -ENAMETOOLONG; ++ ++ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) > ++ YAFFS_MAX_ALIAS_LENGTH) ++ return -ENAMETOOLONG; ++ ++ dev = yaffs_inode_to_obj(dir)->my_dev; ++ yaffs_gross_lock(dev); ++ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name, ++ S_IFLNK | S_IRWXUGO, uid, gid, symname); ++ yaffs_gross_unlock(dev); ++ ++ if (obj) { ++ struct inode *inode; ++ ++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); ++ d_instantiate(dentry, inode); ++ update_dir_time(dir); ++ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK"); ++ return 0; ++ } else { ++ yaffs_trace(YAFFS_TRACE_OS, "symlink not created"); ++ } ++ ++ return -ENOMEM; ++} ++ ++/* ++ * The VFS layer already does all the dentry stuff for rename. ++ * ++ * NB: POSIX says you can rename an object over an old object of the same name ++ */ ++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry, ++ struct inode *new_dir, struct dentry *new_dentry) ++{ ++ struct yaffs_dev *dev; ++ int ret_val = YAFFS_FAIL; ++ struct yaffs_obj *target; ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename"); ++ dev = yaffs_inode_to_obj(old_dir)->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ /* Check if the target is an existing directory that is not empty. 
*/ ++ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir), ++ new_dentry->d_name.name); ++ ++ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY && ++ !list_empty(&target->variant.dir_variant.children)) { ++ ++ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir"); ++ ++ ret_val = YAFFS_FAIL; ++ } else { ++ /* Now does unlinking internally using shadowing mechanism */ ++ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj"); ++ ++ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir), ++ old_dentry->d_name.name, ++ yaffs_inode_to_obj(new_dir), ++ new_dentry->d_name.name); ++ } ++ yaffs_gross_unlock(dev); ++ ++ if (ret_val == YAFFS_OK) { ++ if (target) ++ inode_dec_link_count(new_dentry->d_inode); ++ ++ update_dir_time(old_dir); ++ if (old_dir != new_dir) ++ update_dir_time(new_dir); ++ return 0; ++ } else { ++ return -ENOTEMPTY; ++ } ++} ++ ++ ++ ++ ++static int yaffs_unlink(struct inode *dir, struct dentry *dentry) ++{ ++ int ret_val; ++ ++ struct yaffs_dev *dev; ++ struct yaffs_obj *obj; ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s", ++ (int)(dir->i_ino), dentry->d_name.name); ++ obj = yaffs_inode_to_obj(dir); ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ ret_val = yaffs_unlinker(obj, dentry->d_name.name); ++ ++ if (ret_val == YAFFS_OK) { ++ inode_dec_link_count(dentry->d_inode); ++ dir->i_version++; ++ yaffs_gross_unlock(dev); ++ update_dir_time(dir); ++ return 0; ++ } ++ yaffs_gross_unlock(dev); ++ return -ENOTEMPTY; ++} ++ ++ ++ ++static const struct inode_operations yaffs_dir_inode_operations = { ++ .create = yaffs_create, ++ .lookup = yaffs_lookup, ++ .link = yaffs_link, ++ .unlink = yaffs_unlink, ++ .symlink = yaffs_symlink, ++ .mkdir = yaffs_mkdir, ++ .rmdir = yaffs_unlink, ++ .mknod = yaffs_mknod, ++ .rename = yaffs_rename, ++ .setattr = yaffs_setattr, ++ .setxattr = yaffs_setxattr, ++ .getxattr = yaffs_getxattr, ++ .listxattr = yaffs_listxattr, ++ .removexattr = yaffs_removexattr, ++}; ++ ++/*-----------------------------------------------------------------*/ ++/* Directory search context allows us to unlock access to yaffs during ++ * filldir without causing problems with the directory being modified. ++ * This is similar to the tried and tested mechanism used in yaffs direct. ++ * ++ * A search context iterates along a doubly linked list of siblings in the ++ * directory. If the iterating object is deleted then this would corrupt ++ * the list iteration, likely causing a crash. The search context avoids ++ * this by using the remove_obj_fn to move the search context to the ++ * next object before the object is deleted. ++ * ++ * Many readdirs (and thus seach conexts) may be alive simulateously so ++ * each struct yaffs_dev has a list of these. ++ * ++ * A seach context lives for the duration of a readdir. ++ * ++ * All these functions must be called while yaffs is locked. ++ */ ++ ++struct yaffs_search_context { ++ struct yaffs_dev *dev; ++ struct yaffs_obj *dir_obj; ++ struct yaffs_obj *next_return; ++ struct list_head others; ++}; ++ ++/* ++ * yaffs_new_search() creates a new search context, initialises it and ++ * adds it to the device's search context list. ++ * ++ * Called at start of readdir. 
++ */ ++static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir) ++{ ++ struct yaffs_dev *dev = dir->my_dev; ++ struct yaffs_search_context *sc = ++ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS); ++ if (sc) { ++ sc->dir_obj = dir; ++ sc->dev = dev; ++ if (list_empty(&sc->dir_obj->variant.dir_variant.children)) ++ sc->next_return = NULL; ++ else ++ sc->next_return = ++ list_entry(dir->variant.dir_variant.children.next, ++ struct yaffs_obj, siblings); ++ INIT_LIST_HEAD(&sc->others); ++ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts)); ++ } ++ return sc; ++} ++ ++/* ++ * yaffs_search_end() disposes of a search context and cleans up. ++ */ ++static void yaffs_search_end(struct yaffs_search_context *sc) ++{ ++ if (sc) { ++ list_del(&sc->others); ++ kfree(sc); ++ } ++} ++ ++/* ++ * yaffs_search_advance() moves a search context to the next object. ++ * Called when the search iterates or when an object removal causes ++ * the search context to be moved to the next object. ++ */ ++static void yaffs_search_advance(struct yaffs_search_context *sc) ++{ ++ if (!sc) ++ return; ++ ++ if (sc->next_return == NULL || ++ list_empty(&sc->dir_obj->variant.dir_variant.children)) ++ sc->next_return = NULL; ++ else { ++ struct list_head *next = sc->next_return->siblings.next; ++ ++ if (next == &sc->dir_obj->variant.dir_variant.children) ++ sc->next_return = NULL; /* end of list */ ++ else ++ sc->next_return = ++ list_entry(next, struct yaffs_obj, siblings); ++ } ++} ++ ++/* ++ * yaffs_remove_obj_callback() is called when an object is unlinked. ++ * We check open search contexts and advance any which are currently ++ * on the object being iterated. ++ */ ++static void yaffs_remove_obj_callback(struct yaffs_obj *obj) ++{ ++ ++ struct list_head *i; ++ struct yaffs_search_context *sc; ++ struct list_head *search_contexts = ++ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts); ++ ++ /* Iterate through the directory search contexts. ++ * If any are currently on the object being removed, then advance ++ * the search context to the next object to prevent a hanging pointer. ++ */ ++ list_for_each(i, search_contexts) { ++ sc = list_entry(i, struct yaffs_search_context, others); ++ if (sc->next_return == obj) ++ yaffs_search_advance(sc); ++ } ++ ++} ++ ++ ++/*-----------------------------------------------------------------*/ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) ++static int yaffs_readdir(struct file *file, struct dir_context *ctx) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ struct yaffs_search_context *sc; ++ struct inode *inode = file->f_dentry->d_inode; ++ unsigned long offset, curoffs; ++ struct yaffs_obj *l; ++ int ret_val = 0; ++ ++ char name[YAFFS_MAX_NAME_LENGTH + 1]; ++ ++ obj = yaffs_dentry_to_obj(file->f_dentry); ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ yaffs_dev_to_lc(dev)->readdir_process = current; ++ ++ offset = ctx->pos; ++ ++ sc = yaffs_new_search(obj); ++ if (!sc) { ++ ret_val = -ENOMEM; ++ goto out; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: starting at %d", (int)offset); ++ ++ if (offset == 0) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: entry . ino %d", ++ (int)inode->i_ino); ++ yaffs_gross_unlock(dev); ++ if (!dir_emit_dot(file, ctx)) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ yaffs_gross_lock(dev); ++ offset++; ++ ctx->pos++; ++ } ++ if (offset == 1) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: entry .. 
ino %d", ++ (int)file->f_dentry->d_parent->d_inode->i_ino); ++ yaffs_gross_unlock(dev); ++ if (!dir_emit_dotdot(file, ctx)) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ yaffs_gross_lock(dev); ++ offset++; ++ ctx->pos++; ++ } ++ ++ curoffs = 1; ++ ++ /* If the directory has changed since the open or last call to ++ readdir, rewind to after the 2 canned entries. */ ++ if (file->f_version != inode->i_version) { ++ offset = 2; ++ ctx->pos = offset; ++ file->f_version = inode->i_version; ++ } ++ ++ while (sc->next_return) { ++ curoffs++; ++ l = sc->next_return; ++ if (curoffs >= offset) { ++ int this_inode = yaffs_get_obj_inode(l); ++ int this_type = yaffs_get_obj_type(l); ++ ++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: %s inode %d", ++ name, yaffs_get_obj_inode(l)); ++ ++ yaffs_gross_unlock(dev); ++ ++ if (!dir_emit(ctx, name, strlen(name), ++ this_inode, this_type) < 0) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ ++ yaffs_gross_lock(dev); ++ ++ offset++; ++ ctx->pos++; ++ } ++ yaffs_search_advance(sc); ++ } ++ ++out: ++ yaffs_search_end(sc); ++ yaffs_dev_to_lc(dev)->readdir_process = NULL; ++ yaffs_gross_unlock(dev); ++ ++ return ret_val; ++} ++#else ++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ struct yaffs_search_context *sc; ++ struct inode *inode = f->f_dentry->d_inode; ++ unsigned long offset, curoffs; ++ struct yaffs_obj *l; ++ int ret_val = 0; ++ ++ char name[YAFFS_MAX_NAME_LENGTH + 1]; ++ ++ obj = yaffs_dentry_to_obj(f->f_dentry); ++ dev = obj->my_dev; ++ ++ yaffs_gross_lock(dev); ++ ++ yaffs_dev_to_lc(dev)->readdir_process = current; ++ ++ offset = f->f_pos; ++ ++ sc = yaffs_new_search(obj); ++ if (!sc) { ++ ret_val = -ENOMEM; ++ goto out; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: starting at %d", (int)offset); ++ ++ if (offset == 0) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: entry . ino %d", ++ (int)inode->i_ino); ++ yaffs_gross_unlock(dev); ++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ yaffs_gross_lock(dev); ++ offset++; ++ f->f_pos++; ++ } ++ if (offset == 1) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: entry .. ino %d", ++ (int)f->f_dentry->d_parent->d_inode->i_ino); ++ yaffs_gross_unlock(dev); ++ if (filldir(dirent, "..", 2, offset, ++ f->f_dentry->d_parent->d_inode->i_ino, ++ DT_DIR) < 0) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ yaffs_gross_lock(dev); ++ offset++; ++ f->f_pos++; ++ } ++ ++ curoffs = 1; ++ ++ /* If the directory has changed since the open or last call to ++ readdir, rewind to after the 2 canned entries. 
*/ ++ if (f->f_version != inode->i_version) { ++ offset = 2; ++ f->f_pos = offset; ++ f->f_version = inode->i_version; ++ } ++ ++ while (sc->next_return) { ++ curoffs++; ++ l = sc->next_return; ++ if (curoffs >= offset) { ++ int this_inode = yaffs_get_obj_inode(l); ++ int this_type = yaffs_get_obj_type(l); ++ ++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_readdir: %s inode %d", ++ name, yaffs_get_obj_inode(l)); ++ ++ yaffs_gross_unlock(dev); ++ ++ if (filldir(dirent, ++ name, ++ strlen(name), ++ offset, this_inode, this_type) < 0) { ++ yaffs_gross_lock(dev); ++ goto out; ++ } ++ ++ yaffs_gross_lock(dev); ++ ++ offset++; ++ f->f_pos++; ++ } ++ yaffs_search_advance(sc); ++ } ++ ++out: ++ yaffs_search_end(sc); ++ yaffs_dev_to_lc(dev)->readdir_process = NULL; ++ yaffs_gross_unlock(dev); ++ ++ return ret_val; ++} ++#endif ++ ++static const struct file_operations yaffs_dir_operations = { ++ .read = generic_read_dir, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) ++ .iterate = yaffs_readdir, ++#else ++ .readdir = yaffs_readdir, ++#endif ++ .fsync = yaffs_sync_object, ++ .llseek = generic_file_llseek, ++}; ++ ++static void yaffs_fill_inode_from_obj(struct inode *inode, ++ struct yaffs_obj *obj) ++{ ++ if (inode && obj) { ++ ++ /* Check mode against the variant type and attempt to repair if broken. */ ++ u32 mode = obj->yst_mode; ++ switch (obj->variant_type) { ++ case YAFFS_OBJECT_TYPE_FILE: ++ if (!S_ISREG(mode)) { ++ obj->yst_mode &= ~S_IFMT; ++ obj->yst_mode |= S_IFREG; ++ } ++ ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ if (!S_ISLNK(mode)) { ++ obj->yst_mode &= ~S_IFMT; ++ obj->yst_mode |= S_IFLNK; ++ } ++ ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ if (!S_ISDIR(mode)) { ++ obj->yst_mode &= ~S_IFMT; ++ obj->yst_mode |= S_IFDIR; ++ } ++ ++ break; ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ default: ++ /* TODO? 
*/ ++ break; ++ } ++ ++ inode->i_flags |= S_NOATIME; ++ ++ inode->i_ino = obj->obj_id; ++ inode->i_mode = obj->yst_mode; ++ i_uid_write(inode, obj->yst_uid); ++ i_gid_write(inode, obj->yst_gid); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) ++ inode->i_blksize = inode->i_sb->s_blocksize; ++#endif ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ ++ inode->i_rdev = old_decode_dev(obj->yst_rdev); ++ inode->i_atime.tv_sec = (time_t) (obj->yst_atime); ++ inode->i_atime.tv_nsec = 0; ++ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime; ++ inode->i_mtime.tv_nsec = 0; ++ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime; ++ inode->i_ctime.tv_nsec = 0; ++#else ++ inode->i_rdev = obj->yst_rdev; ++ inode->i_atime = obj->yst_atime; ++ inode->i_mtime = obj->yst_mtime; ++ inode->i_ctime = obj->yst_ctime; ++#endif ++ inode->i_size = yaffs_get_obj_length(obj); ++ inode->i_blocks = (inode->i_size + 511) >> 9; ++ ++ set_nlink(inode, yaffs_get_obj_link_count(obj)); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d", ++ inode->i_mode, i_uid_read(inode), i_gid_read(inode), ++ inode->i_size, atomic_read(&inode->i_count)); ++ ++ switch (obj->yst_mode & S_IFMT) { ++ default: /* fifo, device or socket */ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ init_special_inode(inode, obj->yst_mode, ++ old_decode_dev(obj->yst_rdev)); ++#else ++ init_special_inode(inode, obj->yst_mode, ++ (dev_t) (obj->yst_rdev)); ++#endif ++ break; ++ case S_IFREG: /* file */ ++ inode->i_op = &yaffs_file_inode_operations; ++ inode->i_fop = &yaffs_file_operations; ++ inode->i_mapping->a_ops = ++ &yaffs_file_address_operations; ++ break; ++ case S_IFDIR: /* directory */ ++ inode->i_op = &yaffs_dir_inode_operations; ++ inode->i_fop = &yaffs_dir_operations; ++ break; ++ case S_IFLNK: /* symlink */ ++ inode->i_op = &yaffs_symlink_inode_operations; ++ break; ++ } ++ ++ yaffs_inode_to_obj_lv(inode) = obj; ++ ++ obj->my_inode = inode; ++ ++ } else { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_fill_inode invalid parameters"); ++ } ++ ++} ++ ++ ++ ++/* ++ * yaffs background thread functions . ++ * yaffs_bg_thread_fn() the thread function ++ * yaffs_bg_start() launches the background thread. ++ * yaffs_bg_stop() cleans up the background thread. ++ * ++ * NB: ++ * The thread should only run after the yaffs is initialised ++ * The thread should be stopped before yaffs is unmounted. ++ * The thread should not do any writing while the fs is in read only. 
++ */ ++ ++static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev) ++{ ++ unsigned erased_chunks = ++ dev->n_erased_blocks * dev->param.chunks_per_block; ++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); ++ unsigned scattered = 0; /* Free chunks not in an erased block */ ++ ++ if (erased_chunks < dev->n_free_chunks) ++ scattered = (dev->n_free_chunks - erased_chunks); ++ ++ if (!context->bg_running) ++ return 0; ++ else if (scattered < (dev->param.chunks_per_block * 2)) ++ return 0; ++ else if (erased_chunks > dev->n_free_chunks / 2) ++ return 0; ++ else if (erased_chunks > dev->n_free_chunks / 4) ++ return 1; ++ else ++ return 2; ++} ++ ++#ifdef YAFFS_COMPILE_BACKGROUND ++ ++void yaffs_background_waker(unsigned long data) ++{ ++ wake_up_process((struct task_struct *)data); ++} ++ ++static int yaffs_bg_thread_fn(void *data) ++{ ++ struct yaffs_dev *dev = (struct yaffs_dev *)data; ++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); ++ unsigned long now = jiffies; ++ unsigned long next_dir_update = now; ++ unsigned long next_gc = now; ++ unsigned long expires; ++ unsigned int urgency; ++ ++ int gc_result; ++ struct timer_list timer; ++ ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, ++ "yaffs_background starting for dev %p", (void *)dev); ++ ++#ifdef YAFFS_COMPILE_FREEZER ++ set_freezable(); ++#endif ++ while (context->bg_running) { ++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background"); ++ ++ if (kthread_should_stop()) ++ break; ++ ++#ifdef YAFFS_COMPILE_FREEZER ++ if (try_to_freeze()) ++ continue; ++#endif ++ yaffs_gross_lock(dev); ++ ++ now = jiffies; ++ ++ if (time_after(now, next_dir_update) && yaffs_bg_enable) { ++ yaffs_update_dirty_dirs(dev); ++ next_dir_update = now + HZ; ++ } ++ ++ if (time_after(now, next_gc) && yaffs_bg_enable) { ++ if (!dev->is_checkpointed) { ++ urgency = yaffs_bg_gc_urgency(dev); ++ gc_result = yaffs_bg_gc(dev, urgency); ++ if (urgency > 1) ++ next_gc = now + HZ / 20 + 1; ++ else if (urgency > 0) ++ next_gc = now + HZ / 10 + 1; ++ else ++ next_gc = now + HZ * 2; ++ } else { ++ /* ++ * gc not running so set to next_dir_update ++ * to cut down on wake ups ++ */ ++ next_gc = next_dir_update; ++ } ++ } ++ yaffs_gross_unlock(dev); ++#if 1 ++ expires = next_dir_update; ++ if (time_before(next_gc, expires)) ++ expires = next_gc; ++ if (time_before(expires, now)) ++ expires = now + HZ; ++ ++ Y_INIT_TIMER(&timer); ++ timer.expires = expires + 1; ++ timer.data = (unsigned long)current; ++ timer.function = yaffs_background_waker; ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ add_timer(&timer); ++ schedule(); ++ del_timer_sync(&timer); ++#else ++ msleep(10); ++#endif ++ } ++ ++ return 0; ++} ++ ++static int yaffs_bg_start(struct yaffs_dev *dev) ++{ ++ int retval = 0; ++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev); ++ ++ if (dev->read_only) ++ return -1; ++ ++ context->bg_running = 1; ++ ++ context->bg_thread = kthread_run(yaffs_bg_thread_fn, ++ (void *)dev, "yaffs-bg-%d", ++ context->mount_id); ++ ++ if (IS_ERR(context->bg_thread)) { ++ retval = PTR_ERR(context->bg_thread); ++ context->bg_thread = NULL; ++ context->bg_running = 0; ++ } ++ return retval; ++} ++ ++static void yaffs_bg_stop(struct yaffs_dev *dev) ++{ ++ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev); ++ ++ ctxt->bg_running = 0; ++ ++ if (ctxt->bg_thread) { ++ kthread_stop(ctxt->bg_thread); ++ ctxt->bg_thread = NULL; ++ } ++} ++#else ++static int yaffs_bg_thread_fn(void *data) ++{ ++ return 0; ++} ++ ++static int yaffs_bg_start(struct yaffs_dev *dev) ++{ ++ 
return 0; ++} ++ ++static void yaffs_bg_stop(struct yaffs_dev *dev) ++{ ++} ++#endif ++ ++ ++static void yaffs_flush_inodes(struct super_block *sb) ++{ ++ struct inode *iptr; ++ struct yaffs_obj *obj; ++ ++ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) { ++ obj = yaffs_inode_to_obj(iptr); ++ if (obj) { ++ yaffs_trace(YAFFS_TRACE_OS, ++ "flushing obj %d", ++ obj->obj_id); ++ yaffs_flush_file(obj, 1, 0); ++ } ++ } ++} ++ ++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint) ++{ ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++ if (!dev) ++ return; ++ ++ yaffs_flush_inodes(sb); ++ yaffs_update_dirty_dirs(dev); ++ yaffs_flush_whole_cache(dev); ++ if (do_checkpoint) ++ yaffs_checkpoint_save(dev); ++} ++ ++static LIST_HEAD(yaffs_context_list); ++struct mutex yaffs_context_lock; ++ ++static void yaffs_put_super(struct super_block *sb) ++{ ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS, ++ "yaffs_put_super"); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND, ++ "Shutting down yaffs background thread"); ++ yaffs_bg_stop(dev); ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND, ++ "yaffs background thread shut down"); ++ ++ yaffs_gross_lock(dev); ++ ++ yaffs_flush_super(sb, 1); ++ ++ yaffs_deinitialise(dev); ++ ++ yaffs_gross_unlock(dev); ++ ++ mutex_lock(&yaffs_context_lock); ++ list_del_init(&(yaffs_dev_to_lc(dev)->context_list)); ++ mutex_unlock(&yaffs_context_lock); ++ ++ if (yaffs_dev_to_lc(dev)->spare_buffer) { ++ kfree(yaffs_dev_to_lc(dev)->spare_buffer); ++ yaffs_dev_to_lc(dev)->spare_buffer = NULL; ++ } ++ ++ kfree(dev); ++ ++ yaffs_put_mtd_device(mtd); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS, ++ "yaffs_put_super done"); ++} ++ ++ ++static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev) ++{ ++ return yaffs_gc_control; ++} ++ ++ ++#ifdef YAFFS_COMPILE_EXPORTFS ++ ++static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino, ++ uint32_t generation) ++{ ++ return Y_IGET(sb, ino); ++} ++ ++static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb, ++ struct fid *fid, int fh_len, ++ int fh_type) ++{ ++ return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ++ yaffs2_nfs_get_inode); ++} ++ ++static struct dentry *yaffs2_fh_to_parent(struct super_block *sb, ++ struct fid *fid, int fh_len, ++ int fh_type) ++{ ++ return generic_fh_to_parent(sb, fid, fh_len, fh_type, ++ yaffs2_nfs_get_inode); ++} ++ ++struct dentry *yaffs2_get_parent(struct dentry *dentry) ++{ ++ ++ struct super_block *sb = dentry->d_inode->i_sb; ++ struct dentry *parent = ERR_PTR(-ENOENT); ++ struct inode *inode; ++ unsigned long parent_ino; ++ struct yaffs_obj *d_obj; ++ struct yaffs_obj *parent_obj; ++ ++ d_obj = yaffs_inode_to_obj(dentry->d_inode); ++ ++ if (d_obj) { ++ parent_obj = d_obj->parent; ++ if (parent_obj) { ++ parent_ino = yaffs_get_obj_inode(parent_obj); ++ inode = Y_IGET(sb, parent_ino); ++ ++ if (IS_ERR(inode)) { ++ parent = ERR_CAST(inode); ++ } else { ++ parent = d_obtain_alias(inode); ++ if (!IS_ERR(parent)) { ++ parent = ERR_PTR(-ENOMEM); ++ iput(inode); ++ } ++ } ++ } ++ } ++ ++ return parent; ++} ++ ++/* Just declare a zero structure as a NULL value implies ++ * using the default functions of exportfs. 
++ */ ++ ++static struct export_operations yaffs_export_ops = { ++ .fh_to_dentry = yaffs2_fh_to_dentry, ++ .fh_to_parent = yaffs2_fh_to_parent, ++ .get_parent = yaffs2_get_parent, ++}; ++ ++#endif ++ ++static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj) ++{ ++ /* Clear the association between the inode and ++ * the struct yaffs_obj. ++ */ ++ obj->my_inode = NULL; ++ yaffs_inode_to_obj_lv(inode) = NULL; ++ ++ /* If the object freeing was deferred, then the real ++ * free happens now. ++ * This should fix the inode inconsistency problem. ++ */ ++ yaffs_handle_defered_free(obj); ++} ++ ++#ifdef YAFFS_HAS_EVICT_INODE ++/* yaffs_evict_inode combines into one operation what was previously done in ++ * yaffs_clear_inode() and yaffs_delete_inode() ++ * ++ */ ++static void yaffs_evict_inode(struct inode *inode) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ int deleteme = 0; ++ ++ obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_evict_inode: ino %d, count %d %s", ++ (int)inode->i_ino, atomic_read(&inode->i_count), ++ obj ? "object exists" : "null object"); ++ ++ if (!inode->i_nlink && !is_bad_inode(inode)) ++ deleteme = 1; ++ truncate_inode_pages(&inode->i_data, 0); ++ Y_CLEAR_INODE(inode); ++ ++ if (deleteme && obj) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ yaffs_del_obj(obj); ++ yaffs_gross_unlock(dev); ++ } ++ if (obj) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ yaffs_unstitch_obj(inode, obj); ++ yaffs_gross_unlock(dev); ++ } ++} ++#else ++ ++/* clear is called to tell the fs to release any per-inode data it holds. ++ * The object might still exist on disk and is just being thrown out of the cache ++ * or else the object has actually been deleted and we're being called via ++ * the chain ++ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode() ++ */ ++ ++static void yaffs_clear_inode(struct inode *inode) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_dev *dev; ++ ++ obj = yaffs_inode_to_obj(inode); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_clear_inode: ino %d, count %d %s", ++ (int)inode->i_ino, atomic_read(&inode->i_count), ++ obj ? "object exists" : "null object"); ++ ++ if (obj) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ yaffs_unstitch_obj(inode, obj); ++ yaffs_gross_unlock(dev); ++ } ++ ++} ++ ++/* delete is called when the link count is zero and the inode ++ * is put (ie. nobody wants to know about it anymore, time to ++ * delete the file). ++ * NB Must call clear_inode() ++ */ ++static void yaffs_delete_inode(struct inode *inode) ++{ ++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode); ++ struct yaffs_dev *dev; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_delete_inode: ino %d, count %d %s", ++ (int)inode->i_ino, atomic_read(&inode->i_count), ++ obj ? 
"object exists" : "null object"); ++ ++ if (obj) { ++ dev = obj->my_dev; ++ yaffs_gross_lock(dev); ++ yaffs_del_obj(obj); ++ yaffs_gross_unlock(dev); ++ } ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) ++ truncate_inode_pages(&inode->i_data, 0); ++#endif ++ clear_inode(inode); ++} ++#endif ++ ++ ++ ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf) ++{ ++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev; ++ struct super_block *sb = dentry->d_sb; ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf) ++{ ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++#else ++static int yaffs_statfs(struct super_block *sb, struct statfs *buf) ++{ ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++#endif ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs"); ++ ++ yaffs_gross_lock(dev); ++ ++ buf->f_type = YAFFS_MAGIC; ++ buf->f_bsize = sb->s_blocksize; ++ buf->f_namelen = 255; ++ ++ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) { ++ /* Do this if chunk size is not a power of 2 */ ++ ++ uint64_t bytes_in_dev; ++ uint64_t bytes_free; ++ ++ bytes_in_dev = ++ ((uint64_t) ++ ((dev->param.end_block - dev->param.start_block + ++ 1))) * ((uint64_t) (dev->param.chunks_per_block * ++ dev->data_bytes_per_chunk)); ++ ++ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */ ++ buf->f_blocks = bytes_in_dev; ++ ++ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) * ++ ((uint64_t) (dev->data_bytes_per_chunk)); ++ ++ do_div(bytes_free, sb->s_blocksize); ++ ++ buf->f_bfree = bytes_free; ++ ++ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) { ++ ++ buf->f_blocks = ++ (dev->param.end_block - dev->param.start_block + 1) * ++ dev->param.chunks_per_block / ++ (sb->s_blocksize / dev->data_bytes_per_chunk); ++ buf->f_bfree = ++ yaffs_get_n_free_chunks(dev) / ++ (sb->s_blocksize / dev->data_bytes_per_chunk); ++ } else { ++ buf->f_blocks = ++ (dev->param.end_block - dev->param.start_block + 1) * ++ dev->param.chunks_per_block * ++ (dev->data_bytes_per_chunk / sb->s_blocksize); ++ ++ buf->f_bfree = ++ yaffs_get_n_free_chunks(dev) * ++ (dev->data_bytes_per_chunk / sb->s_blocksize); ++ } ++ ++ buf->f_files = 0; ++ buf->f_ffree = 0; ++ buf->f_bavail = buf->f_bfree; ++ ++ yaffs_gross_unlock(dev); ++ return 0; ++} ++ ++ ++ ++static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint) ++{ ++ ++ struct yaffs_dev *dev = yaffs_super_to_dev(sb); ++ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4); ++ unsigned gc_urgent = yaffs_bg_gc_urgency(dev); ++ int do_checkpoint; ++ int dirty = yaffs_check_super_dirty(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, ++ "yaffs_do_sync_fs: gc-urgency %d %s %s%s", ++ gc_urgent, ++ dirty ? "dirty" : "clean", ++ request_checkpoint ? "checkpoint requested" : "no checkpoint", ++ oneshot_checkpoint ? 
" one-shot" : ""); ++ ++ yaffs_gross_lock(dev); ++ do_checkpoint = ((request_checkpoint && !gc_urgent) || ++ oneshot_checkpoint) && !dev->is_checkpointed; ++ ++ if (dirty || do_checkpoint) { ++ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint); ++ yaffs_clear_super_dirty(dev); ++ if (oneshot_checkpoint) ++ yaffs_auto_checkpoint &= ~4; ++ } ++ yaffs_gross_unlock(dev); ++ ++ return 0; ++} ++ ++ ++#ifdef YAFFS_HAS_WRITE_SUPER ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static void yaffs_write_super(struct super_block *sb) ++#else ++static int yaffs_write_super(struct super_block *sb) ++#endif ++{ ++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, ++ "yaffs_write_super %s", ++ request_checkpoint ? " checkpt" : ""); ++ ++ yaffs_do_sync_fs(sb, request_checkpoint); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) ++ return 0; ++#endif ++} ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static int yaffs_sync_fs(struct super_block *sb, int wait) ++#else ++static int yaffs_sync_fs(struct super_block *sb) ++#endif ++{ ++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1); ++ ++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, ++ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : ""); ++ ++ yaffs_do_sync_fs(sb, request_checkpoint); ++ ++ return 0; ++} ++ ++ ++ ++static const struct super_operations yaffs_super_ops = { ++ .statfs = yaffs_statfs, ++ ++#ifndef YAFFS_USE_OWN_IGET ++ .read_inode = yaffs_read_inode, ++#endif ++#ifdef YAFFS_HAS_PUT_INODE ++ .put_inode = yaffs_put_inode, ++#endif ++ .put_super = yaffs_put_super, ++#ifdef YAFFS_HAS_EVICT_INODE ++ .evict_inode = yaffs_evict_inode, ++#else ++ .delete_inode = yaffs_delete_inode, ++ .clear_inode = yaffs_clear_inode, ++#endif ++ .sync_fs = yaffs_sync_fs, ++#ifdef YAFFS_HAS_WRITE_SUPER ++ .write_super = yaffs_write_super, ++#endif ++}; ++ ++struct yaffs_options { ++ int inband_tags; ++ int skip_checkpoint_read; ++ int skip_checkpoint_write; ++ int no_cache; ++ int tags_ecc_on; ++ int tags_ecc_overridden; ++ int lazy_loading_enabled; ++ int lazy_loading_overridden; ++ int empty_lost_and_found; ++ int empty_lost_and_found_overridden; ++ int disable_summary; ++}; ++ ++#define MAX_OPT_LEN 30 ++static int yaffs_parse_options(struct yaffs_options *options, ++ const char *options_str) ++{ ++ char cur_opt[MAX_OPT_LEN + 1]; ++ int p; ++ int error = 0; ++ ++ /* Parse through the options which is a comma seperated list */ ++ ++ while (options_str && *options_str && !error) { ++ memset(cur_opt, 0, MAX_OPT_LEN + 1); ++ p = 0; ++ ++ while (*options_str == ',') ++ options_str++; ++ ++ while (*options_str && *options_str != ',') { ++ if (p < MAX_OPT_LEN) { ++ cur_opt[p] = *options_str; ++ p++; ++ } ++ options_str++; ++ } ++ ++ if (!strcmp(cur_opt, "inband-tags")) { ++ options->inband_tags = 1; ++ } else if (!strcmp(cur_opt, "tags-ecc-off")) { ++ options->tags_ecc_on = 0; ++ options->tags_ecc_overridden = 1; ++ } else if (!strcmp(cur_opt, "tags-ecc-on")) { ++ options->tags_ecc_on = 1; ++ options->tags_ecc_overridden = 1; ++ } else if (!strcmp(cur_opt, "lazy-loading-off")) { ++ options->lazy_loading_enabled = 0; ++ options->lazy_loading_overridden = 1; ++ } else if (!strcmp(cur_opt, "lazy-loading-on")) { ++ options->lazy_loading_enabled = 1; ++ options->lazy_loading_overridden = 1; ++ } else if (!strcmp(cur_opt, "disable-summary")) { ++ options->disable_summary = 1; ++ } else if (!strcmp(cur_opt, 
"empty-lost-and-found-off")) { ++ options->empty_lost_and_found = 0; ++ options->empty_lost_and_found_overridden = 1; ++ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) { ++ options->empty_lost_and_found = 1; ++ options->empty_lost_and_found_overridden = 1; ++ } else if (!strcmp(cur_opt, "no-cache")) { ++ options->no_cache = 1; ++ } else if (!strcmp(cur_opt, "no-checkpoint-read")) { ++ options->skip_checkpoint_read = 1; ++ } else if (!strcmp(cur_opt, "no-checkpoint-write")) { ++ options->skip_checkpoint_write = 1; ++ } else if (!strcmp(cur_opt, "no-checkpoint")) { ++ options->skip_checkpoint_read = 1; ++ options->skip_checkpoint_write = 1; ++ } else { ++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n", ++ cur_opt); ++ error = 1; ++ } ++ } ++ ++ return error; ++} ++ ++ ++static struct dentry *yaffs_make_root(struct inode *inode) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) ++ struct dentry *root = d_alloc_root(inode); ++ ++ if (!root) ++ iput(inode); ++ ++ return root; ++#else ++ return d_make_root(inode); ++#endif ++} ++ ++ ++ ++ ++static struct super_block *yaffs_internal_read_super(int yaffs_version, ++ struct super_block *sb, ++ void *data, int silent) ++{ ++ int n_blocks; ++ struct inode *inode = NULL; ++ struct dentry *root; ++ struct yaffs_dev *dev = 0; ++ char devname_buf[BDEVNAME_SIZE + 1]; ++ struct mtd_info *mtd; ++ int err; ++ char *data_str = (char *)data; ++ struct yaffs_linux_context *context = NULL; ++ struct yaffs_param *param; ++ ++ int read_only = 0; ++ int inband_tags = 0; ++ ++ struct yaffs_options options; ++ ++ unsigned mount_id; ++ int found; ++ struct yaffs_linux_context *context_iterator; ++ struct list_head *l; ++ ++ if (!sb) { ++ printk(KERN_INFO "yaffs: sb is NULL\n"); ++ return NULL; ++ } ++ ++ sb->s_magic = YAFFS_MAGIC; ++ sb->s_op = &yaffs_super_ops; ++ sb->s_flags |= MS_NOATIME; ++ ++ read_only = ((sb->s_flags & MS_RDONLY) != 0); ++ ++#ifdef YAFFS_COMPILE_EXPORTFS ++ sb->s_export_op = &yaffs_export_ops; ++#endif ++ ++ if (!sb->s_dev) ++ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n"); ++ else if (!yaffs_devname(sb, devname_buf)) ++ printk(KERN_INFO "yaffs: devname is NULL\n"); ++ else ++ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n", ++ sb->s_dev, ++ yaffs_devname(sb, devname_buf), read_only ? 
"ro" : "rw"); ++ ++ if (!data_str) ++ data_str = ""; ++ ++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str); ++ ++ memset(&options, 0, sizeof(options)); ++ ++ if (yaffs_parse_options(&options, data_str)) { ++ /* Option parsing failed */ ++ return NULL; ++ } ++ ++ sb->s_blocksize = PAGE_CACHE_SIZE; ++ sb->s_blocksize_bits = PAGE_CACHE_SHIFT; ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_read_super: Using yaffs%d", yaffs_version); ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_read_super: block size %d", (int)(sb->s_blocksize)); ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: Attempting MTD mount of %u.%u,\"%s\"", ++ MAJOR(sb->s_dev), MINOR(sb->s_dev), ++ yaffs_devname(sb, devname_buf)); ++ ++ /* Get the device */ ++ mtd = get_mtd_device(NULL, MINOR(sb->s_dev)); ++ if (IS_ERR(mtd)) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs: MTD device %u either not valid or unavailable", ++ MINOR(sb->s_dev)); ++ return NULL; ++ } ++ ++ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2"); ++ yaffs_version = 2; ++ } ++ ++ /* Added NCB 26/5/2006 for completeness */ ++ if (yaffs_version == 2 && !options.inband_tags ++ && WRITE_SIZE(mtd) == 512) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1"); ++ yaffs_version = 1; ++ } ++ ++ if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) || ++ options.inband_tags) ++ inband_tags = 1; ++ ++ if(yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0) ++ return NULL; ++ ++ /* OK, so if we got here, we have an MTD that's NAND and looks ++ * like it has the right capabilities ++ * Set the struct yaffs_dev up for mtd ++ */ ++ ++ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) { ++ read_only = 1; ++ printk(KERN_INFO ++ "yaffs: mtd is read only, setting superblock read only\n" ++ ); ++ sb->s_flags |= MS_RDONLY; ++ } ++ ++ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL); ++ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL); ++ ++ if (!dev || !context) { ++ kfree(dev); ++ kfree(context); ++ dev = NULL; ++ context = NULL; ++ ++ /* Deep shit could not allocate device structure */ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs_read_super: Failed trying to allocate struct yaffs_dev." ++ ); ++ return NULL; ++ } ++ memset(dev, 0, sizeof(struct yaffs_dev)); ++ param = &(dev->param); ++ ++ memset(context, 0, sizeof(struct yaffs_linux_context)); ++ dev->os_context = context; ++ INIT_LIST_HEAD(&(context->context_list)); ++ context->dev = dev; ++ context->super = sb; ++ ++ dev->read_only = read_only; ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++ sb->s_fs_info = dev; ++#else ++ sb->u.generic_sbp = dev; ++#endif ++ ++ ++ dev->driver_context = mtd; ++ param->name = mtd->name; ++ ++ /* Set up the memory size parameters.... */ ++ ++ ++ param->n_reserved_blocks = 5; ++ param->n_caches = (options.no_cache) ? 0 : 10; ++ param->inband_tags = inband_tags; ++ ++ param->enable_xattr = 1; ++ if (options.lazy_loading_overridden) ++ param->disable_lazy_load = !options.lazy_loading_enabled; ++ ++ param->defered_dir_update = 1; ++ ++ if (options.tags_ecc_overridden) ++ param->no_tags_ecc = !options.tags_ecc_on; ++ ++ param->empty_lost_n_found = 1; ++ param->refresh_period = 500; ++ param->disable_summary = options.disable_summary; ++ ++ ++#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING ++ param->disable_bad_block_marking = 1; ++#endif ++ if (options.empty_lost_and_found_overridden) ++ param->empty_lost_n_found = options.empty_lost_and_found; ++ ++ /* ... and the functions. 
*/ ++ if (yaffs_version == 2) { ++ param->is_yaffs2 = 1; ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++ param->total_bytes_per_chunk = mtd->writesize; ++ param->chunks_per_block = mtd->erasesize / mtd->writesize; ++#else ++ param->total_bytes_per_chunk = mtd->oobblock; ++ param->chunks_per_block = mtd->erasesize / mtd->oobblock; ++#endif ++ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize); ++ ++ param->start_block = 0; ++ param->end_block = n_blocks - 1; ++ } else { ++ param->is_yaffs2 = 0; ++ n_blocks = YCALCBLOCKS(mtd->size, ++ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK); ++ ++ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK; ++ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK; ++ } ++ ++ param->start_block = 0; ++ param->end_block = n_blocks - 1; ++ ++ yaffs_mtd_drv_install(dev); ++ ++ param->sb_dirty_fn = yaffs_set_super_dirty; ++ param->gc_control_fn = yaffs_gc_control_callback; ++ ++ yaffs_dev_to_lc(dev)->super = sb; ++ ++ param->use_nand_ecc = 1; ++ ++ param->skip_checkpt_rd = options.skip_checkpoint_read; ++ param->skip_checkpt_wr = options.skip_checkpoint_write; ++ ++ mutex_lock(&yaffs_context_lock); ++ /* Get a mount id */ ++ found = 0; ++ for (mount_id = 0; !found; mount_id++) { ++ found = 1; ++ list_for_each(l, &yaffs_context_list) { ++ context_iterator = ++ list_entry(l, struct yaffs_linux_context, ++ context_list); ++ if (context_iterator->mount_id == mount_id) ++ found = 0; ++ } ++ } ++ context->mount_id = mount_id; ++ ++ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list), ++ &yaffs_context_list); ++ mutex_unlock(&yaffs_context_lock); ++ ++ /* Directory search handling... */ ++ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts)); ++ param->remove_obj_fn = yaffs_remove_obj_callback; ++ ++ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock)); ++ ++ yaffs_gross_lock(dev); ++ ++ err = yaffs_guts_initialise(dev); ++ ++ yaffs_trace(YAFFS_TRACE_OS, ++ "yaffs_read_super: guts initialised %s", ++ (err == YAFFS_OK) ? "OK" : "FAILED"); ++ ++ if (err == YAFFS_OK) ++ yaffs_bg_start(dev); ++ ++ if (!context->bg_thread) ++ param->defered_dir_update = 0; ++ ++ sb->s_maxbytes = yaffs_max_file_size(dev); ++ ++ /* Release lock before yaffs_get_inode() */ ++ yaffs_gross_unlock(dev); ++ ++ /* Create root inode */ ++ if (err == YAFFS_OK) ++ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev)); ++ ++ if (!inode) ++ return NULL; ++ ++ inode->i_op = &yaffs_dir_inode_operations; ++ inode->i_fop = &yaffs_dir_operations; ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode"); ++ ++ root = yaffs_make_root(inode); ++ ++ if (!root) ++ return NULL; ++ ++ sb->s_root = root; ++ if(!dev->is_checkpointed) ++ yaffs_set_super_dirty(dev); ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs_read_super: is_checkpointed %d", ++ dev->is_checkpointed); ++ ++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done"); ++ return sb; ++} ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data, ++ int silent) ++{ ++ return yaffs_internal_read_super(1, sb, data, silent) ? 
0 : -EINVAL; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *data) ++{ ++ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd); ++} ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static int yaffs_read_super(struct file_system_type *fs, ++ int flags, const char *dev_name, ++ void *data, struct vfsmount *mnt) ++{ ++ ++ return get_sb_bdev(fs, flags, dev_name, data, ++ yaffs_internal_read_super_mtd, mnt); ++} ++#else ++static struct super_block *yaffs_read_super(struct file_system_type *fs, ++ int flags, const char *dev_name, ++ void *data) ++{ ++ ++ return get_sb_bdev(fs, flags, dev_name, data, ++ yaffs_internal_read_super_mtd); ++} ++#endif ++ ++static struct file_system_type yaffs_fs_type = { ++ .owner = THIS_MODULE, ++ .name = "yaffs", ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++ .mount = yaffs_mount, ++#else ++ .get_sb = yaffs_read_super, ++#endif ++ .kill_sb = kill_block_super, ++ .fs_flags = FS_REQUIRES_DEV, ++}; ++#else ++static struct super_block *yaffs_read_super(struct super_block *sb, void *data, ++ int silent) ++{ ++ return yaffs_internal_read_super(1, sb, data, silent); ++} ++ ++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super, ++ FS_REQUIRES_DEV); ++#endif ++ ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data, ++ int silent) ++{ ++ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *data) ++{ ++ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd); ++} ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++static int yaffs2_read_super(struct file_system_type *fs, ++ int flags, const char *dev_name, void *data, ++ struct vfsmount *mnt) ++{ ++ return get_sb_bdev(fs, flags, dev_name, data, ++ yaffs2_internal_read_super_mtd, mnt); ++} ++#else ++static struct super_block *yaffs2_read_super(struct file_system_type *fs, ++ int flags, const char *dev_name, ++ void *data) ++{ ++ ++ return get_sb_bdev(fs, flags, dev_name, data, ++ yaffs2_internal_read_super_mtd); ++} ++#endif ++ ++static struct file_system_type yaffs2_fs_type = { ++ .owner = THIS_MODULE, ++ .name = "yaffs2", ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++ .mount = yaffs2_mount, ++#else ++ .get_sb = yaffs2_read_super, ++#endif ++ .kill_sb = kill_block_super, ++ .fs_flags = FS_REQUIRES_DEV, ++}; ++#else ++static struct super_block *yaffs2_read_super(struct super_block *sb, ++ void *data, int silent) ++{ ++ return yaffs_internal_read_super(2, sb, data, silent); ++} ++ ++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super, ++ FS_REQUIRES_DEV); ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++static struct proc_dir_entry *my_proc_entry; ++ ++static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev) ++{ ++ struct yaffs_param *param = &dev->param; ++ int bs[10]; ++ ++ yaffs_count_blocks_by_state(dev,bs); ++ ++ buf += sprintf(buf, "start_block.......... %d\n", param->start_block); ++ buf += sprintf(buf, "end_block............ %d\n", param->end_block); ++ buf += sprintf(buf, "total_bytes_per_chunk %d\n", ++ param->total_bytes_per_chunk); ++ buf += sprintf(buf, "use_nand_ecc......... 
%d\n", param->use_nand_ecc); ++ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc); ++ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2); ++ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags); ++ buf += sprintf(buf, "empty_lost_n_found... %d\n", ++ param->empty_lost_n_found); ++ buf += sprintf(buf, "disable_lazy_load.... %d\n", ++ param->disable_lazy_load); ++ buf += sprintf(buf, "disable_bad_block_mrk %d\n", ++ param->disable_bad_block_marking); ++ buf += sprintf(buf, "refresh_period....... %d\n", ++ param->refresh_period); ++ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches); ++ buf += sprintf(buf, "n_reserved_blocks.... %d\n", ++ param->n_reserved_blocks); ++ buf += sprintf(buf, "always_check_erased.. %d\n", ++ param->always_check_erased); ++ buf += sprintf(buf, "\n"); ++ buf += sprintf(buf, "block count by state\n"); ++ buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n", ++ bs[0], bs[1], bs[2], bs[3], bs[4]); ++ buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n", ++ bs[5], bs[6], bs[7], bs[8], bs[9]); ++ ++ return buf; ++} ++ ++static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev) ++{ ++ buf += sprintf(buf, "max file size....... %lld\n", ++ (long long) yaffs_max_file_size(dev)); ++ buf += sprintf(buf, "data_bytes_per_chunk. %d\n", ++ dev->data_bytes_per_chunk); ++ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits); ++ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size); ++ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks); ++ buf += sprintf(buf, "blocks_in_checkpt.... %d\n", ++ dev->blocks_in_checkpt); ++ buf += sprintf(buf, "\n"); ++ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes); ++ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj); ++ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks); ++ buf += sprintf(buf, "\n"); ++ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes); ++ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads); ++ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures); ++ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies); ++ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs); ++ buf += sprintf(buf, "passive_gc_count..... %u\n", ++ dev->passive_gc_count); ++ buf += sprintf(buf, "oldest_dirty_gc_count %u\n", ++ dev->oldest_dirty_gc_count); ++ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks); ++ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs); ++ buf += sprintf(buf, "n_retried_writes..... %u\n", ++ dev->n_retried_writes); ++ buf += sprintf(buf, "n_retired_blocks..... %u\n", ++ dev->n_retired_blocks); ++ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed); ++ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed); ++ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n", ++ dev->n_tags_ecc_fixed); ++ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n", ++ dev->n_tags_ecc_unfixed); ++ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits); ++ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files); ++ buf += sprintf(buf, "n_unlinked_files..... %u\n", ++ dev->n_unlinked_files); ++ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count); ++ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions); ++ buf += sprintf(buf, "tags_used............ 
%u\n", dev->tags_used); ++ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used); ++ ++ return buf; ++} ++ ++static int yaffs_proc_read(char *page, ++ char **start, ++ off_t offset, int count, int *eof, void *data) ++{ ++ struct list_head *item; ++ char *buf = page; ++ int step = offset; ++ int n = 0; ++ ++ /* Get proc_file_read() to step 'offset' by one on each sucessive call. ++ * We use 'offset' (*ppos) to indicate where we are in dev_list. ++ * This also assumes the user has posted a read buffer large ++ * enough to hold the complete output; but that's life in /proc. ++ */ ++ ++ *(int *)start = 1; ++ ++ /* Print header first */ ++ if (step == 0) ++ buf += ++ sprintf(buf, ++ "Multi-version YAFFS built:" __DATE__ " " __TIME__ ++ "\n"); ++ else if (step == 1) ++ buf += sprintf(buf, "\n"); ++ else { ++ step -= 2; ++ ++ mutex_lock(&yaffs_context_lock); ++ ++ /* Locate and print the Nth entry. Order N-squared but N is small. */ ++ list_for_each(item, &yaffs_context_list) { ++ struct yaffs_linux_context *dc = ++ list_entry(item, struct yaffs_linux_context, ++ context_list); ++ struct yaffs_dev *dev = dc->dev; ++ ++ if (n < (step & ~1)) { ++ n += 2; ++ continue; ++ } ++ if ((step & 1) == 0) { ++ buf += ++ sprintf(buf, "\nDevice %d \"%s\"\n", n, ++ dev->param.name); ++ buf = yaffs_dump_dev_part0(buf, dev); ++ } else { ++ buf = yaffs_dump_dev_part1(buf, dev); ++ } ++ ++ break; ++ } ++ mutex_unlock(&yaffs_context_lock); ++ } ++ ++ return buf - page < count ? buf - page : count; ++} ++ ++/** ++ * Set the verbosity of the warnings and error messages. ++ * ++ * Note that the names can only be a..z or _ with the current code. ++ */ ++ ++static struct { ++ char *mask_name; ++ unsigned mask_bitfield; ++} mask_flags[] = { ++ {"allocate", YAFFS_TRACE_ALLOCATE}, ++ {"always", YAFFS_TRACE_ALWAYS}, ++ {"background", YAFFS_TRACE_BACKGROUND}, ++ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS}, ++ {"buffers", YAFFS_TRACE_BUFFERS}, ++ {"bug", YAFFS_TRACE_BUG}, ++ {"checkpt", YAFFS_TRACE_CHECKPOINT}, ++ {"deletion", YAFFS_TRACE_DELETION}, ++ {"erase", YAFFS_TRACE_ERASE}, ++ {"error", YAFFS_TRACE_ERROR}, ++ {"gc_detail", YAFFS_TRACE_GC_DETAIL}, ++ {"gc", YAFFS_TRACE_GC}, ++ {"lock", YAFFS_TRACE_LOCK}, ++ {"mtd", YAFFS_TRACE_MTD}, ++ {"nandaccess", YAFFS_TRACE_NANDACCESS}, ++ {"os", YAFFS_TRACE_OS}, ++ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG}, ++ {"scan", YAFFS_TRACE_SCAN}, ++ {"mount", YAFFS_TRACE_MOUNT}, ++ {"tracing", YAFFS_TRACE_TRACING}, ++ {"sync", YAFFS_TRACE_SYNC}, ++ {"write", YAFFS_TRACE_WRITE}, ++ {"verify", YAFFS_TRACE_VERIFY}, ++ {"verify_nand", YAFFS_TRACE_VERIFY_NAND}, ++ {"verify_full", YAFFS_TRACE_VERIFY_FULL}, ++ {"verify_all", YAFFS_TRACE_VERIFY_ALL}, ++ {"all", 0xffffffff}, ++ {"none", 0}, ++ {NULL, 0}, ++}; ++ ++#define MAX_MASK_NAME_LENGTH 40 ++static int yaffs_proc_write_trace_options(struct file *file, const char *buf, ++ unsigned long count, void *data) ++{ ++ unsigned rg = 0, mask_bitfield; ++ char *end; ++ char *mask_name; ++ const char *x; ++ char substring[MAX_MASK_NAME_LENGTH + 1]; ++ int i; ++ int done = 0; ++ int add, len = 0; ++ int pos = 0; ++ ++ rg = yaffs_trace_mask; ++ ++ while (!done && (pos < count)) { ++ done = 1; ++ while ((pos < count) && isspace(buf[pos])) ++ pos++; ++ ++ switch (buf[pos]) { ++ case '+': ++ case '-': ++ case '=': ++ add = buf[pos]; ++ pos++; ++ break; ++ ++ default: ++ add = ' '; ++ break; ++ } ++ mask_name = NULL; ++ ++ mask_bitfield = simple_strtoul(buf + pos, &end, 0); ++ ++ if (end > buf + pos) { ++ mask_name = "numeral"; ++ len = end - (buf + 
pos); ++ pos += len; ++ done = 0; ++ } else { ++ for (x = buf + pos, i = 0; ++ (*x == '_' || (*x >= 'a' && *x <= 'z')) && ++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++) ++ substring[i] = *x; ++ substring[i] = '\0'; ++ ++ for (i = 0; mask_flags[i].mask_name != NULL; i++) { ++ if (strcmp(substring, mask_flags[i].mask_name) ++ == 0) { ++ mask_name = mask_flags[i].mask_name; ++ mask_bitfield = ++ mask_flags[i].mask_bitfield; ++ done = 0; ++ break; ++ } ++ } ++ } ++ ++ if (mask_name != NULL) { ++ done = 0; ++ switch (add) { ++ case '-': ++ rg &= ~mask_bitfield; ++ break; ++ case '+': ++ rg |= mask_bitfield; ++ break; ++ case '=': ++ rg = mask_bitfield; ++ break; ++ default: ++ rg |= mask_bitfield; ++ break; ++ } ++ } ++ } ++ ++ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS; ++ ++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask); ++ ++ if (rg & YAFFS_TRACE_ALWAYS) { ++ for (i = 0; mask_flags[i].mask_name != NULL; i++) { ++ char flag; ++ flag = ((rg & mask_flags[i].mask_bitfield) == ++ mask_flags[i].mask_bitfield) ? '+' : '-'; ++ printk(KERN_DEBUG "%c%s\n", flag, ++ mask_flags[i].mask_name); ++ } ++ } ++ ++ return count; ++} ++ ++/* Debug strings are of the form: ++ * .bnnn print info on block n ++ * .cobjn,chunkn print nand chunk id for objn:chunkn ++ */ ++ ++static int yaffs_proc_debug_write(struct file *file, const char *buf, ++ unsigned long count, void *data) ++{ ++ ++ char str[100]; ++ char *p0; ++ char *p1; ++ long p1_val; ++ long p0_val; ++ char cmd; ++ struct list_head *item; ++ ++ memset(str, 0, sizeof(str)); ++ memcpy(str, buf, min(count, sizeof(str) -1)); ++ ++ cmd = str[1]; ++ ++ p0 = str + 2; ++ ++ p1 = p0; ++ ++ while (*p1 && *p1 != ',') { ++ p1++; ++ } ++ *p1 = '\0'; ++ p1++; ++ ++ p0_val = simple_strtol(p0, NULL, 0); ++ p1_val = simple_strtol(p1, NULL, 0); ++ ++ ++ mutex_lock(&yaffs_context_lock); ++ ++ /* Locate and print the Nth entry. Order N-squared but N is small. 
*/ ++ list_for_each(item, &yaffs_context_list) { ++ struct yaffs_linux_context *dc = ++ list_entry(item, struct yaffs_linux_context, ++ context_list); ++ struct yaffs_dev *dev = dc->dev; ++ ++ if (cmd == 'b') { ++ struct yaffs_block_info *bi; ++ ++ bi = yaffs_get_block_info(dev,p0_val); ++ ++ if(bi) { ++ printk("Block %d: state %d, retire %d, use %d, seq %d\n", ++ (int)p0_val, bi->block_state, ++ bi->needs_retiring, bi->pages_in_use, ++ bi->seq_number); ++ } ++ } else if (cmd == 'c') { ++ struct yaffs_obj *obj; ++ int nand_chunk; ++ ++ obj = yaffs_find_by_number(dev, p0_val); ++ if (!obj) ++ printk("No obj %d\n", (int)p0_val); ++ else { ++ if(p1_val == 0) ++ nand_chunk = obj->hdr_chunk; ++ else ++ nand_chunk = ++ yaffs_find_chunk_in_file(obj, ++ p1_val, NULL); ++ printk("Nand chunk for %d:%d is %d\n", ++ (int)p0_val, (int)p1_val, nand_chunk); ++ } ++ } ++ } ++ ++ mutex_unlock(&yaffs_context_lock); ++ ++ return count; ++} ++ ++static int yaffs_proc_write(struct file *file, const char *buf, ++ unsigned long count, void *data) ++{ ++ if (buf[0] == '.') ++ return yaffs_proc_debug_write(file, buf, count, data); ++ return yaffs_proc_write_trace_options(file, buf, count, data); ++} ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ ++ ++/* Stuff to handle installation of file systems */ ++struct file_system_to_install { ++ struct file_system_type *fst; ++ int installed; ++}; ++ ++static struct file_system_to_install fs_to_install[] = { ++ {&yaffs_fs_type, 0}, ++ {&yaffs2_fs_type, 0}, ++ {NULL, 0} ++}; ++ ++static int __init init_yaffs_fs(void) ++{ ++ int error = 0; ++ struct file_system_to_install *fsinst; ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs built " __DATE__ " " __TIME__ " Installing."); ++ ++ mutex_init(&yaffs_context_lock); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ /* Install the proc_fs entries */ ++ my_proc_entry = create_proc_entry("yaffs", ++ S_IRUGO | S_IFREG, YPROC_ROOT); ++ ++ if (my_proc_entry) { ++ my_proc_entry->write_proc = yaffs_proc_write; ++ my_proc_entry->read_proc = yaffs_proc_read; ++ my_proc_entry->data = NULL; ++ } else { ++ return -ENOMEM; ++ } ++#endif ++ ++ /* Now add the file system entries */ ++ ++ fsinst = fs_to_install; ++ ++ while (fsinst->fst && !error) { ++ error = register_filesystem(fsinst->fst); ++ if (!error) ++ fsinst->installed = 1; ++ fsinst++; ++ } ++ ++ /* Any errors? 
uninstall */ ++ if (error) { ++ fsinst = fs_to_install; ++ ++ while (fsinst->fst) { ++ if (fsinst->installed) { ++ unregister_filesystem(fsinst->fst); ++ fsinst->installed = 0; ++ } ++ fsinst++; ++ } ++ } ++ ++ return error; ++} ++ ++static void __exit exit_yaffs_fs(void) ++{ ++ ++ struct file_system_to_install *fsinst; ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "yaffs built " __DATE__ " " __TIME__ " removing."); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ remove_proc_entry("yaffs", YPROC_ROOT); ++#endif ++ ++ fsinst = fs_to_install; ++ ++ while (fsinst->fst) { ++ if (fsinst->installed) { ++ unregister_filesystem(fsinst->fst); ++ fsinst->installed = 0; ++ } ++ fsinst++; ++ } ++} ++ ++module_init(init_yaffs_fs) ++ module_exit(exit_yaffs_fs) ++ ++ MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system"); ++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011"); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs1.c linux-3.4.90/fs/yaffs2/yaffs_yaffs1.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs1.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_yaffs1.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,422 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_yaffs1.h" ++#include "yportenv.h" ++#include "yaffs_trace.h" ++#include "yaffs_bitmap.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_nand.h" ++#include "yaffs_attribs.h" ++ ++int yaffs1_scan(struct yaffs_dev *dev) ++{ ++ struct yaffs_ext_tags tags; ++ int blk; ++ int result; ++ int chunk; ++ int c; ++ int deleted; ++ enum yaffs_block_state state; ++ LIST_HEAD(hard_list); ++ struct yaffs_block_info *bi; ++ u32 seq_number; ++ struct yaffs_obj_hdr *oh; ++ struct yaffs_obj *in; ++ struct yaffs_obj *parent; ++ int alloc_failed = 0; ++ struct yaffs_shadow_fixer *shadow_fixers = NULL; ++ u8 *chunk_data; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "yaffs1_scan starts intstartblk %d intendblk %d...", ++ dev->internal_start_block, dev->internal_end_block); ++ ++ chunk_data = yaffs_get_temp_buffer(dev); ++ ++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER; ++ ++ /* Scan all the blocks to determine their state */ ++ bi = dev->block_info; ++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; ++ blk++) { ++ yaffs_clear_chunk_bits(dev, blk); ++ bi->pages_in_use = 0; ++ bi->soft_del_pages = 0; ++ ++ yaffs_query_init_block_state(dev, blk, &state, &seq_number); ++ ++ bi->block_state = state; ++ bi->seq_number = seq_number; ++ ++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK) ++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, ++ "Block scanning block %d state %d seq %d", ++ blk, state, seq_number); ++ ++ if (state == YAFFS_BLOCK_STATE_DEAD) { ++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, ++ "block %d is bad", blk); ++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) { ++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty "); ++ dev->n_erased_blocks++; ++ dev->n_free_chunks += dev->param.chunks_per_block; ++ } ++ bi++; ++ } ++ ++ /* For each block.... 
*/ ++ for (blk = dev->internal_start_block; ++ !alloc_failed && blk <= dev->internal_end_block; blk++) { ++ ++ cond_resched(); ++ ++ bi = yaffs_get_block_info(dev, blk); ++ state = bi->block_state; ++ ++ deleted = 0; ++ ++ /* For each chunk in each block that needs scanning.... */ ++ for (c = 0; ++ !alloc_failed && c < dev->param.chunks_per_block && ++ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) { ++ /* Read the tags and decide what to do */ ++ chunk = blk * dev->param.chunks_per_block + c; ++ ++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, ++ &tags); ++ ++ /* Let's have a good look at this chunk... */ ++ ++ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED || ++ tags.is_deleted) { ++ /* YAFFS1 only... ++ * A deleted chunk ++ */ ++ deleted++; ++ dev->n_free_chunks++; ++ } else if (!tags.chunk_used) { ++ /* An unassigned chunk in the block ++ * This means that either the block is empty or ++ * this is the one being allocated from ++ */ ++ ++ if (c == 0) { ++ /* We're looking at the first chunk in ++ *the block so the block is unused */ ++ state = YAFFS_BLOCK_STATE_EMPTY; ++ dev->n_erased_blocks++; ++ } else { ++ /* this is the block being allocated */ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ " Allocating from %d %d", ++ blk, c); ++ state = YAFFS_BLOCK_STATE_ALLOCATING; ++ dev->alloc_block = blk; ++ dev->alloc_page = c; ++ dev->alloc_block_finder = blk; ++ ++ } ++ ++ dev->n_free_chunks += ++ (dev->param.chunks_per_block - c); ++ } else if (tags.chunk_id > 0) { ++ /* chunk_id > 0 so it is a data chunk... */ ++ unsigned int endpos; ++ ++ yaffs_set_chunk_bit(dev, blk, c); ++ bi->pages_in_use++; ++ ++ in = yaffs_find_or_create_by_number(dev, ++ tags.obj_id, ++ YAFFS_OBJECT_TYPE_FILE); ++ /* PutChunkIntoFile checks for a clash ++ * (two data chunks with the same chunk_id). ++ */ ++ ++ if (!in) ++ alloc_failed = 1; ++ ++ if (in) { ++ if (!yaffs_put_chunk_in_file ++ (in, tags.chunk_id, chunk, 1)) ++ alloc_failed = 1; ++ } ++ ++ endpos = ++ (tags.chunk_id - 1) * ++ dev->data_bytes_per_chunk + ++ tags.n_bytes; ++ if (in && ++ in->variant_type == ++ YAFFS_OBJECT_TYPE_FILE && ++ in->variant.file_variant.scanned_size < ++ endpos) { ++ in->variant.file_variant.scanned_size = ++ endpos; ++ if (!dev->param.use_header_file_size) { ++ in->variant. ++ file_variant.file_size = ++ in->variant. ++ file_variant.scanned_size; ++ } ++ ++ } ++ } else { ++ /* chunk_id == 0, so it is an ObjectHeader. ++ * Make the object ++ */ ++ yaffs_set_chunk_bit(dev, blk, c); ++ bi->pages_in_use++; ++ ++ result = yaffs_rd_chunk_tags_nand(dev, chunk, ++ chunk_data, ++ NULL); ++ ++ oh = (struct yaffs_obj_hdr *)chunk_data; ++ ++ in = yaffs_find_by_number(dev, tags.obj_id); ++ if (in && in->variant_type != oh->type) { ++ /* This should not happen, but somehow ++ * Wev'e ended up with an obj_id that ++ * has been reused but not yet deleted, ++ * and worse still it has changed type. ++ * Delete the old object. 
++ */ ++ ++ yaffs_del_obj(in); ++ in = NULL; ++ } ++ ++ in = yaffs_find_or_create_by_number(dev, ++ tags.obj_id, ++ oh->type); ++ ++ if (!in) ++ alloc_failed = 1; ++ ++ if (in && oh->shadows_obj > 0) { ++ ++ struct yaffs_shadow_fixer *fixer; ++ fixer = ++ kmalloc(sizeof ++ (struct yaffs_shadow_fixer), ++ GFP_NOFS); ++ if (fixer) { ++ fixer->next = shadow_fixers; ++ shadow_fixers = fixer; ++ fixer->obj_id = tags.obj_id; ++ fixer->shadowed_id = ++ oh->shadows_obj; ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ " Shadow fixer: %d shadows %d", ++ fixer->obj_id, ++ fixer->shadowed_id); ++ ++ } ++ ++ } ++ ++ if (in && in->valid) { ++ /* We have already filled this one. ++ * We have a duplicate and need to ++ * resolve it. */ ++ ++ unsigned existing_serial = in->serial; ++ unsigned new_serial = ++ tags.serial_number; ++ ++ if (((existing_serial + 1) & 3) == ++ new_serial) { ++ /* Use new one - destroy the ++ * exisiting one */ ++ yaffs_chunk_del(dev, ++ in->hdr_chunk, ++ 1, __LINE__); ++ in->valid = 0; ++ } else { ++ /* Use existing - destroy ++ * this one. */ ++ yaffs_chunk_del(dev, chunk, 1, ++ __LINE__); ++ } ++ } ++ ++ if (in && !in->valid && ++ (tags.obj_id == YAFFS_OBJECTID_ROOT || ++ tags.obj_id == ++ YAFFS_OBJECTID_LOSTNFOUND)) { ++ /* We only load some info, don't fiddle ++ * with directory structure */ ++ in->valid = 1; ++ in->variant_type = oh->type; ++ ++ in->yst_mode = oh->yst_mode; ++ yaffs_load_attribs(in, oh); ++ in->hdr_chunk = chunk; ++ in->serial = tags.serial_number; ++ ++ } else if (in && !in->valid) { ++ /* we need to load this info */ ++ ++ in->valid = 1; ++ in->variant_type = oh->type; ++ ++ in->yst_mode = oh->yst_mode; ++ yaffs_load_attribs(in, oh); ++ in->hdr_chunk = chunk; ++ in->serial = tags.serial_number; ++ ++ yaffs_set_obj_name_from_oh(in, oh); ++ in->dirty = 0; ++ ++ /* directory stuff... ++ * hook up to parent ++ */ ++ ++ parent = ++ yaffs_find_or_create_by_number ++ (dev, oh->parent_obj_id, ++ YAFFS_OBJECT_TYPE_DIRECTORY); ++ if (!parent) ++ alloc_failed = 1; ++ if (parent && parent->variant_type == ++ YAFFS_OBJECT_TYPE_UNKNOWN) { ++ /* Set up as a directory */ ++ parent->variant_type = ++ YAFFS_OBJECT_TYPE_DIRECTORY; ++ INIT_LIST_HEAD(&parent-> ++ variant.dir_variant. ++ children); ++ } else if (!parent || ++ parent->variant_type != ++ YAFFS_OBJECT_TYPE_DIRECTORY) { ++ /* Hoosterman, a problem.... ++ * We're trying to use a ++ * non-directory as a directory ++ */ ++ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." ++ ); ++ parent = dev->lost_n_found; ++ } ++ ++ yaffs_add_obj_to_dir(parent, in); ++ ++ switch (in->variant_type) { ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ /* Todo got a problem */ ++ break; ++ case YAFFS_OBJECT_TYPE_FILE: ++ if (dev->param. ++ use_header_file_size) ++ in->variant. ++ file_variant.file_size ++ = yaffs_oh_to_size(oh); ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ in->variant. ++ hardlink_variant.equiv_id = ++ oh->equiv_id; ++ list_add(&in->hard_links, ++ &hard_list); ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ in->variant.symlink_variant. ++ alias = ++ yaffs_clone_str(oh->alias); ++ if (!in->variant. ++ symlink_variant.alias) ++ alloc_failed = 1; ++ break; ++ } ++ } ++ } ++ } ++ ++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) { ++ /* If we got this far while scanning, ++ * then the block is fully allocated. 
*/ ++ state = YAFFS_BLOCK_STATE_FULL; ++ } ++ ++ if (state == YAFFS_BLOCK_STATE_ALLOCATING) { ++ /* If the block was partially allocated then ++ * treat it as fully allocated. */ ++ state = YAFFS_BLOCK_STATE_FULL; ++ dev->alloc_block = -1; ++ } ++ ++ bi->block_state = state; ++ ++ /* Now let's see if it was dirty */ ++ if (bi->pages_in_use == 0 && ++ !bi->has_shrink_hdr && ++ bi->block_state == YAFFS_BLOCK_STATE_FULL) ++ yaffs_block_became_dirty(dev, blk); ++ } ++ ++ /* Ok, we've done all the scanning. ++ * Fix up the hard link chains. ++ * We should now have scanned all the objects, now it's time to add ++ * these hardlinks. ++ */ ++ ++ yaffs_link_fixup(dev, &hard_list); ++ ++ /* ++ * Fix up any shadowed objects. ++ * There should not be more than one of these. ++ */ ++ { ++ struct yaffs_shadow_fixer *fixer; ++ struct yaffs_obj *obj; ++ ++ while (shadow_fixers) { ++ fixer = shadow_fixers; ++ shadow_fixers = fixer->next; ++ /* Complete the rename transaction by deleting the ++ * shadowed object then setting the object header ++ to unshadowed. ++ */ ++ obj = yaffs_find_by_number(dev, fixer->shadowed_id); ++ if (obj) ++ yaffs_del_obj(obj); ++ ++ obj = yaffs_find_by_number(dev, fixer->obj_id); ++ ++ if (obj) ++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL); ++ ++ kfree(fixer); ++ } ++ } ++ ++ yaffs_release_temp_buffer(dev, chunk_data); ++ ++ if (alloc_failed) ++ return YAFFS_FAIL; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends"); ++ ++ return YAFFS_OK; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs1.h linux-3.4.90/fs/yaffs2/yaffs_yaffs1.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs1.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_yaffs1.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,22 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_YAFFS1_H__ ++#define __YAFFS_YAFFS1_H__ ++ ++#include "yaffs_guts.h" ++int yaffs1_scan(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs2.c linux-3.4.90/fs/yaffs2/yaffs_yaffs2.c +--- linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs2.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_yaffs2.c 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,1534 @@ ++/* ++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include "yaffs_guts.h" ++#include "yaffs_trace.h" ++#include "yaffs_yaffs2.h" ++#include "yaffs_checkptrw.h" ++#include "yaffs_bitmap.h" ++#include "yaffs_nand.h" ++#include "yaffs_getblockinfo.h" ++#include "yaffs_verify.h" ++#include "yaffs_attribs.h" ++#include "yaffs_summary.h" ++ ++/* ++ * Checkpoints are really no benefit on very small partitions. 
++ * ++ * To save space on small partitions don't bother with checkpoints unless ++ * the partition is at least this big. ++ */ ++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60 ++#define YAFFS_SMALL_HOLE_THRESHOLD 4 ++ ++/* ++ * Oldest Dirty Sequence Number handling. ++ */ ++ ++/* yaffs_calc_oldest_dirty_seq() ++ * yaffs2_find_oldest_dirty_seq() ++ * Calculate the oldest dirty sequence number if we don't know it. ++ */ ++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev) ++{ ++ int i; ++ unsigned seq; ++ unsigned block_no = 0; ++ struct yaffs_block_info *b; ++ ++ if (!dev->param.is_yaffs2) ++ return; ++ ++ /* Find the oldest dirty sequence number. */ ++ seq = dev->seq_number + 1; ++ b = dev->block_info; ++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { ++ if (b->block_state == YAFFS_BLOCK_STATE_FULL && ++ (b->pages_in_use - b->soft_del_pages) < ++ dev->param.chunks_per_block && ++ b->seq_number < seq) { ++ seq = b->seq_number; ++ block_no = i; ++ } ++ b++; ++ } ++ ++ if (block_no) { ++ dev->oldest_dirty_seq = seq; ++ dev->oldest_dirty_block = block_no; ++ } ++} ++ ++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev) ++{ ++ if (!dev->param.is_yaffs2) ++ return; ++ ++ if (!dev->oldest_dirty_seq) ++ yaffs_calc_oldest_dirty_seq(dev); ++} ++ ++/* ++ * yaffs_clear_oldest_dirty_seq() ++ * Called when a block is erased or marked bad. (ie. when its seq_number ++ * becomes invalid). If the value matches the oldest then we clear ++ * dev->oldest_dirty_seq to force its recomputation. ++ */ ++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi) ++{ ++ ++ if (!dev->param.is_yaffs2) ++ return; ++ ++ if (!bi || bi->seq_number == dev->oldest_dirty_seq) { ++ dev->oldest_dirty_seq = 0; ++ dev->oldest_dirty_block = 0; ++ } ++} ++ ++/* ++ * yaffs2_update_oldest_dirty_seq() ++ * Update the oldest dirty sequence number whenever we dirty a block. ++ * Only do this if the oldest_dirty_seq is actually being tracked. ++ */ ++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no, ++ struct yaffs_block_info *bi) ++{ ++ if (!dev->param.is_yaffs2) ++ return; ++ ++ if (dev->oldest_dirty_seq) { ++ if (dev->oldest_dirty_seq > bi->seq_number) { ++ dev->oldest_dirty_seq = bi->seq_number; ++ dev->oldest_dirty_block = block_no; ++ } ++ } ++} ++ ++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi) ++{ ++ ++ if (!dev->param.is_yaffs2) ++ return 1; /* disqualification only applies to yaffs2. */ ++ ++ if (!bi->has_shrink_hdr) ++ return 1; /* can gc */ ++ ++ yaffs2_find_oldest_dirty_seq(dev); ++ ++ /* Can't do gc of this block if there are any blocks older than this ++ * one that have discarded pages. ++ */ ++ return (bi->seq_number <= dev->oldest_dirty_seq); ++} ++ ++/* ++ * yaffs2_find_refresh_block() ++ * periodically finds the oldest full block by sequence number for refreshing. ++ * Only for yaffs2. ++ */ ++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev) ++{ ++ u32 b; ++ u32 oldest = 0; ++ u32 oldest_seq = 0; ++ struct yaffs_block_info *bi; ++ ++ if (!dev->param.is_yaffs2) ++ return oldest; ++ ++ /* ++ * If refresh period < 10 then refreshing is disabled. ++ */ ++ if (dev->param.refresh_period < 10) ++ return oldest; ++ ++ /* ++ * Fix broken values. ++ */ ++ if (dev->refresh_skip > dev->param.refresh_period) ++ dev->refresh_skip = dev->param.refresh_period; ++ ++ if (dev->refresh_skip > 0) ++ return oldest; ++ ++ /* ++ * Refresh skip is now zero. ++ * We'll do a refresh this time around.... 
++ * Update the refresh skip and find the oldest block. ++ */ ++ dev->refresh_skip = dev->param.refresh_period; ++ dev->refresh_count++; ++ bi = dev->block_info; ++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) { ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) { ++ ++ if (oldest < 1 || bi->seq_number < oldest_seq) { ++ oldest = b; ++ oldest_seq = bi->seq_number; ++ } ++ } ++ bi++; ++ } ++ ++ if (oldest > 0) { ++ yaffs_trace(YAFFS_TRACE_GC, ++ "GC refresh count %d selected block %d with seq_number %d", ++ dev->refresh_count, oldest, oldest_seq); ++ } ++ ++ return oldest; ++} ++ ++int yaffs2_checkpt_required(struct yaffs_dev *dev) ++{ ++ int nblocks; ++ ++ if (!dev->param.is_yaffs2) ++ return 0; ++ ++ nblocks = dev->internal_end_block - dev->internal_start_block + 1; ++ ++ return !dev->param.skip_checkpt_wr && ++ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS); ++} ++ ++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev) ++{ ++ int retval; ++ int n_bytes = 0; ++ int n_blocks; ++ int dev_blocks; ++ ++ if (!dev->param.is_yaffs2) ++ return 0; ++ ++ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) { ++ /* Not a valid value so recalculate */ ++ dev_blocks = dev->param.end_block - dev->param.start_block + 1; ++ n_bytes += sizeof(struct yaffs_checkpt_validity); ++ n_bytes += sizeof(struct yaffs_checkpt_dev); ++ n_bytes += dev_blocks * sizeof(struct yaffs_block_info); ++ n_bytes += dev_blocks * dev->chunk_bit_stride; ++ n_bytes += ++ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) * ++ dev->n_obj; ++ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes; ++ n_bytes += sizeof(struct yaffs_checkpt_validity); ++ n_bytes += sizeof(u32); /* checksum */ ++ ++ /* Round up and add 2 blocks to allow for some bad blocks, ++ * so add 3 */ ++ ++ n_blocks = ++ (n_bytes / ++ (dev->data_bytes_per_chunk * ++ dev->param.chunks_per_block)) + 3; ++ ++ dev->checkpoint_blocks_required = n_blocks; ++ } ++ ++ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt; ++ if (retval < 0) ++ retval = 0; ++ return retval; ++} ++ ++/*--------------------- Checkpointing --------------------*/ ++ ++static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head) ++{ ++ struct yaffs_checkpt_validity cp; ++ ++ memset(&cp, 0, sizeof(cp)); ++ ++ cp.struct_type = sizeof(cp); ++ cp.magic = YAFFS_MAGIC; ++ cp.version = YAFFS_CHECKPOINT_VERSION; ++ cp.head = (head) ? 1 : 0; ++ ++ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0; ++} ++ ++static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head) ++{ ++ struct yaffs_checkpt_validity cp; ++ int ok; ++ ++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp)); ++ ++ if (ok) ++ ok = (cp.struct_type == sizeof(cp)) && ++ (cp.magic == YAFFS_MAGIC) && ++ (cp.version == YAFFS_CHECKPOINT_VERSION) && ++ (cp.head == ((head) ? 1 : 0)); ++ return ok ? 
1 : 0; ++} ++ ++static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp, ++ struct yaffs_dev *dev) ++{ ++ cp->n_erased_blocks = dev->n_erased_blocks; ++ cp->alloc_block = dev->alloc_block; ++ cp->alloc_page = dev->alloc_page; ++ cp->n_free_chunks = dev->n_free_chunks; ++ ++ cp->n_deleted_files = dev->n_deleted_files; ++ cp->n_unlinked_files = dev->n_unlinked_files; ++ cp->n_bg_deletions = dev->n_bg_deletions; ++ cp->seq_number = dev->seq_number; ++ ++} ++ ++static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev, ++ struct yaffs_checkpt_dev *cp) ++{ ++ dev->n_erased_blocks = cp->n_erased_blocks; ++ dev->alloc_block = cp->alloc_block; ++ dev->alloc_page = cp->alloc_page; ++ dev->n_free_chunks = cp->n_free_chunks; ++ ++ dev->n_deleted_files = cp->n_deleted_files; ++ dev->n_unlinked_files = cp->n_unlinked_files; ++ dev->n_bg_deletions = cp->n_bg_deletions; ++ dev->seq_number = cp->seq_number; ++} ++ ++static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev) ++{ ++ struct yaffs_checkpt_dev cp; ++ u32 n_bytes; ++ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1; ++ int ok; ++ ++ /* Write device runtime values */ ++ yaffs2_dev_to_checkpt_dev(&cp, dev); ++ cp.struct_type = sizeof(cp); ++ ++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)); ++ if (!ok) ++ return 0; ++ ++ /* Write block info */ ++ n_bytes = n_blocks * sizeof(struct yaffs_block_info); ++ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes); ++ if (!ok) ++ return 0; ++ ++ /* Write chunk bits */ ++ n_bytes = n_blocks * dev->chunk_bit_stride; ++ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes); ++ ++ return ok ? 1 : 0; ++} ++ ++static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev) ++{ ++ struct yaffs_checkpt_dev cp; ++ u32 n_bytes; ++ u32 n_blocks = ++ (dev->internal_end_block - dev->internal_start_block + 1); ++ int ok; ++ ++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp)); ++ if (!ok) ++ return 0; ++ ++ if (cp.struct_type != sizeof(cp)) ++ return 0; ++ ++ yaffs_checkpt_dev_to_dev(dev, &cp); ++ ++ n_bytes = n_blocks * sizeof(struct yaffs_block_info); ++ ++ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes); ++ ++ if (!ok) ++ return 0; ++ ++ n_bytes = n_blocks * dev->chunk_bit_stride; ++ ++ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes); ++ ++ return ok ? 1 : 0; ++} ++ ++static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp, ++ struct yaffs_obj *obj) ++{ ++ cp->obj_id = obj->obj_id; ++ cp->parent_id = (obj->parent) ? 
obj->parent->obj_id : 0; ++ cp->hdr_chunk = obj->hdr_chunk; ++ cp->variant_type = obj->variant_type; ++ cp->deleted = obj->deleted; ++ cp->soft_del = obj->soft_del; ++ cp->unlinked = obj->unlinked; ++ cp->fake = obj->fake; ++ cp->rename_allowed = obj->rename_allowed; ++ cp->unlink_allowed = obj->unlink_allowed; ++ cp->serial = obj->serial; ++ cp->n_data_chunks = obj->n_data_chunks; ++ ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) ++ cp->size_or_equiv_obj = obj->variant.file_variant.file_size; ++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) ++ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id; ++} ++ ++static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj, ++ struct yaffs_checkpt_obj *cp) ++{ ++ struct yaffs_obj *parent; ++ ++ if (obj->variant_type != cp->variant_type) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "Checkpoint read object %d type %d chunk %d does not match existing object type %d", ++ cp->obj_id, cp->variant_type, cp->hdr_chunk, ++ obj->variant_type); ++ return 0; ++ } ++ ++ obj->obj_id = cp->obj_id; ++ ++ if (cp->parent_id) ++ parent = yaffs_find_or_create_by_number(obj->my_dev, ++ cp->parent_id, ++ YAFFS_OBJECT_TYPE_DIRECTORY); ++ else ++ parent = NULL; ++ ++ if (parent) { ++ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { ++ yaffs_trace(YAFFS_TRACE_ALWAYS, ++ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory", ++ cp->obj_id, cp->parent_id, ++ cp->variant_type, cp->hdr_chunk, ++ parent->variant_type); ++ return 0; ++ } ++ yaffs_add_obj_to_dir(parent, obj); ++ } ++ ++ obj->hdr_chunk = cp->hdr_chunk; ++ obj->variant_type = cp->variant_type; ++ obj->deleted = cp->deleted; ++ obj->soft_del = cp->soft_del; ++ obj->unlinked = cp->unlinked; ++ obj->fake = cp->fake; ++ obj->rename_allowed = cp->rename_allowed; ++ obj->unlink_allowed = cp->unlink_allowed; ++ obj->serial = cp->serial; ++ obj->n_data_chunks = cp->n_data_chunks; ++ ++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) ++ obj->variant.file_variant.file_size = cp->size_or_equiv_obj; ++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) ++ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj; ++ ++ if (obj->hdr_chunk > 0) ++ obj->lazy_loaded = 1; ++ return 1; ++} ++ ++static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in, ++ struct yaffs_tnode *tn, u32 level, ++ int chunk_offset) ++{ ++ int i; ++ struct yaffs_dev *dev = in->my_dev; ++ int ok = 1; ++ u32 base_offset; ++ ++ if (!tn) ++ return 1; ++ ++ if (level > 0) { ++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) { ++ if (!tn->internal[i]) ++ continue; ++ ok = yaffs2_checkpt_tnode_worker(in, ++ tn->internal[i], ++ level - 1, ++ (chunk_offset << ++ YAFFS_TNODES_INTERNAL_BITS) + i); ++ } ++ return ok; ++ } ++ ++ /* Level 0 tnode */ ++ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS; ++ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) == ++ sizeof(base_offset)); ++ if (ok) ++ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) == ++ dev->tnode_size); ++ ++ return ok; ++} ++ ++static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj) ++{ ++ u32 end_marker = ~0; ++ int ok = 1; ++ ++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE) ++ return ok; ++ ++ ok = yaffs2_checkpt_tnode_worker(obj, ++ obj->variant.file_variant.top, ++ obj->variant.file_variant. ++ top_level, 0); ++ if (ok) ++ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker, ++ sizeof(end_marker)) == sizeof(end_marker)); ++ ++ return ok ? 
1 : 0; ++} ++ ++static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj) ++{ ++ u32 base_chunk; ++ int ok = 1; ++ struct yaffs_dev *dev = obj->my_dev; ++ struct yaffs_file_var *file_stuct_ptr = &obj->variant.file_variant; ++ struct yaffs_tnode *tn; ++ int nread = 0; ++ ++ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) == ++ sizeof(base_chunk)); ++ ++ while (ok && (~base_chunk)) { ++ nread++; ++ /* Read level 0 tnode */ ++ ++ tn = yaffs_get_tnode(dev); ++ if (tn) ++ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) == ++ dev->tnode_size); ++ else ++ ok = 0; ++ ++ if (tn && ok) ++ ok = yaffs_add_find_tnode_0(dev, ++ file_stuct_ptr, ++ base_chunk, tn) ? 1 : 0; ++ ++ if (ok) ++ ok = (yaffs2_checkpt_rd ++ (dev, &base_chunk, ++ sizeof(base_chunk)) == sizeof(base_chunk)); ++ } ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "Checkpoint read tnodes %d records, last %d. ok %d", ++ nread, base_chunk, ok); ++ ++ return ok ? 1 : 0; ++} ++ ++static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_checkpt_obj cp; ++ int i; ++ int ok = 1; ++ struct list_head *lh; ++ ++ /* Iterate through the objects in each hash entry, ++ * dumping them to the checkpointing stream. ++ */ ++ ++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) { ++ list_for_each(lh, &dev->obj_bucket[i].list) { ++ obj = list_entry(lh, struct yaffs_obj, hash_link); ++ if (!obj->defered_free) { ++ yaffs2_obj_checkpt_obj(&cp, obj); ++ cp.struct_type = sizeof(cp); ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p", ++ cp.obj_id, cp.parent_id, ++ cp.variant_type, cp.hdr_chunk, obj); ++ ++ ok = (yaffs2_checkpt_wr(dev, &cp, ++ sizeof(cp)) == sizeof(cp)); ++ ++ if (ok && ++ obj->variant_type == ++ YAFFS_OBJECT_TYPE_FILE) ++ ok = yaffs2_wr_checkpt_tnodes(obj); ++ } ++ } ++ } ++ ++ /* Dump end of list */ ++ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj)); ++ cp.struct_type = sizeof(cp); ++ ++ if (ok) ++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)); ++ ++ return ok ? 1 : 0; ++} ++ ++static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev) ++{ ++ struct yaffs_obj *obj; ++ struct yaffs_checkpt_obj cp; ++ int ok = 1; ++ int done = 0; ++ LIST_HEAD(hard_list); ++ ++ ++ while (ok && !done) { ++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp)); ++ if (cp.struct_type != sizeof(cp)) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "struct size %d instead of %d ok %d", ++ cp.struct_type, (int)sizeof(cp), ok); ++ ok = 0; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "Checkpoint read object %d parent %d type %d chunk %d ", ++ cp.obj_id, cp.parent_id, cp.variant_type, ++ cp.hdr_chunk); ++ ++ if (ok && cp.obj_id == ~0) { ++ done = 1; ++ } else if (ok) { ++ obj = ++ yaffs_find_or_create_by_number(dev, cp.obj_id, ++ cp.variant_type); ++ if (obj) { ++ ok = yaffs2_checkpt_obj_to_obj(obj, &cp); ++ if (!ok) ++ break; ++ if (obj->variant_type == ++ YAFFS_OBJECT_TYPE_FILE) { ++ ok = yaffs2_rd_checkpt_tnodes(obj); ++ } else if (obj->variant_type == ++ YAFFS_OBJECT_TYPE_HARDLINK) { ++ list_add(&obj->hard_links, &hard_list); ++ } ++ } else { ++ ok = 0; ++ } ++ } ++ } ++ ++ if (ok) ++ yaffs_link_fixup(dev, &hard_list); ++ ++ return ok ? 
1 : 0; ++} ++ ++static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev) ++{ ++ u32 checkpt_sum; ++ int ok; ++ ++ yaffs2_get_checkpt_sum(dev, &checkpt_sum); ++ ++ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) == ++ sizeof(checkpt_sum)); ++ ++ if (!ok) ++ return 0; ++ ++ return 1; ++} ++ ++static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev) ++{ ++ u32 checkpt_sum0; ++ u32 checkpt_sum1; ++ int ok; ++ ++ yaffs2_get_checkpt_sum(dev, &checkpt_sum0); ++ ++ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) == ++ sizeof(checkpt_sum1)); ++ ++ if (!ok) ++ return 0; ++ ++ if (checkpt_sum0 != checkpt_sum1) ++ return 0; ++ ++ return 1; ++} ++ ++static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev) ++{ ++ int ok = 1; ++ ++ if (!yaffs2_checkpt_required(dev)) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "skipping checkpoint write"); ++ ok = 0; ++ } ++ ++ if (ok) ++ ok = yaffs2_checkpt_open(dev, 1); ++ ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "write checkpoint validity"); ++ ok = yaffs2_wr_checkpt_validity_marker(dev, 1); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "write checkpoint device"); ++ ok = yaffs2_wr_checkpt_dev(dev); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "write checkpoint objects"); ++ ok = yaffs2_wr_checkpt_objs(dev); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "write checkpoint validity"); ++ ok = yaffs2_wr_checkpt_validity_marker(dev, 0); ++ } ++ ++ if (ok) ++ ok = yaffs2_wr_checkpt_sum(dev); ++ ++ if (!yaffs_checkpt_close(dev)) ++ ok = 0; ++ ++ if (ok) ++ dev->is_checkpointed = 1; ++ else ++ dev->is_checkpointed = 0; ++ ++ return dev->is_checkpointed; ++} ++ ++static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev) ++{ ++ int ok = 1; ++ ++ if (!dev->param.is_yaffs2) ++ ok = 0; ++ ++ if (ok && dev->param.skip_checkpt_rd) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "skipping checkpoint read"); ++ ok = 0; ++ } ++ ++ if (ok) ++ ok = yaffs2_checkpt_open(dev, 0); /* open for read */ ++ ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "read checkpoint validity"); ++ ok = yaffs2_rd_checkpt_validity_marker(dev, 1); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "read checkpoint device"); ++ ok = yaffs2_rd_checkpt_dev(dev); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "read checkpoint objects"); ++ ok = yaffs2_rd_checkpt_objs(dev); ++ } ++ if (ok) { ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "read checkpoint validity"); ++ ok = yaffs2_rd_checkpt_validity_marker(dev, 0); ++ } ++ ++ if (ok) { ++ ok = yaffs2_rd_checkpt_sum(dev); ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "read checkpoint checksum %d", ok); ++ } ++ ++ if (!yaffs_checkpt_close(dev)) ++ ok = 0; ++ ++ if (ok) ++ dev->is_checkpointed = 1; ++ else ++ dev->is_checkpointed = 0; ++ ++ return ok ? 
1 : 0; ++} ++ ++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev) ++{ ++ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) { ++ dev->is_checkpointed = 0; ++ yaffs2_checkpt_invalidate_stream(dev); ++ } ++ if (dev->param.sb_dirty_fn) ++ dev->param.sb_dirty_fn(dev); ++} ++ ++int yaffs_checkpoint_save(struct yaffs_dev *dev) ++{ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "save entry: is_checkpointed %d", ++ dev->is_checkpointed); ++ ++ yaffs_verify_objects(dev); ++ yaffs_verify_blocks(dev); ++ yaffs_verify_free_chunks(dev); ++ ++ if (!dev->is_checkpointed) { ++ yaffs2_checkpt_invalidate(dev); ++ yaffs2_wr_checkpt_data(dev); ++ } ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT, ++ "save exit: is_checkpointed %d", ++ dev->is_checkpointed); ++ ++ return dev->is_checkpointed; ++} ++ ++int yaffs2_checkpt_restore(struct yaffs_dev *dev) ++{ ++ int retval; ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "restore entry: is_checkpointed %d", ++ dev->is_checkpointed); ++ ++ retval = yaffs2_rd_checkpt_data(dev); ++ ++ if (dev->is_checkpointed) { ++ yaffs_verify_objects(dev); ++ yaffs_verify_blocks(dev); ++ yaffs_verify_free_chunks(dev); ++ } ++ ++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, ++ "restore exit: is_checkpointed %d", ++ dev->is_checkpointed); ++ ++ return retval; ++} ++ ++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size) ++{ ++ /* if new_size > old_file_size. ++ * We're going to be writing a hole. ++ * If the hole is small then write zeros otherwise write a start ++ * of hole marker. ++ */ ++ loff_t old_file_size; ++ loff_t increase; ++ int small_hole; ++ int result = YAFFS_OK; ++ struct yaffs_dev *dev = NULL; ++ u8 *local_buffer = NULL; ++ int small_increase_ok = 0; ++ ++ if (!obj) ++ return YAFFS_FAIL; ++ ++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE) ++ return YAFFS_FAIL; ++ ++ dev = obj->my_dev; ++ ++ /* Bail out if not yaffs2 mode */ ++ if (!dev->param.is_yaffs2) ++ return YAFFS_OK; ++ ++ old_file_size = obj->variant.file_variant.file_size; ++ ++ if (new_size <= old_file_size) ++ return YAFFS_OK; ++ ++ increase = new_size - old_file_size; ++ ++ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk && ++ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1)) ++ small_hole = 1; ++ else ++ small_hole = 0; ++ ++ if (small_hole) ++ local_buffer = yaffs_get_temp_buffer(dev); ++ ++ if (local_buffer) { ++ /* fill hole with zero bytes */ ++ loff_t pos = old_file_size; ++ int this_write; ++ int written; ++ memset(local_buffer, 0, dev->data_bytes_per_chunk); ++ small_increase_ok = 1; ++ ++ while (increase > 0 && small_increase_ok) { ++ this_write = increase; ++ if (this_write > dev->data_bytes_per_chunk) ++ this_write = dev->data_bytes_per_chunk; ++ written = ++ yaffs_do_file_wr(obj, local_buffer, pos, this_write, ++ 0); ++ if (written == this_write) { ++ pos += this_write; ++ increase -= this_write; ++ } else { ++ small_increase_ok = 0; ++ } ++ } ++ ++ yaffs_release_temp_buffer(dev, local_buffer); ++ ++ /* If out of space then reverse any chunks we've added */ ++ if (!small_increase_ok) ++ yaffs_resize_file_down(obj, old_file_size); ++ } ++ ++ if (!small_increase_ok && ++ obj->parent && ++ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED && ++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) { ++ /* Write a hole start header with the old file size */ ++ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL); ++ } ++ ++ return result; ++} ++ ++struct yaffs_block_index { ++ int seq; ++ int block; ++}; ++ ++static int yaffs2_ybicmp(const void *a, const void *b) 
++{ ++ int aseq = ((struct yaffs_block_index *)a)->seq; ++ int bseq = ((struct yaffs_block_index *)b)->seq; ++ int ablock = ((struct yaffs_block_index *)a)->block; ++ int bblock = ((struct yaffs_block_index *)b)->block; ++ ++ if (aseq == bseq) ++ return ablock - bblock; ++ ++ return aseq - bseq; ++} ++ ++static inline int yaffs2_scan_chunk(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi, ++ int blk, int chunk_in_block, ++ int *found_chunks, ++ u8 *chunk_data, ++ struct list_head *hard_list, ++ int summary_available) ++{ ++ struct yaffs_obj_hdr *oh; ++ struct yaffs_obj *in; ++ struct yaffs_obj *parent; ++ int equiv_id; ++ loff_t file_size; ++ int is_shrink; ++ int is_unlinked; ++ struct yaffs_ext_tags tags; ++ int result; ++ int alloc_failed = 0; ++ int chunk = blk * dev->param.chunks_per_block + chunk_in_block; ++ struct yaffs_file_var *file_var; ++ struct yaffs_hardlink_var *hl_var; ++ struct yaffs_symlink_var *sl_var; ++ ++ if (summary_available) { ++ result = yaffs_summary_fetch(dev, &tags, chunk_in_block); ++ tags.seq_number = bi->seq_number; ++ } ++ ++ if (!summary_available || tags.obj_id == 0) { ++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags); ++ dev->tags_used++; ++ } else { ++ dev->summary_used++; ++ } ++ ++ /* Let's have a good look at this chunk... */ ++ ++ if (!tags.chunk_used) { ++ /* An unassigned chunk in the block. ++ * If there are used chunks after this one, then ++ * it is a chunk that was skipped due to failing ++ * the erased check. Just skip it so that it can ++ * be deleted. ++ * But, more typically, We get here when this is ++ * an unallocated chunk and his means that ++ * either the block is empty or this is the one ++ * being allocated from ++ */ ++ ++ if (*found_chunks) { ++ /* This is a chunk that was skipped due ++ * to failing the erased check */ ++ } else if (chunk_in_block == 0) { ++ /* We're looking at the first chunk in ++ * the block so the block is unused */ ++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY; ++ dev->n_erased_blocks++; ++ } else { ++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || ++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) { ++ if (dev->seq_number == bi->seq_number) { ++ /* Allocating from this block*/ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ " Allocating from %d %d", ++ blk, chunk_in_block); ++ ++ bi->block_state = ++ YAFFS_BLOCK_STATE_ALLOCATING; ++ dev->alloc_block = blk; ++ dev->alloc_page = chunk_in_block; ++ dev->alloc_block_finder = blk; ++ } else { ++ /* This is a partially written block ++ * that is not the current ++ * allocation block. ++ */ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "Partially written block %d detected. gc will fix this.", ++ blk); ++ } ++ } ++ } ++ ++ dev->n_free_chunks++; ++ ++ } else if (tags.ecc_result == ++ YAFFS_ECC_RESULT_UNFIXED) { ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ " Unfixed ECC in chunk(%d:%d), chunk ignored", ++ blk, chunk_in_block); ++ dev->n_free_chunks++; ++ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID || ++ tags.chunk_id > YAFFS_MAX_CHUNK_ID || ++ tags.obj_id == YAFFS_OBJECTID_SUMMARY || ++ (tags.chunk_id > 0 && ++ tags.n_bytes > dev->data_bytes_per_chunk) || ++ tags.seq_number != bi->seq_number) { ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored", ++ blk, chunk_in_block, tags.obj_id, ++ tags.chunk_id, tags.n_bytes); ++ dev->n_free_chunks++; ++ } else if (tags.chunk_id > 0) { ++ /* chunk_id > 0 so it is a data chunk... 
*/ ++ loff_t endpos; ++ loff_t chunk_base = (tags.chunk_id - 1) * ++ dev->data_bytes_per_chunk; ++ ++ *found_chunks = 1; ++ ++ yaffs_set_chunk_bit(dev, blk, chunk_in_block); ++ bi->pages_in_use++; ++ ++ in = yaffs_find_or_create_by_number(dev, ++ tags.obj_id, ++ YAFFS_OBJECT_TYPE_FILE); ++ if (!in) ++ /* Out of memory */ ++ alloc_failed = 1; ++ ++ if (in && ++ in->variant_type == YAFFS_OBJECT_TYPE_FILE && ++ chunk_base < in->variant.file_variant.shrink_size) { ++ /* This has not been invalidated by ++ * a resize */ ++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id, ++ chunk, -1)) ++ alloc_failed = 1; ++ ++ /* File size is calculated by looking at ++ * the data chunks if we have not ++ * seen an object header yet. ++ * Stop this practice once we find an ++ * object header. ++ */ ++ endpos = chunk_base + tags.n_bytes; ++ ++ if (!in->valid && ++ in->variant.file_variant.scanned_size < endpos) { ++ in->variant.file_variant. ++ scanned_size = endpos; ++ in->variant.file_variant. ++ file_size = endpos; ++ } ++ } else if (in) { ++ /* This chunk has been invalidated by a ++ * resize, or a past file deletion ++ * so delete the chunk*/ ++ yaffs_chunk_del(dev, chunk, 1, __LINE__); ++ } ++ } else { ++ /* chunk_id == 0, so it is an ObjectHeader. ++ * Thus, we read in the object header and make ++ * the object ++ */ ++ *found_chunks = 1; ++ ++ yaffs_set_chunk_bit(dev, blk, chunk_in_block); ++ bi->pages_in_use++; ++ ++ oh = NULL; ++ in = NULL; ++ ++ if (tags.extra_available) { ++ in = yaffs_find_or_create_by_number(dev, ++ tags.obj_id, ++ tags.extra_obj_type); ++ if (!in) ++ alloc_failed = 1; ++ } ++ ++ if (!in || ++ (!in->valid && dev->param.disable_lazy_load) || ++ tags.extra_shadows || ++ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT || ++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) { ++ ++ /* If we don't have valid info then we ++ * need to read the chunk ++ * TODO In future we can probably defer ++ * reading the chunk and living with ++ * invalid data until needed. ++ */ ++ ++ result = yaffs_rd_chunk_tags_nand(dev, ++ chunk, ++ chunk_data, ++ NULL); ++ ++ oh = (struct yaffs_obj_hdr *)chunk_data; ++ ++ if (dev->param.inband_tags) { ++ /* Fix up the header if they got ++ * corrupted by inband tags */ ++ oh->shadows_obj = ++ oh->inband_shadowed_obj_id; ++ oh->is_shrink = ++ oh->inband_is_shrink; ++ } ++ ++ if (!in) { ++ in = yaffs_find_or_create_by_number(dev, ++ tags.obj_id, oh->type); ++ if (!in) ++ alloc_failed = 1; ++ } ++ } ++ ++ if (!in) { ++ /* TODO Hoosterman we have a problem! */ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: Could not make object for object %d at chunk %d during scan", ++ tags.obj_id, chunk); ++ return YAFFS_FAIL; ++ } ++ ++ if (in->valid) { ++ /* We have already filled this one. ++ * We have a duplicate that will be ++ * discarded, but we first have to suck ++ * out resize info if it is a file. ++ */ ++ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) && ++ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) || ++ (tags.extra_available && ++ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE) ++ )) { ++ loff_t this_size = (oh) ? ++ yaffs_oh_to_size(oh) : ++ tags.extra_file_size; ++ u32 parent_obj_id = (oh) ? ++ oh->parent_obj_id : ++ tags.extra_parent_id; ++ ++ is_shrink = (oh) ? ++ oh->is_shrink : ++ tags.extra_is_shrink; ++ ++ /* If it is deleted (unlinked ++ * at start also means deleted) ++ * we treat the file size as ++ * being zeroed at this point. 
++ */ ++ if (parent_obj_id == YAFFS_OBJECTID_DELETED || ++ parent_obj_id == YAFFS_OBJECTID_UNLINKED) { ++ this_size = 0; ++ is_shrink = 1; ++ } ++ ++ if (is_shrink && ++ in->variant.file_variant.shrink_size > ++ this_size) ++ in->variant.file_variant.shrink_size = ++ this_size; ++ ++ if (is_shrink) ++ bi->has_shrink_hdr = 1; ++ } ++ /* Use existing - destroy this one. */ ++ yaffs_chunk_del(dev, chunk, 1, __LINE__); ++ } ++ ++ if (!in->valid && in->variant_type != ++ (oh ? oh->type : tags.extra_obj_type)) { ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: Bad type, %d != %d, for object %d at chunk %d during scan", ++ oh ? oh->type : tags.extra_obj_type, ++ in->variant_type, tags.obj_id, ++ chunk); ++ in = yaffs_retype_obj(in, oh ? oh->type : tags.extra_obj_type); ++ } ++ ++ if (!in->valid && ++ (tags.obj_id == YAFFS_OBJECTID_ROOT || ++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) { ++ /* We only load some info, don't fiddle ++ * with directory structure */ ++ in->valid = 1; ++ ++ if (oh) { ++ in->yst_mode = oh->yst_mode; ++ yaffs_load_attribs(in, oh); ++ in->lazy_loaded = 0; ++ } else { ++ in->lazy_loaded = 1; ++ } ++ in->hdr_chunk = chunk; ++ ++ } else if (!in->valid) { ++ /* we need to load this info */ ++ in->valid = 1; ++ in->hdr_chunk = chunk; ++ if (oh) { ++ in->variant_type = oh->type; ++ in->yst_mode = oh->yst_mode; ++ yaffs_load_attribs(in, oh); ++ ++ if (oh->shadows_obj > 0) ++ yaffs_handle_shadowed_obj(dev, ++ oh->shadows_obj, 1); ++ ++ yaffs_set_obj_name_from_oh(in, oh); ++ parent = yaffs_find_or_create_by_number(dev, ++ oh->parent_obj_id, ++ YAFFS_OBJECT_TYPE_DIRECTORY); ++ file_size = yaffs_oh_to_size(oh); ++ is_shrink = oh->is_shrink; ++ equiv_id = oh->equiv_id; ++ } else { ++ in->variant_type = tags.extra_obj_type; ++ parent = yaffs_find_or_create_by_number(dev, ++ tags.extra_parent_id, ++ YAFFS_OBJECT_TYPE_DIRECTORY); ++ file_size = tags.extra_file_size; ++ is_shrink = tags.extra_is_shrink; ++ equiv_id = tags.extra_equiv_id; ++ in->lazy_loaded = 1; ++ } ++ in->dirty = 0; ++ ++ if (!parent) ++ alloc_failed = 1; ++ ++ /* directory stuff... ++ * hook up to parent ++ */ ++ ++ if (parent && ++ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) { ++ /* Set up as a directory */ ++ parent->variant_type = ++ YAFFS_OBJECT_TYPE_DIRECTORY; ++ INIT_LIST_HEAD(&parent-> ++ variant.dir_variant.children); ++ } else if (!parent || ++ parent->variant_type != ++ YAFFS_OBJECT_TYPE_DIRECTORY) { ++ /* Hoosterman, another problem.... ++ * Trying to use a non-directory as a directory ++ */ ++ ++ yaffs_trace(YAFFS_TRACE_ERROR, ++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." ++ ); ++ parent = dev->lost_n_found; ++ } ++ yaffs_add_obj_to_dir(parent, in); ++ ++ is_unlinked = (parent == dev->del_dir) || ++ (parent == dev->unlinked_dir); ++ ++ if (is_shrink) ++ /* Mark the block */ ++ bi->has_shrink_hdr = 1; ++ ++ /* Note re hardlinks. ++ * Since we might scan a hardlink before its equivalent ++ * object is scanned we put them all in a list. ++ * After scanning is complete, we should have all the ++ * objects, so we run through this list and fix up all ++ * the chains. ++ */ ++ ++ switch (in->variant_type) { ++ case YAFFS_OBJECT_TYPE_UNKNOWN: ++ /* Todo got a problem */ ++ break; ++ case YAFFS_OBJECT_TYPE_FILE: ++ file_var = &in->variant.file_variant; ++ if (file_var->scanned_size < file_size) { ++ /* This covers the case where the file ++ * size is greater than the data held. 
++ * This will happen if the file is ++ * resized to be larger than its ++ * current data extents. ++ */ ++ file_var->file_size = file_size; ++ file_var->scanned_size = file_size; ++ } ++ ++ if (file_var->shrink_size > file_size) ++ file_var->shrink_size = file_size; ++ ++ break; ++ case YAFFS_OBJECT_TYPE_HARDLINK: ++ hl_var = &in->variant.hardlink_variant; ++ if (!is_unlinked) { ++ hl_var->equiv_id = equiv_id; ++ list_add(&in->hard_links, hard_list); ++ } ++ break; ++ case YAFFS_OBJECT_TYPE_DIRECTORY: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_SPECIAL: ++ /* Do nothing */ ++ break; ++ case YAFFS_OBJECT_TYPE_SYMLINK: ++ sl_var = &in->variant.symlink_variant; ++ if (oh) { ++ sl_var->alias = ++ yaffs_clone_str(oh->alias); ++ if (!sl_var->alias) ++ alloc_failed = 1; ++ } ++ break; ++ } ++ } ++ } ++ return alloc_failed ? YAFFS_FAIL : YAFFS_OK; ++} ++ ++int yaffs2_scan_backwards(struct yaffs_dev *dev) ++{ ++ int blk; ++ int block_iter; ++ int start_iter; ++ int end_iter; ++ int n_to_scan = 0; ++ enum yaffs_block_state state; ++ int c; ++ int deleted; ++ LIST_HEAD(hard_list); ++ struct yaffs_block_info *bi; ++ u32 seq_number; ++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1; ++ u8 *chunk_data; ++ int found_chunks; ++ int alloc_failed = 0; ++ struct yaffs_block_index *block_index = NULL; ++ int alt_block_index = 0; ++ int summary_available; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...", ++ dev->internal_start_block, dev->internal_end_block); ++ ++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER; ++ ++ block_index = ++ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS); ++ ++ if (!block_index) { ++ block_index = ++ vmalloc(n_blocks * sizeof(struct yaffs_block_index)); ++ alt_block_index = 1; ++ } ++ ++ if (!block_index) { ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "yaffs2_scan_backwards() could not allocate block index!" 
++ ); ++ return YAFFS_FAIL; ++ } ++ ++ dev->blocks_in_checkpt = 0; ++ ++ chunk_data = yaffs_get_temp_buffer(dev); ++ ++ /* Scan all the blocks to determine their state */ ++ bi = dev->block_info; ++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; ++ blk++) { ++ yaffs_clear_chunk_bits(dev, blk); ++ bi->pages_in_use = 0; ++ bi->soft_del_pages = 0; ++ ++ yaffs_query_init_block_state(dev, blk, &state, &seq_number); ++ ++ bi->block_state = state; ++ bi->seq_number = seq_number; ++ ++ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) ++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT; ++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK) ++ bi->block_state = YAFFS_BLOCK_STATE_DEAD; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, ++ "Block scanning block %d state %d seq %d", ++ blk, bi->block_state, seq_number); ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) { ++ dev->blocks_in_checkpt++; ++ ++ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) { ++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, ++ "block %d is bad", blk); ++ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) { ++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty "); ++ dev->n_erased_blocks++; ++ dev->n_free_chunks += dev->param.chunks_per_block; ++ } else if (bi->block_state == ++ YAFFS_BLOCK_STATE_NEEDS_SCAN) { ++ /* Determine the highest sequence number */ ++ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER && ++ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) { ++ block_index[n_to_scan].seq = seq_number; ++ block_index[n_to_scan].block = blk; ++ n_to_scan++; ++ if (seq_number >= dev->seq_number) ++ dev->seq_number = seq_number; ++ } else { ++ /* TODO: Nasty sequence number! */ ++ yaffs_trace(YAFFS_TRACE_SCAN, ++ "Block scanning block %d has bad sequence number %d", ++ blk, seq_number); ++ } ++ } ++ bi++; ++ } ++ ++ yaffs_trace(YAFFS_TRACE_ALWAYS, "%d blocks to be sorted...", n_to_scan); ++ ++ cond_resched(); ++ ++ /* Sort the blocks by sequence number */ ++ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index), ++ yaffs2_ybicmp, NULL); ++ ++ cond_resched(); ++ ++ yaffs_trace(YAFFS_TRACE_SCAN, "...done"); ++ ++ /* Now scan the blocks looking at the data. */ ++ start_iter = 0; ++ end_iter = n_to_scan - 1; ++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan); ++ ++ /* For each block.... backwards */ ++ for (block_iter = end_iter; ++ !alloc_failed && block_iter >= start_iter; ++ block_iter--) { ++ /* Cooperative multitasking! This loop can run for so ++ long that watchdog timers expire. */ ++ cond_resched(); ++ ++ /* get the block to scan in the correct order */ ++ blk = block_index[block_iter].block; ++ bi = yaffs_get_block_info(dev, blk); ++ deleted = 0; ++ ++ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk); ++ ++ /* For each chunk in each block that needs scanning.... */ ++ found_chunks = 0; ++ if (summary_available) ++ c = dev->chunks_per_summary - 1; ++ else ++ c = dev->param.chunks_per_block - 1; ++ ++ for (/* c is already initialised */; ++ !alloc_failed && c >= 0 && ++ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || ++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING); ++ c--) { ++ /* Scan backwards... ++ * Read the tags and decide what to do ++ */ ++ if (yaffs2_scan_chunk(dev, bi, blk, c, ++ &found_chunks, chunk_data, ++ &hard_list, summary_available) == ++ YAFFS_FAIL) ++ alloc_failed = 1; ++ } ++ ++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) { ++ /* If we got this far while scanning, then the block ++ * is fully allocated. 
*/ ++ bi->block_state = YAFFS_BLOCK_STATE_FULL; ++ } ++ ++ /* Now let's see if it was dirty */ ++ if (bi->pages_in_use == 0 && ++ !bi->has_shrink_hdr && ++ bi->block_state == YAFFS_BLOCK_STATE_FULL) { ++ yaffs_block_became_dirty(dev, blk); ++ } ++ } ++ ++ yaffs_skip_rest_of_block(dev); ++ ++ if (alt_block_index) ++ vfree(block_index); ++ else ++ kfree(block_index); ++ ++ /* Ok, we've done all the scanning. ++ * Fix up the hard link chains. ++ * We have scanned all the objects, now it's time to add these ++ * hardlinks. ++ */ ++ yaffs_link_fixup(dev, &hard_list); ++ ++ yaffs_release_temp_buffer(dev, chunk_data); ++ ++ if (alloc_failed) ++ return YAFFS_FAIL; ++ ++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends"); ++ ++ return YAFFS_OK; ++} +diff -Nur linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs2.h linux-3.4.90/fs/yaffs2/yaffs_yaffs2.h +--- linux-3.4.90.orig/fs/yaffs2/yaffs_yaffs2.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yaffs_yaffs2.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,39 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YAFFS_YAFFS2_H__ ++#define __YAFFS_YAFFS2_H__ ++ ++#include "yaffs_guts.h" ++ ++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev); ++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev); ++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev, ++ struct yaffs_block_info *bi); ++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no, ++ struct yaffs_block_info *bi); ++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi); ++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev); ++int yaffs2_checkpt_required(struct yaffs_dev *dev); ++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev); ++ ++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev); ++int yaffs2_checkpt_save(struct yaffs_dev *dev); ++int yaffs2_checkpt_restore(struct yaffs_dev *dev); ++ ++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size); ++int yaffs2_scan_backwards(struct yaffs_dev *dev); ++ ++#endif +diff -Nur linux-3.4.90.orig/fs/yaffs2/yportenv.h linux-3.4.90/fs/yaffs2/yportenv.h +--- linux-3.4.90.orig/fs/yaffs2/yportenv.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.90/fs/yaffs2/yportenv.h 2014-05-17 15:08:09.000000000 +0200 +@@ -0,0 +1,85 @@ ++/* ++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. ++ * ++ * Copyright (C) 2002-2011 Aleph One Ltd. ++ * for Toby Churchill Ltd and Brightstar Engineering ++ * ++ * Created by Charles Manning ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License version 2.1 as ++ * published by the Free Software Foundation. ++ * ++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. ++ */ ++ ++#ifndef __YPORTENV_H__ ++#define __YPORTENV_H__ ++ ++/* ++ * Define the MTD version in terms of Linux Kernel versions ++ * This allows yaffs to be used independantly of the kernel ++ * as well as with it. 
++ */ ++ ++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) ++ ++#ifdef YAFFS_OUT_OF_TREE ++#include "moduleconfig.h" ++#endif ++ ++#include ++#define MTD_VERSION_CODE LINUX_VERSION_CODE ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* These type wrappings are used to support Unicode names in WinCE. */ ++#define YCHAR char ++#define YUCHAR unsigned char ++#define _Y(x) x ++ ++#define YAFFS_LOSTNFOUND_NAME "lost+found" ++#define YAFFS_LOSTNFOUND_PREFIX "obj" ++ ++ ++#define YAFFS_ROOT_MODE 0755 ++#define YAFFS_LOSTNFOUND_MODE 0700 ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec ++#define Y_TIME_CONVERT(x) (x).tv_sec ++#else ++#define Y_CURRENT_TIME CURRENT_TIME ++#define Y_TIME_CONVERT(x) (x) ++#endif ++ ++#define compile_time_assertion(assertion) \ ++ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; }) ++ ++ ++#define yaffs_printf(msk, fmt, ...) \ ++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__) ++ ++#define yaffs_trace(msk, fmt, ...) do { \ ++ if (yaffs_trace_mask & (msk)) \ ++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \ ++} while (0) ++ ++ ++#endif diff --git a/target/linux/patches/3.4.113/zlib-inflate.patch b/target/linux/patches/3.4.113/zlib-inflate.patch new file mode 100644 index 000000000..58e1f6d21 --- /dev/null +++ b/target/linux/patches/3.4.113/zlib-inflate.patch @@ -0,0 +1,12 @@ +diff -Nur linux-2.6.37.orig/lib/Kconfig linux-2.6.37/lib/Kconfig +--- linux-2.6.37.orig/lib/Kconfig 2011-01-05 01:50:19.000000000 +0100 ++++ linux-2.6.37/lib/Kconfig 2011-03-01 20:10:29.833370667 +0100 +@@ -95,7 +95,7 @@ + # compression support is select'ed if needed + # + config ZLIB_INFLATE +- tristate ++ boolean + + config ZLIB_DEFLATE + tristate -- cgit v1.2.3