author    Waldemar Brodkorb <wbx@openadk.org>  2015-12-24 00:05:11 +0100
committer Waldemar Brodkorb <wbx@openadk.org>  2015-12-24 00:06:34 +0100
commit    fadf503d6599e1a1d3750fd11427b7ec91d9068e
tree      83c0892ad14fda75cb3f776dc8222822a12a349f /target/linux/patches/4.1.13
parent    77049ba01018d740d6c7f4958ffeba4aa50272c6
bump kernelversions
Diffstat (limited to 'target/linux/patches/4.1.13')
-rw-r--r--  target/linux/patches/4.1.13/aufs.patch                  | 35215
-rw-r--r--  target/linux/patches/4.1.13/cleankernel.patch           |    11
-rw-r--r--  target/linux/patches/4.1.13/cris-header.patch           |    12
-rw-r--r--  target/linux/patches/4.1.13/initramfs-nosizelimit.patch |    57
-rw-r--r--  target/linux/patches/4.1.13/realtime.patch              | 27435
-rw-r--r--  target/linux/patches/4.1.13/regmap-default-on.patch     |    17
-rw-r--r--  target/linux/patches/4.1.13/remove-warn.patch           |    11
-rw-r--r--  target/linux/patches/4.1.13/startup.patch               |    37
8 files changed, 0 insertions, 62795 deletions
diff --git a/target/linux/patches/4.1.13/aufs.patch b/target/linux/patches/4.1.13/aufs.patch
deleted file mode 100644
index 749c90989..000000000
--- a/target/linux/patches/4.1.13/aufs.patch
+++ /dev/null
@@ -1,35215 +0,0 @@
-diff -Nur linux-4.1.10.orig/Documentation/ABI/testing/debugfs-aufs linux-4.1.10/Documentation/ABI/testing/debugfs-aufs
---- linux-4.1.10.orig/Documentation/ABI/testing/debugfs-aufs 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/ABI/testing/debugfs-aufs 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,50 @@
-+What: /debug/aufs/si_<id>/
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ Under /debug/aufs, a directory named si_<id> is created
-+ per aufs mount, where <id> is a unique id generated
-+ internally.
-+
-+What: /debug/aufs/si_<id>/plink
-+Date: Apr 2013
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It has three lines and shows the information about the
-+ pseudo-links. The first line is a single number
-+ representing the number of buckets. The second line is the
-+ number of pseudo-links per bucket (separated by a
-+ blank). The last line is a single number representing the
-+ total number of pseudo-links.
-+ When the aufs mount option 'noplink' is specified, it
-+ will show "1\n0\n0\n".
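-+ For example (a sketch; the debugfs mount point and the
-+ actual <id> vary per system):
-+   # cat /debug/aufs/si_*/plink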
-+
-+What: /debug/aufs/si_<id>/xib
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the consumed blocks by xib (External Inode Number
-+ Bitmap), its block size and file size.
-+ When the aufs mount option 'noxino' is specified, it
-+ will be empty. About XINO files, see the aufs manual.
-+
-+What: /debug/aufs/si_<id>/xino0, xino1 ... xinoN
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the consumed blocks by xino (External Inode Number
-+ Translation Table), its link count, block size and file
-+ size.
-+ When the aufs mount option 'noxino' is specified, it
-+ will be empty. About XINO files, see the aufs manual.
-+
-+What: /debug/aufs/si_<id>/xigen
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the consumed blocks by xigen (External Inode
-+ Generation Table), its block size and file size.
-+ If CONFIG_AUFS_EXPORT is disabled, this entry will not
-+ be created.
-+ When the aufs mount option 'noxino' is specified, it
-+ will be empty. About XINO files, see the aufs manual.
-diff -Nur linux-4.1.10.orig/Documentation/ABI/testing/sysfs-aufs linux-4.1.10/Documentation/ABI/testing/sysfs-aufs
---- linux-4.1.10.orig/Documentation/ABI/testing/sysfs-aufs 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/ABI/testing/sysfs-aufs 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,31 @@
-+What: /sys/fs/aufs/si_<id>/
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ Under /sys/fs/aufs, a directory named si_<id> is created
-+ per aufs mount, where <id> is a unique id generated
-+ internally.
-+
-+What: /sys/fs/aufs/si_<id>/br0, br1 ... brN
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the absolute path of a member directory (which
-+ is called branch) in aufs, and its permission.
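-+ For example (a sketch; the path and output are
-+ illustrative):
-+   # cat /sys/fs/aufs/si_*/br0
-+   /rw=rw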
-+
-+What: /sys/fs/aufs/si_<id>/brid0, brid1 ... bridN
-+Date: July 2013
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the id of a member directory (which is called
-+ branch) in aufs.
-+
-+What: /sys/fs/aufs/si_<id>/xi_path
-+Date: March 2009
-+Contact: J. R. Okajima <hooanon05g@gmail.com>
-+Description:
-+ It shows the absolute path of XINO (External Inode Number
-+ Bitmap, Translation Table and Generation Table) file
-+ even if it is the default path.
-+ When the aufs mount option 'noxino' is specified, it
-+ will be empty. About XINO files, see the aufs manual.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/01intro.txt linux-4.1.10/Documentation/filesystems/aufs/design/01intro.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/01intro.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/01intro.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,170 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Introduction
-+----------------------------------------
-+
-+aufs [ei ju: ef es] | [a u f s]
-+1. abbrev. for "advanced multi-layered unification filesystem".
-+2. abbrev. for "another unionfs".
-+3. abbrev. for "auf das" in German which means "on the" in English.
-+ Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
-+ But "Filesystem aufs Filesystem" is hard to understand.
-+
-+AUFS is a filesystem with the following features:
-+- multi layered stackable unification filesystem, the member directory
-+ is called a branch.
-+- branch permission and attribute, 'readonly', 'real-readonly',
-+ 'readwrite', 'whiteout-able', 'link-able whiteout', etc. and their
-+ combination.
-+- internal "file copy-on-write".
-+- logical deletion, whiteout.
-+- dynamic branch manipulation, adding, deleting and changing permission.
-+- allow bypassing aufs, user's direct branch access.
-+- external inode number translation table and bitmap which maintains the
-+ persistent aufs inode number.
-+- seekable directory, including NFS readdir.
-+- file mapping, mmap and sharing pages.
-+- pseudo-link, hardlink over branches.
-+- loopback mounted filesystem as a branch.
-+- several policies to select one among multiple writable branches.
-+- revert a single systemcall when an error occurs in aufs.
-+- and more...
-+
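-+For example, a minimal two-branch union can be mounted like this (a
-+sketch; see the aufs manual for the full option list):
-+  # mount -t aufs -o br=/rw=rw:/ro=ro none /au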
-+
-+Multi Layered Stackable Unification Filesystem
-+----------------------------------------------------------------------
-+Most people already know what it is.
-+It is a filesystem which unifies several directories and provides a
-+merged single directory. When users access a file, the access is
-+redirected to the real file on the member filesystem. The member
-+filesystem is called 'lower filesystem' or 'branch' and has a mode,
-+'readonly' or 'readwrite.' Deleting a file on the lower readonly
-+branch is handled by creating a 'whiteout' on the upper writable
-+branch.
-+
-+On LKML, there have been discussions about UnionMount (Jan Blunck,
-+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
-+different approaches to implement the merged view.
-+The former tries putting it into VFS, and the latter implements it as
-+a separate filesystem.
-+(If I misunderstand these implementations, please let me know and I
-+shall correct it, because it has been a long time since I last read
-+their source files).
-+
-+UnionMount's approach may keep the implementation small, but it may be
-+hard to share branches between several UnionMounts, since the whiteout
-+in it is implemented in the inode on the branch filesystem and is always
-+shared. According to Bharata's post, readdir did not seem to be
-+finished yet.
-+There are several missing features known in these implementations, such as
-+- for users, the inode number may change silently. eg. copy-up.
-+- link(2) may break by copy-up.
-+- read(2) may get an obsoleted filedata (fstat(2) too).
-+- fcntl(F_SETLK) may be broken by copy-up.
-+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
-+ open(O_RDWR).
-+
-+In linux-3.18, the "overlay" filesystem (formerly known as "overlayfs")
-+was merged into mainline. This is another implementation of UnionMount
-+as a separate filesystem. All the limitations and known problems of
-+UnionMount are equally inherited by the "overlay" filesystem.
-+
-+Unionfs has a longer history. When I started implementing a stackable
-+filesystem (Aug 2005), it already existed. It has virtual super_block,
-+inode, dentry and file objects, and they have an array pointing to the
-+lower objects of the same kind. After contributing many patches for Unionfs, I
-+re-started my project AUFS (Jun 2006).
-+
-+In AUFS, the filesystem structure resembles Unionfs's, but I
-+implemented my own ideas, approaches and enhancements, and it became a
-+totally different one.
-+
-+Comparing DM snapshot and fs based implementation
-+- the number of bytes to be copied between devices is much smaller.
-+- only a single filesystem type can be used.
-+- the fs must be writable; no readonly fs, even for the lower original
-+  device. So a compression fs will not be usable. But if we use a
-+  loopback mount, we may address this issue.
-+  For instance (a sketch; the device names and the dmsetup table are
-+  illustrative):
-+	mount -o loop,ro /cdrom/squashfs.img /sq
-+	losetup /dev/loop1 /sq/ext2.img
-+	losetup /dev/loop2 /somewhere/cow
-+	dmsetup create snap --table \
-+	  "0 $(blockdev --getsz /dev/loop1) snapshot /dev/loop1 /dev/loop2 p 8"
-+- it will be difficult (or needs more operations) to extract the
-+ difference between the original device and COW.
-+- DM snapshot-merge may help a lot when users try merging. In the
-+  fs-layer union, users will use rsync(1).
-+
-+You may want to read my old paper "Filesystems in LiveCD"
-+(http://aufs.sourceforge.net/aufs2/report/sq/sq.pdf).
-+
-+
-+Several characters/aspects/persona of aufs
-+----------------------------------------------------------------------
-+
-+Aufs has several characters, aspects or persona.
-+1. a filesystem, callee of VFS helper
-+2. sub-VFS, caller of VFS helper for branches
-+3. a virtual filesystem which maintains persistent inode number
-+4. reader/writer of files on branches, much like an application
-+
-+1. Callee of VFS Helper
-+As an ordinary linux filesystem, aufs is a callee of VFS. For instance,
-+unlink(2) from an application reaches sys_unlink() kernel function and
-+then vfs_unlink() is called. vfs_unlink() is one of the VFS helpers and it
-+calls filesystem specific unlink operation. Actually aufs implements the
-+unlink operation but it behaves like a redirector.
-+
-+2. Caller of VFS Helper for Branches
-+aufs_unlink() passes the unlink request to the branch filesystem as if
-+it were called from VFS. So the called unlink operation of the branch
-+filesystem acts as usual. As a caller of VFS helper, aufs should handle
-+every necessary pre/post operation for the branch filesystem.
-+- acquire the lock for the parent dir on a branch
-+- lookup in a branch
-+- revalidate dentry on a branch
-+- mnt_want_write() for a branch
-+- vfs_unlink() for a branch
-+- mnt_drop_write() for a branch
-+- release the lock on a branch
-+
-+3. Persistent Inode Number
-+One of the most important issues for a filesystem is to maintain inode
-+numbers. This is particularly important to support exporting a
-+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
-+backend block device of its own, but some storage is necessary to
-+keep and maintain the inode numbers. It may be a large space and may
-+not fit in memory. Aufs rents some space from its first writable
-+branch filesystem (by default) and creates file(s) on it. These files
-+are created by aufs internally and soon removed (currently), while
-+kept open.
-+Note: Because these files are removed, they are totally gone after
-+ unmounting aufs. It means the inode numbers are not persistent
-+ across unmount or reboot. I have a plan to make them really
-+ persistent which will be important for aufs on NFS server.
-+
-+4. Read/Write Files Internally (copy-on-write)
-+Because a branch can be readonly, when you write to a file on it, aufs
-+will "copy-up" the file to the upper writable branch internally, and
-+then write the originally requested data to the file. Generally the
-+kernel doesn't open/read/write files actively. In aufs, even a single
-+write may cause an internal "file copy". This behaviour is very
-+similar to the cp(1) command.
-+
-+Some people may think it is better to pass such work to a userspace
-+helper, instead of doing it in kernel space. Actually I am still
-+thinking about it, but currently it is implemented in kernel space.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/02struct.txt linux-4.1.10/Documentation/filesystems/aufs/design/02struct.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/02struct.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/02struct.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,258 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Basic Aufs Internal Structure
-+
-+Superblock/Inode/Dentry/File Objects
-+----------------------------------------------------------------------
-+Like an ordinary filesystem, aufs has its own
-+superblock/inode/dentry/file objects. All these objects have a
-+dynamically allocated array and store the same kind of pointers to the
-+lower filesystem, branch.
-+For example, when you build a union with one readwrite branch and one
-+readonly branch, mounted on /au, /rw and /ro respectively:
-+- /au = /rw + /ro
-+- /ro/fileA exists but /rw/fileA does not
-+
-+The aufs lookup operation finds /ro/fileA and gets a dentry for it. These
-+pointers are stored in an aufs dentry. The array in the aufs dentry will be:
-+- [0] = NULL (because /rw/fileA doesn't exist)
-+- [1] = /ro/fileA
-+
-+This style of array is essentially the same in the aufs
-+superblock/inode/dentry/file objects.
-+
-+Because aufs supports manipulating branches, ie. adding/deleting/changing
-+branches dynamically, these objects have their own generation. When
-+branches are changed, the generation in the aufs superblock is
-+incremented, and the generation in another object is compared against it
-+when that object is accessed. When the generation in another object is
-+obsolete, aufs refreshes the internal array.
-+
-+
-+Superblock
-+----------------------------------------------------------------------
-+Additionally the aufs superblock has some data for the policies to select
-+one among multiple writable branches, XIB files, pseudo-links and a
-+kobject. See below for details.
-+About the policies which support copying-down a directory, see
-+wbr_policy.txt too.
-+
-+
-+Branch and XINO(External Inode Number Translation Table)
-+----------------------------------------------------------------------
-+Every branch has its own xino (external inode number translation table)
-+file. The xino file is created and unlinked by aufs internally. When two
-+members of a union exist on the same filesystem, they share the single
-+xino file.
-+The structure of a xino file is simple: just a sequence of aufs inode
-+numbers, indexed by the lower inode number.
-+In the above sample, assume the inode number of /ro/fileA is i111 and
-+aufs assigns the inode number i999 for fileA. Then aufs writes 999 as
-+4(8) bytes at 111 * 4(8) bytes offset in the xino file.
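-+Conceptually (a userspace sketch only; xino files are created and
-+unlinked internally by aufs, so they are not normally accessible, and a
-+4-byte inode number is assumed here):
-+	# reading the aufs inode number assigned to lower inode 111:
-+	dd if=xino bs=4 skip=111 count=1 2>/dev/null | od -An -tu4
-+	999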
-+
-+When the inode numbers are not contiguous, the xino file will be sparse,
-+having holes in it, and doesn't consume as much disk space as it
-+might appear to. If your branch filesystem consumes disk space for such
-+holes, then you should specify the 'xino=' option when mounting aufs.
-+
-+Aufs has a mount option to free the disk blocks for such holes in XINO
-+files on tmpfs or ramdisk. But it is not so effective actually. If you
-+meet a problem of disk shortage due to XINO files, then you should try
-+"tmpfs-ino.patch" (and "vfs-ino.patch" too) in aufs4-standalone.git.
-+The patch localizes the inode number assignment per tmpfs mount and
-+avoids the holes in XINO files.
-+
-+Also a writable branch has three kinds of "whiteout bases". All of
-+these exist when the branch is joined to aufs, and their names are
-+whiteout-ed doubly, so that users will never see their names in the aufs
-+hierarchy.
-+1. a regular file which will be hardlinked to all whiteouts.
-+2. a directory to store a pseudo-link.
-+3. a directory to store an "orphan"-ed file temporarily.
-+
-+1. Whiteout Base
-+ When you remove a file on a readonly branch, aufs handles it as a
-+ logical deletion and creates a whiteout on the upper writable branch
-+ as a hardlink of this file in order not to consume inode on the
-+ writable branch.
-+2. Pseudo-link Dir
-+ See below, Pseudo-link.
-+3. Step-Parent Dir
-+ When "fileC" exists on the lower readonly branch only and it is
-+ opened and removed with its parent dir, and then user writes
-+ something into it, then aufs copies-up fileC to this
-+ directory. Because there is no other dir to store fileC. After
-+ creating a file under this dir, the file is unlinked.
-+
-+Because aufs supports manipulating branches, ie. adding/deleting/changing
-+them dynamically, a branch has its own id. When the branch order changes,
-+aufs finds the new index by searching the branch id.
-+
-+
-+Pseudo-link
-+----------------------------------------------------------------------
-+Assume "fileA" exists on the lower readonly branch only and it is
-+hardlinked to "fileB" on the branch. When you write something to fileA,
-+aufs copies it up to the upper writable branch. Additionally aufs
-+creates a hardlink under the Pseudo-link Directory of the writable
-+branch. The inode of a pseudo-link is kept in aufs super_block as a
-+simple list. If fileB is read after unlinking fileA, aufs returns
-+filedata from the pseudo-link instead of the lower readonly
-+branch. Because the pseudo-link is based upon the inode, keeping the
-+inode number via xino (see above) is essential.
-+
-+All the hardlinks under the Pseudo-link Directory of the writable branch
-+should be restored in a proper location later. Aufs provides a utility
-+to do this. The userspace helpers are executed at remounting and
-+unmounting aufs by default.
-+While this utility is running, it puts aufs into the pseudo-link
-+maintenance mode. In this mode, only the process which began the
-+maintenance mode (and its child processes) is allowed to operate in
-+aufs. Some other processes which are not related to the pseudo-link will
-+be allowed to run too, but the rest have to return an error or wait
-+until the maintenance mode ends. If a process already holds an inode
-+mutex (in VFS), it has to return an error.
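-+
-+For example, the helper can also be invoked manually (a sketch; auplink
-+is part of aufs-util.git):
-+	# auplink /au flush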
-+
-+
-+XIB(external inode number bitmap)
-+----------------------------------------------------------------------
-+In addition to the xino file per branch, aufs has an external inode number
-+bitmap in a superblock object. It is also an internal file, just like a
-+xino file.
-+It is a simple bitmap to mark whether the aufs inode number is in-use or
-+not.
-+To reduce the file I/O, aufs prepares a single memory page to cache xib.
-+
-+As well as XINO files, aufs has a feature to truncate/refresh XIB to
-+reduce the number of consumed disk blocks for these files.
-+
-+
-+Virtual or Vertical Dir, and Readdir in Userspace
-+----------------------------------------------------------------------
-+In order to support multiple layers (branches), the aufs readdir operation
-+constructs a virtual dir block in memory. For readdir, aufs calls
-+vfs_readdir() internally for each dir on the branches, merges their
-+entries, eliminating the whiteout-ed ones, and sets the result in the
-+file (dir) object. So the file object has its entry list until it is
-+closed. The entry list will be updated when the file position is zero
-+and has become obsolete. This decision is made in aufs automatically.
-+
-+The dynamically allocated memory block for the name of entries has a
-+unit of 512 bytes (by default) and stores the names contiguously (no
-+padding). Another block for each entry is handled by kmem_cache too.
-+While building dir blocks, aufs creates a hash list and judges whether
-+the entry is whiteout-ed by its upper branch or already listed.
-+The merged result is cached in the corresponding inode object and
-+maintained by a customizable life-time option.
-+
-+Some people may say this can be a security hole or invite DoS attacks,
-+since an opened and once readdir-ed dir (file object) holds its entry
-+list and puts pressure on system memory. But I'd say it is similar
-+to files under /proc or /sys. The virtual files in them also hold a
-+memory page (generally) while they are opened. When an idea to reduce
-+memory for them is introduced, it will be applied to aufs too.
-+For those who really hate this situation, I've developed a readdir(3)
-+library which operates this merging in userspace. You just need to set
-+the LD_PRELOAD environment variable, and aufs will consume no memory in
-+kernel space for readdir(3).
-+
-+
-+Workqueue
-+----------------------------------------------------------------------
-+Aufs sometimes requires privileged access to a branch, for instance
-+in copy-up/down operations. When a user process is going to make changes
-+to a file which exists on the lower readonly branch only, the mode
-+of one of the ancestor directories may not be writable by the user
-+process. Here aufs copies-up the file with its ancestors, and they may
-+require privilege to set their owner/group/mode/etc.
-+This is a typical case of the application character of aufs (see
-+Introduction).
-+
-+Aufs uses a workqueue synchronously for this case. It creates its own
-+workqueue. The workqueue is a kernel thread and has privilege. Aufs
-+passes the request to call mkdir or write (for example), and waits for
-+its completion. This approach simply solves the problem of signal
-+handlers.
-+If aufs didn't adopt the workqueue and instead changed the privilege of
-+the process, then the process might receive an unexpected SIGXFSZ or
-+other signals.
-+
-+Aufs also uses the system global workqueue (the "events" kernel thread)
-+for asynchronous tasks, such as handling inotify/fsnotify, re-creating a
-+whiteout base, etc. This is unrelated to privilege.
-+Most aufs operations try to acquire a rw_semaphore for the aufs
-+superblock at the beginning, and at the same time wait for the
-+completion of all queued asynchronous tasks.
-+
-+
-+Whiteout
-+----------------------------------------------------------------------
-+The whiteout in aufs is very similar to Unionfs's. It is represented
-+by its filename. UnionMount takes the approach of a file mode, but I am
-+afraid several utilities (find(1) or something) would have to support it.
-+
-+Basically the whiteout represents "logical deletion" which stops aufs
-+from looking up further, but it also represents "dir is opaque" which
-+also stops further lookup.
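-+
-+For example, deleting a file which exists only on the readonly branch
-+leaves a whiteout on the writable branch (a sketch; ".wh." is the
-+whiteout name prefix used by aufs):
-+	# rm /au/fileA
-+	# ls -a /rw
-+	.wh.fileA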
-+
-+In aufs, rmdir(2) and rename(2) for a dir use whiteouts in another way.
-+In order to make the several operations within a single systemcall
-+revertible, aufs adopts an approach of renaming a directory to a
-+temporary unique whiteouted name.
-+For example, in rename(2) on a dir where the target dir already exists,
-+aufs renames the target dir to a temporary unique whiteouted name before
-+the actual rename on a branch, and then handles the other actions (making
-+it opaque, updating the attributes, etc). If an error happens in these
-+actions, aufs simply renames the whiteouted name back and returns an
-+error. If all succeed, aufs registers a function with the system global
-+workqueue to remove the whiteouted unique temporary name completely and
-+asynchronously.
-+
-+
-+Copy-up
-+----------------------------------------------------------------------
-+It is a well-known feature or concept.
-+When a user modifies a file on a readonly branch, aufs operates "copy-up"
-+internally and makes the change to the new file on the upper writable branch.
-+When the triggering systemcall does not update the timestamps of the parent
-+dir, aufs reverts them after copy-up.
-+
-+
-+Move-down (aufs3.9 and later)
-+----------------------------------------------------------------------
-+"Copy-up" is one of the essential feature in aufs. It copies a file from
-+the lower readonly branch to the upper writable branch when a user
-+changes something about the file.
-+"Move-down" is an opposite action of copy-up. Basically this action is
-+ran manually instead of automatically and internally.
-+For desgin and implementation, aufs has to consider these issues.
-+- whiteout for the file may exist on the lower branch.
-+- ancestor directories may not exist on the lower branch.
-+- diropq for the ancestor directories may exist on the upper branch.
-+- free space on the lower branch will be reduced.
-+- another access to the file may happen during moving-down, including
-+ UDBA (see "Revalidate Dentry and UDBA").
-+- the file should be neither hard-linked nor pseudo-linked. They should
-+ be handled by the auplink utility later.
-+
-+Sometimes users want to move-down a file from the upper writable branch
-+to the lower readonly or writable branch. For instance,
-+- the free space of the upper writable branch is going to run out.
-+- create a new intermediate branch between the upper and lower branch.
-+- etc.
-+
-+For this purpose, use the "aumvdown" command in aufs-util.git.
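-+For example (a sketch; see aufs-util.git for the exact usage):
-+	# aumvdown /au/bigfile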
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/03atomic_open.txt linux-4.1.10/Documentation/filesystems/aufs/design/03atomic_open.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/03atomic_open.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/03atomic_open.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,85 @@
-+
-+# Copyright (C) 2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Support for a branch which has its own ->atomic_open()
-+----------------------------------------------------------------------
-+The filesystems which implement ->atomic_open() are not the majority.
-+For example NFSv4 does, and aufs should call NFSv4's ->atomic_open(),
-+particularly for the open(O_CREAT|O_EXCL, 0400) case. Without
-+->atomic_open(), NFSv4 returns an error for this open(2). I am not
-+sure whether all filesystems which have ->atomic_open() behave like
-+this, but NFSv4 surely returns the error.
-+
-+In order to support ->atomic_open() for aufs, there are a few
-+approaches.
-+
-+A. Introduce aufs_atomic_open()
-+ - calls one of VFS:do_last(), lookup_open() or atomic_open() for
-+ branch fs.
-+B. Introduce aufs_atomic_open() calling create, open and chmod. this is
-+ an aufs user Pip Cet's approach
-+ - calls aufs_create(), VFS finish_open() and notify_change().
-+ - pass fake-mode to finish_open(), and then correct the mode by
-+ notify_change().
-+C. Extend aufs_open() to call branch fs's ->atomic_open()
-+ - no aufs_atomic_open().
-+ - aufs_lookup() registers the TID to an aufs internal object.
-+ - aufs_create() does nothing when the matching TID is registered, but
-+ registers the mode.
-+ - aufs_open() calls branch fs's ->atomic_open() when the matching
-+ TID is registered.
-+D. Extend aufs_open() to re-try branch fs's ->open() with superuser's
-+ credential
-+ - no aufs_atomic_open().
-+ - aufs_create() registers the TID to an internal object. this info
-+ represents "this process created this file just now."
-+ - when aufs gets EACCES from branch fs's ->open(), then confirm the
-+ registered TID and re-try open() with superuser's credential.
-+
-+Pros and cons for each approach.
-+
-+A.
-+ - straightforward but highly depends upon VFS internals.
-+ - the atomic behaviour is kept.
-+ - some of the parameters, such as nameidata, are hard to reproduce for
-+ a branch fs.
-+ - large overhead.
-+B.
-+ - easy to implement.
-+ - the atomic behaviour is lost.
-+C.
-+ - the atomic behaviour is kept.
-+ - dirty and tricky.
-+ - VFS checks whether the file is created correctly after calling
-+ ->create(), which means this approach doesn't work.
-+D.
-+ - easy to implement.
-+ - the atomic behaviour is lost.
-+ - to open a file with superuser's credential and give it to a user
-+ process is a bad idea, since the file object keeps the credential
-+ in it. It may affect LSM or something. This approach doesn't work
-+ either.
-+
-+Approach A is ideal, but it is hard to implement. So here is a
-+variation of A, which is to be implemented.
-+
-+A-1. Introduce aufs_atomic_open()
-+ - calls the branch fs's ->atomic_open() if it exists; otherwise calls
-+ vfs_create() and finish_open().
-+ - the demerit is that the several checks after the branch fs's
-+ ->atomic_open() are lost. In the ordinary case, the checks are
-+ done by VFS:do_last(), lookup_open() and atomic_open(). Some can
-+ be implemented in aufs, but not all, I am afraid.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/03lookup.txt linux-4.1.10/Documentation/filesystems/aufs/design/03lookup.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/03lookup.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/03lookup.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,113 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Lookup in a Branch
-+----------------------------------------------------------------------
-+Since aufs has the sub-VFS character (see Introduction), it operates
-+lookup for branches as VFS does. It may be heavy work. But almost every
-+lookup operation in aufs is the simplest case, ie. looking up only an
-+entry directly connected to its parent. Digging down the directory
-+hierarchy is unnecessary. VFS has a function lookup_one_len() for that
-+use, and aufs calls it.
-+
-+When a branch is a remote filesystem, aufs basically relies upon its
-+->d_revalidate(); aufs also forces the hardest revalidate tests for
-+them.
-+For d_revalidate, aufs implements three levels of revalidate tests. See
-+"Revalidate Dentry and UDBA" for details.
-+
-+
-+Test Only the Highest One for the Directory Permission (dirperm1 option)
-+----------------------------------------------------------------------
-+Let's try a case study.
-+- aufs has two branches, the upper readwrite and the lower readonly.
-+ /au = /rw + /ro
-+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
-+- the user invoked "chmod a+rx /au/dirA"
-+- the internal copy-up is activated and "/rw/dirA" is created, and its
-+ permission bits are set to world readable.
-+- does "/au/dirA" then become world readable?
-+
-+In this case, /ro/dirA is still 0700 since it exists on the readonly
-+branch, which may even be a natively readonly filesystem. If aufs
-+respects the lower branch, it should not respond to readdir requests
-+from other users. But the user allowed it by chmod. Should aufs really
-+reject showing the entries under /ro/dirA?
-+
-+To be honest, I don't have a good solution for this case. So aufs
-+implements the 'dirperm1' and 'nodirperm1' mount options, and leaves it
-+to users.
-+When dirperm1 is specified, aufs checks only the highest one for the
-+directory permission, and shows the entries. Otherwise, as usual, it
-+checks every dir existing on all branches and rejects the request.
-+
-+As a side effect, the dirperm1 option improves the performance of aufs
-+because the number of permission checks is reduced when there are many
-+branches.
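-+
-+For example (a sketch):
-+	# mount -t aufs -o br=/rw=rw:/ro=ro,dirperm1 none /au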
-+
-+
-+Revalidate Dentry and UDBA (User's Direct Branch Access)
-+----------------------------------------------------------------------
-+Generally VFS helpers re-validate a dentry as a part of lookup.
-+0. digging down the directory hierarchy.
-+1. lock the parent dir by its i_mutex.
-+2. lookup the final (child) entry.
-+3. revalidate it.
-+4. call the actual operation (create, unlink, etc.)
-+5. unlock the parent dir
-+
-+If the filesystem implements its ->d_revalidate() (step 3), then it is
-+called. Actually aufs implements it and checks that the dentry on a
-+branch is still valid.
-+But it is not enough, because aufs has to release the lock for the
-+parent dir on a branch at the end of ->lookup() (step 2) and
-+->d_revalidate() (step 3) while the i_mutex of the aufs dir is still
-+held by VFS.
-+If the file on a branch is changed directly, eg. bypassing aufs, after
-+aufs has released the lock, then the subsequent operation may cause an
-+unpleasant result.
-+
-+This situation is a result of the VFS architecture: ->lookup() and
-+->d_revalidate() are separated. But I never say it is wrong. It is a good
-+design from VFS's point of view. It is just not suitable for the sub-VFS
-+character of aufs.
-+
-+Aufs supports such a case with three levels of revalidation, selectable
-+by the user.
-+1. Simple Revalidate
-+ In addition to the native flow in VFS, confirm the child-parent
-+ relationship on the branch just after locking the parent dir on the
-+ branch in the "actual operation" (step 4). When this validation
-+ fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
-+ checks the validity of the dentry on branches.
-+2. Monitor Changes Internally by Inotify/Fsnotify
-+ In addition to the above, in the "actual operation" (step 4) aufs
-+ re-looks-up the dentry on the branch, and returns EBUSY if it finds a
-+ different dentry.
-+ Additionally, aufs sets an inotify/fsnotify watch for every dir on the
-+ branches while it is in cache. When an event is notified, aufs
-+ registers a function with the kernel 'events' thread by
-+ schedule_work(), and the function sets a special status in the cached
-+ aufs dentry and inode private data. If they are not cached, then aufs
-+ has nothing to do. When the same file is accessed through aufs
-+ (steps 0-3) later, aufs will detect the status and refresh all
-+ necessary data.
-+ In this mode, aufs has to ignore the events which are fired by aufs
-+ itself.
-+3. No Extra Validation
-+ This is the simplest level: it doesn't add any additional revalidation
-+ test, and skips the revalidation in step 4. It is useful and improves
-+ aufs performance when the system surely hides the aufs branches from
-+ users, by over-mounting something (or another method).
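-+
-+These levels correspond to the 'udba' mount option (a sketch; see the
-+aufs manual for details):
-+	# mount -o remount,udba=reval  /au   # level 1 (the default)
-+	# mount -o remount,udba=notify /au   # level 2
-+	# mount -o remount,udba=none   /au   # level 3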
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/04branch.txt linux-4.1.10/Documentation/filesystems/aufs/design/04branch.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/04branch.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/04branch.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,74 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Branch Manipulation
-+
-+Since aufs supports dynamic branch manipulation, ie. add/remove a branch
-+and changing its permission/attribute, there are a lot of works to do.
-+
-+
-+Add a Branch
-+----------------------------------------------------------------------
-+o Confirm that the dir being added exists outside of aufs, including
-+ loopback mounts, and check its various attributes.
-+o Initialize the xino file and whiteout bases if necessary.
-+ See struct.txt.
-+
-+o Check the owner/group/mode of the directory
-+ When the owner/group/mode of the directory being added differ from the
-+ existing branch, aufs issues a warning because it may impose a
-+ security risk.
-+ For example, when an upper writable branch has a world-writable empty
-+ top directory, a malicious user can create any file on the writable
-+ branch directly, as if copying it up and modifying it manually. If
-+ something like /etc/{passwd,shadow} exists on the lower readonly
-+ branch but not on the upper writable branch, and the writable branch
-+ is world-writable, then a malicious user may create /etc/passwd on
-+ the writable branch directly and the infected file will be valid in
-+ aufs.
-+ I am afraid it can be a security issue, but aufs can do nothing except
-+ produce a warning.
-+
-+
-+Delete a Branch
-+----------------------------------------------------------------------
-+o Confirm that the branch being deleted is not busy
-+ Generally, one merit of adopting the "remount" interface to
-+ manipulate branches is that it discards caches. When deleting a branch,
-+ aufs checks the still cached (and connected) dentries and inodes. If
-+ there are any, then they are all in-use. An inode without its
-+ corresponding dentry can be alive alone (for example, in the
-+ inotify/fsnotify case).
-+
-+ For the cached one, aufs checks whether the same named entry exists on
-+ other branches.
-+ If the cached one is a directory, because aufs provides a merged view
-+ to users, as long as one dir is left on any branch aufs can show the
-+ dir to users. In this case, the branch can be removed from aufs.
-+ Otherwise aufs rejects deleting the branch.
-+
-+ If any file on the branch being deleted is opened by aufs, then aufs
-+ rejects the deletion.
-+
-+
-+Modify the Permission of a Branch
-+----------------------------------------------------------------------
-+o Re-initialize or remove the xino file and whiteout bases if necessary.
-+ See struct.txt.
-+
-+o rw --> ro: Confirm that the branch being modified is not busy
-+ Aufs rejects the request if any of these conditions is true:
-+ - a file on the branch is mmap-ed.
-+ - a regular file on the branch is opened for write and there is no
-+ same named entry on the upper branch.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/05wbr_policy.txt linux-4.1.10/Documentation/filesystems/aufs/design/05wbr_policy.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/05wbr_policy.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/05wbr_policy.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,64 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Policies to Select One among Multiple Writable Branches
-+----------------------------------------------------------------------
-+When the number of writable branches is more than one, aufs has to decide
-+the target branch for file creation or copy-up. By default, the highest
-+writable branch which has the parent (or ancestor) dir of the target
-+file is chosen (the top-down-parent policy).
-+By user's request, aufs implements some other policies to select the
-+writable branch: for file creation, round-robin, most-free-space and
-+other policies; for copy-up, top-down-parent, bottom-up-parent,
-+bottom-up and others.
-+
-+As expected, the round-robin policy selects the branches in a circular
-+manner. When you have two writable branches and create 10 new files, 5
-+files will be created on each branch. The mkdir(2) systemcall is an
-+exception: when you create 10 new directories, all will be created on
-+the same branch.
-+The most-free-space policy selects the one which has the most free
-+space among the writable branches. The amount of free space is
-+checked by aufs internally, and users can specify its check interval.
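-+
-+For example (a sketch; "create=mfs:60" re-checks the free space every
-+60 seconds):
-+	# mount -t aufs -o br=/rw0=rw:/rw1=rw:/ro=ro,create=rr none /au
-+	# mount -o remount,create=mfs:60 /au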
-+
-+The policies for copy-up are simpler:
-+top-down-parent is equivalent to the same-named one in the create policy;
-+bottom-up-parent selects the writable branch where the parent dir
-+exists and which is the nearest upper one from the copyup-source;
-+bottom-up selects the nearest upper writable branch from the
-+copyup-source, regardless of the existence of the parent dir.
-+
-+There are some rules and exceptions when applying these policies.
-+- If there is a readonly branch above the policy-selected branch and
-+ the parent dir is marked as opaque (a variation of whiteout), or the
-+ target (creating) file is whiteout-ed on the upper readonly branch,
-+ then the result of the policy is ignored and the target file will be
-+ created on the nearest writable branch above the readonly branch.
-+- If there is a writable branch above the policy-selected branch and
-+ the parent dir is marked as opaque or the target file is whiteouted
-+ on the branch, then the result of the policy is ignored and the target
-+ file will be created on the highest one among the upper writable
-+ branches which has a diropq or whiteout. In the whiteout case, aufs removes
-+ it as usual.
-+- link(2) and rename(2) systemcalls are exceptions in every policy.
-+ They try to select the branch where the source exists when possible,
-+ since copying up a large file takes a long time. If that is
-+ impossible, ie. the branch where the source exists is readonly, then
-+ they will follow the copyup policy.
-+- There is an exception for rename(2) when the target exists.
-+ If the rename target exists, aufs compares the index of the branches
-+ where the source and the target exist and selects the higher
-+ one. If the selected branch is readonly, then aufs follows the
-+ copyup policy.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/06fhsm.txt linux-4.1.10/Documentation/filesystems/aufs/design/06fhsm.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/06fhsm.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/06fhsm.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,120 @@
-+
-+# Copyright (C) 2011-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program; if not, write to the Free Software
-+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-+
-+
-+File-based Hierarchical Storage Management (FHSM)
-+----------------------------------------------------------------------
-+Hierarchical Storage Management (or HSM) is a well-known feature in the
-+storage world. Aufs provides this feature file-based, with multiple
-+writable branches, based upon the principle of "the Colder, the Lower".
-+Here the word "colder" means the less used files, and "lower" means
-+the position in the vertical order of the stacked branches.
-+These multiple writable branches are prioritized, ie. the topmost one
-+should be the fastest drive and be used heavily.
-+
-+o Characters in aufs FHSM story
-+- aufs itself and a new branch attribute.
-+- a new ioctl interface to move-down and to establish a connection with
-+ the daemon ("move-down" is a converse of "copy-up").
-+- userspace tool and daemon.
-+
-+The userspace daemon establishes a connection with aufs and waits for
-+the notification. The notified information is very similar to struct
-+statfs containing the number of consumed blocks and inodes.
-+When the consumed blocks/inodes of a branch exceed the user-specified
-+upper watermark, the daemon activates its move-down process until the
-+consumed blocks/inodes reach the user-specified lower watermark.
-+
-+The actual move-down is done by aufs based upon the request from
-+user-space since we need to maintain the inode number and the internal
-+pointer arrays in aufs.
-+
-+Currently aufs FHSM handles regular files only. Additionally they
-+must be neither hard-linked nor pseudo-linked.
-+
-+
-+o Cowork of aufs and the user-space daemon
-+ While the userspace daemon keeps the connection established, aufs
-+ sends a small notification to it whenever aufs writes something into
-+ the writable branch. But this may be costly since aufs issues
-+ statfs(2) internally. So users can specify a new option to cache the
-+ info. Actually the notification is controlled by these factors:
-+ + the specified cache time.
-+ + being classified as "force" by aufs internally.
-+ Until the specified time expires, aufs doesn't send the info except
-+ in the forced cases. When aufs decides to force, the info is always
-+ notified to userspace.
-+ For example, the number of free inodes is generally large enough and
-+ a shortage of it happens rarely. So aufs doesn't force the
-+ notification when creating a new file, directory and others. This is
-+ the typical case in which aufs doesn't force.
-+ When aufs writes the actual filedata and the file consumes any new
-+ blocks, aufs forces the notification.
-+
-+
-+o Interfaces in aufs
-+- New branch attribute.
-+ + fhsm
-+ Specifies that the branch is managed by the FHSM feature. In other
-+ words, it is a participant in FHSM.
-+ When nofhsm is set to the branch, it will not be the source/target
-+ branch of the move-down operation. This attribute is set
-+ independently of the coo and moo attributes, and if you want full
-+ FHSM, you should specify them as well.
-+- New mount option.
-+ + fhsm_sec
-+ Specifies a time in seconds during which many less important
-+ notifications are suppressed.
-+- New ioctl.
-+ + AUFS_CTL_FHSM_FD
-+ creates a new file descriptor from which userspace can read the
-+ notification (a subset of struct statfs) from aufs.
-+- Module parameter 'brs'
-+ It has to be set to 1. Otherwise the new mount option 'fhsm' will not
-+ be set.
-+- mount helpers /sbin/mount.aufs and /sbin/umount.aufs
-+ When there are two or more branches with fhsm attributes,
-+ /sbin/mount.aufs invokes the user-space daemon and /sbin/umount.aufs
-+ terminates it. As a result of remounting and branch manipulation, the
-+ number of branches with the fhsm attribute can drop to one. In this
-+ case, /sbin/mount.aufs will terminate the user-space daemon.
-+
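-+For example (a sketch; the branch paths are illustrative):
-+	# mount -t aufs -o br=/ssd=rw+fhsm:/hdd=rw+fhsm none /au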
-+
-+Finally, the operation is done with these steps in kernel-space:
-+- make sure that,
-+ + no one else is using the file.
-+ + the file is not hard-linked.
-+ + the file is not pseudo-linked.
-+ + the file is a regular file.
-+ + the parent dir is not opaqued.
-+- find the target writable branch.
-+- make sure the file is not whiteout-ed by the upper (than the target)
-+ branch.
-+- make the parent dir on the target branch.
-+- mutex lock the inode on the branch.
-+- unlink the whiteout on the target branch (if exists).
-+- lookup and create the whiteout-ed temporary name on the target branch.
-+- copy the file as the whiteout-ed temporary name on the target branch.
-+- rename the whiteout-ed temporary name to the original name.
-+- unlink the file on the source branch.
-+- maintain the internal pointer array and the external inode number
-+ table (XINO).
-+- maintain the timestamps and other attributes of the parent dir and the
-+ file.
-+
-+And of course, in every step an error may happen, so the operation
-+should restore the original file state when an error happens.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/06mmap.txt linux-4.1.10/Documentation/filesystems/aufs/design/06mmap.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/06mmap.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/06mmap.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,72 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+mmap(2) -- File Memory Mapping
-+----------------------------------------------------------------------
-+In aufs, the file-mapped pages are handled by the branch fs directly,
-+with no interaction with aufs. It means aufs_mmap() calls the branch
-+fs's ->mmap().
-+This approach is simple and good, but there is one problem.
-+Under /proc, several entries show the mmapped files by their paths (with
-+device and inode number), and the printed path will be the path on the
-+branch fs instead of on the virtual aufs.
-+This is not a problem in most cases, but some utilities such as lsof(1)
-+(and their users) may expect the path on aufs.
-+
-+To address this issue, aufs adds a new member called vm_prfile in struct
-+vm_area_struct (and struct vm_region). The original vm_file points to
-+the file on the branch fs in order to handle everything correctly as
-+usual. The new vm_prfile points to a virtual file in aufs, and the
-+show-functions in procfs refer to vm_prfile if it is set.
-+We also need to maintain several other places that touch vm_file,
-+such as:
-+- fork()/clone() copies vma and the reference count of vm_file is
-+ incremented.
-+- merging vma maintains the ref count too.
-+
-+This is not a good approach. It just fakes the printed path. But it
-+leaves all behaviour around f_mapping unchanged. This is surely an
-+advantage.
-+Actually aufs had adopted another complicated approach which calls
-+generic_file_mmap() and handles struct vm_operations_struct. In this
-+approach, aufs met a hard problem and I could not solve it without
-+switching the approach.
-+
-+There may be yet another approach, which is:
-+- bind-mount the branch-root onto the aufs-root internally
-+- grab the new vfsmount (ie. struct mount)
-+- lazy-umount the branch-root internally
-+- in open(2) the aufs-file, open the branch-file with the hidden
-+ vfsmount (instead of the original branch's vfsmount)
-+- ideally this "bind-mount and lazy-umount" should be done atomically,
-+ but it may be possible from userspace by the mount helper.
-+
-+By adding the internal hidden vfsmount and using it when opening a file,
-+the file path under /proc will be printed correctly. This approach looks
-+smarter, but is not possible, I am afraid.
-+- aufs-root may be bind-mounted later. When that happens, another hidden
-+ vfsmount will be required.
-+- it is hard to get the chance to bind-mount and lazy-umount
-+ + in kernel-space, FS can have vfsmount in open(2) via
-+ file->f_path, and aufs can know its vfsmount. But several locks are
-+ already acquired, and if aufs tries to bind-mount and lazy-umount
-+ here, then it may cause a deadlock.
-+ + in user-space, bind-mount doesn't invoke the mount helper.
-+- since /proc shows dev and ino, aufs has to give the vma this info. It
-+ means a new member vm_prinode will be necessary. This is essentially
-+ equivalent to vm_prfile described above.
-+
-+I have to give up this "looks-smarter" approach.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/06xattr.txt linux-4.1.10/Documentation/filesystems/aufs/design/06xattr.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/06xattr.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/06xattr.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,96 @@
-+
-+# Copyright (C) 2014-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program; if not, write to the Free Software
-+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-+
-+
-+Listing XATTR/EA and getting the value
-+----------------------------------------------------------------------
-+For the inode standard attributes (owner, group, timestamps, etc.), aufs
-+shows the values from the topmost existing file. This behaviour is good
-+for the non-dir entries since the behaviour exactly matches the shown
-+information. But for directories, aufs considers all the same-named
-+entries on the lower branches, which means that if one of the lower
-+entries rejects a readdir call, then aufs returns an error even if the
-+topmost entry allows it. This behaviour is necessary to respect the
-+branch fs's security, but can confuse users since the user-visible
-+standard attributes don't match the behaviour.
-+To address this issue, aufs has a mount option called dirperm1 which
-+checks the permission for the topmost entry only, and ignores the lower
-+entry's permission.
-+
-+A similar issue can happen around XATTR.
-+The getxattr(2) and listxattr(2) families behave as if the dirperm1
-+option were always set. Otherwise these very unpleasant situations
-+could happen:
-+- listxattr(2) may return duplicated entries.
-+- users may never be able to remove or reset the XATTR.
-+
-+
-+XATTR/EA support in the internal (copy,move)-(up,down)
-+----------------------------------------------------------------------
-+Generally the extended attributes of inode are categorized as these.
-+- "security" for LSM and capability.
-+- "system" for posix ACL, 'acl' mount option is required for the branch
-+ fs generally.
-+- "trusted" for userspace, CAP_SYS_ADMIN is required.
-+- "user" for userspace, 'user_xattr' mount option is required for the
-+ branch fs generally.
-+
-+Moreover there are some other categories. Aufs handles these rather
-+unpopular categories as the ordinary ones, i.e. there is no special
-+condition nor exception.
-+
-+In copy-up, the XATTR support on the dst branch may differ from the
-+src branch. In this case, the copy-up operation will get an error and
-+the original user operation which triggered the copy-up will fail. It
-+can even happen that all copy-ups fail.
-+When both the src and dst branches support XATTR and an error occurs
-+while copying XATTR, then the copy-up should obviously fail. That is a
-+good reason and aufs should return an error to userspace. But when only
-+the src branch supports that XATTR, aufs should not return an error.
-+For example, the src branch supports ACL but the dst branch doesn't,
-+because the dst branch may not support it natively, or may temporarily
-+not support it due to the "noacl" mount option. Of course, the dst
-+branch fs may NOT return an error even if the XATTR is not supported.
-+It is totally up to the branch fs.
-+
-+Anyway, when the aufs internal copy-up gets an error from the dst branch
-+fs, aufs tries removing the just-copied entry and returns the error to
-+userspace. The worst case of this situation is that all copy-ups fail.
-+
-+For the copy-up operation, there are two basic approaches.
-+- copy the specified XATTR only (by category above), and return the
-+ error unconditionally if it happens.
-+- copy all XATTR, and ignore the error on the specified category only.
-+
-+In order to support XATTR and to implement the correct behaviour, aufs
-+chooses the latter approach and introduces some new branch attributes,
-+"icexsec", "icexsys", "icextr", "icexusr", and "icexoth".
-+They correspond to the XATTR namespaces (see above). Additionally, to be
-+convenient, "icex" is also provided which means all "icex*" attributes
-+are set (here the word "icex" stands for "ignore copy-error on XATTR").
-+
-+The meaning of these attributes is to ignore the error from setting
-+XATTR on that branch.
-+Note that aufs tries copying all XATTR unconditionally, and ignores the
-+error from the dst branch according to the specified attributes.
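-+For example (a sketch; the paths are illustrative, and the attribute is
-+assumed to be appended to the branch permission with '+' like the other
-+branch attributes):
-+# mount -t aufs -o br=/rw=rw+icex:/ro=ro none /mnt/aufs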
-+
-+Some XATTR may have a default value. The default value may come from
-+the parent dir or the environment. If the default value is set at
-+file-creation time, it will be overwritten by copy-up.
-+I am afraid some contradiction may happen.
-+Do we need another attribute to stop copying XATTR? I am unsure. For
-+now, aufs implements the branch attributes to ignore the error.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/07export.txt linux-4.1.10/Documentation/filesystems/aufs/design/07export.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/07export.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/07export.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,58 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Export Aufs via NFS
-+----------------------------------------------------------------------
-+Here is an approach.
-+- like xino/xib, add a new file 'xigen' which stores aufs inode
-+ generation.
-+- iget_locked(): initialize aufs inode generation for a new inode, and
-+ store it in xigen file.
-+- destroy_inode(): increment aufs inode generation and store it in xigen
-+ file. it is necessary even if it is not unlinked, because any data of
-+ inode may be changed by UDBA.
-+- encode_fh(): for a root dir, simply return FILEID_ROOT. otherwise
-+ build the file handle (see the sketch after this list) by
-+ + branch id (4 bytes)
-+ + superblock generation (4 bytes)
-+ + inode number (4 or 8 bytes)
-+ + parent dir inode number (4 or 8 bytes)
-+ + inode generation (4 bytes))
-+ + return value of exportfs_encode_fh() for the parent on a branch (4
-+ bytes)
-+ + file handle for a branch (by exportfs_encode_fh())
-+- fh_to_dentry():
-+ + find the index of a branch from its id in handle, and check it is
-+ still exist in aufs.
-+ + 1st level: get the inode number from handle and search it in cache.
-+ + 2nd level: if not found in cache, get the parent inode number from
-+ the handle and search it in cache. and then open the found parent
-+ dir, find the matching inode number by vfs_readdir() and get its
-+ name, and call lookup_one_len() for the target dentry.
-+ + 3rd level: if the parent dir is not cached, call
-+ exportfs_decode_fh() for a branch and get the parent on a branch,
-+ build a pathname of it, convert it a pathname in aufs, call
-+ path_lookup(). now aufs gets a parent dir dentry, then handle it as
-+ the 2nd level.
-+ + to open the dir, aufs needs struct vfsmount. aufs keeps vfsmount
-+ for every branch, but not itself. to get this, (currently) aufs
-+ searches in the current->nsproxy->mnt_ns list. it may not be a good
-+ idea, but I didn't find another approach.
-+ + test the generation of the gotten inode.
-+- every inode operation: they may get EBUSY due to UDBA. in this case,
-+ convert it into ESTALE for NFSD.
-+- readdir(): call lockdep_on/off() because filldir in NFSD calls
-+ lookup_one_len(), vfs_getattr(), encode_fh() and others.
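-+
-+Below is a minimal sketch of the handle layout described in encode_fh()
-+above. The struct name and the fixed-width types are illustrative
-+assumptions (the ino fields may be 4 or 8 bytes as noted above), and
-+the real code would pack these into a raw handle buffer rather than a
-+C struct.
-+
-+/* illustrative sketch only, not the actual aufs definition */
-+struct aufs_fh_sketch {
-+	__u32	br_id;		/* branch id */
-+	__u32	sigen;		/* superblock generation */
-+	__u64	ino;		/* inode number */
-+	__u64	dir_ino;	/* parent dir inode number */
-+	__u32	igen;		/* inode generation */
-+	__u32	h_type;		/* exportfs_encode_fh() result for the parent */
-+	__u32	h_fh[];		/* file handle for a branch */
-+};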
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/08shwh.txt linux-4.1.10/Documentation/filesystems/aufs/design/08shwh.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/08shwh.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/08shwh.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,52 @@
-+
-+# Copyright (C) 2005-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Show Whiteout Mode (shwh)
-+----------------------------------------------------------------------
-+Generally aufs hides the names of whiteouts. But in some cases, showing
-+them is very useful for users. For instance, creating a new middle layer
-+(branch) by merging existing layers.
-+
-+(borrowing aufs1 HOW-TO from a user, Michael Towers)
-+When you have three branches,
-+- Bottom: 'system', squashfs (underlying base system), read-only
-+- Middle: 'mods', squashfs, read-only
-+- Top: 'overlay', ram (tmpfs), read-write
-+
-+The top layer is loaded at boot time and saved at shutdown, to preserve
-+the changes made to the system during the session.
-+When larger changes have been made, or smaller changes have accumulated,
-+the size of the saved top layer data grows. At this point, it would be
-+nice to be able to merge the two overlay branches ('mods' and 'overlay')
-+and rewrite the 'mods' squashfs, clearing the top layer and thus
-+restoring save and load speed.
-+
-+This merging is simplified by the use of another aufs mount, of just the
-+two overlay branches using the 'shwh' option.
-+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
-+ aufs /livesys/merge_union
-+
-+A merged view of these two branches is then available at
-+/livesys/merge_union, and the new feature is that the whiteouts are
-+visible!
-+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
-+writing to all branches. Also the default mode for all branches is 'ro'.
-+It is now possible to save the combined contents of the two overlay
-+branches to a new squashfs, e.g.:
-+# mksquashfs /livesys/merge_union /path/to/newmods.squash
-+
-+This new squashfs archive can be stored on the boot device and the
-+initramfs will use it to replace the old one at the next boot.
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/design/10dynop.txt linux-4.1.10/Documentation/filesystems/aufs/design/10dynop.txt
---- linux-4.1.10.orig/Documentation/filesystems/aufs/design/10dynop.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/design/10dynop.txt 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,47 @@
-+
-+# Copyright (C) 2010-2015 Junjiro R. Okajima
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program. If not, see <http://www.gnu.org/licenses/>.
-+
-+Dynamically customizable FS operations
-+----------------------------------------------------------------------
-+Generally FS operations (struct inode_operations, struct
-+address_space_operations, struct file_operations, etc.) are defined as
-+"static const", but it never means that FS have only one set of
-+operation. Some FS have multiple sets of them. For instance, ext2 has
-+three sets, one for XIP, for NOBH, and for normal.
-+Since aufs overrides and redirects these operations, sometimes aufs has
-+to change its behaviour according to the branch FS type. More importantly
-+VFS acts differently if a function (member in the struct) is set or
-+not. It means aufs should have several sets of operations and select one
-+among them according to the branch FS definition.
-+
-+In order to solve this problem and not to affect the behaviour of VFS,
-+aufs defines these operations dynamically. For instance, aufs defines a
-+dummy direct_IO function for struct address_space_operations, but it may
-+not actually be set in the address_space_operations. When the branch FS
-+doesn't have it, aufs doesn't set it in its address_space_operations
-+while the function definition itself is still alive. So the behaviour
-+itself will not change, and the VFS will return an error when direct_IO
-+is not set.
-+
-+The lifetime of these dynamically generated operation objects is
-+maintained by the aufs branch object. When the branch is removed from
-+aufs, the reference counter of the object is decremented. When it
-+reaches zero, the dynamically generated operation object will be freed.
-+
-+This approach is designed to support AIO (io_submit), Direct I/O and
-+XIP (DAX) mainly.
-+Currently this approach is applied to address_space_operations for
-+regular files only.
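-+
-+A minimal sketch of the idea (the function and symbol names below are
-+illustrative assumptions, not the actual aufs symbols):
-+
-+/* illustrative sketch only */
-+static void dy_fill_aops(struct address_space_operations *dst,
-+			 const struct address_space_operations *h_aops)
-+{
-+	/* the dummy definition itself always exists ... */
-+	if (h_aops->direct_IO)
-+		dst->direct_IO = aufs_dummy_direct_IO;
-+	/*
-+	 * ... but it is set only when the branch FS provides direct_IO;
-+	 * otherwise the member stays NULL and VFS returns an error.
-+	 */
-+}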
-diff -Nur linux-4.1.10.orig/Documentation/filesystems/aufs/README linux-4.1.10/Documentation/filesystems/aufs/README
---- linux-4.1.10.orig/Documentation/filesystems/aufs/README 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/Documentation/filesystems/aufs/README 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,383 @@
-+
-+Aufs4 -- advanced multi layered unification filesystem version 4.x
-+http://aufs.sf.net
-+Junjiro R. Okajima
-+
-+
-+0. Introduction
-+----------------------------------------
-+In the early days, aufs was an entirely re-designed and re-implemented
-+Unionfs Version 1.x series. With many original ideas, approaches,
-+improvements and implementations added, it has become totally different
-+from Unionfs while keeping the basic features.
-+Later, the Unionfs Version 2.x series began taking some of the same
-+approaches as aufs1's.
-+Unionfs is being developed by Professor Erez Zadok at Stony Brook
-+University and his team.
-+
-+Aufs4 supports linux-4.0 and later; for the linux-3.x series try aufs3.
-+If you want older kernel version support, try the aufs2-2.6.git or
-+aufs2-standalone.git repositories, or aufs1 from CVS on SourceForge.
-+
-+Note: it has become clear that "Aufs was rejected. Let's give it up."
-+ According to Christoph Hellwig, linux rejects all union-type
-+ filesystems but UnionMount.
-+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
-+
-+PS. Al Viro seems to have a plan to merge aufs as well as overlayfs and
-+ UnionMount; he pointed out an issue around a directory mutex lock,
-+ and aufs addressed it. But it is still unclear whether aufs (or any
-+ other union solution) will be merged.
-+<http://marc.info/?l=linux-kernel&m=136312705029295&w=1>
-+
-+
-+1. Features
-+----------------------------------------
-+- unite several directories into a single virtual filesystem. The member
-+ directory is called a branch.
-+- you can specify the permission flags to the branch, which are 'readonly',
-+ 'readwrite' and 'whiteout-able.'
-+- via the upper writable branch, internal copyup and whiteout, files and
-+ dirs on a readonly branch become logically modifiable.
-+- dynamic branch manipulation, add, del.
-+- etc...
-+
-+Also there are many enhancements in aufs, such as:
-+- test only the highest one for the directory permission (dirperm1)
-+- copyup on open (coo=)
-+- 'move' policy for copy-up between two writable branches, after
-+ checking free space.
-+- xattr, acl
-+- readdir(3) in userspace.
-+- keep inode number by external inode number table
-+- keep the timestamps of file/dir in internal copyup operation
-+- seekable directory, supporting NFS readdir.
-+- whiteout is hardlinked in order to reduce the consumption of inodes
-+ on branch
-+- do not copyup, nor create a whiteout when it is unnecessary
-+- revert a single system call when an error occurs in aufs
-+- remount interface instead of ioctl
-+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
-+- loopback mounted filesystem as a branch
-+- kernel thread for removing a dir which has plenty of whiteouts
-+- support copyup sparse file (a file which has a 'hole' in it)
-+- default permission flags for branches
-+- selectable permission flags for ro branch, whether whiteout can
-+ exist or not
-+- export via NFS.
-+- support <sysfs>/fs/aufs and <debugfs>/aufs.
-+- support multiple writable branches, some policies to select one
-+ among multiple writable branches.
-+- a new semantics for link(2) and rename(2) to support multiple
-+ writable branches.
-+- no glibc changes are required.
-+- pseudo hardlink (hardlink over branches)
-+- allow direct manual access to a file on a branch, i.e. bypassing aufs,
-+ including an NFS or remote filesystem branch.
-+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
-+- and more...
-+
-+Currently these features are dropped temporarily from aufs4.
-+See design/08plan.txt for details.
-+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
-+ (robr)
-+- statistics of aufs thread (/sys/fs/aufs/stat)
-+
-+Features or just an idea in the future (see also design/*.txt),
-+- reorder the branch index without del/re-add.
-+- permanent xino files for NFSD
-+- an option for refreshing the opened files after add/del branches
-+- light version, without branch manipulation. (unnecessary?)
-+- copyup in userspace
-+- inotify in userspace
-+- readv/writev
-+
-+
-+2. Download
-+----------------------------------------
-+There are three GIT trees for aufs4, aufs4-linux.git,
-+aufs4-standalone.git, and aufs-util.git. Note that there is no "4" in
-+"aufs-util.git."
-+While the aufs-util is always necessary, you need either of aufs4-linux
-+or aufs4-standalone.
-+
-+The aufs4-linux tree includes the whole linux mainline GIT tree,
-+git://git.kernel.org/.../torvalds/linux.git.
-+And you cannot select CONFIG_AUFS_FS=m for this version, i.e. you cannot
-+build aufs4 as an external kernel module.
-+Several extra patches are not included in this tree. Only
-+aufs4-standalone tree contains them. They are described in the later
-+section "Configuration and Compilation."
-+
-+On the other hand, the aufs4-standalone tree has only aufs source files
-+and necessary patches, and you can select CONFIG_AUFS_FS=m.
-+But you need to apply all aufs patches manually.
-+
-+You will find GIT branches whose name is in form of "aufs4.x" where "x"
-+represents the linux kernel version, "linux-4.x". For instance,
-+"aufs4.0" is for linux-4.0. For latest "linux-4.x-rcN", use
-+"aufs4.x-rcN" branch.
-+
-+o aufs4-linux tree
-+$ git clone --reference /your/linux/git/tree \
-+ git://github.com/sfjro/aufs4-linux.git aufs4-linux.git
-+- if you don't have linux GIT tree, then remove "--reference ..."
-+$ cd aufs4-linux.git
-+$ git checkout origin/aufs4.0
-+
-+Or you may want to directly git-pull aufs into your linux GIT tree, and
-+leave the patch-work to GIT.
-+$ cd /your/linux/git/tree
-+$ git remote add aufs4 git://github.com/sfjro/aufs4-linux.git
-+$ git fetch aufs4
-+$ git checkout -b my4.0 v4.0
-+$ (add your local change...)
-+$ git pull aufs4 aufs4.0
-+- now you have v4.0 + your_changes + aufs4.0 in your my4.0 branch.
-+- you may need to solve some conflicts between your_changes and
-+ aufs4.0. in this case, git-rerere is recommended so that you can
-+ solve similar conflicts automatically when you upgrade to 4.1 or
-+ later in the future.
-+
-+o aufs4-standalone tree
-+$ git clone git://github.com/sfjro/aufs4-standalone.git aufs4-standalone.git
-+$ cd aufs4-standalone.git
-+$ git checkout origin/aufs4.0
-+
-+o aufs-util tree
-+$ git clone git://git.code.sf.net/p/aufs/aufs-util aufs-util.git
-+- note that the public aufs-util.git is on SourceForge instead of
-+ GitHUB.
-+$ cd aufs-util.git
-+$ git checkout origin/aufs4.0
-+
-+Note: The 4.x-rcN branch is to be used with `rc' kernel versions ONLY.
-+The minor version number, 'x' in '4.x', of aufs may not always
-+follow the minor version number of the kernel, because changes in
-+the kernel that introduce a new minor version number do not always
-+require changes to aufs-util.
-+
-+Since aufs-util has its own minor version number, you may not be
-+able to find a GIT branch in aufs-util for your kernel's
-+exact minor version number.
-+In this case, you should git-checkout the branch for the
-+nearest lower number.
-+
-+For (an unreleased) example:
-+If you are using "linux-4.10" and the "aufs4.10" branch
-+does not exist in the aufs-util repository, then "aufs4.9", "aufs4.8"
-+or something numerically smaller is the branch for your kernel.
-+
-+Also you can view all branches by
-+ $ git branch -a
-+
-+
-+3. Configuration and Compilation
-+----------------------------------------
-+Make sure you have git-checkout'ed the correct branch.
-+
-+For aufs4-linux tree,
-+- enable CONFIG_AUFS_FS.
-+- set other aufs configurations if necessary.
-+
-+For aufs4-standalone tree,
-+There are several ways to build.
-+
-+1.
-+- apply ./aufs4-kbuild.patch to your kernel source files.
-+- apply ./aufs4-base.patch too.
-+- apply ./aufs4-mmap.patch too.
-+- apply ./aufs4-standalone.patch too, if you have a plan to set
-+ CONFIG_AUFS_FS=m. otherwise you don't need ./aufs4-standalone.patch.
-+- copy ./{Documentation,fs,include/uapi/linux/aufs_type.h} files to your
-+ kernel source tree. Never copy $PWD/include/uapi/linux/Kbuild.
-+- enable CONFIG_AUFS_FS, you can select either
-+ =m or =y.
-+- and build your kernel as usual.
-+- install the built kernel.
-+ Note: Since linux-3.9, every filesystem module requires an alias
-+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
-+ modules.aliases file if you set CONFIG_AUFS_FS=m.
-+- install the header files too by "make headers_install" to the
-+ directory where you specify. By default, it is $PWD/usr.
-+ "make help" shows a brief note for headers_install.
-+- and reboot your system.
-+
-+2.
-+- module only (CONFIG_AUFS_FS=m).
-+- apply ./aufs4-base.patch to your kernel source files.
-+- apply ./aufs4-mmap.patch too.
-+- apply ./aufs4-standalone.patch too.
-+- build your kernel, don't forget "make headers_install", and reboot.
-+- edit ./config.mk and set other aufs configurations if necessary.
-+ Note: You should read $PWD/fs/aufs/Kconfig carefully which describes
-+ every aufs configurations.
-+- build the module by simple "make".
-+ Note: Since linux-3.9, every filesystem module requires an alias
-+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
-+ modules.aliases file.
-+- you can specify ${KDIR} make variable which points to your kernel
-+ source tree.
-+- install the files
-+ + run "make install" to install the aufs module, or copy the built
-+ $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
-+ + run "make install_headers" (instead of headers_install) to install
-+ the modified aufs header file (you can specify DESTDIR which is
-+ available in aufs standalone version's Makefile only), or copy
-+ $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
-+ you like manually. By default, the target directory is $PWD/usr.
-+- no need to apply aufs4-kbuild.patch, nor copying source files to your
-+ kernel source tree.
-+
-+Note: The header file aufs_type.h is necessary to build aufs-util
-+ as well as "make headers_install" in the kernel source tree.
-+ headers_install tends to be forgotten, but it is essentially
-+ necessary, not only for building aufs-util.
-+ You may not meet problems without headers_install in some older
-+ versions though.
-+
-+And then,
-+- read README in aufs-util, build and install it
-+- note that your distribution may contain an obsolete version of
-+ aufs_type.h in /usr/include/linux or something. When you build aufs
-+ utilities, make sure that your compiler refers the correct aufs header
-+ file which is built by "make headers_install."
-+- if you want to use readdir(3) in userspace or the pathconf(3) wrapper,
-+ then run "make install_ulib" too. And refer to the aufs manual for
-+ details.
-+
-+There are several other patches in aufs4-standalone.git. They are all
-+optional. When you meet some problems, they may help you.
-+- aufs4-loopback.patch
-+ Supports a nested loopback mount in a branch-fs. This patch is
-+ unnecessary until aufs produces a message like "you may want to try
-+ another patch for loopback file".
-+- vfs-ino.patch
-+ Modifies a system global kernel internal function get_next_ino() in
-+ order to stop assigning 0 for an inode-number. Not directly related to
-+ aufs, but recommended generally.
-+- tmpfs-idr.patch
-+ Keeps the tmpfs inode number as the lowest value. Effective to reduce
-+ the size of aufs XINO files for tmpfs branch. Also it prevents the
-+ duplication of inode number, which is important for backup tools and
-+ other utilities. When you find aufs XINO files for tmpfs branch
-+ growing too much, try this patch.
-+
-+
-+4. Usage
-+----------------------------------------
-+At first, make sure aufs-util is installed, and please read the aufs
-+manual, aufs.5 in the aufs-util.git tree.
-+$ man -l aufs.5
-+
-+And then,
-+$ mkdir /tmp/rw /tmp/aufs
-+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
-+
-+Here is another example. The result is equivalent.
-+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
-+ Or
-+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
-+# mount -o remount,append:${HOME} /tmp/aufs
-+
-+Then, you can see the whole tree of your home dir through /tmp/aufs. If
-+you modify a file under /tmp/aufs, the one in your home directory is
-+not affected; instead, a file with the same name will be newly created
-+under /tmp/rw. And all of your modifications to a file will be applied
-+to the one under /tmp/rw. This is called the file-based Copy on Write
-+(COW) method.
-+Aufs mount options are described in aufs.5.
-+If you run chroot or something and make your aufs the root directory,
-+then you need to customize the shutdown script. See the aufs manual
-+for details.
-+
-+Additionally, there are some sample usages of aufs, such as a diskless
-+system with network booting, and a LiveCD over NFS.
-+See the sample dir in the CVS tree on SourceForge.
-+
-+
-+5. Contact
-+----------------------------------------
-+When you have any problems or strange behaviour in aufs, please let me
-+know with:
-+- /proc/mounts (instead of the output of mount(8))
-+- /sys/module/aufs/*
-+- /sys/fs/aufs/* (if you have them)
-+- /debug/aufs/* (if you have them)
-+- linux kernel version
-+ if your kernel is not plain, for example modified by a distributor,
-+ the URL where I can download its source is necessary too.
-+- the aufs version which was printed when loading the module or booting
-+ the system, not the date you downloaded it.
-+- configuration (define/undefine CONFIG_AUFS_xxx)
-+- kernel configuration or /proc/config.gz (if you have it)
-+- behaviour which you think is incorrect
-+- the actual operation; a reproducible one is better
-+- mailto: aufs-users at lists.sourceforge.net
-+
-+Usually, I don't watch the Public Areas (Bugs, Support Requests,
-+Patches, and Feature Requests) on SourceForge. Please join and write to
-+the aufs-users ML.
-+
-+
-+6. Acknowledgements
-+----------------------------------------
-+Thanks to everyone who has tried and is using aufs, and to everyone who
-+has reported a bug or given any feedback.
-+
-+Especially donators:
-+Tomas Matejicek(slax.org) made a donation (much more than once).
-+ Since Apr 2010, Tomas M (the author of Slax and Linux Live
-+ scripts) is making "doubling" donations.
-+ Unfortunately I cannot list all of the donators, but I really
-+ appreciate it.
-+ It ended in Aug 2010, but the ordinary donation URL is still
-+ available.
-+ <http://sourceforge.net/donate/index.php?group_id=167503>
-+Dai Itasaka made a donation (2007/8).
-+Chuck Smith made a donation (2008/4, 10 and 12).
-+Henk Schoneveld made a donation (2008/9).
-+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
-+Francois Dupoux made a donation (2008/11).
-+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
-+ aufs2 GIT tree (2009/2).
-+William Grant made a donation (2009/3).
-+Patrick Lane made a donation (2009/4).
-+The Mail Archive (mail-archive.com) made donations (2009/5).
-+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
-+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
-+Pavel Pronskiy made a donation (2011/2).
-+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
-+ Networks (Ed Wildgoose) made a donation for hardware (2011/3).
-+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
-+11).
-+Sam Liddicott made a donation (2011/9).
-+Era Scarecrow made a donation (2013/4).
-+Bor Ratajc made a donation (2013/4).
-+Alessandro Gorreta made a donation (2013/4).
-+POIRETTE Marc made a donation (2013/4).
-+Alessandro Gorreta made a donation (2013/4).
-+lauri kasvandik made a donation (2013/5).
-+"pemasu from Finland" made a donation (2013/7).
-+The Parted Magic Project made a donation (2013/9 and 11).
-+Pavel Barta made a donation (2013/10).
-+Nikolay Pertsev made a donation (2014/5).
-+James B made a donation (2014/7 and 2015/7).
-+Stefano Di Biase made a donation (2014/8).
-+Daniel Epellei made a donation (2015/1).
-+
-+Thank you very much.
-+Donations, including future ones, are always very important and helpful
-+for me to keep on developing aufs.
-+
-+
-+7.
-+----------------------------------------
-+If you are an experienced user, no explanation is needed. Aufs is
-+just a linux filesystem.
-+
-+
-+Enjoy!
-+
-+# Local variables: ;
-+# mode: text;
-+# End: ;
-diff -Nur linux-4.1.10.orig/drivers/block/loop.c linux-4.1.10/drivers/block/loop.c
---- linux-4.1.10.orig/drivers/block/loop.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/drivers/block/loop.c 2015-10-22 21:35:53.000000000 +0200
-@@ -538,6 +538,24 @@
- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
- }
-
-+/*
-+ * for AUFS
-+ * no get/put for file.
-+ */
-+struct file *loop_backing_file(struct super_block *sb)
-+{
-+ struct file *ret;
-+ struct loop_device *l;
-+
-+ ret = NULL;
-+ if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
-+ l = sb->s_bdev->bd_disk->private_data;
-+ ret = l->lo_backing_file;
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(loop_backing_file);
-+
- /* loop sysfs attributes */
-
- static ssize_t loop_attr_show(struct device *dev, char *page,
-diff -Nur linux-4.1.10.orig/fs/aufs/aufs.h linux-4.1.10/fs/aufs/aufs.h
---- linux-4.1.10.orig/fs/aufs/aufs.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/aufs.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,59 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * all header files
-+ */
-+
-+#ifndef __AUFS_H__
-+#define __AUFS_H__
-+
-+#ifdef __KERNEL__
-+
-+#define AuStub(type, name, body, ...) \
-+ static inline type name(__VA_ARGS__) { body; }
-+
-+#define AuStubVoid(name, ...) \
-+ AuStub(void, name, , __VA_ARGS__)
-+#define AuStubInt0(name, ...) \
-+ AuStub(int, name, return 0, __VA_ARGS__)
-+
-+#include "debug.h"
-+
-+#include "branch.h"
-+#include "cpup.h"
-+#include "dcsub.h"
-+#include "dbgaufs.h"
-+#include "dentry.h"
-+#include "dir.h"
-+#include "dynop.h"
-+#include "file.h"
-+#include "fstype.h"
-+#include "inode.h"
-+#include "loop.h"
-+#include "module.h"
-+#include "opts.h"
-+#include "rwsem.h"
-+#include "spl.h"
-+#include "super.h"
-+#include "sysaufs.h"
-+#include "vfsub.h"
-+#include "whout.h"
-+#include "wkq.h"
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/branch.c linux-4.1.10/fs/aufs/branch.c
---- linux-4.1.10.orig/fs/aufs/branch.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/branch.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1414 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * branch management
-+ */
-+
-+#include <linux/compat.h>
-+#include <linux/statfs.h>
-+#include "aufs.h"
-+
-+/*
-+ * free a single branch
-+ */
-+static void au_br_do_free(struct au_branch *br)
-+{
-+ int i;
-+ struct au_wbr *wbr;
-+ struct au_dykey **key;
-+
-+ au_hnotify_fin_br(br);
-+
-+ if (br->br_xino.xi_file)
-+ fput(br->br_xino.xi_file);
-+ mutex_destroy(&br->br_xino.xi_nondir_mtx);
-+
-+ AuDebugOn(atomic_read(&br->br_count));
-+
-+ wbr = br->br_wbr;
-+ if (wbr) {
-+ for (i = 0; i < AuBrWh_Last; i++)
-+ dput(wbr->wbr_wh[i]);
-+ AuDebugOn(atomic_read(&wbr->wbr_wh_running));
-+ AuRwDestroy(&wbr->wbr_wh_rwsem);
-+ }
-+
-+ if (br->br_fhsm) {
-+ au_br_fhsm_fin(br->br_fhsm);
-+ kfree(br->br_fhsm);
-+ }
-+
-+ key = br->br_dykey;
-+ for (i = 0; i < AuBrDynOp; i++, key++)
-+ if (*key)
-+ au_dy_put(*key);
-+ else
-+ break;
-+
-+ /* recursive lock, s_umount of branch's */
-+ lockdep_off();
-+ path_put(&br->br_path);
-+ lockdep_on();
-+ kfree(wbr);
-+ kfree(br);
-+}
-+
-+/*
-+ * frees all branches
-+ */
-+void au_br_free(struct au_sbinfo *sbinfo)
-+{
-+ aufs_bindex_t bmax;
-+ struct au_branch **br;
-+
-+ AuRwMustWriteLock(&sbinfo->si_rwsem);
-+
-+ bmax = sbinfo->si_bend + 1;
-+ br = sbinfo->si_branch;
-+ while (bmax--)
-+ au_br_do_free(*br++);
-+}
-+
-+/*
-+ * find the index of a branch which is specified by @br_id.
-+ */
-+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
-+{
-+ aufs_bindex_t bindex, bend;
-+
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++)
-+ if (au_sbr_id(sb, bindex) == br_id)
-+ return bindex;
-+ return -1;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * add a branch
-+ */
-+
-+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
-+ struct dentry *h_root)
-+{
-+ if (unlikely(h_adding == h_root
-+ || au_test_loopback_overlap(sb, h_adding)))
-+ return 1;
-+ if (h_adding->d_sb != h_root->d_sb)
-+ return 0;
-+ return au_test_subdir(h_adding, h_root)
-+ || au_test_subdir(h_root, h_adding);
-+}
-+
-+/*
-+ * returns a newly allocated branch. @new_nbranch is a number of branches
-+ * after adding a branch.
-+ */
-+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
-+ int perm)
-+{
-+ struct au_branch *add_branch;
-+ struct dentry *root;
-+ struct inode *inode;
-+ int err;
-+
-+ err = -ENOMEM;
-+ root = sb->s_root;
-+ add_branch = kmalloc(sizeof(*add_branch), GFP_NOFS);
-+ if (unlikely(!add_branch))
-+ goto out;
-+
-+ err = au_hnotify_init_br(add_branch, perm);
-+ if (unlikely(err))
-+ goto out_br;
-+
-+ add_branch->br_wbr = NULL;
-+ if (au_br_writable(perm)) {
-+ /* may be freed separately at changing the branch permission */
-+ add_branch->br_wbr = kmalloc(sizeof(*add_branch->br_wbr),
-+ GFP_NOFS);
-+ if (unlikely(!add_branch->br_wbr))
-+ goto out_hnotify;
-+ }
-+
-+ add_branch->br_fhsm = NULL;
-+ if (au_br_fhsm(perm)) {
-+ err = au_fhsm_br_alloc(add_branch);
-+ if (unlikely(err))
-+ goto out_wbr;
-+ }
-+
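-+	/* expand the branch arrays of the sb-info, root dentry and root inode */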
-+ err = au_sbr_realloc(au_sbi(sb), new_nbranch);
-+ if (!err)
-+ err = au_di_realloc(au_di(root), new_nbranch);
-+ if (!err) {
-+ inode = d_inode(root);
-+ err = au_ii_realloc(au_ii(inode), new_nbranch);
-+ }
-+ if (!err)
-+ return add_branch; /* success */
-+
-+out_wbr:
-+ kfree(add_branch->br_wbr);
-+out_hnotify:
-+ au_hnotify_fin_br(add_branch);
-+out_br:
-+ kfree(add_branch);
-+out:
-+ return ERR_PTR(err);
-+}
-+
-+/*
-+ * test if the branch permission is legal or not.
-+ */
-+static int test_br(struct inode *inode, int brperm, char *path)
-+{
-+ int err;
-+
-+ err = (au_br_writable(brperm) && IS_RDONLY(inode));
-+ if (!err)
-+ goto out;
-+
-+ err = -EINVAL;
-+ pr_err("write permission for readonly mount or inode, %s\n", path);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * returns:
-+ * 0: success, the caller will add it
-+ * plus: success, it is already unified, the caller should ignore it
-+ * minus: error
-+ */
-+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
-+{
-+ int err;
-+ aufs_bindex_t bend, bindex;
-+ struct dentry *root, *h_dentry;
-+ struct inode *inode, *h_inode;
-+
-+ root = sb->s_root;
-+ bend = au_sbend(sb);
-+ if (unlikely(bend >= 0
-+ && au_find_dbindex(root, add->path.dentry) >= 0)) {
-+ err = 1;
-+ if (!remount) {
-+ err = -EINVAL;
-+ pr_err("%s duplicated\n", add->pathname);
-+ }
-+ goto out;
-+ }
-+
-+ err = -ENOSPC; /* -E2BIG; */
-+ if (unlikely(AUFS_BRANCH_MAX <= add->bindex
-+ || AUFS_BRANCH_MAX - 1 <= bend)) {
-+ pr_err("number of branches exceeded %s\n", add->pathname);
-+ goto out;
-+ }
-+
-+ err = -EDOM;
-+ if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) {
-+ pr_err("bad index %d\n", add->bindex);
-+ goto out;
-+ }
-+
-+ inode = d_inode(add->path.dentry);
-+ err = -ENOENT;
-+ if (unlikely(!inode->i_nlink)) {
-+ pr_err("no existence %s\n", add->pathname);
-+ goto out;
-+ }
-+
-+ err = -EINVAL;
-+ if (unlikely(inode->i_sb == sb)) {
-+ pr_err("%s must be outside\n", add->pathname);
-+ goto out;
-+ }
-+
-+ if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
-+ pr_err("unsupported filesystem, %s (%s)\n",
-+ add->pathname, au_sbtype(inode->i_sb));
-+ goto out;
-+ }
-+
-+ if (unlikely(inode->i_sb->s_stack_depth)) {
-+ pr_err("already stacked, %s (%s)\n",
-+ add->pathname, au_sbtype(inode->i_sb));
-+ goto out;
-+ }
-+
-+ err = test_br(d_inode(add->path.dentry), add->perm, add->pathname);
-+ if (unlikely(err))
-+ goto out;
-+
-+ if (bend < 0)
-+ return 0; /* success */
-+
-+ err = -EINVAL;
-+ for (bindex = 0; bindex <= bend; bindex++)
-+ if (unlikely(test_overlap(sb, add->path.dentry,
-+ au_h_dptr(root, bindex)))) {
-+ pr_err("%s is overlapped\n", add->pathname);
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
-+ h_dentry = au_h_dptr(root, 0);
-+ h_inode = d_inode(h_dentry);
-+ if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
-+ || !uid_eq(h_inode->i_uid, inode->i_uid)
-+ || !gid_eq(h_inode->i_gid, inode->i_gid))
-+ pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
-+ add->pathname,
-+ i_uid_read(inode), i_gid_read(inode),
-+ (inode->i_mode & S_IALLUGO),
-+ i_uid_read(h_inode), i_gid_read(h_inode),
-+ (h_inode->i_mode & S_IALLUGO));
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * initialize or clean the whiteouts for an adding branch
-+ */
-+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
-+ int new_perm)
-+{
-+ int err, old_perm;
-+ aufs_bindex_t bindex;
-+ struct mutex *h_mtx;
-+ struct au_wbr *wbr;
-+ struct au_hinode *hdir;
-+ struct dentry *h_dentry;
-+
-+ err = vfsub_mnt_want_write(au_br_mnt(br));
-+ if (unlikely(err))
-+ goto out;
-+
-+ wbr = br->br_wbr;
-+ old_perm = br->br_perm;
-+ br->br_perm = new_perm;
-+ hdir = NULL;
-+ h_mtx = NULL;
-+ bindex = au_br_index(sb, br->br_id);
-+ if (0 <= bindex) {
-+ hdir = au_hi(d_inode(sb->s_root), bindex);
-+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ } else {
-+ h_dentry = au_br_dentry(br);
-+ h_mtx = &d_inode(h_dentry)->i_mutex;
-+ mutex_lock_nested(h_mtx, AuLsc_I_PARENT);
-+ }
-+ if (!wbr)
-+ err = au_wh_init(br, sb);
-+ else {
-+ wbr_wh_write_lock(wbr);
-+ err = au_wh_init(br, sb);
-+ wbr_wh_write_unlock(wbr);
-+ }
-+ if (hdir)
-+ au_hn_imtx_unlock(hdir);
-+ else
-+ mutex_unlock(h_mtx);
-+ vfsub_mnt_drop_write(au_br_mnt(br));
-+ br->br_perm = old_perm;
-+
-+ if (!err && wbr && !au_br_writable(new_perm)) {
-+ kfree(wbr);
-+ br->br_wbr = NULL;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
-+ int perm)
-+{
-+ int err;
-+ struct kstatfs kst;
-+ struct au_wbr *wbr;
-+
-+ wbr = br->br_wbr;
-+ au_rw_init(&wbr->wbr_wh_rwsem);
-+ memset(wbr->wbr_wh, 0, sizeof(wbr->wbr_wh));
-+ atomic_set(&wbr->wbr_wh_running, 0);
-+ wbr->wbr_bytes = 0;
-+
-+ /*
-+ * a limit for rmdir/rename a dir
-+ * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h
-+ */
-+ err = vfs_statfs(&br->br_path, &kst);
-+ if (unlikely(err))
-+ goto out;
-+ err = -EINVAL;
-+ if (kst.f_namelen >= NAME_MAX)
-+ err = au_br_init_wh(sb, br, perm);
-+ else
-+ pr_err("%pd(%s), unsupported namelen %ld\n",
-+ au_br_dentry(br),
-+ au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
-+
-+out:
-+ return err;
-+}
-+
-+/* initialize a new branch */
-+static int au_br_init(struct au_branch *br, struct super_block *sb,
-+ struct au_opt_add *add)
-+{
-+ int err;
-+ struct inode *h_inode;
-+
-+ err = 0;
-+ memset(&br->br_xino, 0, sizeof(br->br_xino));
-+ mutex_init(&br->br_xino.xi_nondir_mtx);
-+ br->br_perm = add->perm;
-+ br->br_path = add->path; /* set first, path_get() later */
-+ spin_lock_init(&br->br_dykey_lock);
-+ memset(br->br_dykey, 0, sizeof(br->br_dykey));
-+ atomic_set(&br->br_count, 0);
-+ atomic_set(&br->br_xino_running, 0);
-+ br->br_id = au_new_br_id(sb);
-+ AuDebugOn(br->br_id < 0);
-+
-+ if (au_br_writable(add->perm)) {
-+ err = au_wbr_init(br, sb, add->perm);
-+ if (unlikely(err))
-+ goto out_err;
-+ }
-+
-+ if (au_opt_test(au_mntflags(sb), XINO)) {
-+ h_inode = d_inode(add->path.dentry);
-+ err = au_xino_br(sb, br, h_inode->i_ino,
-+ au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1);
-+ if (unlikely(err)) {
-+ AuDebugOn(br->br_xino.xi_file);
-+ goto out_err;
-+ }
-+ }
-+
-+ sysaufs_br_init(br);
-+ path_get(&br->br_path);
-+ goto out; /* success */
-+
-+out_err:
-+ memset(&br->br_path, 0, sizeof(br->br_path));
-+out:
-+ return err;
-+}
-+
-+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
-+ struct au_branch *br, aufs_bindex_t bend,
-+ aufs_bindex_t amount)
-+{
-+ struct au_branch **brp;
-+
-+ AuRwMustWriteLock(&sbinfo->si_rwsem);
-+
-+ brp = sbinfo->si_branch + bindex;
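-+	/* shift the trailing entries up by one to make a slot at @bindex */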
-+ memmove(brp + 1, brp, sizeof(*brp) * amount);
-+ *brp = br;
-+ sbinfo->si_bend++;
-+ if (unlikely(bend < 0))
-+ sbinfo->si_bend = 0;
-+}
-+
-+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
-+ aufs_bindex_t bend, aufs_bindex_t amount)
-+{
-+ struct au_hdentry *hdp;
-+
-+ AuRwMustWriteLock(&dinfo->di_rwsem);
-+
-+ hdp = dinfo->di_hdentry + bindex;
-+ memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
-+ au_h_dentry_init(hdp);
-+ dinfo->di_bend++;
-+ if (unlikely(bend < 0))
-+ dinfo->di_bstart = 0;
-+}
-+
-+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
-+ aufs_bindex_t bend, aufs_bindex_t amount)
-+{
-+ struct au_hinode *hip;
-+
-+ AuRwMustWriteLock(&iinfo->ii_rwsem);
-+
-+ hip = iinfo->ii_hinode + bindex;
-+ memmove(hip + 1, hip, sizeof(*hip) * amount);
-+ hip->hi_inode = NULL;
-+ au_hn_init(hip);
-+ iinfo->ii_bend++;
-+ if (unlikely(bend < 0))
-+ iinfo->ii_bstart = 0;
-+}
-+
-+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
-+ aufs_bindex_t bindex)
-+{
-+ struct dentry *root, *h_dentry;
-+ struct inode *root_inode, *h_inode;
-+ aufs_bindex_t bend, amount;
-+
-+ root = sb->s_root;
-+ root_inode = d_inode(root);
-+ bend = au_sbend(sb);
-+ amount = bend + 1 - bindex;
-+ h_dentry = au_br_dentry(br);
-+ au_sbilist_lock();
-+ au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount);
-+ au_br_do_add_hdp(au_di(root), bindex, bend, amount);
-+ au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount);
-+ au_set_h_dptr(root, bindex, dget(h_dentry));
-+ h_inode = d_inode(h_dentry);
-+ au_set_h_iptr(root_inode, bindex, au_igrab(h_inode), /*flags*/0);
-+ au_sbilist_unlock();
-+}
-+
-+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
-+{
-+ int err;
-+ aufs_bindex_t bend, add_bindex;
-+ struct dentry *root, *h_dentry;
-+ struct inode *root_inode;
-+ struct au_branch *add_branch;
-+
-+ root = sb->s_root;
-+ root_inode = d_inode(root);
-+ IMustLock(root_inode);
-+ err = test_add(sb, add, remount);
-+ if (unlikely(err < 0))
-+ goto out;
-+ if (err) {
-+ err = 0;
-+ goto out; /* success */
-+ }
-+
-+ bend = au_sbend(sb);
-+ add_branch = au_br_alloc(sb, bend + 2, add->perm);
-+ err = PTR_ERR(add_branch);
-+ if (IS_ERR(add_branch))
-+ goto out;
-+
-+ err = au_br_init(add_branch, sb, add);
-+ if (unlikely(err)) {
-+ au_br_do_free(add_branch);
-+ goto out;
-+ }
-+
-+ add_bindex = add->bindex;
-+ if (!remount)
-+ au_br_do_add(sb, add_branch, add_bindex);
-+ else {
-+ sysaufs_brs_del(sb, add_bindex);
-+ au_br_do_add(sb, add_branch, add_bindex);
-+ sysaufs_brs_add(sb, add_bindex);
-+ }
-+
-+ h_dentry = add->path.dentry;
-+ if (!add_bindex) {
-+ au_cpup_attr_all(root_inode, /*force*/1);
-+ sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
-+ } else
-+ au_add_nlink(root_inode, d_inode(h_dentry));
-+
-+ /*
-+	 * this test/set prevents aufs from handling unnecessary notify events
-+ * of xino files, in case of re-adding a writable branch which was
-+ * once detached from aufs.
-+ */
-+ if (au_xino_brid(sb) < 0
-+ && au_br_writable(add_branch->br_perm)
-+ && !au_test_fs_bad_xino(h_dentry->d_sb)
-+ && add_branch->br_xino.xi_file
-+ && add_branch->br_xino.xi_file->f_path.dentry->d_parent == h_dentry)
-+ au_xino_brid_set(sb, add_branch->br_id);
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static unsigned long long au_farray_cb(void *a,
-+ unsigned long long max __maybe_unused,
-+ void *arg)
-+{
-+ unsigned long long n;
-+ struct file **p, *f;
-+ struct au_sphlhead *files;
-+ struct au_finfo *finfo;
-+ struct super_block *sb = arg;
-+
-+ n = 0;
-+ p = a;
-+ files = &au_sbi(sb)->si_files;
-+ spin_lock(&files->spin);
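-+	/* gather every opened non-special file on this sb */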
-+ hlist_for_each_entry(finfo, &files->head, fi_hlist) {
-+ f = finfo->fi_file;
-+ if (file_count(f)
-+ && !special_file(file_inode(f)->i_mode)) {
-+ get_file(f);
-+ *p++ = f;
-+ n++;
-+ AuDebugOn(n > max);
-+ }
-+ }
-+ spin_unlock(&files->spin);
-+
-+ return n;
-+}
-+
-+static struct file **au_farray_alloc(struct super_block *sb,
-+ unsigned long long *max)
-+{
-+ *max = atomic_long_read(&au_sbi(sb)->si_nfiles);
-+ return au_array_alloc(max, au_farray_cb, sb);
-+}
-+
-+static void au_farray_free(struct file **a, unsigned long long max)
-+{
-+ unsigned long long ull;
-+
-+ for (ull = 0; ull < max; ull++)
-+ if (a[ull])
-+ fput(a[ull]);
-+ au_array_free(a);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * delete a branch
-+ */
-+
-+/* to show the line number, do not make it an inline function */
-+#define AuVerbose(do_info, fmt, ...) do { \
-+ if (do_info) \
-+ pr_info(fmt, ##__VA_ARGS__); \
-+} while (0)
-+
-+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart,
-+ aufs_bindex_t bend)
-+{
-+ return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend;
-+}
-+
-+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart,
-+ aufs_bindex_t bend)
-+{
-+ return au_test_ibusy(d_inode(dentry), bstart, bend);
-+}
-+
-+/*
-+ * test if the branch is deletable or not.
-+ */
-+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
-+ unsigned int sigen, const unsigned int verbose)
-+{
-+ int err, i, j, ndentry;
-+ aufs_bindex_t bstart, bend;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry *d;
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
-+ if (unlikely(err))
-+ goto out_dpages;
-+
-+ for (i = 0; !err && i < dpages.ndpage; i++) {
-+ dpage = dpages.dpages + i;
-+ ndentry = dpage->ndentry;
-+ for (j = 0; !err && j < ndentry; j++) {
-+ d = dpage->dentries[j];
-+ AuDebugOn(au_dcount(d) <= 0);
-+ if (!au_digen_test(d, sigen)) {
-+ di_read_lock_child(d, AuLock_IR);
-+ if (unlikely(au_dbrange_test(d))) {
-+ di_read_unlock(d, AuLock_IR);
-+ continue;
-+ }
-+ } else {
-+ di_write_lock_child(d);
-+ if (unlikely(au_dbrange_test(d))) {
-+ di_write_unlock(d);
-+ continue;
-+ }
-+ err = au_reval_dpath(d, sigen);
-+ if (!err)
-+ di_downgrade_lock(d, AuLock_IR);
-+ else {
-+ di_write_unlock(d);
-+ break;
-+ }
-+ }
-+
-+ /* AuDbgDentry(d); */
-+ bstart = au_dbstart(d);
-+ bend = au_dbend(d);
-+ if (bstart <= bindex
-+ && bindex <= bend
-+ && au_h_dptr(d, bindex)
-+ && au_test_dbusy(d, bstart, bend)) {
-+ err = -EBUSY;
-+ AuVerbose(verbose, "busy %pd\n", d);
-+ AuDbgDentry(d);
-+ }
-+ di_read_unlock(d, AuLock_IR);
-+ }
-+ }
-+
-+out_dpages:
-+ au_dpages_free(&dpages);
-+out:
-+ return err;
-+}
-+
-+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
-+ unsigned int sigen, const unsigned int verbose)
-+{
-+ int err;
-+ unsigned long long max, ull;
-+ struct inode *i, **array;
-+ aufs_bindex_t bstart, bend;
-+
-+ array = au_iarray_alloc(sb, &max);
-+ err = PTR_ERR(array);
-+ if (IS_ERR(array))
-+ goto out;
-+
-+ err = 0;
-+ AuDbg("b%d\n", bindex);
-+ for (ull = 0; !err && ull < max; ull++) {
-+ i = array[ull];
-+ if (unlikely(!i))
-+ break;
-+ if (i->i_ino == AUFS_ROOT_INO)
-+ continue;
-+
-+ /* AuDbgInode(i); */
-+ if (au_iigen(i, NULL) == sigen)
-+ ii_read_lock_child(i);
-+ else {
-+ ii_write_lock_child(i);
-+ err = au_refresh_hinode_self(i);
-+ au_iigen_dec(i);
-+ if (!err)
-+ ii_downgrade_lock(i);
-+ else {
-+ ii_write_unlock(i);
-+ break;
-+ }
-+ }
-+
-+ bstart = au_ibstart(i);
-+ bend = au_ibend(i);
-+ if (bstart <= bindex
-+ && bindex <= bend
-+ && au_h_iptr(i, bindex)
-+ && au_test_ibusy(i, bstart, bend)) {
-+ err = -EBUSY;
-+ AuVerbose(verbose, "busy i%lu\n", i->i_ino);
-+ AuDbgInode(i);
-+ }
-+ ii_read_unlock(i);
-+ }
-+ au_iarray_free(array, max);
-+
-+out:
-+ return err;
-+}
-+
-+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
-+ const unsigned int verbose)
-+{
-+ int err;
-+ unsigned int sigen;
-+
-+ sigen = au_sigen(root->d_sb);
-+ DiMustNoWaiters(root);
-+ IiMustNoWaiters(d_inode(root));
-+ di_write_unlock(root);
-+ err = test_dentry_busy(root, bindex, sigen, verbose);
-+ if (!err)
-+ err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
-+ di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
-+
-+ return err;
-+}
-+
-+static int test_dir_busy(struct file *file, aufs_bindex_t br_id,
-+ struct file **to_free, int *idx)
-+{
-+ int err;
-+ unsigned char matched, root;
-+ aufs_bindex_t bindex, bend;
-+ struct au_fidir *fidir;
-+ struct au_hfile *hfile;
-+
-+ err = 0;
-+ root = IS_ROOT(file->f_path.dentry);
-+ if (root) {
-+ get_file(file);
-+ to_free[*idx] = file;
-+ (*idx)++;
-+ goto out;
-+ }
-+
-+ matched = 0;
-+ fidir = au_fi(file)->fi_hdir;
-+ AuDebugOn(!fidir);
-+ bend = au_fbend_dir(file);
-+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) {
-+ hfile = fidir->fd_hfile + bindex;
-+ if (!hfile->hf_file)
-+ continue;
-+
-+ if (hfile->hf_br->br_id == br_id) {
-+ matched = 1;
-+ break;
-+ }
-+ }
-+ if (matched)
-+ err = -EBUSY;
-+
-+out:
-+ return err;
-+}
-+
-+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id,
-+ struct file **to_free, int opened)
-+{
-+ int err, idx;
-+ unsigned long long ull, max;
-+ aufs_bindex_t bstart;
-+ struct file *file, **array;
-+ struct dentry *root;
-+ struct au_hfile *hfile;
-+
-+ array = au_farray_alloc(sb, &max);
-+ err = PTR_ERR(array);
-+ if (IS_ERR(array))
-+ goto out;
-+
-+ err = 0;
-+ idx = 0;
-+ root = sb->s_root;
-+ di_write_unlock(root);
-+ for (ull = 0; ull < max; ull++) {
-+ file = array[ull];
-+ if (unlikely(!file))
-+ break;
-+
-+ /* AuDbg("%pD\n", file); */
-+ fi_read_lock(file);
-+ bstart = au_fbstart(file);
-+ if (!d_is_dir(file->f_path.dentry)) {
-+ hfile = &au_fi(file)->fi_htop;
-+ if (hfile->hf_br->br_id == br_id)
-+ err = -EBUSY;
-+ } else
-+ err = test_dir_busy(file, br_id, to_free, &idx);
-+ fi_read_unlock(file);
-+ if (unlikely(err))
-+ break;
-+ }
-+ di_write_lock_child(root);
-+ au_farray_free(array, max);
-+ AuDebugOn(idx > opened);
-+
-+out:
-+ return err;
-+}
-+
-+static void br_del_file(struct file **to_free, unsigned long long opened,
-+ aufs_bindex_t br_id)
-+{
-+ unsigned long long ull;
-+ aufs_bindex_t bindex, bstart, bend, bfound;
-+ struct file *file;
-+ struct au_fidir *fidir;
-+ struct au_hfile *hfile;
-+
-+ for (ull = 0; ull < opened; ull++) {
-+ file = to_free[ull];
-+ if (unlikely(!file))
-+ break;
-+
-+ /* AuDbg("%pD\n", file); */
-+ AuDebugOn(!d_is_dir(file->f_path.dentry));
-+ bfound = -1;
-+ fidir = au_fi(file)->fi_hdir;
-+ AuDebugOn(!fidir);
-+ fi_write_lock(file);
-+ bstart = au_fbstart(file);
-+ bend = au_fbend_dir(file);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ hfile = fidir->fd_hfile + bindex;
-+ if (!hfile->hf_file)
-+ continue;
-+
-+ if (hfile->hf_br->br_id == br_id) {
-+ bfound = bindex;
-+ break;
-+ }
-+ }
-+ AuDebugOn(bfound < 0);
-+ au_set_h_fptr(file, bfound, NULL);
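-+		/* if the deleted branch was the first open one, advance fbstart */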
-+ if (bfound == bstart) {
-+ for (bstart++; bstart <= bend; bstart++)
-+ if (au_hf_dir(file, bstart)) {
-+ au_set_fbstart(file, bstart);
-+ break;
-+ }
-+ }
-+ fi_write_unlock(file);
-+ }
-+}
-+
-+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
-+ const aufs_bindex_t bindex,
-+ const aufs_bindex_t bend)
-+{
-+ struct au_branch **brp, **p;
-+
-+ AuRwMustWriteLock(&sbinfo->si_rwsem);
-+
-+ brp = sbinfo->si_branch + bindex;
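-+	/* close the gap left by the deleted branch */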
-+ if (bindex < bend)
-+ memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex));
-+ sbinfo->si_branch[0 + bend] = NULL;
-+ sbinfo->si_bend--;
-+
-+ p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST);
-+ if (p)
-+ sbinfo->si_branch = p;
-+ /* harmless error */
-+}
-+
-+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
-+ const aufs_bindex_t bend)
-+{
-+ struct au_hdentry *hdp, *p;
-+
-+ AuRwMustWriteLock(&dinfo->di_rwsem);
-+
-+ hdp = dinfo->di_hdentry;
-+ if (bindex < bend)
-+ memmove(hdp + bindex, hdp + bindex + 1,
-+ sizeof(*hdp) * (bend - bindex));
-+ hdp[0 + bend].hd_dentry = NULL;
-+ dinfo->di_bend--;
-+
-+ p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST);
-+ if (p)
-+ dinfo->di_hdentry = p;
-+ /* harmless error */
-+}
-+
-+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
-+ const aufs_bindex_t bend)
-+{
-+ struct au_hinode *hip, *p;
-+
-+ AuRwMustWriteLock(&iinfo->ii_rwsem);
-+
-+ hip = iinfo->ii_hinode + bindex;
-+ if (bindex < bend)
-+ memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex));
-+ iinfo->ii_hinode[0 + bend].hi_inode = NULL;
-+ au_hn_init(iinfo->ii_hinode + bend);
-+ iinfo->ii_bend--;
-+
-+ p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST);
-+ if (p)
-+ iinfo->ii_hinode = p;
-+ /* harmless error */
-+}
-+
-+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
-+ struct au_branch *br)
-+{
-+ aufs_bindex_t bend;
-+ struct au_sbinfo *sbinfo;
-+ struct dentry *root, *h_root;
-+ struct inode *inode, *h_inode;
-+ struct au_hinode *hinode;
-+
-+ SiMustWriteLock(sb);
-+
-+ root = sb->s_root;
-+ inode = d_inode(root);
-+ sbinfo = au_sbi(sb);
-+ bend = sbinfo->si_bend;
-+
-+ h_root = au_h_dptr(root, bindex);
-+ hinode = au_hi(inode, bindex);
-+ h_inode = au_igrab(hinode->hi_inode);
-+ au_hiput(hinode);
-+
-+ au_sbilist_lock();
-+ au_br_do_del_brp(sbinfo, bindex, bend);
-+ au_br_do_del_hdp(au_di(root), bindex, bend);
-+ au_br_do_del_hip(au_ii(inode), bindex, bend);
-+ au_sbilist_unlock();
-+
-+ dput(h_root);
-+ iput(h_inode);
-+ au_br_do_free(br);
-+}
-+
-+static unsigned long long empty_cb(void *array, unsigned long long max,
-+ void *arg)
-+{
-+ return max;
-+}
-+
-+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
-+{
-+ int err, rerr, i;
-+ unsigned long long opened;
-+ unsigned int mnt_flags;
-+ aufs_bindex_t bindex, bend, br_id;
-+ unsigned char do_wh, verbose;
-+ struct au_branch *br;
-+ struct au_wbr *wbr;
-+ struct dentry *root;
-+ struct file **to_free;
-+
-+ err = 0;
-+ opened = 0;
-+ to_free = NULL;
-+ root = sb->s_root;
-+ bindex = au_find_dbindex(root, del->h_path.dentry);
-+ if (bindex < 0) {
-+ if (remount)
-+ goto out; /* success */
-+ err = -ENOENT;
-+ pr_err("%s no such branch\n", del->pathname);
-+ goto out;
-+ }
-+ AuDbg("bindex b%d\n", bindex);
-+
-+ err = -EBUSY;
-+ mnt_flags = au_mntflags(sb);
-+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
-+ bend = au_sbend(sb);
-+ if (unlikely(!bend)) {
-+ AuVerbose(verbose, "no more branches left\n");
-+ goto out;
-+ }
-+ br = au_sbr(sb, bindex);
-+ AuDebugOn(!path_equal(&br->br_path, &del->h_path));
-+
-+ br_id = br->br_id;
-+ opened = atomic_read(&br->br_count);
-+ if (unlikely(opened)) {
-+ to_free = au_array_alloc(&opened, empty_cb, NULL);
-+ err = PTR_ERR(to_free);
-+ if (IS_ERR(to_free))
-+ goto out;
-+
-+ err = test_file_busy(sb, br_id, to_free, opened);
-+ if (unlikely(err)) {
-+ AuVerbose(verbose, "%llu file(s) opened\n", opened);
-+ goto out;
-+ }
-+ }
-+
-+ wbr = br->br_wbr;
-+ do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
-+ if (do_wh) {
-+ /* instead of WbrWhMustWriteLock(wbr) */
-+ SiMustWriteLock(sb);
-+ for (i = 0; i < AuBrWh_Last; i++) {
-+ dput(wbr->wbr_wh[i]);
-+ wbr->wbr_wh[i] = NULL;
-+ }
-+ }
-+
-+ err = test_children_busy(root, bindex, verbose);
-+ if (unlikely(err)) {
-+ if (do_wh)
-+ goto out_wh;
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (to_free) {
-+ /*
-+		 * now that we have confirmed the branch is deletable,
-+		 * free the remaining opened dirs on the branch.
-+ */
-+ di_write_unlock(root);
-+ br_del_file(to_free, opened, br_id);
-+ di_write_lock_child(root);
-+ }
-+
-+ if (!remount)
-+ au_br_do_del(sb, bindex, br);
-+ else {
-+ sysaufs_brs_del(sb, bindex);
-+ au_br_do_del(sb, bindex, br);
-+ sysaufs_brs_add(sb, bindex);
-+ }
-+
-+ if (!bindex) {
-+ au_cpup_attr_all(d_inode(root), /*force*/1);
-+ sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
-+ } else
-+ au_sub_nlink(d_inode(root), d_inode(del->h_path.dentry));
-+ if (au_opt_test(mnt_flags, PLINK))
-+ au_plink_half_refresh(sb, br_id);
-+
-+ if (au_xino_brid(sb) == br_id)
-+ au_xino_brid_set(sb, -1);
-+ goto out; /* success */
-+
-+out_wh:
-+ /* revert */
-+ rerr = au_br_init_wh(sb, br, br->br_perm);
-+ if (rerr)
-+ pr_warn("failed re-creating base whiteout, %s. (%d)\n",
-+ del->pathname, rerr);
-+out:
-+ if (to_free)
-+ au_farray_free(to_free, opened);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
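-+/*
-+ * ioctl helper: look up the aufs inode by ibusy.ino and report, via
-+ * ibusy.h_ino, the corresponding lower inode number on branch
-+ * ibusy.bindex; 0 means the inode is absent there or not busy.
-+ */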
-+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
-+{
-+ int err;
-+ aufs_bindex_t bstart, bend;
-+ struct aufs_ibusy ibusy;
-+ struct inode *inode, *h_inode;
-+
-+ err = -EPERM;
-+ if (unlikely(!capable(CAP_SYS_ADMIN)))
-+ goto out;
-+
-+ err = copy_from_user(&ibusy, arg, sizeof(ibusy));
-+ if (!err)
-+ err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ goto out;
-+ }
-+
-+ err = -EINVAL;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb)))
-+ goto out_unlock;
-+
-+ err = 0;
-+ ibusy.h_ino = 0; /* invalid */
-+ inode = ilookup(sb, ibusy.ino);
-+ if (!inode
-+ || inode->i_ino == AUFS_ROOT_INO
-+ || is_bad_inode(inode))
-+ goto out_unlock;
-+
-+ ii_read_lock_child(inode);
-+ bstart = au_ibstart(inode);
-+ bend = au_ibend(inode);
-+ if (bstart <= ibusy.bindex && ibusy.bindex <= bend) {
-+ h_inode = au_h_iptr(inode, ibusy.bindex);
-+ if (h_inode && au_test_ibusy(inode, bstart, bend))
-+ ibusy.h_ino = h_inode->i_ino;
-+ }
-+ ii_read_unlock(inode);
-+ iput(inode);
-+
-+out_unlock:
-+ si_read_unlock(sb);
-+ if (!err) {
-+ err = __put_user(ibusy.h_ino, &arg->h_ino);
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ }
-+ }
-+out:
-+ return err;
-+}
-+
-+long au_ibusy_ioctl(struct file *file, unsigned long arg)
-+{
-+ return au_ibusy(file->f_path.dentry->d_sb, (void __user *)arg);
-+}
-+
-+#ifdef CONFIG_COMPAT
-+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
-+{
-+ return au_ibusy(file->f_path.dentry->d_sb, compat_ptr(arg));
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * change a branch permission
-+ */
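-+/*
-+ * au_br_mod() below covers three kinds of transition: rw -> ro (forcing
-+ * the already-opened writable files read-only), ro -> rw (allocating the
-+ * au_wbr writable-branch data), and fhsm on/off (allocating or freeing
-+ * the au_br_fhsm data).
-+ */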
-+
-+static void au_warn_ima(void)
-+{
-+#ifdef CONFIG_IMA
-+ /* since it doesn't support mark_files_ro() */
-+	AuWarn1("RW -> RO makes IMA produce a wrong message\n");
-+#endif
-+}
-+
-+static int do_need_sigen_inc(int a, int b)
-+{
-+ return au_br_whable(a) && !au_br_whable(b);
-+}
-+
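-+/*
-+ * the superblock generation must be bumped whenever whiteout-ability is
-+ * gained or lost by the permission change, in either direction; e.g.
-+ * plain rw -> ro needs it, while rw -> ro+wh does not.
-+ */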
-+static int need_sigen_inc(int old, int new)
-+{
-+ return do_need_sigen_inc(old, new)
-+ || do_need_sigen_inc(new, old);
-+}
-+
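-+/*
-+ * two passes over every opened file: the first pass collects the files
-+ * opened for write on this branch (failing with -EBUSY if any of them is
-+ * mmapped), the second strips FMODE_WRITE/FMODE_WRITER from the lower
-+ * file and drops its write access on the lower mount.
-+ */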
-+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ int err, do_warn;
-+ unsigned int mnt_flags;
-+ unsigned long long ull, max;
-+ aufs_bindex_t br_id;
-+ unsigned char verbose, writer;
-+ struct file *file, *hf, **array;
-+ struct au_hfile *hfile;
-+
-+ mnt_flags = au_mntflags(sb);
-+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
-+
-+ array = au_farray_alloc(sb, &max);
-+ err = PTR_ERR(array);
-+ if (IS_ERR(array))
-+ goto out;
-+
-+ do_warn = 0;
-+ br_id = au_sbr_id(sb, bindex);
-+ for (ull = 0; ull < max; ull++) {
-+ file = array[ull];
-+ if (unlikely(!file))
-+ break;
-+
-+ /* AuDbg("%pD\n", file); */
-+ fi_read_lock(file);
-+ if (unlikely(au_test_mmapped(file))) {
-+ err = -EBUSY;
-+ AuVerbose(verbose, "mmapped %pD\n", file);
-+ AuDbgFile(file);
-+ FiMustNoWaiters(file);
-+ fi_read_unlock(file);
-+ goto out_array;
-+ }
-+
-+ hfile = &au_fi(file)->fi_htop;
-+ hf = hfile->hf_file;
-+ if (!d_is_reg(file->f_path.dentry)
-+ || !(file->f_mode & FMODE_WRITE)
-+ || hfile->hf_br->br_id != br_id
-+ || !(hf->f_mode & FMODE_WRITE))
-+ array[ull] = NULL;
-+ else {
-+ do_warn = 1;
-+ get_file(file);
-+ }
-+
-+ FiMustNoWaiters(file);
-+ fi_read_unlock(file);
-+ fput(file);
-+ }
-+
-+ err = 0;
-+ if (do_warn)
-+ au_warn_ima();
-+
-+ for (ull = 0; ull < max; ull++) {
-+ file = array[ull];
-+ if (!file)
-+ continue;
-+
-+ /* todo: already flushed? */
-+ /*
-+ * fs/super.c:mark_files_ro() is gone, but aufs keeps its
-+ * approach which resets f_mode and calls mnt_drop_write() and
-+ * file_release_write() for each file, because the branch
-+ * attribute in aufs world is totally different from the native
-+ * fs rw/ro mode.
-+ */
-+ /* fi_read_lock(file); */
-+ hfile = &au_fi(file)->fi_htop;
-+ hf = hfile->hf_file;
-+ /* fi_read_unlock(file); */
-+ spin_lock(&hf->f_lock);
-+ writer = !!(hf->f_mode & FMODE_WRITER);
-+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER);
-+ spin_unlock(&hf->f_lock);
-+ if (writer) {
-+ put_write_access(file_inode(hf));
-+ __mnt_drop_write(hf->f_path.mnt);
-+ }
-+ }
-+
-+out_array:
-+ au_farray_free(array, max);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
-+ int *do_refresh)
-+{
-+ int err, rerr;
-+ aufs_bindex_t bindex;
-+ struct dentry *root;
-+ struct au_branch *br;
-+ struct au_br_fhsm *bf;
-+
-+ root = sb->s_root;
-+ bindex = au_find_dbindex(root, mod->h_root);
-+ if (bindex < 0) {
-+ if (remount)
-+ return 0; /* success */
-+ err = -ENOENT;
-+ pr_err("%s no such branch\n", mod->path);
-+ goto out;
-+ }
-+ AuDbg("bindex b%d\n", bindex);
-+
-+ err = test_br(d_inode(mod->h_root), mod->perm, mod->path);
-+ if (unlikely(err))
-+ goto out;
-+
-+ br = au_sbr(sb, bindex);
-+ AuDebugOn(mod->h_root != au_br_dentry(br));
-+ if (br->br_perm == mod->perm)
-+ return 0; /* success */
-+
-+ /* pre-allocate for non-fhsm --> fhsm */
-+ bf = NULL;
-+ if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) {
-+ err = au_fhsm_br_alloc(br);
-+ if (unlikely(err))
-+ goto out;
-+ bf = br->br_fhsm;
-+ br->br_fhsm = NULL;
-+ }
-+
-+ if (au_br_writable(br->br_perm)) {
-+ /* remove whiteout base */
-+ err = au_br_init_wh(sb, br, mod->perm);
-+ if (unlikely(err))
-+ goto out_bf;
-+
-+ if (!au_br_writable(mod->perm)) {
-+ /* rw --> ro, file might be mmapped */
-+ DiMustNoWaiters(root);
-+ IiMustNoWaiters(d_inode(root));
-+ di_write_unlock(root);
-+ err = au_br_mod_files_ro(sb, bindex);
-+ /* aufs_write_lock() calls ..._child() */
-+ di_write_lock_child(root);
-+
-+ if (unlikely(err)) {
-+ rerr = -ENOMEM;
-+ br->br_wbr = kmalloc(sizeof(*br->br_wbr),
-+ GFP_NOFS);
-+ if (br->br_wbr)
-+ rerr = au_wbr_init(br, sb, br->br_perm);
-+ if (unlikely(rerr)) {
-+ AuIOErr("nested error %d (%d)\n",
-+ rerr, err);
-+ br->br_perm = mod->perm;
-+ }
-+ }
-+ }
-+ } else if (au_br_writable(mod->perm)) {
-+ /* ro --> rw */
-+ err = -ENOMEM;
-+ br->br_wbr = kmalloc(sizeof(*br->br_wbr), GFP_NOFS);
-+ if (br->br_wbr) {
-+ err = au_wbr_init(br, sb, mod->perm);
-+ if (unlikely(err)) {
-+ kfree(br->br_wbr);
-+ br->br_wbr = NULL;
-+ }
-+ }
-+ }
-+ if (unlikely(err))
-+ goto out_bf;
-+
-+ if (au_br_fhsm(br->br_perm)) {
-+ if (!au_br_fhsm(mod->perm)) {
-+ /* fhsm --> non-fhsm */
-+ au_br_fhsm_fin(br->br_fhsm);
-+ kfree(br->br_fhsm);
-+ br->br_fhsm = NULL;
-+ }
-+ } else if (au_br_fhsm(mod->perm))
-+ /* non-fhsm --> fhsm */
-+ br->br_fhsm = bf;
-+
-+ *do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
-+ br->br_perm = mod->perm;
-+ goto out; /* success */
-+
-+out_bf:
-+ kfree(bf);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs)
-+{
-+ int err;
-+ struct kstatfs kstfs;
-+
-+ err = vfs_statfs(&br->br_path, &kstfs);
-+ if (!err) {
-+ stfs->f_blocks = kstfs.f_blocks;
-+ stfs->f_bavail = kstfs.f_bavail;
-+ stfs->f_files = kstfs.f_files;
-+ stfs->f_ffree = kstfs.f_ffree;
-+ }
-+
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/branch.h linux-4.1.10/fs/aufs/branch.h
---- linux-4.1.10.orig/fs/aufs/branch.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/branch.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,279 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * branch filesystems and xino for them
-+ */
-+
-+#ifndef __AUFS_BRANCH_H__
-+#define __AUFS_BRANCH_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/mount.h>
-+#include "dynop.h"
-+#include "rwsem.h"
-+#include "super.h"
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* a xino file */
-+struct au_xino_file {
-+ struct file *xi_file;
-+ struct mutex xi_nondir_mtx;
-+
-+	/* todo: make xino files an array to support huge inode numbers */
-+
-+#ifdef CONFIG_DEBUG_FS
-+ struct dentry *xi_dbgaufs;
-+#endif
-+};
-+
-+/* File-based Hierarchical Storage Management */
-+struct au_br_fhsm {
-+#ifdef CONFIG_AUFS_FHSM
-+ struct mutex bf_lock;
-+ unsigned long bf_jiffy;
-+ struct aufs_stfs bf_stfs;
-+ int bf_readable;
-+#endif
-+};
-+
-+/* members for writable branch only */
-+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
-+struct au_wbr {
-+ struct au_rwsem wbr_wh_rwsem;
-+ struct dentry *wbr_wh[AuBrWh_Last];
-+ atomic_t wbr_wh_running;
-+#define wbr_whbase wbr_wh[AuBrWh_BASE] /* whiteout base */
-+#define wbr_plink wbr_wh[AuBrWh_PLINK] /* pseudo-link dir */
-+#define wbr_orph wbr_wh[AuBrWh_ORPH] /* dir for orphans */
-+
-+ /* mfs mode */
-+ unsigned long long wbr_bytes;
-+};
-+
-+/* ext2 has 3 types of operations at least, ext3 has 4 */
-+#define AuBrDynOp (AuDyLast * 4)
-+
-+#ifdef CONFIG_AUFS_HFSNOTIFY
-+/* support for asynchronous destruction */
-+struct au_br_hfsnotify {
-+ struct fsnotify_group *hfsn_group;
-+};
-+#endif
-+
-+/* sysfs entries */
-+struct au_brsysfs {
-+ char name[16];
-+ struct attribute attr;
-+};
-+
-+enum {
-+ AuBrSysfs_BR,
-+ AuBrSysfs_BRID,
-+ AuBrSysfs_Last
-+};
-+
-+/* protected by superblock rwsem */
-+struct au_branch {
-+ struct au_xino_file br_xino;
-+
-+ aufs_bindex_t br_id;
-+
-+ int br_perm;
-+ struct path br_path;
-+ spinlock_t br_dykey_lock;
-+ struct au_dykey *br_dykey[AuBrDynOp];
-+ atomic_t br_count;
-+
-+ struct au_wbr *br_wbr;
-+ struct au_br_fhsm *br_fhsm;
-+
-+ /* xino truncation */
-+ atomic_t br_xino_running;
-+
-+#ifdef CONFIG_AUFS_HFSNOTIFY
-+ struct au_br_hfsnotify *br_hfsn;
-+#endif
-+
-+#ifdef CONFIG_SYSFS
-+ /* entries under sysfs per mount-point */
-+ struct au_brsysfs br_sysfs[AuBrSysfs_Last];
-+#endif
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
-+{
-+ return br->br_path.mnt;
-+}
-+
-+static inline struct dentry *au_br_dentry(struct au_branch *br)
-+{
-+ return br->br_path.dentry;
-+}
-+
-+static inline struct super_block *au_br_sb(struct au_branch *br)
-+{
-+ return au_br_mnt(br)->mnt_sb;
-+}
-+
-+static inline int au_br_rdonly(struct au_branch *br)
-+{
-+ return ((au_br_sb(br)->s_flags & MS_RDONLY)
-+ || !au_br_writable(br->br_perm))
-+ ? -EROFS : 0;
-+}
-+
-+static inline int au_br_hnotifyable(int brperm __maybe_unused)
-+{
-+#ifdef CONFIG_AUFS_HNOTIFY
-+ return !(brperm & AuBrPerm_RR);
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_br_test_oflag(int oflag, struct au_branch *br)
-+{
-+ int err, exec_flag;
-+
-+ err = 0;
-+ exec_flag = oflag & __FMODE_EXEC;
-+ if (unlikely(exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC)))
-+ err = -EACCES;
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* branch.c */
-+struct au_sbinfo;
-+void au_br_free(struct au_sbinfo *sinfo);
-+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
-+struct au_opt_add;
-+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
-+struct au_opt_del;
-+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
-+long au_ibusy_ioctl(struct file *file, unsigned long arg);
-+#ifdef CONFIG_COMPAT
-+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
-+#endif
-+struct au_opt_mod;
-+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
-+ int *do_refresh);
-+struct aufs_stfs;
-+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs);
-+
-+/* xino.c */
-+static const loff_t au_loff_max = LLONG_MAX;
-+
-+int au_xib_trunc(struct super_block *sb);
-+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *buf, size_t size,
-+ loff_t *pos);
-+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
-+ size_t size, loff_t *pos);
-+struct file *au_xino_create2(struct file *base_file, struct file *copy_src);
-+struct file *au_xino_create(struct super_block *sb, char *fname, int silent);
-+ino_t au_xino_new_ino(struct super_block *sb);
-+void au_xino_delete_inode(struct inode *inode, const int unlinked);
-+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ ino_t ino);
-+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ ino_t *ino);
-+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino,
-+ struct file *base_file, int do_test);
-+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex);
-+
-+struct au_opt_xino;
-+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount);
-+void au_xino_clr(struct super_block *sb);
-+struct file *au_xino_def(struct super_block *sb);
-+int au_xino_path(struct seq_file *seq, struct file *file);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* Superblock to branch */
-+static inline
-+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ return au_sbr(sb, bindex)->br_id;
-+}
-+
-+static inline
-+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ return au_br_mnt(au_sbr(sb, bindex));
-+}
-+
-+static inline
-+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ return au_br_sb(au_sbr(sb, bindex));
-+}
-+
-+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ atomic_dec(&au_sbr(sb, bindex)->br_count);
-+}
-+
-+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ return au_sbr(sb, bindex)->br_perm;
-+}
-+
-+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ return au_br_whable(au_sbr_perm(sb, bindex));
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * wbr_wh_read_lock, wbr_wh_write_lock
-+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock
-+ */
-+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem);
-+
-+#define WbrWhMustNoWaiters(wbr) AuRwMustNoWaiters(&wbr->wbr_wh_rwsem)
-+#define WbrWhMustAnyLock(wbr) AuRwMustAnyLock(&wbr->wbr_wh_rwsem)
-+#define WbrWhMustWriteLock(wbr) AuRwMustWriteLock(&wbr->wbr_wh_rwsem)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_FHSM
-+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm)
-+{
-+ mutex_init(&brfhsm->bf_lock);
-+ brfhsm->bf_jiffy = 0;
-+ brfhsm->bf_readable = 0;
-+}
-+
-+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm)
-+{
-+ mutex_destroy(&brfhsm->bf_lock);
-+}
-+#else
-+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm)
-+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm)
-+#endif
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_BRANCH_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/conf.mk linux-4.1.10/fs/aufs/conf.mk
---- linux-4.1.10.orig/fs/aufs/conf.mk 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/conf.mk 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,38 @@
-+
-+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS}
-+
-+define AuConf
-+ifdef ${1}
-+AuConfStr += ${1}=${${1}}
-+endif
-+endef
-+
-+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \
-+ SBILIST \
-+ HNOTIFY HFSNOTIFY \
-+ EXPORT INO_T_64 \
-+ XATTR \
-+ FHSM \
-+ RDU \
-+ SHWH \
-+ BR_RAMFS \
-+ BR_FUSE POLL \
-+ BR_HFSPLUS \
-+ BDEV_LOOP \
-+ DEBUG MAGIC_SYSRQ
-+$(foreach i, ${AuConfAll}, \
-+ $(eval $(call AuConf,CONFIG_AUFS_${i})))
-+
-+AuConfName = ${obj}/conf.str
-+${AuConfName}.tmp: FORCE
-+ @echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@
-+${AuConfName}: ${AuConfName}.tmp
-+ @diff -q $< $@ > /dev/null 2>&1 || { \
-+ echo ' GEN ' $@; \
-+ cp -p $< $@; \
-+ }
-+FORCE:
-+clean-files += ${AuConfName} ${AuConfName}.tmp
-+${obj}/sysfs.o: ${AuConfName}
-+
-+-include ${srctree}/${src}/conf_priv.mk
-diff -Nur linux-4.1.10.orig/fs/aufs/cpup.c linux-4.1.10/fs/aufs/cpup.c
---- linux-4.1.10.orig/fs/aufs/cpup.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/cpup.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1319 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * copy-up functions, see wbr_policy.c for copy-down
-+ */
-+
-+#include <linux/fs_stack.h>
-+#include <linux/mm.h>
-+#include "aufs.h"
-+
-+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
-+{
-+ const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
-+ | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
-+
-+ BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
-+
-+ dst->i_flags |= iflags & ~mask;
-+ if (au_test_fs_notime(dst->i_sb))
-+ dst->i_flags |= S_NOATIME | S_NOCMTIME;
-+}
-+
-+void au_cpup_attr_timesizes(struct inode *inode)
-+{
-+ struct inode *h_inode;
-+
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ fsstack_copy_attr_times(inode, h_inode);
-+ fsstack_copy_inode_size(inode, h_inode);
-+}
-+
-+void au_cpup_attr_nlink(struct inode *inode, int force)
-+{
-+ struct inode *h_inode;
-+ struct super_block *sb;
-+ aufs_bindex_t bindex, bend;
-+
-+ sb = inode->i_sb;
-+ bindex = au_ibstart(inode);
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (!force
-+ && !S_ISDIR(h_inode->i_mode)
-+ && au_opt_test(au_mntflags(sb), PLINK)
-+ && au_plink_test(inode))
-+ return;
-+
-+ /*
-+	 * 0 can happen while revalidating.
-+	 * h_inode->i_mutex may not be held here, but it is harmless since once
-+	 * i_nlink reaches 0, it will never become positive except in the
-+	 * O_TMPFILE case.
-+	 * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause
-+	 * an incorrect link count.
-+ */
-+ set_nlink(inode, h_inode->i_nlink);
-+
-+ /*
-+	 * a smaller nlink makes find(1) noisy, but a larger one doesn't.
-+	 * it may include the whplink directory.
-+ */
-+ if (S_ISDIR(h_inode->i_mode)) {
-+ bend = au_ibend(inode);
-+ for (bindex++; bindex <= bend; bindex++) {
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (h_inode)
-+ au_add_nlink(inode, h_inode);
-+ }
-+ }
-+}
-+
-+void au_cpup_attr_changeable(struct inode *inode)
-+{
-+ struct inode *h_inode;
-+
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ inode->i_mode = h_inode->i_mode;
-+ inode->i_uid = h_inode->i_uid;
-+ inode->i_gid = h_inode->i_gid;
-+ au_cpup_attr_timesizes(inode);
-+ au_cpup_attr_flags(inode, h_inode->i_flags);
-+}
-+
-+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
-+{
-+ struct au_iinfo *iinfo = au_ii(inode);
-+
-+ IiMustWriteLock(inode);
-+
-+ iinfo->ii_higen = h_inode->i_generation;
-+ iinfo->ii_hsb1 = h_inode->i_sb;
-+}
-+
-+void au_cpup_attr_all(struct inode *inode, int force)
-+{
-+ struct inode *h_inode;
-+
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ au_cpup_attr_changeable(inode);
-+ if (inode->i_nlink > 0)
-+ au_cpup_attr_nlink(inode, force);
-+ inode->i_rdev = h_inode->i_rdev;
-+ inode->i_blkbits = h_inode->i_blkbits;
-+ au_cpup_igen(inode, h_inode);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */
-+
-+/* keep the timestamps of the parent dir when cpup */
-+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
-+ struct path *h_path)
-+{
-+ struct inode *h_inode;
-+
-+ dt->dt_dentry = dentry;
-+ dt->dt_h_path = *h_path;
-+ h_inode = d_inode(h_path->dentry);
-+ dt->dt_atime = h_inode->i_atime;
-+ dt->dt_mtime = h_inode->i_mtime;
-+ /* smp_mb(); */
-+}
-+
-+void au_dtime_revert(struct au_dtime *dt)
-+{
-+ struct iattr attr;
-+ int err;
-+
-+ attr.ia_atime = dt->dt_atime;
-+ attr.ia_mtime = dt->dt_mtime;
-+ attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
-+ | ATTR_ATIME | ATTR_ATIME_SET;
-+
-+ /* no delegation since this is a directory */
-+ err = vfsub_notify_change(&dt->dt_h_path, &attr, /*delegated*/NULL);
-+ if (unlikely(err))
-+ pr_warn("restoring timestamps failed(%d). ignored\n", err);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* internal use only */
-+struct au_cpup_reg_attr {
-+ int valid;
-+ struct kstat st;
-+ unsigned int iflags; /* inode->i_flags */
-+};
-+
-+static noinline_for_stack
-+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
-+ struct au_cpup_reg_attr *h_src_attr)
-+{
-+ int err, sbits, icex;
-+ unsigned int mnt_flags;
-+ unsigned char verbose;
-+ struct iattr ia;
-+ struct path h_path;
-+ struct inode *h_isrc, *h_idst;
-+ struct kstat *h_st;
-+ struct au_branch *br;
-+
-+ h_path.dentry = au_h_dptr(dst, bindex);
-+ h_idst = d_inode(h_path.dentry);
-+ br = au_sbr(dst->d_sb, bindex);
-+ h_path.mnt = au_br_mnt(br);
-+ h_isrc = d_inode(h_src);
-+ ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
-+ | ATTR_ATIME | ATTR_MTIME
-+ | ATTR_ATIME_SET | ATTR_MTIME_SET;
-+ if (h_src_attr && h_src_attr->valid) {
-+ h_st = &h_src_attr->st;
-+ ia.ia_uid = h_st->uid;
-+ ia.ia_gid = h_st->gid;
-+ ia.ia_atime = h_st->atime;
-+ ia.ia_mtime = h_st->mtime;
-+ if (h_idst->i_mode != h_st->mode
-+ && !S_ISLNK(h_idst->i_mode)) {
-+ ia.ia_valid |= ATTR_MODE;
-+ ia.ia_mode = h_st->mode;
-+ }
-+ sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
-+ au_cpup_attr_flags(h_idst, h_src_attr->iflags);
-+ } else {
-+ ia.ia_uid = h_isrc->i_uid;
-+ ia.ia_gid = h_isrc->i_gid;
-+ ia.ia_atime = h_isrc->i_atime;
-+ ia.ia_mtime = h_isrc->i_mtime;
-+ if (h_idst->i_mode != h_isrc->i_mode
-+ && !S_ISLNK(h_idst->i_mode)) {
-+ ia.ia_valid |= ATTR_MODE;
-+ ia.ia_mode = h_isrc->i_mode;
-+ }
-+ sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
-+ au_cpup_attr_flags(h_idst, h_isrc->i_flags);
-+ }
-+ /* no delegation since it is just created */
-+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
-+
-+ /* is this nfs only? */
-+ if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
-+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
-+ ia.ia_mode = h_isrc->i_mode;
-+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
-+ }
-+
-+ icex = br->br_perm & AuBrAttr_ICEX;
-+ if (!err) {
-+ mnt_flags = au_mntflags(dst->d_sb);
-+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
-+ err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose);
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
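-+/*
-+ * block-by-block copy preserving sparseness: a block which is entirely
-+ * zero is skipped by seeking forward on the destination instead of being
-+ * written, and when the file ends in such a hole the size is fixed up via
-+ * ->setattr (on nfs, one byte is written and rewound first to force the
-+ * last block out).
-+ */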
-+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
-+ char *buf, unsigned long blksize)
-+{
-+ int err;
-+ size_t sz, rbytes, wbytes;
-+ unsigned char all_zero;
-+ char *p, *zp;
-+ struct mutex *h_mtx;
-+ /* reduce stack usage */
-+ struct iattr *ia;
-+
-+ zp = page_address(ZERO_PAGE(0));
-+ if (unlikely(!zp))
-+ return -ENOMEM; /* possible? */
-+
-+ err = 0;
-+ all_zero = 0;
-+ while (len) {
-+ AuDbg("len %lld\n", len);
-+ sz = blksize;
-+ if (len < blksize)
-+ sz = len;
-+
-+ rbytes = 0;
-+ /* todo: signal_pending? */
-+ while (!rbytes || err == -EAGAIN || err == -EINTR) {
-+ rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
-+ err = rbytes;
-+ }
-+ if (unlikely(err < 0))
-+ break;
-+
-+ all_zero = 0;
-+ if (len >= rbytes && rbytes == blksize)
-+ all_zero = !memcmp(buf, zp, rbytes);
-+ if (!all_zero) {
-+ wbytes = rbytes;
-+ p = buf;
-+ while (wbytes) {
-+ size_t b;
-+
-+ b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
-+ err = b;
-+ /* todo: signal_pending? */
-+ if (unlikely(err == -EAGAIN || err == -EINTR))
-+ continue;
-+ if (unlikely(err < 0))
-+ break;
-+ wbytes -= b;
-+ p += b;
-+ }
-+ if (unlikely(err < 0))
-+ break;
-+ } else {
-+ loff_t res;
-+
-+ AuLabel(hole);
-+ res = vfsub_llseek(dst, rbytes, SEEK_CUR);
-+ err = res;
-+ if (unlikely(res < 0))
-+ break;
-+ }
-+ len -= rbytes;
-+ err = 0;
-+ }
-+
-+ /* the last block may be a hole */
-+ if (!err && all_zero) {
-+ AuLabel(last hole);
-+
-+ err = 1;
-+ if (au_test_nfs(dst->f_path.dentry->d_sb)) {
-+			/* nfs requires this step to make the last hole */
-+ /* is this only nfs? */
-+ do {
-+ /* todo: signal_pending? */
-+ err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
-+ } while (err == -EAGAIN || err == -EINTR);
-+ if (err == 1)
-+ dst->f_pos--;
-+ }
-+
-+ if (err == 1) {
-+ ia = (void *)buf;
-+ ia->ia_size = dst->f_pos;
-+ ia->ia_valid = ATTR_SIZE | ATTR_FILE;
-+ ia->ia_file = dst;
-+ h_mtx = &file_inode(dst)->i_mutex;
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
-+ /* no delegation since it is just created */
-+ err = vfsub_notify_change(&dst->f_path, ia,
-+ /*delegated*/NULL);
-+ mutex_unlock(h_mtx);
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+int au_copy_file(struct file *dst, struct file *src, loff_t len)
-+{
-+ int err;
-+ unsigned long blksize;
-+ unsigned char do_kfree;
-+ char *buf;
-+
-+ err = -ENOMEM;
-+ blksize = dst->f_path.dentry->d_sb->s_blocksize;
-+ if (!blksize || PAGE_SIZE < blksize)
-+ blksize = PAGE_SIZE;
-+ AuDbg("blksize %lu\n", blksize);
-+ do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
-+ if (do_kfree)
-+ buf = kmalloc(blksize, GFP_NOFS);
-+ else
-+ buf = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!buf))
-+ goto out;
-+
-+ if (len > (1 << 22))
-+ AuDbg("copying a large file %lld\n", (long long)len);
-+
-+ src->f_pos = 0;
-+ dst->f_pos = 0;
-+ err = au_do_copy_file(dst, src, len, buf, blksize);
-+ if (do_kfree)
-+ kfree(buf);
-+ else
-+ free_page((unsigned long)buf);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * to support a sparse file which is opened with O_APPEND,
-+ * we need to close the file.
-+ */
-+static int au_cp_regular(struct au_cp_generic *cpg)
-+{
-+ int err, i;
-+ enum { SRC, DST };
-+ struct {
-+ aufs_bindex_t bindex;
-+ unsigned int flags;
-+ struct dentry *dentry;
-+ int force_wr;
-+ struct file *file;
-+ void *label;
-+ } *f, file[] = {
-+ {
-+ .bindex = cpg->bsrc,
-+ .flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
-+ .label = &&out
-+ },
-+ {
-+ .bindex = cpg->bdst,
-+ .flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
-+ .force_wr = !!au_ftest_cpup(cpg->flags, RWDST),
-+ .label = &&out_src
-+ }
-+ };
-+ struct super_block *sb;
-+
-+ /* bsrc branch can be ro/rw. */
-+ sb = cpg->dentry->d_sb;
-+ f = file;
-+ for (i = 0; i < 2; i++, f++) {
-+ f->dentry = au_h_dptr(cpg->dentry, f->bindex);
-+ f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
-+ /*file*/NULL, f->force_wr);
-+ err = PTR_ERR(f->file);
-+ if (IS_ERR(f->file))
-+ goto *f->label;
-+ }
-+
-+	/* try to prevent updates while we copy up */
-+ IMustLock(d_inode(file[SRC].dentry));
-+ err = au_copy_file(file[DST].file, file[SRC].file, cpg->len);
-+
-+ fput(file[DST].file);
-+ au_sbr_put(sb, file[DST].bindex);
-+
-+out_src:
-+ fput(file[SRC].file);
-+ au_sbr_put(sb, file[SRC].bindex);
-+out:
-+ return err;
-+}
-+
-+static int au_do_cpup_regular(struct au_cp_generic *cpg,
-+ struct au_cpup_reg_attr *h_src_attr)
-+{
-+ int err, rerr;
-+ loff_t l;
-+ struct path h_path;
-+ struct inode *h_src_inode, *h_dst_inode;
-+
-+ err = 0;
-+ h_src_inode = au_h_iptr(d_inode(cpg->dentry), cpg->bsrc);
-+ l = i_size_read(h_src_inode);
-+ if (cpg->len == -1 || l < cpg->len)
-+ cpg->len = l;
-+ if (cpg->len) {
-+		/* try to prevent updates while we are referencing */
-+ mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD);
-+ au_pin_hdir_unlock(cpg->pin);
-+
-+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
-+ h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
-+ h_src_attr->iflags = h_src_inode->i_flags;
-+ if (!au_test_nfs(h_src_inode->i_sb))
-+ err = vfs_getattr(&h_path, &h_src_attr->st);
-+ else {
-+ mutex_unlock(&h_src_inode->i_mutex);
-+ err = vfs_getattr(&h_path, &h_src_attr->st);
-+ mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD);
-+ }
-+ if (unlikely(err)) {
-+ mutex_unlock(&h_src_inode->i_mutex);
-+ goto out;
-+ }
-+ h_src_attr->valid = 1;
-+ err = au_cp_regular(cpg);
-+ mutex_unlock(&h_src_inode->i_mutex);
-+ rerr = au_pin_hdir_relock(cpg->pin);
-+ if (!err && rerr)
-+ err = rerr;
-+ }
-+ if (!err && (h_src_inode->i_state & I_LINKABLE)) {
-+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst);
-+ h_dst_inode = d_inode(h_path.dentry);
-+ spin_lock(&h_dst_inode->i_lock);
-+ h_dst_inode->i_state |= I_LINKABLE;
-+ spin_unlock(&h_dst_inode->i_lock);
-+ }
-+
-+out:
-+ return err;
-+}
-+
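-+/*
-+ * ->readlink() expects a user-space buffer, so the kernel page is passed
-+ * under set_fs(KERNEL_DS) and the original address limit is restored
-+ * right after the call.
-+ */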
-+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
-+ struct inode *h_dir)
-+{
-+ int err, symlen;
-+ mm_segment_t old_fs;
-+ union {
-+ char *k;
-+ char __user *u;
-+ } sym;
-+ struct inode *h_inode = d_inode(h_src);
-+ const struct inode_operations *h_iop = h_inode->i_op;
-+
-+ err = -ENOSYS;
-+ if (unlikely(!h_iop->readlink))
-+ goto out;
-+
-+ err = -ENOMEM;
-+ sym.k = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!sym.k))
-+ goto out;
-+
-+ /* unnecessary to support mmap_sem since symlink is not mmap-able */
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+ symlen = h_iop->readlink(h_src, sym.u, PATH_MAX);
-+ err = symlen;
-+ set_fs(old_fs);
-+
-+ if (symlen > 0) {
-+ sym.k[symlen] = 0;
-+ err = vfsub_symlink(h_dir, h_path, sym.k);
-+ }
-+ free_page((unsigned long)sym.k);
-+
-+out:
-+ return err;
-+}
-+
-+static noinline_for_stack
-+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
-+ struct au_cpup_reg_attr *h_src_attr)
-+{
-+ int err;
-+ umode_t mode;
-+ unsigned int mnt_flags;
-+ unsigned char isdir, isreg, force;
-+ const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
-+ struct au_dtime dt;
-+ struct path h_path;
-+ struct dentry *h_src, *h_dst, *h_parent;
-+ struct inode *h_inode, *h_dir, *dir, *inode;
-+ struct super_block *sb;
-+
-+ /* bsrc branch can be ro/rw. */
-+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
-+ h_inode = d_inode(h_src);
-+ AuDebugOn(h_inode != au_h_iptr(d_inode(cpg->dentry), cpg->bsrc));
-+
-+	/* try to prevent being referenced while we are creating */
-+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
-+ if (au_ftest_cpup(cpg->flags, RENAME))
-+ AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
-+ AUFS_WH_PFX_LEN));
-+ h_parent = h_dst->d_parent; /* dir inode is locked */
-+ h_dir = d_inode(h_parent);
-+ IMustLock(h_dir);
-+ AuDebugOn(h_parent != h_dst->d_parent);
-+
-+ sb = cpg->dentry->d_sb;
-+ h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
-+ if (do_dt) {
-+ h_path.dentry = h_parent;
-+ au_dtime_store(&dt, dst_parent, &h_path);
-+ }
-+ h_path.dentry = h_dst;
-+
-+ isreg = 0;
-+ isdir = 0;
-+ mode = h_inode->i_mode;
-+ switch (mode & S_IFMT) {
-+ case S_IFREG:
-+ isreg = 1;
-+ err = vfsub_create(h_dir, &h_path, mode | S_IWUSR,
-+ /*want_excl*/true);
-+ if (!err)
-+ err = au_do_cpup_regular(cpg, h_src_attr);
-+ break;
-+ case S_IFDIR:
-+ isdir = 1;
-+ err = vfsub_mkdir(h_dir, &h_path, mode);
-+ if (!err) {
-+ /*
-+			 * strange behaviour from the user's view,
-+			 * particularly in the setattr case
-+ */
-+ dir = d_inode(dst_parent);
-+ if (au_ibstart(dir) == cpg->bdst)
-+ au_cpup_attr_nlink(dir, /*force*/1);
-+ inode = d_inode(cpg->dentry);
-+ au_cpup_attr_nlink(inode, /*force*/1);
-+ }
-+ break;
-+ case S_IFLNK:
-+ err = au_do_cpup_symlink(&h_path, h_src, h_dir);
-+ break;
-+ case S_IFCHR:
-+ case S_IFBLK:
-+ AuDebugOn(!capable(CAP_MKNOD));
-+ /*FALLTHROUGH*/
-+ case S_IFIFO:
-+ case S_IFSOCK:
-+ err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
-+ break;
-+ default:
-+ AuIOErr("Unknown inode type 0%o\n", mode);
-+ err = -EIO;
-+ }
-+
-+ mnt_flags = au_mntflags(sb);
-+ if (!au_opt_test(mnt_flags, UDBA_NONE)
-+ && !isdir
-+ && au_opt_test(mnt_flags, XINO)
-+ && (h_inode->i_nlink == 1
-+ || (h_inode->i_state & I_LINKABLE))
-+ /* todo: unnecessary? */
-+ /* && d_inode(cpg->dentry)->i_nlink == 1 */
-+ && cpg->bdst < cpg->bsrc
-+ && !au_ftest_cpup(cpg->flags, KEEPLINO))
-+ au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
-+ /* ignore this error */
-+
-+ if (!err) {
-+ force = 0;
-+ if (isreg) {
-+ force = !!cpg->len;
-+ if (cpg->len == -1)
-+ force = !!i_size_read(h_inode);
-+ }
-+ au_fhsm_wrote(sb, cpg->bdst, force);
-+ }
-+
-+ if (do_dt)
-+ au_dtime_revert(&dt);
-+ return err;
-+}
-+
-+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path)
-+{
-+ int err;
-+ struct dentry *dentry, *h_dentry, *h_parent, *parent;
-+ struct inode *h_dir;
-+ aufs_bindex_t bdst;
-+
-+ dentry = cpg->dentry;
-+ bdst = cpg->bdst;
-+ h_dentry = au_h_dptr(dentry, bdst);
-+ if (!au_ftest_cpup(cpg->flags, OVERWRITE)) {
-+ dget(h_dentry);
-+ au_set_h_dptr(dentry, bdst, NULL);
-+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
-+ if (!err)
-+ h_path->dentry = dget(au_h_dptr(dentry, bdst));
-+ au_set_h_dptr(dentry, bdst, h_dentry);
-+ } else {
-+ err = 0;
-+ parent = dget_parent(dentry);
-+ h_parent = au_h_dptr(parent, bdst);
-+ dput(parent);
-+ h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
-+ if (IS_ERR(h_path->dentry))
-+ err = PTR_ERR(h_path->dentry);
-+ }
-+ if (unlikely(err))
-+ goto out;
-+
-+ h_parent = h_dentry->d_parent; /* dir inode is locked */
-+ h_dir = d_inode(h_parent);
-+ IMustLock(h_dir);
-+ AuDbg("%pd %pd\n", h_dentry, h_path->dentry);
-+ /* no delegation since it is just created */
-+ err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL);
-+ dput(h_path->dentry);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * copyup the @dentry from @bsrc to @bdst.
-+ * the caller must set both of the lower dentries.
-+ * @len is for truncating; when it is -1, copyup the entire file.
-+ * in link/rename cases, @dst_parent may be different from the real one.
-+ * cpg->bsrc can be larger than cpg->bdst.
-+ */
-+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
-+{
-+ int err, rerr;
-+ aufs_bindex_t old_ibstart;
-+ unsigned char isdir, plink;
-+ struct dentry *h_src, *h_dst, *h_parent;
-+ struct inode *dst_inode, *h_dir, *inode, *delegated, *src_inode;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+	/* to reduce stack size */
-+ struct {
-+ struct au_dtime dt;
-+ struct path h_path;
-+ struct au_cpup_reg_attr h_src_attr;
-+ } *a;
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+ a->h_src_attr.valid = 0;
-+
-+ sb = cpg->dentry->d_sb;
-+ br = au_sbr(sb, cpg->bdst);
-+ a->h_path.mnt = au_br_mnt(br);
-+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
-+ h_parent = h_dst->d_parent; /* dir inode is locked */
-+ h_dir = d_inode(h_parent);
-+ IMustLock(h_dir);
-+
-+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
-+ inode = d_inode(cpg->dentry);
-+
-+ if (!dst_parent)
-+ dst_parent = dget_parent(cpg->dentry);
-+ else
-+ dget(dst_parent);
-+
-+ plink = !!au_opt_test(au_mntflags(sb), PLINK);
-+ dst_inode = au_h_iptr(inode, cpg->bdst);
-+ if (dst_inode) {
-+ if (unlikely(!plink)) {
-+ err = -EIO;
-+ AuIOErr("hi%lu(i%lu) exists on b%d "
-+ "but plink is disabled\n",
-+ dst_inode->i_ino, inode->i_ino, cpg->bdst);
-+ goto out_parent;
-+ }
-+
-+ if (dst_inode->i_nlink) {
-+ const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
-+
-+ h_src = au_plink_lkup(inode, cpg->bdst);
-+ err = PTR_ERR(h_src);
-+ if (IS_ERR(h_src))
-+ goto out_parent;
-+ if (unlikely(d_is_negative(h_src))) {
-+ err = -EIO;
-+ AuIOErr("i%lu exists on a upper branch "
-+				AuIOErr("i%lu exists on an upper branch "
-+ inode->i_ino);
-+ dput(h_src);
-+ goto out_parent;
-+ }
-+
-+ if (do_dt) {
-+ a->h_path.dentry = h_parent;
-+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
-+ }
-+
-+ a->h_path.dentry = h_dst;
-+ delegated = NULL;
-+ err = vfsub_link(h_src, h_dir, &a->h_path, &delegated);
-+ if (!err && au_ftest_cpup(cpg->flags, RENAME))
-+ err = au_do_ren_after_cpup(cpg, &a->h_path);
-+ if (do_dt)
-+ au_dtime_revert(&a->dt);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ dput(h_src);
-+ goto out_parent;
-+ } else
-+ /* todo: cpup_wh_file? */
-+ /* udba work */
-+ au_update_ibrange(inode, /*do_put_zero*/1);
-+ }
-+
-+ isdir = S_ISDIR(inode->i_mode);
-+ old_ibstart = au_ibstart(inode);
-+ err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
-+ if (unlikely(err))
-+ goto out_rev;
-+ dst_inode = d_inode(h_dst);
-+ mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2);
-+ /* todo: necessary? */
-+ /* au_pin_hdir_unlock(cpg->pin); */
-+
-+ err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
-+ if (unlikely(err)) {
-+ /* todo: necessary? */
-+ /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
-+ mutex_unlock(&dst_inode->i_mutex);
-+ goto out_rev;
-+ }
-+
-+ if (cpg->bdst < old_ibstart) {
-+ if (S_ISREG(inode->i_mode)) {
-+ err = au_dy_iaop(inode, cpg->bdst, dst_inode);
-+ if (unlikely(err)) {
-+ /* ignore an error */
-+ /* au_pin_hdir_relock(cpg->pin); */
-+ mutex_unlock(&dst_inode->i_mutex);
-+ goto out_rev;
-+ }
-+ }
-+ au_set_ibstart(inode, cpg->bdst);
-+ } else
-+ au_set_ibend(inode, cpg->bdst);
-+ au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
-+ au_hi_flags(inode, isdir));
-+
-+ /* todo: necessary? */
-+ /* err = au_pin_hdir_relock(cpg->pin); */
-+ mutex_unlock(&dst_inode->i_mutex);
-+ if (unlikely(err))
-+ goto out_rev;
-+
-+ src_inode = d_inode(h_src);
-+ if (!isdir
-+ && (src_inode->i_nlink > 1
-+ || src_inode->i_state & I_LINKABLE)
-+ && plink)
-+ au_plink_append(inode, cpg->bdst, h_dst);
-+
-+ if (au_ftest_cpup(cpg->flags, RENAME)) {
-+ a->h_path.dentry = h_dst;
-+ err = au_do_ren_after_cpup(cpg, &a->h_path);
-+ }
-+ if (!err)
-+ goto out_parent; /* success */
-+
-+ /* revert */
-+out_rev:
-+ a->h_path.dentry = h_parent;
-+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
-+ a->h_path.dentry = h_dst;
-+ rerr = 0;
-+ if (d_is_positive(h_dst)) {
-+ if (!isdir) {
-+ /* no delegation since it is just created */
-+ rerr = vfsub_unlink(h_dir, &a->h_path,
-+ /*delegated*/NULL, /*force*/0);
-+ } else
-+ rerr = vfsub_rmdir(h_dir, &a->h_path);
-+ }
-+ au_dtime_revert(&a->dt);
-+ if (rerr) {
-+ AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
-+ err = -EIO;
-+ }
-+out_parent:
-+ dput(dst_parent);
-+ kfree(a);
-+out:
-+ return err;
-+}
-+
-+#if 0 /* reserved */
-+struct au_cpup_single_args {
-+ int *errp;
-+ struct au_cp_generic *cpg;
-+ struct dentry *dst_parent;
-+};
-+
-+static void au_call_cpup_single(void *args)
-+{
-+ struct au_cpup_single_args *a = args;
-+
-+ au_pin_hdir_acquire_nest(a->cpg->pin);
-+ *a->errp = au_cpup_single(a->cpg, a->dst_parent);
-+ au_pin_hdir_release(a->cpg->pin);
-+}
-+#endif
-+
-+/*
-+ * prevent SIGXFSZ in copy-up.
-+ * testing CAP_MKNOD is for generic fs,
-+ * but CAP_FSETID is for xfs only, currently.
-+ */
-+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
-+{
-+ int do_sio;
-+ struct super_block *sb;
-+ struct inode *h_dir;
-+
-+ do_sio = 0;
-+ sb = au_pinned_parent(pin)->d_sb;
-+ if (!au_wkq_test()
-+ && (!au_sbi(sb)->si_plink_maint_pid
-+ || au_plink_maint(sb, AuLock_NOPLM))) {
-+ switch (mode & S_IFMT) {
-+ case S_IFREG:
-+ /* no condition about RLIMIT_FSIZE and the file size */
-+ do_sio = 1;
-+ break;
-+ case S_IFCHR:
-+ case S_IFBLK:
-+ do_sio = !capable(CAP_MKNOD);
-+ break;
-+ }
-+ if (!do_sio)
-+ do_sio = ((mode & (S_ISUID | S_ISGID))
-+ && !capable(CAP_FSETID));
-+ /* this workaround may be removed in the future */
-+ if (!do_sio) {
-+ h_dir = au_pinned_h_dir(pin);
-+ do_sio = h_dir->i_mode & S_ISVTX;
-+ }
-+ }
-+
-+ return do_sio;
-+}
-+
-+#if 0 /* reserved */
-+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
-+{
-+ int err, wkq_err;
-+ struct dentry *h_dentry;
-+
-+ h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
-+ if (!au_cpup_sio_test(pin, d_inode(h_dentry)->i_mode))
-+ err = au_cpup_single(cpg, dst_parent);
-+ else {
-+ struct au_cpup_single_args args = {
-+ .errp = &err,
-+ .cpg = cpg,
-+ .dst_parent = dst_parent
-+ };
-+ wkq_err = au_wkq_wait(au_call_cpup_single, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ return err;
-+}
-+#endif
-+
-+/*
-+ * copyup the @dentry from the first active lower branch to @bdst,
-+ * using au_cpup_single().
-+ */
-+static int au_cpup_simple(struct au_cp_generic *cpg)
-+{
-+ int err;
-+ unsigned int flags_orig;
-+ struct dentry *dentry;
-+
-+ AuDebugOn(cpg->bsrc < 0);
-+
-+ dentry = cpg->dentry;
-+ DiMustWriteLock(dentry);
-+
-+ err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
-+ if (!err) {
-+ flags_orig = cpg->flags;
-+ au_fset_cpup(cpg->flags, RENAME);
-+ err = au_cpup_single(cpg, NULL);
-+ cpg->flags = flags_orig;
-+ if (!err)
-+ return 0; /* success */
-+
-+ /* revert */
-+ au_set_h_dptr(dentry, cpg->bdst, NULL);
-+ au_set_dbstart(dentry, cpg->bsrc);
-+ }
-+
-+ return err;
-+}
-+
-+struct au_cpup_simple_args {
-+ int *errp;
-+ struct au_cp_generic *cpg;
-+};
-+
-+static void au_call_cpup_simple(void *args)
-+{
-+ struct au_cpup_simple_args *a = args;
-+
-+ au_pin_hdir_acquire_nest(a->cpg->pin);
-+ *a->errp = au_cpup_simple(a->cpg);
-+ au_pin_hdir_release(a->cpg->pin);
-+}
-+
-+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg)
-+{
-+ int err, wkq_err;
-+ struct dentry *dentry, *parent;
-+ struct file *h_file;
-+ struct inode *h_dir;
-+
-+ dentry = cpg->dentry;
-+ h_file = NULL;
-+ if (au_ftest_cpup(cpg->flags, HOPEN)) {
-+ AuDebugOn(cpg->bsrc < 0);
-+ h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+ }
-+
-+ parent = dget_parent(dentry);
-+ h_dir = au_h_iptr(d_inode(parent), cpg->bdst);
-+ if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
-+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
-+ err = au_cpup_simple(cpg);
-+ else {
-+ struct au_cpup_simple_args args = {
-+ .errp = &err,
-+ .cpg = cpg
-+ };
-+ wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ dput(parent);
-+ if (h_file)
-+ au_h_open_post(dentry, cpg->bsrc, h_file);
-+
-+out:
-+ return err;
-+}
-+
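-+/*
-+ * when cpg->bsrc is negative, choose the first branch after cpg->bdst
-+ * which has a positive lower dentry as the copy-up source.
-+ */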
-+int au_sio_cpup_simple(struct au_cp_generic *cpg)
-+{
-+ aufs_bindex_t bsrc, bend;
-+ struct dentry *dentry, *h_dentry;
-+
-+ if (cpg->bsrc < 0) {
-+ dentry = cpg->dentry;
-+ bend = au_dbend(dentry);
-+ for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) {
-+ h_dentry = au_h_dptr(dentry, bsrc);
-+ if (h_dentry) {
-+ AuDebugOn(d_is_negative(h_dentry));
-+ break;
-+ }
-+ }
-+ AuDebugOn(bsrc > bend);
-+ cpg->bsrc = bsrc;
-+ }
-+ AuDebugOn(cpg->bsrc <= cpg->bdst);
-+ return au_do_sio_cpup_simple(cpg);
-+}
-+
-+int au_sio_cpdown_simple(struct au_cp_generic *cpg)
-+{
-+ AuDebugOn(cpg->bdst <= cpg->bsrc);
-+ return au_do_sio_cpup_simple(cpg);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * copyup the deleted file for writing.
-+ */
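-+/*
-+ * au_do_cpup_wh() temporarily rewires the dinfo: di_bstart and the bdst
-+ * hdentry slot point to the whtmp dentry (and, when @file is given, the
-+ * bsrc slot points to its opened lower dentry), so that au_cpup_single()
-+ * copies into the whtmp entry; everything is restored before returning.
-+ */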
-+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
-+ struct file *file)
-+{
-+ int err;
-+ unsigned int flags_orig;
-+ aufs_bindex_t bsrc_orig;
-+ struct dentry *h_d_dst, *h_d_start;
-+ struct au_dinfo *dinfo;
-+ struct au_hdentry *hdp;
-+
-+ dinfo = au_di(cpg->dentry);
-+ AuRwMustWriteLock(&dinfo->di_rwsem);
-+
-+ bsrc_orig = cpg->bsrc;
-+ cpg->bsrc = dinfo->di_bstart;
-+ hdp = dinfo->di_hdentry;
-+ h_d_dst = hdp[0 + cpg->bdst].hd_dentry;
-+ dinfo->di_bstart = cpg->bdst;
-+ hdp[0 + cpg->bdst].hd_dentry = wh_dentry;
-+ h_d_start = NULL;
-+ if (file) {
-+ h_d_start = hdp[0 + cpg->bsrc].hd_dentry;
-+ hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_path.dentry;
-+ }
-+ flags_orig = cpg->flags;
-+ cpg->flags = !AuCpup_DTIME;
-+ err = au_cpup_single(cpg, /*h_parent*/NULL);
-+ cpg->flags = flags_orig;
-+ if (file) {
-+ if (!err)
-+ err = au_reopen_nondir(file);
-+ hdp[0 + cpg->bsrc].hd_dentry = h_d_start;
-+ }
-+ hdp[0 + cpg->bdst].hd_dentry = h_d_dst;
-+ dinfo->di_bstart = cpg->bsrc;
-+ cpg->bsrc = bsrc_orig;
-+
-+ return err;
-+}
-+
-+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
-+{
-+ int err;
-+ aufs_bindex_t bdst;
-+ struct au_dtime dt;
-+ struct dentry *dentry, *parent, *h_parent, *wh_dentry;
-+ struct au_branch *br;
-+ struct path h_path;
-+
-+ dentry = cpg->dentry;
-+ bdst = cpg->bdst;
-+ br = au_sbr(dentry->d_sb, bdst);
-+ parent = dget_parent(dentry);
-+ h_parent = au_h_dptr(parent, bdst);
-+ wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out;
-+
-+ h_path.dentry = h_parent;
-+ h_path.mnt = au_br_mnt(br);
-+ au_dtime_store(&dt, parent, &h_path);
-+ err = au_do_cpup_wh(cpg, wh_dentry, file);
-+ if (unlikely(err))
-+ goto out_wh;
-+
-+ dget(wh_dentry);
-+ h_path.dentry = wh_dentry;
-+ if (!d_is_dir(wh_dentry)) {
-+ /* no delegation since it is just created */
-+ err = vfsub_unlink(d_inode(h_parent), &h_path,
-+ /*delegated*/NULL, /*force*/0);
-+ } else
-+ err = vfsub_rmdir(d_inode(h_parent), &h_path);
-+ if (unlikely(err)) {
-+		AuIOErr("failed removing copied-up tmp file %pd(%d)\n",
-+ wh_dentry, err);
-+ err = -EIO;
-+ }
-+ au_dtime_revert(&dt);
-+ au_set_hi_wh(d_inode(dentry), bdst, wh_dentry);
-+
-+out_wh:
-+ dput(wh_dentry);
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+struct au_cpup_wh_args {
-+ int *errp;
-+ struct au_cp_generic *cpg;
-+ struct file *file;
-+};
-+
-+static void au_call_cpup_wh(void *args)
-+{
-+ struct au_cpup_wh_args *a = args;
-+
-+ au_pin_hdir_acquire_nest(a->cpg->pin);
-+ *a->errp = au_cpup_wh(a->cpg, a->file);
-+ au_pin_hdir_release(a->cpg->pin);
-+}
-+
-+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
-+{
-+ int err, wkq_err;
-+ aufs_bindex_t bdst;
-+ struct dentry *dentry, *parent, *h_orph, *h_parent;
-+ struct inode *dir, *h_dir, *h_tmpdir;
-+ struct au_wbr *wbr;
-+ struct au_pin wh_pin, *pin_orig;
-+
-+ dentry = cpg->dentry;
-+ bdst = cpg->bdst;
-+ parent = dget_parent(dentry);
-+ dir = d_inode(parent);
-+ h_orph = NULL;
-+ h_parent = NULL;
-+ h_dir = au_igrab(au_h_iptr(dir, bdst));
-+ h_tmpdir = h_dir;
-+ pin_orig = NULL;
-+ if (!h_dir->i_nlink) {
-+ wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
-+ h_orph = wbr->wbr_orph;
-+
-+ h_parent = dget(au_h_dptr(parent, bdst));
-+ au_set_h_dptr(parent, bdst, dget(h_orph));
-+ h_tmpdir = d_inode(h_orph);
-+ au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
-+
-+ mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3);
-+ /* todo: au_h_open_pre()? */
-+
-+ pin_orig = cpg->pin;
-+ au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
-+ AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
-+ cpg->pin = &wh_pin;
-+ }
-+
-+ if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
-+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
-+ err = au_cpup_wh(cpg, file);
-+ else {
-+ struct au_cpup_wh_args args = {
-+ .errp = &err,
-+ .cpg = cpg,
-+ .file = file
-+ };
-+ wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ if (h_orph) {
-+ mutex_unlock(&h_tmpdir->i_mutex);
-+ /* todo: au_h_open_post()? */
-+ au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
-+ au_set_h_dptr(parent, bdst, h_parent);
-+ AuDebugOn(!pin_orig);
-+ cpg->pin = pin_orig;
-+ }
-+ iput(h_dir);
-+ dput(parent);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * generic routine for both copy-up and copy-down.
-+ */
-+/* cf. revalidate function in file.c */
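-+/*
-+ * each pass of the loop below walks up to the nearest ancestor which
-+ * already exists on @bdst and applies @cp to the child just under it, so
-+ * the missing ancestors are created top-down, one per pass, until
-+ * @dentry's own parent exists on the branch.
-+ */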
-+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
-+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
-+ struct au_pin *pin,
-+ struct dentry *h_parent, void *arg),
-+ void *arg)
-+{
-+ int err;
-+ struct au_pin pin;
-+ struct dentry *d, *parent, *h_parent, *real_parent, *h_dentry;
-+
-+ err = 0;
-+ parent = dget_parent(dentry);
-+ if (IS_ROOT(parent))
-+ goto out;
-+
-+ au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
-+ au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
-+
-+ /* do not use au_dpage */
-+ real_parent = parent;
-+ while (1) {
-+ dput(parent);
-+ parent = dget_parent(dentry);
-+ h_parent = au_h_dptr(parent, bdst);
-+ if (h_parent)
-+ goto out; /* success */
-+
-+		/* find the topmost dir which needs to be copied up */
-+ do {
-+ d = parent;
-+ dput(parent);
-+ parent = dget_parent(d);
-+ di_read_lock_parent3(parent, !AuLock_IR);
-+ h_parent = au_h_dptr(parent, bdst);
-+ di_read_unlock(parent, !AuLock_IR);
-+ } while (!h_parent);
-+
-+ if (d != real_parent)
-+ di_write_lock_child3(d);
-+
-+ /* somebody else might create while we were sleeping */
-+ h_dentry = au_h_dptr(d, bdst);
-+ if (!h_dentry || d_is_negative(h_dentry)) {
-+ if (h_dentry)
-+ au_update_dbstart(d);
-+
-+ au_pin_set_dentry(&pin, d);
-+ err = au_do_pin(&pin);
-+ if (!err) {
-+ err = cp(d, bdst, &pin, h_parent, arg);
-+ au_unpin(&pin);
-+ }
-+ }
-+
-+ if (d != real_parent)
-+ di_write_unlock(d);
-+ if (unlikely(err))
-+ break;
-+ }
-+
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
-+ struct au_pin *pin,
-+ struct dentry *h_parent __maybe_unused,
-+ void *arg __maybe_unused)
-+{
-+ struct au_cp_generic cpg = {
-+ .dentry = dentry,
-+ .bdst = bdst,
-+ .bsrc = -1,
-+ .len = 0,
-+ .pin = pin,
-+ .flags = AuCpup_DTIME
-+ };
-+ return au_sio_cpup_simple(&cpg);
-+}
-+
-+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
-+{
-+ return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
-+}
-+
-+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
-+{
-+ int err;
-+ struct dentry *parent;
-+ struct inode *dir;
-+
-+ parent = dget_parent(dentry);
-+ dir = d_inode(parent);
-+ err = 0;
-+ if (au_h_iptr(dir, bdst))
-+ goto out;
-+
-+ di_read_unlock(parent, AuLock_IR);
-+ di_write_lock_parent(parent);
-+ /* someone else might change our inode while we were sleeping */
-+ if (!au_h_iptr(dir, bdst))
-+ err = au_cpup_dirs(dentry, bdst);
-+ di_downgrade_lock(parent, AuLock_IR);
-+
-+out:
-+ dput(parent);
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/cpup.h linux-4.1.10/fs/aufs/cpup.h
---- linux-4.1.10.orig/fs/aufs/cpup.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/cpup.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,94 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * copy-up/down functions
-+ */
-+
-+#ifndef __AUFS_CPUP_H__
-+#define __AUFS_CPUP_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/path.h>
-+
-+struct inode;
-+struct file;
-+struct au_pin;
-+
-+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
-+void au_cpup_attr_timesizes(struct inode *inode);
-+void au_cpup_attr_nlink(struct inode *inode, int force);
-+void au_cpup_attr_changeable(struct inode *inode);
-+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
-+void au_cpup_attr_all(struct inode *inode, int force);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_cp_generic {
-+ struct dentry *dentry;
-+ aufs_bindex_t bdst, bsrc;
-+ loff_t len;
-+ struct au_pin *pin;
-+ unsigned int flags;
-+};
-+
-+/* cpup flags */
-+#define AuCpup_DTIME 1 /* do dtime_store/revert */
-+#define AuCpup_KEEPLINO (1 << 1) /* do not clear the lower xino,
-+ for link(2) */
-+#define AuCpup_RENAME (1 << 2) /* rename after cpup */
-+#define AuCpup_HOPEN (1 << 3) /* call h_open_pre/post() in
-+ cpup */
-+#define AuCpup_OVERWRITE (1 << 4) /* allow overwriting the
-+ existing entry */
-+#define AuCpup_RWDST (1 << 5) /* force write target even if
-+ the branch is marked as RO */
-+
-+#define au_ftest_cpup(flags, name) ((flags) & AuCpup_##name)
-+#define au_fset_cpup(flags, name) \
-+ do { (flags) |= AuCpup_##name; } while (0)
-+#define au_fclr_cpup(flags, name) \
-+ do { (flags) &= ~AuCpup_##name; } while (0)
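-+/* e.g., as in au_cpup_simple(): au_fset_cpup(cpg->flags, RENAME); */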
-+
-+int au_copy_file(struct file *dst, struct file *src, loff_t len);
-+int au_sio_cpup_simple(struct au_cp_generic *cpg);
-+int au_sio_cpdown_simple(struct au_cp_generic *cpg);
-+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
-+
-+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
-+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
-+ struct au_pin *pin,
-+ struct dentry *h_parent, void *arg),
-+ void *arg);
-+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
-+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* keep timestamps when copyup */
-+struct au_dtime {
-+ struct dentry *dt_dentry;
-+ struct path dt_h_path;
-+ struct timespec dt_atime, dt_mtime;
-+};
-+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
-+ struct path *h_path);
-+void au_dtime_revert(struct au_dtime *dt);
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_CPUP_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/dbgaufs.c linux-4.1.10/fs/aufs/dbgaufs.c
---- linux-4.1.10.orig/fs/aufs/dbgaufs.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dbgaufs.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,432 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * debugfs interface
-+ */
-+
-+#include <linux/debugfs.h>
-+#include "aufs.h"
-+
-+#ifndef CONFIG_SYSFS
-+#error DEBUG_FS depends upon SYSFS
-+#endif
-+
-+static struct dentry *dbgaufs;
-+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH;
-+
-+/* 20 is the maximum number of digits of a 64-bit ulong */
-+struct dbgaufs_arg {
-+ int n;
-+ char a[20 * 4];
-+};
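-+/* a[] is sized for the up-to-four numbers printed by dbgaufs_xi_open() */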
-+
-+/*
-+ * common function for all XINO files
-+ */
-+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
-+ struct file *file)
-+{
-+ kfree(file->private_data);
-+ return 0;
-+}
-+
-+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt)
-+{
-+ int err;
-+ struct kstat st;
-+ struct dbgaufs_arg *p;
-+
-+ err = -ENOMEM;
-+ p = kmalloc(sizeof(*p), GFP_NOFS);
-+ if (unlikely(!p))
-+ goto out;
-+
-+ err = 0;
-+ p->n = 0;
-+ file->private_data = p;
-+ if (!xf)
-+ goto out;
-+
-+ err = vfs_getattr(&xf->f_path, &st);
-+ if (!err) {
-+ if (do_fcnt)
-+ p->n = snprintf
-+ (p->a, sizeof(p->a), "%ld, %llux%lu %lld\n",
-+ (long)file_count(xf), st.blocks, st.blksize,
-+ (long long)st.size);
-+ else
-+ p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n",
-+ st.blocks, st.blksize,
-+ (long long)st.size);
-+ AuDebugOn(p->n >= sizeof(p->a));
-+ } else {
-+ p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
-+ err = 0;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct dbgaufs_arg *p;
-+
-+ p = file->private_data;
-+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct dbgaufs_plink_arg {
-+ int n;
-+ char a[];
-+};
-+
-+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
-+ struct file *file)
-+{
-+ free_page((unsigned long)file->private_data);
-+ return 0;
-+}
-+
-+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
-+{
-+ int err, i, limit;
-+ unsigned long n, sum;
-+ struct dbgaufs_plink_arg *p;
-+ struct au_sbinfo *sbinfo;
-+ struct super_block *sb;
-+ struct au_sphlhead *sphl;
-+
-+ err = -ENOMEM;
-+ p = (void *)get_zeroed_page(GFP_NOFS);
-+ if (unlikely(!p))
-+ goto out;
-+
-+ err = -EFBIG;
-+ sbinfo = inode->i_private;
-+ sb = sbinfo->si_sb;
-+ si_noflush_read_lock(sb);
-+ if (au_opt_test(au_mntflags(sb), PLINK)) {
-+ limit = PAGE_SIZE - sizeof(p->n);
-+
-+ /* the number of buckets */
-+ n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
-+ p->n += n;
-+ limit -= n;
-+
-+ sum = 0;
-+ for (i = 0, sphl = sbinfo->si_plink;
-+ i < AuPlink_NHASH;
-+ i++, sphl++) {
-+ n = au_sphl_count(sphl);
-+ sum += n;
-+
-+ n = snprintf(p->a + p->n, limit, "%lu ", n);
-+ p->n += n;
-+ limit -= n;
-+ if (unlikely(limit <= 0))
-+ goto out_free;
-+ }
-+ p->a[p->n - 1] = '\n';
-+
-+ /* the sum of plinks */
-+ n = snprintf(p->a + p->n, limit, "%lu\n", sum);
-+ p->n += n;
-+ limit -= n;
-+ if (unlikely(limit <= 0))
-+ goto out_free;
-+ } else {
-+#define str "1\n0\n0\n"
-+ p->n = sizeof(str) - 1;
-+ strcpy(p->a, str);
-+#undef str
-+ }
-+ si_read_unlock(sb);
-+
-+ err = 0;
-+ file->private_data = p;
-+ goto out; /* success */
-+
-+out_free:
-+ free_page((unsigned long)p);
-+out:
-+ return err;
-+}
-+
-+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct dbgaufs_plink_arg *p;
-+
-+ p = file->private_data;
-+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
-+}
-+
-+static const struct file_operations dbgaufs_plink_fop = {
-+ .owner = THIS_MODULE,
-+ .open = dbgaufs_plink_open,
-+ .release = dbgaufs_plink_release,
-+ .read = dbgaufs_plink_read
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+ struct super_block *sb;
-+
-+ sbinfo = inode->i_private;
-+ sb = sbinfo->si_sb;
-+ si_noflush_read_lock(sb);
-+ err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+static const struct file_operations dbgaufs_xib_fop = {
-+ .owner = THIS_MODULE,
-+ .open = dbgaufs_xib_open,
-+ .release = dbgaufs_xi_release,
-+ .read = dbgaufs_xi_read
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define DbgaufsXi_PREFIX "xi"
-+
-+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ long l;
-+ struct au_sbinfo *sbinfo;
-+ struct super_block *sb;
-+ struct file *xf;
-+ struct qstr *name;
-+
-+ err = -ENOENT;
-+ xf = NULL;
-+ name = &file->f_path.dentry->d_name;
-+ if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
-+ || memcmp(name->name, DbgaufsXi_PREFIX,
-+ sizeof(DbgaufsXi_PREFIX) - 1)))
-+ goto out;
-+ err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
-+ if (unlikely(err))
-+ goto out;
-+
-+ sbinfo = inode->i_private;
-+ sb = sbinfo->si_sb;
-+ si_noflush_read_lock(sb);
-+ if (l <= au_sbend(sb)) {
-+ xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file;
-+ err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1);
-+ } else
-+ err = -ENOENT;
-+ si_read_unlock(sb);
-+
-+out:
-+ return err;
-+}
-+
-+static const struct file_operations dbgaufs_xino_fop = {
-+ .owner = THIS_MODULE,
-+ .open = dbgaufs_xino_open,
-+ .release = dbgaufs_xi_release,
-+ .read = dbgaufs_xi_read
-+};
-+
-+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ aufs_bindex_t bend;
-+ struct au_branch *br;
-+ struct au_xino_file *xi;
-+
-+ if (!au_sbi(sb)->si_dbgaufs)
-+ return;
-+
-+ bend = au_sbend(sb);
-+ for (; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ xi = &br->br_xino;
-+ debugfs_remove(xi->xi_dbgaufs);
-+ xi->xi_dbgaufs = NULL;
-+ }
-+}
-+
-+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct dentry *parent;
-+ struct au_branch *br;
-+ struct au_xino_file *xi;
-+ aufs_bindex_t bend;
-+ char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */
-+
-+ sbinfo = au_sbi(sb);
-+ parent = sbinfo->si_dbgaufs;
-+ if (!parent)
-+ return;
-+
-+ bend = au_sbend(sb);
-+ for (; bindex <= bend; bindex++) {
-+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
-+ br = au_sbr(sb, bindex);
-+ xi = &br->br_xino;
-+ AuDebugOn(xi->xi_dbgaufs);
-+ xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
-+ sbinfo, &dbgaufs_xino_fop);
-+ /* ignore an error */
-+ if (unlikely(!xi->xi_dbgaufs))
-+ AuWarn1("failed %s under debugfs\n", name);
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_EXPORT
-+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+ struct super_block *sb;
-+
-+ sbinfo = inode->i_private;
-+ sb = sbinfo->si_sb;
-+ si_noflush_read_lock(sb);
-+ err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+static const struct file_operations dbgaufs_xigen_fop = {
-+ .owner = THIS_MODULE,
-+ .open = dbgaufs_xigen_open,
-+ .release = dbgaufs_xi_release,
-+ .read = dbgaufs_xi_read
-+};
-+
-+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
-+{
-+ int err;
-+
-+ /*
-+ * This function is a dynamic '__init' function actually,
-+ * so the tiny check for si_rwsem is unnecessary.
-+ */
-+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
-+
-+ err = -EIO;
-+ sbinfo->si_dbgaufs_xigen = debugfs_create_file
-+ ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
-+ &dbgaufs_xigen_fop);
-+ if (sbinfo->si_dbgaufs_xigen)
-+ err = 0;
-+
-+ return err;
-+}
-+#else
-+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_AUFS_EXPORT */
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
-+{
-+ /*
-+ * This function is a dynamic '__fin' function actually,
-+ * so the tiny check for si_rwsem is unnecessary.
-+ */
-+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
-+
-+ debugfs_remove_recursive(sbinfo->si_dbgaufs);
-+ sbinfo->si_dbgaufs = NULL;
-+ kobject_put(&sbinfo->si_kobj);
-+}
-+
-+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
-+{
-+ int err;
-+ char name[SysaufsSiNameLen];
-+
-+ /*
-+ * This function is a dynamic '__init' function actually,
-+ * so the tiny check for si_rwsem is unnecessary.
-+ */
-+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
-+
-+ err = -ENOENT;
-+ if (!dbgaufs) {
-+ AuErr1("/debug/aufs is uninitialized\n");
-+ goto out;
-+ }
-+
-+ err = -EIO;
-+ sysaufs_name(sbinfo, name);
-+ sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
-+ if (unlikely(!sbinfo->si_dbgaufs))
-+ goto out;
-+ kobject_get(&sbinfo->si_kobj);
-+
-+ sbinfo->si_dbgaufs_xib = debugfs_create_file
-+ ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
-+ &dbgaufs_xib_fop);
-+ if (unlikely(!sbinfo->si_dbgaufs_xib))
-+ goto out_dir;
-+
-+ sbinfo->si_dbgaufs_plink = debugfs_create_file
-+ ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
-+ &dbgaufs_plink_fop);
-+ if (unlikely(!sbinfo->si_dbgaufs_plink))
-+ goto out_dir;
-+
-+ err = dbgaufs_xigen_init(sbinfo);
-+ if (!err)
-+ goto out; /* success */
-+
-+out_dir:
-+ dbgaufs_si_fin(sbinfo);
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void dbgaufs_fin(void)
-+{
-+ debugfs_remove(dbgaufs);
-+}
-+
-+int __init dbgaufs_init(void)
-+{
-+ int err;
-+
-+ err = -EIO;
-+ dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
-+ if (dbgaufs)
-+ err = 0;
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/dbgaufs.h linux-4.1.10/fs/aufs/dbgaufs.h
---- linux-4.1.10.orig/fs/aufs/dbgaufs.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dbgaufs.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,48 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * debugfs interface
-+ */
-+
-+#ifndef __DBGAUFS_H__
-+#define __DBGAUFS_H__
-+
-+#ifdef __KERNEL__
-+
-+struct super_block;
-+struct au_sbinfo;
-+
-+#ifdef CONFIG_DEBUG_FS
-+/* dbgaufs.c */
-+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
-+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
-+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
-+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
-+void dbgaufs_fin(void);
-+int __init dbgaufs_init(void);
-+#else
-+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
-+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
-+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
-+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
-+AuStubVoid(dbgaufs_fin, void)
-+AuStubInt0(__init dbgaufs_init, void)
-+#endif /* CONFIG_DEBUG_FS */
-+
-+#endif /* __KERNEL__ */
-+#endif /* __DBGAUFS_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/dcsub.c linux-4.1.10/fs/aufs/dcsub.c
---- linux-4.1.10.orig/fs/aufs/dcsub.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dcsub.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,224 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sub-routines for dentry cache
-+ */
-+
-+#include "aufs.h"
-+
-+static void au_dpage_free(struct au_dpage *dpage)
-+{
-+ int i;
-+ struct dentry **p;
-+
-+ p = dpage->dentries;
-+ for (i = 0; i < dpage->ndentry; i++)
-+ dput(*p++);
-+ free_page((unsigned long)dpage->dentries);
-+}
-+
-+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
-+{
-+ int err;
-+ void *p;
-+
-+ err = -ENOMEM;
-+ dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
-+ if (unlikely(!dpages->dpages))
-+ goto out;
-+
-+ p = (void *)__get_free_page(gfp);
-+ if (unlikely(!p))
-+ goto out_dpages;
-+
-+ dpages->dpages[0].ndentry = 0;
-+ dpages->dpages[0].dentries = p;
-+ dpages->ndpage = 1;
-+ return 0; /* success */
-+
-+out_dpages:
-+ kfree(dpages->dpages);
-+out:
-+ return err;
-+}
-+
-+void au_dpages_free(struct au_dcsub_pages *dpages)
-+{
-+ int i;
-+ struct au_dpage *p;
-+
-+ p = dpages->dpages;
-+ for (i = 0; i < dpages->ndpage; i++)
-+ au_dpage_free(p++);
-+ kfree(dpages->dpages);
-+}
-+
-+static int au_dpages_append(struct au_dcsub_pages *dpages,
-+ struct dentry *dentry, gfp_t gfp)
-+{
-+ int err, sz;
-+ struct au_dpage *dpage;
-+ void *p;
-+
-+ dpage = dpages->dpages + dpages->ndpage - 1;
-+ sz = PAGE_SIZE / sizeof(dentry);
-+ if (unlikely(dpage->ndentry >= sz)) {
-+ AuLabel(new dpage);
-+ err = -ENOMEM;
-+ sz = dpages->ndpage * sizeof(*dpages->dpages);
-+ p = au_kzrealloc(dpages->dpages, sz,
-+ sz + sizeof(*dpages->dpages), gfp);
-+ if (unlikely(!p))
-+ goto out;
-+
-+ dpages->dpages = p;
-+ dpage = dpages->dpages + dpages->ndpage;
-+ p = (void *)__get_free_page(gfp);
-+ if (unlikely(!p))
-+ goto out;
-+
-+ dpage->ndentry = 0;
-+ dpage->dentries = p;
-+ dpages->ndpage++;
-+ }
-+
-+ AuDebugOn(au_dcount(dentry) <= 0);
-+ dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
-+ return 0; /* success */
-+
-+out:
-+ return err;
-+}
-+
-+/* todo: BAD approach */
-+/* copied from linux/fs/dcache.c */
-+enum d_walk_ret {
-+ D_WALK_CONTINUE,
-+ D_WALK_QUIT,
-+ D_WALK_NORETRY,
-+ D_WALK_SKIP,
-+};
-+
-+extern void d_walk(struct dentry *parent, void *data,
-+ enum d_walk_ret (*enter)(void *, struct dentry *),
-+ void (*finish)(void *));
-+
-+struct ac_dpages_arg {
-+ int err;
-+ struct au_dcsub_pages *dpages;
-+ struct super_block *sb;
-+ au_dpages_test test;
-+ void *arg;
-+};
-+
-+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry)
-+{
-+ enum d_walk_ret ret;
-+ struct ac_dpages_arg *arg = _arg;
-+
-+ ret = D_WALK_CONTINUE;
-+ if (dentry->d_sb == arg->sb
-+ && !IS_ROOT(dentry)
-+ && au_dcount(dentry) > 0
-+ && au_di(dentry)
-+ && (!arg->test || arg->test(dentry, arg->arg))) {
-+ arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC);
-+ if (unlikely(arg->err))
-+ ret = D_WALK_QUIT;
-+ }
-+
-+ return ret;
-+}
-+
-+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
-+ au_dpages_test test, void *arg)
-+{
-+ struct ac_dpages_arg args = {
-+ .err = 0,
-+ .dpages = dpages,
-+ .sb = root->d_sb,
-+ .test = test,
-+ .arg = arg
-+ };
-+
-+ d_walk(root, &args, au_call_dpages_append, NULL);
-+
-+ return args.err;
-+}
-+
-+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
-+ int do_include, au_dpages_test test, void *arg)
-+{
-+ int err;
-+
-+ err = 0;
-+ write_seqlock(&rename_lock);
-+ spin_lock(&dentry->d_lock);
-+ if (do_include
-+ && au_dcount(dentry) > 0
-+ && (!test || test(dentry, arg)))
-+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
-+ spin_unlock(&dentry->d_lock);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /*
-+ * RCU for vfsmount is unnecessary since this is a traverse in a single
-+ * mount
-+ */
-+ while (!IS_ROOT(dentry)) {
-+ dentry = dentry->d_parent; /* rename_lock is locked */
-+ spin_lock(&dentry->d_lock);
-+ if (au_dcount(dentry) > 0
-+ && (!test || test(dentry, arg)))
-+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
-+ spin_unlock(&dentry->d_lock);
-+ if (unlikely(err))
-+ break;
-+ }
-+
-+out:
-+ write_sequnlock(&rename_lock);
-+ return err;
-+}
-+
-+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
-+{
-+ return au_di(dentry) && dentry->d_sb == arg;
-+}
-+
-+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
-+ struct dentry *dentry, int do_include)
-+{
-+ return au_dcsub_pages_rev(dpages, dentry, do_include,
-+ au_dcsub_dpages_aufs, dentry->d_sb);
-+}
-+
-+int au_test_subdir(struct dentry *d1, struct dentry *d2)
-+{
-+ struct path path[2] = {
-+ {
-+ .dentry = d1
-+ },
-+ {
-+ .dentry = d2
-+ }
-+ };
-+
-+ return path_is_under(path + 0, path + 1);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/dcsub.h linux-4.1.10/fs/aufs/dcsub.h
---- linux-4.1.10.orig/fs/aufs/dcsub.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dcsub.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,136 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sub-routines for dentry cache
-+ */
-+
-+#ifndef __AUFS_DCSUB_H__
-+#define __AUFS_DCSUB_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/dcache.h>
-+#include <linux/fs.h>
-+
-+struct au_dpage {
-+ int ndentry;
-+ struct dentry **dentries;
-+};
-+
-+struct au_dcsub_pages {
-+ int ndpage;
-+ struct au_dpage *dpages;
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* dcsub.c */
-+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
-+void au_dpages_free(struct au_dcsub_pages *dpages);
-+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
-+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
-+ au_dpages_test test, void *arg);
-+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
-+ int do_include, au_dpages_test test, void *arg);
-+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
-+ struct dentry *dentry, int do_include);
-+int au_test_subdir(struct dentry *d1, struct dentry *d2);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * todo: in linux-3.13, several similar (but faster) helpers are added to
-+ * include/linux/dcache.h. Try them (in the future).
-+ */
-+
-+static inline int au_d_hashed_positive(struct dentry *d)
-+{
-+ int err;
-+ struct inode *inode = d_inode(d);
-+
-+ err = 0;
-+ if (unlikely(d_unhashed(d)
-+ || d_is_negative(d)
-+ || !inode->i_nlink))
-+ err = -ENOENT;
-+ return err;
-+}
-+
-+static inline int au_d_linkable(struct dentry *d)
-+{
-+ int err;
-+ struct inode *inode = d_inode(d);
-+
-+ err = au_d_hashed_positive(d);
-+ if (err
-+ && d_is_positive(d)
-+ && (inode->i_state & I_LINKABLE))
-+ err = 0;
-+ return err;
-+}
-+
-+static inline int au_d_alive(struct dentry *d)
-+{
-+ int err;
-+ struct inode *inode;
-+
-+ err = 0;
-+ if (!IS_ROOT(d))
-+ err = au_d_hashed_positive(d);
-+ else {
-+ inode = d_inode(d);
-+ if (unlikely(d_unlinked(d)
-+ || d_is_negative(d)
-+ || !inode->i_nlink))
-+ err = -ENOENT;
-+ }
-+ return err;
-+}
-+
-+static inline int au_alive_dir(struct dentry *d)
-+{
-+ int err;
-+
-+ err = au_d_alive(d);
-+ if (unlikely(err || IS_DEADDIR(d_inode(d))))
-+ err = -ENOENT;
-+ return err;
-+}
-+
-+static inline int au_qstreq(struct qstr *a, struct qstr *b)
-+{
-+ return a->len == b->len
-+ && !memcmp(a->name, b->name, a->len);
-+}
-+
-+/*
-+ * by the commit
-+ * 360f547 2015-01-25 dcache: let the dentry count go down to zero without
-+ * taking d_lock
-+ * the type of d_lockref.count became int, but the inlined function d_count()
-+ * still returns unsigned int.
-+ * I don't know why. Maybe it is for every d_count() user?
-+ * Anyway au_dcount() lives on.
-+ */
-+static inline int au_dcount(struct dentry *d)
-+{
-+ return (int)d_count(d);
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_DCSUB_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/debug.c linux-4.1.10/fs/aufs/debug.c
---- linux-4.1.10.orig/fs/aufs/debug.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/debug.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,440 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * debug print functions
-+ */
-+
-+#include "aufs.h"
-+
-+/* Returns 0, or -errno. arg is in kp->arg. */
-+static int param_atomic_t_set(const char *val, const struct kernel_param *kp)
-+{
-+ int err, n;
-+
-+ err = kstrtoint(val, 0, &n);
-+ if (!err) {
-+ if (n > 0)
-+ au_debug_on();
-+ else
-+ au_debug_off();
-+ }
-+ return err;
-+}
-+
-+/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
-+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp)
-+{
-+ atomic_t *a;
-+
-+ a = kp->arg;
-+ return sprintf(buffer, "%d", atomic_read(a));
-+}
-+
-+static struct kernel_param_ops param_ops_atomic_t = {
-+ .set = param_atomic_t_set,
-+ .get = param_atomic_t_get
-+ /* void (*free)(void *arg) */
-+};
-+
-+atomic_t aufs_debug = ATOMIC_INIT(0);
-+MODULE_PARM_DESC(debug, "debug print");
-+module_param_named(debug, aufs_debug, atomic_t, S_IRUGO | S_IWUSR | S_IWGRP);
-+
-+DEFINE_MUTEX(au_dbg_mtx); /* just to serialize the dbg msgs */
-+char *au_plevel = KERN_DEBUG;
-+#define dpri(fmt, ...) do { \
-+ if ((au_plevel \
-+ && strcmp(au_plevel, KERN_DEBUG)) \
-+ || au_debug_test()) \
-+ printk("%s" fmt, au_plevel, ##__VA_ARGS__); \
-+} while (0)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_dpri_whlist(struct au_nhash *whlist)
-+{
-+ unsigned long ul, n;
-+ struct hlist_head *head;
-+ struct au_vdir_wh *pos;
-+
-+ n = whlist->nh_num;
-+ head = whlist->nh_head;
-+ for (ul = 0; ul < n; ul++) {
-+ hlist_for_each_entry(pos, head, wh_hash)
-+ dpri("b%d, %.*s, %d\n",
-+ pos->wh_bindex,
-+ pos->wh_str.len, pos->wh_str.name,
-+ pos->wh_str.len);
-+ head++;
-+ }
-+}
-+
-+void au_dpri_vdir(struct au_vdir *vdir)
-+{
-+ unsigned long ul;
-+ union au_vdir_deblk_p p;
-+ unsigned char *o;
-+
-+ if (!vdir || IS_ERR(vdir)) {
-+ dpri("err %ld\n", PTR_ERR(vdir));
-+ return;
-+ }
-+
-+ dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n",
-+ vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
-+ vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
-+ for (ul = 0; ul < vdir->vd_nblk; ul++) {
-+ p.deblk = vdir->vd_deblk[ul];
-+ o = p.deblk;
-+ dpri("[%lu]: %p\n", ul, o);
-+ }
-+}
-+
-+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
-+ struct dentry *wh)
-+{
-+ char *n = NULL;
-+ int l = 0;
-+
-+ if (!inode || IS_ERR(inode)) {
-+ dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
-+ return -1;
-+ }
-+
-+ /* the type of i_blocks depends upon CONFIG_LBDAF */
-+ BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
-+ && sizeof(inode->i_blocks) != sizeof(u64));
-+ if (wh) {
-+ n = (void *)wh->d_name.name;
-+ l = wh->d_name.len;
-+ }
-+
-+ dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
-+ " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
-+ bindex, inode,
-+ inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
-+ atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
-+ i_size_read(inode), (unsigned long long)inode->i_blocks,
-+ hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
-+ inode->i_mapping ? inode->i_mapping->nrpages : 0,
-+ inode->i_state, inode->i_flags, inode->i_version,
-+ inode->i_generation,
-+ l ? ", wh " : "", l, n);
-+ return 0;
-+}
-+
-+void au_dpri_inode(struct inode *inode)
-+{
-+ struct au_iinfo *iinfo;
-+ aufs_bindex_t bindex;
-+ int err, hn;
-+
-+ err = do_pri_inode(-1, inode, -1, NULL);
-+ if (err || !au_test_aufs(inode->i_sb))
-+ return;
-+
-+ iinfo = au_ii(inode);
-+ if (!iinfo)
-+ return;
-+ dpri("i-1: bstart %d, bend %d, gen %d\n",
-+ iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL));
-+ if (iinfo->ii_bstart < 0)
-+ return;
-+ hn = 0;
-+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) {
-+ hn = !!au_hn(iinfo->ii_hinode + bindex);
-+ do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn,
-+ iinfo->ii_hinode[0 + bindex].hi_whdentry);
-+ }
-+}
-+
-+void au_dpri_dalias(struct inode *inode)
-+{
-+ struct dentry *d;
-+
-+ spin_lock(&inode->i_lock);
-+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias)
-+ au_dpri_dentry(d);
-+ spin_unlock(&inode->i_lock);
-+}
-+
-+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
-+{
-+ struct dentry *wh = NULL;
-+ int hn;
-+ struct au_iinfo *iinfo;
-+
-+ if (!dentry || IS_ERR(dentry)) {
-+ dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
-+ return -1;
-+ }
-+ /* do not call dget_parent() here */
-+ /* note: access d_xxx without d_lock */
-+ dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n",
-+ bindex, dentry, dentry,
-+ dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
-+ au_dcount(dentry), dentry->d_flags,
-+ d_unhashed(dentry) ? "un" : "");
-+ hn = -1;
-+ if (bindex >= 0
-+ && d_is_positive(dentry)
-+ && au_test_aufs(dentry->d_sb)) {
-+ iinfo = au_ii(d_inode(dentry));
-+ if (iinfo) {
-+ hn = !!au_hn(iinfo->ii_hinode + bindex);
-+ wh = iinfo->ii_hinode[0 + bindex].hi_whdentry;
-+ }
-+ }
-+ do_pri_inode(bindex, d_inode(dentry), hn, wh);
-+ return 0;
-+}
-+
-+void au_dpri_dentry(struct dentry *dentry)
-+{
-+ struct au_dinfo *dinfo;
-+ aufs_bindex_t bindex;
-+ int err;
-+ struct au_hdentry *hdp;
-+
-+ err = do_pri_dentry(-1, dentry);
-+ if (err || !au_test_aufs(dentry->d_sb))
-+ return;
-+
-+ dinfo = au_di(dentry);
-+ if (!dinfo)
-+ return;
-+ dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d, tmp %d\n",
-+ dinfo->di_bstart, dinfo->di_bend,
-+ dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry),
-+ dinfo->di_tmpfile);
-+ if (dinfo->di_bstart < 0)
-+ return;
-+ hdp = dinfo->di_hdentry;
-+ for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++)
-+ do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry);
-+}
-+
-+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
-+{
-+ char a[32];
-+
-+ if (!file || IS_ERR(file)) {
-+ dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
-+ return -1;
-+ }
-+ a[0] = 0;
-+ if (bindex < 0
-+ && !IS_ERR_OR_NULL(file->f_path.dentry)
-+ && au_test_aufs(file->f_path.dentry->d_sb)
-+ && au_fi(file))
-+ snprintf(a, sizeof(a), ", gen %d, mmapped %d",
-+ au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
-+ dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
-+ bindex, file->f_mode, file->f_flags, (long)file_count(file),
-+ file->f_version, file->f_pos, a);
-+ if (!IS_ERR_OR_NULL(file->f_path.dentry))
-+ do_pri_dentry(bindex, file->f_path.dentry);
-+ return 0;
-+}
-+
-+void au_dpri_file(struct file *file)
-+{
-+ struct au_finfo *finfo;
-+ struct au_fidir *fidir;
-+ struct au_hfile *hfile;
-+ aufs_bindex_t bindex;
-+ int err;
-+
-+ err = do_pri_file(-1, file);
-+ if (err
-+ || IS_ERR_OR_NULL(file->f_path.dentry)
-+ || !au_test_aufs(file->f_path.dentry->d_sb))
-+ return;
-+
-+ finfo = au_fi(file);
-+ if (!finfo)
-+ return;
-+ if (finfo->fi_btop < 0)
-+ return;
-+ fidir = finfo->fi_hdir;
-+ if (!fidir)
-+ do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
-+ else
-+ for (bindex = finfo->fi_btop;
-+ bindex >= 0 && bindex <= fidir->fd_bbot;
-+ bindex++) {
-+ hfile = fidir->fd_hfile + bindex;
-+ do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
-+ }
-+}
-+
-+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
-+{
-+ struct vfsmount *mnt;
-+ struct super_block *sb;
-+
-+ if (!br || IS_ERR(br))
-+ goto out;
-+ mnt = au_br_mnt(br);
-+ if (!mnt || IS_ERR(mnt))
-+ goto out;
-+ sb = mnt->mnt_sb;
-+ if (!sb || IS_ERR(sb))
-+ goto out;
-+
-+ dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, "
-+ "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
-+ "xino %d\n",
-+ bindex, br->br_perm, br->br_id, atomic_read(&br->br_count),
-+ br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
-+ sb->s_flags, sb->s_count,
-+ atomic_read(&sb->s_active), !!br->br_xino.xi_file);
-+ return 0;
-+
-+out:
-+ dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
-+ return -1;
-+}
-+
-+void au_dpri_sb(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+ aufs_bindex_t bindex;
-+ int err;
-+ /* to reduce stack size */
-+ struct {
-+ struct vfsmount mnt;
-+ struct au_branch fake;
-+ } *a;
-+
-+ /* this function can be called from magic sysrq */
-+ a = kzalloc(sizeof(*a), GFP_ATOMIC);
-+ if (unlikely(!a)) {
-+ dpri("no memory\n");
-+ return;
-+ }
-+
-+ a->mnt.mnt_sb = sb;
-+ a->fake.br_perm = 0;
-+ a->fake.br_path.mnt = &a->mnt;
-+ a->fake.br_xino.xi_file = NULL;
-+ atomic_set(&a->fake.br_count, 0);
-+ smp_mb(); /* atomic_set */
-+ err = do_pri_br(-1, &a->fake);
-+ kfree(a);
-+ dpri("dev 0x%x\n", sb->s_dev);
-+ if (err || !au_test_aufs(sb))
-+ return;
-+
-+ sbinfo = au_sbi(sb);
-+ if (!sbinfo)
-+ return;
-+ dpri("nw %d, gen %u, kobj %d\n",
-+ atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
-+ atomic_read(&sbinfo->si_kobj.kref.refcount));
-+ for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
-+ do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
-+{
-+ struct inode *h_inode, *inode = d_inode(dentry);
-+ struct dentry *h_dentry;
-+ aufs_bindex_t bindex, bend, bi;
-+
-+ if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
-+ return;
-+
-+ bend = au_dbend(dentry);
-+ bi = au_ibend(inode);
-+ if (bi < bend)
-+ bend = bi;
-+ bindex = au_dbstart(dentry);
-+ bi = au_ibstart(inode);
-+ if (bi > bindex)
-+ bindex = bi;
-+
-+ for (; bindex <= bend; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (unlikely(h_inode != d_inode(h_dentry))) {
-+ au_debug_on();
-+ AuDbg("b%d, %s:%d\n", bindex, func, line);
-+ AuDbgDentry(dentry);
-+ AuDbgInode(inode);
-+ au_debug_off();
-+ BUG();
-+ }
-+ }
-+}
-+
-+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
-+{
-+ int err, i, j;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry **dentries;
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ AuDebugOn(err);
-+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
-+ AuDebugOn(err);
-+ for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
-+ dpage = dpages.dpages + i;
-+ dentries = dpage->dentries;
-+ for (j = dpage->ndentry - 1; !err && j >= 0; j--)
-+ AuDebugOn(au_digen_test(dentries[j], sigen));
-+ }
-+ au_dpages_free(&dpages);
-+}
-+
-+void au_dbg_verify_kthread(void)
-+{
-+ if (au_wkq_test()) {
-+ au_dbg_blocked();
-+ /*
-+ * It may be recursive, but udba=notify between two aufs mounts,
-+ * where a single ro branch is shared, is not a problem.
-+ */
-+ /* WARN_ON(1); */
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int __init au_debug_init(void)
-+{
-+ aufs_bindex_t bindex;
-+ struct au_vdir_destr destr;
-+
-+ bindex = -1;
-+ AuDebugOn(bindex >= 0);
-+
-+ destr.len = -1;
-+ AuDebugOn(destr.len < NAME_MAX);
-+
-+#ifdef CONFIG_4KSTACKS
-+ pr_warn("CONFIG_4KSTACKS is defined.\n");
-+#endif
-+
-+ return 0;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/debug.h linux-4.1.10/fs/aufs/debug.h
---- linux-4.1.10.orig/fs/aufs/debug.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/debug.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,225 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * debug print functions
-+ */
-+
-+#ifndef __AUFS_DEBUG_H__
-+#define __AUFS_DEBUG_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/atomic.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/sysrq.h>
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+#define AuDebugOn(a) BUG_ON(a)
-+
-+/* module parameter */
-+extern atomic_t aufs_debug;
-+static inline void au_debug_on(void)
-+{
-+ atomic_inc(&aufs_debug);
-+}
-+static inline void au_debug_off(void)
-+{
-+ atomic_dec_if_positive(&aufs_debug);
-+}
-+
-+static inline int au_debug_test(void)
-+{
-+ return atomic_read(&aufs_debug) > 0;
-+}
-+#else
-+#define AuDebugOn(a) do {} while (0)
-+AuStubVoid(au_debug_on, void)
-+AuStubVoid(au_debug_off, void)
-+AuStubInt0(au_debug_test, void)
-+#endif /* CONFIG_AUFS_DEBUG */
-+
-+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* debug print */
-+
-+#define AuDbg(fmt, ...) do { \
-+ if (au_debug_test()) \
-+ pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
-+} while (0)
-+#define AuLabel(l) AuDbg(#l "\n")
-+#define AuIOErr(fmt, ...) pr_err("I/O Error, " fmt, ##__VA_ARGS__)
-+#define AuWarn1(fmt, ...) do { \
-+ static unsigned char _c; \
-+ if (!_c++) \
-+ pr_warn(fmt, ##__VA_ARGS__); \
-+} while (0)
-+
-+#define AuErr1(fmt, ...) do { \
-+ static unsigned char _c; \
-+ if (!_c++) \
-+ pr_err(fmt, ##__VA_ARGS__); \
-+} while (0)
-+
-+#define AuIOErr1(fmt, ...) do { \
-+ static unsigned char _c; \
-+ if (!_c++) \
-+ AuIOErr(fmt, ##__VA_ARGS__); \
-+} while (0)
-+
-+#define AuUnsupportMsg "This operation is not supported." \
-+ " Please report this application to aufs-users ML."
-+#define AuUnsupport(fmt, ...) do { \
-+ pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
-+ dump_stack(); \
-+} while (0)
-+
-+#define AuTraceErr(e) do { \
-+ if (unlikely((e) < 0)) \
-+ AuDbg("err %d\n", (int)(e)); \
-+} while (0)
-+
-+#define AuTraceErrPtr(p) do { \
-+ if (IS_ERR(p)) \
-+ AuDbg("err %ld\n", PTR_ERR(p)); \
-+} while (0)
-+
-+/* dirty macros for debug print, use with "%.*s" and caution */
-+#define AuLNPair(qstr) (qstr)->len, (qstr)->name
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct dentry;
-+#ifdef CONFIG_AUFS_DEBUG
-+extern struct mutex au_dbg_mtx;
-+extern char *au_plevel;
-+struct au_nhash;
-+void au_dpri_whlist(struct au_nhash *whlist);
-+struct au_vdir;
-+void au_dpri_vdir(struct au_vdir *vdir);
-+struct inode;
-+void au_dpri_inode(struct inode *inode);
-+void au_dpri_dalias(struct inode *inode);
-+void au_dpri_dentry(struct dentry *dentry);
-+struct file;
-+void au_dpri_file(struct file *filp);
-+struct super_block;
-+void au_dpri_sb(struct super_block *sb);
-+
-+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
-+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
-+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
-+void au_dbg_verify_kthread(void);
-+
-+int __init au_debug_init(void);
-+
-+#define AuDbgWhlist(w) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#w "\n"); \
-+ au_dpri_whlist(w); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgVdir(v) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#v "\n"); \
-+ au_dpri_vdir(v); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgInode(i) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#i "\n"); \
-+ au_dpri_inode(i); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgDAlias(i) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#i "\n"); \
-+ au_dpri_dalias(i); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgDentry(d) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#d "\n"); \
-+ au_dpri_dentry(d); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgFile(f) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#f "\n"); \
-+ au_dpri_file(f); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgSb(sb) do { \
-+ mutex_lock(&au_dbg_mtx); \
-+ AuDbg(#sb "\n"); \
-+ au_dpri_sb(sb); \
-+ mutex_unlock(&au_dbg_mtx); \
-+} while (0)
-+
-+#define AuDbgSym(addr) do { \
-+ char sym[KSYM_SYMBOL_LEN]; \
-+ sprint_symbol(sym, (unsigned long)addr); \
-+ AuDbg("%s\n", sym); \
-+} while (0)
-+#else
-+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
-+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
-+AuStubVoid(au_dbg_verify_kthread, void)
-+AuStubInt0(__init au_debug_init, void)
-+
-+#define AuDbgWhlist(w) do {} while (0)
-+#define AuDbgVdir(v) do {} while (0)
-+#define AuDbgInode(i) do {} while (0)
-+#define AuDbgDAlias(i) do {} while (0)
-+#define AuDbgDentry(d) do {} while (0)
-+#define AuDbgFile(f) do {} while (0)
-+#define AuDbgSb(sb) do {} while (0)
-+#define AuDbgSym(addr) do {} while (0)
-+#endif /* CONFIG_AUFS_DEBUG */
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
-+int __init au_sysrq_init(void);
-+void au_sysrq_fin(void);
-+
-+#ifdef CONFIG_HW_CONSOLE
-+#define au_dbg_blocked() do { \
-+ WARN_ON(1); \
-+ handle_sysrq('w'); \
-+} while (0)
-+#else
-+AuStubVoid(au_dbg_blocked, void)
-+#endif
-+
-+#else
-+AuStubInt0(__init au_sysrq_init, void)
-+AuStubVoid(au_sysrq_fin, void)
-+AuStubVoid(au_dbg_blocked, void)
-+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_DEBUG_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/dentry.c linux-4.1.10/fs/aufs/dentry.c
---- linux-4.1.10.orig/fs/aufs/dentry.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dentry.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1105 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * lookup and dentry operations
-+ */
-+
-+#include <linux/namei.h>
-+#include "aufs.h"
-+
-+#define AuLkup_ALLOW_NEG 1
-+#define AuLkup_IGNORE_PERM (1 << 1)
-+#define au_ftest_lkup(flags, name) ((flags) & AuLkup_##name)
-+#define au_fset_lkup(flags, name) \
-+ do { (flags) |= AuLkup_##name; } while (0)
-+#define au_fclr_lkup(flags, name) \
-+ do { (flags) &= ~AuLkup_##name; } while (0)
-+
-+struct au_do_lookup_args {
-+ unsigned int flags;
-+ mode_t type;
-+};
-+
-+/*
-+ * returns positive/negative dentry, NULL or an error.
-+ * NULL means whiteout-ed or not-found.
-+ */
-+static struct dentry*
-+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
-+ aufs_bindex_t bindex, struct qstr *wh_name,
-+ struct au_do_lookup_args *args)
-+{
-+ struct dentry *h_dentry;
-+ struct inode *h_inode;
-+ struct au_branch *br;
-+ int wh_found, opq;
-+ unsigned char wh_able;
-+ const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
-+ const unsigned char ignore_perm = !!au_ftest_lkup(args->flags,
-+ IGNORE_PERM);
-+
-+ wh_found = 0;
-+ br = au_sbr(dentry->d_sb, bindex);
-+ wh_able = !!au_br_whable(br->br_perm);
-+ if (wh_able)
-+ wh_found = au_wh_test(h_parent, wh_name, /*try_sio*/0);
-+ h_dentry = ERR_PTR(wh_found);
-+ if (!wh_found)
-+ goto real_lookup;
-+ if (unlikely(wh_found < 0))
-+ goto out;
-+
-+ /* We found a whiteout */
-+ /* au_set_dbend(dentry, bindex); */
-+ au_set_dbwh(dentry, bindex);
-+ if (!allow_neg)
-+ return NULL; /* success */
-+
-+real_lookup:
-+ if (!ignore_perm)
-+ h_dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
-+ else
-+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
-+ if (IS_ERR(h_dentry)) {
-+ if (PTR_ERR(h_dentry) == -ENAMETOOLONG
-+ && !allow_neg)
-+ h_dentry = NULL;
-+ goto out;
-+ }
-+
-+ h_inode = d_inode(h_dentry);
-+ if (d_is_negative(h_dentry)) {
-+ if (!allow_neg)
-+ goto out_neg;
-+ } else if (wh_found
-+ || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
-+ goto out_neg;
-+
-+ if (au_dbend(dentry) <= bindex)
-+ au_set_dbend(dentry, bindex);
-+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
-+ au_set_dbstart(dentry, bindex);
-+ au_set_h_dptr(dentry, bindex, h_dentry);
-+
-+ if (!d_is_dir(h_dentry)
-+ || !wh_able
-+ || (d_really_is_positive(dentry) && !d_is_dir(dentry)))
-+ goto out; /* success */
-+
-+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
-+ opq = au_diropq_test(h_dentry);
-+ mutex_unlock(&h_inode->i_mutex);
-+ if (opq > 0)
-+ au_set_dbdiropq(dentry, bindex);
-+ else if (unlikely(opq < 0)) {
-+ au_set_h_dptr(dentry, bindex, NULL);
-+ h_dentry = ERR_PTR(opq);
-+ }
-+ goto out;
-+
-+out_neg:
-+ dput(h_dentry);
-+ h_dentry = NULL;
-+out:
-+ return h_dentry;
-+}
-+
-+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
-+{
-+ if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
-+ && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
-+ return -EPERM;
-+ return 0;
-+}
-+
-+/*
-+ * returns the number of lower positive dentries,
-+ * otherwise an error.
-+ * can be called at unlinking with @type is zero.
-+ */
-+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type)
-+{
-+ int npositive, err;
-+ aufs_bindex_t bindex, btail, bdiropq;
-+ unsigned char isdir, dirperm1;
-+ struct qstr whname;
-+ struct au_do_lookup_args args = {
-+ .flags = 0,
-+ .type = type
-+ };
-+ const struct qstr *name = &dentry->d_name;
-+ struct dentry *parent;
-+ struct super_block *sb;
-+
-+ sb = dentry->d_sb;
-+ err = au_test_shwh(sb, name);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_wh_name_alloc(&whname, name);
-+ if (unlikely(err))
-+ goto out;
-+
-+ isdir = !!d_is_dir(dentry);
-+ if (!type)
-+ au_fset_lkup(args.flags, ALLOW_NEG);
-+ dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1);
-+
-+ npositive = 0;
-+ parent = dget_parent(dentry);
-+ btail = au_dbtaildir(parent);
-+ for (bindex = bstart; bindex <= btail; bindex++) {
-+ struct dentry *h_parent, *h_dentry;
-+ struct inode *h_inode, *h_dir;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry) {
-+ if (d_is_positive(h_dentry))
-+ npositive++;
-+ if (type != S_IFDIR)
-+ break;
-+ continue;
-+ }
-+ h_parent = au_h_dptr(parent, bindex);
-+ if (!h_parent || !d_is_dir(h_parent))
-+ continue;
-+
-+ h_dir = d_inode(h_parent);
-+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
-+ h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
-+ &args);
-+ mutex_unlock(&h_dir->i_mutex);
-+ err = PTR_ERR(h_dentry);
-+ if (IS_ERR(h_dentry))
-+ goto out_parent;
-+ if (h_dentry)
-+ au_fclr_lkup(args.flags, ALLOW_NEG);
-+ if (dirperm1)
-+ au_fset_lkup(args.flags, IGNORE_PERM);
-+
-+ if (au_dbwh(dentry) == bindex)
-+ break;
-+ if (!h_dentry)
-+ continue;
-+ if (d_is_negative(h_dentry))
-+ continue;
-+ h_inode = d_inode(h_dentry);
-+ npositive++;
-+ if (!args.type)
-+ args.type = h_inode->i_mode & S_IFMT;
-+ if (args.type != S_IFDIR)
-+ break;
-+ else if (isdir) {
-+ /* the type of lower may be different */
-+ bdiropq = au_dbdiropq(dentry);
-+ if (bdiropq >= 0 && bdiropq <= bindex)
-+ break;
-+ }
-+ }
-+
-+ if (npositive) {
-+ AuLabel(positive);
-+ au_update_dbstart(dentry);
-+ }
-+ err = npositive;
-+ if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE)
-+ && au_dbstart(dentry) < 0)) {
-+ err = -EIO;
-+ AuIOErr("both of real entry and whiteout found, %pd, err %d\n",
-+ dentry, err);
-+ }
-+
-+out_parent:
-+ dput(parent);
-+ kfree(whname.name);
-+out:
-+ return err;
-+}
-+
-+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent)
-+{
-+ struct dentry *dentry;
-+ int wkq_err;
-+
-+ if (!au_test_h_perm_sio(d_inode(parent), MAY_EXEC))
-+ dentry = vfsub_lkup_one(name, parent);
-+ else {
-+ struct vfsub_lkup_one_args args = {
-+ .errp = &dentry,
-+ .name = name,
-+ .parent = parent
-+ };
-+
-+ wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args);
-+ if (unlikely(wkq_err))
-+ dentry = ERR_PTR(wkq_err);
-+ }
-+
-+ return dentry;
-+}
-+
-+/*
-+ * lookup @dentry on @bindex which should be negative.
-+ */
-+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
-+{
-+ int err;
-+ struct dentry *parent, *h_parent, *h_dentry;
-+ struct au_branch *br;
-+
-+ parent = dget_parent(dentry);
-+ h_parent = au_h_dptr(parent, bindex);
-+ br = au_sbr(dentry->d_sb, bindex);
-+ if (wh)
-+ h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
-+ else
-+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
-+ err = PTR_ERR(h_dentry);
-+ if (IS_ERR(h_dentry))
-+ goto out;
-+ if (unlikely(d_is_positive(h_dentry))) {
-+ err = -EIO;
-+ AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex);
-+ dput(h_dentry);
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (bindex < au_dbstart(dentry))
-+ au_set_dbstart(dentry, bindex);
-+ if (au_dbend(dentry) < bindex)
-+ au_set_dbend(dentry, bindex);
-+ au_set_h_dptr(dentry, bindex, h_dentry);
-+
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* subset of struct inode */
-+struct au_iattr {
-+ unsigned long i_ino;
-+ /* unsigned int i_nlink; */
-+ kuid_t i_uid;
-+ kgid_t i_gid;
-+ u64 i_version;
-+/*
-+ loff_t i_size;
-+ blkcnt_t i_blocks;
-+*/
-+ umode_t i_mode;
-+};
-+
-+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
-+{
-+ ia->i_ino = h_inode->i_ino;
-+ /* ia->i_nlink = h_inode->i_nlink; */
-+ ia->i_uid = h_inode->i_uid;
-+ ia->i_gid = h_inode->i_gid;
-+ ia->i_version = h_inode->i_version;
-+/*
-+ ia->i_size = h_inode->i_size;
-+ ia->i_blocks = h_inode->i_blocks;
-+*/
-+ ia->i_mode = (h_inode->i_mode & S_IFMT);
-+}
-+
-+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
-+{
-+ return ia->i_ino != h_inode->i_ino
-+ /* || ia->i_nlink != h_inode->i_nlink */
-+ || !uid_eq(ia->i_uid, h_inode->i_uid)
-+ || !gid_eq(ia->i_gid, h_inode->i_gid)
-+ || ia->i_version != h_inode->i_version
-+/*
-+ || ia->i_size != h_inode->i_size
-+ || ia->i_blocks != h_inode->i_blocks
-+*/
-+ || ia->i_mode != (h_inode->i_mode & S_IFMT);
-+}
-+
-+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
-+ struct au_branch *br)
-+{
-+ int err;
-+ struct au_iattr ia;
-+ struct inode *h_inode;
-+ struct dentry *h_d;
-+ struct super_block *h_sb;
-+
-+ err = 0;
-+ memset(&ia, -1, sizeof(ia));
-+ h_sb = h_dentry->d_sb;
-+ h_inode = NULL;
-+ if (d_is_positive(h_dentry)) {
-+ h_inode = d_inode(h_dentry);
-+ au_iattr_save(&ia, h_inode);
-+ } else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
-+ /* nfs d_revalidate may return 0 for negative dentry */
-+ /* fuse d_revalidate always returns 0 for negative dentry */
-+ goto out;
-+
-+ /* main purpose is namei.c:cached_lookup() and d_revalidate */
-+ h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent);
-+ err = PTR_ERR(h_d);
-+ if (IS_ERR(h_d))
-+ goto out;
-+
-+ err = 0;
-+ if (unlikely(h_d != h_dentry
-+ || d_inode(h_d) != h_inode
-+ || (h_inode && au_iattr_test(&ia, h_inode))))
-+ err = au_busy_or_stale();
-+ dput(h_d);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
-+ struct dentry *h_parent, struct au_branch *br)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (udba == AuOpt_UDBA_REVAL
-+ && !au_test_fs_remote(h_dentry->d_sb)) {
-+ IMustLock(h_dir);
-+ err = (d_inode(h_dentry->d_parent) != h_dir);
-+ } else if (udba != AuOpt_UDBA_NONE)
-+ err = au_h_verify_dentry(h_dentry, h_parent, br);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
-+{
-+ int err;
-+ aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq;
-+ struct au_hdentry tmp, *p, *q;
-+ struct au_dinfo *dinfo;
-+ struct super_block *sb;
-+
-+ DiMustWriteLock(dentry);
-+
-+ sb = dentry->d_sb;
-+ dinfo = au_di(dentry);
-+ bend = dinfo->di_bend;
-+ bwh = dinfo->di_bwh;
-+ bdiropq = dinfo->di_bdiropq;
-+ p = dinfo->di_hdentry + dinfo->di_bstart;
-+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) {
-+ if (!p->hd_dentry)
-+ continue;
-+
-+ new_bindex = au_br_index(sb, p->hd_id);
-+ if (new_bindex == bindex)
-+ continue;
-+
-+ if (dinfo->di_bwh == bindex)
-+ bwh = new_bindex;
-+ if (dinfo->di_bdiropq == bindex)
-+ bdiropq = new_bindex;
-+ if (new_bindex < 0) {
-+ au_hdput(p);
-+ p->hd_dentry = NULL;
-+ continue;
-+ }
-+
-+ /* swap two lower dentries, and loop again */
-+ q = dinfo->di_hdentry + new_bindex;
-+ tmp = *q;
-+ *q = *p;
-+ *p = tmp;
-+ if (tmp.hd_dentry) {
-+ bindex--;
-+ p--;
-+ }
-+ }
-+
-+ dinfo->di_bwh = -1;
-+ if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh))
-+ dinfo->di_bwh = bwh;
-+
-+ dinfo->di_bdiropq = -1;
-+ if (bdiropq >= 0
-+ && bdiropq <= au_sbend(sb)
-+ && au_sbr_whable(sb, bdiropq))
-+ dinfo->di_bdiropq = bdiropq;
-+
-+ err = -EIO;
-+ dinfo->di_bstart = -1;
-+ dinfo->di_bend = -1;
-+ bend = au_dbend(parent);
-+ p = dinfo->di_hdentry;
-+ for (bindex = 0; bindex <= bend; bindex++, p++)
-+ if (p->hd_dentry) {
-+ dinfo->di_bstart = bindex;
-+ break;
-+ }
-+
-+ if (dinfo->di_bstart >= 0) {
-+ p = dinfo->di_hdentry + bend;
-+ for (bindex = bend; bindex >= 0; bindex--, p--)
-+ if (p->hd_dentry) {
-+ dinfo->di_bend = bindex;
-+ err = 0;
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+static void au_do_hide(struct dentry *dentry)
-+{
-+ struct inode *inode;
-+
-+ if (d_really_is_positive(dentry)) {
-+ inode = d_inode(dentry);
-+ if (!d_is_dir(dentry)) {
-+ if (inode->i_nlink && !d_unhashed(dentry))
-+ drop_nlink(inode);
-+ } else {
-+ clear_nlink(inode);
-+ /* stop next lookup */
-+ inode->i_flags |= S_DEAD;
-+ }
-+ smp_mb(); /* necessary? */
-+ }
-+ d_drop(dentry);
-+}
-+
-+static int au_hide_children(struct dentry *parent)
-+{
-+ int err, i, j, ndentry;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry *dentry;
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_dcsub_pages(&dpages, parent, NULL, NULL);
-+ if (unlikely(err))
-+ goto out_dpages;
-+
-+ /* in reverse order */
-+ for (i = dpages.ndpage - 1; i >= 0; i--) {
-+ dpage = dpages.dpages + i;
-+ ndentry = dpage->ndentry;
-+ for (j = ndentry - 1; j >= 0; j--) {
-+ dentry = dpage->dentries[j];
-+ if (dentry != parent)
-+ au_do_hide(dentry);
-+ }
-+ }
-+
-+out_dpages:
-+ au_dpages_free(&dpages);
-+out:
-+ return err;
-+}
-+
-+static void au_hide(struct dentry *dentry)
-+{
-+ int err;
-+
-+ AuDbgDentry(dentry);
-+ if (d_is_dir(dentry)) {
-+ /* shrink_dcache_parent(dentry); */
-+ err = au_hide_children(dentry);
-+ if (unlikely(err))
-+ AuIOErr("%pd, failed hiding children, ignored %d\n",
-+ dentry, err);
-+ }
-+ au_do_hide(dentry);
-+}
-+
-+/*
-+ * By adding a dirty branch, a cached dentry may be affected in various ways.
-+ *
-+ * a dirty branch is added
-+ * - on the top of layers
-+ * - in the middle of layers
-+ * - to the bottom of layers
-+ *
-+ * on the added branch there exists
-+ * - a whiteout
-+ * - a diropq
-+ * - a same named entry
-+ * + exist
-+ * * negative --> positive
-+ * * positive --> positive
-+ * - type is unchanged
-+ * - type is changed
-+ * + doesn't exist
-+ * * negative --> negative
-+ * * positive --> negative (rejected by au_br_del() for non-dir case)
-+ * - none
-+ */
-+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
-+ struct au_dinfo *tmp)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend;
-+ struct {
-+ struct dentry *dentry;
-+ struct inode *inode;
-+ mode_t mode;
-+ } orig_h, tmp_h;
-+ struct au_hdentry *hd;
-+ struct inode *inode, *h_inode;
-+ struct dentry *h_dentry;
-+
-+ err = 0;
-+ AuDebugOn(dinfo->di_bstart < 0);
-+ orig_h.mode = 0;
-+ orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry;
-+ orig_h.inode = NULL;
-+ if (d_is_positive(orig_h.dentry)) {
-+ orig_h.inode = d_inode(orig_h.dentry);
-+ orig_h.mode = orig_h.inode->i_mode & S_IFMT;
-+ }
-+ memset(&tmp_h, 0, sizeof(tmp_h));
-+ if (tmp->di_bstart >= 0) {
-+ tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry;
-+ tmp_h.inode = NULL;
-+ if (d_is_positive(tmp_h.dentry)) {
-+ tmp_h.inode = d_inode(tmp_h.dentry);
-+ tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
-+ }
-+ }
-+
-+ inode = NULL;
-+ if (d_really_is_positive(dentry))
-+ inode = d_inode(dentry);
-+ if (!orig_h.inode) {
-+ AuDbg("negative originally\n");
-+ if (inode) {
-+ au_hide(dentry);
-+ goto out;
-+ }
-+ AuDebugOn(inode);
-+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
-+ AuDebugOn(dinfo->di_bdiropq != -1);
-+
-+ if (!tmp_h.inode) {
-+ AuDbg("negative --> negative\n");
-+ /* should have only one negative lower */
-+ if (tmp->di_bstart >= 0
-+ && tmp->di_bstart < dinfo->di_bstart) {
-+ AuDebugOn(tmp->di_bstart != tmp->di_bend);
-+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
-+ au_set_h_dptr(dentry, dinfo->di_bstart, NULL);
-+ au_di_cp(dinfo, tmp);
-+ hd = tmp->di_hdentry + tmp->di_bstart;
-+ au_set_h_dptr(dentry, tmp->di_bstart,
-+ dget(hd->hd_dentry));
-+ }
-+ au_dbg_verify_dinode(dentry);
-+ } else {
-+ AuDbg("negative --> positive\n");
-+ /*
-+ * similar to the behaviour of creating while bypassing
-+ * aufs.
-+ * unhash it in order to force an error in the
-+ * succeeding create operation.
-+ * we should not set S_DEAD here.
-+ */
-+ d_drop(dentry);
-+ /* au_di_swap(tmp, dinfo); */
-+ au_dbg_verify_dinode(dentry);
-+ }
-+ } else {
-+ AuDbg("positive originally\n");
-+ /* inode may be NULL */
-+ AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
-+ if (!tmp_h.inode) {
-+ AuDbg("positive --> negative\n");
-+ /* or bypassing aufs */
-+ au_hide(dentry);
-+ if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart)
-+ dinfo->di_bwh = tmp->di_bwh;
-+ if (inode)
-+ err = au_refresh_hinode_self(inode);
-+ au_dbg_verify_dinode(dentry);
-+ } else if (orig_h.mode == tmp_h.mode) {
-+ AuDbg("positive --> positive, same type\n");
-+ if (!S_ISDIR(orig_h.mode)
-+ && dinfo->di_bstart > tmp->di_bstart) {
-+ /*
-+ * similar to the behaviour of removing and
-+ * creating.
-+ */
-+ au_hide(dentry);
-+ if (inode)
-+ err = au_refresh_hinode_self(inode);
-+ au_dbg_verify_dinode(dentry);
-+ } else {
-+ /* fill empty slots */
-+ if (dinfo->di_bstart > tmp->di_bstart)
-+ dinfo->di_bstart = tmp->di_bstart;
-+ if (dinfo->di_bend < tmp->di_bend)
-+ dinfo->di_bend = tmp->di_bend;
-+ dinfo->di_bwh = tmp->di_bwh;
-+ dinfo->di_bdiropq = tmp->di_bdiropq;
-+ hd = tmp->di_hdentry;
-+ bend = dinfo->di_bend;
-+ for (bindex = tmp->di_bstart; bindex <= bend;
-+ bindex++) {
-+ if (au_h_dptr(dentry, bindex))
-+ continue;
-+ h_dentry = hd[bindex].hd_dentry;
-+ if (!h_dentry)
-+ continue;
-+ AuDebugOn(d_is_negative(h_dentry));
-+ h_inode = d_inode(h_dentry);
-+ AuDebugOn(orig_h.mode
-+ != (h_inode->i_mode
-+ & S_IFMT));
-+ au_set_h_dptr(dentry, bindex,
-+ dget(h_dentry));
-+ }
-+ err = au_refresh_hinode(inode, dentry);
-+ au_dbg_verify_dinode(dentry);
-+ }
-+ } else {
-+ AuDbg("positive --> positive, different type\n");
-+ /* similar to the behaviour of removing and creating */
-+ au_hide(dentry);
-+ if (inode)
-+ err = au_refresh_hinode_self(inode);
-+ au_dbg_verify_dinode(dentry);
-+ }
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
-+{
-+ int err, ebrange;
-+ unsigned int sigen;
-+ struct au_dinfo *dinfo, *tmp;
-+ struct super_block *sb;
-+ struct inode *inode;
-+
-+ DiMustWriteLock(dentry);
-+ AuDebugOn(IS_ROOT(dentry));
-+ AuDebugOn(d_really_is_negative(parent));
-+
-+ sb = dentry->d_sb;
-+ sigen = au_sigen(sb);
-+ err = au_digen_test(parent, sigen);
-+ if (unlikely(err))
-+ goto out;
-+
-+ dinfo = au_di(dentry);
-+ err = au_di_realloc(dinfo, au_sbend(sb) + 1);
-+ if (unlikely(err))
-+ goto out;
-+ ebrange = au_dbrange_test(dentry);
-+ if (!ebrange)
-+ ebrange = au_do_refresh_hdentry(dentry, parent);
-+
-+ if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) {
-+ AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0);
-+ if (d_really_is_positive(dentry)) {
-+ inode = d_inode(dentry);
-+ err = au_refresh_hinode_self(inode);
-+ }
-+ au_dbg_verify_dinode(dentry);
-+ if (!err)
-+ goto out_dgen; /* success */
-+ goto out;
-+ }
-+
-+ /* temporary dinfo */
-+ AuDbgDentry(dentry);
-+ err = -ENOMEM;
-+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
-+ if (unlikely(!tmp))
-+ goto out;
-+ au_di_swap(tmp, dinfo);
-+	/* returns the number of positive dentries */
-+	/*
-+	 * if the current working dir is removed, it returns an error,
-+	 * but the dentry itself is still valid.
-+	 */
-+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0);
-+ AuDbgDentry(dentry);
-+ au_di_swap(tmp, dinfo);
-+ if (err == -ENOENT)
-+ err = 0;
-+ if (err >= 0) {
-+ /* compare/refresh by dinfo */
-+ AuDbgDentry(dentry);
-+ err = au_refresh_by_dinfo(dentry, dinfo, tmp);
-+ au_dbg_verify_dinode(dentry);
-+ AuTraceErr(err);
-+ }
-+ au_rw_write_unlock(&tmp->di_rwsem);
-+ au_di_free(tmp);
-+ if (unlikely(err))
-+ goto out;
-+
-+out_dgen:
-+ au_update_digen(dentry);
-+out:
-+ if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
-+ AuIOErr("failed refreshing %pd, %d\n", dentry, err);
-+ AuDbgDentry(dentry);
-+ }
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags,
-+ struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ int err, valid;
-+
-+ err = 0;
-+ if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
-+ goto out;
-+
-+ AuDbg("b%d\n", bindex);
-+ /*
-+ * gave up supporting LOOKUP_CREATE/OPEN for lower fs,
-+ * due to whiteout and branch permission.
-+ */
-+ flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
-+ | LOOKUP_FOLLOW | LOOKUP_EXCL);
-+ /* it may return tri-state */
-+ valid = h_dentry->d_op->d_revalidate(h_dentry, flags);
-+
-+ if (unlikely(valid < 0))
-+ err = valid;
-+ else if (!valid)
-+ err = -EINVAL;
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* todo: remove this */
-+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
-+ unsigned int flags, int do_udba)
-+{
-+ int err;
-+ umode_t mode, h_mode;
-+ aufs_bindex_t bindex, btail, bstart, ibs, ibe;
-+ unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile;
-+ struct inode *h_inode, *h_cached_inode;
-+ struct dentry *h_dentry;
-+ struct qstr *name, *h_name;
-+
-+ err = 0;
-+ plus = 0;
-+ mode = 0;
-+ ibs = -1;
-+ ibe = -1;
-+ unhashed = !!d_unhashed(dentry);
-+ is_root = !!IS_ROOT(dentry);
-+ name = &dentry->d_name;
-+ tmpfile = au_di(dentry)->di_tmpfile;
-+
-+ /*
-+	 * Theoretically, the REVAL test should be unnecessary in case of
-+	 * {FS,I}NOTIFY.
-+	 * But {fs,i}notify doesn't fire some necessary events,
-+	 * e.g. IN_ATTRIB for atime/nlink/pageio.
-+	 * Let's do the REVAL test too.
-+ */
-+ if (do_udba && inode) {
-+ mode = (inode->i_mode & S_IFMT);
-+ plus = (inode->i_nlink > 0);
-+ ibs = au_ibstart(inode);
-+ ibe = au_ibend(inode);
-+ }
-+
-+ bstart = au_dbstart(dentry);
-+ btail = bstart;
-+ if (inode && S_ISDIR(inode->i_mode))
-+ btail = au_dbtaildir(dentry);
-+ for (bindex = bstart; bindex <= btail; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+
-+ AuDbg("b%d, %pd\n", bindex, h_dentry);
-+ h_nfs = !!au_test_nfs(h_dentry->d_sb);
-+ spin_lock(&h_dentry->d_lock);
-+ h_name = &h_dentry->d_name;
-+ if (unlikely(do_udba
-+ && !is_root
-+ && ((!h_nfs
-+ && (unhashed != !!d_unhashed(h_dentry)
-+ || (!tmpfile
-+ && !au_qstreq(name, h_name))
-+ ))
-+ || (h_nfs
-+ && !(flags & LOOKUP_OPEN)
-+ && (h_dentry->d_flags
-+ & DCACHE_NFSFS_RENAMED)))
-+ )) {
-+ int h_unhashed;
-+
-+ h_unhashed = d_unhashed(h_dentry);
-+ spin_unlock(&h_dentry->d_lock);
-+ AuDbg("unhash 0x%x 0x%x, %pd %pd\n",
-+ unhashed, h_unhashed, dentry, h_dentry);
-+ goto err;
-+ }
-+ spin_unlock(&h_dentry->d_lock);
-+
-+ err = au_do_h_d_reval(h_dentry, flags, dentry, bindex);
-+ if (unlikely(err))
-+ /* do not goto err, to keep the errno */
-+ break;
-+
-+ /* todo: plink too? */
-+ if (!do_udba)
-+ continue;
-+
-+ /* UDBA tests */
-+ if (unlikely(!!inode != d_is_positive(h_dentry)))
-+ goto err;
-+
-+ h_inode = NULL;
-+ if (d_is_positive(h_dentry))
-+ h_inode = d_inode(h_dentry);
-+ h_plus = plus;
-+ h_mode = mode;
-+ h_cached_inode = h_inode;
-+ if (h_inode) {
-+ h_mode = (h_inode->i_mode & S_IFMT);
-+ h_plus = (h_inode->i_nlink > 0);
-+ }
-+ if (inode && ibs <= bindex && bindex <= ibe)
-+ h_cached_inode = au_h_iptr(inode, bindex);
-+
-+ if (!h_nfs) {
-+ if (unlikely(plus != h_plus && !tmpfile))
-+ goto err;
-+ } else {
-+ if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED)
-+ && !is_root
-+ && !IS_ROOT(h_dentry)
-+ && unhashed != d_unhashed(h_dentry)))
-+ goto err;
-+ }
-+ if (unlikely(mode != h_mode
-+ || h_cached_inode != h_inode))
-+ goto err;
-+ continue;
-+
-+err:
-+ err = -EINVAL;
-+ break;
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
-+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
-+{
-+ int err;
-+ struct dentry *parent;
-+
-+ if (!au_digen_test(dentry, sigen))
-+ return 0;
-+
-+ parent = dget_parent(dentry);
-+ di_read_lock_parent(parent, AuLock_IR);
-+ AuDebugOn(au_digen_test(parent, sigen));
-+ au_dbg_verify_gen(parent, sigen);
-+ err = au_refresh_dentry(dentry, parent);
-+ di_read_unlock(parent, AuLock_IR);
-+ dput(parent);
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
-+{
-+ int err;
-+ struct dentry *d, *parent;
-+
-+ if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
-+ return simple_reval_dpath(dentry, sigen);
-+
-+ /* slow loop, keep it simple and stupid */
-+ /* cf: au_cpup_dirs() */
-+ err = 0;
-+ parent = NULL;
-+ while (au_digen_test(dentry, sigen)) {
-+ d = dentry;
-+ while (1) {
-+ dput(parent);
-+ parent = dget_parent(d);
-+ if (!au_digen_test(parent, sigen))
-+ break;
-+ d = parent;
-+ }
-+
-+ if (d != dentry)
-+ di_write_lock_child2(d);
-+
-+ /* someone might update our dentry while we were sleeping */
-+ if (au_digen_test(d, sigen)) {
-+ /*
-+ * todo: consolidate with simple_reval_dpath(),
-+ * do_refresh() and au_reval_for_attr().
-+ */
-+ di_read_lock_parent(parent, AuLock_IR);
-+ err = au_refresh_dentry(d, parent);
-+ di_read_unlock(parent, AuLock_IR);
-+ }
-+
-+ if (d != dentry)
-+ di_write_unlock(d);
-+ dput(parent);
-+ if (unlikely(err))
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+/*
-+ * if valid returns 1, otherwise 0.
-+ */
-+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+ int valid, err;
-+ unsigned int sigen;
-+ unsigned char do_udba;
-+ struct super_block *sb;
-+ struct inode *inode;
-+
-+ /* todo: support rcu-walk? */
-+ if (flags & LOOKUP_RCU)
-+ return -ECHILD;
-+
-+ valid = 0;
-+ if (unlikely(!au_di(dentry)))
-+ goto out;
-+
-+ valid = 1;
-+ sb = dentry->d_sb;
-+ /*
-+ * todo: very ugly
-+ * i_mutex of parent dir may be held,
-+	 * but we should not return 'invalid' just because it is busy.
-+ */
-+ err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
-+ if (unlikely(err)) {
-+ valid = err;
-+ AuTraceErr(err);
-+ goto out;
-+ }
-+ inode = NULL;
-+ if (d_really_is_positive(dentry))
-+ inode = d_inode(dentry);
-+ if (unlikely(inode && is_bad_inode(inode))) {
-+ err = -EINVAL;
-+ AuTraceErr(err);
-+ goto out_dgrade;
-+ }
-+ if (unlikely(au_dbrange_test(dentry))) {
-+ err = -EINVAL;
-+ AuTraceErr(err);
-+ goto out_dgrade;
-+ }
-+
-+ sigen = au_sigen(sb);
-+ if (au_digen_test(dentry, sigen)) {
-+ AuDebugOn(IS_ROOT(dentry));
-+ err = au_reval_dpath(dentry, sigen);
-+ if (unlikely(err)) {
-+ AuTraceErr(err);
-+ goto out_dgrade;
-+ }
-+ }
-+ di_downgrade_lock(dentry, AuLock_IR);
-+
-+ err = -EINVAL;
-+ if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY))
-+ && inode
-+	    && !(inode->i_state & I_LINKABLE)
-+ && (IS_DEADDIR(inode) || !inode->i_nlink))
-+ goto out_inval;
-+
-+ do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
-+ if (do_udba && inode) {
-+ aufs_bindex_t bstart = au_ibstart(inode);
-+ struct inode *h_inode;
-+
-+ if (bstart >= 0) {
-+ h_inode = au_h_iptr(inode, bstart);
-+ if (h_inode && au_test_higen(inode, h_inode))
-+ goto out_inval;
-+ }
-+ }
-+
-+ err = h_d_revalidate(dentry, inode, flags, do_udba);
-+ if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) {
-+ err = -EIO;
-+		AuDbg("both a real entry and a whiteout found, %p, err %d\n",
-+ dentry, err);
-+ }
-+ goto out_inval;
-+
-+out_dgrade:
-+ di_downgrade_lock(dentry, AuLock_IR);
-+out_inval:
-+ aufs_read_unlock(dentry, AuLock_IR);
-+ AuTraceErr(err);
-+ valid = !err;
-+out:
-+ if (!valid) {
-+ AuDbg("%pd invalid, %d\n", dentry, valid);
-+ d_drop(dentry);
-+ }
-+ return valid;
-+}
-+
-+static void aufs_d_release(struct dentry *dentry)
-+{
-+ if (au_di(dentry)) {
-+ au_di_fin(dentry);
-+ au_hn_di_reinit(dentry);
-+ }
-+}
-+
-+const struct dentry_operations aufs_dop = {
-+ .d_revalidate = aufs_d_revalidate,
-+ .d_weak_revalidate = aufs_d_revalidate,
-+ .d_release = aufs_d_release
-+};
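
For reference, aufs_d_revalidate() above follows the standard VFS ->d_revalidate contract: return 1 for a valid dentry, 0 to invalidate it, or a negative errno such as -ECHILD to make the VFS retry in ref-walk mode when called under RCU-walk. A minimal sketch of that contract (still_valid() is a hypothetical filesystem-specific predicate, not an aufs function):

	#include <linux/dcache.h>
	#include <linux/errno.h>
	#include <linux/namei.h>

	static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		/* may not sleep under RCU-walk; ask the VFS to retry in ref-walk */
		if (flags & LOOKUP_RCU)
			return -ECHILD;
		/* hypothetical check standing in for the fs-specific work */
		return still_valid(dentry) ? 1 : 0;
	}
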
-diff -Nur linux-4.1.10.orig/fs/aufs/dentry.h linux-4.1.10/fs/aufs/dentry.h
---- linux-4.1.10.orig/fs/aufs/dentry.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dentry.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,233 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * lookup and dentry operations
-+ */
-+
-+#ifndef __AUFS_DENTRY_H__
-+#define __AUFS_DENTRY_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/dcache.h>
-+#include "rwsem.h"
-+
-+struct au_hdentry {
-+ struct dentry *hd_dentry;
-+ aufs_bindex_t hd_id;
-+};
-+
-+struct au_dinfo {
-+ atomic_t di_generation;
-+
-+ struct au_rwsem di_rwsem;
-+ aufs_bindex_t di_bstart, di_bend, di_bwh, di_bdiropq;
-+ unsigned char di_tmpfile; /* to allow the different name */
-+ struct au_hdentry *di_hdentry;
-+} ____cacheline_aligned_in_smp;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* dentry.c */
-+extern const struct dentry_operations aufs_dop;
-+struct au_branch;
-+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent);
-+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
-+ struct dentry *h_parent, struct au_branch *br);
-+
-+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type);
-+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
-+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
-+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
-+
-+/* dinfo.c */
-+void au_di_init_once(void *_di);
-+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
-+void au_di_free(struct au_dinfo *dinfo);
-+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
-+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
-+int au_di_init(struct dentry *dentry);
-+void au_di_fin(struct dentry *dentry);
-+int au_di_realloc(struct au_dinfo *dinfo, int nbr);
-+
-+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
-+void di_read_unlock(struct dentry *d, int flags);
-+void di_downgrade_lock(struct dentry *d, int flags);
-+void di_write_lock(struct dentry *d, unsigned int lsc);
-+void di_write_unlock(struct dentry *d);
-+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
-+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
-+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
-+
-+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
-+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
-+aufs_bindex_t au_dbtail(struct dentry *dentry);
-+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
-+
-+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_dentry);
-+int au_digen_test(struct dentry *dentry, unsigned int sigen);
-+int au_dbrange_test(struct dentry *dentry);
-+void au_update_digen(struct dentry *dentry);
-+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
-+void au_update_dbstart(struct dentry *dentry);
-+void au_update_dbend(struct dentry *dentry);
-+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct au_dinfo *au_di(struct dentry *dentry)
-+{
-+ return dentry->d_fsdata;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* lock subclass for dinfo */
-+enum {
-+ AuLsc_DI_CHILD, /* child first */
-+ AuLsc_DI_CHILD2, /* rename(2), link(2), and cpup at hnotify */
-+ AuLsc_DI_CHILD3, /* copyup dirs */
-+ AuLsc_DI_PARENT,
-+ AuLsc_DI_PARENT2,
-+ AuLsc_DI_PARENT3,
-+ AuLsc_DI_TMP /* temp for replacing dinfo */
-+};
-+
-+/*
-+ * di_read_lock_child, di_write_lock_child,
-+ * di_read_lock_child2, di_write_lock_child2,
-+ * di_read_lock_child3, di_write_lock_child3,
-+ * di_read_lock_parent, di_write_lock_parent,
-+ * di_read_lock_parent2, di_write_lock_parent2,
-+ * di_read_lock_parent3, di_write_lock_parent3,
-+ */
-+#define AuReadLockFunc(name, lsc) \
-+static inline void di_read_lock_##name(struct dentry *d, int flags) \
-+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
-+
-+#define AuWriteLockFunc(name, lsc) \
-+static inline void di_write_lock_##name(struct dentry *d) \
-+{ di_write_lock(d, AuLsc_DI_##lsc); }
-+
-+#define AuRWLockFuncs(name, lsc) \
-+ AuReadLockFunc(name, lsc) \
-+ AuWriteLockFunc(name, lsc)
-+
-+AuRWLockFuncs(child, CHILD);
-+AuRWLockFuncs(child2, CHILD2);
-+AuRWLockFuncs(child3, CHILD3);
-+AuRWLockFuncs(parent, PARENT);
-+AuRWLockFuncs(parent2, PARENT2);
-+AuRWLockFuncs(parent3, PARENT3);
-+
-+#undef AuReadLockFunc
-+#undef AuWriteLockFunc
-+#undef AuRWLockFuncs
-+
-+#define DiMustNoWaiters(d) AuRwMustNoWaiters(&au_di(d)->di_rwsem)
-+#define DiMustAnyLock(d) AuRwMustAnyLock(&au_di(d)->di_rwsem)
-+#define DiMustWriteLock(d) AuRwMustWriteLock(&au_di(d)->di_rwsem)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* todo: memory barrier? */
-+static inline unsigned int au_digen(struct dentry *d)
-+{
-+ return atomic_read(&au_di(d)->di_generation);
-+}
-+
-+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
-+{
-+ hdentry->hd_dentry = NULL;
-+}
-+
-+static inline void au_hdput(struct au_hdentry *hd)
-+{
-+ if (hd)
-+ dput(hd->hd_dentry);
-+}
-+
-+static inline aufs_bindex_t au_dbstart(struct dentry *dentry)
-+{
-+ DiMustAnyLock(dentry);
-+ return au_di(dentry)->di_bstart;
-+}
-+
-+static inline aufs_bindex_t au_dbend(struct dentry *dentry)
-+{
-+ DiMustAnyLock(dentry);
-+ return au_di(dentry)->di_bend;
-+}
-+
-+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
-+{
-+ DiMustAnyLock(dentry);
-+ return au_di(dentry)->di_bwh;
-+}
-+
-+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
-+{
-+ DiMustAnyLock(dentry);
-+ return au_di(dentry)->di_bdiropq;
-+}
-+
-+/* todo: hard/soft set? */
-+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ DiMustWriteLock(dentry);
-+ au_di(dentry)->di_bstart = bindex;
-+}
-+
-+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ DiMustWriteLock(dentry);
-+ au_di(dentry)->di_bend = bindex;
-+}
-+
-+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ DiMustWriteLock(dentry);
-+ /* dbwh can be outside of bstart - bend range */
-+ au_di(dentry)->di_bwh = bindex;
-+}
-+
-+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ DiMustWriteLock(dentry);
-+ au_di(dentry)->di_bdiropq = bindex;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_HNOTIFY
-+static inline void au_digen_dec(struct dentry *d)
-+{
-+ atomic_dec(&au_di(d)->di_generation);
-+}
-+
-+static inline void au_hn_di_reinit(struct dentry *dentry)
-+{
-+ dentry->d_fsdata = NULL;
-+}
-+#else
-+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
-+#endif /* CONFIG_AUFS_HNOTIFY */
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_DENTRY_H__ */
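
As a reading aid: each AuRWLockFuncs() invocation above generates one read-lock and one write-lock wrapper per dinfo lock subclass. For example, AuRWLockFuncs(child, CHILD) expands to:

	static inline void di_read_lock_child(struct dentry *d, int flags)
	{ di_read_lock(d, flags, AuLsc_DI_CHILD); }

	static inline void di_write_lock_child(struct dentry *d)
	{ di_write_lock(d, AuLsc_DI_CHILD); }

The distinct AuLsc_DI_* subclasses exist so that lockdep can tell legitimate nested acquisitions (e.g. parent then child) apart from genuine deadlocks.
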
-diff -Nur linux-4.1.10.orig/fs/aufs/dinfo.c linux-4.1.10/fs/aufs/dinfo.c
---- linux-4.1.10.orig/fs/aufs/dinfo.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dinfo.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,550 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * dentry private data
-+ */
-+
-+#include "aufs.h"
-+
-+void au_di_init_once(void *_dinfo)
-+{
-+ struct au_dinfo *dinfo = _dinfo;
-+ static struct lock_class_key aufs_di;
-+
-+ au_rw_init(&dinfo->di_rwsem);
-+ au_rw_class(&dinfo->di_rwsem, &aufs_di);
-+}
-+
-+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
-+{
-+ struct au_dinfo *dinfo;
-+ int nbr, i;
-+
-+ dinfo = au_cache_alloc_dinfo();
-+ if (unlikely(!dinfo))
-+ goto out;
-+
-+ nbr = au_sbend(sb) + 1;
-+ if (nbr <= 0)
-+ nbr = 1;
-+ dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
-+ if (dinfo->di_hdentry) {
-+ au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
-+ dinfo->di_bstart = -1;
-+ dinfo->di_bend = -1;
-+ dinfo->di_bwh = -1;
-+ dinfo->di_bdiropq = -1;
-+ dinfo->di_tmpfile = 0;
-+ for (i = 0; i < nbr; i++)
-+ dinfo->di_hdentry[i].hd_id = -1;
-+ goto out;
-+ }
-+
-+ au_cache_free_dinfo(dinfo);
-+ dinfo = NULL;
-+
-+out:
-+ return dinfo;
-+}
-+
-+void au_di_free(struct au_dinfo *dinfo)
-+{
-+ struct au_hdentry *p;
-+ aufs_bindex_t bend, bindex;
-+
-+ /* dentry may not be revalidated */
-+ bindex = dinfo->di_bstart;
-+ if (bindex >= 0) {
-+ bend = dinfo->di_bend;
-+ p = dinfo->di_hdentry + bindex;
-+ while (bindex++ <= bend)
-+ au_hdput(p++);
-+ }
-+ kfree(dinfo->di_hdentry);
-+ au_cache_free_dinfo(dinfo);
-+}
-+
-+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
-+{
-+ struct au_hdentry *p;
-+ aufs_bindex_t bi;
-+
-+ AuRwMustWriteLock(&a->di_rwsem);
-+ AuRwMustWriteLock(&b->di_rwsem);
-+
-+#define DiSwap(v, name) \
-+ do { \
-+ v = a->di_##name; \
-+ a->di_##name = b->di_##name; \
-+ b->di_##name = v; \
-+ } while (0)
-+
-+ DiSwap(p, hdentry);
-+ DiSwap(bi, bstart);
-+ DiSwap(bi, bend);
-+ DiSwap(bi, bwh);
-+ DiSwap(bi, bdiropq);
-+ /* smp_mb(); */
-+
-+#undef DiSwap
-+}
-+
-+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
-+{
-+ AuRwMustWriteLock(&dst->di_rwsem);
-+ AuRwMustWriteLock(&src->di_rwsem);
-+
-+ dst->di_bstart = src->di_bstart;
-+ dst->di_bend = src->di_bend;
-+ dst->di_bwh = src->di_bwh;
-+ dst->di_bdiropq = src->di_bdiropq;
-+ /* smp_mb(); */
-+}
-+
-+int au_di_init(struct dentry *dentry)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct au_dinfo *dinfo;
-+
-+ err = 0;
-+ sb = dentry->d_sb;
-+ dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
-+ if (dinfo) {
-+ atomic_set(&dinfo->di_generation, au_sigen(sb));
-+ /* smp_mb(); */ /* atomic_set */
-+ dentry->d_fsdata = dinfo;
-+ } else
-+ err = -ENOMEM;
-+
-+ return err;
-+}
-+
-+void au_di_fin(struct dentry *dentry)
-+{
-+ struct au_dinfo *dinfo;
-+
-+ dinfo = au_di(dentry);
-+ AuRwDestroy(&dinfo->di_rwsem);
-+ au_di_free(dinfo);
-+}
-+
-+int au_di_realloc(struct au_dinfo *dinfo, int nbr)
-+{
-+ int err, sz;
-+ struct au_hdentry *hdp;
-+
-+ AuRwMustWriteLock(&dinfo->di_rwsem);
-+
-+ err = -ENOMEM;
-+ sz = sizeof(*hdp) * (dinfo->di_bend + 1);
-+ if (!sz)
-+ sz = sizeof(*hdp);
-+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS);
-+ if (hdp) {
-+ dinfo->di_hdentry = hdp;
-+ err = 0;
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
-+{
-+ switch (lsc) {
-+ case AuLsc_DI_CHILD:
-+ ii_write_lock_child(inode);
-+ break;
-+ case AuLsc_DI_CHILD2:
-+ ii_write_lock_child2(inode);
-+ break;
-+ case AuLsc_DI_CHILD3:
-+ ii_write_lock_child3(inode);
-+ break;
-+ case AuLsc_DI_PARENT:
-+ ii_write_lock_parent(inode);
-+ break;
-+ case AuLsc_DI_PARENT2:
-+ ii_write_lock_parent2(inode);
-+ break;
-+ case AuLsc_DI_PARENT3:
-+ ii_write_lock_parent3(inode);
-+ break;
-+ default:
-+ BUG();
-+ }
-+}
-+
-+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
-+{
-+ switch (lsc) {
-+ case AuLsc_DI_CHILD:
-+ ii_read_lock_child(inode);
-+ break;
-+ case AuLsc_DI_CHILD2:
-+ ii_read_lock_child2(inode);
-+ break;
-+ case AuLsc_DI_CHILD3:
-+ ii_read_lock_child3(inode);
-+ break;
-+ case AuLsc_DI_PARENT:
-+ ii_read_lock_parent(inode);
-+ break;
-+ case AuLsc_DI_PARENT2:
-+ ii_read_lock_parent2(inode);
-+ break;
-+ case AuLsc_DI_PARENT3:
-+ ii_read_lock_parent3(inode);
-+ break;
-+ default:
-+ BUG();
-+ }
-+}
-+
-+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
-+{
-+ struct inode *inode;
-+
-+ au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
-+ if (d_really_is_positive(d)) {
-+ inode = d_inode(d);
-+ if (au_ftest_lock(flags, IW))
-+ do_ii_write_lock(inode, lsc);
-+ else if (au_ftest_lock(flags, IR))
-+ do_ii_read_lock(inode, lsc);
-+ }
-+}
-+
-+void di_read_unlock(struct dentry *d, int flags)
-+{
-+ struct inode *inode;
-+
-+ if (d_really_is_positive(d)) {
-+ inode = d_inode(d);
-+ if (au_ftest_lock(flags, IW)) {
-+ au_dbg_verify_dinode(d);
-+ ii_write_unlock(inode);
-+ } else if (au_ftest_lock(flags, IR)) {
-+ au_dbg_verify_dinode(d);
-+ ii_read_unlock(inode);
-+ }
-+ }
-+ au_rw_read_unlock(&au_di(d)->di_rwsem);
-+}
-+
-+void di_downgrade_lock(struct dentry *d, int flags)
-+{
-+ if (d_really_is_positive(d) && au_ftest_lock(flags, IR))
-+ ii_downgrade_lock(d_inode(d));
-+ au_rw_dgrade_lock(&au_di(d)->di_rwsem);
-+}
-+
-+void di_write_lock(struct dentry *d, unsigned int lsc)
-+{
-+ au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
-+ if (d_really_is_positive(d))
-+ do_ii_write_lock(d_inode(d), lsc);
-+}
-+
-+void di_write_unlock(struct dentry *d)
-+{
-+ au_dbg_verify_dinode(d);
-+ if (d_really_is_positive(d))
-+ ii_write_unlock(d_inode(d));
-+ au_rw_write_unlock(&au_di(d)->di_rwsem);
-+}
-+
-+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
-+{
-+ AuDebugOn(d1 == d2
-+ || d_inode(d1) == d_inode(d2)
-+ || d1->d_sb != d2->d_sb);
-+
-+ if (isdir && au_test_subdir(d1, d2)) {
-+ di_write_lock_child(d1);
-+ di_write_lock_child2(d2);
-+ } else {
-+ /* there should be no races */
-+ di_write_lock_child(d2);
-+ di_write_lock_child2(d1);
-+ }
-+}
-+
-+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
-+{
-+ AuDebugOn(d1 == d2
-+ || d_inode(d1) == d_inode(d2)
-+ || d1->d_sb != d2->d_sb);
-+
-+ if (isdir && au_test_subdir(d1, d2)) {
-+ di_write_lock_parent(d1);
-+ di_write_lock_parent2(d2);
-+ } else {
-+ /* there should be no races */
-+ di_write_lock_parent(d2);
-+ di_write_lock_parent2(d1);
-+ }
-+}
-+
-+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
-+{
-+ di_write_unlock(d1);
-+ if (d_inode(d1) == d_inode(d2))
-+ au_rw_write_unlock(&au_di(d2)->di_rwsem);
-+ else
-+ di_write_unlock(d2);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ struct dentry *d;
-+
-+ DiMustAnyLock(dentry);
-+
-+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
-+ return NULL;
-+ AuDebugOn(bindex < 0);
-+ d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry;
-+ AuDebugOn(d && au_dcount(d) <= 0);
-+ return d;
-+}
-+
-+/*
-+ * extended version of au_h_dptr().
-+ * returns a hashed and positive (or linkable) h_dentry in bindex, NULL, or
-+ * error.
-+ */
-+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ struct dentry *h_dentry;
-+ struct inode *inode, *h_inode;
-+
-+ AuDebugOn(d_really_is_negative(dentry));
-+
-+ h_dentry = NULL;
-+ if (au_dbstart(dentry) <= bindex
-+ && bindex <= au_dbend(dentry))
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry && !au_d_linkable(h_dentry)) {
-+ dget(h_dentry);
-+ goto out; /* success */
-+ }
-+
-+ inode = d_inode(dentry);
-+ AuDebugOn(bindex < au_ibstart(inode));
-+ AuDebugOn(au_ibend(inode) < bindex);
-+ h_inode = au_h_iptr(inode, bindex);
-+ h_dentry = d_find_alias(h_inode);
-+ if (h_dentry) {
-+ if (!IS_ERR(h_dentry)) {
-+ if (!au_d_linkable(h_dentry))
-+ goto out; /* success */
-+ dput(h_dentry);
-+ } else
-+ goto out;
-+ }
-+
-+ if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
-+ h_dentry = au_plink_lkup(inode, bindex);
-+ AuDebugOn(!h_dentry);
-+ if (!IS_ERR(h_dentry)) {
-+ if (!au_d_hashed_positive(h_dentry))
-+ goto out; /* success */
-+ dput(h_dentry);
-+ h_dentry = NULL;
-+ }
-+ }
-+
-+out:
-+ AuDbgDentry(h_dentry);
-+ return h_dentry;
-+}
-+
-+aufs_bindex_t au_dbtail(struct dentry *dentry)
-+{
-+ aufs_bindex_t bend, bwh;
-+
-+ bend = au_dbend(dentry);
-+ if (0 <= bend) {
-+ bwh = au_dbwh(dentry);
-+ if (!bwh)
-+ return bwh;
-+ if (0 < bwh && bwh < bend)
-+ return bwh - 1;
-+ }
-+ return bend;
-+}
-+
-+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
-+{
-+ aufs_bindex_t bend, bopq;
-+
-+ bend = au_dbtail(dentry);
-+ if (0 <= bend) {
-+ bopq = au_dbdiropq(dentry);
-+ if (0 <= bopq && bopq < bend)
-+ bend = bopq;
-+ }
-+ return bend;
-+}
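
A worked example of the two functions above, assuming branches 0..3: with di_bend == 3 and a whiteout at di_bwh == 2, au_dbtail() returns 1, hiding the whiteout branch and everything below it; a whiteout at di_bwh == 0 returns 0, so only the top branch is consulted. au_dbtaildir() then additionally caps the tail at di_bdiropq, so an opaque-directory marker at bdiropq == 1 clamps a tail of 3 down to 1 (the opaque branch itself stays visible).
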
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_dentry)
-+{
-+ struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex;
-+ struct au_branch *br;
-+
-+ DiMustWriteLock(dentry);
-+
-+ au_hdput(hd);
-+ hd->hd_dentry = h_dentry;
-+ if (h_dentry) {
-+ br = au_sbr(dentry->d_sb, bindex);
-+ hd->hd_id = br->br_id;
-+ }
-+}
-+
-+int au_dbrange_test(struct dentry *dentry)
-+{
-+ int err;
-+ aufs_bindex_t bstart, bend;
-+
-+ err = 0;
-+ bstart = au_dbstart(dentry);
-+ bend = au_dbend(dentry);
-+ if (bstart >= 0)
-+ AuDebugOn(bend < 0 && bstart > bend);
-+ else {
-+ err = -EIO;
-+ AuDebugOn(bend >= 0);
-+ }
-+
-+ return err;
-+}
-+
-+int au_digen_test(struct dentry *dentry, unsigned int sigen)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (unlikely(au_digen(dentry) != sigen
-+ || au_iigen_test(d_inode(dentry), sigen)))
-+ err = -EIO;
-+
-+ return err;
-+}
-+
-+void au_update_digen(struct dentry *dentry)
-+{
-+ atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
-+ /* smp_mb(); */ /* atomic_set */
-+}
-+
-+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
-+{
-+ struct au_dinfo *dinfo;
-+ struct dentry *h_d;
-+ struct au_hdentry *hdp;
-+
-+ DiMustWriteLock(dentry);
-+
-+ dinfo = au_di(dentry);
-+ if (!dinfo || dinfo->di_bstart < 0)
-+ return;
-+
-+ hdp = dinfo->di_hdentry;
-+ if (do_put_zero) {
-+ aufs_bindex_t bindex, bend;
-+
-+ bend = dinfo->di_bend;
-+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) {
-+ h_d = hdp[0 + bindex].hd_dentry;
-+ if (h_d && d_is_negative(h_d))
-+ au_set_h_dptr(dentry, bindex, NULL);
-+ }
-+ }
-+
-+ dinfo->di_bstart = -1;
-+ while (++dinfo->di_bstart <= dinfo->di_bend)
-+ if (hdp[0 + dinfo->di_bstart].hd_dentry)
-+ break;
-+ if (dinfo->di_bstart > dinfo->di_bend) {
-+ dinfo->di_bstart = -1;
-+ dinfo->di_bend = -1;
-+ return;
-+ }
-+
-+ dinfo->di_bend++;
-+ while (0 <= --dinfo->di_bend)
-+ if (hdp[0 + dinfo->di_bend].hd_dentry)
-+ break;
-+ AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0);
-+}
-+
-+void au_update_dbstart(struct dentry *dentry)
-+{
-+ aufs_bindex_t bindex, bend;
-+ struct dentry *h_dentry;
-+
-+ bend = au_dbend(dentry);
-+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+ if (d_is_positive(h_dentry)) {
-+ au_set_dbstart(dentry, bindex);
-+ return;
-+ }
-+ au_set_h_dptr(dentry, bindex, NULL);
-+ }
-+}
-+
-+void au_update_dbend(struct dentry *dentry)
-+{
-+ aufs_bindex_t bindex, bstart;
-+ struct dentry *h_dentry;
-+
-+ bstart = au_dbstart(dentry);
-+ for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+ if (d_is_positive(h_dentry)) {
-+ au_set_dbend(dentry, bindex);
-+ return;
-+ }
-+ au_set_h_dptr(dentry, bindex, NULL);
-+ }
-+}
-+
-+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
-+{
-+ aufs_bindex_t bindex, bend;
-+
-+ bend = au_dbend(dentry);
-+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++)
-+ if (au_h_dptr(dentry, bindex) == h_dentry)
-+ return bindex;
-+ return -1;
-+}
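
The di_bstart/di_bend pair maintained throughout this file is simply a window over the first and last occupied di_hdentry slots, with -1/-1 meaning "no lower dentry at all". A user-space sketch of the trimming that au_update_dbrange() performs (trim_window() and the plain pointer array are illustrative, not aufs code):

	/* trim [*bstart, *bend] to the occupied slots of slot[0..n-1] */
	static void trim_window(void **slot, int n, int *bstart, int *bend)
	{
		*bstart = 0;
		while (*bstart < n && !slot[*bstart])
			(*bstart)++;
		if (*bstart == n) {
			/* every slot empty: the "negative" state */
			*bstart = -1;
			*bend = -1;
			return;
		}
		*bend = n - 1;
		while (!slot[*bend])	/* stops at *bstart at the latest */
			(*bend)--;
	}
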
-diff -Nur linux-4.1.10.orig/fs/aufs/dir.c linux-4.1.10/fs/aufs/dir.c
---- linux-4.1.10.orig/fs/aufs/dir.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dir.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,753 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * directory operations
-+ */
-+
-+#include <linux/fs_stack.h>
-+#include "aufs.h"
-+
-+void au_add_nlink(struct inode *dir, struct inode *h_dir)
-+{
-+ unsigned int nlink;
-+
-+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
-+
-+ nlink = dir->i_nlink;
-+ nlink += h_dir->i_nlink - 2;
-+ if (h_dir->i_nlink < 2)
-+ nlink += 2;
-+ smp_mb(); /* for i_nlink */
-+	/* 0 can happen in revalidating */
-+ set_nlink(dir, nlink);
-+}
-+
-+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
-+{
-+ unsigned int nlink;
-+
-+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
-+
-+ nlink = dir->i_nlink;
-+ nlink -= h_dir->i_nlink - 2;
-+ if (h_dir->i_nlink < 2)
-+ nlink -= 2;
-+ smp_mb(); /* for i_nlink */
-+ /* nlink == 0 means the branch-fs is broken */
-+ set_nlink(dir, nlink);
-+}
-+
-+loff_t au_dir_size(struct file *file, struct dentry *dentry)
-+{
-+ loff_t sz;
-+ aufs_bindex_t bindex, bend;
-+ struct file *h_file;
-+ struct dentry *h_dentry;
-+
-+ sz = 0;
-+ if (file) {
-+ AuDebugOn(!d_is_dir(file->f_path.dentry));
-+
-+ bend = au_fbend_dir(file);
-+ for (bindex = au_fbstart(file);
-+ bindex <= bend && sz < KMALLOC_MAX_SIZE;
-+ bindex++) {
-+ h_file = au_hf_dir(file, bindex);
-+ if (h_file && file_inode(h_file))
-+ sz += vfsub_f_size_read(h_file);
-+ }
-+ } else {
-+ AuDebugOn(!dentry);
-+ AuDebugOn(!d_is_dir(dentry));
-+
-+ bend = au_dbtaildir(dentry);
-+ for (bindex = au_dbstart(dentry);
-+ bindex <= bend && sz < KMALLOC_MAX_SIZE;
-+ bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry && d_is_positive(h_dentry))
-+ sz += i_size_read(d_inode(h_dentry));
-+ }
-+ }
-+ if (sz < KMALLOC_MAX_SIZE)
-+ sz = roundup_pow_of_two(sz);
-+ if (sz > KMALLOC_MAX_SIZE)
-+ sz = KMALLOC_MAX_SIZE;
-+ else if (sz < NAME_MAX) {
-+ BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
-+ sz = AUFS_RDBLK_DEF;
-+ }
-+ return sz;
-+}
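
To make the sizing rule in au_dir_size() concrete: lower directory sizes of 4096 and 1000 bytes sum to 5096, which is rounded up to the next power of two, 8192. A sum at or above KMALLOC_MAX_SIZE is clamped to KMALLOC_MAX_SIZE, and anything that rounds to less than NAME_MAX falls back to AUFS_RDBLK_DEF (512 bytes by default; the BUILD_BUG_ON guarantees it is at least NAME_MAX).
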
-+
-+struct au_dir_ts_arg {
-+ struct dentry *dentry;
-+ aufs_bindex_t brid;
-+};
-+
-+static void au_do_dir_ts(void *arg)
-+{
-+ struct au_dir_ts_arg *a = arg;
-+ struct au_dtime dt;
-+ struct path h_path;
-+ struct inode *dir, *h_dir;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct au_hinode *hdir;
-+ int err;
-+ aufs_bindex_t bstart, bindex;
-+
-+ sb = a->dentry->d_sb;
-+ if (d_really_is_negative(a->dentry))
-+ goto out;
-+ aufs_read_lock(a->dentry, AuLock_DW | AuLock_DIR); /* noflush */
-+
-+ /* no dir->i_mutex lock */
-+ dir = d_inode(a->dentry);
-+ bstart = au_ibstart(dir);
-+ bindex = au_br_index(sb, a->brid);
-+ if (bindex < bstart)
-+ goto out_unlock;
-+
-+ br = au_sbr(sb, bindex);
-+ h_path.dentry = au_h_dptr(a->dentry, bindex);
-+ if (!h_path.dentry)
-+ goto out_unlock;
-+ h_path.mnt = au_br_mnt(br);
-+ au_dtime_store(&dt, a->dentry, &h_path);
-+
-+ br = au_sbr(sb, bstart);
-+ if (!au_br_writable(br->br_perm))
-+ goto out_unlock;
-+ h_path.dentry = au_h_dptr(a->dentry, bstart);
-+ h_path.mnt = au_br_mnt(br);
-+ err = vfsub_mnt_want_write(h_path.mnt);
-+ if (err)
-+ goto out_unlock;
-+ hdir = au_hi(dir, bstart);
-+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ h_dir = au_h_iptr(dir, bstart);
-+ if (h_dir->i_nlink
-+ && timespec_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) {
-+ dt.dt_h_path = h_path;
-+ au_dtime_revert(&dt);
-+ }
-+ au_hn_imtx_unlock(hdir);
-+ vfsub_mnt_drop_write(h_path.mnt);
-+ au_cpup_attr_timesizes(dir);
-+
-+out_unlock:
-+ aufs_read_unlock(a->dentry, AuLock_DW);
-+out:
-+ dput(a->dentry);
-+ au_nwt_done(&au_sbi(sb)->si_nowait);
-+ kfree(arg);
-+}
-+
-+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex)
-+{
-+ int perm, wkq_err;
-+ aufs_bindex_t bstart;
-+ struct au_dir_ts_arg *arg;
-+ struct dentry *dentry;
-+ struct super_block *sb;
-+
-+ IMustLock(dir);
-+
-+ dentry = d_find_any_alias(dir);
-+ AuDebugOn(!dentry);
-+ sb = dentry->d_sb;
-+ bstart = au_ibstart(dir);
-+ if (bstart == bindex) {
-+ au_cpup_attr_timesizes(dir);
-+ goto out;
-+ }
-+
-+ perm = au_sbr_perm(sb, bstart);
-+ if (!au_br_writable(perm))
-+ goto out;
-+
-+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
-+ if (!arg)
-+ goto out;
-+
-+ arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */
-+ arg->brid = au_sbr_id(sb, bindex);
-+ wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0);
-+ if (unlikely(wkq_err)) {
-+ pr_err("wkq %d\n", wkq_err);
-+ dput(dentry);
-+ kfree(arg);
-+ }
-+
-+out:
-+ dput(dentry);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int reopen_dir(struct file *file)
-+{
-+ int err;
-+ unsigned int flags;
-+ aufs_bindex_t bindex, btail, bstart;
-+ struct dentry *dentry, *h_dentry;
-+ struct file *h_file;
-+
-+ /* open all lower dirs */
-+ dentry = file->f_path.dentry;
-+ bstart = au_dbstart(dentry);
-+ for (bindex = au_fbstart(file); bindex < bstart; bindex++)
-+ au_set_h_fptr(file, bindex, NULL);
-+ au_set_fbstart(file, bstart);
-+
-+ btail = au_dbtaildir(dentry);
-+ for (bindex = au_fbend_dir(file); btail < bindex; bindex--)
-+ au_set_h_fptr(file, bindex, NULL);
-+ au_set_fbend_dir(file, btail);
-+
-+ flags = vfsub_file_flags(file);
-+ for (bindex = bstart; bindex <= btail; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+ h_file = au_hf_dir(file, bindex);
-+ if (h_file)
-+ continue;
-+
-+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out; /* close all? */
-+ au_set_h_fptr(file, bindex, h_file);
-+ }
-+ au_update_figen(file);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ err = 0;
-+
-+out:
-+ return err;
-+}
-+
-+static int do_open_dir(struct file *file, int flags, struct file *h_file)
-+{
-+ int err;
-+ aufs_bindex_t bindex, btail;
-+ struct dentry *dentry, *h_dentry;
-+
-+ FiMustWriteLock(file);
-+ AuDebugOn(h_file);
-+
-+ err = 0;
-+ dentry = file->f_path.dentry;
-+ file->f_version = d_inode(dentry)->i_version;
-+ bindex = au_dbstart(dentry);
-+ au_set_fbstart(file, bindex);
-+ btail = au_dbtaildir(dentry);
-+ au_set_fbend_dir(file, btail);
-+ for (; !err && bindex <= btail; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!h_dentry)
-+ continue;
-+
-+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
-+ if (IS_ERR(h_file)) {
-+ err = PTR_ERR(h_file);
-+ break;
-+ }
-+ au_set_h_fptr(file, bindex, h_file);
-+ }
-+ au_update_figen(file);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ if (!err)
-+ return 0; /* success */
-+
-+ /* close all */
-+ for (bindex = au_fbstart(file); bindex <= btail; bindex++)
-+ au_set_h_fptr(file, bindex, NULL);
-+ au_set_fbstart(file, -1);
-+ au_set_fbend_dir(file, -1);
-+
-+ return err;
-+}
-+
-+static int aufs_open_dir(struct inode *inode __maybe_unused,
-+ struct file *file)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct au_fidir *fidir;
-+
-+ err = -ENOMEM;
-+ sb = file->f_path.dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ fidir = au_fidir_alloc(sb);
-+ if (fidir) {
-+ struct au_do_open_args args = {
-+ .open = do_open_dir,
-+ .fidir = fidir
-+ };
-+ err = au_do_open(file, &args);
-+ if (unlikely(err))
-+ kfree(fidir);
-+ }
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+static int aufs_release_dir(struct inode *inode __maybe_unused,
-+ struct file *file)
-+{
-+ struct au_vdir *vdir_cache;
-+ struct au_finfo *finfo;
-+ struct au_fidir *fidir;
-+ aufs_bindex_t bindex, bend;
-+
-+ finfo = au_fi(file);
-+ fidir = finfo->fi_hdir;
-+ if (fidir) {
-+ au_sphl_del(&finfo->fi_hlist,
-+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
-+ vdir_cache = fidir->fd_vdir_cache; /* lock-free */
-+ if (vdir_cache)
-+ au_vdir_free(vdir_cache);
-+
-+ bindex = finfo->fi_btop;
-+ if (bindex >= 0) {
-+ /*
-+ * calls fput() instead of filp_close(),
-+ * since no dnotify or lock for the lower file.
-+ */
-+ bend = fidir->fd_bbot;
-+ for (; bindex <= bend; bindex++)
-+ au_set_h_fptr(file, bindex, NULL);
-+ }
-+ kfree(fidir);
-+ finfo->fi_hdir = NULL;
-+ }
-+ au_finfo_fin(file);
-+ return 0;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_do_flush_dir(struct file *file, fl_owner_t id)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend;
-+ struct file *h_file;
-+
-+ err = 0;
-+ bend = au_fbend_dir(file);
-+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
-+ h_file = au_hf_dir(file, bindex);
-+ if (h_file)
-+ err = vfsub_flush(h_file, id);
-+ }
-+ return err;
-+}
-+
-+static int aufs_flush_dir(struct file *file, fl_owner_t id)
-+{
-+ return au_do_flush(file, id, au_do_flush_dir);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
-+{
-+ int err;
-+ aufs_bindex_t bend, bindex;
-+ struct inode *inode;
-+ struct super_block *sb;
-+
-+ err = 0;
-+ sb = dentry->d_sb;
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+ bend = au_dbend(dentry);
-+ for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) {
-+ struct path h_path;
-+
-+ if (au_test_ro(sb, bindex, inode))
-+ continue;
-+ h_path.dentry = au_h_dptr(dentry, bindex);
-+ if (!h_path.dentry)
-+ continue;
-+
-+ h_path.mnt = au_sbr_mnt(sb, bindex);
-+ err = vfsub_fsync(NULL, &h_path, datasync);
-+ }
-+
-+ return err;
-+}
-+
-+static int au_do_fsync_dir(struct file *file, int datasync)
-+{
-+ int err;
-+ aufs_bindex_t bend, bindex;
-+ struct file *h_file;
-+ struct super_block *sb;
-+ struct inode *inode;
-+
-+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
-+ if (unlikely(err))
-+ goto out;
-+
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ bend = au_fbend_dir(file);
-+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
-+ h_file = au_hf_dir(file, bindex);
-+ if (!h_file || au_test_ro(sb, bindex, inode))
-+ continue;
-+
-+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * @file may be NULL historically; the current code dereferences it
-+ * unconditionally below, so in practice it must be non-NULL here.
-+ */
-+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
-+ int datasync)
-+{
-+ int err;
-+ struct dentry *dentry;
-+ struct inode *inode;
-+ struct super_block *sb;
-+ struct mutex *mtx;
-+
-+ err = 0;
-+ dentry = file->f_path.dentry;
-+ inode = d_inode(dentry);
-+ mtx = &inode->i_mutex;
-+ mutex_lock(mtx);
-+ sb = dentry->d_sb;
-+ si_noflush_read_lock(sb);
-+ if (file)
-+ err = au_do_fsync_dir(file, datasync);
-+ else {
-+ di_write_lock_child(dentry);
-+ err = au_do_fsync_dir_no_file(dentry, datasync);
-+ }
-+ au_cpup_attr_timesizes(inode);
-+ di_write_unlock(dentry);
-+ if (file)
-+ fi_write_unlock(file);
-+
-+ si_read_unlock(sb);
-+ mutex_unlock(mtx);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_iterate(struct file *file, struct dir_context *ctx)
-+{
-+ int err;
-+ struct dentry *dentry;
-+ struct inode *inode, *h_inode;
-+ struct super_block *sb;
-+
-+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos);
-+
-+ dentry = file->f_path.dentry;
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+
-+ sb = dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_alive_dir(dentry);
-+ if (!err)
-+ err = au_vdir_init(file);
-+ di_downgrade_lock(dentry, AuLock_IR);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ if (!au_test_nfsd()) {
-+ err = au_vdir_fill_de(file, ctx);
-+ fsstack_copy_attr_atime(inode, h_inode);
-+ } else {
-+ /*
-+ * nfsd filldir may call lookup_one_len(), vfs_getattr(),
-+ * encode_fh() and others.
-+ */
-+ atomic_inc(&h_inode->i_count);
-+ di_read_unlock(dentry, AuLock_IR);
-+ si_read_unlock(sb);
-+ err = au_vdir_fill_de(file, ctx);
-+ fsstack_copy_attr_atime(inode, h_inode);
-+ fi_write_unlock(file);
-+ iput(h_inode);
-+
-+ AuTraceErr(err);
-+ return err;
-+ }
-+
-+out_unlock:
-+ di_read_unlock(dentry, AuLock_IR);
-+ fi_write_unlock(file);
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define AuTestEmpty_WHONLY 1
-+#define AuTestEmpty_CALLED (1 << 1)
-+#define AuTestEmpty_SHWH (1 << 2)
-+#define au_ftest_testempty(flags, name) ((flags) & AuTestEmpty_##name)
-+#define au_fset_testempty(flags, name) \
-+ do { (flags) |= AuTestEmpty_##name; } while (0)
-+#define au_fclr_testempty(flags, name) \
-+ do { (flags) &= ~AuTestEmpty_##name; } while (0)
-+
-+#ifndef CONFIG_AUFS_SHWH
-+#undef AuTestEmpty_SHWH
-+#define AuTestEmpty_SHWH 0
-+#endif
-+
-+struct test_empty_arg {
-+ struct dir_context ctx;
-+ struct au_nhash *whlist;
-+ unsigned int flags;
-+ int err;
-+ aufs_bindex_t bindex;
-+};
-+
-+static int test_empty_cb(struct dir_context *ctx, const char *__name,
-+ int namelen, loff_t offset __maybe_unused, u64 ino,
-+ unsigned int d_type)
-+{
-+ struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg,
-+ ctx);
-+ char *name = (void *)__name;
-+
-+ arg->err = 0;
-+ au_fset_testempty(arg->flags, CALLED);
-+ /* smp_mb(); */
-+ if (name[0] == '.'
-+ && (namelen == 1 || (name[1] == '.' && namelen == 2)))
-+ goto out; /* success */
-+
-+ if (namelen <= AUFS_WH_PFX_LEN
-+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
-+ if (au_ftest_testempty(arg->flags, WHONLY)
-+ && !au_nhash_test_known_wh(arg->whlist, name, namelen))
-+ arg->err = -ENOTEMPTY;
-+ goto out;
-+ }
-+
-+ name += AUFS_WH_PFX_LEN;
-+ namelen -= AUFS_WH_PFX_LEN;
-+ if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
-+ arg->err = au_nhash_append_wh
-+ (arg->whlist, name, namelen, ino, d_type, arg->bindex,
-+ au_ftest_testempty(arg->flags, SHWH));
-+
-+out:
-+ /* smp_mb(); */
-+ AuTraceErr(arg->err);
-+ return arg->err;
-+}
-+
-+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
-+{
-+ int err;
-+ struct file *h_file;
-+
-+ h_file = au_h_open(dentry, arg->bindex,
-+ O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
-+ /*file*/NULL, /*force_wr*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = 0;
-+ if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
-+ && !file_inode(h_file)->i_nlink)
-+ goto out_put;
-+
-+ do {
-+ arg->err = 0;
-+ au_fclr_testempty(arg->flags, CALLED);
-+ /* smp_mb(); */
-+ err = vfsub_iterate_dir(h_file, &arg->ctx);
-+ if (err >= 0)
-+ err = arg->err;
-+ } while (!err && au_ftest_testempty(arg->flags, CALLED));
-+
-+out_put:
-+ fput(h_file);
-+ au_sbr_put(dentry->d_sb, arg->bindex);
-+out:
-+ return err;
-+}
-+
-+struct do_test_empty_args {
-+ int *errp;
-+ struct dentry *dentry;
-+ struct test_empty_arg *arg;
-+};
-+
-+static void call_do_test_empty(void *args)
-+{
-+ struct do_test_empty_args *a = args;
-+ *a->errp = do_test_empty(a->dentry, a->arg);
-+}
-+
-+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
-+{
-+ int err, wkq_err;
-+ struct dentry *h_dentry;
-+ struct inode *h_inode;
-+
-+ h_dentry = au_h_dptr(dentry, arg->bindex);
-+ h_inode = d_inode(h_dentry);
-+ /* todo: i_mode changes anytime? */
-+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
-+ err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
-+ mutex_unlock(&h_inode->i_mutex);
-+ if (!err)
-+ err = do_test_empty(dentry, arg);
-+ else {
-+ struct do_test_empty_args args = {
-+ .errp = &err,
-+ .dentry = dentry,
-+ .arg = arg
-+ };
-+ unsigned int flags = arg->flags;
-+
-+ wkq_err = au_wkq_wait(call_do_test_empty, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ arg->flags = flags;
-+ }
-+
-+ return err;
-+}
-+
-+int au_test_empty_lower(struct dentry *dentry)
-+{
-+ int err;
-+ unsigned int rdhash;
-+ aufs_bindex_t bindex, bstart, btail;
-+ struct au_nhash whlist;
-+ struct test_empty_arg arg = {
-+ .ctx = {
-+ .actor = test_empty_cb
-+ }
-+ };
-+ int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg);
-+
-+ SiMustAnyLock(dentry->d_sb);
-+
-+ rdhash = au_sbi(dentry->d_sb)->si_rdhash;
-+ if (!rdhash)
-+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
-+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+
-+ arg.flags = 0;
-+ arg.whlist = &whlist;
-+ bstart = au_dbstart(dentry);
-+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
-+ au_fset_testempty(arg.flags, SHWH);
-+ test_empty = do_test_empty;
-+ if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1))
-+ test_empty = sio_test_empty;
-+ arg.bindex = bstart;
-+ err = test_empty(dentry, &arg);
-+ if (unlikely(err))
-+ goto out_whlist;
-+
-+ au_fset_testempty(arg.flags, WHONLY);
-+ btail = au_dbtaildir(dentry);
-+ for (bindex = bstart + 1; !err && bindex <= btail; bindex++) {
-+ struct dentry *h_dentry;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry && d_is_positive(h_dentry)) {
-+ arg.bindex = bindex;
-+ err = test_empty(dentry, &arg);
-+ }
-+ }
-+
-+out_whlist:
-+ au_nhash_wh_free(&whlist);
-+out:
-+ return err;
-+}
-+
-+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
-+{
-+ int err;
-+ struct test_empty_arg arg = {
-+ .ctx = {
-+ .actor = test_empty_cb
-+ }
-+ };
-+ aufs_bindex_t bindex, btail;
-+
-+ err = 0;
-+ arg.whlist = whlist;
-+ arg.flags = AuTestEmpty_WHONLY;
-+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
-+ au_fset_testempty(arg.flags, SHWH);
-+ btail = au_dbtaildir(dentry);
-+ for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) {
-+ struct dentry *h_dentry;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry && d_is_positive(h_dentry)) {
-+ arg.bindex = bindex;
-+ err = sio_test_empty(dentry, &arg);
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+const struct file_operations aufs_dir_fop = {
-+ .owner = THIS_MODULE,
-+ .llseek = default_llseek,
-+ .read = generic_read_dir,
-+ .iterate = aufs_iterate,
-+ .unlocked_ioctl = aufs_ioctl_dir,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = aufs_compat_ioctl_dir,
-+#endif
-+ .open = aufs_open_dir,
-+ .release = aufs_release_dir,
-+ .flush = aufs_flush_dir,
-+ .fsync = aufs_fsync_dir
-+};
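
A note on the empty-directory test above: test_empty_cb() is a standard readdir "actor". aufs embeds a struct dir_context at the head of test_empty_arg, points .actor at the callback, and recovers its private state with container_of() on every entry. A minimal sketch of the same kernel pattern (my_ctx and count_actor are hypothetical names):

	#include <linux/fs.h>

	struct my_ctx {
		struct dir_context ctx;
		int count;
	};

	static int count_actor(struct dir_context *ctx, const char *name,
			       int namelen, loff_t offset, u64 ino,
			       unsigned int d_type)
	{
		struct my_ctx *m = container_of(ctx, struct my_ctx, ctx);

		m->count++;
		return 0;	/* a non-zero return stops the iteration */
	}

Passing &my_ctx.ctx to iterate_dir() (or, as here, to vfsub_iterate_dir()) then invokes the actor once per directory entry.
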
-diff -Nur linux-4.1.10.orig/fs/aufs/dir.h linux-4.1.10/fs/aufs/dir.h
---- linux-4.1.10.orig/fs/aufs/dir.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dir.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,131 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * directory operations
-+ */
-+
-+#ifndef __AUFS_DIR_H__
-+#define __AUFS_DIR_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fs.h>
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* need to be faster and smaller */
-+
-+struct au_nhash {
-+ unsigned int nh_num;
-+ struct hlist_head *nh_head;
-+};
-+
-+struct au_vdir_destr {
-+ unsigned char len;
-+ unsigned char name[0];
-+} __packed;
-+
-+struct au_vdir_dehstr {
-+ struct hlist_node hash;
-+ struct au_vdir_destr *str;
-+} ____cacheline_aligned_in_smp;
-+
-+struct au_vdir_de {
-+ ino_t de_ino;
-+ unsigned char de_type;
-+ /* caution: packed */
-+ struct au_vdir_destr de_str;
-+} __packed;
-+
-+struct au_vdir_wh {
-+ struct hlist_node wh_hash;
-+#ifdef CONFIG_AUFS_SHWH
-+ ino_t wh_ino;
-+ aufs_bindex_t wh_bindex;
-+ unsigned char wh_type;
-+#else
-+ aufs_bindex_t wh_bindex;
-+#endif
-+ /* caution: packed */
-+ struct au_vdir_destr wh_str;
-+} __packed;
-+
-+union au_vdir_deblk_p {
-+ unsigned char *deblk;
-+ struct au_vdir_de *de;
-+};
-+
-+struct au_vdir {
-+ unsigned char **vd_deblk;
-+ unsigned long vd_nblk;
-+ struct {
-+ unsigned long ul;
-+ union au_vdir_deblk_p p;
-+ } vd_last;
-+
-+ unsigned long vd_version;
-+ unsigned int vd_deblk_sz;
-+ unsigned long vd_jiffy;
-+} ____cacheline_aligned_in_smp;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* dir.c */
-+extern const struct file_operations aufs_dir_fop;
-+void au_add_nlink(struct inode *dir, struct inode *h_dir);
-+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
-+loff_t au_dir_size(struct file *file, struct dentry *dentry);
-+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc);
-+int au_test_empty_lower(struct dentry *dentry);
-+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
-+
-+/* vdir.c */
-+unsigned int au_rdhash_est(loff_t sz);
-+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
-+void au_nhash_wh_free(struct au_nhash *whlist);
-+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
-+ int limit);
-+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
-+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
-+ unsigned int d_type, aufs_bindex_t bindex,
-+ unsigned char shwh);
-+void au_vdir_free(struct au_vdir *vdir);
-+int au_vdir_init(struct file *file);
-+int au_vdir_fill_de(struct file *file, struct dir_context *ctx);
-+
-+/* ioctl.c */
-+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
-+
-+#ifdef CONFIG_AUFS_RDU
-+/* rdu.c */
-+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-+#ifdef CONFIG_COMPAT
-+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
-+ unsigned long arg);
-+#endif
-+#else
-+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+#ifdef CONFIG_COMPAT
-+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+#endif
-+#endif
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_DIR_H__ */
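
The AuStub() lines above keep callers free of #ifdef CONFIG_AUFS_RDU: when the option is off, the macro (defined elsewhere in aufs) generates an inline stub in place of the real implementation, presumably along the lines of:

	static inline long au_rdu_ioctl(struct file *file, unsigned int cmd,
					unsigned long arg)
	{ return -EINVAL; }
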
-diff -Nur linux-4.1.10.orig/fs/aufs/dynop.c linux-4.1.10/fs/aufs/dynop.c
---- linux-4.1.10.orig/fs/aufs/dynop.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dynop.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,369 @@
-+/*
-+ * Copyright (C) 2010-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * dynamically customizable operations for regular files
-+ */
-+
-+#include "aufs.h"
-+
-+#define DyPrSym(key) AuDbgSym(key->dk_op.dy_hop)
-+
-+/*
-+ * How large will these lists be?
-+ * Usually just a few elements, 20-30 at most for each, I guess.
-+ */
-+static struct au_splhead dynop[AuDyLast];
-+
-+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op)
-+{
-+ struct au_dykey *key, *tmp;
-+ struct list_head *head;
-+
-+ key = NULL;
-+ head = &spl->head;
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(tmp, head, dk_list)
-+ if (tmp->dk_op.dy_hop == h_op) {
-+ key = tmp;
-+ kref_get(&key->dk_kref);
-+ break;
-+ }
-+ rcu_read_unlock();
-+
-+ return key;
-+}
-+
-+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
-+{
-+ struct au_dykey **k, *found;
-+ const void *h_op = key->dk_op.dy_hop;
-+ int i;
-+
-+ found = NULL;
-+ k = br->br_dykey;
-+ for (i = 0; i < AuBrDynOp; i++)
-+ if (k[i]) {
-+ if (k[i]->dk_op.dy_hop == h_op) {
-+ found = k[i];
-+ break;
-+ }
-+ } else
-+ break;
-+ if (!found) {
-+ spin_lock(&br->br_dykey_lock);
-+ for (; i < AuBrDynOp; i++)
-+ if (k[i]) {
-+ if (k[i]->dk_op.dy_hop == h_op) {
-+ found = k[i];
-+ break;
-+ }
-+ } else {
-+ k[i] = key;
-+ break;
-+ }
-+ spin_unlock(&br->br_dykey_lock);
-+ BUG_ON(i == AuBrDynOp); /* expand the array */
-+ }
-+
-+ return found;
-+}
-+
-+/* kref_get() if @key is already added */
-+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key)
-+{
-+ struct au_dykey *tmp, *found;
-+ struct list_head *head;
-+ const void *h_op = key->dk_op.dy_hop;
-+
-+ found = NULL;
-+ head = &spl->head;
-+ spin_lock(&spl->spin);
-+ list_for_each_entry(tmp, head, dk_list)
-+ if (tmp->dk_op.dy_hop == h_op) {
-+ kref_get(&tmp->dk_kref);
-+ found = tmp;
-+ break;
-+ }
-+ if (!found)
-+ list_add_rcu(&key->dk_list, head);
-+ spin_unlock(&spl->spin);
-+
-+ if (!found)
-+ DyPrSym(key);
-+ return found;
-+}
-+
-+static void dy_free_rcu(struct rcu_head *rcu)
-+{
-+ struct au_dykey *key;
-+
-+ key = container_of(rcu, struct au_dykey, dk_rcu);
-+ DyPrSym(key);
-+ kfree(key);
-+}
-+
-+static void dy_free(struct kref *kref)
-+{
-+ struct au_dykey *key;
-+ struct au_splhead *spl;
-+
-+ key = container_of(kref, struct au_dykey, dk_kref);
-+ spl = dynop + key->dk_op.dy_type;
-+ au_spl_del_rcu(&key->dk_list, spl);
-+ call_rcu(&key->dk_rcu, dy_free_rcu);
-+}
-+
-+void au_dy_put(struct au_dykey *key)
-+{
-+ kref_put(&key->dk_kref, dy_free);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define DyDbgSize(cnt, op) AuDebugOn(cnt != sizeof(op)/sizeof(void *))
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+#define DyDbgDeclare(cnt) unsigned int cnt = 0
-+#define DyDbgInc(cnt) do { cnt++; } while (0)
-+#else
-+#define DyDbgDeclare(cnt) do {} while (0)
-+#define DyDbgInc(cnt) do {} while (0)
-+#endif
-+
-+#define DySet(func, dst, src, h_op, h_sb) do { \
-+ DyDbgInc(cnt); \
-+ if (h_op->func) { \
-+ if (src.func) \
-+ dst.func = src.func; \
-+ else \
-+ AuDbg("%s %s\n", au_sbtype(h_sb), #func); \
-+ } \
-+} while (0)
-+
-+#define DySetForce(func, dst, src) do { \
-+ AuDebugOn(!src.func); \
-+ DyDbgInc(cnt); \
-+ dst.func = src.func; \
-+} while (0)
-+
-+#define DySetAop(func) \
-+ DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
-+#define DySetAopForce(func) \
-+ DySetForce(func, dyaop->da_op, aufs_aop)
-+
-+static void dy_aop(struct au_dykey *key, const void *h_op,
-+ struct super_block *h_sb __maybe_unused)
-+{
-+ struct au_dyaop *dyaop = (void *)key;
-+ const struct address_space_operations *h_aop = h_op;
-+ DyDbgDeclare(cnt);
-+
-+ AuDbg("%s\n", au_sbtype(h_sb));
-+
-+ DySetAop(writepage);
-+ DySetAopForce(readpage); /* force */
-+ DySetAop(writepages);
-+ DySetAop(set_page_dirty);
-+ DySetAop(readpages);
-+ DySetAop(write_begin);
-+ DySetAop(write_end);
-+ DySetAop(bmap);
-+ DySetAop(invalidatepage);
-+ DySetAop(releasepage);
-+ DySetAop(freepage);
-+ /* this one will be changed according to an aufs mount option */
-+ DySetAop(direct_IO);
-+ DySetAop(migratepage);
-+ DySetAop(launder_page);
-+ DySetAop(is_partially_uptodate);
-+ DySetAop(is_dirty_writeback);
-+ DySetAop(error_remove_page);
-+ DySetAop(swap_activate);
-+ DySetAop(swap_deactivate);
-+
-+ DyDbgSize(cnt, *h_aop);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void dy_bug(struct kref *kref)
-+{
-+ BUG();
-+}
-+
-+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
-+{
-+ struct au_dykey *key, *old;
-+ struct au_splhead *spl;
-+ struct op {
-+ unsigned int sz;
-+ void (*set)(struct au_dykey *key, const void *h_op,
-+ struct super_block *h_sb __maybe_unused);
-+ };
-+ static const struct op a[] = {
-+ [AuDy_AOP] = {
-+ .sz = sizeof(struct au_dyaop),
-+ .set = dy_aop
-+ }
-+ };
-+ const struct op *p;
-+
-+ spl = dynop + op->dy_type;
-+ key = dy_gfind_get(spl, op->dy_hop);
-+ if (key)
-+ goto out_add; /* success */
-+
-+ p = a + op->dy_type;
-+ key = kzalloc(p->sz, GFP_NOFS);
-+ if (unlikely(!key)) {
-+ key = ERR_PTR(-ENOMEM);
-+ goto out;
-+ }
-+
-+ key->dk_op.dy_hop = op->dy_hop;
-+ kref_init(&key->dk_kref);
-+ p->set(key, op->dy_hop, au_br_sb(br));
-+ old = dy_gadd(spl, key);
-+ if (old) {
-+ kfree(key);
-+ key = old;
-+ }
-+
-+out_add:
-+ old = dy_bradd(br, key);
-+ if (old)
-+ /* its ref-count should never be zero here */
-+ kref_put(&key->dk_kref, dy_bug);
-+out:
-+ return key;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
-+ * This behaviour is necessary to return an error from open(O_DIRECT) instead
-+ * of the succeeding I/O. The dio mount option enables O_DIRECT and makes
-+ * open(O_DIRECT) always succeed, but the succeeding I/O may return an error.
-+ * See the aufs manual for details.
-+ */
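-+/*
-+ * An illustrative example (the branch paths are hypothetical):
-+ *	mount -t aufs -o br=/rw:/ro,dio none /mnt
-+ * enables O_DIRECT for the whole mount. dy_adx() below switches direct_IO
-+ * on or off in a cached dyaop, and au_dy_arefresh() applies such a change
-+ * to every cached entry, e.g. after a remount changing the dio option.
-+ */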
-+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
-+{
-+ if (!do_dx)
-+ dyaop->da_op.direct_IO = NULL;
-+ else
-+ dyaop->da_op.direct_IO = aufs_aop.direct_IO;
-+}
-+
-+static struct au_dyaop *dy_aget(struct au_branch *br,
-+ const struct address_space_operations *h_aop,
-+ int do_dx)
-+{
-+ struct au_dyaop *dyaop;
-+ struct au_dynop op;
-+
-+ op.dy_type = AuDy_AOP;
-+ op.dy_haop = h_aop;
-+ dyaop = (void *)dy_get(&op, br);
-+ if (IS_ERR(dyaop))
-+ goto out;
-+ dy_adx(dyaop, do_dx);
-+
-+out:
-+ return dyaop;
-+}
-+
-+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
-+ struct inode *h_inode)
-+{
-+ int err, do_dx;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct au_dyaop *dyaop;
-+
-+ AuDebugOn(!S_ISREG(h_inode->i_mode));
-+ IiMustWriteLock(inode);
-+
-+ sb = inode->i_sb;
-+ br = au_sbr(sb, bindex);
-+ do_dx = !!au_opt_test(au_mntflags(sb), DIO);
-+ dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
-+ err = PTR_ERR(dyaop);
-+ if (IS_ERR(dyaop))
-+ /* unnecessary to call dy_fput() */
-+ goto out;
-+
-+ err = 0;
-+ inode->i_mapping->a_ops = &dyaop->da_op;
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * Is it safe to replace a_ops while the inode/file is in operation?
-+ * Yes, I hope so.
-+ */
-+int au_dy_irefresh(struct inode *inode)
-+{
-+ int err;
-+ aufs_bindex_t bstart;
-+ struct inode *h_inode;
-+
-+ err = 0;
-+ if (S_ISREG(inode->i_mode)) {
-+ bstart = au_ibstart(inode);
-+ h_inode = au_h_iptr(inode, bstart);
-+ err = au_dy_iaop(inode, bstart, h_inode);
-+ }
-+ return err;
-+}
-+
-+void au_dy_arefresh(int do_dx)
-+{
-+ struct au_splhead *spl;
-+ struct list_head *head;
-+ struct au_dykey *key;
-+
-+ spl = dynop + AuDy_AOP;
-+ head = &spl->head;
-+ spin_lock(&spl->spin);
-+ list_for_each_entry(key, head, dk_list)
-+ dy_adx((void *)key, do_dx);
-+ spin_unlock(&spl->spin);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void __init au_dy_init(void)
-+{
-+ int i;
-+
-+	/* make sure that a 'struct au_dykey *' can be cast to any key type */
-+ BUILD_BUG_ON(offsetof(struct au_dyaop, da_key));
-+
-+ for (i = 0; i < AuDyLast; i++)
-+ au_spl_init(dynop + i);
-+}
-+
-+void au_dy_fin(void)
-+{
-+ int i;
-+
-+ for (i = 0; i < AuDyLast; i++)
-+ WARN_ON(!list_empty(&dynop[i].head));
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/dynop.h linux-4.1.10/fs/aufs/dynop.h
---- linux-4.1.10.orig/fs/aufs/dynop.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/dynop.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,74 @@
-+/*
-+ * Copyright (C) 2010-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * dynamically customizable operations (for regular files only)
-+ */
-+
-+#ifndef __AUFS_DYNOP_H__
-+#define __AUFS_DYNOP_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fs.h>
-+#include <linux/kref.h>
-+
-+enum {AuDy_AOP, AuDyLast};
-+
-+struct au_dynop {
-+ int dy_type;
-+ union {
-+ const void *dy_hop;
-+ const struct address_space_operations *dy_haop;
-+ };
-+};
-+
-+struct au_dykey {
-+ union {
-+ struct list_head dk_list;
-+ struct rcu_head dk_rcu;
-+ };
-+ struct au_dynop dk_op;
-+
-+ /*
-+	 * while this key is in the branch-local array, its kref is held; when
-+	 * the branch is removed, the kref is put.
-+ */
-+ struct kref dk_kref;
-+};
-+
-+/* kept separate (no union) since the key types' sizes differ greatly */
-+struct au_dyaop {
-+ struct au_dykey da_key;
-+ struct address_space_operations da_op; /* not const */
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* dynop.c */
-+struct au_branch;
-+void au_dy_put(struct au_dykey *key);
-+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
-+ struct inode *h_inode);
-+int au_dy_irefresh(struct inode *inode);
-+void au_dy_arefresh(int do_dio);
-+
-+void __init au_dy_init(void);
-+void au_dy_fin(void);
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_DYNOP_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/export.c linux-4.1.10/fs/aufs/export.c
---- linux-4.1.10.orig/fs/aufs/export.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/export.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,832 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * export via nfs
-+ */
-+
-+#include <linux/exportfs.h>
-+#include <linux/fs_struct.h>
-+#include <linux/namei.h>
-+#include <linux/nsproxy.h>
-+#include <linux/random.h>
-+#include <linux/writeback.h>
-+#include "../fs/mount.h"
-+#include "aufs.h"
-+
-+union conv {
-+#ifdef CONFIG_AUFS_INO_T_64
-+ __u32 a[2];
-+#else
-+ __u32 a[1];
-+#endif
-+ ino_t ino;
-+};
-+
-+static ino_t decode_ino(__u32 *a)
-+{
-+ union conv u;
-+
-+ BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
-+ u.a[0] = a[0];
-+#ifdef CONFIG_AUFS_INO_T_64
-+ u.a[1] = a[1];
-+#endif
-+ return u.ino;
-+}
-+
-+static void encode_ino(__u32 *a, ino_t ino)
-+{
-+ union conv u;
-+
-+ u.ino = ino;
-+ a[0] = u.a[0];
-+#ifdef CONFIG_AUFS_INO_T_64
-+ a[1] = u.a[1];
-+#endif
-+}
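-+/*
-+ * e.g. with CONFIG_AUFS_INO_T_64 on a little-endian host, encoding
-+ * ino 0x123456789a stores a[0] == 0x3456789a and a[1] == 0x00000012;
-+ * decode_ino() reassembles the identical value.
-+ */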
-+
-+/* NFS file handle */
-+enum {
-+ Fh_br_id,
-+ Fh_sigen,
-+#ifdef CONFIG_AUFS_INO_T_64
-+ /* support 64bit inode number */
-+ Fh_ino1,
-+ Fh_ino2,
-+ Fh_dir_ino1,
-+ Fh_dir_ino2,
-+#else
-+ Fh_ino1,
-+ Fh_dir_ino1,
-+#endif
-+ Fh_igen,
-+ Fh_h_type,
-+ Fh_tail,
-+
-+ Fh_ino = Fh_ino1,
-+ Fh_dir_ino = Fh_dir_ino1
-+};
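-+/*
-+ * with CONFIG_AUFS_INO_T_64, the raw handle words are laid out as:
-+ *	fh[0] br_id, fh[1] sigen, fh[2-3] ino, fh[4-5] dir_ino,
-+ *	fh[6] igen, fh[7] the lower fs handle type,
-+ *	fh[8...] the handle encoded by the lower fs.
-+ */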
-+
-+static int au_test_anon(struct dentry *dentry)
-+{
-+ /* note: read d_flags without d_lock */
-+ return !!(dentry->d_flags & DCACHE_DISCONNECTED);
-+}
-+
-+int au_test_nfsd(void)
-+{
-+ int ret;
-+ struct task_struct *tsk = current;
-+ char comm[sizeof(tsk->comm)];
-+
-+ ret = 0;
-+ if (tsk->flags & PF_KTHREAD) {
-+ get_task_comm(comm, tsk);
-+ ret = !strcmp(comm, "nfsd");
-+ }
-+
-+ return ret;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/* inode generation external table */
-+
-+void au_xigen_inc(struct inode *inode)
-+{
-+ loff_t pos;
-+ ssize_t sz;
-+ __u32 igen;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+
-+ sb = inode->i_sb;
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
-+
-+ sbinfo = au_sbi(sb);
-+ pos = inode->i_ino;
-+ pos *= sizeof(igen);
-+ igen = inode->i_generation + 1;
-+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
-+ sizeof(igen), &pos);
-+ if (sz == sizeof(igen))
-+ return; /* success */
-+
-+ if (unlikely(sz >= 0))
-+ AuIOErr("xigen error (%zd)\n", sz);
-+}
-+
-+int au_xigen_new(struct inode *inode)
-+{
-+ int err;
-+ loff_t pos;
-+ ssize_t sz;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+ struct file *file;
-+
-+ err = 0;
-+ /* todo: dirty, at mount time */
-+ if (inode->i_ino == AUFS_ROOT_INO)
-+ goto out;
-+ sb = inode->i_sb;
-+ SiMustAnyLock(sb);
-+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
-+ goto out;
-+
-+ err = -EFBIG;
-+ pos = inode->i_ino;
-+ if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
-+ AuIOErr1("too large i%lld\n", pos);
-+ goto out;
-+ }
-+ pos *= sizeof(inode->i_generation);
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ file = sbinfo->si_xigen;
-+ BUG_ON(!file);
-+
-+ if (vfsub_f_size_read(file)
-+ < pos + sizeof(inode->i_generation)) {
-+ inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
-+ sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
-+ sizeof(inode->i_generation), &pos);
-+ } else
-+ sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
-+ sizeof(inode->i_generation), &pos);
-+ if (sz == sizeof(inode->i_generation))
-+ goto out; /* success */
-+
-+ err = sz;
-+ if (unlikely(sz >= 0)) {
-+ err = -EIO;
-+ AuIOErr("xigen error (%zd)\n", sz);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int au_xigen_set(struct super_block *sb, struct file *base)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+ struct file *file;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ file = au_xino_create2(base, sbinfo->si_xigen);
-+ err = PTR_ERR(file);
-+ if (IS_ERR(file))
-+ goto out;
-+ err = 0;
-+ if (sbinfo->si_xigen)
-+ fput(sbinfo->si_xigen);
-+ sbinfo->si_xigen = file;
-+
-+out:
-+ return err;
-+}
-+
-+void au_xigen_clr(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ if (sbinfo->si_xigen) {
-+ fput(sbinfo->si_xigen);
-+ sbinfo->si_xigen = NULL;
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
-+ ino_t dir_ino)
-+{
-+ struct dentry *dentry, *d;
-+ struct inode *inode;
-+ unsigned int sigen;
-+
-+ dentry = NULL;
-+ inode = ilookup(sb, ino);
-+ if (!inode)
-+ goto out;
-+
-+ dentry = ERR_PTR(-ESTALE);
-+ sigen = au_sigen(sb);
-+ if (unlikely(is_bad_inode(inode)
-+ || IS_DEADDIR(inode)
-+ || sigen != au_iigen(inode, NULL)))
-+ goto out_iput;
-+
-+ dentry = NULL;
-+ if (!dir_ino || S_ISDIR(inode->i_mode))
-+ dentry = d_find_alias(inode);
-+ else {
-+ spin_lock(&inode->i_lock);
-+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
-+ spin_lock(&d->d_lock);
-+ if (!au_test_anon(d)
-+ && d_inode(d->d_parent)->i_ino == dir_ino) {
-+ dentry = dget_dlock(d);
-+ spin_unlock(&d->d_lock);
-+ break;
-+ }
-+ spin_unlock(&d->d_lock);
-+ }
-+ spin_unlock(&inode->i_lock);
-+ }
-+ if (unlikely(dentry && au_digen_test(dentry, sigen))) {
-+ /* need to refresh */
-+ dput(dentry);
-+ dentry = NULL;
-+ }
-+
-+out_iput:
-+ iput(inode);
-+out:
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* todo: dirty? */
-+/* if exportfs_decode_fh() passed vfsmount*, we could be happy */
-+
-+struct au_compare_mnt_args {
-+ /* input */
-+ struct super_block *sb;
-+
-+ /* output */
-+ struct vfsmount *mnt;
-+};
-+
-+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
-+{
-+ struct au_compare_mnt_args *a = arg;
-+
-+ if (mnt->mnt_sb != a->sb)
-+ return 0;
-+ a->mnt = mntget(mnt);
-+ return 1;
-+}
-+
-+static struct vfsmount *au_mnt_get(struct super_block *sb)
-+{
-+ int err;
-+ struct path root;
-+ struct au_compare_mnt_args args = {
-+ .sb = sb
-+ };
-+
-+ get_fs_root(current->fs, &root);
-+ rcu_read_lock();
-+ err = iterate_mounts(au_compare_mnt, &args, root.mnt);
-+ rcu_read_unlock();
-+ path_put(&root);
-+ AuDebugOn(!err);
-+ AuDebugOn(!args.mnt);
-+ return args.mnt;
-+}
-+
-+struct au_nfsd_si_lock {
-+ unsigned int sigen;
-+ aufs_bindex_t bindex, br_id;
-+ unsigned char force_lock;
-+};
-+
-+static int si_nfsd_read_lock(struct super_block *sb,
-+ struct au_nfsd_si_lock *nsi_lock)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+
-+ si_read_lock(sb, AuLock_FLUSH);
-+
-+ /* branch id may be wrapped around */
-+ err = 0;
-+ bindex = au_br_index(sb, nsi_lock->br_id);
-+ if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
-+ goto out; /* success */
-+
-+ err = -ESTALE;
-+ bindex = -1;
-+ if (!nsi_lock->force_lock)
-+ si_read_unlock(sb);
-+
-+out:
-+ nsi_lock->bindex = bindex;
-+ return err;
-+}
-+
-+struct find_name_by_ino {
-+ struct dir_context ctx;
-+ int called, found;
-+ ino_t ino;
-+ char *name;
-+ int namelen;
-+};
-+
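-+/*
-+ * dir_context actor, called by vfsub_iterate_dir() for each entry; it
-+ * copies the name whose inode number matches a->ino into a->name and
-+ * returns 1 to stop the iteration.
-+ */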
-+static int
-+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen,
-+ loff_t offset, u64 ino, unsigned int d_type)
-+{
-+ struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino,
-+ ctx);
-+
-+ a->called++;
-+ if (a->ino != ino)
-+ return 0;
-+
-+ memcpy(a->name, name, namelen);
-+ a->namelen = namelen;
-+ a->found = 1;
-+ return 1;
-+}
-+
-+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
-+ struct au_nfsd_si_lock *nsi_lock)
-+{
-+ struct dentry *dentry, *parent;
-+ struct file *file;
-+ struct inode *dir;
-+ struct find_name_by_ino arg = {
-+ .ctx = {
-+ .actor = find_name_by_ino
-+ }
-+ };
-+ int err;
-+
-+ parent = path->dentry;
-+ if (nsi_lock)
-+ si_read_unlock(parent->d_sb);
-+ file = vfsub_dentry_open(path, au_dir_roflags);
-+ dentry = (void *)file;
-+ if (IS_ERR(file))
-+ goto out;
-+
-+ dentry = ERR_PTR(-ENOMEM);
-+ arg.name = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!arg.name))
-+ goto out_file;
-+ arg.ino = ino;
-+ arg.found = 0;
-+ do {
-+ arg.called = 0;
-+ /* smp_mb(); */
-+ err = vfsub_iterate_dir(file, &arg.ctx);
-+ } while (!err && !arg.found && arg.called);
-+ dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out_name;
-+ /* instead of ENOENT */
-+ dentry = ERR_PTR(-ESTALE);
-+ if (!arg.found)
-+ goto out_name;
-+
-+ /* do not call vfsub_lkup_one() */
-+ dir = d_inode(parent);
-+ mutex_lock(&dir->i_mutex);
-+ dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen);
-+ mutex_unlock(&dir->i_mutex);
-+ AuTraceErrPtr(dentry);
-+ if (IS_ERR(dentry))
-+ goto out_name;
-+ AuDebugOn(au_test_anon(dentry));
-+ if (unlikely(d_really_is_negative(dentry))) {
-+ dput(dentry);
-+ dentry = ERR_PTR(-ENOENT);
-+ }
-+
-+out_name:
-+ free_page((unsigned long)arg.name);
-+out_file:
-+ fput(file);
-+out:
-+ if (unlikely(nsi_lock
-+ && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
-+ if (!IS_ERR(dentry)) {
-+ dput(dentry);
-+ dentry = ERR_PTR(-ESTALE);
-+ }
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
-+ ino_t dir_ino,
-+ struct au_nfsd_si_lock *nsi_lock)
-+{
-+ struct dentry *dentry;
-+ struct path path;
-+
-+ if (dir_ino != AUFS_ROOT_INO) {
-+ path.dentry = decode_by_ino(sb, dir_ino, 0);
-+ dentry = path.dentry;
-+ if (!path.dentry || IS_ERR(path.dentry))
-+ goto out;
-+ AuDebugOn(au_test_anon(path.dentry));
-+ } else
-+ path.dentry = dget(sb->s_root);
-+
-+ path.mnt = au_mnt_get(sb);
-+ dentry = au_lkup_by_ino(&path, ino, nsi_lock);
-+ path_put(&path);
-+
-+out:
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int h_acceptable(void *expv, struct dentry *dentry)
-+{
-+ return 1;
-+}
-+
-+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
-+ char *buf, int len, struct super_block *sb)
-+{
-+ char *p;
-+ int n;
-+ struct path path;
-+
-+ p = d_path(h_rootpath, buf, len);
-+ if (IS_ERR(p))
-+ goto out;
-+ n = strlen(p);
-+
-+ path.mnt = h_rootpath->mnt;
-+ path.dentry = h_parent;
-+ p = d_path(&path, buf, len);
-+ if (IS_ERR(p))
-+ goto out;
-+ if (n != 1)
-+ p += n;
-+
-+ path.mnt = au_mnt_get(sb);
-+ path.dentry = sb->s_root;
-+ p = d_path(&path, buf, len - strlen(p));
-+ mntput(path.mnt);
-+ if (IS_ERR(p))
-+ goto out;
-+ if (n != 1)
-+ p[strlen(p)] = '/';
-+
-+out:
-+ AuTraceErrPtr(p);
-+ return p;
-+}
-+
-+static
-+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
-+ int fh_len, struct au_nfsd_si_lock *nsi_lock)
-+{
-+ struct dentry *dentry, *h_parent, *root;
-+ struct super_block *h_sb;
-+ char *pathname, *p;
-+ struct vfsmount *h_mnt;
-+ struct au_branch *br;
-+ int err;
-+ struct path path;
-+
-+ br = au_sbr(sb, nsi_lock->bindex);
-+ h_mnt = au_br_mnt(br);
-+ h_sb = h_mnt->mnt_sb;
-+ /* todo: call lower fh_to_dentry()? fh_to_parent()? */
-+ h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
-+ fh_len - Fh_tail, fh[Fh_h_type],
-+ h_acceptable, /*context*/NULL);
-+ dentry = h_parent;
-+ if (unlikely(!h_parent || IS_ERR(h_parent))) {
-+ AuWarn1("%s decode_fh failed, %ld\n",
-+ au_sbtype(h_sb), PTR_ERR(h_parent));
-+ goto out;
-+ }
-+ dentry = NULL;
-+ if (unlikely(au_test_anon(h_parent))) {
-+ AuWarn1("%s decode_fh returned a disconnected dentry\n",
-+ au_sbtype(h_sb));
-+ goto out_h_parent;
-+ }
-+
-+ dentry = ERR_PTR(-ENOMEM);
-+ pathname = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!pathname))
-+ goto out_h_parent;
-+
-+ root = sb->s_root;
-+ path.mnt = h_mnt;
-+ di_read_lock_parent(root, !AuLock_IR);
-+ path.dentry = au_h_dptr(root, nsi_lock->bindex);
-+ di_read_unlock(root, !AuLock_IR);
-+ p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
-+ dentry = (void *)p;
-+ if (IS_ERR(p))
-+ goto out_pathname;
-+
-+ si_read_unlock(sb);
-+ err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
-+ dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out_relock;
-+
-+ dentry = ERR_PTR(-ENOENT);
-+ AuDebugOn(au_test_anon(path.dentry));
-+ if (unlikely(d_really_is_negative(path.dentry)))
-+ goto out_path;
-+
-+ if (ino != d_inode(path.dentry)->i_ino)
-+ dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
-+ else
-+ dentry = dget(path.dentry);
-+
-+out_path:
-+ path_put(&path);
-+out_relock:
-+ if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
-+ if (!IS_ERR(dentry)) {
-+ dput(dentry);
-+ dentry = ERR_PTR(-ESTALE);
-+ }
-+out_pathname:
-+ free_page((unsigned long)pathname);
-+out_h_parent:
-+ dput(h_parent);
-+out:
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct dentry *
-+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
-+ int fh_type)
-+{
-+ struct dentry *dentry;
-+ __u32 *fh = fid->raw;
-+ struct au_branch *br;
-+ ino_t ino, dir_ino;
-+ struct au_nfsd_si_lock nsi_lock = {
-+ .force_lock = 0
-+ };
-+
-+ dentry = ERR_PTR(-ESTALE);
-+ /* it should never happen, but the file handle is unreliable */
-+ if (unlikely(fh_len < Fh_tail))
-+ goto out;
-+ nsi_lock.sigen = fh[Fh_sigen];
-+ nsi_lock.br_id = fh[Fh_br_id];
-+
-+ /* branch id may be wrapped around */
-+ br = NULL;
-+ if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
-+ goto out;
-+ nsi_lock.force_lock = 1;
-+
-+ /* is this inode still cached? */
-+ ino = decode_ino(fh + Fh_ino);
-+ /* it should never happen */
-+ if (unlikely(ino == AUFS_ROOT_INO))
-+ goto out;
-+
-+ dir_ino = decode_ino(fh + Fh_dir_ino);
-+ dentry = decode_by_ino(sb, ino, dir_ino);
-+ if (IS_ERR(dentry))
-+ goto out_unlock;
-+ if (dentry)
-+ goto accept;
-+
-+ /* is the parent dir cached? */
-+ br = au_sbr(sb, nsi_lock.bindex);
-+ atomic_inc(&br->br_count);
-+ dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
-+ if (IS_ERR(dentry))
-+ goto out_unlock;
-+ if (dentry)
-+ goto accept;
-+
-+ /* lookup path */
-+ dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
-+ if (IS_ERR(dentry))
-+ goto out_unlock;
-+ if (unlikely(!dentry))
-+ /* todo?: make it ESTALE */
-+ goto out_unlock;
-+
-+accept:
-+ if (!au_digen_test(dentry, au_sigen(sb))
-+ && d_inode(dentry)->i_generation == fh[Fh_igen])
-+ goto out_unlock; /* success */
-+
-+ dput(dentry);
-+ dentry = ERR_PTR(-ESTALE);
-+out_unlock:
-+ if (br)
-+ atomic_dec(&br->br_count);
-+ si_read_unlock(sb);
-+out:
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+#if 0 /* reserved for future use */
-+/* support subtreecheck option */
-+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
-+ int fh_len, int fh_type)
-+{
-+ struct dentry *parent;
-+ __u32 *fh = fid->raw;
-+ ino_t dir_ino;
-+
-+ dir_ino = decode_ino(fh + Fh_dir_ino);
-+ parent = decode_by_ino(sb, dir_ino, 0);
-+ if (IS_ERR(parent))
-+ goto out;
-+ if (!parent)
-+ parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
-+ dir_ino, fh, fh_len);
-+
-+out:
-+ AuTraceErrPtr(parent);
-+ return parent;
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
-+ struct inode *dir)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct super_block *sb, *h_sb;
-+ struct dentry *dentry, *parent, *h_parent;
-+ struct inode *h_dir;
-+ struct au_branch *br;
-+
-+ err = -ENOSPC;
-+ if (unlikely(*max_len <= Fh_tail)) {
-+ AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
-+ goto out;
-+ }
-+
-+ err = FILEID_ROOT;
-+ if (inode->i_ino == AUFS_ROOT_INO) {
-+ AuDebugOn(inode->i_ino != AUFS_ROOT_INO);
-+ goto out;
-+ }
-+
-+ h_parent = NULL;
-+ sb = inode->i_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH);
-+ if (unlikely(err))
-+ goto out;
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
-+ AuWarn1("NFS-exporting requires xino\n");
-+#endif
-+ err = -EIO;
-+ parent = NULL;
-+ ii_read_lock_child(inode);
-+ bindex = au_ibstart(inode);
-+ if (!dir) {
-+ dentry = d_find_any_alias(inode);
-+ if (unlikely(!dentry))
-+ goto out_unlock;
-+ AuDebugOn(au_test_anon(dentry));
-+ parent = dget_parent(dentry);
-+ dput(dentry);
-+ if (unlikely(!parent))
-+ goto out_unlock;
-+ if (d_really_is_positive(parent))
-+ dir = d_inode(parent);
-+ }
-+
-+ ii_read_lock_parent(dir);
-+ h_dir = au_h_iptr(dir, bindex);
-+ ii_read_unlock(dir);
-+ if (unlikely(!h_dir))
-+ goto out_parent;
-+ h_parent = d_find_any_alias(h_dir);
-+ if (unlikely(!h_parent))
-+ goto out_hparent;
-+
-+ err = -EPERM;
-+ br = au_sbr(sb, bindex);
-+ h_sb = au_br_sb(br);
-+ if (unlikely(!h_sb->s_export_op)) {
-+ AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
-+ goto out_hparent;
-+ }
-+
-+ fh[Fh_br_id] = br->br_id;
-+ fh[Fh_sigen] = au_sigen(sb);
-+ encode_ino(fh + Fh_ino, inode->i_ino);
-+ encode_ino(fh + Fh_dir_ino, dir->i_ino);
-+ fh[Fh_igen] = inode->i_generation;
-+
-+ *max_len -= Fh_tail;
-+ fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
-+ max_len,
-+ /*connectable or subtreecheck*/0);
-+ err = fh[Fh_h_type];
-+ *max_len += Fh_tail;
-+ /* todo: macros? */
-+ if (err != FILEID_INVALID)
-+ err = 99;
-+ else
-+ AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
-+
-+out_hparent:
-+ dput(h_parent);
-+out_parent:
-+ dput(parent);
-+out_unlock:
-+ ii_read_unlock(inode);
-+ si_read_unlock(sb);
-+out:
-+ if (unlikely(err < 0))
-+ err = FILEID_INVALID;
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_commit_metadata(struct inode *inode)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct super_block *sb;
-+ struct inode *h_inode;
-+ int (*f)(struct inode *inode);
-+
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+ ii_write_lock_child(inode);
-+ bindex = au_ibstart(inode);
-+ AuDebugOn(bindex < 0);
-+ h_inode = au_h_iptr(inode, bindex);
-+
-+ f = h_inode->i_sb->s_export_op->commit_metadata;
-+ if (f)
-+ err = f(h_inode);
-+ else {
-+ struct writeback_control wbc = {
-+ .sync_mode = WB_SYNC_ALL,
-+ .nr_to_write = 0 /* metadata only */
-+ };
-+
-+ err = sync_inode(h_inode, &wbc);
-+ }
-+
-+ au_cpup_attr_timesizes(inode);
-+ ii_write_unlock(inode);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct export_operations aufs_export_op = {
-+ .fh_to_dentry = aufs_fh_to_dentry,
-+ /* .fh_to_parent = aufs_fh_to_parent, */
-+ .encode_fh = aufs_encode_fh,
-+ .commit_metadata = aufs_commit_metadata
-+};
-+
-+void au_export_init(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+ __u32 u;
-+
-+ sb->s_export_op = &aufs_export_op;
-+ sbinfo = au_sbi(sb);
-+ sbinfo->si_xigen = NULL;
-+ get_random_bytes(&u, sizeof(u));
-+ BUILD_BUG_ON(sizeof(u) != sizeof(int));
-+ atomic_set(&sbinfo->si_xigen_next, u);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/fhsm.c linux-4.1.10/fs/aufs/fhsm.c
---- linux-4.1.10.orig/fs/aufs/fhsm.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/fhsm.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,426 @@
-+/*
-+ * Copyright (C) 2011-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+/*
-+ * File-based Hierarchy Storage Management
-+ */
-+
-+#include <linux/anon_inodes.h>
-+#include <linux/poll.h>
-+#include <linux/seq_file.h>
-+#include <linux/statfs.h>
-+#include "aufs.h"
-+
-+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ SiMustAnyLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ AuDebugOn(!fhsm);
-+ return fhsm->fhsm_bottom;
-+}
-+
-+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ AuDebugOn(!fhsm);
-+ fhsm->fhsm_bottom = bindex;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br)
-+{
-+ struct au_br_fhsm *bf;
-+
-+ bf = br->br_fhsm;
-+ MtxMustLock(&bf->bf_lock);
-+
-+ return !bf->bf_readable
-+ || time_after(jiffies,
-+ bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_fhsm_notify(struct super_block *sb, int val)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ SiMustAnyLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ if (au_fhsm_pid(fhsm)
-+ && atomic_read(&fhsm->fhsm_readable) != -1) {
-+ atomic_set(&fhsm->fhsm_readable, val);
-+ if (val)
-+ wake_up(&fhsm->fhsm_wqh);
-+ }
-+}
-+
-+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex,
-+ struct aufs_stfs *rstfs, int do_lock, int do_notify)
-+{
-+ int err;
-+ struct au_branch *br;
-+ struct au_br_fhsm *bf;
-+
-+ br = au_sbr(sb, bindex);
-+ AuDebugOn(au_br_rdonly(br));
-+ bf = br->br_fhsm;
-+ AuDebugOn(!bf);
-+
-+ if (do_lock)
-+ mutex_lock(&bf->bf_lock);
-+ else
-+ MtxMustLock(&bf->bf_lock);
-+
-+ /* sb->s_root for NFS is unreliable */
-+ err = au_br_stfs(br, &bf->bf_stfs);
-+ if (unlikely(err)) {
-+		AuErr1("FHSM failed (%d), b%d, ignored.\n", err, bindex);
-+ goto out;
-+ }
-+
-+ bf->bf_jiffy = jiffies;
-+ bf->bf_readable = 1;
-+ if (do_notify)
-+ au_fhsm_notify(sb, /*val*/1);
-+ if (rstfs)
-+ *rstfs = bf->bf_stfs;
-+
-+out:
-+ if (do_lock)
-+ mutex_unlock(&bf->bf_lock);
-+ au_fhsm_notify(sb, /*val*/1);
-+
-+ return err;
-+}
-+
-+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+ struct au_branch *br;
-+ struct au_br_fhsm *bf;
-+
-+ AuDbg("b%d, force %d\n", bindex, force);
-+ SiMustAnyLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ if (!au_ftest_si(sbinfo, FHSM)
-+ || fhsm->fhsm_bottom == bindex)
-+ return;
-+
-+ br = au_sbr(sb, bindex);
-+ bf = br->br_fhsm;
-+ AuDebugOn(!bf);
-+ mutex_lock(&bf->bf_lock);
-+ if (force
-+ || au_fhsm_pid(fhsm)
-+ || au_fhsm_test_jiffy(sbinfo, br))
-+ err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0,
-+ /*do_notify*/1);
-+ mutex_unlock(&bf->bf_lock);
-+}
-+
-+void au_fhsm_wrote_all(struct super_block *sb, int force)
-+{
-+ aufs_bindex_t bindex, bend;
-+ struct au_branch *br;
-+
-+ /* exclude the bottom */
-+ bend = au_fhsm_bottom(sb);
-+ for (bindex = 0; bindex < bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_fhsm(br->br_perm))
-+ au_fhsm_wrote(sb, bindex, force);
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static unsigned int au_fhsm_poll(struct file *file,
-+ struct poll_table_struct *wait)
-+{
-+ unsigned int mask;
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ mask = 0;
-+ sbinfo = file->private_data;
-+ fhsm = &sbinfo->si_fhsm;
-+ poll_wait(file, &fhsm->fhsm_wqh, wait);
-+ if (atomic_read(&fhsm->fhsm_readable))
-+ mask = POLLIN /* | POLLRDNORM */;
-+
-+ AuTraceErr((int)mask);
-+ return mask;
-+}
-+
-+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr,
-+ struct aufs_stfs *stfs, __s16 brid)
-+{
-+ int err;
-+
-+ err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs));
-+ if (!err)
-+ err = __put_user(brid, &stbr->brid);
-+ if (unlikely(err))
-+ err = -EFAULT;
-+
-+ return err;
-+}
-+
-+static ssize_t au_fhsm_do_read(struct super_block *sb,
-+ struct aufs_stbr __user *stbr, size_t count)
-+{
-+ ssize_t err;
-+ int nstbr;
-+ aufs_bindex_t bindex, bend;
-+ struct au_branch *br;
-+ struct au_br_fhsm *bf;
-+
-+ /* except the bottom branch */
-+ err = 0;
-+ nstbr = 0;
-+ bend = au_fhsm_bottom(sb);
-+ for (bindex = 0; !err && bindex < bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (!au_br_fhsm(br->br_perm))
-+ continue;
-+
-+ bf = br->br_fhsm;
-+ mutex_lock(&bf->bf_lock);
-+ if (bf->bf_readable) {
-+ err = -EFAULT;
-+ if (count >= sizeof(*stbr))
-+ err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs,
-+ br->br_id);
-+ if (!err) {
-+ bf->bf_readable = 0;
-+ count -= sizeof(*stbr);
-+ nstbr++;
-+ }
-+ }
-+ mutex_unlock(&bf->bf_lock);
-+ }
-+ if (!err)
-+ err = sizeof(*stbr) * nstbr;
-+
-+ return err;
-+}
-+
-+static ssize_t au_fhsm_read(struct file *file, char __user *buf, size_t count,
-+ loff_t *pos)
-+{
-+ ssize_t err;
-+ int readable;
-+ aufs_bindex_t nfhsm, bindex, bend;
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+ struct au_branch *br;
-+ struct super_block *sb;
-+
-+ err = 0;
-+ sbinfo = file->private_data;
-+ fhsm = &sbinfo->si_fhsm;
-+need_data:
-+ spin_lock_irq(&fhsm->fhsm_wqh.lock);
-+ if (!atomic_read(&fhsm->fhsm_readable)) {
-+ if (vfsub_file_flags(file) & O_NONBLOCK)
-+ err = -EAGAIN;
-+ else
-+ err = wait_event_interruptible_locked_irq
-+ (fhsm->fhsm_wqh,
-+ atomic_read(&fhsm->fhsm_readable));
-+ }
-+ spin_unlock_irq(&fhsm->fhsm_wqh.lock);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /* sb may already be dead */
-+ au_rw_read_lock(&sbinfo->si_rwsem);
-+ readable = atomic_read(&fhsm->fhsm_readable);
-+ if (readable > 0) {
-+ sb = sbinfo->si_sb;
-+ AuDebugOn(!sb);
-+ /* exclude the bottom branch */
-+ nfhsm = 0;
-+ bend = au_fhsm_bottom(sb);
-+ for (bindex = 0; bindex < bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_fhsm(br->br_perm))
-+ nfhsm++;
-+ }
-+ err = -EMSGSIZE;
-+ if (nfhsm * sizeof(struct aufs_stbr) <= count) {
-+ atomic_set(&fhsm->fhsm_readable, 0);
-+ err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf,
-+ count);
-+ }
-+ }
-+ au_rw_read_unlock(&sbinfo->si_rwsem);
-+ if (!readable)
-+ goto need_data;
-+
-+out:
-+ return err;
-+}
-+
-+static int au_fhsm_release(struct inode *inode, struct file *file)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ /* sb may already be dead */
-+ sbinfo = file->private_data;
-+ fhsm = &sbinfo->si_fhsm;
-+ spin_lock(&fhsm->fhsm_spin);
-+ fhsm->fhsm_pid = 0;
-+ spin_unlock(&fhsm->fhsm_spin);
-+ kobject_put(&sbinfo->si_kobj);
-+
-+ return 0;
-+}
-+
-+static const struct file_operations au_fhsm_fops = {
-+ .owner = THIS_MODULE,
-+ .llseek = noop_llseek,
-+ .read = au_fhsm_read,
-+ .poll = au_fhsm_poll,
-+ .release = au_fhsm_release
-+};
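-+/*
-+ * A rough userland sketch (hypothetical code; the fd is the one created
-+ * by au_fhsm_fd() below):
-+ *
-+ *	struct aufs_stbr stbr[16];
-+ *	struct pollfd pfd = { .fd = fhsm_fd, .events = POLLIN };
-+ *	while (poll(&pfd, 1, -1) > 0) {
-+ *		ssize_t sz = read(fhsm_fd, stbr, sizeof(stbr));
-+ *		// sz / sizeof(*stbr) records, each carrying a branch id
-+ *		// and the statfs result of that writable branch
-+ *	}
-+ */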
-+
-+int au_fhsm_fd(struct super_block *sb, int oflags)
-+{
-+ int err, fd;
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+
-+ err = -EPERM;
-+ if (unlikely(!capable(CAP_SYS_ADMIN)))
-+ goto out;
-+
-+ err = -EINVAL;
-+ if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK)))
-+ goto out;
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ spin_lock(&fhsm->fhsm_spin);
-+ if (!fhsm->fhsm_pid)
-+ fhsm->fhsm_pid = current->pid;
-+ else
-+ err = -EBUSY;
-+ spin_unlock(&fhsm->fhsm_spin);
-+ if (unlikely(err))
-+ goto out;
-+
-+ oflags |= O_RDONLY;
-+ /* oflags |= FMODE_NONOTIFY; */
-+ fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags);
-+ err = fd;
-+ if (unlikely(fd < 0))
-+ goto out_pid;
-+
-+	/* succeed regardless of the 'fhsm' status */
-+ kobject_get(&sbinfo->si_kobj);
-+ si_noflush_read_lock(sb);
-+ if (au_ftest_si(sbinfo, FHSM))
-+ au_fhsm_wrote_all(sb, /*force*/0);
-+ si_read_unlock(sb);
-+ goto out; /* success */
-+
-+out_pid:
-+ spin_lock(&fhsm->fhsm_spin);
-+ fhsm->fhsm_pid = 0;
-+ spin_unlock(&fhsm->fhsm_spin);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_fhsm_br_alloc(struct au_branch *br)
-+{
-+ int err;
-+
-+ err = 0;
-+ br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS);
-+ if (br->br_fhsm)
-+ au_br_fhsm_init(br->br_fhsm);
-+ else
-+ err = -ENOMEM;
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_fhsm_fin(struct super_block *sb)
-+{
-+ au_fhsm_notify(sb, /*val*/-1);
-+}
-+
-+void au_fhsm_init(struct au_sbinfo *sbinfo)
-+{
-+ struct au_fhsm *fhsm;
-+
-+ fhsm = &sbinfo->si_fhsm;
-+ spin_lock_init(&fhsm->fhsm_spin);
-+ init_waitqueue_head(&fhsm->fhsm_wqh);
-+ atomic_set(&fhsm->fhsm_readable, 0);
-+ fhsm->fhsm_expire
-+ = msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC);
-+ fhsm->fhsm_bottom = -1;
-+}
-+
-+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec)
-+{
-+ sbinfo->si_fhsm.fhsm_expire
-+ = msecs_to_jiffies(sec * MSEC_PER_SEC);
-+}
-+
-+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo)
-+{
-+ unsigned int u;
-+
-+ if (!au_ftest_si(sbinfo, FHSM))
-+ return;
-+
-+ u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC;
-+ if (u != AUFS_FHSM_CACHE_DEF_SEC)
-+ seq_printf(seq, ",fhsm_sec=%u", u);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/file.c linux-4.1.10/fs/aufs/file.c
---- linux-4.1.10.orig/fs/aufs/file.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/file.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,841 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * handling file/dir, and address_space operation
-+ */
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+#include <linux/migrate.h>
-+#endif
-+#include <linux/pagemap.h>
-+#include "aufs.h"
-+
-+/* drop flags for writing */
-+unsigned int au_file_roflags(unsigned int flags)
-+{
-+ flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
-+ flags |= O_RDONLY | O_NOATIME;
-+ return flags;
-+}
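-+/* e.g. au_file_roflags(O_RDWR | O_APPEND) == (O_RDONLY | O_NOATIME) */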
-+
-+/* common functions to regular file and dir */
-+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
-+ struct file *file, int force_wr)
-+{
-+ struct file *h_file;
-+ struct dentry *h_dentry;
-+ struct inode *h_inode;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct path h_path;
-+ int err;
-+
-+ /* a race condition can happen between open and unlink/rmdir */
-+ h_file = ERR_PTR(-ENOENT);
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (au_test_nfsd() && (!h_dentry || d_is_negative(h_dentry)))
-+ goto out;
-+ h_inode = d_inode(h_dentry);
-+ spin_lock(&h_dentry->d_lock);
-+ err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
-+ /* || !d_inode(dentry)->i_nlink */
-+ ;
-+ spin_unlock(&h_dentry->d_lock);
-+ if (unlikely(err))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ br = au_sbr(sb, bindex);
-+ err = au_br_test_oflag(flags, br);
-+ h_file = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /* drop flags for writing */
-+ if (au_test_ro(sb, bindex, d_inode(dentry))) {
-+ if (force_wr && !(flags & O_WRONLY))
-+ force_wr = 0;
-+ flags = au_file_roflags(flags);
-+ if (force_wr) {
-+ h_file = ERR_PTR(-EROFS);
-+ flags = au_file_roflags(flags);
-+ if (unlikely(vfsub_native_ro(h_inode)
-+ || IS_APPEND(h_inode)))
-+ goto out;
-+ flags &= ~O_ACCMODE;
-+ flags |= O_WRONLY;
-+ }
-+ }
-+ flags &= ~O_CREAT;
-+ atomic_inc(&br->br_count);
-+ h_path.dentry = h_dentry;
-+ h_path.mnt = au_br_mnt(br);
-+ h_file = vfsub_dentry_open(&h_path, flags);
-+ if (IS_ERR(h_file))
-+ goto out_br;
-+
-+ if (flags & __FMODE_EXEC) {
-+ err = deny_write_access(h_file);
-+ if (unlikely(err)) {
-+ fput(h_file);
-+ h_file = ERR_PTR(err);
-+ goto out_br;
-+ }
-+ }
-+ fsnotify_open(h_file);
-+ goto out; /* success */
-+
-+out_br:
-+ atomic_dec(&br->br_count);
-+out:
-+ return h_file;
-+}
-+
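-+/*
-+ * copy-up/move-up on open: when the branch holding @dentry has a coo/moo
-+ * attribute, copy the file up to an upper writable branch before the open
-+ * proceeds; with "moo", the lower copy is unlinked afterwards.
-+ */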
-+static int au_cmoo(struct dentry *dentry)
-+{
-+ int err, cmoo;
-+ unsigned int udba;
-+ struct path h_path;
-+ struct au_pin pin;
-+ struct au_cp_generic cpg = {
-+ .dentry = dentry,
-+ .bdst = -1,
-+ .bsrc = -1,
-+ .len = -1,
-+ .pin = &pin,
-+ .flags = AuCpup_DTIME | AuCpup_HOPEN
-+ };
-+ struct inode *delegated;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+ struct au_fhsm *fhsm;
-+ pid_t pid;
-+ struct au_branch *br;
-+ struct dentry *parent;
-+ struct au_hinode *hdir;
-+
-+ DiMustWriteLock(dentry);
-+ IiMustWriteLock(d_inode(dentry));
-+
-+ err = 0;
-+ if (IS_ROOT(dentry))
-+ goto out;
-+ cpg.bsrc = au_dbstart(dentry);
-+ if (!cpg.bsrc)
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ sbinfo = au_sbi(sb);
-+ fhsm = &sbinfo->si_fhsm;
-+ pid = au_fhsm_pid(fhsm);
-+ if (pid
-+ && (current->pid == pid
-+ || current->real_parent->pid == pid))
-+ goto out;
-+
-+ br = au_sbr(sb, cpg.bsrc);
-+ cmoo = au_br_cmoo(br->br_perm);
-+ if (!cmoo)
-+ goto out;
-+ if (!d_is_reg(dentry))
-+ cmoo &= AuBrAttr_COO_ALL;
-+ if (!cmoo)
-+ goto out;
-+
-+ parent = dget_parent(dentry);
-+ di_write_lock_parent(parent);
-+ err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
-+ cpg.bdst = err;
-+ if (unlikely(err < 0)) {
-+ err = 0; /* there is no upper writable branch */
-+ goto out_dgrade;
-+ }
-+ AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);
-+
-+ /* do not respect the coo attrib for the target branch */
-+ err = au_cpup_dirs(dentry, cpg.bdst);
-+ if (unlikely(err))
-+ goto out_dgrade;
-+
-+ di_downgrade_lock(parent, AuLock_IR);
-+ udba = au_opt_udba(sb);
-+ err = au_pin(&pin, dentry, cpg.bdst, udba,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ err = au_sio_cpup_simple(&cpg);
-+ au_unpin(&pin);
-+ if (unlikely(err))
-+ goto out_parent;
-+ if (!(cmoo & AuBrWAttr_MOO))
-+ goto out_parent; /* success */
-+
-+ err = au_pin(&pin, dentry, cpg.bsrc, udba,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ h_path.mnt = au_br_mnt(br);
-+ h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
-+ hdir = au_hi(d_inode(parent), cpg.bsrc);
-+ delegated = NULL;
-+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
-+ au_unpin(&pin);
-+ /* todo: keep h_dentry or not? */
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ if (unlikely(err)) {
-+ pr_err("unlink %pd after coo failed (%d), ignored\n",
-+ dentry, err);
-+ err = 0;
-+ }
-+ goto out_parent; /* success */
-+
-+out_dgrade:
-+ di_downgrade_lock(parent, AuLock_IR);
-+out_parent:
-+ di_read_unlock(parent, AuLock_IR);
-+ dput(parent);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_do_open(struct file *file, struct au_do_open_args *args)
-+{
-+ int err, no_lock = args->no_lock;
-+ struct dentry *dentry;
-+ struct au_finfo *finfo;
-+
-+ if (!no_lock)
-+ err = au_finfo_init(file, args->fidir);
-+ else {
-+ lockdep_off();
-+ err = au_finfo_init(file, args->fidir);
-+ lockdep_on();
-+ }
-+ if (unlikely(err))
-+ goto out;
-+
-+ dentry = file->f_path.dentry;
-+ AuDebugOn(IS_ERR_OR_NULL(dentry));
-+ if (!no_lock) {
-+ di_write_lock_child(dentry);
-+ err = au_cmoo(dentry);
-+ di_downgrade_lock(dentry, AuLock_IR);
-+ if (!err)
-+ err = args->open(file, vfsub_file_flags(file), NULL);
-+ di_read_unlock(dentry, AuLock_IR);
-+ } else {
-+ err = au_cmoo(dentry);
-+ if (!err)
-+ err = args->open(file, vfsub_file_flags(file),
-+ args->h_file);
-+ if (!err && au_fbstart(file) != au_dbstart(dentry))
-+ /*
-+ * cmoo happens after h_file was opened.
-+ * need to refresh file later.
-+ */
-+ atomic_dec(&au_fi(file)->fi_generation);
-+ }
-+
-+ finfo = au_fi(file);
-+ if (!err) {
-+ finfo->fi_file = file;
-+ au_sphl_add(&finfo->fi_hlist,
-+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
-+ }
-+ if (!no_lock)
-+ fi_write_unlock(file);
-+ else {
-+ lockdep_off();
-+ fi_write_unlock(file);
-+ lockdep_on();
-+ }
-+ if (unlikely(err)) {
-+ finfo->fi_hdir = NULL;
-+ au_finfo_fin(file);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int au_reopen_nondir(struct file *file)
-+{
-+ int err;
-+ aufs_bindex_t bstart;
-+ struct dentry *dentry;
-+ struct file *h_file, *h_file_tmp;
-+
-+ dentry = file->f_path.dentry;
-+ bstart = au_dbstart(dentry);
-+ h_file_tmp = NULL;
-+ if (au_fbstart(file) == bstart) {
-+ h_file = au_hf_top(file);
-+ if (file->f_mode == h_file->f_mode)
-+ return 0; /* success */
-+ h_file_tmp = h_file;
-+ get_file(h_file_tmp);
-+ au_set_h_fptr(file, bstart, NULL);
-+ }
-+ AuDebugOn(au_fi(file)->fi_hdir);
-+ /*
-+	 * it can happen:
-+	 * a file exists on both the rw and ro branches
-+	 * open --> dbstart and fbstart are both 0
-+	 * prepend a branch as rw, the old "rw" becomes ro
-+ * remove rw/file
-+ * delete the top branch, "rw" becomes rw again
-+ * --> dbstart is 1, fbstart is still 0
-+ * write --> fbstart is 0 but dbstart is 1
-+ */
-+ /* AuDebugOn(au_fbstart(file) < bstart); */
-+
-+ h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC,
-+ file, /*force_wr*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file)) {
-+ if (h_file_tmp) {
-+ atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count);
-+ au_set_h_fptr(file, bstart, h_file_tmp);
-+ h_file_tmp = NULL;
-+ }
-+ goto out; /* todo: close all? */
-+ }
-+
-+ err = 0;
-+ au_set_fbstart(file, bstart);
-+ au_set_h_fptr(file, bstart, h_file);
-+ au_update_figen(file);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+
-+out:
-+ if (h_file_tmp)
-+ fput(h_file_tmp);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
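-+/*
-+ * temporarily swap @hi_wh (the already copied-up, whiteout'ed file) into
-+ * the dentry-info array so that au_reopen_nondir() opens it, then restore
-+ * the original entry and the branch index.
-+ */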
-+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
-+ struct dentry *hi_wh)
-+{
-+ int err;
-+ aufs_bindex_t bstart;
-+ struct au_dinfo *dinfo;
-+ struct dentry *h_dentry;
-+ struct au_hdentry *hdp;
-+
-+ dinfo = au_di(file->f_path.dentry);
-+ AuRwMustWriteLock(&dinfo->di_rwsem);
-+
-+ bstart = dinfo->di_bstart;
-+ dinfo->di_bstart = btgt;
-+ hdp = dinfo->di_hdentry;
-+ h_dentry = hdp[0 + btgt].hd_dentry;
-+ hdp[0 + btgt].hd_dentry = hi_wh;
-+ err = au_reopen_nondir(file);
-+ hdp[0 + btgt].hd_dentry = h_dentry;
-+ dinfo->di_bstart = bstart;
-+
-+ return err;
-+}
-+
-+static int au_ready_to_write_wh(struct file *file, loff_t len,
-+ aufs_bindex_t bcpup, struct au_pin *pin)
-+{
-+ int err;
-+ struct inode *inode, *h_inode;
-+ struct dentry *h_dentry, *hi_wh;
-+ struct au_cp_generic cpg = {
-+ .dentry = file->f_path.dentry,
-+ .bdst = bcpup,
-+ .bsrc = -1,
-+ .len = len,
-+ .pin = pin
-+ };
-+
-+ au_update_dbstart(cpg.dentry);
-+ inode = d_inode(cpg.dentry);
-+ h_inode = NULL;
-+ if (au_dbstart(cpg.dentry) <= bcpup
-+ && au_dbend(cpg.dentry) >= bcpup) {
-+ h_dentry = au_h_dptr(cpg.dentry, bcpup);
-+ if (h_dentry && d_is_positive(h_dentry))
-+ h_inode = d_inode(h_dentry);
-+ }
-+ hi_wh = au_hi_wh(inode, bcpup);
-+ if (!hi_wh && !h_inode)
-+ err = au_sio_cpup_wh(&cpg, file);
-+ else
-+ /* already copied-up after unlink */
-+ err = au_reopen_wh(file, bcpup, hi_wh);
-+
-+ if (!err
-+ && (inode->i_nlink > 1
-+ || (inode->i_state & I_LINKABLE))
-+ && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
-+ au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));
-+
-+ return err;
-+}
-+
-+/*
-+ * prepare the @file for writing.
-+ */
-+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
-+{
-+ int err;
-+ aufs_bindex_t dbstart;
-+ struct dentry *parent;
-+ struct inode *inode;
-+ struct super_block *sb;
-+ struct file *h_file;
-+ struct au_cp_generic cpg = {
-+ .dentry = file->f_path.dentry,
-+ .bdst = -1,
-+ .bsrc = -1,
-+ .len = len,
-+ .pin = pin,
-+ .flags = AuCpup_DTIME
-+ };
-+
-+ sb = cpg.dentry->d_sb;
-+ inode = d_inode(cpg.dentry);
-+ cpg.bsrc = au_fbstart(file);
-+ err = au_test_ro(sb, cpg.bsrc, inode);
-+ if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
-+ err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
-+ /*flags*/0);
-+ goto out;
-+ }
-+
-+ /* need to cpup or reopen */
-+ parent = dget_parent(cpg.dentry);
-+ di_write_lock_parent(parent);
-+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
-+ cpg.bdst = err;
-+ if (unlikely(err < 0))
-+ goto out_dgrade;
-+ err = 0;
-+
-+ if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
-+ err = au_cpup_dirs(cpg.dentry, cpg.bdst);
-+ if (unlikely(err))
-+ goto out_dgrade;
-+ }
-+
-+ err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (unlikely(err))
-+ goto out_dgrade;
-+
-+ dbstart = au_dbstart(cpg.dentry);
-+ if (dbstart <= cpg.bdst)
-+ cpg.bsrc = cpg.bdst;
-+
-+ if (dbstart <= cpg.bdst /* just reopen */
-+ || !d_unhashed(cpg.dentry) /* copyup and reopen */
-+ ) {
-+ h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0);
-+ if (IS_ERR(h_file))
-+ err = PTR_ERR(h_file);
-+ else {
-+ di_downgrade_lock(parent, AuLock_IR);
-+ if (dbstart > cpg.bdst)
-+ err = au_sio_cpup_simple(&cpg);
-+ if (!err)
-+ err = au_reopen_nondir(file);
-+ au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
-+ }
-+ } else { /* copyup as wh and reopen */
-+ /*
-+		 * since a writable hfsplus branch is not supported,
-+ * h_open_pre/post() are unnecessary.
-+ */
-+ err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
-+ di_downgrade_lock(parent, AuLock_IR);
-+ }
-+
-+ if (!err) {
-+ au_pin_set_parent_lflag(pin, /*lflag*/0);
-+ goto out_dput; /* success */
-+ }
-+ au_unpin(pin);
-+ goto out_unlock;
-+
-+out_dgrade:
-+ di_downgrade_lock(parent, AuLock_IR);
-+out_unlock:
-+ di_read_unlock(parent, AuLock_IR);
-+out_dput:
-+ dput(parent);
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_do_flush(struct file *file, fl_owner_t id,
-+ int (*flush)(struct file *file, fl_owner_t id))
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct inode *inode;
-+
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ si_noflush_read_lock(sb);
-+ fi_read_lock(file);
-+ ii_read_lock_child(inode);
-+
-+ err = flush(file, id);
-+ au_cpup_attr_timesizes(inode);
-+
-+ ii_read_unlock(inode);
-+ fi_read_unlock(file);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
-+{
-+ int err;
-+ struct au_pin pin;
-+ struct au_finfo *finfo;
-+ struct dentry *parent, *hi_wh;
-+ struct inode *inode;
-+ struct super_block *sb;
-+ struct au_cp_generic cpg = {
-+ .dentry = file->f_path.dentry,
-+ .bdst = -1,
-+ .bsrc = -1,
-+ .len = -1,
-+ .pin = &pin,
-+ .flags = AuCpup_DTIME
-+ };
-+
-+ FiMustWriteLock(file);
-+
-+ err = 0;
-+ finfo = au_fi(file);
-+ sb = cpg.dentry->d_sb;
-+ inode = d_inode(cpg.dentry);
-+ cpg.bdst = au_ibstart(inode);
-+ if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
-+ goto out;
-+
-+ parent = dget_parent(cpg.dentry);
-+ if (au_test_ro(sb, cpg.bdst, inode)) {
-+ di_read_lock_parent(parent, !AuLock_IR);
-+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
-+ cpg.bdst = err;
-+ di_read_unlock(parent, !AuLock_IR);
-+ if (unlikely(err < 0))
-+ goto out_parent;
-+ err = 0;
-+ }
-+
-+ di_read_lock_parent(parent, AuLock_IR);
-+ hi_wh = au_hi_wh(inode, cpg.bdst);
-+ if (!S_ISDIR(inode->i_mode)
-+ && au_opt_test(au_mntflags(sb), PLINK)
-+ && au_plink_test(inode)
-+ && !d_unhashed(cpg.dentry)
-+ && cpg.bdst < au_dbstart(cpg.dentry)) {
-+ err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ /* always superio. */
-+ err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (!err) {
-+ err = au_sio_cpup_simple(&cpg);
-+ au_unpin(&pin);
-+ }
-+ } else if (hi_wh) {
-+ /* already copied-up after unlink */
-+ err = au_reopen_wh(file, cpg.bdst, hi_wh);
-+ *need_reopen = 0;
-+ }
-+
-+out_unlock:
-+ di_read_unlock(parent, AuLock_IR);
-+out_parent:
-+ dput(parent);
-+out:
-+ return err;
-+}
-+
-+static void au_do_refresh_dir(struct file *file)
-+{
-+ aufs_bindex_t bindex, bend, new_bindex, brid;
-+ struct au_hfile *p, tmp, *q;
-+ struct au_finfo *finfo;
-+ struct super_block *sb;
-+ struct au_fidir *fidir;
-+
-+ FiMustWriteLock(file);
-+
-+ sb = file->f_path.dentry->d_sb;
-+ finfo = au_fi(file);
-+ fidir = finfo->fi_hdir;
-+ AuDebugOn(!fidir);
-+ p = fidir->fd_hfile + finfo->fi_btop;
-+ brid = p->hf_br->br_id;
-+ bend = fidir->fd_bbot;
-+ for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) {
-+ if (!p->hf_file)
-+ continue;
-+
-+ new_bindex = au_br_index(sb, p->hf_br->br_id);
-+ if (new_bindex == bindex)
-+ continue;
-+ if (new_bindex < 0) {
-+ au_set_h_fptr(file, bindex, NULL);
-+ continue;
-+ }
-+
-+ /* swap two lower inode, and loop again */
-+ q = fidir->fd_hfile + new_bindex;
-+ tmp = *q;
-+ *q = *p;
-+ *p = tmp;
-+ if (tmp.hf_file) {
-+ bindex--;
-+ p--;
-+ }
-+ }
-+
-+ p = fidir->fd_hfile;
-+ if (!au_test_mmapped(file) && !d_unlinked(file->f_path.dentry)) {
-+ bend = au_sbend(sb);
-+ for (finfo->fi_btop = 0; finfo->fi_btop <= bend;
-+ finfo->fi_btop++, p++)
-+ if (p->hf_file) {
-+ if (file_inode(p->hf_file))
-+ break;
-+ au_hfput(p, file);
-+ }
-+ } else {
-+ bend = au_br_index(sb, brid);
-+ for (finfo->fi_btop = 0; finfo->fi_btop < bend;
-+ finfo->fi_btop++, p++)
-+ if (p->hf_file)
-+ au_hfput(p, file);
-+ bend = au_sbend(sb);
-+ }
-+
-+ p = fidir->fd_hfile + bend;
-+ for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop;
-+ fidir->fd_bbot--, p--)
-+ if (p->hf_file) {
-+ if (file_inode(p->hf_file))
-+ break;
-+ au_hfput(p, file);
-+ }
-+ AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
-+}
-+
-+/*
-+ * after branch manipulating, refresh the file.
-+ */
-+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
-+{
-+ int err, need_reopen;
-+ aufs_bindex_t bend, bindex;
-+ struct dentry *dentry;
-+ struct au_finfo *finfo;
-+ struct au_hfile *hfile;
-+
-+ dentry = file->f_path.dentry;
-+ finfo = au_fi(file);
-+ if (!finfo->fi_hdir) {
-+ hfile = &finfo->fi_htop;
-+ AuDebugOn(!hfile->hf_file);
-+ bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
-+ AuDebugOn(bindex < 0);
-+ if (bindex != finfo->fi_btop)
-+ au_set_fbstart(file, bindex);
-+ } else {
-+ err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1);
-+ if (unlikely(err))
-+ goto out;
-+ au_do_refresh_dir(file);
-+ }
-+
-+ err = 0;
-+ need_reopen = 1;
-+ if (!au_test_mmapped(file))
-+ err = au_file_refresh_by_inode(file, &need_reopen);
-+ if (!err && need_reopen && !d_unlinked(dentry))
-+ err = reopen(file);
-+ if (!err) {
-+ au_update_figen(file);
-+ goto out; /* success */
-+ }
-+
-+ /* error, close all lower files */
-+ if (finfo->fi_hdir) {
-+ bend = au_fbend_dir(file);
-+ for (bindex = au_fbstart(file); bindex <= bend; bindex++)
-+ au_set_h_fptr(file, bindex, NULL);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* common function to regular file and dir */
-+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
-+ int wlock)
-+{
-+ int err;
-+ unsigned int sigen, figen;
-+ aufs_bindex_t bstart;
-+ unsigned char pseudo_link;
-+ struct dentry *dentry;
-+ struct inode *inode;
-+
-+ err = 0;
-+ dentry = file->f_path.dentry;
-+ inode = d_inode(dentry);
-+ sigen = au_sigen(dentry->d_sb);
-+ fi_write_lock(file);
-+ figen = au_figen(file);
-+ di_write_lock_child(dentry);
-+ bstart = au_dbstart(dentry);
-+ pseudo_link = (bstart != au_ibstart(inode));
-+ if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) {
-+ if (!wlock) {
-+ di_downgrade_lock(dentry, AuLock_IR);
-+ fi_downgrade_lock(file);
-+ }
-+ goto out; /* success */
-+ }
-+
-+ AuDbg("sigen %d, figen %d\n", sigen, figen);
-+ if (au_digen_test(dentry, sigen)) {
-+ err = au_reval_dpath(dentry, sigen);
-+ AuDebugOn(!err && au_digen_test(dentry, sigen));
-+ }
-+
-+ if (!err)
-+ err = refresh_file(file, reopen);
-+ if (!err) {
-+ if (!wlock) {
-+ di_downgrade_lock(dentry, AuLock_IR);
-+ fi_downgrade_lock(file);
-+ }
-+ } else {
-+ di_write_unlock(dentry);
-+ fi_write_unlock(file);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* cf. aufs_nopage() */
-+/* for madvise(2) */
-+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
-+{
-+ unlock_page(page);
-+ return 0;
-+}
-+
-+/* it will never be called, but necessary to support O_DIRECT */
-+static ssize_t aufs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-+ loff_t offset)
-+{ BUG(); return 0; }
-+
-+/* they will never be called. */
-+#ifdef CONFIG_AUFS_DEBUG
-+static int aufs_write_begin(struct file *file, struct address_space *mapping,
-+ loff_t pos, unsigned len, unsigned flags,
-+ struct page **pagep, void **fsdata)
-+{ AuUnsupport(); return 0; }
-+static int aufs_write_end(struct file *file, struct address_space *mapping,
-+ loff_t pos, unsigned len, unsigned copied,
-+ struct page *page, void *fsdata)
-+{ AuUnsupport(); return 0; }
-+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
-+{ AuUnsupport(); return 0; }
-+
-+static int aufs_set_page_dirty(struct page *page)
-+{ AuUnsupport(); return 0; }
-+static void aufs_invalidatepage(struct page *page, unsigned int offset,
-+ unsigned int length)
-+{ AuUnsupport(); }
-+static int aufs_releasepage(struct page *page, gfp_t gfp)
-+{ AuUnsupport(); return 0; }
-+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
-+ struct page *page, enum migrate_mode mode)
-+{ AuUnsupport(); return 0; }
-+static int aufs_launder_page(struct page *page)
-+{ AuUnsupport(); return 0; }
-+static int aufs_is_partially_uptodate(struct page *page,
-+ unsigned long from,
-+ unsigned long count)
-+{ AuUnsupport(); return 0; }
-+static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
-+ bool *writeback)
-+{ AuUnsupport(); }
-+static int aufs_error_remove_page(struct address_space *mapping,
-+ struct page *page)
-+{ AuUnsupport(); return 0; }
-+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
-+ sector_t *span)
-+{ AuUnsupport(); return 0; }
-+static void aufs_swap_deactivate(struct file *file)
-+{ AuUnsupport(); }
-+#endif /* CONFIG_AUFS_DEBUG */
-+
-+const struct address_space_operations aufs_aop = {
-+ .readpage = aufs_readpage,
-+ .direct_IO = aufs_direct_IO,
-+#ifdef CONFIG_AUFS_DEBUG
-+ .writepage = aufs_writepage,
-+ /* no writepages, because of writepage */
-+ .set_page_dirty = aufs_set_page_dirty,
-+ /* no readpages, because of readpage */
-+ .write_begin = aufs_write_begin,
-+ .write_end = aufs_write_end,
-+ /* no bmap, no block device */
-+ .invalidatepage = aufs_invalidatepage,
-+ .releasepage = aufs_releasepage,
-+ .migratepage = aufs_migratepage,
-+ .launder_page = aufs_launder_page,
-+ .is_partially_uptodate = aufs_is_partially_uptodate,
-+ .is_dirty_writeback = aufs_is_dirty_writeback,
-+ .error_remove_page = aufs_error_remove_page,
-+ .swap_activate = aufs_swap_activate,
-+ .swap_deactivate = aufs_swap_deactivate
-+#endif /* CONFIG_AUFS_DEBUG */
-+};
-diff -Nur linux-4.1.10.orig/fs/aufs/file.h linux-4.1.10/fs/aufs/file.h
---- linux-4.1.10.orig/fs/aufs/file.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/file.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,291 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * file operations
-+ */
-+
-+#ifndef __AUFS_FILE_H__
-+#define __AUFS_FILE_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/poll.h>
-+#include "rwsem.h"
-+
-+struct au_branch;
-+struct au_hfile {
-+ struct file *hf_file;
-+ struct au_branch *hf_br;
-+};
-+
-+struct au_vdir;
-+struct au_fidir {
-+ aufs_bindex_t fd_bbot;
-+ aufs_bindex_t fd_nent;
-+ struct au_vdir *fd_vdir_cache;
-+ struct au_hfile fd_hfile[];
-+};
-+
-+static inline int au_fidir_sz(int nent)
-+{
-+ AuDebugOn(nent < 0);
-+ return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
-+}
-+
-+struct au_finfo {
-+ atomic_t fi_generation;
-+
-+ struct au_rwsem fi_rwsem;
-+ aufs_bindex_t fi_btop;
-+
-+ /* do not union them */
-+ struct { /* for non-dir */
-+ struct au_hfile fi_htop;
-+ atomic_t fi_mmapped;
-+ };
-+ struct au_fidir *fi_hdir; /* for dir only */
-+
-+ struct hlist_node fi_hlist;
-+ struct file *fi_file; /* very ugly */
-+} ____cacheline_aligned_in_smp;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* file.c */
-+extern const struct address_space_operations aufs_aop;
-+unsigned int au_file_roflags(unsigned int flags);
-+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
-+ struct file *file, int force_wr);
-+struct au_do_open_args {
-+ int no_lock;
-+ int (*open)(struct file *file, int flags,
-+ struct file *h_file);
-+ struct au_fidir *fidir;
-+ struct file *h_file;
-+};
-+int au_do_open(struct file *file, struct au_do_open_args *args);
-+int au_reopen_nondir(struct file *file);
-+struct au_pin;
-+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
-+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
-+ int wlock);
-+int au_do_flush(struct file *file, fl_owner_t id,
-+ int (*flush)(struct file *file, fl_owner_t id));
-+
-+/* poll.c */
-+#ifdef CONFIG_AUFS_POLL
-+unsigned int aufs_poll(struct file *file, poll_table *wait);
-+#endif
-+
-+#ifdef CONFIG_AUFS_BR_HFSPLUS
-+/* hfsplus.c */
-+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
-+ int force_wr);
-+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct file *h_file);
-+#else
-+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry,
-+ aufs_bindex_t bindex, int force_wr)
-+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
-+ struct file *h_file);
-+#endif
-+
-+/* f_op.c */
-+extern const struct file_operations aufs_file_fop;
-+int au_do_open_nondir(struct file *file, int flags, struct file *h_file);
-+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
-+struct file *au_read_pre(struct file *file, int keep_fi);
-+
-+/* finfo.c */
-+void au_hfput(struct au_hfile *hf, struct file *file);
-+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
-+ struct file *h_file);
-+
-+void au_update_figen(struct file *file);
-+struct au_fidir *au_fidir_alloc(struct super_block *sb);
-+int au_fidir_realloc(struct au_finfo *finfo, int nbr);
-+
-+void au_fi_init_once(void *_fi);
-+void au_finfo_fin(struct file *file);
-+int au_finfo_init(struct file *file, struct au_fidir *fidir);
-+
-+/* ioctl.c */
-+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
-+#ifdef CONFIG_COMPAT
-+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
-+ unsigned long arg);
-+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
-+ unsigned long arg);
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct au_finfo *au_fi(struct file *file)
-+{
-+ return file->private_data;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * fi_read_lock, fi_write_lock,
-+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock
-+ */
-+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem);
-+
-+#define FiMustNoWaiters(f) AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
-+#define FiMustAnyLock(f) AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
-+#define FiMustWriteLock(f) AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* todo: hard/soft set? */
-+static inline aufs_bindex_t au_fbstart(struct file *file)
-+{
-+ FiMustAnyLock(file);
-+ return au_fi(file)->fi_btop;
-+}
-+
-+static inline aufs_bindex_t au_fbend_dir(struct file *file)
-+{
-+ FiMustAnyLock(file);
-+ AuDebugOn(!au_fi(file)->fi_hdir);
-+ return au_fi(file)->fi_hdir->fd_bbot;
-+}
-+
-+static inline struct au_vdir *au_fvdir_cache(struct file *file)
-+{
-+ FiMustAnyLock(file);
-+ AuDebugOn(!au_fi(file)->fi_hdir);
-+ return au_fi(file)->fi_hdir->fd_vdir_cache;
-+}
-+
-+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex)
-+{
-+ FiMustWriteLock(file);
-+ au_fi(file)->fi_btop = bindex;
-+}
-+
-+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex)
-+{
-+ FiMustWriteLock(file);
-+ AuDebugOn(!au_fi(file)->fi_hdir);
-+ au_fi(file)->fi_hdir->fd_bbot = bindex;
-+}
-+
-+static inline void au_set_fvdir_cache(struct file *file,
-+ struct au_vdir *vdir_cache)
-+{
-+ FiMustWriteLock(file);
-+ AuDebugOn(!au_fi(file)->fi_hdir);
-+ au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
-+}
-+
-+static inline struct file *au_hf_top(struct file *file)
-+{
-+ FiMustAnyLock(file);
-+ AuDebugOn(au_fi(file)->fi_hdir);
-+ return au_fi(file)->fi_htop.hf_file;
-+}
-+
-+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
-+{
-+ FiMustAnyLock(file);
-+ AuDebugOn(!au_fi(file)->fi_hdir);
-+ return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
-+}
-+
-+/* todo: memory barrier? */
-+static inline unsigned int au_figen(struct file *f)
-+{
-+ return atomic_read(&au_fi(f)->fi_generation);
-+}
-+
-+static inline void au_set_mmapped(struct file *f)
-+{
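-+	/* if the counter wraps around to zero, bump it until it is non-zero */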
-+ if (atomic_inc_return(&au_fi(f)->fi_mmapped))
-+ return;
-+ pr_warn("fi_mmapped wrapped around\n");
-+ while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
-+ ;
-+}
-+
-+static inline void au_unset_mmapped(struct file *f)
-+{
-+ atomic_dec(&au_fi(f)->fi_mmapped);
-+}
-+
-+static inline int au_test_mmapped(struct file *f)
-+{
-+ return atomic_read(&au_fi(f)->fi_mmapped);
-+}
-+
-+/* customize vma->vm_file */
-+
-+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
-+ struct file *file)
-+{
-+ struct file *f;
-+
-+ f = vma->vm_file;
-+ get_file(file);
-+ vma->vm_file = file;
-+ fput(f);
-+}
-+
-+#ifdef CONFIG_MMU
-+#define AuDbgVmRegion(file, vma) do {} while (0)
-+
-+static inline void au_vm_file_reset(struct vm_area_struct *vma,
-+ struct file *file)
-+{
-+ au_do_vm_file_reset(vma, file);
-+}
-+#else
-+#define AuDbgVmRegion(file, vma) \
-+ AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
-+
-+static inline void au_vm_file_reset(struct vm_area_struct *vma,
-+ struct file *file)
-+{
-+ struct file *f;
-+
-+ au_do_vm_file_reset(vma, file);
-+ f = vma->vm_region->vm_file;
-+ get_file(file);
-+ vma->vm_region->vm_file = file;
-+ fput(f);
-+}
-+#endif /* CONFIG_MMU */
-+
-+/* handle vma->vm_prfile */
-+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
-+ struct file *file)
-+{
-+ get_file(file);
-+ vma->vm_prfile = file;
-+#ifndef CONFIG_MMU
-+ get_file(file);
-+ vma->vm_region->vm_prfile = file;
-+#endif
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_FILE_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/finfo.c linux-4.1.10/fs/aufs/finfo.c
---- linux-4.1.10.orig/fs/aufs/finfo.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/finfo.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,157 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * file private data
-+ */
-+
-+#include "aufs.h"
-+
-+void au_hfput(struct au_hfile *hf, struct file *file)
-+{
-+ /* todo: direct access f_flags */
-+ if (vfsub_file_flags(file) & __FMODE_EXEC)
-+ allow_write_access(hf->hf_file);
-+ fput(hf->hf_file);
-+ hf->hf_file = NULL;
-+ atomic_dec(&hf->hf_br->br_count);
-+ hf->hf_br = NULL;
-+}
-+
-+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
-+{
-+ struct au_finfo *finfo = au_fi(file);
-+ struct au_hfile *hf;
-+ struct au_fidir *fidir;
-+
-+ fidir = finfo->fi_hdir;
-+ if (!fidir) {
-+ AuDebugOn(finfo->fi_btop != bindex);
-+ hf = &finfo->fi_htop;
-+ } else
-+ hf = fidir->fd_hfile + bindex;
-+
-+ if (hf && hf->hf_file)
-+ au_hfput(hf, file);
-+ if (val) {
-+ FiMustWriteLock(file);
-+ AuDebugOn(IS_ERR_OR_NULL(file->f_path.dentry));
-+ hf->hf_file = val;
-+ hf->hf_br = au_sbr(file->f_path.dentry->d_sb, bindex);
-+ }
-+}
-+
-+void au_update_figen(struct file *file)
-+{
-+ atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_path.dentry));
-+ /* smp_mb(); */ /* atomic_set */
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_fidir *au_fidir_alloc(struct super_block *sb)
-+{
-+ struct au_fidir *fidir;
-+ int nbr;
-+
-+ nbr = au_sbend(sb) + 1;
-+ if (nbr < 2)
-+ nbr = 2; /* initial allocate for 2 branches */
-+ fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
-+ if (fidir) {
-+ fidir->fd_bbot = -1;
-+ fidir->fd_nent = nbr;
-+ fidir->fd_vdir_cache = NULL;
-+ }
-+
-+ return fidir;
-+}
-+
-+int au_fidir_realloc(struct au_finfo *finfo, int nbr)
-+{
-+ int err;
-+ struct au_fidir *fidir, *p;
-+
-+ AuRwMustWriteLock(&finfo->fi_rwsem);
-+ fidir = finfo->fi_hdir;
-+ AuDebugOn(!fidir);
-+
-+ err = -ENOMEM;
-+ p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
-+ GFP_NOFS);
-+ if (p) {
-+ p->fd_nent = nbr;
-+ finfo->fi_hdir = p;
-+ err = 0;
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_finfo_fin(struct file *file)
-+{
-+ struct au_finfo *finfo;
-+
-+ au_nfiles_dec(file->f_path.dentry->d_sb);
-+
-+ finfo = au_fi(file);
-+ AuDebugOn(finfo->fi_hdir);
-+ AuRwDestroy(&finfo->fi_rwsem);
-+ au_cache_free_finfo(finfo);
-+}
-+
-+void au_fi_init_once(void *_finfo)
-+{
-+ struct au_finfo *finfo = _finfo;
-+ static struct lock_class_key aufs_fi;
-+
-+ au_rw_init(&finfo->fi_rwsem);
-+ au_rw_class(&finfo->fi_rwsem, &aufs_fi);
-+}
-+
-+int au_finfo_init(struct file *file, struct au_fidir *fidir)
-+{
-+ int err;
-+ struct au_finfo *finfo;
-+ struct dentry *dentry;
-+
-+ err = -ENOMEM;
-+ dentry = file->f_path.dentry;
-+ finfo = au_cache_alloc_finfo();
-+ if (unlikely(!finfo))
-+ goto out;
-+
-+ err = 0;
-+ au_nfiles_inc(dentry->d_sb);
-+ /* verbose coding for lock class name */
-+ if (!fidir)
-+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO);
-+ else
-+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO);
-+ au_rw_write_lock(&finfo->fi_rwsem);
-+ finfo->fi_btop = -1;
-+ finfo->fi_hdir = fidir;
-+ atomic_set(&finfo->fi_generation, au_digen(dentry));
-+ /* smp_mb(); */ /* atomic_set */
-+
-+ file->private_data = finfo;
-+
-+out:
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/f_op.c linux-4.1.10/fs/aufs/f_op.c
---- linux-4.1.10.orig/fs/aufs/f_op.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/f_op.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,738 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * file and vm operations
-+ */
-+
-+#include <linux/aio.h>
-+#include <linux/fs_stack.h>
-+#include <linux/mman.h>
-+#include <linux/security.h>
-+#include "aufs.h"
-+
-+int au_do_open_nondir(struct file *file, int flags, struct file *h_file)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct dentry *dentry;
-+ struct au_finfo *finfo;
-+ struct inode *h_inode;
-+
-+ FiMustWriteLock(file);
-+
-+ err = 0;
-+ dentry = file->f_path.dentry;
-+ AuDebugOn(IS_ERR_OR_NULL(dentry));
-+ finfo = au_fi(file);
-+ memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
-+ atomic_set(&finfo->fi_mmapped, 0);
-+ bindex = au_dbstart(dentry);
-+ if (!h_file)
-+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
-+ else
-+ get_file(h_file);
-+ if (IS_ERR(h_file))
-+ err = PTR_ERR(h_file);
-+ else {
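-+		/* an O_TMPFILE opened without O_EXCL may be linked in later */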
-+ if ((flags & __O_TMPFILE)
-+ && !(flags & O_EXCL)) {
-+ h_inode = file_inode(h_file);
-+ spin_lock(&h_inode->i_lock);
-+ h_inode->i_state |= I_LINKABLE;
-+ spin_unlock(&h_inode->i_lock);
-+ }
-+ au_set_fbstart(file, bindex);
-+ au_set_h_fptr(file, bindex, h_file);
-+ au_update_figen(file);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ }
-+
-+ return err;
-+}
-+
-+static int aufs_open_nondir(struct inode *inode __maybe_unused,
-+ struct file *file)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct au_do_open_args args = {
-+ .open = au_do_open_nondir
-+ };
-+
-+ AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n",
-+ file, vfsub_file_flags(file), file->f_mode);
-+
-+ sb = file->f_path.dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ err = au_do_open(file, &args);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
-+{
-+ struct au_finfo *finfo;
-+ aufs_bindex_t bindex;
-+
-+ finfo = au_fi(file);
-+ au_sphl_del(&finfo->fi_hlist,
-+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
-+ bindex = finfo->fi_btop;
-+ if (bindex >= 0)
-+ au_set_h_fptr(file, bindex, NULL);
-+
-+ au_finfo_fin(file);
-+ return 0;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
-+{
-+ int err;
-+ struct file *h_file;
-+
-+ err = 0;
-+ h_file = au_hf_top(file);
-+ if (h_file)
-+ err = vfsub_flush(h_file, id);
-+ return err;
-+}
-+
-+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
-+{
-+ return au_do_flush(file, id, au_do_flush_nondir);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * read and write functions acquire [fdi]_rwsem once, but release them before
-+ * mmap_sem. This is to avoid a race condition with mmap(2). Releasing these
-+ * aufs rwsems should be safe; since si_rwsem is kept, no branch management
-+ * and no harmful copy-up should happen. Actually a copy-up may happen in
-+ * read functions after [fdi]_rwsem are released, but it should be harmless.
-+ */
-+
-+/* Callers should call au_read_post() or fput() in the end */
-+struct file *au_read_pre(struct file *file, int keep_fi)
-+{
-+ struct file *h_file;
-+ int err;
-+
-+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
-+ if (!err) {
-+ di_read_unlock(file->f_path.dentry, AuLock_IR);
-+ h_file = au_hf_top(file);
-+ get_file(h_file);
-+ if (!keep_fi)
-+ fi_read_unlock(file);
-+ } else
-+ h_file = ERR_PTR(err);
-+
-+ return h_file;
-+}
-+
-+static void au_read_post(struct inode *inode, struct file *h_file)
-+{
-+	/* update without a lock; I don't think it is a problem */
-+ fsstack_copy_attr_atime(inode, file_inode(h_file));
-+ fput(h_file);
-+}
-+
-+struct au_write_pre {
-+ blkcnt_t blks;
-+ aufs_bindex_t bstart;
-+};
-+
-+/*
-+ * return with iinfo is write-locked
-+ * callers should call au_write_post() or iinfo_write_unlock() + fput() in the
-+ * end
-+ */
-+static struct file *au_write_pre(struct file *file, int do_ready,
-+ struct au_write_pre *wpre)
-+{
-+ struct file *h_file;
-+ struct dentry *dentry;
-+ int err;
-+ struct au_pin pin;
-+
-+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
-+ h_file = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ dentry = file->f_path.dentry;
-+ if (do_ready) {
-+ err = au_ready_to_write(file, -1, &pin);
-+ if (unlikely(err)) {
-+ h_file = ERR_PTR(err);
-+ di_write_unlock(dentry);
-+ goto out_fi;
-+ }
-+ }
-+
-+ di_downgrade_lock(dentry, /*flags*/0);
-+ if (wpre)
-+ wpre->bstart = au_fbstart(file);
-+ h_file = au_hf_top(file);
-+ get_file(h_file);
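-+	/* record the current block count; au_write_post() compares it */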
-+ if (wpre)
-+ wpre->blks = file_inode(h_file)->i_blocks;
-+ if (do_ready)
-+ au_unpin(&pin);
-+ di_read_unlock(dentry, /*flags*/0);
-+
-+out_fi:
-+ fi_write_unlock(file);
-+out:
-+ return h_file;
-+}
-+
-+static void au_write_post(struct inode *inode, struct file *h_file,
-+ struct au_write_pre *wpre, ssize_t written)
-+{
-+ struct inode *h_inode;
-+
-+ au_cpup_attr_timesizes(inode);
-+ AuDebugOn(au_ibstart(inode) != wpre->bstart);
-+ h_inode = file_inode(h_file);
-+ inode->i_mode = h_inode->i_mode;
-+ ii_write_unlock(inode);
-+ fput(h_file);
-+
-+ /* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */
-+ if (written > 0)
-+ au_fhsm_wrote(inode->i_sb, wpre->bstart,
-+ /*force*/h_inode->i_blocks > wpre->blks);
-+}
-+
-+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
-+ loff_t *ppos)
-+{
-+ ssize_t err;
-+ struct inode *inode;
-+ struct file *h_file;
-+ struct super_block *sb;
-+
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+
-+ h_file = au_read_pre(file, /*keep_fi*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+	/* the file data may be obsoleted by a concurrent copy-up, but no problem */
-+ err = vfsub_read_u(h_file, buf, count, ppos);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ au_read_post(inode, h_file);
-+
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/*
-+ * todo: very ugly
-+ * it safely locks both i_mutex and si_rwsem for read.
-+ * if the plink maintenance mode continues forever (which is the real
-+ * problem), this may loop forever.
-+ */
-+static void au_mtx_and_read_lock(struct inode *inode)
-+{
-+ int err;
-+ struct super_block *sb = inode->i_sb;
-+
-+ while (1) {
-+ mutex_lock(&inode->i_mutex);
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (!err)
-+ break;
-+ mutex_unlock(&inode->i_mutex);
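-+		/* wait for the plink maintenance mode to end, then retry */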
-+ si_read_lock(sb, AuLock_NOPLMW);
-+ si_read_unlock(sb);
-+ }
-+}
-+
-+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
-+ size_t count, loff_t *ppos)
-+{
-+ ssize_t err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *h_file;
-+ char __user *buf = (char __user *)ubuf;
-+
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = vfsub_write_u(h_file, buf, count, ppos);
-+ au_write_post(inode, h_file, &wpre, err);
-+
-+out:
-+ si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+ return err;
-+}
-+
-+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio,
-+ struct iov_iter *iov_iter)
-+{
-+ ssize_t err;
-+ struct file *file;
-+ ssize_t (*iter)(struct kiocb *, struct iov_iter *);
-+
-+ err = security_file_permission(h_file, rw);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = -ENOSYS;
-+ iter = NULL;
-+ if (rw == MAY_READ)
-+ iter = h_file->f_op->read_iter;
-+ else if (rw == MAY_WRITE)
-+ iter = h_file->f_op->write_iter;
-+
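-+	/* temporarily swap in the lower file as the kiocb's target */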
-+ file = kio->ki_filp;
-+ kio->ki_filp = h_file;
-+ if (iter) {
-+ lockdep_off();
-+ err = iter(kio, iov_iter);
-+ lockdep_on();
-+ } else
-+ /* currently there is no such fs */
-+ WARN_ON_ONCE(1);
-+ kio->ki_filp = file;
-+
-+out:
-+ return err;
-+}
-+
-+static ssize_t aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter)
-+{
-+ ssize_t err;
-+ struct file *file, *h_file;
-+ struct inode *inode;
-+ struct super_block *sb;
-+
-+ file = kio->ki_filp;
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+
-+ h_file = au_read_pre(file, /*keep_fi*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = au_do_iter(h_file, MAY_READ, kio, iov_iter);
-+ /* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ au_read_post(inode, h_file);
-+
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter)
-+{
-+ ssize_t err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *file, *h_file;
-+
-+ file = kio->ki_filp;
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter);
-+ au_write_post(inode, h_file, &wpre, err);
-+
-+out:
-+ si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+ return err;
-+}
-+
-+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
-+{
-+ ssize_t err;
-+ struct file *h_file;
-+ struct inode *inode;
-+ struct super_block *sb;
-+
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+
-+ h_file = au_read_pre(file, /*keep_fi*/1);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ if (au_test_loopback_kthread()) {
-+ au_warn_loopback(h_file->f_path.dentry->d_sb);
-+ if (file->f_mapping != h_file->f_mapping) {
-+ file->f_mapping = h_file->f_mapping;
-+ smp_mb(); /* unnecessary? */
-+ }
-+ }
-+ fi_read_unlock(file);
-+
-+ err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
-+	/* todo: necessary? */
-+ /* file->f_ra = h_file->f_ra; */
-+ au_read_post(inode, h_file);
-+
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+static ssize_t
-+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
-+ size_t len, unsigned int flags)
-+{
-+ ssize_t err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *h_file;
-+
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
-+ au_write_post(inode, h_file, &wpre, err);
-+
-+out:
-+ si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+ return err;
-+}
-+
-+static long aufs_fallocate(struct file *file, int mode, loff_t offset,
-+ loff_t len)
-+{
-+ long err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *h_file;
-+
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_fallocate(h_file, mode, offset, len);
-+ lockdep_on();
-+ au_write_post(inode, h_file, &wpre, /*written*/1);
-+
-+out:
-+ si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * The locking order around current->mmap_sem.
-+ * - in most and regular cases
-+ * file I/O syscall -- aufs_read() or something
-+ * -- si_rwsem for read -- mmap_sem
-+ * (Note that [fdi]i_rwsem are released before mmap_sem).
-+ * - in mmap case
-+ * mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
-+ * This AB-BA order is definitely bad, but is not a problem since "si_rwsem for
-+ * read" allows multiple processes to acquire it and [fdi]i_rwsem are not held in
-+ * file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
-+ * It means that when aufs acquires si_rwsem for write, the process should never
-+ * acquire mmap_sem.
-+ *
-+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not a
-+ * problem either, since no directory can be mmap-ed.
-+ * The similar scenario is applied to aufs_readlink() too.
-+ */
-+
-+#if 0 /* stop calling security_file_mmap() */
-+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
-+#define AuConv_VM_PROT(f, b) _calc_vm_trans(f, VM_##b, PROT_##b)
-+
-+static unsigned long au_arch_prot_conv(unsigned long flags)
-+{
-+ /* currently ppc64 only */
-+#ifdef CONFIG_PPC64
-+ /* cf. linux/arch/powerpc/include/asm/mman.h */
-+ AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
-+ return AuConv_VM_PROT(flags, SAO);
-+#else
-+ AuDebugOn(arch_calc_vm_prot_bits(-1));
-+ return 0;
-+#endif
-+}
-+
-+static unsigned long au_prot_conv(unsigned long flags)
-+{
-+ return AuConv_VM_PROT(flags, READ)
-+ | AuConv_VM_PROT(flags, WRITE)
-+ | AuConv_VM_PROT(flags, EXEC)
-+ | au_arch_prot_conv(flags);
-+}
-+
-+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
-+#define AuConv_VM_MAP(f, b) _calc_vm_trans(f, VM_##b, MAP_##b)
-+
-+static unsigned long au_flag_conv(unsigned long flags)
-+{
-+ return AuConv_VM_MAP(flags, GROWSDOWN)
-+ | AuConv_VM_MAP(flags, DENYWRITE)
-+ | AuConv_VM_MAP(flags, LOCKED);
-+}
-+#endif
-+
-+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+ int err;
-+ const unsigned char wlock
-+ = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
-+ struct super_block *sb;
-+ struct file *h_file;
-+ struct inode *inode;
-+
-+ AuDbgVmRegion(file, vma);
-+
-+ inode = file_inode(file);
-+ sb = inode->i_sb;
-+ lockdep_off();
-+ si_read_lock(sb, AuLock_NOPLMW);
-+
-+ h_file = au_write_pre(file, wlock, /*wpre*/NULL);
-+ lockdep_on();
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ err = 0;
-+ au_set_mmapped(file);
-+ au_vm_file_reset(vma, h_file);
-+ /*
-+ * we cannot call security_mmap_file() here since it may acquire
-+ * mmap_sem or i_mutex.
-+ *
-+ * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags),
-+ * au_flag_conv(vma->vm_flags));
-+ */
-+ if (!err)
-+ err = h_file->f_op->mmap(h_file, vma);
-+ if (!err) {
-+ au_vm_prfile_set(vma, file);
-+ fsstack_copy_attr_atime(inode, file_inode(h_file));
-+ goto out_fput; /* success */
-+ }
-+ au_unset_mmapped(file);
-+ au_vm_file_reset(vma, file);
-+
-+out_fput:
-+ lockdep_off();
-+ ii_write_unlock(inode);
-+ lockdep_on();
-+ fput(h_file);
-+out:
-+ lockdep_off();
-+ si_read_unlock(sb);
-+ lockdep_on();
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
-+ int datasync)
-+{
-+ int err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *h_file;
-+
-+ err = 0; /* -EBADF; */ /* posix? */
-+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
-+ goto out;
-+
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out_unlock;
-+
-+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
-+ au_write_post(inode, h_file, &wpre, /*written*/0);
-+
-+out_unlock:
-+ si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+out:
-+ return err;
-+}
-+
-+/* no one supports this operation, currently */
-+#if 0
-+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync)
-+{
-+ int err;
-+ struct au_write_pre wpre;
-+ struct inode *inode;
-+ struct file *file, *h_file;
-+
-+ err = 0; /* -EBADF; */ /* posix? */
-+	file = kio->ki_filp;
-+	if (unlikely(!(file->f_mode & FMODE_WRITE)))
-+		goto out;
-+
-+ inode = file_inode(file);
-+ au_mtx_and_read_lock(inode);
-+
-+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out_unlock;
-+
-+ err = -ENOSYS;
-+ h_file = au_hf_top(file);
-+ if (h_file->f_op->aio_fsync) {
-+ struct mutex *h_mtx;
-+
-+ h_mtx = &file_inode(h_file)->i_mutex;
-+ if (!is_sync_kiocb(kio)) {
-+ get_file(h_file);
-+ fput(file);
-+ }
-+ kio->ki_filp = h_file;
-+ err = h_file->f_op->aio_fsync(kio, datasync);
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
-+ if (!err)
-+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
-+ /*ignore*/
-+ mutex_unlock(h_mtx);
-+ }
-+ au_write_post(inode, h_file, &wpre, /*written*/0);
-+
-+out_unlock:
-+	si_read_unlock(inode->i_sb);
-+ mutex_unlock(&inode->i_mutex);
-+out:
-+ return err;
-+}
-+#endif
-+
-+static int aufs_fasync(int fd, struct file *file, int flag)
-+{
-+ int err;
-+ struct file *h_file;
-+ struct super_block *sb;
-+
-+ sb = file->f_path.dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+
-+ h_file = au_read_pre(file, /*keep_fi*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ if (h_file->f_op->fasync)
-+ err = h_file->f_op->fasync(fd, h_file, flag);
-+ fput(h_file); /* instead of au_read_post() */
-+
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* no one supports this operation, currently */
-+#if 0
-+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
-+ size_t len, loff_t *pos, int more)
-+{
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+const struct file_operations aufs_file_fop = {
-+ .owner = THIS_MODULE,
-+
-+ .llseek = default_llseek,
-+
-+ .read = aufs_read,
-+ .write = aufs_write,
-+ .read_iter = aufs_read_iter,
-+ .write_iter = aufs_write_iter,
-+
-+#ifdef CONFIG_AUFS_POLL
-+ .poll = aufs_poll,
-+#endif
-+ .unlocked_ioctl = aufs_ioctl_nondir,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = aufs_compat_ioctl_nondir,
-+#endif
-+ .mmap = aufs_mmap,
-+ .open = aufs_open_nondir,
-+ .flush = aufs_flush_nondir,
-+ .release = aufs_release_nondir,
-+ .fsync = aufs_fsync_nondir,
-+ /* .aio_fsync = aufs_aio_fsync_nondir, */
-+ .fasync = aufs_fasync,
-+ /* .sendpage = aufs_sendpage, */
-+ .splice_write = aufs_splice_write,
-+ .splice_read = aufs_splice_read,
-+#if 0
-+ .aio_splice_write = aufs_aio_splice_write,
-+ .aio_splice_read = aufs_aio_splice_read,
-+#endif
-+ .fallocate = aufs_fallocate
-+};
-diff -Nur linux-4.1.10.orig/fs/aufs/fstype.h linux-4.1.10/fs/aufs/fstype.h
---- linux-4.1.10.orig/fs/aufs/fstype.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/fstype.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,400 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * judging filesystem type
-+ */
-+
-+#ifndef __AUFS_FSTYPE_H__
-+#define __AUFS_FSTYPE_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fs.h>
-+#include <linux/magic.h>
-+#include <linux/romfs_fs.h>
-+#include <linux/nfs_fs.h>
-+
-+static inline int au_test_aufs(struct super_block *sb)
-+{
-+ return sb->s_magic == AUFS_SUPER_MAGIC;
-+}
-+
-+static inline const char *au_sbtype(struct super_block *sb)
-+{
-+ return sb->s_type->name;
-+}
-+
-+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE)
-+ return sb->s_magic == ISOFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE)
-+ return sb->s_magic == ROMFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE)
-+ return sb->s_magic == CRAMFS_MAGIC;
-+#endif
-+ return 0;
-+}
-+
-+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE)
-+ return sb->s_magic == NFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE)
-+ return sb->s_magic == FUSE_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE)
-+ return sb->s_magic == XFS_SB_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
-+{
-+#ifdef CONFIG_TMPFS
-+ return sb->s_magic == TMPFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE)
-+ return !strcmp(au_sbtype(sb), "ecryptfs");
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_ramfs(struct super_block *sb)
-+{
-+ return sb->s_magic == RAMFS_MAGIC;
-+}
-+
-+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE)
-+ return sb->s_magic == UBIFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
-+{
-+#ifdef CONFIG_PROC_FS
-+ return sb->s_magic == PROC_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
-+{
-+#ifdef CONFIG_SYSFS
-+ return sb->s_magic == SYSFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
-+ return sb->s_magic == CONFIGFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_minix(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE)
-+ return sb->s_magic == MINIX3_SUPER_MAGIC
-+ || sb->s_magic == MINIX2_SUPER_MAGIC
-+ || sb->s_magic == MINIX2_SUPER_MAGIC2
-+ || sb->s_magic == MINIX_SUPER_MAGIC
-+ || sb->s_magic == MINIX_SUPER_MAGIC2;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_fat(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE)
-+ return sb->s_magic == MSDOS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_msdos(struct super_block *sb)
-+{
-+ return au_test_fat(sb);
-+}
-+
-+static inline int au_test_vfat(struct super_block *sb)
-+{
-+ return au_test_fat(sb);
-+}
-+
-+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
-+{
-+#ifdef CONFIG_SECURITYFS
-+ return sb->s_magic == SECURITYFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE)
-+ return sb->s_magic == SQUASHFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+ return sb->s_magic == BTRFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE)
-+ return sb->s_magic == XENFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
-+{
-+#ifdef CONFIG_DEBUG_FS
-+ return sb->s_magic == DEBUGFS_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE)
-+ return sb->s_magic == NILFS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
-+{
-+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE)
-+ return sb->s_magic == HFSPLUS_SUPER_MAGIC;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * filesystems which cannot be an aufs branch.
-+ */
-+static inline int au_test_fs_unsuppoted(struct super_block *sb)
-+{
-+ return
-+#ifndef CONFIG_AUFS_BR_RAMFS
-+ au_test_ramfs(sb) ||
-+#endif
-+ au_test_procfs(sb)
-+ || au_test_sysfs(sb)
-+ || au_test_configfs(sb)
-+ || au_test_debugfs(sb)
-+ || au_test_securityfs(sb)
-+ || au_test_xenfs(sb)
-+ || au_test_ecryptfs(sb)
-+ /* || !strcmp(au_sbtype(sb), "unionfs") */
-+ || au_test_aufs(sb); /* will be supported in next version */
-+}
-+
-+static inline int au_test_fs_remote(struct super_block *sb)
-+{
-+ return !au_test_tmpfs(sb)
-+#ifdef CONFIG_AUFS_BR_RAMFS
-+ && !au_test_ramfs(sb)
-+#endif
-+ && !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * Note: these functions (below) are created after reading ->getattr() in all
-+ * filesystems under linux/fs. It means we have to do so again on every update...
-+ */
-+
-+/*
-+ * some filesystems require getattr to refresh the inode attributes before
-+ * referencing.
-+ * in most cases, we can rely on the inode attribute in NFS (or any remote fs)
-+ * and leave the work for d_revalidate()
-+ */
-+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
-+{
-+ return au_test_nfs(sb)
-+ || au_test_fuse(sb)
-+ /* || au_test_btrfs(sb) */ /* untested */
-+ ;
-+}
-+
-+/*
-+ * filesystems which don't maintain i_size or i_blocks.
-+ */
-+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
-+{
-+ return au_test_xfs(sb)
-+ || au_test_btrfs(sb)
-+ || au_test_ubifs(sb)
-+ || au_test_hfsplus(sb) /* maintained, but incorrect */
-+ /* || au_test_minix(sb) */ /* untested */
-+ ;
-+}
-+
-+/*
-+ * filesystems which don't store the correct value in some of their inode
-+ * attributes.
-+ */
-+static inline int au_test_fs_bad_iattr(struct super_block *sb)
-+{
-+ return au_test_fs_bad_iattr_size(sb)
-+ || au_test_fat(sb)
-+ || au_test_msdos(sb)
-+ || au_test_vfat(sb);
-+}
-+
-+/* they don't check i_nlink in link(2) */
-+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
-+{
-+ return au_test_tmpfs(sb)
-+#ifdef CONFIG_AUFS_BR_RAMFS
-+ || au_test_ramfs(sb)
-+#endif
-+ || au_test_ubifs(sb)
-+ || au_test_hfsplus(sb);
-+}
-+
-+/*
-+ * filesystems which set S_NOATIME and S_NOCMTIME.
-+ */
-+static inline int au_test_fs_notime(struct super_block *sb)
-+{
-+ return au_test_nfs(sb)
-+ || au_test_fuse(sb)
-+ || au_test_ubifs(sb)
-+ ;
-+}
-+
-+/* temporary support for i#1 in cramfs */
-+static inline int au_test_fs_unique_ino(struct inode *inode)
-+{
-+ if (au_test_cramfs(inode->i_sb))
-+ return inode->i_ino != 1;
-+ return 1;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * the filesystem where the xino files are placed must support i/o after unlink
-+ * and maintain i_size and i_blocks.
-+ */
-+static inline int au_test_fs_bad_xino(struct super_block *sb)
-+{
-+ return au_test_fs_remote(sb)
-+ || au_test_fs_bad_iattr_size(sb)
-+ /* don't want unnecessary work for xino */
-+ || au_test_aufs(sb)
-+ || au_test_ecryptfs(sb)
-+ || au_test_nilfs(sb);
-+}
-+
-+static inline int au_test_fs_trunc_xino(struct super_block *sb)
-+{
-+ return au_test_tmpfs(sb)
-+ || au_test_ramfs(sb);
-+}
-+
-+/*
-+ * test if the @sb is real-readonly.
-+ */
-+static inline int au_test_fs_rr(struct super_block *sb)
-+{
-+ return au_test_squashfs(sb)
-+ || au_test_iso9660(sb)
-+ || au_test_cramfs(sb)
-+ || au_test_romfs(sb);
-+}
-+
-+/*
-+ * test if the @inode is nfs with 'noacl' option
-+ * NFS always sets MS_POSIXACL regardless of its mount option 'noacl'.
-+ */
-+static inline int au_test_nfs_noacl(struct inode *inode)
-+{
-+ return au_test_nfs(inode->i_sb)
-+ /* && IS_POSIXACL(inode) */
-+ && !nfs_server_capable(inode, NFS_CAP_ACLS);
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_FSTYPE_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/hfsnotify.c linux-4.1.10/fs/aufs/hfsnotify.c
---- linux-4.1.10.orig/fs/aufs/hfsnotify.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/hfsnotify.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,288 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * fsnotify for the lower directories
-+ */
-+
-+#include "aufs.h"
-+
-+/* FS_IN_IGNORED is unnecessary */
-+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
-+ | FS_CREATE | FS_EVENT_ON_CHILD);
-+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
-+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
-+
-+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
-+{
-+ struct au_hnotify *hn = container_of(mark, struct au_hnotify,
-+ hn_mark);
-+ AuDbg("here\n");
-+ au_cache_free_hnotify(hn);
-+ smp_mb__before_atomic();
-+ if (atomic64_dec_and_test(&au_hfsn_ifree))
-+ wake_up(&au_hfsn_wq);
-+}
-+
-+static int au_hfsn_alloc(struct au_hinode *hinode)
-+{
-+ int err;
-+ struct au_hnotify *hn;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct fsnotify_mark *mark;
-+ aufs_bindex_t bindex;
-+
-+ hn = hinode->hi_notify;
-+ sb = hn->hn_aufs_inode->i_sb;
-+ bindex = au_br_index(sb, hinode->hi_id);
-+ br = au_sbr(sb, bindex);
-+ AuDebugOn(!br->br_hfsn);
-+
-+ mark = &hn->hn_mark;
-+ fsnotify_init_mark(mark, au_hfsn_free_mark);
-+ mark->mask = AuHfsnMask;
-+ /*
-+	 * by udba rename or rmdir, aufs assigns a new inode to the known
-+ * h_inode, so specify 1 to allow dups.
-+ */
-+ lockdep_off();
-+ err = fsnotify_add_mark(mark, br->br_hfsn->hfsn_group, hinode->hi_inode,
-+ /*mnt*/NULL, /*allow_dups*/1);
-+ /* even if err */
-+ fsnotify_put_mark(mark);
-+ lockdep_on();
-+
-+ return err;
-+}
-+
-+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
-+{
-+ struct fsnotify_mark *mark;
-+ unsigned long long ull;
-+ struct fsnotify_group *group;
-+
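-+	/* count this pending free; au_hfsn_fin() waits for it to drop to 0 */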
-+ ull = atomic64_inc_return(&au_hfsn_ifree);
-+ BUG_ON(!ull);
-+
-+ mark = &hn->hn_mark;
-+ spin_lock(&mark->lock);
-+ group = mark->group;
-+ fsnotify_get_group(group);
-+ spin_unlock(&mark->lock);
-+ lockdep_off();
-+ fsnotify_destroy_mark(mark, group);
-+ fsnotify_put_group(group);
-+ lockdep_on();
-+
-+ /* free hn by myself */
-+ return 0;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
-+{
-+ struct fsnotify_mark *mark;
-+
-+ mark = &hinode->hi_notify->hn_mark;
-+ spin_lock(&mark->lock);
-+ if (do_set) {
-+ AuDebugOn(mark->mask & AuHfsnMask);
-+ mark->mask |= AuHfsnMask;
-+ } else {
-+ AuDebugOn(!(mark->mask & AuHfsnMask));
-+ mark->mask &= ~AuHfsnMask;
-+ }
-+ spin_unlock(&mark->lock);
-+ /* fsnotify_recalc_inode_mask(hinode->hi_inode); */
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* #define AuDbgHnotify */
-+#ifdef AuDbgHnotify
-+static char *au_hfsn_name(u32 mask)
-+{
-+#ifdef CONFIG_AUFS_DEBUG
-+#define test_ret(flag) \
-+ do { \
-+ if (mask & flag) \
-+ return #flag; \
-+ } while (0)
-+ test_ret(FS_ACCESS);
-+ test_ret(FS_MODIFY);
-+ test_ret(FS_ATTRIB);
-+ test_ret(FS_CLOSE_WRITE);
-+ test_ret(FS_CLOSE_NOWRITE);
-+ test_ret(FS_OPEN);
-+ test_ret(FS_MOVED_FROM);
-+ test_ret(FS_MOVED_TO);
-+ test_ret(FS_CREATE);
-+ test_ret(FS_DELETE);
-+ test_ret(FS_DELETE_SELF);
-+ test_ret(FS_MOVE_SELF);
-+ test_ret(FS_UNMOUNT);
-+ test_ret(FS_Q_OVERFLOW);
-+ test_ret(FS_IN_IGNORED);
-+ test_ret(FS_ISDIR);
-+ test_ret(FS_IN_ONESHOT);
-+ test_ret(FS_EVENT_ON_CHILD);
-+ return "";
-+#undef test_ret
-+#else
-+ return "??";
-+#endif
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_hfsn_free_group(struct fsnotify_group *group)
-+{
-+ struct au_br_hfsnotify *hfsn = group->private;
-+
-+ AuDbg("here\n");
-+ kfree(hfsn);
-+}
-+
-+static int au_hfsn_handle_event(struct fsnotify_group *group,
-+ struct inode *inode,
-+ struct fsnotify_mark *inode_mark,
-+ struct fsnotify_mark *vfsmount_mark,
-+ u32 mask, void *data, int data_type,
-+ const unsigned char *file_name, u32 cookie)
-+{
-+ int err;
-+ struct au_hnotify *hnotify;
-+ struct inode *h_dir, *h_inode;
-+ struct qstr h_child_qstr = QSTR_INIT(file_name, strlen(file_name));
-+
-+ AuDebugOn(data_type != FSNOTIFY_EVENT_INODE);
-+
-+ err = 0;
-+ /* if FS_UNMOUNT happens, there must be another bug */
-+ AuDebugOn(mask & FS_UNMOUNT);
-+ if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
-+ goto out;
-+
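-+	/* the inode here is the watched lower dir; the event is about its child */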
-+ h_dir = inode;
-+ h_inode = NULL;
-+#ifdef AuDbgHnotify
-+ au_debug_on();
-+ if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
-+ || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
-+ AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
-+ h_dir->i_ino, mask, au_hfsn_name(mask),
-+ AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
-+ /* WARN_ON(1); */
-+ }
-+ au_debug_off();
-+#endif
-+
-+ AuDebugOn(!inode_mark);
-+ hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
-+ err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode);
-+
-+out:
-+ return err;
-+}
-+
-+static struct fsnotify_ops au_hfsn_ops = {
-+ .handle_event = au_hfsn_handle_event,
-+ .free_group_priv = au_hfsn_free_group
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_hfsn_fin_br(struct au_branch *br)
-+{
-+ struct au_br_hfsnotify *hfsn;
-+
-+ hfsn = br->br_hfsn;
-+ if (hfsn) {
-+ lockdep_off();
-+ fsnotify_put_group(hfsn->hfsn_group);
-+ lockdep_on();
-+ }
-+}
-+
-+static int au_hfsn_init_br(struct au_branch *br, int perm)
-+{
-+ int err;
-+ struct fsnotify_group *group;
-+ struct au_br_hfsnotify *hfsn;
-+
-+ err = 0;
-+ br->br_hfsn = NULL;
-+ if (!au_br_hnotifyable(perm))
-+ goto out;
-+
-+ err = -ENOMEM;
-+ hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS);
-+ if (unlikely(!hfsn))
-+ goto out;
-+
-+ err = 0;
-+ group = fsnotify_alloc_group(&au_hfsn_ops);
-+ if (IS_ERR(group)) {
-+ err = PTR_ERR(group);
-+ pr_err("fsnotify_alloc_group() failed, %d\n", err);
-+ goto out_hfsn;
-+ }
-+
-+ group->private = hfsn;
-+ hfsn->hfsn_group = group;
-+ br->br_hfsn = hfsn;
-+ goto out; /* success */
-+
-+out_hfsn:
-+ kfree(hfsn);
-+out:
-+ return err;
-+}
-+
-+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (!br->br_hfsn)
-+ err = au_hfsn_init_br(br, perm);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_hfsn_fin(void)
-+{
-+ AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
-+ wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
-+}
-+
-+const struct au_hnotify_op au_hnotify_op = {
-+ .ctl = au_hfsn_ctl,
-+ .alloc = au_hfsn_alloc,
-+ .free = au_hfsn_free,
-+
-+ .fin = au_hfsn_fin,
-+
-+ .reset_br = au_hfsn_reset_br,
-+ .fin_br = au_hfsn_fin_br,
-+ .init_br = au_hfsn_init_br
-+};
-diff -Nur linux-4.1.10.orig/fs/aufs/hfsplus.c linux-4.1.10/fs/aufs/hfsplus.c
---- linux-4.1.10.orig/fs/aufs/hfsplus.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/hfsplus.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright (C) 2010-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * special support for filesystems which acquire an inode mutex
-+ * at the final close of a file, e.g. hfsplus.
-+ *
-+ * This trick is very simple and stupid: just open the file before the really
-+ * necessary open, to tell hfsplus that this is not the final close.
-+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
-+ * and au_h_open_post() after releasing it.
-+ */
-+
-+#include "aufs.h"
-+
-+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
-+ int force_wr)
-+{
-+ struct file *h_file;
-+ struct dentry *h_dentry;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ AuDebugOn(!h_dentry);
-+ AuDebugOn(d_is_negative(h_dentry));
-+
-+ h_file = NULL;
-+ if (au_test_hfsplus(h_dentry->d_sb)
-+ && d_is_reg(h_dentry))
-+ h_file = au_h_open(dentry, bindex,
-+ O_RDONLY | O_NOATIME | O_LARGEFILE,
-+ /*file*/NULL, force_wr);
-+ return h_file;
-+}
-+
-+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct file *h_file)
-+{
-+ if (h_file) {
-+ fput(h_file);
-+ au_sbr_put(dentry->d_sb, bindex);
-+ }
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/hnotify.c linux-4.1.10/fs/aufs/hnotify.c
---- linux-4.1.10.orig/fs/aufs/hnotify.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/hnotify.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,710 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * abstraction to notify the direct changes on lower directories
-+ */
-+
-+#include "aufs.h"
-+
-+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
-+{
-+ int err;
-+ struct au_hnotify *hn;
-+
-+ err = -ENOMEM;
-+ hn = au_cache_alloc_hnotify();
-+ if (hn) {
-+ hn->hn_aufs_inode = inode;
-+ hinode->hi_notify = hn;
-+ err = au_hnotify_op.alloc(hinode);
-+ AuTraceErr(err);
-+ if (unlikely(err)) {
-+ hinode->hi_notify = NULL;
-+ au_cache_free_hnotify(hn);
-+ /*
-+			 * The upper dir was removed by udba, but a dir with the
-+			 * same name is left. In this case, aufs assigns a new
-+			 * inode number and sets the monitor again.
-+			 * For the lower dir, the old monitor is still left.
-+ */
-+ if (err == -EEXIST)
-+ err = 0;
-+ }
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+void au_hn_free(struct au_hinode *hinode)
-+{
-+ struct au_hnotify *hn;
-+
-+ hn = hinode->hi_notify;
-+ if (hn) {
-+ hinode->hi_notify = NULL;
-+ if (au_hnotify_op.free(hinode, hn))
-+ au_cache_free_hnotify(hn);
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_hn_ctl(struct au_hinode *hinode, int do_set)
-+{
-+ if (hinode->hi_notify)
-+ au_hnotify_op.ctl(hinode, do_set);
-+}
-+
-+void au_hn_reset(struct inode *inode, unsigned int flags)
-+{
-+ aufs_bindex_t bindex, bend;
-+ struct inode *hi;
-+ struct dentry *iwhdentry;
-+
-+ bend = au_ibend(inode);
-+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
-+ hi = au_h_iptr(inode, bindex);
-+ if (!hi)
-+ continue;
-+
-+ /* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */
-+ iwhdentry = au_hi_wh(inode, bindex);
-+ if (iwhdentry)
-+ dget(iwhdentry);
-+ au_igrab(hi);
-+ au_set_h_iptr(inode, bindex, NULL, 0);
-+ au_set_h_iptr(inode, bindex, au_igrab(hi),
-+ flags & ~AuHi_XINO);
-+ iput(hi);
-+ dput(iwhdentry);
-+ /* mutex_unlock(&hi->i_mutex); */
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int hn_xino(struct inode *inode, struct inode *h_inode)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend, bfound, bstart;
-+ struct inode *h_i;
-+
-+ err = 0;
-+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
-+ pr_warn("branch root dir was changed\n");
-+ goto out;
-+ }
-+
-+ bfound = -1;
-+ bend = au_ibend(inode);
-+ bstart = au_ibstart(inode);
-+#if 0 /* reserved for future use */
-+ if (bindex == bend) {
-+ /* keep this ino in rename case */
-+ goto out;
-+ }
-+#endif
-+ for (bindex = bstart; bindex <= bend; bindex++)
-+ if (au_h_iptr(inode, bindex) == h_inode) {
-+ bfound = bindex;
-+ break;
-+ }
-+ if (bfound < 0)
-+ goto out;
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ h_i = au_h_iptr(inode, bindex);
-+ if (!h_i)
-+ continue;
-+
-+ err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
-+ /* ignore this error */
-+ /* bad action? */
-+ }
-+
-+ /* the children's inode numbers will be broken */
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int hn_gen_tree(struct dentry *dentry)
-+{
-+ int err, i, j, ndentry;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry **dentries;
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
-+ if (unlikely(err))
-+ goto out_dpages;
-+
-+ for (i = 0; i < dpages.ndpage; i++) {
-+ dpage = dpages.dpages + i;
-+ dentries = dpage->dentries;
-+ ndentry = dpage->ndentry;
-+ for (j = 0; j < ndentry; j++) {
-+ struct dentry *d;
-+
-+ d = dentries[j];
-+ if (IS_ROOT(d))
-+ continue;
-+
-+ au_digen_dec(d);
-+ if (d_really_is_positive(d))
-+ /* todo: reset children xino?
-+ cached children only? */
-+ au_iigen_dec(d_inode(d));
-+ }
-+ }
-+
-+out_dpages:
-+ au_dpages_free(&dpages);
-+
-+#if 0
-+ /* discard children */
-+ dentry_unhash(dentry);
-+ dput(dentry);
-+#endif
-+out:
-+ return err;
-+}
-+
-+/*
-+ * return 0 if processed.
-+ */
-+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
-+ const unsigned int isdir)
-+{
-+ int err;
-+ struct dentry *d;
-+ struct qstr *dname;
-+
-+ err = 1;
-+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
-+ pr_warn("branch root dir was changed\n");
-+ err = 0;
-+ goto out;
-+ }
-+
-+ if (!isdir) {
-+ AuDebugOn(!name);
-+ au_iigen_dec(inode);
-+ spin_lock(&inode->i_lock);
-+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
-+ spin_lock(&d->d_lock);
-+ dname = &d->d_name;
-+ if (dname->len != nlen
-+ || memcmp(dname->name, name, nlen)) {
-+ spin_unlock(&d->d_lock);
-+ continue;
-+ }
-+ err = 0;
-+ au_digen_dec(d);
-+ spin_unlock(&d->d_lock);
-+ break;
-+ }
-+ spin_unlock(&inode->i_lock);
-+ } else {
-+ au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
-+ d = d_find_any_alias(inode);
-+ if (!d) {
-+ au_iigen_dec(inode);
-+ goto out;
-+ }
-+
-+ spin_lock(&d->d_lock);
-+ dname = &d->d_name;
-+ if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
-+ spin_unlock(&d->d_lock);
-+ err = hn_gen_tree(d);
-+ spin_lock(&d->d_lock);
-+ }
-+ spin_unlock(&d->d_lock);
-+ dput(d);
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
-+{
-+ int err;
-+
-+ if (IS_ROOT(dentry)) {
-+ pr_warn("branch root dir was changed\n");
-+ return 0;
-+ }
-+
-+ err = 0;
-+ if (!isdir) {
-+ au_digen_dec(dentry);
-+ if (d_really_is_positive(dentry))
-+ au_iigen_dec(d_inode(dentry));
-+ } else {
-+ au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
-+ if (d_really_is_positive(dentry))
-+ err = hn_gen_tree(dentry);
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* hnotify job flags */
-+#define AuHnJob_XINO0 1
-+#define AuHnJob_GEN (1 << 1)
-+#define AuHnJob_DIRENT (1 << 2)
-+#define AuHnJob_ISDIR (1 << 3)
-+#define AuHnJob_TRYXINO0 (1 << 4)
-+#define AuHnJob_MNTPNT (1 << 5)
-+#define au_ftest_hnjob(flags, name) ((flags) & AuHnJob_##name)
-+#define au_fset_hnjob(flags, name) \
-+ do { (flags) |= AuHnJob_##name; } while (0)
-+#define au_fclr_hnjob(flags, name) \
-+ do { (flags) &= ~AuHnJob_##name; } while (0)
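-+
-+/*
-+ * usage sketch for the flag helpers above (cf. au_hnotify() below,
-+ * which sets and tests these bits the same way):
-+ *
-+ *	unsigned int flags = 0;
-+ *
-+ *	au_fset_hnjob(flags, GEN);		now flags == AuHnJob_GEN
-+ *	if (au_ftest_hnjob(flags, GEN))		true
-+ *		...
-+ *	au_fclr_hnjob(flags, GEN);		flags == 0 again
-+ */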
-+
-+enum {
-+ AuHn_CHILD,
-+ AuHn_PARENT,
-+ AuHnLast
-+};
-+
-+struct au_hnotify_args {
-+ struct inode *h_dir, *dir, *h_child_inode;
-+ u32 mask;
-+ unsigned int flags[AuHnLast];
-+ unsigned int h_child_nlen;
-+ char h_child_name[];
-+};
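-+
-+/*
-+ * h_child_name[] is a C99 flexible array member; the struct and the name
-+ * string are allocated as one chunk, cf. the
-+ * kmalloc(sizeof(*args) + len + 1) in au_hnotify() below.
-+ */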
-+
-+struct hn_job_args {
-+ unsigned int flags;
-+ struct inode *inode, *h_inode, *dir, *h_dir;
-+ struct dentry *dentry;
-+ char *h_name;
-+ int h_nlen;
-+};
-+
-+static int hn_job(struct hn_job_args *a)
-+{
-+ const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
-+ int e;
-+
-+ /* reset xino */
-+ if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
-+ hn_xino(a->inode, a->h_inode); /* ignore this error */
-+
-+ if (au_ftest_hnjob(a->flags, TRYXINO0)
-+ && a->inode
-+ && a->h_inode) {
-+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
-+ if (!a->h_inode->i_nlink
-+ && !(a->h_inode->i_state & I_LINKABLE))
-+ hn_xino(a->inode, a->h_inode); /* ignore this error */
-+ mutex_unlock(&a->h_inode->i_mutex);
-+ }
-+
-+ /* make the generation obsolete */
-+ if (au_ftest_hnjob(a->flags, GEN)) {
-+ e = -1;
-+ if (a->inode)
-+ e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
-+ isdir);
-+ if (e && a->dentry)
-+ hn_gen_by_name(a->dentry, isdir);
-+ /* ignore this error */
-+ }
-+
-+ /* make dir entries obsolete */
-+ if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
-+ struct au_vdir *vdir;
-+
-+ vdir = au_ivdir(a->inode);
-+ if (vdir)
-+ vdir->vd_jiffy = 0;
-+ /* IMustLock(a->inode); */
-+ /* a->inode->i_version++; */
-+ }
-+
-+ /* can do nothing but warn */
-+ if (au_ftest_hnjob(a->flags, MNTPNT)
-+ && a->dentry
-+ && d_mountpoint(a->dentry))
-+ pr_warn("mount-point %pd is removed or renamed\n", a->dentry);
-+
-+ return 0;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
-+ struct inode *dir)
-+{
-+ struct dentry *dentry, *d, *parent;
-+ struct qstr *dname;
-+
-+ parent = d_find_any_alias(dir);
-+ if (!parent)
-+ return NULL;
-+
-+ dentry = NULL;
-+ spin_lock(&parent->d_lock);
-+ list_for_each_entry(d, &parent->d_subdirs, d_child) {
-+ /* AuDbg("%pd\n", d); */
-+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
-+ dname = &d->d_name;
-+ if (dname->len != nlen || memcmp(dname->name, name, nlen))
-+ goto cont_unlock;
-+ if (au_di(d))
-+ au_digen_dec(d);
-+ else
-+ goto cont_unlock;
-+ if (au_dcount(d) > 0) {
-+ dentry = dget_dlock(d);
-+ spin_unlock(&d->d_lock);
-+ break;
-+ }
-+
-+cont_unlock:
-+ spin_unlock(&d->d_lock);
-+ }
-+ spin_unlock(&parent->d_lock);
-+ dput(parent);
-+
-+ if (dentry)
-+ di_write_lock_child(dentry);
-+
-+ return dentry;
-+}
-+
-+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
-+ aufs_bindex_t bindex, ino_t h_ino)
-+{
-+ struct inode *inode;
-+ ino_t ino;
-+ int err;
-+
-+ inode = NULL;
-+ err = au_xino_read(sb, bindex, h_ino, &ino);
-+ if (!err && ino)
-+ inode = ilookup(sb, ino);
-+ if (!inode)
-+ goto out;
-+
-+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
-+ pr_warn("wrong root branch\n");
-+ iput(inode);
-+ inode = NULL;
-+ goto out;
-+ }
-+
-+ ii_write_lock_child(inode);
-+
-+out:
-+ return inode;
-+}
-+
-+static void au_hn_bh(void *_args)
-+{
-+ struct au_hnotify_args *a = _args;
-+ struct super_block *sb;
-+ aufs_bindex_t bindex, bend, bfound;
-+ unsigned char xino, try_iput;
-+ int err;
-+ struct inode *inode;
-+ ino_t h_ino;
-+ struct hn_job_args args;
-+ struct dentry *dentry;
-+ struct au_sbinfo *sbinfo;
-+
-+ AuDebugOn(!_args);
-+ AuDebugOn(!a->h_dir);
-+ AuDebugOn(!a->dir);
-+ AuDebugOn(!a->mask);
-+ AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
-+ a->mask, a->dir->i_ino, a->h_dir->i_ino,
-+ a->h_child_inode ? a->h_child_inode->i_ino : 0);
-+
-+ inode = NULL;
-+ dentry = NULL;
-+ /*
-+ * do not lock a->dir->i_mutex here
-+ * because d_revalidate() may cause a deadlock.
-+ */
-+ sb = a->dir->i_sb;
-+ AuDebugOn(!sb);
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!sbinfo);
-+ si_write_lock(sb, AuLock_NOPLMW);
-+
-+ ii_read_lock_parent(a->dir);
-+ bfound = -1;
-+ bend = au_ibend(a->dir);
-+ for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++)
-+ if (au_h_iptr(a->dir, bindex) == a->h_dir) {
-+ bfound = bindex;
-+ break;
-+ }
-+ ii_read_unlock(a->dir);
-+ if (unlikely(bfound < 0))
-+ goto out;
-+
-+ xino = !!au_opt_test(au_mntflags(sb), XINO);
-+ h_ino = 0;
-+ if (a->h_child_inode)
-+ h_ino = a->h_child_inode->i_ino;
-+
-+ if (a->h_child_nlen
-+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
-+ || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
-+ dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
-+ a->dir);
-+ try_iput = 0;
-+ if (dentry && d_really_is_positive(dentry))
-+ inode = d_inode(dentry);
-+ if (xino && !inode && h_ino
-+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
-+ || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
-+ || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
-+ inode = lookup_wlock_by_ino(sb, bfound, h_ino);
-+ try_iput = 1;
-+ }
-+
-+ args.flags = a->flags[AuHn_CHILD];
-+ args.dentry = dentry;
-+ args.inode = inode;
-+ args.h_inode = a->h_child_inode;
-+ args.dir = a->dir;
-+ args.h_dir = a->h_dir;
-+ args.h_name = a->h_child_name;
-+ args.h_nlen = a->h_child_nlen;
-+ err = hn_job(&args);
-+ if (dentry) {
-+ if (au_di(dentry))
-+ di_write_unlock(dentry);
-+ dput(dentry);
-+ }
-+ if (inode && try_iput) {
-+ ii_write_unlock(inode);
-+ iput(inode);
-+ }
-+
-+ ii_write_lock_parent(a->dir);
-+ args.flags = a->flags[AuHn_PARENT];
-+ args.dentry = NULL;
-+ args.inode = a->dir;
-+ args.h_inode = a->h_dir;
-+ args.dir = NULL;
-+ args.h_dir = NULL;
-+ args.h_name = NULL;
-+ args.h_nlen = 0;
-+ err = hn_job(&args);
-+ ii_write_unlock(a->dir);
-+
-+out:
-+ iput(a->h_child_inode);
-+ iput(a->h_dir);
-+ iput(a->dir);
-+ si_write_unlock(sb);
-+ au_nwt_done(&sbinfo->si_nowait);
-+ kfree(a);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
-+ struct qstr *h_child_qstr, struct inode *h_child_inode)
-+{
-+ int err, len;
-+ unsigned int flags[AuHnLast], f;
-+ unsigned char isdir, isroot, wh;
-+ struct inode *dir;
-+ struct au_hnotify_args *args;
-+ char *p, *h_child_name;
-+
-+ err = 0;
-+ AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
-+ dir = igrab(hnotify->hn_aufs_inode);
-+ if (!dir)
-+ goto out;
-+
-+ isroot = (dir->i_ino == AUFS_ROOT_INO);
-+ wh = 0;
-+ h_child_name = (void *)h_child_qstr->name;
-+ len = h_child_qstr->len;
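-+ /* strip the whiteout prefix, e.g. a lower name ".wh.foo" is
-+ * handled as "foo" with wh set (AUFS_WH_PFX is ".wh.") */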
-+ if (h_child_name) {
-+ if (len > AUFS_WH_PFX_LEN
-+ && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
-+ h_child_name += AUFS_WH_PFX_LEN;
-+ len -= AUFS_WH_PFX_LEN;
-+ wh = 1;
-+ }
-+ }
-+
-+ isdir = 0;
-+ if (h_child_inode)
-+ isdir = !!S_ISDIR(h_child_inode->i_mode);
-+ flags[AuHn_PARENT] = AuHnJob_ISDIR;
-+ flags[AuHn_CHILD] = 0;
-+ if (isdir)
-+ flags[AuHn_CHILD] = AuHnJob_ISDIR;
-+ au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
-+ au_fset_hnjob(flags[AuHn_CHILD], GEN);
-+ switch (mask & FS_EVENTS_POSS_ON_CHILD) {
-+ case FS_MOVED_FROM:
-+ case FS_MOVED_TO:
-+ au_fset_hnjob(flags[AuHn_CHILD], XINO0);
-+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
-+ /*FALLTHROUGH*/
-+ case FS_CREATE:
-+ AuDebugOn(!h_child_name);
-+ break;
-+
-+ case FS_DELETE:
-+ /*
-+ * aufs will never be able to get this child inode.
-+ * revalidation should be in d_revalidate()
-+ * by checking i_nlink, i_generation or d_unhashed().
-+ */
-+ AuDebugOn(!h_child_name);
-+ au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
-+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
-+ break;
-+
-+ default:
-+ AuDebugOn(1);
-+ }
-+
-+ if (wh)
-+ h_child_inode = NULL;
-+
-+ err = -ENOMEM;
-+ /* iput() and kfree() will be called in au_hnotify() */
-+ args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
-+ if (unlikely(!args)) {
-+ AuErr1("no memory\n");
-+ iput(dir);
-+ goto out;
-+ }
-+ args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
-+ args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
-+ args->mask = mask;
-+ args->dir = dir;
-+ args->h_dir = igrab(h_dir);
-+ if (h_child_inode)
-+ h_child_inode = igrab(h_child_inode); /* can be NULL */
-+ args->h_child_inode = h_child_inode;
-+ args->h_child_nlen = len;
-+ if (len) {
-+ p = (void *)args;
-+ p += sizeof(*args);
-+ memcpy(p, h_child_name, len);
-+ p[len] = 0;
-+ }
-+
-+ /* NFS fires the event for a silly-renamed file from a kworker */
-+ f = 0;
-+ if (!dir->i_nlink
-+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE)))
-+ f = AuWkq_NEST;
-+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
-+ if (unlikely(err)) {
-+ pr_err("wkq %d\n", err);
-+ iput(args->h_child_inode);
-+ iput(args->h_dir);
-+ iput(args->dir);
-+ kfree(args);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
-+{
-+ int err;
-+
-+ AuDebugOn(!(udba & AuOptMask_UDBA));
-+
-+ err = 0;
-+ if (au_hnotify_op.reset_br)
-+ err = au_hnotify_op.reset_br(udba, br, perm);
-+
-+ return err;
-+}
-+
-+int au_hnotify_init_br(struct au_branch *br, int perm)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (au_hnotify_op.init_br)
-+ err = au_hnotify_op.init_br(br, perm);
-+
-+ return err;
-+}
-+
-+void au_hnotify_fin_br(struct au_branch *br)
-+{
-+ if (au_hnotify_op.fin_br)
-+ au_hnotify_op.fin_br(br);
-+}
-+
-+static void au_hn_destroy_cache(void)
-+{
-+ kmem_cache_destroy(au_cachep[AuCache_HNOTIFY]);
-+ au_cachep[AuCache_HNOTIFY] = NULL;
-+}
-+
-+int __init au_hnotify_init(void)
-+{
-+ int err;
-+
-+ err = -ENOMEM;
-+ au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify);
-+ if (au_cachep[AuCache_HNOTIFY]) {
-+ err = 0;
-+ if (au_hnotify_op.init)
-+ err = au_hnotify_op.init();
-+ if (unlikely(err))
-+ au_hn_destroy_cache();
-+ }
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+void au_hnotify_fin(void)
-+{
-+ if (au_hnotify_op.fin)
-+ au_hnotify_op.fin();
-+ /* cf. au_cache_fin() */
-+ if (au_cachep[AuCache_HNOTIFY])
-+ au_hn_destroy_cache();
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/iinfo.c linux-4.1.10/fs/aufs/iinfo.c
---- linux-4.1.10.orig/fs/aufs/iinfo.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/iinfo.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,277 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode private data
-+ */
-+
-+#include "aufs.h"
-+
-+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ struct inode *h_inode;
-+
-+ IiMustAnyLock(inode);
-+
-+ h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode;
-+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
-+ return h_inode;
-+}
-+
-+/* todo: hard/soft set? */
-+void au_hiput(struct au_hinode *hinode)
-+{
-+ au_hn_free(hinode);
-+ dput(hinode->hi_whdentry);
-+ iput(hinode->hi_inode);
-+}
-+
-+unsigned int au_hi_flags(struct inode *inode, int isdir)
-+{
-+ unsigned int flags;
-+ const unsigned int mnt_flags = au_mntflags(inode->i_sb);
-+
-+ flags = 0;
-+ if (au_opt_test(mnt_flags, XINO))
-+ au_fset_hi(flags, XINO);
-+ if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
-+ au_fset_hi(flags, HNOTIFY);
-+ return flags;
-+}
-+
-+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
-+ struct inode *h_inode, unsigned int flags)
-+{
-+ struct au_hinode *hinode;
-+ struct inode *hi;
-+ struct au_iinfo *iinfo = au_ii(inode);
-+
-+ IiMustWriteLock(inode);
-+
-+ hinode = iinfo->ii_hinode + bindex;
-+ hi = hinode->hi_inode;
-+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
-+
-+ if (hi)
-+ au_hiput(hinode);
-+ hinode->hi_inode = h_inode;
-+ if (h_inode) {
-+ int err;
-+ struct super_block *sb = inode->i_sb;
-+ struct au_branch *br;
-+
-+ AuDebugOn(inode->i_mode
-+ && (h_inode->i_mode & S_IFMT)
-+ != (inode->i_mode & S_IFMT));
-+ if (bindex == iinfo->ii_bstart)
-+ au_cpup_igen(inode, h_inode);
-+ br = au_sbr(sb, bindex);
-+ hinode->hi_id = br->br_id;
-+ if (au_ftest_hi(flags, XINO)) {
-+ err = au_xino_write(sb, bindex, h_inode->i_ino,
-+ inode->i_ino);
-+ if (unlikely(err))
-+ AuIOErr1("failed au_xino_write() %d\n", err);
-+ }
-+
-+ if (au_ftest_hi(flags, HNOTIFY)
-+ && au_br_hnotifyable(br->br_perm)) {
-+ err = au_hn_alloc(hinode, inode);
-+ if (unlikely(err))
-+ AuIOErr1("au_hn_alloc() %d\n", err);
-+ }
-+ }
-+}
-+
-+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
-+ struct dentry *h_wh)
-+{
-+ struct au_hinode *hinode;
-+
-+ IiMustWriteLock(inode);
-+
-+ hinode = au_ii(inode)->ii_hinode + bindex;
-+ AuDebugOn(hinode->hi_whdentry);
-+ hinode->hi_whdentry = h_wh;
-+}
-+
-+void au_update_iigen(struct inode *inode, int half)
-+{
-+ struct au_iinfo *iinfo;
-+ struct au_iigen *iigen;
-+ unsigned int sigen;
-+
-+ sigen = au_sigen(inode->i_sb);
-+ iinfo = au_ii(inode);
-+ iigen = &iinfo->ii_generation;
-+ spin_lock(&iinfo->ii_genspin);
-+ iigen->ig_generation = sigen;
-+ if (half)
-+ au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
-+ else
-+ au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
-+ spin_unlock(&iinfo->ii_genspin);
-+}
-+
-+/* it may be called at remount time, too */
-+void au_update_ibrange(struct inode *inode, int do_put_zero)
-+{
-+ struct au_iinfo *iinfo;
-+ aufs_bindex_t bindex, bend;
-+
-+ iinfo = au_ii(inode);
-+ if (!iinfo)
-+ return;
-+
-+ IiMustWriteLock(inode);
-+
-+ if (do_put_zero && iinfo->ii_bstart >= 0) {
-+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
-+ bindex++) {
-+ struct inode *h_i;
-+
-+ h_i = iinfo->ii_hinode[0 + bindex].hi_inode;
-+ if (h_i
-+ && !h_i->i_nlink
-+ && !(h_i->i_state & I_LINKABLE))
-+ au_set_h_iptr(inode, bindex, NULL, 0);
-+ }
-+ }
-+
-+ iinfo->ii_bstart = -1;
-+ iinfo->ii_bend = -1;
-+ bend = au_sbend(inode->i_sb);
-+ for (bindex = 0; bindex <= bend; bindex++)
-+ if (iinfo->ii_hinode[0 + bindex].hi_inode) {
-+ iinfo->ii_bstart = bindex;
-+ break;
-+ }
-+ if (iinfo->ii_bstart >= 0)
-+ for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--)
-+ if (iinfo->ii_hinode[0 + bindex].hi_inode) {
-+ iinfo->ii_bend = bindex;
-+ break;
-+ }
-+ AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_icntnr_init_once(void *_c)
-+{
-+ struct au_icntnr *c = _c;
-+ struct au_iinfo *iinfo = &c->iinfo;
-+ static struct lock_class_key aufs_ii;
-+
-+ spin_lock_init(&iinfo->ii_genspin);
-+ au_rw_init(&iinfo->ii_rwsem);
-+ au_rw_class(&iinfo->ii_rwsem, &aufs_ii);
-+ inode_init_once(&c->vfs_inode);
-+}
-+
-+int au_iinfo_init(struct inode *inode)
-+{
-+ struct au_iinfo *iinfo;
-+ struct super_block *sb;
-+ int nbr, i;
-+
-+ sb = inode->i_sb;
-+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
-+ nbr = au_sbend(sb) + 1;
-+ if (unlikely(nbr <= 0))
-+ nbr = 1;
-+ iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
-+ if (iinfo->ii_hinode) {
-+ au_ninodes_inc(sb);
-+ for (i = 0; i < nbr; i++)
-+ iinfo->ii_hinode[i].hi_id = -1;
-+
-+ iinfo->ii_generation.ig_generation = au_sigen(sb);
-+ iinfo->ii_bstart = -1;
-+ iinfo->ii_bend = -1;
-+ iinfo->ii_vdir = NULL;
-+ return 0;
-+ }
-+ return -ENOMEM;
-+}
-+
-+int au_ii_realloc(struct au_iinfo *iinfo, int nbr)
-+{
-+ int err, sz;
-+ struct au_hinode *hip;
-+
-+ AuRwMustWriteLock(&iinfo->ii_rwsem);
-+
-+ err = -ENOMEM;
-+ sz = sizeof(*hip) * (iinfo->ii_bend + 1);
-+ if (!sz)
-+ sz = sizeof(*hip);
-+ hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS);
-+ if (hip) {
-+ iinfo->ii_hinode = hip;
-+ err = 0;
-+ }
-+
-+ return err;
-+}
-+
-+void au_iinfo_fin(struct inode *inode)
-+{
-+ struct au_iinfo *iinfo;
-+ struct au_hinode *hi;
-+ struct super_block *sb;
-+ aufs_bindex_t bindex, bend;
-+ const unsigned char unlinked = !inode->i_nlink;
-+
-+ iinfo = au_ii(inode);
-+ /* bad_inode case */
-+ if (!iinfo)
-+ return;
-+
-+ sb = inode->i_sb;
-+ au_ninodes_dec(sb);
-+ if (si_pid_test(sb))
-+ au_xino_delete_inode(inode, unlinked);
-+ else {
-+ /*
-+ * it is safe to hide the dependency between sbinfo and
-+ * sb->s_umount.
-+ */
-+ lockdep_off();
-+ si_noflush_read_lock(sb);
-+ au_xino_delete_inode(inode, unlinked);
-+ si_read_unlock(sb);
-+ lockdep_on();
-+ }
-+
-+ if (iinfo->ii_vdir)
-+ au_vdir_free(iinfo->ii_vdir);
-+
-+ bindex = iinfo->ii_bstart;
-+ if (bindex >= 0) {
-+ hi = iinfo->ii_hinode + bindex;
-+ bend = iinfo->ii_bend;
-+ while (bindex++ <= bend) {
-+ if (hi->hi_inode)
-+ au_hiput(hi);
-+ hi++;
-+ }
-+ }
-+ kfree(iinfo->ii_hinode);
-+ iinfo->ii_hinode = NULL;
-+ AuRwDestroy(&iinfo->ii_rwsem);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/inode.c linux-4.1.10/fs/aufs/inode.c
---- linux-4.1.10.orig/fs/aufs/inode.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/inode.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,500 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode functions
-+ */
-+
-+#include "aufs.h"
-+
-+struct inode *au_igrab(struct inode *inode)
-+{
-+ if (inode) {
-+ AuDebugOn(!atomic_read(&inode->i_count));
-+ ihold(inode);
-+ }
-+ return inode;
-+}
-+
-+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
-+{
-+ au_cpup_attr_all(inode, /*force*/0);
-+ au_update_iigen(inode, /*half*/1);
-+ if (do_version)
-+ inode->i_version++;
-+}
-+
-+static int au_ii_refresh(struct inode *inode, int *update)
-+{
-+ int err, e;
-+ umode_t type;
-+ aufs_bindex_t bindex, new_bindex;
-+ struct super_block *sb;
-+ struct au_iinfo *iinfo;
-+ struct au_hinode *p, *q, tmp;
-+
-+ IiMustWriteLock(inode);
-+
-+ *update = 0;
-+ sb = inode->i_sb;
-+ type = inode->i_mode & S_IFMT;
-+ iinfo = au_ii(inode);
-+ err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
-+ if (unlikely(err))
-+ goto out;
-+
-+ AuDebugOn(iinfo->ii_bstart < 0);
-+ p = iinfo->ii_hinode + iinfo->ii_bstart;
-+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
-+ bindex++, p++) {
-+ if (!p->hi_inode)
-+ continue;
-+
-+ AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
-+ new_bindex = au_br_index(sb, p->hi_id);
-+ if (new_bindex == bindex)
-+ continue;
-+
-+ if (new_bindex < 0) {
-+ *update = 1;
-+ au_hiput(p);
-+ p->hi_inode = NULL;
-+ continue;
-+ }
-+
-+ if (new_bindex < iinfo->ii_bstart)
-+ iinfo->ii_bstart = new_bindex;
-+ if (iinfo->ii_bend < new_bindex)
-+ iinfo->ii_bend = new_bindex;
-+ /* swap two lower inode, and loop again */
-+ q = iinfo->ii_hinode + new_bindex;
-+ tmp = *q;
-+ *q = *p;
-+ *p = tmp;
-+ if (tmp.hi_inode) {
-+ bindex--;
-+ p--;
-+ }
-+ }
-+ au_update_ibrange(inode, /*do_put_zero*/0);
-+ e = au_dy_irefresh(inode);
-+ if (unlikely(e && !err))
-+ err = e;
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_refresh_hinode_self(struct inode *inode)
-+{
-+ int err, update;
-+
-+ err = au_ii_refresh(inode, &update);
-+ if (!err)
-+ au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
-+{
-+ int err, e, update;
-+ unsigned int flags;
-+ umode_t mode;
-+ aufs_bindex_t bindex, bend;
-+ unsigned char isdir;
-+ struct au_hinode *p;
-+ struct au_iinfo *iinfo;
-+
-+ err = au_ii_refresh(inode, &update);
-+ if (unlikely(err))
-+ goto out;
-+
-+ update = 0;
-+ iinfo = au_ii(inode);
-+ p = iinfo->ii_hinode + iinfo->ii_bstart;
-+ mode = (inode->i_mode & S_IFMT);
-+ isdir = S_ISDIR(mode);
-+ flags = au_hi_flags(inode, isdir);
-+ bend = au_dbend(dentry);
-+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
-+ struct inode *h_i, *h_inode;
-+ struct dentry *h_d;
-+
-+ h_d = au_h_dptr(dentry, bindex);
-+ if (!h_d || d_is_negative(h_d))
-+ continue;
-+
-+ h_inode = d_inode(h_d);
-+ AuDebugOn(mode != (h_inode->i_mode & S_IFMT));
-+ if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
-+ h_i = au_h_iptr(inode, bindex);
-+ if (h_i) {
-+ if (h_i == h_inode)
-+ continue;
-+ err = -EIO;
-+ break;
-+ }
-+ }
-+ if (bindex < iinfo->ii_bstart)
-+ iinfo->ii_bstart = bindex;
-+ if (iinfo->ii_bend < bindex)
-+ iinfo->ii_bend = bindex;
-+ au_set_h_iptr(inode, bindex, au_igrab(h_inode), flags);
-+ update = 1;
-+ }
-+ au_update_ibrange(inode, /*do_put_zero*/0);
-+ e = au_dy_irefresh(inode);
-+ if (unlikely(e && !err))
-+ err = e;
-+ if (!err)
-+ au_refresh_hinode_attr(inode, update && isdir);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int set_inode(struct inode *inode, struct dentry *dentry)
-+{
-+ int err;
-+ unsigned int flags;
-+ umode_t mode;
-+ aufs_bindex_t bindex, bstart, btail;
-+ unsigned char isdir;
-+ struct dentry *h_dentry;
-+ struct inode *h_inode;
-+ struct au_iinfo *iinfo;
-+
-+ IiMustWriteLock(inode);
-+
-+ err = 0;
-+ isdir = 0;
-+ bstart = au_dbstart(dentry);
-+ h_dentry = au_h_dptr(dentry, bstart);
-+ h_inode = d_inode(h_dentry);
-+ mode = h_inode->i_mode;
-+ switch (mode & S_IFMT) {
-+ case S_IFREG:
-+ btail = au_dbtail(dentry);
-+ inode->i_op = &aufs_iop;
-+ inode->i_fop = &aufs_file_fop;
-+ err = au_dy_iaop(inode, bstart, h_inode);
-+ if (unlikely(err))
-+ goto out;
-+ break;
-+ case S_IFDIR:
-+ isdir = 1;
-+ btail = au_dbtaildir(dentry);
-+ inode->i_op = &aufs_dir_iop;
-+ inode->i_fop = &aufs_dir_fop;
-+ break;
-+ case S_IFLNK:
-+ btail = au_dbtail(dentry);
-+ inode->i_op = &aufs_symlink_iop;
-+ break;
-+ case S_IFBLK:
-+ case S_IFCHR:
-+ case S_IFIFO:
-+ case S_IFSOCK:
-+ btail = au_dbtail(dentry);
-+ inode->i_op = &aufs_iop;
-+ init_special_inode(inode, mode, h_inode->i_rdev);
-+ break;
-+ default:
-+ AuIOErr("Unknown file type 0%o\n", mode);
-+ err = -EIO;
-+ goto out;
-+ }
-+
-+ /* do not set hnotify for whiteouted dirs (SHWH mode) */
-+ flags = au_hi_flags(inode, isdir);
-+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
-+ && au_ftest_hi(flags, HNOTIFY)
-+ && dentry->d_name.len > AUFS_WH_PFX_LEN
-+ && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
-+ au_fclr_hi(flags, HNOTIFY);
-+ iinfo = au_ii(inode);
-+ iinfo->ii_bstart = bstart;
-+ iinfo->ii_bend = btail;
-+ for (bindex = bstart; bindex <= btail; bindex++) {
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (h_dentry)
-+ au_set_h_iptr(inode, bindex,
-+ au_igrab(d_inode(h_dentry)), flags);
-+ }
-+ au_cpup_attr_all(inode, /*force*/1);
-+ /*
-+ * to force calling aufs_get_acl() every time,
-+ * do not call cache_no_acl() for aufs inode.
-+ */
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * a successful return comes with iinfo write-locked
-+ * minus: errno
-+ * zero: success, matched
-+ * plus: no error, but unmatched
-+ */
-+static int reval_inode(struct inode *inode, struct dentry *dentry)
-+{
-+ int err;
-+ unsigned int gen;
-+ struct au_iigen iigen;
-+ aufs_bindex_t bindex, bend;
-+ struct inode *h_inode, *h_dinode;
-+ struct dentry *h_dentry;
-+
-+ /*
-+ * before this function, if aufs holds any iinfo lock, it must be
-+ * only one, for the parent dir.
-+ * this can happen with UDBA and an obsoleted inode number.
-+ */
-+ err = -EIO;
-+ if (unlikely(inode->i_ino == parent_ino(dentry)))
-+ goto out;
-+
-+ err = 1;
-+ ii_write_lock_new_child(inode);
-+ h_dentry = au_h_dptr(dentry, au_dbstart(dentry));
-+ h_dinode = d_inode(h_dentry);
-+ bend = au_ibend(inode);
-+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (!h_inode || h_inode != h_dinode)
-+ continue;
-+
-+ err = 0;
-+ gen = au_iigen(inode, &iigen);
-+ if (gen == au_digen(dentry)
-+ && !au_ig_ftest(iigen.ig_flags, HALF_REFRESHED))
-+ break;
-+
-+ /* fully refresh inode using dentry */
-+ err = au_refresh_hinode(inode, dentry);
-+ if (!err)
-+ au_update_iigen(inode, /*half*/0);
-+ break;
-+ }
-+
-+ if (unlikely(err))
-+ ii_write_unlock(inode);
-+out:
-+ return err;
-+}
-+
-+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ unsigned int d_type, ino_t *ino)
-+{
-+ int err;
-+ struct mutex *mtx;
-+
-+ /* prevent a race condition on hardlinked inode numbers */
-+ mtx = NULL;
-+ if (d_type != DT_DIR) {
-+ mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
-+ mutex_lock(mtx);
-+ }
-+ err = au_xino_read(sb, bindex, h_ino, ino);
-+ if (unlikely(err))
-+ goto out;
-+
-+ if (!*ino) {
-+ err = -EIO;
-+ *ino = au_xino_new_ino(sb);
-+ if (unlikely(!*ino))
-+ goto out;
-+ err = au_xino_write(sb, bindex, h_ino, *ino);
-+ if (unlikely(err))
-+ goto out;
-+ }
-+
-+out:
-+ if (mtx)
-+ mutex_unlock(mtx);
-+ return err;
-+}
-+
-+/* a successful return comes with iinfo write-locked */
-+/* todo: return with unlocked? */
-+struct inode *au_new_inode(struct dentry *dentry, int must_new)
-+{
-+ struct inode *inode, *h_inode;
-+ struct dentry *h_dentry;
-+ struct super_block *sb;
-+ struct mutex *mtx;
-+ ino_t h_ino, ino;
-+ int err;
-+ aufs_bindex_t bstart;
-+
-+ sb = dentry->d_sb;
-+ bstart = au_dbstart(dentry);
-+ h_dentry = au_h_dptr(dentry, bstart);
-+ h_inode = d_inode(h_dentry);
-+ h_ino = h_inode->i_ino;
-+
-+ /*
-+ * stop races between hardlinks under different parents.
-+ */
-+ mtx = NULL;
-+ if (!d_is_dir(h_dentry))
-+ mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx;
-+
-+new_ino:
-+ if (mtx)
-+ mutex_lock(mtx);
-+ err = au_xino_read(sb, bstart, h_ino, &ino);
-+ inode = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ if (!ino) {
-+ ino = au_xino_new_ino(sb);
-+ if (unlikely(!ino)) {
-+ inode = ERR_PTR(-EIO);
-+ goto out;
-+ }
-+ }
-+
-+ AuDbg("i%lu\n", (unsigned long)ino);
-+ inode = au_iget_locked(sb, ino);
-+ err = PTR_ERR(inode);
-+ if (IS_ERR(inode))
-+ goto out;
-+
-+ AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
-+ if (inode->i_state & I_NEW) {
-+ /* verbose coding for lock class name */
-+ if (unlikely(d_is_symlink(h_dentry)))
-+ au_rw_class(&au_ii(inode)->ii_rwsem,
-+ au_lc_key + AuLcSymlink_IIINFO);
-+ else if (unlikely(d_is_dir(h_dentry)))
-+ au_rw_class(&au_ii(inode)->ii_rwsem,
-+ au_lc_key + AuLcDir_IIINFO);
-+ else /* likely */
-+ au_rw_class(&au_ii(inode)->ii_rwsem,
-+ au_lc_key + AuLcNonDir_IIINFO);
-+
-+ ii_write_lock_new_child(inode);
-+ err = set_inode(inode, dentry);
-+ if (!err) {
-+ unlock_new_inode(inode);
-+ goto out; /* success */
-+ }
-+
-+ /*
-+ * iget_failed() calls iput(), but we need to call
-+ * ii_write_unlock() after iget_failed(). so dirty hack for
-+ * i_count.
-+ */
-+ atomic_inc(&inode->i_count);
-+ iget_failed(inode);
-+ ii_write_unlock(inode);
-+ au_xino_write(sb, bstart, h_ino, /*ino*/0);
-+ /* ignore this error */
-+ goto out_iput;
-+ } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
-+ /*
-+ * horrible race condition between lookup, readdir and copyup
-+ * (or something).
-+ */
-+ if (mtx)
-+ mutex_unlock(mtx);
-+ err = reval_inode(inode, dentry);
-+ if (unlikely(err < 0)) {
-+ mtx = NULL;
-+ goto out_iput;
-+ }
-+
-+ if (!err) {
-+ mtx = NULL;
-+ goto out; /* success */
-+ } else if (mtx)
-+ mutex_lock(mtx);
-+ }
-+
-+ if (unlikely(au_test_fs_unique_ino(h_inode)))
-+ AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
-+ " b%d, %s, %pd, hi%lu, i%lu.\n",
-+ bstart, au_sbtype(h_dentry->d_sb), dentry,
-+ (unsigned long)h_ino, (unsigned long)ino);
-+ ino = 0;
-+ err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
-+ if (!err) {
-+ iput(inode);
-+ if (mtx)
-+ mutex_unlock(mtx);
-+ goto new_ino;
-+ }
-+
-+out_iput:
-+ iput(inode);
-+ inode = ERR_PTR(err);
-+out:
-+ if (mtx)
-+ mutex_unlock(mtx);
-+ return inode;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
-+ struct inode *inode)
-+{
-+ int err;
-+ struct inode *hi;
-+
-+ err = au_br_rdonly(au_sbr(sb, bindex));
-+
-+ /* a pseudo-link may happen out of bounds after a flush */
-+ if (!err
-+ && inode
-+ && au_ibstart(inode) <= bindex
-+ && bindex <= au_ibend(inode)) {
-+ /*
-+ * permission check is unnecessary since vfsub routine
-+ * will be called later
-+ */
-+ hi = au_h_iptr(inode, bindex);
-+ if (hi)
-+ err = IS_IMMUTABLE(hi) ? -EROFS : 0;
-+ }
-+
-+ return err;
-+}
-+
-+int au_test_h_perm(struct inode *h_inode, int mask)
-+{
-+ if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
-+ return 0;
-+ return inode_permission(h_inode, mask);
-+}
-+
-+int au_test_h_perm_sio(struct inode *h_inode, int mask)
-+{
-+ if (au_test_nfs(h_inode->i_sb)
-+ && (mask & MAY_WRITE)
-+ && S_ISDIR(h_inode->i_mode))
-+ mask |= MAY_READ; /* force permission check */
-+ return au_test_h_perm(h_inode, mask);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/inode.h linux-4.1.10/fs/aufs/inode.h
---- linux-4.1.10.orig/fs/aufs/inode.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/inode.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,673 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode operations
-+ */
-+
-+#ifndef __AUFS_INODE_H__
-+#define __AUFS_INODE_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fsnotify.h>
-+#include "rwsem.h"
-+
-+struct vfsmount;
-+
-+struct au_hnotify {
-+#ifdef CONFIG_AUFS_HNOTIFY
-+#ifdef CONFIG_AUFS_HFSNOTIFY
-+ /* never use fsnotify_add_vfsmount_mark() */
-+ struct fsnotify_mark hn_mark;
-+#endif
-+ struct inode *hn_aufs_inode; /* no get/put */
-+#endif
-+} ____cacheline_aligned_in_smp;
-+
-+struct au_hinode {
-+ struct inode *hi_inode;
-+ aufs_bindex_t hi_id;
-+#ifdef CONFIG_AUFS_HNOTIFY
-+ struct au_hnotify *hi_notify;
-+#endif
-+
-+ /* reference to the copied-up whiteout with get/put */
-+ struct dentry *hi_whdentry;
-+};
-+
-+/* ig_flags */
-+#define AuIG_HALF_REFRESHED 1
-+#define au_ig_ftest(flags, name) ((flags) & AuIG_##name)
-+#define au_ig_fset(flags, name) \
-+ do { (flags) |= AuIG_##name; } while (0)
-+#define au_ig_fclr(flags, name) \
-+ do { (flags) &= ~AuIG_##name; } while (0)
-+
-+struct au_iigen {
-+ __u32 ig_generation, ig_flags;
-+};
-+
-+struct au_vdir;
-+struct au_iinfo {
-+ spinlock_t ii_genspin;
-+ struct au_iigen ii_generation;
-+ struct super_block *ii_hsb1; /* no get/put */
-+
-+ struct au_rwsem ii_rwsem;
-+ aufs_bindex_t ii_bstart, ii_bend;
-+ __u32 ii_higen;
-+ struct au_hinode *ii_hinode;
-+ struct au_vdir *ii_vdir;
-+};
-+
-+struct au_icntnr {
-+ struct au_iinfo iinfo;
-+ struct inode vfs_inode;
-+} ____cacheline_aligned_in_smp;
-+
-+/* au_pin flags */
-+#define AuPin_DI_LOCKED 1
-+#define AuPin_MNT_WRITE (1 << 1)
-+#define au_ftest_pin(flags, name) ((flags) & AuPin_##name)
-+#define au_fset_pin(flags, name) \
-+ do { (flags) |= AuPin_##name; } while (0)
-+#define au_fclr_pin(flags, name) \
-+ do { (flags) &= ~AuPin_##name; } while (0)
-+
-+struct au_pin {
-+ /* input */
-+ struct dentry *dentry;
-+ unsigned int udba;
-+ unsigned char lsc_di, lsc_hi, flags;
-+ aufs_bindex_t bindex;
-+
-+ /* output */
-+ struct dentry *parent;
-+ struct au_hinode *hdir;
-+ struct vfsmount *h_mnt;
-+
-+ /* temporary unlock/relock for copyup */
-+ struct dentry *h_dentry, *h_parent;
-+ struct au_branch *br;
-+ struct task_struct *task;
-+};
-+
-+void au_pin_hdir_unlock(struct au_pin *p);
-+int au_pin_hdir_lock(struct au_pin *p);
-+int au_pin_hdir_relock(struct au_pin *p);
-+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task);
-+void au_pin_hdir_acquire_nest(struct au_pin *p);
-+void au_pin_hdir_release(struct au_pin *p);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct au_iinfo *au_ii(struct inode *inode)
-+{
-+ struct au_iinfo *iinfo;
-+
-+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
-+ if (iinfo->ii_hinode)
-+ return iinfo;
-+ return NULL; /* debugging bad_inode case */
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* inode.c */
-+struct inode *au_igrab(struct inode *inode);
-+int au_refresh_hinode_self(struct inode *inode);
-+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
-+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ unsigned int d_type, ino_t *ino);
-+struct inode *au_new_inode(struct dentry *dentry, int must_new);
-+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
-+ struct inode *inode);
-+int au_test_h_perm(struct inode *h_inode, int mask);
-+int au_test_h_perm_sio(struct inode *h_inode, int mask);
-+
-+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
-+ ino_t h_ino, unsigned int d_type, ino_t *ino)
-+{
-+#ifdef CONFIG_AUFS_SHWH
-+ return au_ino(sb, bindex, h_ino, d_type, ino);
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* i_op.c */
-+extern struct inode_operations aufs_iop, aufs_symlink_iop, aufs_dir_iop;
-+
-+/* au_wr_dir flags */
-+#define AuWrDir_ADD_ENTRY 1
-+#define AuWrDir_ISDIR (1 << 1)
-+#define AuWrDir_TMPFILE (1 << 2)
-+#define au_ftest_wrdir(flags, name) ((flags) & AuWrDir_##name)
-+#define au_fset_wrdir(flags, name) \
-+ do { (flags) |= AuWrDir_##name; } while (0)
-+#define au_fclr_wrdir(flags, name) \
-+ do { (flags) &= ~AuWrDir_##name; } while (0)
-+
-+struct au_wr_dir_args {
-+ aufs_bindex_t force_btgt;
-+ unsigned char flags;
-+};
-+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
-+ struct au_wr_dir_args *args);
-+
-+struct dentry *au_pinned_h_parent(struct au_pin *pin);
-+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
-+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
-+ unsigned int udba, unsigned char flags);
-+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
-+ unsigned int udba, unsigned char flags) __must_check;
-+int au_do_pin(struct au_pin *pin) __must_check;
-+void au_unpin(struct au_pin *pin);
-+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen);
-+
-+#define AuIcpup_DID_CPUP 1
-+#define au_ftest_icpup(flags, name) ((flags) & AuIcpup_##name)
-+#define au_fset_icpup(flags, name) \
-+ do { (flags) |= AuIcpup_##name; } while (0)
-+#define au_fclr_icpup(flags, name) \
-+ do { (flags) &= ~AuIcpup_##name; } while (0)
-+
-+struct au_icpup_args {
-+ unsigned char flags;
-+ unsigned char pin_flags;
-+ aufs_bindex_t btgt;
-+ unsigned int udba;
-+ struct au_pin pin;
-+ struct path h_path;
-+ struct inode *h_inode;
-+};
-+
-+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
-+ struct au_icpup_args *a);
-+
-+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path);
-+
-+/* i_op_add.c */
-+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent, int isdir);
-+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+ dev_t dev);
-+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
-+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-+ bool want_excl);
-+struct vfsub_aopen_args;
-+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
-+ struct vfsub_aopen_args *args);
-+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode);
-+int aufs_link(struct dentry *src_dentry, struct inode *dir,
-+ struct dentry *dentry);
-+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
-+
-+/* i_op_del.c */
-+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
-+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent, int isdir);
-+int aufs_unlink(struct inode *dir, struct dentry *dentry);
-+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
-+
-+/* i_op_ren.c */
-+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
-+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
-+ struct inode *dir, struct dentry *dentry);
-+
-+/* iinfo.c */
-+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
-+void au_hiput(struct au_hinode *hinode);
-+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
-+ struct dentry *h_wh);
-+unsigned int au_hi_flags(struct inode *inode, int isdir);
-+
-+/* hinode flags */
-+#define AuHi_XINO 1
-+#define AuHi_HNOTIFY (1 << 1)
-+#define au_ftest_hi(flags, name) ((flags) & AuHi_##name)
-+#define au_fset_hi(flags, name) \
-+ do { (flags) |= AuHi_##name; } while (0)
-+#define au_fclr_hi(flags, name) \
-+ do { (flags) &= ~AuHi_##name; } while (0)
-+
-+#ifndef CONFIG_AUFS_HNOTIFY
-+#undef AuHi_HNOTIFY
-+#define AuHi_HNOTIFY 0
-+#endif
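-+
-+/*
-+ * with CONFIG_AUFS_HNOTIFY disabled, AuHi_HNOTIFY becomes 0, so
-+ * au_ftest_hi(flags, HNOTIFY) is constant false and the notify paths
-+ * compile out.
-+ */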
-+
-+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
-+ struct inode *h_inode, unsigned int flags);
-+
-+void au_update_iigen(struct inode *inode, int half);
-+void au_update_ibrange(struct inode *inode, int do_put_zero);
-+
-+void au_icntnr_init_once(void *_c);
-+int au_iinfo_init(struct inode *inode);
-+void au_iinfo_fin(struct inode *inode);
-+int au_ii_realloc(struct au_iinfo *iinfo, int nbr);
-+
-+#ifdef CONFIG_PROC_FS
-+/* plink.c */
-+int au_plink_maint(struct super_block *sb, int flags);
-+struct au_sbinfo;
-+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
-+int au_plink_maint_enter(struct super_block *sb);
-+#ifdef CONFIG_AUFS_DEBUG
-+void au_plink_list(struct super_block *sb);
-+#else
-+AuStubVoid(au_plink_list, struct super_block *sb)
-+#endif
-+int au_plink_test(struct inode *inode);
-+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
-+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
-+ struct dentry *h_dentry);
-+void au_plink_put(struct super_block *sb, int verbose);
-+void au_plink_clean(struct super_block *sb, int verbose);
-+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
-+#else
-+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
-+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
-+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
-+AuStubVoid(au_plink_list, struct super_block *sb);
-+AuStubInt0(au_plink_test, struct inode *inode);
-+AuStub(struct dentry *, au_plink_lkup, return NULL,
-+ struct inode *inode, aufs_bindex_t bindex);
-+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
-+ struct dentry *h_dentry);
-+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
-+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
-+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
-+#endif /* CONFIG_PROC_FS */
-+
-+#ifdef CONFIG_AUFS_XATTR
-+/* xattr.c */
-+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
-+ unsigned int verbose);
-+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size);
-+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value,
-+ size_t size);
-+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value,
-+ size_t size, int flags);
-+int aufs_removexattr(struct dentry *dentry, const char *name);
-+
-+/* void au_xattr_init(struct super_block *sb); */
-+#else
-+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src,
-+ int ignore_flags, unsigned int verbose);
-+/* AuStubVoid(au_xattr_init, struct super_block *sb); */
-+#endif
-+
-+#ifdef CONFIG_FS_POSIX_ACL
-+struct posix_acl *aufs_get_acl(struct inode *inode, int type);
-+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-+#endif
-+
-+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
-+enum {
-+ AU_XATTR_SET,
-+ AU_XATTR_REMOVE,
-+ AU_ACL_SET
-+};
-+
-+struct au_srxattr {
-+ int type;
-+ union {
-+ struct {
-+ const char *name;
-+ const void *value;
-+ size_t size;
-+ int flags;
-+ } set;
-+ struct {
-+ const char *name;
-+ } remove;
-+ struct {
-+ struct posix_acl *acl;
-+ int type;
-+ } acl_set;
-+ } u;
-+};
-+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg);
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* lock subclass for iinfo */
-+enum {
-+ AuLsc_II_CHILD, /* child first */
-+ AuLsc_II_CHILD2, /* rename(2), link(2), and cpup at hnotify */
-+ AuLsc_II_CHILD3, /* copyup dirs */
-+ AuLsc_II_PARENT, /* see AuLsc_I_PARENT in vfsub.h */
-+ AuLsc_II_PARENT2,
-+ AuLsc_II_PARENT3, /* copyup dirs */
-+ AuLsc_II_NEW_CHILD
-+};
-+
-+/*
-+ * ii_read_lock_child, ii_write_lock_child,
-+ * ii_read_lock_child2, ii_write_lock_child2,
-+ * ii_read_lock_child3, ii_write_lock_child3,
-+ * ii_read_lock_parent, ii_write_lock_parent,
-+ * ii_read_lock_parent2, ii_write_lock_parent2,
-+ * ii_read_lock_parent3, ii_write_lock_parent3,
-+ * ii_read_lock_new_child, ii_write_lock_new_child,
-+ */
-+#define AuReadLockFunc(name, lsc) \
-+static inline void ii_read_lock_##name(struct inode *i) \
-+{ \
-+ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
-+}
-+
-+#define AuWriteLockFunc(name, lsc) \
-+static inline void ii_write_lock_##name(struct inode *i) \
-+{ \
-+ au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
-+}
-+
-+#define AuRWLockFuncs(name, lsc) \
-+ AuReadLockFunc(name, lsc) \
-+ AuWriteLockFunc(name, lsc)
-+
-+AuRWLockFuncs(child, CHILD);
-+AuRWLockFuncs(child2, CHILD2);
-+AuRWLockFuncs(child3, CHILD3);
-+AuRWLockFuncs(parent, PARENT);
-+AuRWLockFuncs(parent2, PARENT2);
-+AuRWLockFuncs(parent3, PARENT3);
-+AuRWLockFuncs(new_child, NEW_CHILD);
-+
-+#undef AuReadLockFunc
-+#undef AuWriteLockFunc
-+#undef AuRWLockFuncs
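-+
-+/*
-+ * e.g. AuRWLockFuncs(child, CHILD) above generates
-+ *	static inline void ii_read_lock_child(struct inode *i)
-+ *	{ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_CHILD); }
-+ * plus the matching ii_write_lock_child().
-+ */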
-+
-+/*
-+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock
-+ */
-+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem);
-+
-+#define IiMustNoWaiters(i) AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
-+#define IiMustAnyLock(i) AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
-+#define IiMustWriteLock(i) AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline void au_icntnr_init(struct au_icntnr *c)
-+{
-+#ifdef CONFIG_AUFS_DEBUG
-+ c->vfs_inode.i_mode = 0;
-+#endif
-+}
-+
-+static inline unsigned int au_iigen(struct inode *inode, struct au_iigen *iigen)
-+{
-+ unsigned int gen;
-+ struct au_iinfo *iinfo;
-+
-+ iinfo = au_ii(inode);
-+ spin_lock(&iinfo->ii_genspin);
-+ if (iigen)
-+ *iigen = iinfo->ii_generation;
-+ gen = iinfo->ii_generation.ig_generation;
-+ spin_unlock(&iinfo->ii_genspin);
-+
-+ return gen;
-+}
-+
-+/* tiny test for inode number */
-+/* tmpfs generation is too rough */
-+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
-+{
-+ struct au_iinfo *iinfo;
-+
-+ iinfo = au_ii(inode);
-+ AuRwMustAnyLock(&iinfo->ii_rwsem);
-+ return !(iinfo->ii_hsb1 == h_inode->i_sb
-+ && iinfo->ii_higen == h_inode->i_generation);
-+}
-+
-+static inline void au_iigen_dec(struct inode *inode)
-+{
-+ struct au_iinfo *iinfo;
-+
-+ iinfo = au_ii(inode);
-+ spin_lock(&iinfo->ii_genspin);
-+ iinfo->ii_generation.ig_generation--;
-+ spin_unlock(&iinfo->ii_genspin);
-+}
-+
-+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (unlikely(inode && au_iigen(inode, NULL) != sigen))
-+ err = -EIO;
-+
-+ return err;
-+}
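-+
-+/*
-+ * e.g. au_iigen_test(inode, au_sigen(inode->i_sb)) returns -EIO when the
-+ * inode generation lags behind the superblock generation, i.e. the inode
-+ * still needs a refresh.
-+ */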
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
-+ aufs_bindex_t bindex)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_hinode[0 + bindex].hi_id;
-+}
-+
-+static inline aufs_bindex_t au_ibstart(struct inode *inode)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_bstart;
-+}
-+
-+static inline aufs_bindex_t au_ibend(struct inode *inode)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_bend;
-+}
-+
-+static inline struct au_vdir *au_ivdir(struct inode *inode)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_vdir;
-+}
-+
-+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry;
-+}
-+
-+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ IiMustWriteLock(inode);
-+ au_ii(inode)->ii_bstart = bindex;
-+}
-+
-+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ IiMustWriteLock(inode);
-+ au_ii(inode)->ii_bend = bindex;
-+}
-+
-+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
-+{
-+ IiMustWriteLock(inode);
-+ au_ii(inode)->ii_vdir = vdir;
-+}
-+
-+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ IiMustAnyLock(inode);
-+ return au_ii(inode)->ii_hinode + bindex;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
-+{
-+ if (pin)
-+ return pin->parent;
-+ return NULL;
-+}
-+
-+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
-+{
-+ if (pin && pin->hdir)
-+ return pin->hdir->hi_inode;
-+ return NULL;
-+}
-+
-+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
-+{
-+ if (pin)
-+ return pin->hdir;
-+ return NULL;
-+}
-+
-+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
-+{
-+ if (pin)
-+ pin->dentry = dentry;
-+}
-+
-+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
-+ unsigned char lflag)
-+{
-+ if (pin) {
-+ if (lflag)
-+ au_fset_pin(pin->flags, DI_LOCKED);
-+ else
-+ au_fclr_pin(pin->flags, DI_LOCKED);
-+ }
-+}
-+
-+#if 0 /* reserved */
-+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
-+{
-+ if (pin) {
-+ dput(pin->parent);
-+ pin->parent = dget(parent);
-+ }
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_branch;
-+#ifdef CONFIG_AUFS_HNOTIFY
-+struct au_hnotify_op {
-+ void (*ctl)(struct au_hinode *hinode, int do_set);
-+ int (*alloc)(struct au_hinode *hinode);
-+
-+ /*
-+ * if it returns true, the caller should free hinode->hi_notify,
-+ * otherwise ->free() frees it.
-+ */
-+ int (*free)(struct au_hinode *hinode,
-+ struct au_hnotify *hn) __must_check;
-+
-+ void (*fin)(void);
-+ int (*init)(void);
-+
-+ int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
-+ void (*fin_br)(struct au_branch *br);
-+ int (*init_br)(struct au_branch *br, int perm);
-+};
-+
-+/* hnotify.c */
-+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
-+void au_hn_free(struct au_hinode *hinode);
-+void au_hn_ctl(struct au_hinode *hinode, int do_set);
-+void au_hn_reset(struct inode *inode, unsigned int flags);
-+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
-+ struct qstr *h_child_qstr, struct inode *h_child_inode);
-+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
-+int au_hnotify_init_br(struct au_branch *br, int perm);
-+void au_hnotify_fin_br(struct au_branch *br);
-+int __init au_hnotify_init(void);
-+void au_hnotify_fin(void);
-+
-+/* hfsnotify.c */
-+extern const struct au_hnotify_op au_hnotify_op;
-+
-+static inline
-+void au_hn_init(struct au_hinode *hinode)
-+{
-+ hinode->hi_notify = NULL;
-+}
-+
-+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
-+{
-+ return hinode->hi_notify;
-+}
-+
-+#else
-+AuStub(int, au_hn_alloc, return -EOPNOTSUPP,
-+ struct au_hinode *hinode __maybe_unused,
-+ struct inode *inode __maybe_unused)
-+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode)
-+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
-+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
-+ int do_set __maybe_unused)
-+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
-+ unsigned int flags __maybe_unused)
-+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
-+ struct au_branch *br __maybe_unused,
-+ int perm __maybe_unused)
-+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
-+ int perm __maybe_unused)
-+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
-+AuStubInt0(__init au_hnotify_init, void)
-+AuStubVoid(au_hnotify_fin, void)
-+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
-+#endif /* CONFIG_AUFS_HNOTIFY */
-+
-+static inline void au_hn_suspend(struct au_hinode *hdir)
-+{
-+ au_hn_ctl(hdir, /*do_set*/0);
-+}
-+
-+static inline void au_hn_resume(struct au_hinode *hdir)
-+{
-+ au_hn_ctl(hdir, /*do_set*/1);
-+}
-+
-+static inline void au_hn_imtx_lock(struct au_hinode *hdir)
-+{
-+ mutex_lock(&hdir->hi_inode->i_mutex);
-+ au_hn_suspend(hdir);
-+}
-+
-+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir,
-+ unsigned int sc __maybe_unused)
-+{
-+ mutex_lock_nested(&hdir->hi_inode->i_mutex, sc);
-+ au_hn_suspend(hdir);
-+}
-+
-+static inline void au_hn_imtx_unlock(struct au_hinode *hdir)
-+{
-+ au_hn_resume(hdir);
-+ mutex_unlock(&hdir->hi_inode->i_mutex);
-+}
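-+
-+/*
-+ * typical pairing (a sketch; the actual lock subclass depends on the
-+ * caller):
-+ *
-+ *	au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ *	(modify the lower dir; aufs' own events are suppressed meanwhile)
-+ *	au_hn_imtx_unlock(hdir);
-+ */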
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_INODE_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/ioctl.c linux-4.1.10/fs/aufs/ioctl.c
---- linux-4.1.10.orig/fs/aufs/ioctl.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/ioctl.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,219 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * ioctl
-+ * plink-management and readdir in userspace.
-+ * assist the pathconf(3) wrapper library.
-+ * move-down
-+ * File-based Hierarchical Storage Management.
-+ */
-+
-+#include <linux/compat.h>
-+#include <linux/file.h>
-+#include "aufs.h"
-+
-+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
-+{
-+ int err, fd;
-+ aufs_bindex_t wbi, bindex, bend;
-+ struct file *h_file;
-+ struct super_block *sb;
-+ struct dentry *root;
-+ struct au_branch *br;
-+ struct aufs_wbr_fd wbrfd = {
-+ .oflags = au_dir_roflags,
-+ .brid = -1
-+ };
-+ const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
-+ | O_NOATIME | O_CLOEXEC;
-+
-+ AuDebugOn(wbrfd.oflags & ~valid);
-+
-+ if (arg) {
-+ err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ err = -EINVAL;
-+ AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
-+ wbrfd.oflags |= au_dir_roflags;
-+ AuDbg("0%o\n", wbrfd.oflags);
-+ if (unlikely(wbrfd.oflags & ~valid))
-+ goto out;
-+ }
-+
-+ fd = get_unused_fd_flags(0);
-+ err = fd;
-+ if (unlikely(fd < 0))
-+ goto out;
-+
-+ h_file = ERR_PTR(-EINVAL);
-+ wbi = 0;
-+ br = NULL;
-+ sb = path->dentry->d_sb;
-+ root = sb->s_root;
-+ aufs_read_lock(root, AuLock_IR);
-+ bend = au_sbend(sb);
-+ if (wbrfd.brid >= 0) {
-+ wbi = au_br_index(sb, wbrfd.brid);
-+ if (unlikely(wbi < 0 || wbi > bend))
-+ goto out_unlock;
-+ }
-+
-+ h_file = ERR_PTR(-ENOENT);
-+ br = au_sbr(sb, wbi);
-+ if (!au_br_writable(br->br_perm)) {
-+ if (arg)
-+ goto out_unlock;
-+
-+ bindex = wbi + 1;
-+ wbi = -1;
-+ for (; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_writable(br->br_perm)) {
-+ wbi = bindex;
-+ br = au_sbr(sb, wbi);
-+ break;
-+ }
-+ }
-+ }
-+ AuDbg("wbi %d\n", wbi);
-+ if (wbi >= 0)
-+ h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
-+ /*force_wr*/0);
-+
-+out_unlock:
-+ aufs_read_unlock(root, AuLock_IR);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out_fd;
-+
-+ atomic_dec(&br->br_count); /* cf. au_h_open() */
-+ fd_install(fd, h_file);
-+ err = fd;
-+ goto out; /* success */
-+
-+out_fd:
-+ put_unused_fd(fd);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err;
-+ struct dentry *dentry;
-+
-+ switch (cmd) {
-+ case AUFS_CTL_RDU:
-+ case AUFS_CTL_RDU_INO:
-+ err = au_rdu_ioctl(file, cmd, arg);
-+ break;
-+
-+ case AUFS_CTL_WBR_FD:
-+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
-+ break;
-+
-+ case AUFS_CTL_IBUSY:
-+ err = au_ibusy_ioctl(file, arg);
-+ break;
-+
-+ case AUFS_CTL_BRINFO:
-+ err = au_brinfo_ioctl(file, arg);
-+ break;
-+
-+ case AUFS_CTL_FHSM_FD:
-+ dentry = file->f_path.dentry;
-+ if (IS_ROOT(dentry))
-+ err = au_fhsm_fd(dentry->d_sb, arg);
-+ else
-+ err = -ENOTTY;
-+ break;
-+
-+ default:
-+ /* do not call the lower */
-+ AuDbg("0x%x\n", cmd);
-+ err = -ENOTTY;
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err;
-+
-+ switch (cmd) {
-+ case AUFS_CTL_MVDOWN:
-+ err = au_mvdown(file->f_path.dentry, (void __user *)arg);
-+ break;
-+
-+ case AUFS_CTL_WBR_FD:
-+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
-+ break;
-+
-+ default:
-+ /* do not call the lower */
-+ AuDbg("0x%x\n", cmd);
-+ err = -ENOTTY;
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+#ifdef CONFIG_COMPAT
-+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ long err;
-+
-+ switch (cmd) {
-+ case AUFS_CTL_RDU:
-+ case AUFS_CTL_RDU_INO:
-+ err = au_rdu_compat_ioctl(file, cmd, arg);
-+ break;
-+
-+ case AUFS_CTL_IBUSY:
-+ err = au_ibusy_compat_ioctl(file, arg);
-+ break;
-+
-+ case AUFS_CTL_BRINFO:
-+ err = au_brinfo_compat_ioctl(file, arg);
-+ break;
-+
-+ default:
-+ err = aufs_ioctl_dir(file, cmd, arg);
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
-+}
-+#endif
-diff -Nur linux-4.1.10.orig/fs/aufs/i_op_add.c linux-4.1.10/fs/aufs/i_op_add.c
---- linux-4.1.10.orig/fs/aufs/i_op_add.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/i_op_add.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,932 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode operations (add entry)
-+ */
-+
-+#include "aufs.h"
-+
-+/*
-+ * final procedure of adding a new entry, except link(2).
-+ * remove the whiteout, instantiate, copy up the parent dir's times and
-+ * size, and update its version.
-+ * if it fails, re-create the removed whiteout.
-+ */
-+static int epilog(struct inode *dir, aufs_bindex_t bindex,
-+ struct dentry *wh_dentry, struct dentry *dentry)
-+{
-+ int err, rerr;
-+ aufs_bindex_t bwh;
-+ struct path h_path;
-+ struct super_block *sb;
-+ struct inode *inode, *h_dir;
-+ struct dentry *wh;
-+
-+ bwh = -1;
-+ sb = dir->i_sb;
-+ if (wh_dentry) {
-+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
-+ IMustLock(h_dir);
-+ AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
-+ bwh = au_dbwh(dentry);
-+ h_path.dentry = wh_dentry;
-+ h_path.mnt = au_sbr_mnt(sb, bindex);
-+ err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
-+ dentry);
-+ if (unlikely(err))
-+ goto out;
-+ }
-+
-+ inode = au_new_inode(dentry, /*must_new*/1);
-+ if (!IS_ERR(inode)) {
-+ d_instantiate(dentry, inode);
-+ dir = d_inode(dentry->d_parent); /* dir inode is locked */
-+ IMustLock(dir);
-+ au_dir_ts(dir, bindex);
-+ dir->i_version++;
-+ au_fhsm_wrote(sb, bindex, /*force*/0);
-+ return 0; /* success */
-+ }
-+
-+ err = PTR_ERR(inode);
-+ if (!wh_dentry)
-+ goto out;
-+
-+ /* revert */
-+ /* dir inode is locked */
-+ wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
-+ rerr = PTR_ERR(wh);
-+ if (IS_ERR(wh)) {
-+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n",
-+ dentry, err, rerr);
-+ err = -EIO;
-+ } else
-+ dput(wh);
-+
-+out:
-+ return err;
-+}
-+
-+static int au_d_may_add(struct dentry *dentry)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (unlikely(d_unhashed(dentry)))
-+ err = -ENOENT;
-+ if (unlikely(d_really_is_positive(dentry)))
-+ err = -EEXIST;
-+ return err;
-+}
-+
-+/*
-+ * simple tests for the adding inode operations.
-+ * follows the checks in the VFS, plus the parent-child relationship.
-+ */
-+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent, int isdir)
-+{
-+ int err;
-+ umode_t h_mode;
-+ struct dentry *h_dentry;
-+ struct inode *h_inode;
-+
-+ err = -ENAMETOOLONG;
-+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
-+ goto out;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (d_really_is_negative(dentry)) {
-+ err = -EEXIST;
-+ if (unlikely(d_is_positive(h_dentry)))
-+ goto out;
-+ } else {
-+ /* rename(2) case */
-+ err = -EIO;
-+ if (unlikely(d_is_negative(h_dentry)))
-+ goto out;
-+ h_inode = d_inode(h_dentry);
-+ if (unlikely(!h_inode->i_nlink))
-+ goto out;
-+
-+ h_mode = h_inode->i_mode;
-+ if (!isdir) {
-+ err = -EISDIR;
-+ if (unlikely(S_ISDIR(h_mode)))
-+ goto out;
-+ } else if (unlikely(!S_ISDIR(h_mode))) {
-+ err = -ENOTDIR;
-+ goto out;
-+ }
-+ }
-+
-+ err = 0;
-+ /* expected parent dir is locked */
-+ if (unlikely(h_parent != h_dentry->d_parent))
-+ err = -EIO;
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/*
-+ * initial procedure of adding a new entry.
-+ * prepare a writable branch and the parent dir, lock it,
-+ * and look up the whiteout for the new entry.
-+ */
-+static struct dentry*
-+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
-+ struct dentry *src_dentry, struct au_pin *pin,
-+ struct au_wr_dir_args *wr_dir_args)
-+{
-+ struct dentry *wh_dentry, *h_parent;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ int err;
-+ unsigned int udba;
-+ aufs_bindex_t bcpup;
-+
-+ AuDbg("%pd\n", dentry);
-+
-+ err = au_wr_dir(dentry, src_dentry, wr_dir_args);
-+ bcpup = err;
-+ wh_dentry = ERR_PTR(err);
-+ if (unlikely(err < 0))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ udba = au_opt_udba(sb);
-+ err = au_pin(pin, dentry, bcpup, udba,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ wh_dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ h_parent = au_pinned_h_parent(pin);
-+ if (udba != AuOpt_UDBA_NONE
-+ && au_dbstart(dentry) == bcpup)
-+ err = au_may_add(dentry, bcpup, h_parent,
-+ au_ftest_wrdir(wr_dir_args->flags, ISDIR));
-+ else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
-+ err = -ENAMETOOLONG;
-+ wh_dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out_unpin;
-+
-+ br = au_sbr(sb, bcpup);
-+ if (dt) {
-+ struct path tmp = {
-+ .dentry = h_parent,
-+ .mnt = au_br_mnt(br)
-+ };
-+ au_dtime_store(dt, au_pinned_parent(pin), &tmp);
-+ }
-+
-+ wh_dentry = NULL;
-+ if (bcpup != au_dbwh(dentry))
-+ goto out; /* success */
-+
-+ /*
-+ * ENAMETOOLONG here means that if we allowed creating such a name,
-+ * it could not be removed later. So we don't allow such a name
-+ * here, nor do we handle ENAMETOOLONG differently here.
-+ */
-+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
-+
-+out_unpin:
-+ if (IS_ERR(wh_dentry))
-+ au_unpin(pin);
-+out:
-+ return wh_dentry;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+enum { Mknod, Symlink, Creat };
-+struct simple_arg {
-+ int type;
-+ union {
-+ struct {
-+ umode_t mode;
-+ bool want_excl;
-+ bool try_aopen;
-+ struct vfsub_aopen_args *aopen;
-+ } c;
-+ struct {
-+ const char *symname;
-+ } s;
-+ struct {
-+ umode_t mode;
-+ dev_t dev;
-+ } m;
-+ } u;
-+};
-+
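-+/*
-+ * common body for mknod/symlink/create: prepare the writable branch and
-+ * the whiteout, perform the lower operation selected by @arg->type, then
-+ * finalize via epilog() or revert on failure.
-+ */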
-+static int add_simple(struct inode *dir, struct dentry *dentry,
-+ struct simple_arg *arg)
-+{
-+ int err, rerr;
-+ aufs_bindex_t bstart;
-+ unsigned char created;
-+ const unsigned char try_aopen
-+ = (arg->type == Creat && arg->u.c.try_aopen);
-+ struct dentry *wh_dentry, *parent;
-+ struct inode *h_dir;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ /* to reduce stack usage */
-+ struct {
-+ struct au_dtime dt;
-+ struct au_pin pin;
-+ struct path h_path;
-+ struct au_wr_dir_args wr_dir_args;
-+ } *a;
-+
-+ AuDbg("%pd\n", dentry);
-+ IMustLock(dir);
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+ a->wr_dir_args.force_btgt = -1;
-+ a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
-+
-+ parent = dentry->d_parent; /* dir inode is locked */
-+ if (!try_aopen) {
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_free;
-+ }
-+ err = au_d_may_add(dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ if (!try_aopen)
-+ di_write_lock_parent(parent);
-+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
-+ &a->pin, &a->wr_dir_args);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_parent;
-+
-+ bstart = au_dbstart(dentry);
-+ sb = dentry->d_sb;
-+ br = au_sbr(sb, bstart);
-+ a->h_path.dentry = au_h_dptr(dentry, bstart);
-+ a->h_path.mnt = au_br_mnt(br);
-+ h_dir = au_pinned_h_dir(&a->pin);
-+ switch (arg->type) {
-+ case Creat:
-+ err = 0;
-+ if (!try_aopen || !h_dir->i_op->atomic_open)
-+ err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode,
-+ arg->u.c.want_excl);
-+ else
-+ err = vfsub_atomic_open(h_dir, a->h_path.dentry,
-+ arg->u.c.aopen, br);
-+ break;
-+ case Symlink:
-+ err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
-+ break;
-+ case Mknod:
-+ err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
-+ arg->u.m.dev);
-+ break;
-+ default:
-+ BUG();
-+ }
-+ created = !err;
-+ if (!err)
-+ err = epilog(dir, bstart, wh_dentry, dentry);
-+
-+ /* revert */
-+ if (unlikely(created && err && d_is_positive(a->h_path.dentry))) {
-+ /* no delegation since it is just created */
-+ rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL,
-+ /*force*/0);
-+ if (rerr) {
-+ AuIOErr("%pd revert failure(%d, %d)\n",
-+ dentry, err, rerr);
-+ err = -EIO;
-+ }
-+ au_dtime_revert(&a->dt);
-+ }
-+
-+ if (!err && try_aopen && !h_dir->i_op->atomic_open)
-+ *arg->u.c.aopen->opened |= FILE_CREATED;
-+
-+ au_unpin(&a->pin);
-+ dput(wh_dentry);
-+
-+out_parent:
-+ if (!try_aopen)
-+ di_write_unlock(parent);
-+out_unlock:
-+ if (unlikely(err)) {
-+ au_update_dbstart(dentry);
-+ d_drop(dentry);
-+ }
-+ if (!try_aopen)
-+ aufs_read_unlock(dentry, AuLock_DW);
-+out_free:
-+ kfree(a);
-+out:
-+ return err;
-+}
-+
-+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+ dev_t dev)
-+{
-+ struct simple_arg arg = {
-+ .type = Mknod,
-+ .u.m = {
-+ .mode = mode,
-+ .dev = dev
-+ }
-+ };
-+ return add_simple(dir, dentry, &arg);
-+}
-+
-+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
-+{
-+ struct simple_arg arg = {
-+ .type = Symlink,
-+ .u.s.symname = symname
-+ };
-+ return add_simple(dir, dentry, &arg);
-+}
-+
-+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-+ bool want_excl)
-+{
-+ struct simple_arg arg = {
-+ .type = Creat,
-+ .u.c = {
-+ .mode = mode,
-+ .want_excl = want_excl
-+ }
-+ };
-+ return add_simple(dir, dentry, &arg);
-+}
-+
-+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
-+ struct vfsub_aopen_args *aopen_args)
-+{
-+ struct simple_arg arg = {
-+ .type = Creat,
-+ .u.c = {
-+ .mode = aopen_args->create_mode,
-+ .want_excl = aopen_args->open_flag & O_EXCL,
-+ .try_aopen = true,
-+ .aopen = aopen_args
-+ }
-+ };
-+ return add_simple(dir, dentry, &arg);
-+}
-+
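-+/*
-+ * create an anonymous temporary file via the lower fs' ->tmpfile().
-+ * the target branch is chosen by au_wr_dir() with the TMPFILE flag, and
-+ * the new inode is bound to @dentry by d_tmpfile().
-+ */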
-+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct super_block *sb;
-+ struct dentry *parent, *h_parent, *h_dentry;
-+ struct inode *h_dir, *inode;
-+ struct vfsmount *h_mnt;
-+ struct au_wr_dir_args wr_dir_args = {
-+ .force_btgt = -1,
-+ .flags = AuWrDir_TMPFILE
-+ };
-+
-+ /* copy-up may happen */
-+ mutex_lock(&dir->i_mutex);
-+
-+ sb = dir->i_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_di_init(dentry);
-+ if (unlikely(err))
-+ goto out_si;
-+
-+ err = -EBUSY;
-+ parent = d_find_any_alias(dir);
-+ AuDebugOn(!parent);
-+ di_write_lock_parent(parent);
-+ if (unlikely(d_inode(parent) != dir))
-+ goto out_parent;
-+
-+ err = au_digen_test(parent, au_sigen(sb));
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ bindex = au_dbstart(parent);
-+ au_set_dbstart(dentry, bindex);
-+ au_set_dbend(dentry, bindex);
-+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
-+ bindex = err;
-+ if (unlikely(err < 0))
-+ goto out_parent;
-+
-+ err = -EOPNOTSUPP;
-+ h_dir = au_h_iptr(dir, bindex);
-+ if (unlikely(!h_dir->i_op->tmpfile))
-+ goto out_parent;
-+
-+ h_mnt = au_sbr_mnt(sb, bindex);
-+ err = vfsub_mnt_want_write(h_mnt);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ h_parent = au_h_dptr(parent, bindex);
-+ err = inode_permission(d_inode(h_parent), MAY_WRITE | MAY_EXEC);
-+ if (unlikely(err))
-+ goto out_mnt;
-+
-+ err = -ENOMEM;
-+ h_dentry = d_alloc(h_parent, &dentry->d_name);
-+ if (unlikely(!h_dentry))
-+ goto out_mnt;
-+
-+ err = h_dir->i_op->tmpfile(h_dir, h_dentry, mode);
-+ if (unlikely(err))
-+ goto out_dentry;
-+
-+ au_set_dbstart(dentry, bindex);
-+ au_set_dbend(dentry, bindex);
-+ au_set_h_dptr(dentry, bindex, dget(h_dentry));
-+ inode = au_new_inode(dentry, /*must_new*/1);
-+ if (IS_ERR(inode)) {
-+ err = PTR_ERR(inode);
-+ au_set_h_dptr(dentry, bindex, NULL);
-+ au_set_dbstart(dentry, -1);
-+ au_set_dbend(dentry, -1);
-+ } else {
-+ if (!inode->i_nlink)
-+ set_nlink(inode, 1);
-+ d_tmpfile(dentry, inode);
-+ au_di(dentry)->di_tmpfile = 1;
-+
-+ /* update without i_mutex */
-+ if (au_ibstart(dir) == au_dbstart(dentry))
-+ au_cpup_attr_timesizes(dir);
-+ }
-+
-+out_dentry:
-+ dput(h_dentry);
-+out_mnt:
-+ vfsub_mnt_drop_write(h_mnt);
-+out_parent:
-+ di_write_unlock(parent);
-+ dput(parent);
-+ di_write_unlock(dentry);
-+ if (!err)
-+#if 0
-+ /* verbose coding for lock class name */
-+ au_rw_class(&au_di(dentry)->di_rwsem,
-+ au_lc_key + AuLcNonDir_DIINFO);
-+#else
-+ ;
-+#endif
-+ else {
-+ au_di_fin(dentry);
-+ dentry->d_fsdata = NULL;
-+ }
-+out_si:
-+ si_read_unlock(sb);
-+out:
-+ mutex_unlock(&dir->i_mutex);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_link_args {
-+ aufs_bindex_t bdst, bsrc;
-+ struct au_pin pin;
-+ struct path h_path;
-+ struct dentry *src_parent, *parent;
-+};
-+
-+static int au_cpup_before_link(struct dentry *src_dentry,
-+ struct au_link_args *a)
-+{
-+ int err;
-+ struct dentry *h_src_dentry;
-+ struct au_cp_generic cpg = {
-+ .dentry = src_dentry,
-+ .bdst = a->bdst,
-+ .bsrc = a->bsrc,
-+ .len = -1,
-+ .pin = &a->pin,
-+ .flags = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
-+ };
-+
-+ di_read_lock_parent(a->src_parent, AuLock_IR);
-+ err = au_test_and_cpup_dirs(src_dentry, a->bdst);
-+ if (unlikely(err))
-+ goto out;
-+
-+ h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
-+ err = au_pin(&a->pin, src_dentry, a->bdst,
-+ au_opt_udba(src_dentry->d_sb),
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_sio_cpup_simple(&cpg);
-+ au_unpin(&a->pin);
-+
-+out:
-+ di_read_unlock(a->src_parent, AuLock_IR);
-+ return err;
-+}
-+
-+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
-+ struct au_link_args *a)
-+{
-+ int err;
-+ unsigned char plink;
-+ aufs_bindex_t bend;
-+ struct dentry *h_src_dentry;
-+ struct inode *h_inode, *inode, *delegated;
-+ struct super_block *sb;
-+ struct file *h_file;
-+
-+ plink = 0;
-+ h_inode = NULL;
-+ sb = src_dentry->d_sb;
-+ inode = d_inode(src_dentry);
-+ if (au_ibstart(inode) <= a->bdst)
-+ h_inode = au_h_iptr(inode, a->bdst);
-+ if (!h_inode || !h_inode->i_nlink) {
-+ /* copy up src_dentry under the name of dentry. */
-+ bend = au_dbend(dentry);
-+ if (bend < a->bsrc)
-+ au_set_dbend(dentry, a->bsrc);
-+ au_set_h_dptr(dentry, a->bsrc,
-+ dget(au_h_dptr(src_dentry, a->bsrc)));
-+ dget(a->h_path.dentry);
-+ au_set_h_dptr(dentry, a->bdst, NULL);
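-+ /*
-+ * borrow src_dentry's inode temporarily, apparently so the
-+ * copy-up helpers see a positive dentry; it is restored below.
-+ */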
-+ AuDbg("temporary d_inode...\n");
-+ spin_lock(&dentry->d_lock);
-+ dentry->d_inode = d_inode(src_dentry); /* tmp */
-+ spin_unlock(&dentry->d_lock);
-+ h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0);
-+ if (IS_ERR(h_file))
-+ err = PTR_ERR(h_file);
-+ else {
-+ struct au_cp_generic cpg = {
-+ .dentry = dentry,
-+ .bdst = a->bdst,
-+ .bsrc = -1,
-+ .len = -1,
-+ .pin = &a->pin,
-+ .flags = AuCpup_KEEPLINO
-+ };
-+ err = au_sio_cpup_simple(&cpg);
-+ au_h_open_post(dentry, a->bsrc, h_file);
-+ if (!err) {
-+ dput(a->h_path.dentry);
-+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
-+ } else
-+ au_set_h_dptr(dentry, a->bdst,
-+ a->h_path.dentry);
-+ }
-+ spin_lock(&dentry->d_lock);
-+ dentry->d_inode = NULL; /* restore */
-+ spin_unlock(&dentry->d_lock);
-+ AuDbg("temporary d_inode...done\n");
-+ au_set_h_dptr(dentry, a->bsrc, NULL);
-+ au_set_dbend(dentry, bend);
-+ } else {
-+ /* the inode of src_dentry already exists on the a->bdst branch */
-+ h_src_dentry = d_find_alias(h_inode);
-+ if (!h_src_dentry && au_plink_test(inode)) {
-+ plink = 1;
-+ h_src_dentry = au_plink_lkup(inode, a->bdst);
-+ err = PTR_ERR(h_src_dentry);
-+ if (IS_ERR(h_src_dentry))
-+ goto out;
-+
-+ if (unlikely(d_is_negative(h_src_dentry))) {
-+ dput(h_src_dentry);
-+ h_src_dentry = NULL;
-+ }
-+
-+ }
-+ if (h_src_dentry) {
-+ delegated = NULL;
-+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
-+ &a->h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ dput(h_src_dentry);
-+ } else {
-+ AuIOErr("no dentry found for hi%lu on b%d\n",
-+ h_inode->i_ino, a->bdst);
-+ err = -EIO;
-+ }
-+ }
-+
-+ if (!err && !plink)
-+ au_plink_append(inode, a->bdst, a->h_path.dentry);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int aufs_link(struct dentry *src_dentry, struct inode *dir,
-+ struct dentry *dentry)
-+{
-+ int err, rerr;
-+ struct au_dtime dt;
-+ struct au_link_args *a;
-+ struct dentry *wh_dentry, *h_src_dentry;
-+ struct inode *inode, *delegated;
-+ struct super_block *sb;
-+ struct au_wr_dir_args wr_dir_args = {
-+ /* .force_btgt = -1, */
-+ .flags = AuWrDir_ADD_ENTRY
-+ };
-+
-+ IMustLock(dir);
-+ inode = d_inode(src_dentry);
-+ IMustLock(inode);
-+
-+ err = -ENOMEM;
-+ a = kzalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ a->parent = dentry->d_parent; /* dir inode is locked */
-+ err = aufs_read_and_write_lock2(dentry, src_dentry,
-+ AuLock_NOPLM | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_kfree;
-+ err = au_d_linkable(src_dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ err = au_d_may_add(dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ a->src_parent = dget_parent(src_dentry);
-+ wr_dir_args.force_btgt = au_ibstart(inode);
-+
-+ di_write_lock_parent(a->parent);
-+ wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
-+ wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
-+ &wr_dir_args);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_parent;
-+
-+ err = 0;
-+ sb = dentry->d_sb;
-+ a->bdst = au_dbstart(dentry);
-+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
-+ a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
-+ a->bsrc = au_ibstart(inode);
-+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
-+ if (!h_src_dentry && au_di(src_dentry)->di_tmpfile)
-+ h_src_dentry = dget(au_hi_wh(inode, a->bsrc));
-+ if (!h_src_dentry) {
-+ a->bsrc = au_dbstart(src_dentry);
-+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
-+ AuDebugOn(!h_src_dentry);
-+ } else if (IS_ERR(h_src_dentry)) {
-+ err = PTR_ERR(h_src_dentry);
-+ goto out_parent;
-+ }
-+
-+ if (au_opt_test(au_mntflags(sb), PLINK)) {
-+ if (a->bdst < a->bsrc
-+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
-+ err = au_cpup_or_link(src_dentry, dentry, a);
-+ else {
-+ delegated = NULL;
-+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
-+ &a->h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ }
-+ dput(h_src_dentry);
-+ } else {
-+ /*
-+ * copy up src_dentry to the branch we are processing,
-+ * and then link(2) to it.
-+ */
-+ dput(h_src_dentry);
-+ if (a->bdst < a->bsrc
-+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
-+ au_unpin(&a->pin);
-+ di_write_unlock(a->parent);
-+ err = au_cpup_before_link(src_dentry, a);
-+ di_write_lock_parent(a->parent);
-+ if (!err)
-+ err = au_pin(&a->pin, dentry, a->bdst,
-+ au_opt_udba(sb),
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (unlikely(err))
-+ goto out_wh;
-+ }
-+ if (!err) {
-+ h_src_dentry = au_h_dptr(src_dentry, a->bdst);
-+ err = -ENOENT;
-+ if (h_src_dentry && d_is_positive(h_src_dentry)) {
-+ delegated = NULL;
-+ err = vfsub_link(h_src_dentry,
-+ au_pinned_h_dir(&a->pin),
-+ &a->h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry"
-+ " for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ }
-+ }
-+ }
-+ if (unlikely(err))
-+ goto out_unpin;
-+
-+ if (wh_dentry) {
-+ a->h_path.dentry = wh_dentry;
-+ err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
-+ dentry);
-+ if (unlikely(err))
-+ goto out_revert;
-+ }
-+
-+ au_dir_ts(dir, a->bdst);
-+ dir->i_version++;
-+ inc_nlink(inode);
-+ inode->i_ctime = dir->i_ctime;
-+ d_instantiate(dentry, au_igrab(inode));
-+ if (d_unhashed(a->h_path.dentry))
-+ /* some filesystems call d_drop() */
-+ d_drop(dentry);
-+ /* some filesystems consume an inode even for a hardlink */
-+ au_fhsm_wrote(sb, a->bdst, /*force*/0);
-+ goto out_unpin; /* success */
-+
-+out_revert:
-+ /* no delegation since it is just created */
-+ rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path,
-+ /*delegated*/NULL, /*force*/0);
-+ if (unlikely(rerr)) {
-+ AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr);
-+ err = -EIO;
-+ }
-+ au_dtime_revert(&dt);
-+out_unpin:
-+ au_unpin(&a->pin);
-+out_wh:
-+ dput(wh_dentry);
-+out_parent:
-+ di_write_unlock(a->parent);
-+ dput(a->src_parent);
-+out_unlock:
-+ if (unlikely(err)) {
-+ au_update_dbstart(dentry);
-+ d_drop(dentry);
-+ }
-+ aufs_read_and_write_unlock2(dentry, src_dentry);
-+out_kfree:
-+ kfree(a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-+{
-+ int err, rerr;
-+ aufs_bindex_t bindex;
-+ unsigned char diropq;
-+ struct path h_path;
-+ struct dentry *wh_dentry, *parent, *opq_dentry;
-+ struct mutex *h_mtx;
-+ struct super_block *sb;
-+ struct {
-+ struct au_pin pin;
-+ struct au_dtime dt;
-+ } *a; /* reduce the stack usage */
-+ struct au_wr_dir_args wr_dir_args = {
-+ .force_btgt = -1,
-+ .flags = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
-+ };
-+
-+ IMustLock(dir);
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_free;
-+ err = au_d_may_add(dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ parent = dentry->d_parent; /* dir inode is locked */
-+ di_write_lock_parent(parent);
-+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
-+ &a->pin, &wr_dir_args);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_parent;
-+
-+ sb = dentry->d_sb;
-+ bindex = au_dbstart(dentry);
-+ h_path.dentry = au_h_dptr(dentry, bindex);
-+ h_path.mnt = au_sbr_mnt(sb, bindex);
-+ err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
-+ if (unlikely(err))
-+ goto out_unpin;
-+
-+ /* make the dir opaque */
-+ diropq = 0;
-+ h_mtx = &d_inode(h_path.dentry)->i_mutex;
-+ if (wh_dentry
-+ || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
-+ opq_dentry = au_diropq_create(dentry, bindex);
-+ mutex_unlock(h_mtx);
-+ err = PTR_ERR(opq_dentry);
-+ if (IS_ERR(opq_dentry))
-+ goto out_dir;
-+ dput(opq_dentry);
-+ diropq = 1;
-+ }
-+
-+ err = epilog(dir, bindex, wh_dentry, dentry);
-+ if (!err) {
-+ inc_nlink(dir);
-+ goto out_unpin; /* success */
-+ }
-+
-+ /* revert */
-+ if (diropq) {
-+ AuLabel(revert opq);
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
-+ rerr = au_diropq_remove(dentry, bindex);
-+ mutex_unlock(h_mtx);
-+ if (rerr) {
-+ AuIOErr("%pd reverting diropq failed(%d, %d)\n",
-+ dentry, err, rerr);
-+ err = -EIO;
-+ }
-+ }
-+
-+out_dir:
-+ AuLabel(revert dir);
-+ rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
-+ if (rerr) {
-+ AuIOErr("%pd reverting dir failed(%d, %d)\n",
-+ dentry, err, rerr);
-+ err = -EIO;
-+ }
-+ au_dtime_revert(&a->dt);
-+out_unpin:
-+ au_unpin(&a->pin);
-+ dput(wh_dentry);
-+out_parent:
-+ di_write_unlock(parent);
-+out_unlock:
-+ if (unlikely(err)) {
-+ au_update_dbstart(dentry);
-+ d_drop(dentry);
-+ }
-+ aufs_read_unlock(dentry, AuLock_DW);
-+out_free:
-+ kfree(a);
-+out:
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/i_op.c linux-4.1.10/fs/aufs/i_op.c
---- linux-4.1.10.orig/fs/aufs/i_op.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/i_op.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1447 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode operations (except add/del/rename)
-+ */
-+
-+#include <linux/device_cgroup.h>
-+#include <linux/fs_stack.h>
-+#include <linux/mm.h>
-+#include <linux/namei.h>
-+#include <linux/security.h>
-+#include "aufs.h"
-+
-+static int h_permission(struct inode *h_inode, int mask,
-+ struct vfsmount *h_mnt, int brperm)
-+{
-+ int err;
-+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
-+
-+ err = -EACCES;
-+ if ((write_mask && IS_IMMUTABLE(h_inode))
-+ || ((mask & MAY_EXEC)
-+ && S_ISREG(h_inode->i_mode)
-+ && ((h_mnt->mnt_flags & MNT_NOEXEC)
-+ || !(h_inode->i_mode & S_IXUGO))))
-+ goto out;
-+
-+ /*
-+ * - skip the lower fs test in the case of a write to a read-only branch.
-+ * - the nfs dir permission write check is optimized, but a policy for
-+ * link/rename requires a real check.
-+ * - nfs always sets MS_POSIXACL regardless of its mount option 'noacl.'
-+ * in this case, generic_permission() returns -EOPNOTSUPP.
-+ */
-+ if ((write_mask && !au_br_writable(brperm))
-+ || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
-+ && write_mask && !(mask & MAY_READ))
-+ || !h_inode->i_op->permission) {
-+ /* AuLabel(generic_permission); */
-+ /* AuDbg("get_acl %pf\n", h_inode->i_op->get_acl); */
-+ err = generic_permission(h_inode, mask);
-+ if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode))
-+ err = h_inode->i_op->permission(h_inode, mask);
-+ AuTraceErr(err);
-+ } else {
-+ /* AuLabel(h_inode->permission); */
-+ err = h_inode->i_op->permission(h_inode, mask);
-+ AuTraceErr(err);
-+ }
-+
-+ if (!err)
-+ err = devcgroup_inode_permission(h_inode, mask);
-+ if (!err)
-+ err = security_inode_permission(h_inode, mask);
-+
-+#if 0
-+ if (!err) {
-+ /* todo: do we need to call ima_path_check()? */
-+ struct path h_path = {
-+ .dentry =
-+ .mnt = h_mnt
-+ };
-+ err = ima_path_check(&h_path,
-+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
-+ IMA_COUNT_LEAVE);
-+ }
-+#endif
-+
-+out:
-+ return err;
-+}
-+
-+static int aufs_permission(struct inode *inode, int mask)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend;
-+ const unsigned char isdir = !!S_ISDIR(inode->i_mode),
-+ write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
-+ struct inode *h_inode;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+
-+ /* todo: support rcu-walk? */
-+ if (mask & MAY_NOT_BLOCK)
-+ return -ECHILD;
-+
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ ii_read_lock_child(inode);
-+#if 0
-+ err = au_iigen_test(inode, au_sigen(sb));
-+ if (unlikely(err))
-+ goto out;
-+#endif
-+
-+ if (!isdir
-+ || write_mask
-+ || au_opt_test(au_mntflags(sb), DIRPERM1)) {
-+ err = au_busy_or_stale();
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ if (unlikely(!h_inode
-+ || (h_inode->i_mode & S_IFMT)
-+ != (inode->i_mode & S_IFMT)))
-+ goto out;
-+
-+ err = 0;
-+ bindex = au_ibstart(inode);
-+ br = au_sbr(sb, bindex);
-+ err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm);
-+ if (write_mask
-+ && !err
-+ && !special_file(h_inode->i_mode)) {
-+ /* test whether the upper writable branch exists */
-+ err = -EROFS;
-+ for (; bindex >= 0; bindex--)
-+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
-+ err = 0;
-+ break;
-+ }
-+ }
-+ goto out;
-+ }
-+
-+ /* non-write to dir */
-+ err = 0;
-+ bend = au_ibend(inode);
-+ for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) {
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (h_inode) {
-+ err = au_busy_or_stale();
-+ if (unlikely(!S_ISDIR(h_inode->i_mode)))
-+ break;
-+
-+ br = au_sbr(sb, bindex);
-+ err = h_permission(h_inode, mask, au_br_mnt(br),
-+ br->br_perm);
-+ }
-+ }
-+
-+out:
-+ ii_read_unlock(inode);
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
-+ unsigned int flags)
-+{
-+ struct dentry *ret, *parent;
-+ struct inode *inode;
-+ struct super_block *sb;
-+ int err, npositive;
-+
-+ IMustLock(dir);
-+
-+ /* todo: support rcu-walk? */
-+ ret = ERR_PTR(-ECHILD);
-+ if (flags & LOOKUP_RCU)
-+ goto out;
-+
-+ ret = ERR_PTR(-ENAMETOOLONG);
-+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
-+ goto out;
-+
-+ sb = dir->i_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ ret = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_di_init(dentry);
-+ ret = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out_si;
-+
-+ inode = NULL;
-+ npositive = 0; /* suppress a warning */
-+ parent = dentry->d_parent; /* dir inode is locked */
-+ di_read_lock_parent(parent, AuLock_IR);
-+ err = au_alive_dir(parent);
-+ if (!err)
-+ err = au_digen_test(parent, au_sigen(sb));
-+ if (!err) {
-+ npositive = au_lkup_dentry(dentry, au_dbstart(parent),
-+ /*type*/0);
-+ err = npositive;
-+ }
-+ di_read_unlock(parent, AuLock_IR);
-+ ret = ERR_PTR(err);
-+ if (unlikely(err < 0))
-+ goto out_unlock;
-+
-+ if (npositive) {
-+ inode = au_new_inode(dentry, /*must_new*/0);
-+ if (IS_ERR(inode)) {
-+ ret = (void *)inode;
-+ inode = NULL;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ if (inode)
-+ atomic_inc(&inode->i_count);
-+ ret = d_splice_alias(inode, dentry);
-+#if 0
-+ if (unlikely(d_need_lookup(dentry))) {
-+ spin_lock(&dentry->d_lock);
-+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
-+ spin_unlock(&dentry->d_lock);
-+ } else
-+#endif
-+ if (inode) {
-+ if (!IS_ERR(ret)) {
-+ iput(inode);
-+ if (ret && ret != dentry)
-+ ii_write_unlock(inode);
-+ } else {
-+ ii_write_unlock(inode);
-+ iput(inode);
-+ inode = NULL;
-+ }
-+ }
-+
-+out_unlock:
-+ di_write_unlock(dentry);
-+ if (inode) {
-+ /* verbose coding for lock class name */
-+ if (unlikely(S_ISLNK(inode->i_mode)))
-+ au_rw_class(&au_di(dentry)->di_rwsem,
-+ au_lc_key + AuLcSymlink_DIINFO);
-+ else if (unlikely(S_ISDIR(inode->i_mode)))
-+ au_rw_class(&au_di(dentry)->di_rwsem,
-+ au_lc_key + AuLcDir_DIINFO);
-+ else /* likely */
-+ au_rw_class(&au_di(dentry)->di_rwsem,
-+ au_lc_key + AuLcNonDir_DIINFO);
-+ }
-+out_si:
-+ si_read_unlock(sb);
-+out:
-+ return ret;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct aopen_node {
-+ struct hlist_node hlist;
-+ struct file *file, *h_file;
-+};
-+
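-+/*
-+ * look up the lower file which aufs_atomic_open() registered for @file,
-+ * and open through it.
-+ */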
-+static int au_do_aopen(struct inode *inode, struct file *file)
-+{
-+ struct au_sphlhead *aopen;
-+ struct aopen_node *node;
-+ struct au_do_open_args args = {
-+ .no_lock = 1,
-+ .open = au_do_open_nondir
-+ };
-+
-+ aopen = &au_sbi(inode->i_sb)->si_aopen;
-+ spin_lock(&aopen->spin);
-+ hlist_for_each_entry(node, &aopen->head, hlist)
-+ if (node->file == file) {
-+ args.h_file = node->h_file;
-+ break;
-+ }
-+ spin_unlock(&aopen->spin);
-+ /* AuDebugOn(!args.h_file); */
-+
-+ return au_do_open(file, &args);
-+}
-+
-+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry,
-+ struct file *file, unsigned int open_flag,
-+ umode_t create_mode, int *opened)
-+{
-+ int err, h_opened = *opened;
-+ struct dentry *parent;
-+ struct dentry *d;
-+ struct au_sphlhead *aopen;
-+ struct vfsub_aopen_args args = {
-+ .open_flag = open_flag,
-+ .create_mode = create_mode,
-+ .opened = &h_opened
-+ };
-+ struct aopen_node aopen_node = {
-+ .file = file
-+ };
-+
-+ IMustLock(dir);
-+ AuDbg("open_flag 0x%x\n", open_flag);
-+ AuDbgDentry(dentry);
-+
-+ err = 0;
-+ if (!au_di(dentry)) {
-+ d = aufs_lookup(dir, dentry, /*flags*/0);
-+ if (IS_ERR(d)) {
-+ err = PTR_ERR(d);
-+ goto out;
-+ } else if (d) {
-+ /*
-+ * an obsolete dentry was found.
-+ * another error will be returned later.
-+ */
-+ d_drop(d);
-+ dput(d);
-+ AuDbgDentry(d);
-+ }
-+ AuDbgDentry(dentry);
-+ }
-+
-+ if (d_is_positive(dentry)
-+ || d_unhashed(dentry)
-+ || d_unlinked(dentry)
-+ || !(open_flag & O_CREAT))
-+ goto out_no_open;
-+
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out;
-+
-+ parent = dentry->d_parent; /* dir is locked */
-+ di_write_lock_parent(parent);
-+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ AuDbgDentry(dentry);
-+ if (d_is_positive(dentry))
-+ goto out_unlock;
-+
-+ args.file = get_empty_filp();
-+ err = PTR_ERR(args.file);
-+ if (IS_ERR(args.file))
-+ goto out_unlock;
-+
-+ args.file->f_flags = file->f_flags;
-+ err = au_aopen_or_create(dir, dentry, &args);
-+ AuTraceErr(err);
-+ AuDbgFile(args.file);
-+ if (unlikely(err < 0)) {
-+ if (h_opened & FILE_OPENED)
-+ fput(args.file);
-+ else
-+ put_filp(args.file);
-+ goto out_unlock;
-+ }
-+
-+ /* some filesystems don't set FILE_CREATED even when they succeed */
-+ *opened |= FILE_CREATED;
-+ if (h_opened & FILE_OPENED)
-+ aopen_node.h_file = args.file;
-+ else {
-+ put_filp(args.file);
-+ args.file = NULL;
-+ }
-+ aopen = &au_sbi(dir->i_sb)->si_aopen;
-+ au_sphl_add(&aopen_node.hlist, aopen);
-+ err = finish_open(file, dentry, au_do_aopen, opened);
-+ au_sphl_del(&aopen_node.hlist, aopen);
-+ AuTraceErr(err);
-+ AuDbgFile(file);
-+ if (aopen_node.h_file)
-+ fput(aopen_node.h_file);
-+
-+out_unlock:
-+ di_write_unlock(parent);
-+ aufs_read_unlock(dentry, AuLock_DW);
-+ AuDbgDentry(dentry);
-+ if (unlikely(err))
-+ goto out;
-+out_no_open:
-+ if (!err && !(*opened & FILE_CREATED)) {
-+ AuLabel(out_no_open);
-+ dget(dentry);
-+ err = finish_no_open(file, dentry);
-+ }
-+out:
-+ AuDbg("%pd%s%s\n", dentry,
-+ (*opened & FILE_CREATED) ? " created" : "",
-+ (*opened & FILE_OPENED) ? " opened" : "");
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+
-+/* ---------------------------------------------------------------------- */
-+
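-+/*
-+ * copy the parent dirs up (or down) to the branch @bcpup, and when adding
-+ * a new entry, prepare a negative lower dentry there.
-+ */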
-+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
-+ const unsigned char add_entry, aufs_bindex_t bcpup,
-+ aufs_bindex_t bstart)
-+{
-+ int err;
-+ struct dentry *h_parent;
-+ struct inode *h_dir;
-+
-+ if (add_entry)
-+ IMustLock(d_inode(parent));
-+ else
-+ di_write_lock_parent(parent);
-+
-+ err = 0;
-+ if (!au_h_dptr(parent, bcpup)) {
-+ if (bstart > bcpup)
-+ err = au_cpup_dirs(dentry, bcpup);
-+ else if (bstart < bcpup)
-+ err = au_cpdown_dirs(dentry, bcpup);
-+ else
-+ BUG();
-+ }
-+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) {
-+ h_parent = au_h_dptr(parent, bcpup);
-+ h_dir = d_inode(h_parent);
-+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
-+ err = au_lkup_neg(dentry, bcpup, /*wh*/0);
-+ /* todo: no unlock here */
-+ mutex_unlock(&h_dir->i_mutex);
-+
-+ AuDbg("bcpup %d\n", bcpup);
-+ if (!err) {
-+ if (d_really_is_negative(dentry))
-+ au_set_h_dptr(dentry, bstart, NULL);
-+ au_update_dbrange(dentry, /*do_put_zero*/0);
-+ }
-+ }
-+
-+ if (!add_entry)
-+ di_write_unlock(parent);
-+ if (!err)
-+ err = bcpup; /* success */
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/*
-+ * decide the branch and the parent dir where we will create a new entry.
-+ * returns the new bindex or an error.
-+ * copies up the parent dir if needed.
-+ */
-+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
-+ struct au_wr_dir_args *args)
-+{
-+ int err;
-+ unsigned int flags;
-+ aufs_bindex_t bcpup, bstart, src_bstart;
-+ const unsigned char add_entry
-+ = au_ftest_wrdir(args->flags, ADD_ENTRY)
-+ | au_ftest_wrdir(args->flags, TMPFILE);
-+ struct super_block *sb;
-+ struct dentry *parent;
-+ struct au_sbinfo *sbinfo;
-+
-+ sb = dentry->d_sb;
-+ sbinfo = au_sbi(sb);
-+ parent = dget_parent(dentry);
-+ bstart = au_dbstart(dentry);
-+ bcpup = bstart;
-+ if (args->force_btgt < 0) {
-+ if (src_dentry) {
-+ src_bstart = au_dbstart(src_dentry);
-+ if (src_bstart < bstart)
-+ bcpup = src_bstart;
-+ } else if (add_entry) {
-+ flags = 0;
-+ if (au_ftest_wrdir(args->flags, ISDIR))
-+ au_fset_wbr(flags, DIR);
-+ err = AuWbrCreate(sbinfo, dentry, flags);
-+ bcpup = err;
-+ }
-+
-+ if (bcpup < 0 || au_test_ro(sb, bcpup, d_inode(dentry))) {
-+ if (add_entry)
-+ err = AuWbrCopyup(sbinfo, dentry);
-+ else {
-+ if (!IS_ROOT(dentry)) {
-+ di_read_lock_parent(parent, !AuLock_IR);
-+ err = AuWbrCopyup(sbinfo, dentry);
-+ di_read_unlock(parent, !AuLock_IR);
-+ } else
-+ err = AuWbrCopyup(sbinfo, dentry);
-+ }
-+ bcpup = err;
-+ if (unlikely(err < 0))
-+ goto out;
-+ }
-+ } else {
-+ bcpup = args->force_btgt;
-+ AuDebugOn(au_test_ro(sb, bcpup, d_inode(dentry)));
-+ }
-+
-+ AuDbg("bstart %d, bcpup %d\n", bstart, bcpup);
-+ err = bcpup;
-+ if (bcpup == bstart)
-+ goto out; /* success */
-+
-+ /* copyup the new parent into the branch we process */
-+ err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart);
-+ if (err >= 0) {
-+ if (d_really_is_negative(dentry)) {
-+ au_set_h_dptr(dentry, bstart, NULL);
-+ au_set_dbstart(dentry, bcpup);
-+ au_set_dbend(dentry, bcpup);
-+ }
-+ AuDebugOn(add_entry
-+ && !au_ftest_wrdir(args->flags, TMPFILE)
-+ && !au_h_dptr(dentry, bcpup));
-+ }
-+
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
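-+/*
-+ * "pinning" a lower parent dir: lock it with hnotify suspended and, when
-+ * requested, take a write reference on the lower mount, so that the lower
-+ * dir stays stable while the aufs operation runs.
-+ */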
-+void au_pin_hdir_unlock(struct au_pin *p)
-+{
-+ if (p->hdir)
-+ au_hn_imtx_unlock(p->hdir);
-+}
-+
-+int au_pin_hdir_lock(struct au_pin *p)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (!p->hdir)
-+ goto out;
-+
-+ /* even if an error happens later, keep this lock */
-+ au_hn_imtx_lock_nested(p->hdir, p->lsc_hi);
-+
-+ err = -EBUSY;
-+ if (unlikely(p->hdir->hi_inode != d_inode(p->h_parent)))
-+ goto out;
-+
-+ err = 0;
-+ if (p->h_dentry)
-+ err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
-+ p->h_parent, p->br);
-+
-+out:
-+ return err;
-+}
-+
-+int au_pin_hdir_relock(struct au_pin *p)
-+{
-+ int err, i;
-+ struct inode *h_i;
-+ struct dentry *h_d[] = {
-+ p->h_dentry,
-+ p->h_parent
-+ };
-+
-+ err = au_pin_hdir_lock(p);
-+ if (unlikely(err))
-+ goto out;
-+
-+ for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) {
-+ if (!h_d[i])
-+ continue;
-+ if (d_is_positive(h_d[i])) {
-+ h_i = d_inode(h_d[i]);
-+ err = !h_i->i_nlink;
-+ }
-+ }
-+
-+out:
-+ return err;
-+}
-+
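-+/*
-+ * hand over the apparent owner of the pinned dir's i_mutex; presumably
-+ * this keeps the mutex debugging/SMP owner field consistent when the pin
-+ * is acquired and released by different contexts.
-+ */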
-+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
-+{
-+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
-+ p->hdir->hi_inode->i_mutex.owner = task;
-+#endif
-+}
-+
-+void au_pin_hdir_acquire_nest(struct au_pin *p)
-+{
-+ if (p->hdir) {
-+ mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map,
-+ p->lsc_hi, 0, NULL, _RET_IP_);
-+ au_pin_hdir_set_owner(p, current);
-+ }
-+}
-+
-+void au_pin_hdir_release(struct au_pin *p)
-+{
-+ if (p->hdir) {
-+ au_pin_hdir_set_owner(p, p->task);
-+ mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_);
-+ }
-+}
-+
-+struct dentry *au_pinned_h_parent(struct au_pin *pin)
-+{
-+ if (pin && pin->parent)
-+ return au_h_dptr(pin->parent, pin->bindex);
-+ return NULL;
-+}
-+
-+void au_unpin(struct au_pin *p)
-+{
-+ if (p->hdir)
-+ au_pin_hdir_unlock(p);
-+ if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
-+ vfsub_mnt_drop_write(p->h_mnt);
-+ if (!p->hdir)
-+ return;
-+
-+ if (!au_ftest_pin(p->flags, DI_LOCKED))
-+ di_read_unlock(p->parent, AuLock_IR);
-+ iput(p->hdir->hi_inode);
-+ dput(p->parent);
-+ p->parent = NULL;
-+ p->hdir = NULL;
-+ p->h_mnt = NULL;
-+ /* do not clear p->task */
-+}
-+
-+int au_do_pin(struct au_pin *p)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct inode *h_dir;
-+
-+ err = 0;
-+ sb = p->dentry->d_sb;
-+ p->br = au_sbr(sb, p->bindex);
-+ if (IS_ROOT(p->dentry)) {
-+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
-+ p->h_mnt = au_br_mnt(p->br);
-+ err = vfsub_mnt_want_write(p->h_mnt);
-+ if (unlikely(err)) {
-+ au_fclr_pin(p->flags, MNT_WRITE);
-+ goto out_err;
-+ }
-+ }
-+ goto out;
-+ }
-+
-+ p->h_dentry = NULL;
-+ if (p->bindex <= au_dbend(p->dentry))
-+ p->h_dentry = au_h_dptr(p->dentry, p->bindex);
-+
-+ p->parent = dget_parent(p->dentry);
-+ if (!au_ftest_pin(p->flags, DI_LOCKED))
-+ di_read_lock(p->parent, AuLock_IR, p->lsc_di);
-+
-+ h_dir = NULL;
-+ p->h_parent = au_h_dptr(p->parent, p->bindex);
-+ p->hdir = au_hi(d_inode(p->parent), p->bindex);
-+ if (p->hdir)
-+ h_dir = p->hdir->hi_inode;
-+
-+ /*
-+ * in the udba case, or when DI_LOCKED is not set, p->parent may
-+ * have changed and h_parent can be NULL.
-+ */
-+ if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
-+ err = -EBUSY;
-+ if (!au_ftest_pin(p->flags, DI_LOCKED))
-+ di_read_unlock(p->parent, AuLock_IR);
-+ dput(p->parent);
-+ p->parent = NULL;
-+ goto out_err;
-+ }
-+
-+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
-+ p->h_mnt = au_br_mnt(p->br);
-+ err = vfsub_mnt_want_write(p->h_mnt);
-+ if (unlikely(err)) {
-+ au_fclr_pin(p->flags, MNT_WRITE);
-+ if (!au_ftest_pin(p->flags, DI_LOCKED))
-+ di_read_unlock(p->parent, AuLock_IR);
-+ dput(p->parent);
-+ p->parent = NULL;
-+ goto out_err;
-+ }
-+ }
-+
-+ au_igrab(h_dir);
-+ err = au_pin_hdir_lock(p);
-+ if (!err)
-+ goto out; /* success */
-+
-+ au_unpin(p);
-+
-+out_err:
-+ pr_err("err %d\n", err);
-+ err = au_busy_or_stale();
-+out:
-+ return err;
-+}
-+
-+void au_pin_init(struct au_pin *p, struct dentry *dentry,
-+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
-+ unsigned int udba, unsigned char flags)
-+{
-+ p->dentry = dentry;
-+ p->udba = udba;
-+ p->lsc_di = lsc_di;
-+ p->lsc_hi = lsc_hi;
-+ p->flags = flags;
-+ p->bindex = bindex;
-+
-+ p->parent = NULL;
-+ p->hdir = NULL;
-+ p->h_mnt = NULL;
-+
-+ p->h_dentry = NULL;
-+ p->h_parent = NULL;
-+ p->br = NULL;
-+ p->task = current;
-+}
-+
-+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
-+ unsigned int udba, unsigned char flags)
-+{
-+ au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
-+ udba, flags);
-+ return au_do_pin(pin);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * ->setattr() and ->getattr() are called in various cases.
-+ * chmod, stat: dentry is revalidated.
-+ * fchmod, fstat: the file and dentry are not revalidated; additionally,
-+ * they may be unhashed.
-+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
-+ */
-+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
-+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
-+{
-+ int err;
-+ struct dentry *parent;
-+
-+ err = 0;
-+ if (au_digen_test(dentry, sigen)) {
-+ parent = dget_parent(dentry);
-+ di_read_lock_parent(parent, AuLock_IR);
-+ err = au_refresh_dentry(dentry, parent);
-+ di_read_unlock(parent, AuLock_IR);
-+ dput(parent);
-+ }
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
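-+/*
-+ * pin the branch chosen by au_wr_dir() and copy @dentry up when the
-+ * target branch differs from its current top branch (DID_CPUP); on
-+ * success the caller holds a->h_inode's i_mutex.
-+ */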
-+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
-+ struct au_icpup_args *a)
-+{
-+ int err;
-+ loff_t sz;
-+ aufs_bindex_t bstart, ibstart;
-+ struct dentry *hi_wh, *parent;
-+ struct inode *inode;
-+ struct au_wr_dir_args wr_dir_args = {
-+ .force_btgt = -1,
-+ .flags = 0
-+ };
-+
-+ if (d_is_dir(dentry))
-+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
-+ /* plink or hi_wh() case */
-+ bstart = au_dbstart(dentry);
-+ inode = d_inode(dentry);
-+ ibstart = au_ibstart(inode);
-+ if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode))
-+ wr_dir_args.force_btgt = ibstart;
-+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
-+ if (unlikely(err < 0))
-+ goto out;
-+ a->btgt = err;
-+ if (err != bstart)
-+ au_fset_icpup(a->flags, DID_CPUP);
-+
-+ err = 0;
-+ a->pin_flags = AuPin_MNT_WRITE;
-+ parent = NULL;
-+ if (!IS_ROOT(dentry)) {
-+ au_fset_pin(a->pin_flags, DI_LOCKED);
-+ parent = dget_parent(dentry);
-+ di_write_lock_parent(parent);
-+ }
-+
-+ err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ a->h_path.dentry = au_h_dptr(dentry, bstart);
-+ sz = -1;
-+ a->h_inode = d_inode(a->h_path.dentry);
-+ if (ia && (ia->ia_valid & ATTR_SIZE)) {
-+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
-+ if (ia->ia_size < i_size_read(a->h_inode))
-+ sz = ia->ia_size;
-+ mutex_unlock(&a->h_inode->i_mutex);
-+ }
-+
-+ hi_wh = NULL;
-+ if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
-+ hi_wh = au_hi_wh(inode, a->btgt);
-+ if (!hi_wh) {
-+ struct au_cp_generic cpg = {
-+ .dentry = dentry,
-+ .bdst = a->btgt,
-+ .bsrc = -1,
-+ .len = sz,
-+ .pin = &a->pin
-+ };
-+ err = au_sio_cpup_wh(&cpg, /*file*/NULL);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ hi_wh = au_hi_wh(inode, a->btgt);
-+ /* todo: revalidate hi_wh? */
-+ }
-+ }
-+
-+ if (parent) {
-+ au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
-+ di_downgrade_lock(parent, AuLock_IR);
-+ dput(parent);
-+ parent = NULL;
-+ }
-+ if (!au_ftest_icpup(a->flags, DID_CPUP))
-+ goto out; /* success */
-+
-+ if (!d_unhashed(dentry)) {
-+ struct au_cp_generic cpg = {
-+ .dentry = dentry,
-+ .bdst = a->btgt,
-+ .bsrc = bstart,
-+ .len = sz,
-+ .pin = &a->pin,
-+ .flags = AuCpup_DTIME | AuCpup_HOPEN
-+ };
-+ err = au_sio_cpup_simple(&cpg);
-+ if (!err)
-+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
-+ } else if (!hi_wh)
-+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
-+ else
-+ a->h_path.dentry = hi_wh; /* do not dget here */
-+
-+out_unlock:
-+ a->h_inode = d_inode(a->h_path.dentry);
-+ if (!err)
-+ goto out; /* success */
-+ au_unpin(&a->pin);
-+out_parent:
-+ if (parent) {
-+ di_write_unlock(parent);
-+ dput(parent);
-+ }
-+out:
-+ if (!err)
-+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
-+ return err;
-+}
-+
-+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
-+{
-+ int err;
-+ struct inode *inode, *delegated;
-+ struct super_block *sb;
-+ struct file *file;
-+ struct au_icpup_args *a;
-+
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+
-+ err = -ENOMEM;
-+ a = kzalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
-+ ia->ia_valid &= ~ATTR_MODE;
-+
-+ file = NULL;
-+ sb = dentry->d_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out_kfree;
-+
-+ if (ia->ia_valid & ATTR_FILE) {
-+ /* currently ftruncate(2) only */
-+ AuDebugOn(!d_is_reg(dentry));
-+ file = ia->ia_file;
-+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
-+ if (unlikely(err))
-+ goto out_si;
-+ ia->ia_file = au_hf_top(file);
-+ a->udba = AuOpt_UDBA_NONE;
-+ } else {
-+ /* fchmod() doesn't pass ia_file */
-+ a->udba = au_opt_udba(sb);
-+ di_write_lock_child(dentry);
-+ /* no d_unlinked(), to set UDBA_NONE for root */
-+ if (d_unhashed(dentry))
-+ a->udba = AuOpt_UDBA_NONE;
-+ if (a->udba != AuOpt_UDBA_NONE) {
-+ AuDebugOn(IS_ROOT(dentry));
-+ err = au_reval_for_attr(dentry, au_sigen(sb));
-+ if (unlikely(err))
-+ goto out_dentry;
-+ }
-+ }
-+
-+ err = au_pin_and_icpup(dentry, ia, a);
-+ if (unlikely(err < 0))
-+ goto out_dentry;
-+ if (au_ftest_icpup(a->flags, DID_CPUP)) {
-+ ia->ia_file = NULL;
-+ ia->ia_valid &= ~ATTR_FILE;
-+ }
-+
-+ a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
-+ if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
-+ == (ATTR_MODE | ATTR_CTIME)) {
-+ err = security_path_chmod(&a->h_path, ia->ia_mode);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
-+ && (ia->ia_valid & ATTR_CTIME)) {
-+ err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ }
-+
-+ if (ia->ia_valid & ATTR_SIZE) {
-+ struct file *f;
-+
-+ if (ia->ia_size < i_size_read(inode))
-+ /* unmap only */
-+ truncate_setsize(inode, ia->ia_size);
-+
-+ f = NULL;
-+ if (ia->ia_valid & ATTR_FILE)
-+ f = ia->ia_file;
-+ mutex_unlock(&a->h_inode->i_mutex);
-+ err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
-+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
-+ } else {
-+ delegated = NULL;
-+ while (1) {
-+ err = vfsub_notify_change(&a->h_path, ia, &delegated);
-+ if (delegated) {
-+ err = break_deleg_wait(&delegated);
-+ if (!err)
-+ continue;
-+ }
-+ break;
-+ }
-+ }
-+ if (!err)
-+ au_cpup_attr_changeable(inode);
-+
-+out_unlock:
-+ mutex_unlock(&a->h_inode->i_mutex);
-+ au_unpin(&a->pin);
-+ if (unlikely(err))
-+ au_update_dbstart(dentry);
-+out_dentry:
-+ di_write_unlock(dentry);
-+ if (file) {
-+ fi_write_unlock(file);
-+ ia->ia_file = file;
-+ ia->ia_valid |= ATTR_FILE;
-+ }
-+out_si:
-+ si_read_unlock(sb);
-+out_kfree:
-+ kfree(a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
-+static int au_h_path_to_set_attr(struct dentry *dentry,
-+ struct au_icpup_args *a, struct path *h_path)
-+{
-+ int err;
-+ struct super_block *sb;
-+
-+ sb = dentry->d_sb;
-+ a->udba = au_opt_udba(sb);
-+ /* no d_unlinked(), to set UDBA_NONE for root */
-+ if (d_unhashed(dentry))
-+ a->udba = AuOpt_UDBA_NONE;
-+ if (a->udba != AuOpt_UDBA_NONE) {
-+ AuDebugOn(IS_ROOT(dentry));
-+ err = au_reval_for_attr(dentry, au_sigen(sb));
-+ if (unlikely(err))
-+ goto out;
-+ }
-+ err = au_pin_and_icpup(dentry, /*ia*/NULL, a);
-+ if (unlikely(err < 0))
-+ goto out;
-+
-+ h_path->dentry = a->h_path.dentry;
-+ h_path->mnt = au_sbr_mnt(sb, a->btgt);
-+
-+out:
-+ return err;
-+}
-+
-+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg)
-+{
-+ int err;
-+ struct path h_path;
-+ struct super_block *sb;
-+ struct au_icpup_args *a;
-+ struct inode *inode, *h_inode;
-+
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+
-+ err = -ENOMEM;
-+ a = kzalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out_kfree;
-+
-+ h_path.dentry = NULL; /* silence gcc */
-+ di_write_lock_child(dentry);
-+ err = au_h_path_to_set_attr(dentry, a, &h_path);
-+ if (unlikely(err))
-+ goto out_di;
-+
-+ mutex_unlock(&a->h_inode->i_mutex);
-+ switch (arg->type) {
-+ case AU_XATTR_SET:
-+ err = vfsub_setxattr(h_path.dentry,
-+ arg->u.set.name, arg->u.set.value,
-+ arg->u.set.size, arg->u.set.flags);
-+ break;
-+ case AU_XATTR_REMOVE:
-+ err = vfsub_removexattr(h_path.dentry, arg->u.remove.name);
-+ break;
-+ case AU_ACL_SET:
-+ err = -EOPNOTSUPP;
-+ h_inode = d_inode(h_path.dentry);
-+ if (h_inode->i_op->set_acl)
-+ err = h_inode->i_op->set_acl(h_inode,
-+ arg->u.acl_set.acl,
-+ arg->u.acl_set.type);
-+ break;
-+ }
-+ if (!err)
-+ au_cpup_attr_timesizes(inode);
-+
-+ au_unpin(&a->pin);
-+ if (unlikely(err))
-+ au_update_dbstart(dentry);
-+
-+out_di:
-+ di_write_unlock(dentry);
-+ si_read_unlock(sb);
-+out_kfree:
-+ kfree(a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+#endif
-+
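-+/* copy the lower attributes in @st into @inode, adjusting a dir's nlink */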
-+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
-+ unsigned int nlink)
-+{
-+ unsigned int n;
-+
-+ inode->i_mode = st->mode;
-+ /* don't i_[ug]id_write() here */
-+ inode->i_uid = st->uid;
-+ inode->i_gid = st->gid;
-+ inode->i_atime = st->atime;
-+ inode->i_mtime = st->mtime;
-+ inode->i_ctime = st->ctime;
-+
-+ au_cpup_attr_nlink(inode, /*force*/0);
-+ if (S_ISDIR(inode->i_mode)) {
-+ n = inode->i_nlink;
-+ n -= nlink;
-+ n += st->nlink;
-+ smp_mb(); /* for i_nlink */
-+ /* 0 can happen */
-+ set_nlink(inode, n);
-+ }
-+
-+ spin_lock(&inode->i_lock);
-+ inode->i_blocks = st->blocks;
-+ i_size_write(inode, st->size);
-+ spin_unlock(&inode->i_lock);
-+}
-+
-+/*
-+ * common routine for aufs_getattr() and aufs_getxattr().
-+ * returns zero or negative (an error).
-+ * @dentry will be read-locked on success.
-+ */
-+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path)
-+{
-+ int err;
-+ unsigned int mnt_flags, sigen;
-+ unsigned char udba_none;
-+ aufs_bindex_t bindex;
-+ struct super_block *sb, *h_sb;
-+ struct inode *inode;
-+
-+ h_path->mnt = NULL;
-+ h_path->dentry = NULL;
-+
-+ err = 0;
-+ sb = dentry->d_sb;
-+ mnt_flags = au_mntflags(sb);
-+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
-+
-+ /* support fstat(2) */
-+ if (!d_unlinked(dentry) && !udba_none) {
-+ sigen = au_sigen(sb);
-+ err = au_digen_test(dentry, sigen);
-+ if (!err) {
-+ di_read_lock_child(dentry, AuLock_IR);
-+ err = au_dbrange_test(dentry);
-+ if (unlikely(err)) {
-+ di_read_unlock(dentry, AuLock_IR);
-+ goto out;
-+ }
-+ } else {
-+ AuDebugOn(IS_ROOT(dentry));
-+ di_write_lock_child(dentry);
-+ err = au_dbrange_test(dentry);
-+ if (!err)
-+ err = au_reval_for_attr(dentry, sigen);
-+ if (!err)
-+ di_downgrade_lock(dentry, AuLock_IR);
-+ else {
-+ di_write_unlock(dentry);
-+ goto out;
-+ }
-+ }
-+ } else
-+ di_read_lock_child(dentry, AuLock_IR);
-+
-+ inode = d_inode(dentry);
-+ bindex = au_ibstart(inode);
-+ h_path->mnt = au_sbr_mnt(sb, bindex);
-+ h_sb = h_path->mnt->mnt_sb;
-+ if (!force
-+ && !au_test_fs_bad_iattr(h_sb)
-+ && udba_none)
-+ goto out; /* success */
-+
-+ if (au_dbstart(dentry) == bindex)
-+ h_path->dentry = au_h_dptr(dentry, bindex);
-+ else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
-+ h_path->dentry = au_plink_lkup(inode, bindex);
-+ if (IS_ERR(h_path->dentry))
-+ /* pretending success */
-+ h_path->dentry = NULL;
-+ else
-+ dput(h_path->dentry);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int aufs_getattr(struct vfsmount *mnt __maybe_unused,
-+ struct dentry *dentry, struct kstat *st)
-+{
-+ int err;
-+ unsigned char positive;
-+ struct path h_path;
-+ struct inode *inode;
-+ struct super_block *sb;
-+
-+ inode = d_inode(dentry);
-+ sb = dentry->d_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_h_path_getattr(dentry, /*force*/0, &h_path);
-+ if (unlikely(err))
-+ goto out_si;
-+ if (unlikely(!h_path.dentry))
-+ /* illegally overlapped or something */
-+ goto out_fill; /* pretending success */
-+
-+ positive = d_is_positive(h_path.dentry);
-+ if (positive)
-+ err = vfs_getattr(&h_path, st);
-+ if (!err) {
-+ if (positive)
-+ au_refresh_iattr(inode, st,
-+ d_inode(h_path.dentry)->i_nlink);
-+ goto out_fill; /* success */
-+ }
-+ AuTraceErr(err);
-+ goto out_di;
-+
-+out_fill:
-+ generic_fillattr(inode, st);
-+out_di:
-+ di_read_unlock(dentry, AuLock_IR);
-+out_si:
-+ si_read_unlock(sb);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int h_readlink(struct dentry *dentry, int bindex, char __user *buf,
-+ int bufsiz)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct dentry *h_dentry;
-+ struct inode *inode, *h_inode;
-+
-+ err = -EINVAL;
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ h_inode = d_inode(h_dentry);
-+ if (unlikely(!h_inode->i_op->readlink))
-+ goto out;
-+
-+ err = security_inode_readlink(h_dentry);
-+ if (unlikely(err))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ inode = d_inode(dentry);
-+ if (!au_test_ro(sb, bindex, inode)) {
-+ vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry);
-+ fsstack_copy_attr_atime(inode, h_inode);
-+ }
-+ err = h_inode->i_op->readlink(h_dentry, buf, bufsiz);
-+
-+out:
-+ return err;
-+}
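-+
-+/*
-+ * note: both aufs_readlink() and aufs_follow_link() below funnel into
-+ * h_readlink(); the latter passes a kernel page as the "user" buffer by
-+ * temporarily widening the address limit via set_fs(KERNEL_DS).
-+ */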
-+
-+static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
-+{
-+ int err;
-+
-+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_d_hashed_positive(dentry);
-+ if (!err)
-+ err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz);
-+ aufs_read_unlock(dentry, AuLock_IR);
-+
-+out:
-+ return err;
-+}
-+
-+static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd)
-+{
-+ int err;
-+ mm_segment_t old_fs;
-+ union {
-+ char *k;
-+ char __user *u;
-+ } buf;
-+
-+ err = -ENOMEM;
-+ buf.k = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!buf.k))
-+ goto out;
-+
-+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_name;
-+
-+ err = au_d_hashed_positive(dentry);
-+ if (!err) {
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+ err = h_readlink(dentry, au_dbstart(dentry), buf.u, PATH_MAX);
-+ set_fs(old_fs);
-+ }
-+ aufs_read_unlock(dentry, AuLock_IR);
-+
-+ if (err >= 0) {
-+ buf.k[err] = 0;
-+ /* will be freed by put_link */
-+ nd_set_link(nd, buf.k);
-+ return NULL; /* success */
-+ }
-+
-+out_name:
-+ free_page((unsigned long)buf.k);
-+out:
-+ AuTraceErr(err);
-+ return ERR_PTR(err);
-+}
-+
-+static void aufs_put_link(struct dentry *dentry __maybe_unused,
-+ struct nameidata *nd, void *cookie __maybe_unused)
-+{
-+ char *p;
-+
-+ p = nd_get_link(nd);
-+ if (!IS_ERR_OR_NULL(p))
-+ free_page((unsigned long)p);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_update_time(struct inode *inode, struct timespec *ts, int flags)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct inode *h_inode;
-+
-+ sb = inode->i_sb;
-+ /* mmap_sem might be acquired already, cf. aufs_mmap() */
-+ lockdep_off();
-+ si_read_lock(sb, AuLock_FLUSH);
-+ ii_write_lock_child(inode);
-+ lockdep_on();
-+ h_inode = au_h_iptr(inode, au_ibstart(inode));
-+ err = vfsub_update_time(h_inode, ts, flags);
-+ lockdep_off();
-+ if (!err)
-+ au_cpup_attr_timesizes(inode);
-+ ii_write_unlock(inode);
-+ si_read_unlock(sb);
-+ lockdep_on();
-+
-+ if (!err && (flags & S_VERSION))
-+ inode_inc_iversion(inode);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct inode_operations aufs_symlink_iop = {
-+ .permission = aufs_permission,
-+#ifdef CONFIG_FS_POSIX_ACL
-+ .get_acl = aufs_get_acl,
-+	.set_acl	= aufs_set_acl, /* unsupported for symlinks? */
-+#endif
-+
-+ .setattr = aufs_setattr,
-+ .getattr = aufs_getattr,
-+
-+#ifdef CONFIG_AUFS_XATTR
-+ .setxattr = aufs_setxattr,
-+ .getxattr = aufs_getxattr,
-+ .listxattr = aufs_listxattr,
-+ .removexattr = aufs_removexattr,
-+#endif
-+
-+ .readlink = aufs_readlink,
-+ .follow_link = aufs_follow_link,
-+ .put_link = aufs_put_link,
-+
-+ /* .update_time = aufs_update_time */
-+};
-+
-+struct inode_operations aufs_dir_iop = {
-+ .create = aufs_create,
-+ .lookup = aufs_lookup,
-+ .link = aufs_link,
-+ .unlink = aufs_unlink,
-+ .symlink = aufs_symlink,
-+ .mkdir = aufs_mkdir,
-+ .rmdir = aufs_rmdir,
-+ .mknod = aufs_mknod,
-+ .rename = aufs_rename,
-+
-+ .permission = aufs_permission,
-+#ifdef CONFIG_FS_POSIX_ACL
-+ .get_acl = aufs_get_acl,
-+ .set_acl = aufs_set_acl,
-+#endif
-+
-+ .setattr = aufs_setattr,
-+ .getattr = aufs_getattr,
-+
-+#ifdef CONFIG_AUFS_XATTR
-+ .setxattr = aufs_setxattr,
-+ .getxattr = aufs_getxattr,
-+ .listxattr = aufs_listxattr,
-+ .removexattr = aufs_removexattr,
-+#endif
-+
-+ .update_time = aufs_update_time,
-+ .atomic_open = aufs_atomic_open,
-+ .tmpfile = aufs_tmpfile
-+};
-+
-+struct inode_operations aufs_iop = {
-+ .permission = aufs_permission,
-+#ifdef CONFIG_FS_POSIX_ACL
-+ .get_acl = aufs_get_acl,
-+ .set_acl = aufs_set_acl,
-+#endif
-+
-+ .setattr = aufs_setattr,
-+ .getattr = aufs_getattr,
-+
-+#ifdef CONFIG_AUFS_XATTR
-+ .setxattr = aufs_setxattr,
-+ .getxattr = aufs_getxattr,
-+ .listxattr = aufs_listxattr,
-+ .removexattr = aufs_removexattr,
-+#endif
-+
-+ .update_time = aufs_update_time
-+};
-diff -Nur linux-4.1.10.orig/fs/aufs/i_op_del.c linux-4.1.10/fs/aufs/i_op_del.c
---- linux-4.1.10.orig/fs/aufs/i_op_del.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/i_op_del.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,510 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode operations (del entry)
-+ */
-+
-+#include "aufs.h"
-+
-+/*
-+ * decide if a new whiteout for @dentry is necessary or not.
-+ * when it is necessary, prepare the parent dir for the upper branch whose
-+ * branch index is @bcpup for creation. the actual creation of the whiteout
-+ * will be done by the caller.
-+ * return value:
-+ * 0: wh is unnecessary
-+ * plus: wh is necessary
-+ * minus: error
-+ */
-+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
-+{
-+ int need_wh, err;
-+ aufs_bindex_t bstart;
-+ struct super_block *sb;
-+
-+ sb = dentry->d_sb;
-+ bstart = au_dbstart(dentry);
-+ if (*bcpup < 0) {
-+ *bcpup = bstart;
-+ if (au_test_ro(sb, bstart, d_inode(dentry))) {
-+ err = AuWbrCopyup(au_sbi(sb), dentry);
-+ *bcpup = err;
-+ if (unlikely(err < 0))
-+ goto out;
-+ }
-+ } else
-+ AuDebugOn(bstart < *bcpup
-+ || au_test_ro(sb, *bcpup, d_inode(dentry)));
-+ AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart);
-+
-+ if (*bcpup != bstart) {
-+ err = au_cpup_dirs(dentry, *bcpup);
-+ if (unlikely(err))
-+ goto out;
-+ need_wh = 1;
-+ } else {
-+ struct au_dinfo *dinfo, *tmp;
-+
-+ need_wh = -ENOMEM;
-+ dinfo = au_di(dentry);
-+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
-+ if (tmp) {
-+ au_di_cp(tmp, dinfo);
-+ au_di_swap(tmp, dinfo);
-+ /* returns the number of positive dentries */
-+ need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0);
-+ au_di_swap(tmp, dinfo);
-+ au_rw_write_unlock(&tmp->di_rwsem);
-+ au_di_free(tmp);
-+ }
-+ }
-+ AuDbg("need_wh %d\n", need_wh);
-+ err = need_wh;
-+
-+out:
-+ return err;
-+}
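-+
-+/*
-+ * a minimal usage sketch of the tri-state result above (illustrative, not
-+ * part of the original code); cf. lock_hdir_create_wh() below, which
-+ * follows the same pattern:
-+ *
-+ *	need_wh = au_wr_dir_need_wh(dentry, isdir, &bcpup);
-+ *	if (need_wh < 0)
-+ *		return ERR_PTR(need_wh);	// minus: error
-+ *	if (need_wh)				// plus: whiteout needed
-+ *		wh_dentry = au_wh_create(dentry, bcpup, h_parent);
-+ */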
-+
-+/*
-+ * simple tests for the del-entry operations.
-+ * following the checks in vfs, plus the parent-child relationship.
-+ */
-+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent, int isdir)
-+{
-+ int err;
-+ umode_t h_mode;
-+ struct dentry *h_dentry, *h_latest;
-+ struct inode *h_inode;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (d_really_is_positive(dentry)) {
-+ err = -ENOENT;
-+ if (unlikely(d_is_negative(h_dentry)))
-+ goto out;
-+ h_inode = d_inode(h_dentry);
-+ if (unlikely(!h_inode->i_nlink))
-+ goto out;
-+
-+ h_mode = h_inode->i_mode;
-+ if (!isdir) {
-+ err = -EISDIR;
-+ if (unlikely(S_ISDIR(h_mode)))
-+ goto out;
-+ } else if (unlikely(!S_ISDIR(h_mode))) {
-+ err = -ENOTDIR;
-+ goto out;
-+ }
-+ } else {
-+ /* rename(2) case */
-+ err = -EIO;
-+ if (unlikely(d_is_positive(h_dentry)))
-+ goto out;
-+ }
-+
-+ err = -ENOENT;
-+ /* expected parent dir is locked */
-+ if (unlikely(h_parent != h_dentry->d_parent))
-+ goto out;
-+ err = 0;
-+
-+ /*
-+ * rmdir on a dir may break the consistency on some filesystems.
-+ * let's try a heavy test.
-+ */
-+ err = -EACCES;
-+ if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)
-+ && au_test_h_perm(d_inode(h_parent),
-+ MAY_EXEC | MAY_WRITE)))
-+ goto out;
-+
-+ h_latest = au_sio_lkup_one(&dentry->d_name, h_parent);
-+ err = -EIO;
-+ if (IS_ERR(h_latest))
-+ goto out;
-+ if (h_latest == h_dentry)
-+ err = 0;
-+ dput(h_latest);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * decide the branch where we operate for @dentry. the branch index will be
-+ * set in @rbcpup. after deciding it, 'pin' it and store the timestamps of the
-+ * parent dir for reverting.
-+ * when a new whiteout is necessary, create it.
-+ */
-+static struct dentry*
-+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
-+ struct au_dtime *dt, struct au_pin *pin)
-+{
-+ struct dentry *wh_dentry;
-+ struct super_block *sb;
-+ struct path h_path;
-+ int err, need_wh;
-+ unsigned int udba;
-+ aufs_bindex_t bcpup;
-+
-+ need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
-+ wh_dentry = ERR_PTR(need_wh);
-+ if (unlikely(need_wh < 0))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ udba = au_opt_udba(sb);
-+ bcpup = *rbcpup;
-+ err = au_pin(pin, dentry, bcpup, udba,
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ wh_dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ h_path.dentry = au_pinned_h_parent(pin);
-+ if (udba != AuOpt_UDBA_NONE
-+ && au_dbstart(dentry) == bcpup) {
-+ err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
-+ wh_dentry = ERR_PTR(err);
-+ if (unlikely(err))
-+ goto out_unpin;
-+ }
-+
-+ h_path.mnt = au_sbr_mnt(sb, bcpup);
-+ au_dtime_store(dt, au_pinned_parent(pin), &h_path);
-+ wh_dentry = NULL;
-+ if (!need_wh)
-+ goto out; /* success, no need to create whiteout */
-+
-+ wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_unpin;
-+
-+	/* returns with the parent locked and wh_dentry dget-ed */
-+ goto out; /* success */
-+
-+out_unpin:
-+ au_unpin(pin);
-+out:
-+ return wh_dentry;
-+}
-+
-+/*
-+ * when removing a dir, rename it to a unique temporary whiteout-ed name first
-+ * in order to be revertible and save time for removing many child whiteouts
-+ * under the dir.
-+ * returns 1 when there are too many child whiteouts and the caller should
-+ * remove them asynchronously. returns 0 when the number of children is small
-+ * enough to remove now, or the branch fs is a remote fs.
-+ * otherwise returns an error.
-+ */
-+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct au_nhash *whlist, struct inode *dir)
-+{
-+ int rmdir_later, err, dirwh;
-+ struct dentry *h_dentry;
-+ struct super_block *sb;
-+ struct inode *inode;
-+
-+ sb = dentry->d_sb;
-+ SiMustAnyLock(sb);
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
-+ if (unlikely(err))
-+ goto out;
-+
-+ /* stop monitoring */
-+ inode = d_inode(dentry);
-+ au_hn_free(au_hi(inode, bindex));
-+
-+ if (!au_test_fs_remote(h_dentry->d_sb)) {
-+ dirwh = au_sbi(sb)->si_dirwh;
-+ rmdir_later = (dirwh <= 1);
-+ if (!rmdir_later)
-+ rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
-+ dirwh);
-+ if (rmdir_later)
-+ return rmdir_later;
-+ }
-+
-+ err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
-+ if (unlikely(err)) {
-+ AuIOErr("rmdir %pd, b%d failed, %d. ignored\n",
-+ h_dentry, bindex, err);
-+ err = 0;
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/*
-+ * final procedure for deleting an entry.
-+ * maintain dentry and iattr.
-+ */
-+static void epilog(struct inode *dir, struct dentry *dentry,
-+ aufs_bindex_t bindex)
-+{
-+ struct inode *inode;
-+
-+ inode = d_inode(dentry);
-+ d_drop(dentry);
-+ inode->i_ctime = dir->i_ctime;
-+
-+ au_dir_ts(dir, bindex);
-+ dir->i_version++;
-+}
-+
-+/*
-+ * when an error happened, remove the created whiteout and revert everything.
-+ */
-+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
-+ aufs_bindex_t bwh, struct dentry *wh_dentry,
-+ struct dentry *dentry, struct au_dtime *dt)
-+{
-+ int rerr;
-+ struct path h_path = {
-+ .dentry = wh_dentry,
-+ .mnt = au_sbr_mnt(dir->i_sb, bindex)
-+ };
-+
-+ rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
-+ if (!rerr) {
-+ au_set_dbwh(dentry, bwh);
-+ au_dtime_revert(dt);
-+ return 0;
-+ }
-+
-+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr);
-+ return -EIO;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int aufs_unlink(struct inode *dir, struct dentry *dentry)
-+{
-+ int err;
-+ aufs_bindex_t bwh, bindex, bstart;
-+ struct inode *inode, *h_dir, *delegated;
-+ struct dentry *parent, *wh_dentry;
-+	/* to reduce stack size */
-+ struct {
-+ struct au_dtime dt;
-+ struct au_pin pin;
-+ struct path h_path;
-+ } *a;
-+
-+ IMustLock(dir);
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_free;
-+ err = au_d_hashed_positive(dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+ err = -EISDIR;
-+ if (unlikely(d_is_dir(dentry)))
-+ goto out_unlock; /* possible? */
-+
-+ bstart = au_dbstart(dentry);
-+ bwh = au_dbwh(dentry);
-+ bindex = -1;
-+ parent = dentry->d_parent; /* dir inode is locked */
-+ di_write_lock_parent(parent);
-+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
-+ &a->pin);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_parent;
-+
-+ a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
-+ a->h_path.dentry = au_h_dptr(dentry, bstart);
-+ dget(a->h_path.dentry);
-+ if (bindex == bstart) {
-+ h_dir = au_pinned_h_dir(&a->pin);
-+ delegated = NULL;
-+ err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ } else {
-+ /* dir inode is locked */
-+ h_dir = d_inode(wh_dentry->d_parent);
-+ IMustLock(h_dir);
-+ err = 0;
-+ }
-+
-+ if (!err) {
-+ vfsub_drop_nlink(inode);
-+ epilog(dir, dentry, bindex);
-+
-+ /* update target timestamps */
-+ if (bindex == bstart) {
-+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
-+ /*ignore*/
-+ inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
-+ } else
-+ /* todo: this timestamp may be reverted later */
-+ inode->i_ctime = h_dir->i_ctime;
-+ goto out_unpin; /* success */
-+ }
-+
-+ /* revert */
-+ if (wh_dentry) {
-+ int rerr;
-+
-+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
-+ &a->dt);
-+ if (rerr)
-+ err = rerr;
-+ }
-+
-+out_unpin:
-+ au_unpin(&a->pin);
-+ dput(wh_dentry);
-+ dput(a->h_path.dentry);
-+out_parent:
-+ di_write_unlock(parent);
-+out_unlock:
-+ aufs_read_unlock(dentry, AuLock_DW);
-+out_free:
-+ kfree(a);
-+out:
-+ return err;
-+}
-+
-+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
-+{
-+ int err, rmdir_later;
-+ aufs_bindex_t bwh, bindex, bstart;
-+ struct inode *inode;
-+ struct dentry *parent, *wh_dentry, *h_dentry;
-+ struct au_whtmp_rmdir *args;
-+	/* to reduce stack size */
-+ struct {
-+ struct au_dtime dt;
-+ struct au_pin pin;
-+ } *a;
-+
-+ IMustLock(dir);
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
-+ if (unlikely(err))
-+ goto out_free;
-+ err = au_alive_dir(dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ inode = d_inode(dentry);
-+ IMustLock(inode);
-+ err = -ENOTDIR;
-+ if (unlikely(!d_is_dir(dentry)))
-+ goto out_unlock; /* possible? */
-+
-+ err = -ENOMEM;
-+ args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
-+ if (unlikely(!args))
-+ goto out_unlock;
-+
-+ parent = dentry->d_parent; /* dir inode is locked */
-+ di_write_lock_parent(parent);
-+ err = au_test_empty(dentry, &args->whlist);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ bstart = au_dbstart(dentry);
-+ bwh = au_dbwh(dentry);
-+ bindex = -1;
-+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
-+ &a->pin);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry))
-+ goto out_parent;
-+
-+ h_dentry = au_h_dptr(dentry, bstart);
-+ dget(h_dentry);
-+ rmdir_later = 0;
-+ if (bindex == bstart) {
-+ err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
-+ if (err > 0) {
-+ rmdir_later = err;
-+ err = 0;
-+ }
-+ } else {
-+ /* stop monitoring */
-+ au_hn_free(au_hi(inode, bstart));
-+
-+ /* dir inode is locked */
-+ IMustLock(d_inode(wh_dentry->d_parent));
-+ err = 0;
-+ }
-+
-+ if (!err) {
-+ vfsub_dead_dir(inode);
-+ au_set_dbdiropq(dentry, -1);
-+ epilog(dir, dentry, bindex);
-+
-+ if (rmdir_later) {
-+ au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
-+ args = NULL;
-+ }
-+
-+ goto out_unpin; /* success */
-+ }
-+
-+ /* revert */
-+ AuLabel(revert);
-+ if (wh_dentry) {
-+ int rerr;
-+
-+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
-+ &a->dt);
-+ if (rerr)
-+ err = rerr;
-+ }
-+
-+out_unpin:
-+ au_unpin(&a->pin);
-+ dput(wh_dentry);
-+ dput(h_dentry);
-+out_parent:
-+ di_write_unlock(parent);
-+ if (args)
-+ au_whtmp_rmdir_free(args);
-+out_unlock:
-+ aufs_read_unlock(dentry, AuLock_DW);
-+out_free:
-+ kfree(a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/i_op_ren.c linux-4.1.10/fs/aufs/i_op_ren.c
---- linux-4.1.10.orig/fs/aufs/i_op_ren.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/i_op_ren.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1017 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * inode operation (rename entry)
-+ * todo: this is a crazy monster
-+ */
-+
-+#include "aufs.h"
-+
-+enum { AuSRC, AuDST, AuSrcDst };
-+enum { AuPARENT, AuCHILD, AuParentChild };
-+
-+#define AuRen_ISDIR 1
-+#define AuRen_ISSAMEDIR (1 << 1)
-+#define AuRen_WHSRC (1 << 2)
-+#define AuRen_WHDST (1 << 3)
-+#define AuRen_MNT_WRITE (1 << 4)
-+#define AuRen_DT_DSTDIR (1 << 5)
-+#define AuRen_DIROPQ (1 << 6)
-+#define au_ftest_ren(flags, name) ((flags) & AuRen_##name)
-+#define au_fset_ren(flags, name) \
-+ do { (flags) |= AuRen_##name; } while (0)
-+#define au_fclr_ren(flags, name) \
-+ do { (flags) &= ~AuRen_##name; } while (0)
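-+
-+/*
-+ * usage sketch for the flag helpers above (illustrative, not part of the
-+ * original code):
-+ *
-+ *	au_fset_ren(a->flags, ISDIR);		// set AuRen_ISDIR
-+ *	if (au_ftest_ren(a->flags, ISDIR))	// test it
-+ *		...;
-+ *	au_fclr_ren(a->flags, ISDIR);		// clear it
-+ */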
-+
-+struct au_ren_args {
-+ struct {
-+ struct dentry *dentry, *h_dentry, *parent, *h_parent,
-+ *wh_dentry;
-+ struct inode *dir, *inode;
-+ struct au_hinode *hdir;
-+ struct au_dtime dt[AuParentChild];
-+ aufs_bindex_t bstart;
-+ } sd[AuSrcDst];
-+
-+#define src_dentry sd[AuSRC].dentry
-+#define src_dir sd[AuSRC].dir
-+#define src_inode sd[AuSRC].inode
-+#define src_h_dentry sd[AuSRC].h_dentry
-+#define src_parent sd[AuSRC].parent
-+#define src_h_parent sd[AuSRC].h_parent
-+#define src_wh_dentry sd[AuSRC].wh_dentry
-+#define src_hdir sd[AuSRC].hdir
-+#define src_h_dir sd[AuSRC].hdir->hi_inode
-+#define src_dt sd[AuSRC].dt
-+#define src_bstart sd[AuSRC].bstart
-+
-+#define dst_dentry sd[AuDST].dentry
-+#define dst_dir sd[AuDST].dir
-+#define dst_inode sd[AuDST].inode
-+#define dst_h_dentry sd[AuDST].h_dentry
-+#define dst_parent sd[AuDST].parent
-+#define dst_h_parent sd[AuDST].h_parent
-+#define dst_wh_dentry sd[AuDST].wh_dentry
-+#define dst_hdir sd[AuDST].hdir
-+#define dst_h_dir sd[AuDST].hdir->hi_inode
-+#define dst_dt sd[AuDST].dt
-+#define dst_bstart sd[AuDST].bstart
-+
-+ struct dentry *h_trap;
-+ struct au_branch *br;
-+ struct au_hinode *src_hinode;
-+ struct path h_path;
-+ struct au_nhash whlist;
-+ aufs_bindex_t btgt, src_bwh, src_bdiropq;
-+
-+ unsigned int flags;
-+
-+ struct au_whtmp_rmdir *thargs;
-+ struct dentry *h_dst;
-+};
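-+
-+/*
-+ * the src_ and dst_ macros above simply index sd[AuSRC] and sd[AuDST], so
-+ * the rest of this file can address either side of the rename
-+ * symmetrically.
-+ */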
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * functions for reverting.
-+ * when an error happens in a single rename system call, we should revert
-+ * everything as if nothing had happened.
-+ * we don't need to revert the copy-up/down of the parent dir since it is
-+ * harmless.
-+ */
-+
-+#define RevertFailure(fmt, ...) do { \
-+ AuIOErr("revert failure: " fmt " (%d, %d)\n", \
-+ ##__VA_ARGS__, err, rerr); \
-+ err = -EIO; \
-+} while (0)
-+
-+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
-+{
-+ int rerr;
-+
-+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
-+ rerr = au_diropq_remove(a->src_dentry, a->btgt);
-+ au_hn_imtx_unlock(a->src_hinode);
-+ au_set_dbdiropq(a->src_dentry, a->src_bdiropq);
-+ if (rerr)
-+ RevertFailure("remove diropq %pd", a->src_dentry);
-+}
-+
-+static void au_ren_rev_rename(int err, struct au_ren_args *a)
-+{
-+ int rerr;
-+ struct inode *delegated;
-+
-+ a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name,
-+ a->src_h_parent);
-+ rerr = PTR_ERR(a->h_path.dentry);
-+ if (IS_ERR(a->h_path.dentry)) {
-+ RevertFailure("lkup one %pd", a->src_dentry);
-+ return;
-+ }
-+
-+ delegated = NULL;
-+ rerr = vfsub_rename(a->dst_h_dir,
-+ au_h_dptr(a->src_dentry, a->btgt),
-+ a->src_h_dir, &a->h_path, &delegated);
-+ if (unlikely(rerr == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal rename\n");
-+ iput(delegated);
-+ }
-+ d_drop(a->h_path.dentry);
-+ dput(a->h_path.dentry);
-+ /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
-+ if (rerr)
-+ RevertFailure("rename %pd", a->src_dentry);
-+}
-+
-+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
-+{
-+ int rerr;
-+ struct inode *delegated;
-+
-+ a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name,
-+ a->dst_h_parent);
-+ rerr = PTR_ERR(a->h_path.dentry);
-+ if (IS_ERR(a->h_path.dentry)) {
-+ RevertFailure("lkup one %pd", a->dst_dentry);
-+ return;
-+ }
-+ if (d_is_positive(a->h_path.dentry)) {
-+ d_drop(a->h_path.dentry);
-+ dput(a->h_path.dentry);
-+ return;
-+ }
-+
-+ delegated = NULL;
-+ rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path,
-+ &delegated);
-+ if (unlikely(rerr == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal rename\n");
-+ iput(delegated);
-+ }
-+ d_drop(a->h_path.dentry);
-+ dput(a->h_path.dentry);
-+ if (!rerr)
-+ au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
-+ else
-+ RevertFailure("rename %pd", a->h_dst);
-+}
-+
-+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
-+{
-+ int rerr;
-+
-+ a->h_path.dentry = a->src_wh_dentry;
-+ rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
-+ au_set_dbwh(a->src_dentry, a->src_bwh);
-+ if (rerr)
-+ RevertFailure("unlink %pd", a->src_wh_dentry);
-+}
-+#undef RevertFailure
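-+
-+/*
-+ * the au_ren_rev_* helpers above are invoked from the unwinding path of
-+ * do_rename() below, in the reverse order of the operations they undo.
-+ */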
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * when we have to copyup the renaming entry, do it with the rename-target name
-+ * in order to minimize the cost (the later actual rename is unnecessary).
-+ * otherwise rename it on the target branch.
-+ */
-+static int au_ren_or_cpup(struct au_ren_args *a)
-+{
-+ int err;
-+ struct dentry *d;
-+ struct inode *delegated;
-+
-+ d = a->src_dentry;
-+ if (au_dbstart(d) == a->btgt) {
-+ a->h_path.dentry = a->dst_h_dentry;
-+ if (au_ftest_ren(a->flags, DIROPQ)
-+ && au_dbdiropq(d) == a->btgt)
-+ au_fclr_ren(a->flags, DIROPQ);
-+ AuDebugOn(au_dbstart(d) != a->btgt);
-+ delegated = NULL;
-+ err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
-+ a->dst_h_dir, &a->h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal rename\n");
-+ iput(delegated);
-+ }
-+ } else
-+ BUG();
-+
-+ if (!err && a->h_dst)
-+ /* it will be set to dinfo later */
-+ dget(a->h_dst);
-+
-+ return err;
-+}
-+
-+/* cf. aufs_rmdir() */
-+static int au_ren_del_whtmp(struct au_ren_args *a)
-+{
-+ int err;
-+ struct inode *dir;
-+
-+ dir = a->dst_dir;
-+ SiMustAnyLock(dir->i_sb);
-+ if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
-+ au_sbi(dir->i_sb)->si_dirwh)
-+ || au_test_fs_remote(a->h_dst->d_sb)) {
-+ err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
-+ if (unlikely(err))
-+ pr_warn("failed removing whtmp dir %pd (%d), "
-+ "ignored.\n", a->h_dst, err);
-+ } else {
-+ au_nhash_wh_free(&a->thargs->whlist);
-+ a->thargs->whlist = a->whlist;
-+ a->whlist.nh_num = 0;
-+ au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
-+ dput(a->h_dst);
-+ a->thargs = NULL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* make it 'opaque' dir. */
-+static int au_ren_diropq(struct au_ren_args *a)
-+{
-+ int err;
-+ struct dentry *diropq;
-+
-+ err = 0;
-+ a->src_bdiropq = au_dbdiropq(a->src_dentry);
-+ a->src_hinode = au_hi(a->src_inode, a->btgt);
-+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
-+ diropq = au_diropq_create(a->src_dentry, a->btgt);
-+ au_hn_imtx_unlock(a->src_hinode);
-+ if (IS_ERR(diropq))
-+ err = PTR_ERR(diropq);
-+ else
-+ dput(diropq);
-+
-+ return err;
-+}
-+
-+static int do_rename(struct au_ren_args *a)
-+{
-+ int err;
-+ struct dentry *d, *h_d;
-+
-+ /* prepare workqueue args for asynchronous rmdir */
-+ h_d = a->dst_h_dentry;
-+ if (au_ftest_ren(a->flags, ISDIR) && d_is_positive(h_d)) {
-+ err = -ENOMEM;
-+ a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS);
-+ if (unlikely(!a->thargs))
-+ goto out;
-+ a->h_dst = dget(h_d);
-+ }
-+
-+ /* create whiteout for src_dentry */
-+ if (au_ftest_ren(a->flags, WHSRC)) {
-+ a->src_bwh = au_dbwh(a->src_dentry);
-+ AuDebugOn(a->src_bwh >= 0);
-+ a->src_wh_dentry
-+ = au_wh_create(a->src_dentry, a->btgt, a->src_h_parent);
-+ err = PTR_ERR(a->src_wh_dentry);
-+ if (IS_ERR(a->src_wh_dentry))
-+ goto out_thargs;
-+ }
-+
-+ /* lookup whiteout for dentry */
-+ if (au_ftest_ren(a->flags, WHDST)) {
-+ h_d = au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name,
-+ a->br);
-+ err = PTR_ERR(h_d);
-+ if (IS_ERR(h_d))
-+ goto out_whsrc;
-+ if (d_is_negative(h_d))
-+ dput(h_d);
-+ else
-+ a->dst_wh_dentry = h_d;
-+ }
-+
-+ /* rename dentry to tmpwh */
-+ if (a->thargs) {
-+ err = au_whtmp_ren(a->dst_h_dentry, a->br);
-+ if (unlikely(err))
-+ goto out_whdst;
-+
-+ d = a->dst_dentry;
-+ au_set_h_dptr(d, a->btgt, NULL);
-+ err = au_lkup_neg(d, a->btgt, /*wh*/0);
-+ if (unlikely(err))
-+ goto out_whtmp;
-+ a->dst_h_dentry = au_h_dptr(d, a->btgt);
-+ }
-+
-+ BUG_ON(d_is_positive(a->dst_h_dentry) && a->src_bstart != a->btgt);
-+
-+ /* rename by vfs_rename or cpup */
-+ d = a->dst_dentry;
-+ if (au_ftest_ren(a->flags, ISDIR)
-+ && (a->dst_wh_dentry
-+ || au_dbdiropq(d) == a->btgt
-+ /* hide the lower to keep xino */
-+ || a->btgt < au_dbend(d)
-+ || au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ)))
-+ au_fset_ren(a->flags, DIROPQ);
-+ err = au_ren_or_cpup(a);
-+ if (unlikely(err))
-+ /* leave the copied-up one */
-+ goto out_whtmp;
-+
-+ /* make dir opaque */
-+ if (au_ftest_ren(a->flags, DIROPQ)) {
-+ err = au_ren_diropq(a);
-+ if (unlikely(err))
-+ goto out_rename;
-+ }
-+
-+ /* update target timestamps */
-+ AuDebugOn(au_dbstart(a->src_dentry) != a->btgt);
-+ a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
-+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
-+ a->src_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
-+
-+ /* remove whiteout for dentry */
-+ if (a->dst_wh_dentry) {
-+ a->h_path.dentry = a->dst_wh_dentry;
-+ err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
-+ a->dst_dentry);
-+ if (unlikely(err))
-+ goto out_diropq;
-+ }
-+
-+ /* remove whtmp */
-+ if (a->thargs)
-+ au_ren_del_whtmp(a); /* ignore this error */
-+
-+ au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0);
-+ err = 0;
-+ goto out_success;
-+
-+out_diropq:
-+ if (au_ftest_ren(a->flags, DIROPQ))
-+ au_ren_rev_diropq(err, a);
-+out_rename:
-+ au_ren_rev_rename(err, a);
-+ dput(a->h_dst);
-+out_whtmp:
-+ if (a->thargs)
-+ au_ren_rev_whtmp(err, a);
-+out_whdst:
-+ dput(a->dst_wh_dentry);
-+ a->dst_wh_dentry = NULL;
-+out_whsrc:
-+ if (a->src_wh_dentry)
-+ au_ren_rev_whsrc(err, a);
-+out_success:
-+ dput(a->src_wh_dentry);
-+ dput(a->dst_wh_dentry);
-+out_thargs:
-+ if (a->thargs) {
-+ dput(a->h_dst);
-+ au_whtmp_rmdir_free(a->thargs);
-+ a->thargs = NULL;
-+ }
-+out:
-+ return err;
-+}
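-+
-+/*
-+ * note: the out_success label above is shared by the success path and the
-+ * deeper error paths; in both cases it releases the whiteout dentries and
-+ * then the whtmp args.
-+ */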
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * test if @dentry dir can be a rename destination or not.
-+ * success means it is a logically empty dir.
-+ */
-+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
-+{
-+ return au_test_empty(dentry, whlist);
-+}
-+
-+/*
-+ * test if @dentry dir can be a rename source or not.
-+ * if it can, return 0.
-+ * success means,
-+ * - it is a logically empty dir.
-+ * - or, it exists on the writable branch and has no children, including
-+ *   whiteouts, on the lower branch.
-+ */
-+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt)
-+{
-+ int err;
-+ unsigned int rdhash;
-+ aufs_bindex_t bstart;
-+
-+ bstart = au_dbstart(dentry);
-+ if (bstart != btgt) {
-+ struct au_nhash whlist;
-+
-+ SiMustAnyLock(dentry->d_sb);
-+ rdhash = au_sbi(dentry->d_sb)->si_rdhash;
-+ if (!rdhash)
-+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
-+ dentry));
-+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_test_empty(dentry, &whlist);
-+ au_nhash_wh_free(&whlist);
-+ goto out;
-+ }
-+
-+ if (bstart == au_dbtaildir(dentry))
-+ return 0; /* success */
-+
-+ err = au_test_empty_lower(dentry);
-+
-+out:
-+ if (err == -ENOTEMPTY) {
-+		AuWarn1("renaming a dir which has child(ren) on multiple"
-+			" branches is not supported\n");
-+ err = -EXDEV;
-+ }
-+ return err;
-+}
-+
-+/* side effect: sets whlist and h_dentry */
-+static int au_ren_may_dir(struct au_ren_args *a)
-+{
-+ int err;
-+ unsigned int rdhash;
-+ struct dentry *d;
-+
-+ d = a->dst_dentry;
-+ SiMustAnyLock(d->d_sb);
-+
-+ err = 0;
-+ if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) {
-+ rdhash = au_sbi(d->d_sb)->si_rdhash;
-+ if (!rdhash)
-+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
-+ err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+
-+ au_set_dbstart(d, a->dst_bstart);
-+ err = may_rename_dstdir(d, &a->whlist);
-+ au_set_dbstart(d, a->btgt);
-+ }
-+ a->dst_h_dentry = au_h_dptr(d, au_dbstart(d));
-+ if (unlikely(err))
-+ goto out;
-+
-+ d = a->src_dentry;
-+ a->src_h_dentry = au_h_dptr(d, au_dbstart(d));
-+ if (au_ftest_ren(a->flags, ISDIR)) {
-+ err = may_rename_srcdir(d, a->btgt);
-+ if (unlikely(err)) {
-+ au_nhash_wh_free(&a->whlist);
-+ a->whlist.nh_num = 0;
-+ }
-+ }
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * simple tests for rename.
-+ * following the checks in vfs, plus the parent-child relationship.
-+ */
-+static int au_may_ren(struct au_ren_args *a)
-+{
-+ int err, isdir;
-+ struct inode *h_inode;
-+
-+ if (a->src_bstart == a->btgt) {
-+ err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
-+ au_ftest_ren(a->flags, ISDIR));
-+ if (unlikely(err))
-+ goto out;
-+ err = -EINVAL;
-+ if (unlikely(a->src_h_dentry == a->h_trap))
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (a->dst_bstart != a->btgt)
-+ goto out;
-+
-+ err = -ENOTEMPTY;
-+ if (unlikely(a->dst_h_dentry == a->h_trap))
-+ goto out;
-+
-+ err = -EIO;
-+ isdir = !!au_ftest_ren(a->flags, ISDIR);
-+ if (d_really_is_negative(a->dst_dentry)) {
-+ if (d_is_negative(a->dst_h_dentry))
-+ err = au_may_add(a->dst_dentry, a->btgt,
-+ a->dst_h_parent, isdir);
-+ } else {
-+ if (unlikely(d_is_negative(a->dst_h_dentry)))
-+ goto out;
-+ h_inode = d_inode(a->dst_h_dentry);
-+ if (h_inode->i_nlink)
-+ err = au_may_del(a->dst_dentry, a->btgt,
-+ a->dst_h_parent, isdir);
-+ }
-+
-+out:
-+ if (unlikely(err == -ENOENT || err == -EEXIST))
-+ err = -EIO;
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * locking order
-+ * (VFS)
-+ * - src_dir and dir by lock_rename()
-+ * - inode if it exists
-+ * (aufs)
-+ * - lock all
-+ * + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
-+ * + si_read_lock
-+ * + di_write_lock2_child()
-+ * + di_write_lock_child()
-+ * + ii_write_lock_child()
-+ * + di_write_lock_child2()
-+ * + ii_write_lock_child2()
-+ * + src_parent and parent
-+ * + di_write_lock_parent()
-+ * + ii_write_lock_parent()
-+ * + di_write_lock_parent2()
-+ * + ii_write_lock_parent2()
-+ * + lower src_dir and dir by vfsub_lock_rename()
-+ * + verify every relationship between child and parent. if any
-+ *   of them fails, unlock all and return -EBUSY.
-+ */
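-+
-+/*
-+ * note: on a verification failure, au_ren_lock() below unlocks everything
-+ * itself via au_ren_unlock() and returns the error from
-+ * au_busy_or_stale(), so its caller never sees a half-locked state.
-+ */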
-+static void au_ren_unlock(struct au_ren_args *a)
-+{
-+ vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
-+ a->dst_h_parent, a->dst_hdir);
-+ if (au_ftest_ren(a->flags, MNT_WRITE))
-+ vfsub_mnt_drop_write(au_br_mnt(a->br));
-+}
-+
-+static int au_ren_lock(struct au_ren_args *a)
-+{
-+ int err;
-+ unsigned int udba;
-+
-+ err = 0;
-+ a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
-+ a->src_hdir = au_hi(a->src_dir, a->btgt);
-+ a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
-+ a->dst_hdir = au_hi(a->dst_dir, a->btgt);
-+
-+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
-+ if (unlikely(err))
-+ goto out;
-+ au_fset_ren(a->flags, MNT_WRITE);
-+ a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
-+ a->dst_h_parent, a->dst_hdir);
-+ udba = au_opt_udba(a->src_dentry->d_sb);
-+ if (unlikely(a->src_hdir->hi_inode != d_inode(a->src_h_parent)
-+ || a->dst_hdir->hi_inode != d_inode(a->dst_h_parent)))
-+ err = au_busy_or_stale();
-+ if (!err && au_dbstart(a->src_dentry) == a->btgt)
-+ err = au_h_verify(a->src_h_dentry, udba,
-+ d_inode(a->src_h_parent), a->src_h_parent,
-+ a->br);
-+ if (!err && au_dbstart(a->dst_dentry) == a->btgt)
-+ err = au_h_verify(a->dst_h_dentry, udba,
-+ d_inode(a->dst_h_parent), a->dst_h_parent,
-+ a->br);
-+ if (!err)
-+ goto out; /* success */
-+
-+ err = au_busy_or_stale();
-+ au_ren_unlock(a);
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_ren_refresh_dir(struct au_ren_args *a)
-+{
-+ struct inode *dir;
-+
-+ dir = a->dst_dir;
-+ dir->i_version++;
-+ if (au_ftest_ren(a->flags, ISDIR)) {
-+		/* is this update defined in POSIX? */
-+ au_cpup_attr_timesizes(a->src_inode);
-+ au_cpup_attr_nlink(dir, /*force*/1);
-+ }
-+
-+ au_dir_ts(dir, a->btgt);
-+
-+ if (au_ftest_ren(a->flags, ISSAMEDIR))
-+ return;
-+
-+ dir = a->src_dir;
-+ dir->i_version++;
-+ if (au_ftest_ren(a->flags, ISDIR))
-+ au_cpup_attr_nlink(dir, /*force*/1);
-+ au_dir_ts(dir, a->btgt);
-+}
-+
-+static void au_ren_refresh(struct au_ren_args *a)
-+{
-+ aufs_bindex_t bend, bindex;
-+ struct dentry *d, *h_d;
-+ struct inode *i, *h_i;
-+ struct super_block *sb;
-+
-+ d = a->dst_dentry;
-+ d_drop(d);
-+ if (a->h_dst)
-+ /* already dget-ed by au_ren_or_cpup() */
-+ au_set_h_dptr(d, a->btgt, a->h_dst);
-+
-+ i = a->dst_inode;
-+ if (i) {
-+ if (!au_ftest_ren(a->flags, ISDIR))
-+ vfsub_drop_nlink(i);
-+ else {
-+ vfsub_dead_dir(i);
-+ au_cpup_attr_timesizes(i);
-+ }
-+ au_update_dbrange(d, /*do_put_zero*/1);
-+ } else {
-+ bend = a->btgt;
-+ for (bindex = au_dbstart(d); bindex < bend; bindex++)
-+ au_set_h_dptr(d, bindex, NULL);
-+ bend = au_dbend(d);
-+ for (bindex = a->btgt + 1; bindex <= bend; bindex++)
-+ au_set_h_dptr(d, bindex, NULL);
-+ au_update_dbrange(d, /*do_put_zero*/0);
-+ }
-+
-+ d = a->src_dentry;
-+ au_set_dbwh(d, -1);
-+ bend = au_dbend(d);
-+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
-+ h_d = au_h_dptr(d, bindex);
-+ if (h_d)
-+ au_set_h_dptr(d, bindex, NULL);
-+ }
-+ au_set_dbend(d, a->btgt);
-+
-+ sb = d->d_sb;
-+ i = a->src_inode;
-+ if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
-+ return; /* success */
-+
-+ bend = au_ibend(i);
-+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
-+ h_i = au_h_iptr(i, bindex);
-+ if (h_i) {
-+ au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
-+ /* ignore this error */
-+ au_set_h_iptr(i, bindex, NULL, 0);
-+ }
-+ }
-+ au_set_ibend(i, a->btgt);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* mainly for link(2) and rename(2) */
-+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
-+{
-+ aufs_bindex_t bdiropq, bwh;
-+ struct dentry *parent;
-+ struct au_branch *br;
-+
-+ parent = dentry->d_parent;
-+ IMustLock(d_inode(parent)); /* dir is locked */
-+
-+ bdiropq = au_dbdiropq(parent);
-+ bwh = au_dbwh(dentry);
-+ br = au_sbr(dentry->d_sb, btgt);
-+ if (au_br_rdonly(br)
-+ || (0 <= bdiropq && bdiropq < btgt)
-+ || (0 <= bwh && bwh < btgt))
-+ btgt = -1;
-+
-+ AuDbg("btgt %d\n", btgt);
-+ return btgt;
-+}
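-+
-+/*
-+ * illustrative call site (cf. au_ren_wbr() below); a negative return is
-+ * assumed to mean "no usable writable target", which au_wr_dir() then
-+ * reports as an error:
-+ *
-+ *	wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
-+ *	err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
-+ */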
-+
-+/* sets src_bstart, dst_bstart and btgt */
-+static int au_ren_wbr(struct au_ren_args *a)
-+{
-+ int err;
-+ struct au_wr_dir_args wr_dir_args = {
-+ /* .force_btgt = -1, */
-+ .flags = AuWrDir_ADD_ENTRY
-+ };
-+
-+ a->src_bstart = au_dbstart(a->src_dentry);
-+ a->dst_bstart = au_dbstart(a->dst_dentry);
-+ if (au_ftest_ren(a->flags, ISDIR))
-+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
-+ wr_dir_args.force_btgt = a->src_bstart;
-+ if (a->dst_inode && a->dst_bstart < a->src_bstart)
-+ wr_dir_args.force_btgt = a->dst_bstart;
-+ wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
-+ err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
-+ a->btgt = err;
-+
-+ return err;
-+}
-+
-+static void au_ren_dt(struct au_ren_args *a)
-+{
-+ a->h_path.dentry = a->src_h_parent;
-+ au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
-+ if (!au_ftest_ren(a->flags, ISSAMEDIR)) {
-+ a->h_path.dentry = a->dst_h_parent;
-+ au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
-+ }
-+
-+ au_fclr_ren(a->flags, DT_DSTDIR);
-+ if (!au_ftest_ren(a->flags, ISDIR))
-+ return;
-+
-+ a->h_path.dentry = a->src_h_dentry;
-+ au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
-+ if (d_is_positive(a->dst_h_dentry)) {
-+ au_fset_ren(a->flags, DT_DSTDIR);
-+ a->h_path.dentry = a->dst_h_dentry;
-+ au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
-+ }
-+}
-+
-+static void au_ren_rev_dt(int err, struct au_ren_args *a)
-+{
-+ struct dentry *h_d;
-+ struct mutex *h_mtx;
-+
-+ au_dtime_revert(a->src_dt + AuPARENT);
-+ if (!au_ftest_ren(a->flags, ISSAMEDIR))
-+ au_dtime_revert(a->dst_dt + AuPARENT);
-+
-+ if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) {
-+ h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
-+ h_mtx = &d_inode(h_d)->i_mutex;
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
-+ au_dtime_revert(a->src_dt + AuCHILD);
-+ mutex_unlock(h_mtx);
-+
-+ if (au_ftest_ren(a->flags, DT_DSTDIR)) {
-+ h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
-+ h_mtx = &d_inode(h_d)->i_mutex;
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
-+ au_dtime_revert(a->dst_dt + AuCHILD);
-+ mutex_unlock(h_mtx);
-+ }
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
-+ struct inode *_dst_dir, struct dentry *_dst_dentry)
-+{
-+ int err, flags;
-+ /* reduce stack space */
-+ struct au_ren_args *a;
-+
-+ AuDbg("%pd, %pd\n", _src_dentry, _dst_dentry);
-+ IMustLock(_src_dir);
-+ IMustLock(_dst_dir);
-+
-+ err = -ENOMEM;
-+ BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
-+ a = kzalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ a->src_dir = _src_dir;
-+ a->src_dentry = _src_dentry;
-+ a->src_inode = NULL;
-+ if (d_really_is_positive(a->src_dentry))
-+ a->src_inode = d_inode(a->src_dentry);
-+ a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
-+ a->dst_dir = _dst_dir;
-+ a->dst_dentry = _dst_dentry;
-+ a->dst_inode = NULL;
-+ if (d_really_is_positive(a->dst_dentry))
-+ a->dst_inode = d_inode(a->dst_dentry);
-+ a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
-+ if (a->dst_inode) {
-+ IMustLock(a->dst_inode);
-+ au_igrab(a->dst_inode);
-+ }
-+
-+ err = -ENOTDIR;
-+ flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
-+ if (d_is_dir(a->src_dentry)) {
-+ au_fset_ren(a->flags, ISDIR);
-+ if (unlikely(d_really_is_positive(a->dst_dentry)
-+ && !d_is_dir(a->dst_dentry)))
-+ goto out_free;
-+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
-+ AuLock_DIR | flags);
-+ } else
-+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
-+ flags);
-+ if (unlikely(err))
-+ goto out_free;
-+
-+ err = au_d_hashed_positive(a->src_dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ err = -ENOENT;
-+ if (a->dst_inode) {
-+ /*
-+		 * If it is a dir, the VFS unhashes dst_dentry before this
-+		 * function is called. It means we cannot rely upon d_unhashed().
-+ */
-+ if (unlikely(!a->dst_inode->i_nlink))
-+ goto out_unlock;
-+ if (!S_ISDIR(a->dst_inode->i_mode)) {
-+ err = au_d_hashed_positive(a->dst_dentry);
-+ if (unlikely(err))
-+ goto out_unlock;
-+ } else if (unlikely(IS_DEADDIR(a->dst_inode)))
-+ goto out_unlock;
-+ } else if (unlikely(d_unhashed(a->dst_dentry)))
-+ goto out_unlock;
-+
-+ /*
-+ * is it possible?
-+ * yes, it happened (in linux-3.3-rcN) but I don't know why.
-+ * there may exist a problem somewhere else.
-+ */
-+ err = -EINVAL;
-+ if (unlikely(d_inode(a->dst_parent) == d_inode(a->src_dentry)))
-+ goto out_unlock;
-+
-+ au_fset_ren(a->flags, ISSAMEDIR); /* temporary */
-+ di_write_lock_parent(a->dst_parent);
-+
-+ /* which branch we process */
-+ err = au_ren_wbr(a);
-+ if (unlikely(err < 0))
-+ goto out_parent;
-+ a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
-+ a->h_path.mnt = au_br_mnt(a->br);
-+
-+ /* are they available to be renamed */
-+ err = au_ren_may_dir(a);
-+ if (unlikely(err))
-+ goto out_children;
-+
-+ /* prepare the writable parent dir on the same branch */
-+ if (a->dst_bstart == a->btgt) {
-+ au_fset_ren(a->flags, WHDST);
-+ } else {
-+ err = au_cpup_dirs(a->dst_dentry, a->btgt);
-+ if (unlikely(err))
-+ goto out_children;
-+ }
-+
-+ if (a->src_dir != a->dst_dir) {
-+ /*
-+ * this temporary unlock is safe,
-+ * because both dir->i_mutex are locked.
-+ */
-+ di_write_unlock(a->dst_parent);
-+ di_write_lock_parent(a->src_parent);
-+ err = au_wr_dir_need_wh(a->src_dentry,
-+ au_ftest_ren(a->flags, ISDIR),
-+ &a->btgt);
-+ di_write_unlock(a->src_parent);
-+ di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1);
-+ au_fclr_ren(a->flags, ISSAMEDIR);
-+ } else
-+ err = au_wr_dir_need_wh(a->src_dentry,
-+ au_ftest_ren(a->flags, ISDIR),
-+ &a->btgt);
-+ if (unlikely(err < 0))
-+ goto out_children;
-+ if (err)
-+ au_fset_ren(a->flags, WHSRC);
-+
-+ /* cpup src */
-+ if (a->src_bstart != a->btgt) {
-+ struct au_pin pin;
-+
-+ err = au_pin(&pin, a->src_dentry, a->btgt,
-+ au_opt_udba(a->src_dentry->d_sb),
-+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
-+ if (!err) {
-+ struct au_cp_generic cpg = {
-+ .dentry = a->src_dentry,
-+ .bdst = a->btgt,
-+ .bsrc = a->src_bstart,
-+ .len = -1,
-+ .pin = &pin,
-+ .flags = AuCpup_DTIME | AuCpup_HOPEN
-+ };
-+ AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart);
-+ err = au_sio_cpup_simple(&cpg);
-+ au_unpin(&pin);
-+ }
-+ if (unlikely(err))
-+ goto out_children;
-+ a->src_bstart = a->btgt;
-+ a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
-+ au_fset_ren(a->flags, WHSRC);
-+ }
-+
-+ /* lock them all */
-+ err = au_ren_lock(a);
-+ if (unlikely(err))
-+ /* leave the copied-up one */
-+ goto out_children;
-+
-+ if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
-+ err = au_may_ren(a);
-+ else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
-+ err = -ENAMETOOLONG;
-+ if (unlikely(err))
-+ goto out_hdir;
-+
-+ /* store timestamps to be revertible */
-+ au_ren_dt(a);
-+
-+ /* here we go */
-+ err = do_rename(a);
-+ if (unlikely(err))
-+ goto out_dt;
-+
-+ /* update dir attributes */
-+ au_ren_refresh_dir(a);
-+
-+ /* dput/iput all lower dentries */
-+ au_ren_refresh(a);
-+
-+ goto out_hdir; /* success */
-+
-+out_dt:
-+ au_ren_rev_dt(err, a);
-+out_hdir:
-+ au_ren_unlock(a);
-+out_children:
-+ au_nhash_wh_free(&a->whlist);
-+ if (err && a->dst_inode && a->dst_bstart != a->btgt) {
-+ AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt);
-+ au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
-+ au_set_dbstart(a->dst_dentry, a->dst_bstart);
-+ }
-+out_parent:
-+ if (!err)
-+ d_move(a->src_dentry, a->dst_dentry);
-+ else {
-+ au_update_dbstart(a->dst_dentry);
-+ if (!a->dst_inode)
-+ d_drop(a->dst_dentry);
-+ }
-+ if (au_ftest_ren(a->flags, ISSAMEDIR))
-+ di_write_unlock(a->dst_parent);
-+ else
-+ di_write_unlock2(a->src_parent, a->dst_parent);
-+out_unlock:
-+ aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
-+out_free:
-+ iput(a->dst_inode);
-+ if (a->thargs)
-+ au_whtmp_rmdir_free(a->thargs);
-+ kfree(a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/Kconfig linux-4.1.10/fs/aufs/Kconfig
---- linux-4.1.10.orig/fs/aufs/Kconfig 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/Kconfig 2015-10-22 21:36:44.000000000 +0200
-@@ -0,0 +1,185 @@
-+config AUFS_FS
-+ tristate "Aufs (Advanced multi layered unification filesystem) support"
-+ help
-+ Aufs is a stackable unification filesystem such as Unionfs,
-+ which unifies several directories and provides a merged single
-+ directory.
-+	  In the early days, aufs was an entirely re-designed and
-+	  re-implemented Unionfs Version 1.x series. By introducing many
-+	  original ideas, approaches and improvements, it became totally
-+	  different from Unionfs while keeping the basic features.
-+
-+if AUFS_FS
-+choice
-+ prompt "Maximum number of branches"
-+ default AUFS_BRANCH_MAX_127
-+ help
-+ Specifies the maximum number of branches (or member directories)
-+	  in a single aufs. A larger value consumes more system
-+	  resources and has a minor impact on performance.
-+config AUFS_BRANCH_MAX_127
-+ bool "127"
-+ help
-+ Specifies the maximum number of branches (or member directories)
-+	  in a single aufs. A larger value consumes more system
-+	  resources and has a minor impact on performance.
-+config AUFS_BRANCH_MAX_511
-+ bool "511"
-+ help
-+ Specifies the maximum number of branches (or member directories)
-+	  in a single aufs. A larger value consumes more system
-+	  resources and has a minor impact on performance.
-+config AUFS_BRANCH_MAX_1023
-+ bool "1023"
-+ help
-+ Specifies the maximum number of branches (or member directories)
-+	  in a single aufs. A larger value consumes more system
-+	  resources and has a minor impact on performance.
-+config AUFS_BRANCH_MAX_32767
-+ bool "32767"
-+ help
-+ Specifies the maximum number of branches (or member directories)
-+	  in a single aufs. A larger value consumes more system
-+	  resources and has a minor impact on performance.
-+endchoice
-+
-+config AUFS_SBILIST
-+ bool
-+ depends on AUFS_MAGIC_SYSRQ || PROC_FS
-+ default y
-+ help
-+ Automatic configuration for internal use.
-+ When aufs supports Magic SysRq or /proc, enabled automatically.
-+
-+config AUFS_HNOTIFY
-+ bool "Detect direct branch access (bypassing aufs)"
-+ help
-+	  If you want to modify files on branches directly, e.g. bypassing
-+	  aufs, and want aufs to fully detect those changes, then enable
-+	  this option and use the 'udba=notify' mount option.
-+	  Currently there is only one available configuration, "fsnotify".
-+	  It will have a negative impact on performance.
-+	  See details in aufs.5.
-+
-+choice
-+ prompt "method" if AUFS_HNOTIFY
-+ default AUFS_HFSNOTIFY
-+config AUFS_HFSNOTIFY
-+ bool "fsnotify"
-+ select FSNOTIFY
-+endchoice
-+
-+config AUFS_EXPORT
-+ bool "NFS-exportable aufs"
-+ select EXPORTFS
-+ help
-+ If you want to export your mounted aufs via NFS, then enable this
-+ option. There are several requirements for this configuration.
-+	  See details in aufs.5.
-+
-+config AUFS_INO_T_64
-+ bool
-+ depends on AUFS_EXPORT
-+ depends on 64BIT && !(ALPHA || S390)
-+ default y
-+ help
-+ Automatic configuration for internal use.
-+ /* typedef unsigned long/int __kernel_ino_t */
-+ /* alpha and s390x are int */
-+
-+config AUFS_XATTR
-+ bool "support for XATTR/EA (including Security Labels)"
-+ help
-+ If your branch fs supports XATTR/EA and you want to make them
-+	  available in aufs too, then enable this option and specify the
-+	  branch attributes for EA.
-+	  See details in aufs.5.
-+
-+config AUFS_FHSM
-+ bool "File-based Hierarchical Storage Management"
-+ help
-+ Hierarchical Storage Management (or HSM) is a well-known feature
-+	  in the storage world. Aufs provides this feature as a file-based
-+	  HSM with multiple branches.
-+	  These multiple branches are prioritized, i.e. the topmost one
-+	  should be the fastest drive and be used heavily.
-+
-+config AUFS_RDU
-+ bool "Readdir in userspace"
-+ help
-+ Aufs has two methods to provide a merged view for a directory,
-+ by a user-space library and by kernel-space natively. The latter
-+ is always enabled but sometimes large and slow.
-+	  If you enable this option, install the library in the aufs2-util
-+	  package, and set some environment variables for your readdir(3);
-+	  the work will then be handled in user-space, which generally
-+	  shows better performance in most cases.
-+	  See details in aufs.5.
-+
-+config AUFS_SHWH
-+ bool "Show whiteouts"
-+ help
-+ If you want to make the whiteouts in aufs visible, then enable
-+	  this option and specify the 'shwh' mount option. Although it may
-+	  sound like philosophy, technically it simply shows the name of
-+	  the whiteout while keeping its behaviour.
-+
-+config AUFS_BR_RAMFS
-+ bool "Ramfs (initramfs/rootfs) as an aufs branch"
-+ help
-+ If you want to use ramfs as an aufs branch fs, then enable this
-+ option. Generally tmpfs is recommended.
-+	  Aufs prohibits them from being a branch fs by default, because
-+	  initramfs generally becomes unusable after switch_root or the
-+	  like. If you set initramfs as an aufs branch and boot your
-+	  system via switch_root, you will easily run into problems since
-+	  the files in initramfs may be inaccessible.
-+	  Unless you are going to use ramfs as an aufs branch fs without
-+	  switch_root or the like, leave it N.
-+
-+config AUFS_BR_FUSE
-+ bool "Fuse fs as an aufs branch"
-+ depends on FUSE_FS
-+ select AUFS_POLL
-+ help
-+	  If you want to use a fuse-based userspace filesystem as an aufs
-+	  branch fs, then enable this option.
-+	  It implements the internal poll(2) operation which is
-+	  implemented by fuse only (currently).
-+
-+config AUFS_POLL
-+ bool
-+ help
-+ Automatic configuration for internal use.
-+
-+config AUFS_BR_HFSPLUS
-+ bool "Hfsplus as an aufs branch"
-+ depends on HFSPLUS_FS
-+ default y
-+ help
-+ If you want to use hfsplus fs as an aufs branch fs, then enable
-+	  this option. This option introduces a small overhead when
-+	  copying-up a file on hfsplus.
-+
-+config AUFS_BDEV_LOOP
-+ bool
-+ depends on BLK_DEV_LOOP
-+ default y
-+ help
-+ Automatic configuration for internal use.
-+ Convert =[ym] into =y.
-+
-+config AUFS_DEBUG
-+ bool "Debug aufs"
-+ help
-+ Enable this to compile aufs internal debug code.
-+	  It will have a negative impact on performance.
-+
-+config AUFS_MAGIC_SYSRQ
-+ bool
-+ depends on AUFS_DEBUG && MAGIC_SYSRQ
-+ default y
-+ help
-+ Automatic configuration for internal use.
-+ When aufs supports Magic SysRq, enabled automatically.
-+endif
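-+
-+# An illustrative .config fragment (not part of the original file); a
-+# typical modular aufs build with XATTR support might select:
-+#	CONFIG_AUFS_FS=m
-+#	CONFIG_AUFS_BRANCH_MAX_127=y
-+#	CONFIG_AUFS_XATTR=y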
-diff -Nur linux-4.1.10.orig/fs/aufs/loop.c linux-4.1.10/fs/aufs/loop.c
---- linux-4.1.10.orig/fs/aufs/loop.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/loop.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,145 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * support for loopback block device as a branch
-+ */
-+
-+#include "aufs.h"
-+
-+/* added into drivers/block/loop.c */
-+static struct file *(*backing_file_func)(struct super_block *sb);
-+
-+/*
-+ * test if two lower dentries have overlapping branches.
-+ */
-+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
-+{
-+ struct super_block *h_sb;
-+ struct file *backing_file;
-+
-+ if (unlikely(!backing_file_func)) {
-+ /* don't load "loop" module here */
-+ backing_file_func = symbol_get(loop_backing_file);
-+ if (unlikely(!backing_file_func))
-+ /* "loop" module is not loaded */
-+ return 0;
-+ }
-+
-+ h_sb = h_adding->d_sb;
-+ backing_file = backing_file_func(h_sb);
-+ if (!backing_file)
-+ return 0;
-+
-+ h_adding = backing_file->f_path.dentry;
-+ /*
-+	 * h_adding can be a local NFS mount.
-+ * in this case aufs cannot detect the loop.
-+ */
-+ if (unlikely(h_adding->d_sb == sb))
-+ return 1;
-+ return !!au_test_subdir(h_adding, sb->s_root);
-+}
-+
-+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
-+int au_test_loopback_kthread(void)
-+{
-+ int ret;
-+ struct task_struct *tsk = current;
-+ char c, comm[sizeof(tsk->comm)];
-+
-+ ret = 0;
-+ if (tsk->flags & PF_KTHREAD) {
-+ get_task_comm(comm, tsk);
-+ c = comm[4];
-+ ret = ('0' <= c && c <= '9'
-+ && !strncmp(comm, "loop", 4));
-+ }
-+
-+ return ret;
-+}
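-+
-+/*
-+ * the test above matches kernel threads whose comm is "loop<N>" (e.g.
-+ * "loop0"), i.e. the per-device worker threads of the loop driver, as the
-+ * "loop[0-9].*" pattern in the comment suggests.
-+ */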
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define au_warn_loopback_step 16
-+static int au_warn_loopback_nelem = au_warn_loopback_step;
-+static unsigned long *au_warn_loopback_array;
-+
-+void au_warn_loopback(struct super_block *h_sb)
-+{
-+ int i, new_nelem;
-+ unsigned long *a, magic;
-+ static DEFINE_SPINLOCK(spin);
-+
-+ magic = h_sb->s_magic;
-+ spin_lock(&spin);
-+ a = au_warn_loopback_array;
-+ for (i = 0; i < au_warn_loopback_nelem && *a; i++)
-+ if (a[i] == magic) {
-+ spin_unlock(&spin);
-+ return;
-+ }
-+
-+ /* h_sb is new to us, print it */
-+ if (i < au_warn_loopback_nelem) {
-+ a[i] = magic;
-+ goto pr;
-+ }
-+
-+ /* expand the array */
-+ new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
-+ a = au_kzrealloc(au_warn_loopback_array,
-+ au_warn_loopback_nelem * sizeof(unsigned long),
-+ new_nelem * sizeof(unsigned long), GFP_ATOMIC);
-+ if (a) {
-+ au_warn_loopback_nelem = new_nelem;
-+ au_warn_loopback_array = a;
-+ a[i] = magic;
-+ goto pr;
-+ }
-+
-+ spin_unlock(&spin);
-+ AuWarn1("realloc failed, ignored\n");
-+ return;
-+
-+pr:
-+ spin_unlock(&spin);
-+ pr_warn("you may want to try another patch for loopback file "
-+ "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
-+}
-+
-+int au_loopback_init(void)
-+{
-+ int err;
-+ struct super_block *sb __maybe_unused;
-+
-+ AuDebugOn(sizeof(sb->s_magic) != sizeof(unsigned long));
-+
-+ err = 0;
-+ au_warn_loopback_array = kcalloc(au_warn_loopback_step,
-+ sizeof(unsigned long), GFP_NOFS);
-+ if (unlikely(!au_warn_loopback_array))
-+ err = -ENOMEM;
-+
-+ return err;
-+}
-+
-+void au_loopback_fin(void)
-+{
-+ symbol_put(loop_backing_file);
-+ kfree(au_warn_loopback_array);
-+}
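
au_loopback_fin() drops the reference taken by the lazy symbol_get() above (now guarded, since the "loop" module may never have been loaded). A minimal sketch of that weak-linkage pattern with placeholder names — loop_backing_file itself is the real symbol, declared in loop.h below as being added to drivers/block/loop.c:

    #include <linux/module.h>
    #include <linux/printk.h>

    /* exported by some other module; a placeholder for illustration */
    extern int optional_feature(int arg);

    static int (*feature)(int);

    static int __init demo_init(void)
    {
            /* take a reference only if the provider is already loaded;
             * unlike request_module(), this never loads it for us */
            feature = symbol_get(optional_feature);
            if (!feature)
                    pr_info("provider absent, running without the feature\n");
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* drop the module reference only if we actually took one */
            if (feature)
                    symbol_put(optional_feature);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
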
-diff -Nur linux-4.1.10.orig/fs/aufs/loop.h linux-4.1.10/fs/aufs/loop.h
---- linux-4.1.10.orig/fs/aufs/loop.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/loop.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,52 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * support for loopback mount as a branch
-+ */
-+
-+#ifndef __AUFS_LOOP_H__
-+#define __AUFS_LOOP_H__
-+
-+#ifdef __KERNEL__
-+
-+struct dentry;
-+struct super_block;
-+
-+#ifdef CONFIG_AUFS_BDEV_LOOP
-+/* drivers/block/loop.c */
-+struct file *loop_backing_file(struct super_block *sb);
-+
-+/* loop.c */
-+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
-+int au_test_loopback_kthread(void);
-+void au_warn_loopback(struct super_block *h_sb);
-+
-+int au_loopback_init(void);
-+void au_loopback_fin(void);
-+#else
-+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
-+ struct dentry *h_adding)
-+AuStubInt0(au_test_loopback_kthread, void)
-+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
-+
-+AuStubInt0(au_loopback_init, void)
-+AuStubVoid(au_loopback_fin, void)
-+#endif /* CONFIG_AUFS_BDEV_LOOP */
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_LOOP_H__ */
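
The AuStub* macros live in aufs's debug.h; their definitions, reproduced here as an assumption, are what lets the header above compile identically whether CONFIG_AUFS_BDEV_LOOP is set or not:

    /* assumed definitions, matching how the header above uses them */
    #define AuStubVoid(name, arg...) \
            static inline void name(arg) {}
    #define AuStubInt0(name, arg...) \
            static inline int name(arg) { return 0; }

    /* so AuStubInt0(au_loopback_init, void) expands to:
     *      static inline int au_loopback_init(void) { return 0; }
     * and the callers in module.c need no #ifdef of their own. */
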
-diff -Nur linux-4.1.10.orig/fs/aufs/magic.mk linux-4.1.10/fs/aufs/magic.mk
---- linux-4.1.10.orig/fs/aufs/magic.mk 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/magic.mk 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,30 @@
-+
-+# defined in ${srctree}/fs/fuse/inode.c
-+# tristate
-+ifdef CONFIG_FUSE_FS
-+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
-+endif
-+
-+# defined in ${srctree}/fs/xfs/xfs_sb.h
-+# tristate
-+ifdef CONFIG_XFS_FS
-+ccflags-y += -DXFS_SB_MAGIC=0x58465342
-+endif
-+
-+# defined in ${srctree}/fs/configfs/mount.c
-+# tristate
-+ifdef CONFIG_CONFIGFS_FS
-+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
-+endif
-+
-+# defined in ${srctree}/fs/ubifs/ubifs.h
-+# tristate
-+ifdef CONFIG_UBIFS_FS
-+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
-+endif
-+
-+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
-+# tristate
-+ifdef CONFIG_HFSPLUS_FS
-+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
-+endif
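
These -D flags exist because each magic constant lives in a header private to its filesystem, and the tristate configs leave no usable constant in other objects. A sketch of how such defines are typically consumed on the aufs side (the helper name is illustrative):

    #include <linux/fs.h>

    static inline int au_test_xfs(struct super_block *sb)
    {
    #ifdef XFS_SB_MAGIC     /* injected by magic.mk when CONFIG_XFS_FS is set */
            return sb->s_magic == XFS_SB_MAGIC;
    #else
            return 0;
    #endif
    }
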
-diff -Nur linux-4.1.10.orig/fs/aufs/Makefile linux-4.1.10/fs/aufs/Makefile
---- linux-4.1.10.orig/fs/aufs/Makefile 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/Makefile 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,44 @@
-+
-+include ${src}/magic.mk
-+ifeq (${CONFIG_AUFS_FS},m)
-+include ${src}/conf.mk
-+endif
-+-include ${src}/priv_def.mk
-+
-+# cf. include/linux/kernel.h
-+# enable pr_debug
-+ccflags-y += -DDEBUG
-+# sparse requires the full pathname
-+ifdef M
-+ccflags-y += -include ${M}/../../include/uapi/linux/aufs_type.h
-+else
-+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h
-+endif
-+
-+obj-$(CONFIG_AUFS_FS) += aufs.o
-+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
-+ wkq.o vfsub.o dcsub.o \
-+ cpup.o whout.o wbr_policy.o \
-+ dinfo.o dentry.o \
-+ dynop.o \
-+ finfo.o file.o f_op.o \
-+ dir.o vdir.o \
-+ iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
-+ mvdown.o ioctl.o
-+
-+# all are boolean
-+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
-+aufs-$(CONFIG_SYSFS) += sysfs.o
-+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
-+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
-+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
-+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
-+aufs-$(CONFIG_AUFS_EXPORT) += export.o
-+aufs-$(CONFIG_AUFS_XATTR) += xattr.o
-+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
-+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o
-+aufs-$(CONFIG_AUFS_POLL) += poll.o
-+aufs-$(CONFIG_AUFS_RDU) += rdu.o
-+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
-+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
-+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
-diff -Nur linux-4.1.10.orig/fs/aufs/module.c linux-4.1.10/fs/aufs/module.c
---- linux-4.1.10.orig/fs/aufs/module.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/module.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,218 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * module global variables and operations
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include "aufs.h"
-+
-+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
-+{
-+ if (new_sz <= nused)
-+ return p;
-+
-+ p = krealloc(p, new_sz, gfp);
-+ if (p)
-+ memset(p + nused, 0, new_sz - nused);
-+ return p;
-+}
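
A usage sketch: unlike plain krealloc(), au_kzrealloc() zeroes only the newly exposed tail, preserving kzalloc()-like semantics across a grow (the warn-loopback array in loop.c relies on exactly this):

    #include <linux/slab.h>

    /* illustrative fragment, assuming the aufs helpers above are in scope */
    static unsigned long *grow(unsigned long *p)
    {
            /* slots 0-3 keep their contents; slots 4-7 read back as zero */
            return au_kzrealloc(p, 4 * sizeof(*p), 8 * sizeof(*p), GFP_NOFS);
            /* on failure this returns NULL and, as with krealloc(), the old
             * buffer is left intact and still owned by the caller */
    }
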
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * aufs caches
-+ */
-+struct kmem_cache *au_cachep[AuCache_Last];
-+static int __init au_cache_init(void)
-+{
-+ au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
-+ if (au_cachep[AuCache_DINFO])
-+ /* SLAB_DESTROY_BY_RCU */
-+ au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
-+ au_icntnr_init_once);
-+ if (au_cachep[AuCache_ICNTNR])
-+ au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo,
-+ au_fi_init_once);
-+ if (au_cachep[AuCache_FINFO])
-+ au_cachep[AuCache_VDIR] = AuCache(au_vdir);
-+ if (au_cachep[AuCache_VDIR])
-+ au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
-+ if (au_cachep[AuCache_DEHSTR])
-+ return 0;
-+
-+ return -ENOMEM;
-+}
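
The AuCache()/AuCacheCtor() helpers used in this chain are thin wrappers around kmem_cache_create(); their definitions appear in module.h further down. For instance, AuCache(au_vdir) boils down to:

    kmem_cache_create("au_vdir", sizeof(struct au_vdir),
                      __alignof__(struct au_vdir),
                      SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                      /*ctor*/NULL);
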
-+
-+static void au_cache_fin(void)
-+{
-+ int i;
-+
-+ /*
-+ * Make sure all delayed rcu free inodes are flushed before we
-+ * destroy cache.
-+ */
-+ rcu_barrier();
-+
-+ /* excluding AuCache_HNOTIFY */
-+ BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
-+ for (i = 0; i < AuCache_HNOTIFY; i++)
-+ if (au_cachep[i]) {
-+ kmem_cache_destroy(au_cachep[i]);
-+ au_cachep[i] = NULL;
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_dir_roflags;
-+
-+#ifdef CONFIG_AUFS_SBILIST
-+/*
-+ * iterate_supers_type() doesn't protect us from
-+ * remounting (branch management)
-+ */
-+struct au_splhead au_sbilist;
-+#endif
-+
-+struct lock_class_key au_lc_key[AuLcKey_Last];
-+
-+/*
-+ * functions for module interface.
-+ */
-+MODULE_LICENSE("GPL");
-+/* MODULE_LICENSE("GPL v2"); */
-+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
-+MODULE_DESCRIPTION(AUFS_NAME
-+ " -- Advanced multi layered unification filesystem");
-+MODULE_VERSION(AUFS_VERSION);
-+MODULE_ALIAS_FS(AUFS_NAME);
-+
-+/* this module parameter has no meaning when SYSFS is disabled */
-+int sysaufs_brs = 1;
-+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
-+module_param_named(brs, sysaufs_brs, int, S_IRUGO);
-+
-+/* this module parameter has no meaning when USER_NS is disabled */
-+static bool au_userns;
-+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns");
-+module_param_named(allow_userns, au_userns, bool, S_IRUGO);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
-+
-+int au_seq_path(struct seq_file *seq, struct path *path)
-+{
-+ int err;
-+
-+ err = seq_path(seq, path, au_esc_chars);
-+ if (err > 0)
-+ err = 0;
-+ else if (err < 0)
-+ err = -ENOMEM;
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int __init aufs_init(void)
-+{
-+ int err, i;
-+ char *p;
-+
-+ p = au_esc_chars;
-+ for (i = 1; i <= ' '; i++)
-+ *p++ = i;
-+ *p++ = '\\';
-+ *p++ = '\x7f';
-+ *p = 0;
-+
-+ au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
-+
-+ au_sbilist_init();
-+ sysaufs_brs_init();
-+ au_debug_init();
-+ au_dy_init();
-+ err = sysaufs_init();
-+ if (unlikely(err))
-+ goto out;
-+ err = au_procfs_init();
-+ if (unlikely(err))
-+ goto out_sysaufs;
-+ err = au_wkq_init();
-+ if (unlikely(err))
-+ goto out_procfs;
-+ err = au_loopback_init();
-+ if (unlikely(err))
-+ goto out_wkq;
-+ err = au_hnotify_init();
-+ if (unlikely(err))
-+ goto out_loopback;
-+ err = au_sysrq_init();
-+ if (unlikely(err))
-+ goto out_hin;
-+ err = au_cache_init();
-+ if (unlikely(err))
-+ goto out_sysrq;
-+
-+ aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0;
-+ err = register_filesystem(&aufs_fs_type);
-+ if (unlikely(err))
-+ goto out_cache;
-+
-+ /* since we define pr_fmt, call printk directly */
-+ printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
-+ goto out; /* success */
-+
-+out_cache:
-+ au_cache_fin();
-+out_sysrq:
-+ au_sysrq_fin();
-+out_hin:
-+ au_hnotify_fin();
-+out_loopback:
-+ au_loopback_fin();
-+out_wkq:
-+ au_wkq_fin();
-+out_procfs:
-+ au_procfs_fin();
-+out_sysaufs:
-+ sysaufs_fin();
-+ au_dy_fin();
-+out:
-+ return err;
-+}
-+
-+static void __exit aufs_exit(void)
-+{
-+ unregister_filesystem(&aufs_fs_type);
-+ au_cache_fin();
-+ au_sysrq_fin();
-+ au_hnotify_fin();
-+ au_loopback_fin();
-+ au_wkq_fin();
-+ au_procfs_fin();
-+ sysaufs_fin();
-+ au_dy_fin();
-+}
-+
-+module_init(aufs_init);
-+module_exit(aufs_exit);
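
aufs_init() follows the standard kernel error-unwind idiom: each successfully initialized subsystem gets a label, a failure jumps to the label that tears down everything acquired so far in reverse order, and aufs_exit() is the same sequence in full. A tiny user-space sketch of the shape, with placeholder resources:

    #include <stdlib.h>

    static void *res_a, *res_b;

    static int setup(void)
    {
            res_a = malloc(16);
            if (!res_a)
                    goto out;
            res_b = malloc(16);
            if (!res_b)
                    goto out_a;
            return 0;       /* success: both resources live */

    out_a:  /* unwind in reverse order of acquisition */
            free(res_a);
            res_a = NULL;
    out:
            return -1;
    }

    static void teardown(void)
    {
            /* full unwind, mirroring setup() exactly backwards (cf. aufs_exit) */
            free(res_b);
            free(res_a);
    }

    int main(void)
    {
            if (!setup())
                    teardown();
            return 0;
    }
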
-diff -Nur linux-4.1.10.orig/fs/aufs/module.h linux-4.1.10/fs/aufs/module.h
---- linux-4.1.10.orig/fs/aufs/module.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/module.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,104 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * module initialization and module-global
-+ */
-+
-+#ifndef __AUFS_MODULE_H__
-+#define __AUFS_MODULE_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/slab.h>
-+
-+struct path;
-+struct seq_file;
-+
-+/* module parameters */
-+extern int sysaufs_brs;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+extern int au_dir_roflags;
-+
-+enum {
-+ AuLcNonDir_FIINFO,
-+ AuLcNonDir_DIINFO,
-+ AuLcNonDir_IIINFO,
-+
-+ AuLcDir_FIINFO,
-+ AuLcDir_DIINFO,
-+ AuLcDir_IIINFO,
-+
-+ AuLcSymlink_DIINFO,
-+ AuLcSymlink_IIINFO,
-+
-+ AuLcKey_Last
-+};
-+extern struct lock_class_key au_lc_key[AuLcKey_Last];
-+
-+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
-+int au_seq_path(struct seq_file *seq, struct path *path);
-+
-+#ifdef CONFIG_PROC_FS
-+/* procfs.c */
-+int __init au_procfs_init(void);
-+void au_procfs_fin(void);
-+#else
-+AuStubInt0(au_procfs_init, void);
-+AuStubVoid(au_procfs_fin, void);
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* kmem cache */
-+enum {
-+ AuCache_DINFO,
-+ AuCache_ICNTNR,
-+ AuCache_FINFO,
-+ AuCache_VDIR,
-+ AuCache_DEHSTR,
-+ AuCache_HNOTIFY, /* must be last */
-+ AuCache_Last
-+};
-+
-+#define AuCacheFlags (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
-+#define AuCache(type) KMEM_CACHE(type, AuCacheFlags)
-+#define AuCacheCtor(type, ctor) \
-+ kmem_cache_create(#type, sizeof(struct type), \
-+ __alignof__(struct type), AuCacheFlags, ctor)
-+
-+extern struct kmem_cache *au_cachep[];
-+
-+#define AuCacheFuncs(name, index) \
-+static inline struct au_##name *au_cache_alloc_##name(void) \
-+{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \
-+static inline void au_cache_free_##name(struct au_##name *p) \
-+{ kmem_cache_free(au_cachep[AuCache_##index], p); }
-+
-+AuCacheFuncs(dinfo, DINFO);
-+AuCacheFuncs(icntnr, ICNTNR);
-+AuCacheFuncs(finfo, FINFO);
-+AuCacheFuncs(vdir, VDIR);
-+AuCacheFuncs(vdir_dehstr, DEHSTR);
-+#ifdef CONFIG_AUFS_HNOTIFY
-+AuCacheFuncs(hnotify, HNOTIFY);
-+#endif
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_MODULE_H__ */
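
For reference, the first invocation above, AuCacheFuncs(dinfo, DINFO), expands literally to:

    static inline struct au_dinfo *au_cache_alloc_dinfo(void)
    { return kmem_cache_alloc(au_cachep[AuCache_DINFO], GFP_NOFS); }
    static inline void au_cache_free_dinfo(struct au_dinfo *p)
    { kmem_cache_free(au_cachep[AuCache_DINFO], p); }
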
-diff -Nur linux-4.1.10.orig/fs/aufs/mvdown.c linux-4.1.10/fs/aufs/mvdown.c
---- linux-4.1.10.orig/fs/aufs/mvdown.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/mvdown.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,694 @@
-+/*
-+ * Copyright (C) 2011-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * move-down, opposite of copy-up
-+ */
-+
-+#include "aufs.h"
-+
-+struct au_mvd_args {
-+ struct {
-+ struct super_block *h_sb;
-+ struct dentry *h_parent;
-+ struct au_hinode *hdir;
-+ struct inode *h_dir, *h_inode;
-+ struct au_pin pin;
-+ } info[AUFS_MVDOWN_NARRAY];
-+
-+ struct aufs_mvdown mvdown;
-+ struct dentry *dentry, *parent;
-+ struct inode *inode, *dir;
-+ struct super_block *sb;
-+ aufs_bindex_t bopq, bwh, bfound;
-+ unsigned char rename_lock;
-+};
-+
-+#define mvd_errno mvdown.au_errno
-+#define mvd_bsrc mvdown.stbr[AUFS_MVDOWN_UPPER].bindex
-+#define mvd_src_brid mvdown.stbr[AUFS_MVDOWN_UPPER].brid
-+#define mvd_bdst mvdown.stbr[AUFS_MVDOWN_LOWER].bindex
-+#define mvd_dst_brid mvdown.stbr[AUFS_MVDOWN_LOWER].brid
-+
-+#define mvd_h_src_sb info[AUFS_MVDOWN_UPPER].h_sb
-+#define mvd_h_src_parent info[AUFS_MVDOWN_UPPER].h_parent
-+#define mvd_hdir_src info[AUFS_MVDOWN_UPPER].hdir
-+#define mvd_h_src_dir info[AUFS_MVDOWN_UPPER].h_dir
-+#define mvd_h_src_inode info[AUFS_MVDOWN_UPPER].h_inode
-+#define mvd_pin_src info[AUFS_MVDOWN_UPPER].pin
-+
-+#define mvd_h_dst_sb info[AUFS_MVDOWN_LOWER].h_sb
-+#define mvd_h_dst_parent info[AUFS_MVDOWN_LOWER].h_parent
-+#define mvd_hdir_dst info[AUFS_MVDOWN_LOWER].hdir
-+#define mvd_h_dst_dir info[AUFS_MVDOWN_LOWER].h_dir
-+#define mvd_h_dst_inode info[AUFS_MVDOWN_LOWER].h_inode
-+#define mvd_pin_dst info[AUFS_MVDOWN_LOWER].pin
-+
-+#define AU_MVD_PR(flag, ...) do { \
-+ if (flag) \
-+ pr_err(__VA_ARGS__); \
-+ } while (0)
-+
-+static int find_lower_writable(struct au_mvd_args *a)
-+{
-+ struct super_block *sb;
-+ aufs_bindex_t bindex, bend;
-+ struct au_branch *br;
-+
-+ sb = a->sb;
-+ bindex = a->mvd_bsrc;
-+ bend = au_sbend(sb);
-+ if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER)
-+ for (bindex++; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_fhsm(br->br_perm)
-+ && (!(au_br_sb(br)->s_flags & MS_RDONLY)))
-+ return bindex;
-+ }
-+ else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER))
-+ for (bindex++; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (!au_br_rdonly(br))
-+ return bindex;
-+ }
-+ else
-+ for (bindex++; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (!(au_br_sb(br)->s_flags & MS_RDONLY)) {
-+ if (au_br_rdonly(br))
-+ a->mvdown.flags
-+ |= AUFS_MVDOWN_ROLOWER_R;
-+ return bindex;
-+ }
-+ }
-+
-+ return -1;
-+}
-+
-+/* make the parent dir on bdst */
-+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+
-+ err = 0;
-+ a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc);
-+ a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst);
-+ a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc);
-+ a->mvd_h_dst_parent = NULL;
-+ if (au_dbend(a->parent) >= a->mvd_bdst)
-+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
-+ if (!a->mvd_h_dst_parent) {
-+ err = au_cpdown_dirs(a->dentry, a->mvd_bdst);
-+ if (unlikely(err)) {
-+ AU_MVD_PR(dmsg, "cpdown_dirs failed\n");
-+ goto out;
-+ }
-+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* lock them all */
-+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct dentry *h_trap;
-+
-+ a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc);
-+ a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst);
-+ err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst,
-+ au_opt_udba(a->sb),
-+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
-+ AuTraceErr(err);
-+ if (unlikely(err)) {
-+ AU_MVD_PR(dmsg, "pin_dst failed\n");
-+ goto out;
-+ }
-+
-+ if (a->mvd_h_src_sb != a->mvd_h_dst_sb) {
-+ a->rename_lock = 0;
-+ au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
-+ AuLsc_DI_PARENT, AuLsc_I_PARENT3,
-+ au_opt_udba(a->sb),
-+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
-+ err = au_do_pin(&a->mvd_pin_src);
-+ AuTraceErr(err);
-+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
-+ if (unlikely(err)) {
-+ AU_MVD_PR(dmsg, "pin_src failed\n");
-+ goto out_dst;
-+ }
-+ goto out; /* success */
-+ }
-+
-+ a->rename_lock = 1;
-+ au_pin_hdir_unlock(&a->mvd_pin_dst);
-+ err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
-+ au_opt_udba(a->sb),
-+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
-+ AuTraceErr(err);
-+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
-+ if (unlikely(err)) {
-+ AU_MVD_PR(dmsg, "pin_src failed\n");
-+ au_pin_hdir_lock(&a->mvd_pin_dst);
-+ goto out_dst;
-+ }
-+ au_pin_hdir_unlock(&a->mvd_pin_src);
-+ h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
-+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
-+ if (h_trap) {
-+ err = (h_trap != a->mvd_h_src_parent);
-+ if (err)
-+ err = (h_trap != a->mvd_h_dst_parent);
-+ }
-+ BUG_ON(err); /* it should never happen */
-+ if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) {
-+ err = -EBUSY;
-+ AuTraceErr(err);
-+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
-+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
-+ au_pin_hdir_lock(&a->mvd_pin_src);
-+ au_unpin(&a->mvd_pin_src);
-+ au_pin_hdir_lock(&a->mvd_pin_dst);
-+ goto out_dst;
-+ }
-+ goto out; /* success */
-+
-+out_dst:
-+ au_unpin(&a->mvd_pin_dst);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ if (!a->rename_lock)
-+ au_unpin(&a->mvd_pin_src);
-+ else {
-+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
-+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
-+ au_pin_hdir_lock(&a->mvd_pin_src);
-+ au_unpin(&a->mvd_pin_src);
-+ au_pin_hdir_lock(&a->mvd_pin_dst);
-+ }
-+ au_unpin(&a->mvd_pin_dst);
-+}
-+
-+/* copy-down the file */
-+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct au_cp_generic cpg = {
-+ .dentry = a->dentry,
-+ .bdst = a->mvd_bdst,
-+ .bsrc = a->mvd_bsrc,
-+ .len = -1,
-+ .pin = &a->mvd_pin_dst,
-+ .flags = AuCpup_DTIME | AuCpup_HOPEN
-+ };
-+
-+ AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst);
-+ if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
-+ au_fset_cpup(cpg.flags, OVERWRITE);
-+ if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER)
-+ au_fset_cpup(cpg.flags, RWDST);
-+ err = au_sio_cpdown_simple(&cpg);
-+ if (unlikely(err))
-+ AU_MVD_PR(dmsg, "cpdown failed\n");
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/*
-+ * unlink the whiteout on bdst, if it exists; it may have been created by
-+ * UDBA while we were sleeping
-+ */
-+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct path h_path;
-+ struct au_branch *br;
-+ struct inode *delegated;
-+
-+ br = au_sbr(a->sb, a->mvd_bdst);
-+ h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br);
-+ err = PTR_ERR(h_path.dentry);
-+ if (IS_ERR(h_path.dentry)) {
-+ AU_MVD_PR(dmsg, "wh_lkup failed\n");
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (d_is_positive(h_path.dentry)) {
-+ h_path.mnt = au_br_mnt(br);
-+ delegated = NULL;
-+ err = vfsub_unlink(d_inode(a->mvd_h_dst_parent), &h_path,
-+ &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ if (unlikely(err))
-+ AU_MVD_PR(dmsg, "wh_unlink failed\n");
-+ }
-+ dput(h_path.dentry);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/*
-+ * unlink the topmost h_dentry
-+ */
-+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct path h_path;
-+ struct inode *delegated;
-+
-+ h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc);
-+ h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc);
-+ delegated = NULL;
-+ err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ if (unlikely(err))
-+ AU_MVD_PR(dmsg, "unlink failed\n");
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* Since mvdown succeeded, we ignore any error from this function */
-+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct au_branch *br;
-+
-+ a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED;
-+ br = au_sbr(a->sb, a->mvd_bsrc);
-+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs);
-+ if (!err) {
-+ br = au_sbr(a->sb, a->mvd_bdst);
-+ a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id;
-+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs);
-+ }
-+ if (!err)
-+ a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED;
-+ else
-+ AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err);
-+}
-+
-+/*
-+ * copy-down the file and unlink the bsrc file.
-+ * - unlink the bdst whiteout if it exists
-+ * - copy-down the file (with whtmp name and rename)
-+ * - unlink the bsrc file
-+ */
-+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+
-+ err = au_do_mkdir(dmsg, a);
-+ if (!err)
-+ err = au_do_lock(dmsg, a);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /*
-+ * do not revert the activities we made on bdst since they should be
-+ * harmless in aufs.
-+ */
-+
-+ err = au_do_cpdown(dmsg, a);
-+ if (!err)
-+ err = au_do_unlink_wh(dmsg, a);
-+ if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER))
-+ err = au_do_unlink(dmsg, a);
-+ if (unlikely(err))
-+ goto out_unlock;
-+
-+ AuDbg("%pd2, 0x%x, %d --> %d\n",
-+ a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst);
-+ if (find_lower_writable(a) < 0)
-+ a->mvdown.flags |= AUFS_MVDOWN_BOTTOM;
-+
-+ if (a->mvdown.flags & AUFS_MVDOWN_STFS)
-+ au_do_stfs(dmsg, a);
-+
-+ /* maintain internal array */
-+ if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) {
-+ au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL);
-+ au_set_dbstart(a->dentry, a->mvd_bdst);
-+ au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0);
-+ au_set_ibstart(a->inode, a->mvd_bdst);
-+ }
-+ if (au_dbend(a->dentry) < a->mvd_bdst)
-+ au_set_dbend(a->dentry, a->mvd_bdst);
-+ if (au_ibend(a->inode) < a->mvd_bdst)
-+ au_set_ibend(a->inode, a->mvd_bdst);
-+
-+out_unlock:
-+ au_do_unlock(dmsg, a);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* make sure the file is idle */
-+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err, plinked;
-+
-+ err = 0;
-+ plinked = !!au_opt_test(au_mntflags(a->sb), PLINK);
-+ if (au_dbstart(a->dentry) == a->mvd_bsrc
-+ && au_dcount(a->dentry) == 1
-+ && atomic_read(&a->inode->i_count) == 1
-+ /* && a->mvd_h_src_inode->i_nlink == 1 */
-+ && (!plinked || !au_plink_test(a->inode))
-+ && a->inode->i_nlink == 1)
-+ goto out;
-+
-+ err = -EBUSY;
-+ AU_MVD_PR(dmsg,
-+ "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n",
-+ a->mvd_bsrc, au_dbstart(a->dentry), au_dcount(a->dentry),
-+ atomic_read(&a->inode->i_count), a->inode->i_nlink,
-+ a->mvd_h_src_inode->i_nlink,
-+ plinked, plinked ? au_plink_test(a->inode) : 0);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* make sure the parent dir is fine */
-+static int au_mvd_args_parent(const unsigned char dmsg,
-+ struct au_mvd_args *a)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+
-+ err = 0;
-+ if (unlikely(au_alive_dir(a->parent))) {
-+ err = -ENOENT;
-+ AU_MVD_PR(dmsg, "parent dir is dead\n");
-+ goto out;
-+ }
-+
-+ a->bopq = au_dbdiropq(a->parent);
-+ bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst);
-+ AuDbg("b%d\n", bindex);
-+ if (unlikely((bindex >= 0 && bindex < a->mvd_bdst)
-+ || (a->bopq != -1 && a->bopq < a->mvd_bdst))) {
-+ err = -EINVAL;
-+ a->mvd_errno = EAU_MVDOWN_OPAQUE;
-+ AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n",
-+ a->bopq, a->mvd_bdst);
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_mvd_args_intermediate(const unsigned char dmsg,
-+ struct au_mvd_args *a)
-+{
-+ int err;
-+ struct au_dinfo *dinfo, *tmp;
-+
-+ /* lookup the next lower positive entry */
-+ err = -ENOMEM;
-+ tmp = au_di_alloc(a->sb, AuLsc_DI_TMP);
-+ if (unlikely(!tmp))
-+ goto out;
-+
-+ a->bfound = -1;
-+ a->bwh = -1;
-+ dinfo = au_di(a->dentry);
-+ au_di_cp(tmp, dinfo);
-+ au_di_swap(tmp, dinfo);
-+
-+ /* returns the number of positive dentries */
-+ err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1, /*type*/0);
-+ if (!err)
-+ a->bwh = au_dbwh(a->dentry);
-+ else if (err > 0)
-+ a->bfound = au_dbstart(a->dentry);
-+
-+ au_di_swap(tmp, dinfo);
-+ au_rw_write_unlock(&tmp->di_rwsem);
-+ au_di_free(tmp);
-+ if (unlikely(err < 0))
-+ AU_MVD_PR(dmsg, "failed look-up lower\n");
-+
-+ /*
-+ * here, we have these cases.
-+ * bfound == -1
-+ * no positive dentry under bsrc. there are more sub-cases.
-+ * bwh < 0
-+ * there is no whiteout; we can safely move down.
-+ * bwh <= bsrc
-+ * impossible
-+ * bsrc < bwh && bwh < bdst
-+ * there is a whiteout on RO branch. cannot proceed.
-+ * bwh == bdst
-+ * there is a whiteout on the RW target branch. it should
-+ * be removed.
-+ * bdst < bwh
-+ * there is a whiteout on some unrelated branch.
-+ * -1 < bfound && bfound <= bsrc
-+ * impossible.
-+ * bfound < bdst
-+ * found, but it is on RO branch between bsrc and bdst. cannot
-+ * proceed.
-+ * bfound == bdst
-+ * found, replace it if AUFS_MVDOWN_FORCE is set. otherwise return
-+ * error.
-+ * bdst < bfound
-+ * found, after we create the file on bdst, it will be hidden.
-+ */
-+
-+ AuDebugOn(a->bfound == -1
-+ && a->bwh != -1
-+ && a->bwh <= a->mvd_bsrc);
-+ AuDebugOn(-1 < a->bfound
-+ && a->bfound <= a->mvd_bsrc);
-+
-+ err = -EINVAL;
-+ if (a->bfound == -1
-+ && a->mvd_bsrc < a->bwh
-+ && a->bwh != -1
-+ && a->bwh < a->mvd_bdst) {
-+ a->mvd_errno = EAU_MVDOWN_WHITEOUT;
-+ AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n",
-+ a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh);
-+ goto out;
-+ } else if (a->bfound != -1 && a->bfound < a->mvd_bdst) {
-+ a->mvd_errno = EAU_MVDOWN_UPPER;
-+ AU_MVD_PR(dmsg, "bdst %d, bfound %d\n",
-+ a->mvd_bdst, a->bfound);
-+ goto out;
-+ }
-+
-+ err = 0; /* success */
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
-+ && a->bfound == a->mvd_bdst)
-+ err = -EEXIST;
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a)
-+{
-+ int err;
-+ struct au_branch *br;
-+
-+ err = -EISDIR;
-+ if (unlikely(S_ISDIR(a->inode->i_mode)))
-+ goto out;
-+
-+ err = -EINVAL;
-+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER))
-+ a->mvd_bsrc = au_ibstart(a->inode);
-+ else {
-+ a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid);
-+ if (unlikely(a->mvd_bsrc < 0
-+ || (a->mvd_bsrc < au_dbstart(a->dentry)
-+ || au_dbend(a->dentry) < a->mvd_bsrc
-+ || !au_h_dptr(a->dentry, a->mvd_bsrc))
-+ || (a->mvd_bsrc < au_ibstart(a->inode)
-+ || au_ibend(a->inode) < a->mvd_bsrc
-+ || !au_h_iptr(a->inode, a->mvd_bsrc)))) {
-+ a->mvd_errno = EAU_MVDOWN_NOUPPER;
-+ AU_MVD_PR(dmsg, "no upper\n");
-+ goto out;
-+ }
-+ }
-+ if (unlikely(a->mvd_bsrc == au_sbend(a->sb))) {
-+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
-+ AU_MVD_PR(dmsg, "on the bottom\n");
-+ goto out;
-+ }
-+ a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc);
-+ br = au_sbr(a->sb, a->mvd_bsrc);
-+ err = au_br_rdonly(br);
-+ if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) {
-+ if (unlikely(err))
-+ goto out;
-+ } else if (!(vfsub_native_ro(a->mvd_h_src_inode)
-+ || IS_APPEND(a->mvd_h_src_inode))) {
-+ if (err)
-+ a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R;
-+ /* go on */
-+ } else
-+ goto out;
-+
-+ err = -EINVAL;
-+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) {
-+ a->mvd_bdst = find_lower_writable(a);
-+ if (unlikely(a->mvd_bdst < 0)) {
-+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
-+ AU_MVD_PR(dmsg, "no writable lower branch\n");
-+ goto out;
-+ }
-+ } else {
-+ a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid);
-+ if (unlikely(a->mvd_bdst < 0
-+ || au_sbend(a->sb) < a->mvd_bdst)) {
-+ a->mvd_errno = EAU_MVDOWN_NOLOWERBR;
-+ AU_MVD_PR(dmsg, "no lower brid\n");
-+ goto out;
-+ }
-+ }
-+
-+ err = au_mvd_args_busy(dmsg, a);
-+ if (!err)
-+ err = au_mvd_args_parent(dmsg, a);
-+ if (!err)
-+ err = au_mvd_args_intermediate(dmsg, a);
-+ if (!err)
-+ err = au_mvd_args_exist(dmsg, a);
-+ if (!err)
-+ AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg)
-+{
-+ int err, e;
-+ unsigned char dmsg;
-+ struct au_mvd_args *args;
-+
-+ err = -EPERM;
-+ if (unlikely(!capable(CAP_SYS_ADMIN)))
-+ goto out;
-+
-+ err = -ENOMEM;
-+ args = kmalloc(sizeof(*args), GFP_NOFS);
-+ if (unlikely(!args))
-+ goto out;
-+
-+ err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown));
-+ if (!err)
-+ err = !access_ok(VERIFY_WRITE, uarg, sizeof(*uarg));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ goto out_free;
-+ }
-+ AuDbg("flags 0x%x\n", args->mvdown.flags);
-+ args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R);
-+ args->mvdown.au_errno = 0;
-+ args->dentry = dentry;
-+ args->inode = d_inode(dentry);
-+ args->sb = dentry->d_sb;
-+
-+ err = -ENOENT;
-+ dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG);
-+ args->parent = dget_parent(dentry);
-+ args->dir = d_inode(args->parent);
-+ mutex_lock_nested(&args->dir->i_mutex, I_MUTEX_PARENT);
-+ dput(args->parent);
-+ if (unlikely(args->parent != dentry->d_parent)) {
-+ AU_MVD_PR(dmsg, "parent dir is moved\n");
-+ goto out_dir;
-+ }
-+
-+ mutex_lock_nested(&args->inode->i_mutex, I_MUTEX_CHILD);
-+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH);
-+ if (unlikely(err))
-+ goto out_inode;
-+
-+ di_write_lock_parent(args->parent);
-+ err = au_mvd_args(dmsg, args);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ err = au_do_mvdown(dmsg, args);
-+ if (unlikely(err))
-+ goto out_parent;
-+
-+ au_cpup_attr_timesizes(args->dir);
-+ au_cpup_attr_timesizes(args->inode);
-+ au_cpup_igen(args->inode, au_h_iptr(args->inode, args->mvd_bdst));
-+ /* au_digen_dec(dentry); */
-+
-+out_parent:
-+ di_write_unlock(args->parent);
-+ aufs_read_unlock(dentry, AuLock_DW);
-+out_inode:
-+ mutex_unlock(&args->inode->i_mutex);
-+out_dir:
-+ mutex_unlock(&args->dir->i_mutex);
-+out_free:
-+ e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown));
-+ if (unlikely(e))
-+ err = -EFAULT;
-+ kfree(args);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
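
au_mvdown() is reached through an ioctl on the file to be demoted, and requires CAP_SYS_ADMIN per the check above. A hedged user-space sketch, assuming the uapi header added by this patch set is installed as <linux/aufs_type.h> (AUFS_CTL_MVDOWN and struct aufs_mvdown come from there); the aufs-util package drives the ioctl the same way:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/aufs_type.h>    /* struct aufs_mvdown, AUFS_CTL_MVDOWN */

    int main(int argc, char *argv[])
    {
            struct aufs_mvdown mvdown = {
                    .flags = AUFS_MVDOWN_DMSG  /* verbose kernel diagnostics */
            };
            int fd;

            if (argc != 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0 || ioctl(fd, AUFS_CTL_MVDOWN, &mvdown) < 0) {
                    perror("mvdown");
                    return 1;
            }
            printf("moved down to branch index %d\n",
                   mvdown.stbr[AUFS_MVDOWN_LOWER].bindex);
            close(fd);
            return 0;
    }
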
-diff -Nur linux-4.1.10.orig/fs/aufs/opts.c linux-4.1.10/fs/aufs/opts.c
---- linux-4.1.10.orig/fs/aufs/opts.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/opts.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1835 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * mount options/flags
-+ */
-+
-+#include <linux/namei.h>
-+#include <linux/types.h> /* some distributions require this */
-+#include <linux/parser.h>
-+#include "aufs.h"
-+
-+/* ---------------------------------------------------------------------- */
-+
-+enum {
-+ Opt_br,
-+ Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend,
-+ Opt_idel, Opt_imod,
-+ Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash,
-+ Opt_rdblk_def, Opt_rdhash_def,
-+ Opt_xino, Opt_noxino,
-+ Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
-+ Opt_trunc_xino_path, Opt_itrunc_xino,
-+ Opt_trunc_xib, Opt_notrunc_xib,
-+ Opt_shwh, Opt_noshwh,
-+ Opt_plink, Opt_noplink, Opt_list_plink,
-+ Opt_udba,
-+ Opt_dio, Opt_nodio,
-+ Opt_diropq_a, Opt_diropq_w,
-+ Opt_warn_perm, Opt_nowarn_perm,
-+ Opt_wbr_copyup, Opt_wbr_create,
-+ Opt_fhsm_sec,
-+ Opt_verbose, Opt_noverbose,
-+ Opt_sum, Opt_nosum, Opt_wsum,
-+ Opt_dirperm1, Opt_nodirperm1,
-+ Opt_acl, Opt_noacl,
-+ Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
-+};
-+
-+static match_table_t options = {
-+ {Opt_br, "br=%s"},
-+ {Opt_br, "br:%s"},
-+
-+ {Opt_add, "add=%d:%s"},
-+ {Opt_add, "add:%d:%s"},
-+ {Opt_add, "ins=%d:%s"},
-+ {Opt_add, "ins:%d:%s"},
-+ {Opt_append, "append=%s"},
-+ {Opt_append, "append:%s"},
-+ {Opt_prepend, "prepend=%s"},
-+ {Opt_prepend, "prepend:%s"},
-+
-+ {Opt_del, "del=%s"},
-+ {Opt_del, "del:%s"},
-+ /* {Opt_idel, "idel:%d"}, */
-+ {Opt_mod, "mod=%s"},
-+ {Opt_mod, "mod:%s"},
-+ /* {Opt_imod, "imod:%d:%s"}, */
-+
-+ {Opt_dirwh, "dirwh=%d"},
-+
-+ {Opt_xino, "xino=%s"},
-+ {Opt_noxino, "noxino"},
-+ {Opt_trunc_xino, "trunc_xino"},
-+ {Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
-+ {Opt_notrunc_xino, "notrunc_xino"},
-+ {Opt_trunc_xino_path, "trunc_xino=%s"},
-+ {Opt_itrunc_xino, "itrunc_xino=%d"},
-+ /* {Opt_zxino, "zxino=%s"}, */
-+ {Opt_trunc_xib, "trunc_xib"},
-+ {Opt_notrunc_xib, "notrunc_xib"},
-+
-+#ifdef CONFIG_PROC_FS
-+ {Opt_plink, "plink"},
-+#else
-+ {Opt_ignore_silent, "plink"},
-+#endif
-+
-+ {Opt_noplink, "noplink"},
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+ {Opt_list_plink, "list_plink"},
-+#endif
-+
-+ {Opt_udba, "udba=%s"},
-+
-+ {Opt_dio, "dio"},
-+ {Opt_nodio, "nodio"},
-+
-+#ifdef CONFIG_AUFS_FHSM
-+ {Opt_fhsm_sec, "fhsm_sec=%d"},
-+#else
-+ {Opt_ignore_silent, "fhsm_sec=%d"},
-+#endif
-+
-+ {Opt_diropq_a, "diropq=always"},
-+ {Opt_diropq_a, "diropq=a"},
-+ {Opt_diropq_w, "diropq=whiteouted"},
-+ {Opt_diropq_w, "diropq=w"},
-+
-+ {Opt_warn_perm, "warn_perm"},
-+ {Opt_nowarn_perm, "nowarn_perm"},
-+
-+ /* keep them temporary */
-+ {Opt_ignore_silent, "nodlgt"},
-+ {Opt_ignore_silent, "clean_plink"},
-+
-+#ifdef CONFIG_AUFS_SHWH
-+ {Opt_shwh, "shwh"},
-+#endif
-+ {Opt_noshwh, "noshwh"},
-+
-+ {Opt_dirperm1, "dirperm1"},
-+ {Opt_nodirperm1, "nodirperm1"},
-+
-+ {Opt_verbose, "verbose"},
-+ {Opt_verbose, "v"},
-+ {Opt_noverbose, "noverbose"},
-+ {Opt_noverbose, "quiet"},
-+ {Opt_noverbose, "q"},
-+ {Opt_noverbose, "silent"},
-+
-+ {Opt_sum, "sum"},
-+ {Opt_nosum, "nosum"},
-+ {Opt_wsum, "wsum"},
-+
-+ {Opt_rdcache, "rdcache=%d"},
-+ {Opt_rdblk, "rdblk=%d"},
-+ {Opt_rdblk_def, "rdblk=def"},
-+ {Opt_rdhash, "rdhash=%d"},
-+ {Opt_rdhash_def, "rdhash=def"},
-+
-+ {Opt_wbr_create, "create=%s"},
-+ {Opt_wbr_create, "create_policy=%s"},
-+ {Opt_wbr_copyup, "cpup=%s"},
-+ {Opt_wbr_copyup, "copyup=%s"},
-+ {Opt_wbr_copyup, "copyup_policy=%s"},
-+
-+ /* generic VFS flag */
-+#ifdef CONFIG_FS_POSIX_ACL
-+ {Opt_acl, "acl"},
-+ {Opt_noacl, "noacl"},
-+#else
-+ {Opt_ignore_silent, "acl"},
-+ {Opt_ignore_silent, "noacl"},
-+#endif
-+
-+ /* internal use for the scripts */
-+ {Opt_ignore_silent, "si=%s"},
-+
-+ {Opt_br, "dirs=%s"},
-+ {Opt_ignore, "debug=%d"},
-+ {Opt_ignore, "delete=whiteout"},
-+ {Opt_ignore, "delete=all"},
-+ {Opt_ignore, "imap=%s"},
-+
-+ /* temporary workaround, due to old mount(8)? */
-+ {Opt_ignore_silent, "relatime"},
-+
-+ {Opt_err, NULL}
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static const char *au_parser_pattern(int val, match_table_t tbl)
-+{
-+ struct match_token *p;
-+
-+ p = tbl;
-+ while (p->pattern) {
-+ if (p->token == val)
-+ return p->pattern;
-+ p++;
-+ }
-+ BUG();
-+ return "??";
-+}
-+
-+static const char *au_optstr(int *val, match_table_t tbl)
-+{
-+ struct match_token *p;
-+ int v;
-+
-+ v = *val;
-+ if (!v)
-+ goto out;
-+ p = tbl;
-+ while (p->pattern) {
-+ if (p->token
-+ && (v & p->token) == p->token) {
-+ *val &= ~p->token;
-+ return p->pattern;
-+ }
-+ p++;
-+ }
-+
-+out:
-+ return NULL;
-+}
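
au_parser_pattern() and au_optstr() walk a match_table_t backwards, from value to pattern. Going the other way, pattern to value, is the kernel's match_token() API, which the big option parser below leans on throughout. A minimal sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/parser.h>

    enum { Opt_demo_depth, Opt_demo_err };

    static match_table_t demo_tbl = {
            {Opt_demo_depth, "depth=%d"},
            {Opt_demo_err, NULL}
    };

    /* demo_parse("depth=3") == 3; anything else yields -EINVAL */
    static int demo_parse(char *s)
    {
            substring_t args[MAX_OPT_ARGS];
            int n;

            if (match_token(s, demo_tbl, args) != Opt_demo_depth
                || match_int(&args[0], &n))
                    return -EINVAL;
            return n;
    }
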
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static match_table_t brperm = {
-+ {AuBrPerm_RO, AUFS_BRPERM_RO},
-+ {AuBrPerm_RR, AUFS_BRPERM_RR},
-+ {AuBrPerm_RW, AUFS_BRPERM_RW},
-+ {0, NULL}
-+};
-+
-+static match_table_t brattr = {
-+ /* general */
-+ {AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG},
-+ {AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL},
-+ /* 'unpin' attrib is meaningless since linux-3.18-rc1 */
-+ {AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
-+#ifdef CONFIG_AUFS_FHSM
-+ {AuBrAttr_FHSM, AUFS_BRATTR_FHSM},
-+#endif
-+#ifdef CONFIG_AUFS_XATTR
-+ {AuBrAttr_ICEX, AUFS_BRATTR_ICEX},
-+ {AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC},
-+ {AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS},
-+ {AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR},
-+ {AuBrAttr_ICEX_USR, AUFS_BRATTR_ICEX_USR},
-+ {AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH},
-+#endif
-+
-+ /* ro/rr branch */
-+ {AuBrRAttr_WH, AUFS_BRRATTR_WH},
-+
-+ /* rw branch */
-+ {AuBrWAttr_MOO, AUFS_BRWATTR_MOO},
-+ {AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
-+
-+ {0, NULL}
-+};
-+
-+static int br_attr_val(char *str, match_table_t table, substring_t args[])
-+{
-+ int attr, v;
-+ char *p;
-+
-+ attr = 0;
-+ do {
-+ p = strchr(str, '+');
-+ if (p)
-+ *p = 0;
-+ v = match_token(str, table, args);
-+ if (v) {
-+ if (v & AuBrAttr_CMOO_Mask)
-+ attr &= ~AuBrAttr_CMOO_Mask;
-+ attr |= v;
-+ } else {
-+ if (p)
-+ *p = '+';
-+ pr_warn("ignored branch attribute %s\n", str);
-+ break;
-+ }
-+ if (p)
-+ str = p + 1;
-+ } while (p);
-+
-+ return attr;
-+}
-+
-+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm)
-+{
-+ int sz;
-+ const char *p;
-+ char *q;
-+
-+ q = str->a;
-+ *q = 0;
-+ p = au_optstr(&perm, brattr);
-+ if (p) {
-+ sz = strlen(p);
-+ memcpy(q, p, sz + 1);
-+ q += sz;
-+ } else
-+ goto out;
-+
-+ do {
-+ p = au_optstr(&perm, brattr);
-+ if (p) {
-+ *q++ = '+';
-+ sz = strlen(p);
-+ memcpy(q, p, sz + 1);
-+ q += sz;
-+ }
-+ } while (p);
-+
-+out:
-+ return q - str->a;
-+}
-+
-+static int noinline_for_stack br_perm_val(char *perm)
-+{
-+ int val, bad, sz;
-+ char *p;
-+ substring_t args[MAX_OPT_ARGS];
-+ au_br_perm_str_t attr;
-+
-+ p = strchr(perm, '+');
-+ if (p)
-+ *p = 0;
-+ val = match_token(perm, brperm, args);
-+ if (!val) {
-+ if (p)
-+ *p = '+';
-+ pr_warn("ignored branch permission %s\n", perm);
-+ val = AuBrPerm_RO;
-+ goto out;
-+ }
-+ if (!p)
-+ goto out;
-+
-+ val |= br_attr_val(p + 1, brattr, args);
-+
-+ bad = 0;
-+ switch (val & AuBrPerm_Mask) {
-+ case AuBrPerm_RO:
-+ case AuBrPerm_RR:
-+ bad = val & AuBrWAttr_Mask;
-+ val &= ~AuBrWAttr_Mask;
-+ break;
-+ case AuBrPerm_RW:
-+ bad = val & AuBrRAttr_Mask;
-+ val &= ~AuBrRAttr_Mask;
-+ break;
-+ }
-+
-+ /*
-+ * the 'unpin' attrib became meaningless as of linux-3.18-rc1, but aufs
-+ * does not treat it as an error, just a warning.
-+ * this is a tiny guard for the user operation.
-+ */
-+ if (val & AuBrAttr_UNPIN) {
-+ bad |= AuBrAttr_UNPIN;
-+ val &= ~AuBrAttr_UNPIN;
-+ }
-+
-+ if (unlikely(bad)) {
-+ sz = au_do_optstr_br_attr(&attr, bad);
-+ AuDebugOn(!sz);
-+ pr_warn("ignored branch attribute %s\n", attr.a);
-+ }
-+
-+out:
-+ return val;
-+}
-+
-+void au_optstr_br_perm(au_br_perm_str_t *str, int perm)
-+{
-+ au_br_perm_str_t attr;
-+ const char *p;
-+ char *q;
-+ int sz;
-+
-+ q = str->a;
-+ p = au_optstr(&perm, brperm);
-+ AuDebugOn(!p || !*p);
-+ sz = strlen(p);
-+ memcpy(q, p, sz + 1);
-+ q += sz;
-+
-+ sz = au_do_optstr_br_attr(&attr, perm);
-+ if (sz) {
-+ *q++ = '+';
-+ memcpy(q, attr.a, sz + 1);
-+ }
-+
-+ AuDebugOn(strlen(str->a) >= sizeof(str->a));
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static match_table_t udbalevel = {
-+ {AuOpt_UDBA_REVAL, "reval"},
-+ {AuOpt_UDBA_NONE, "none"},
-+#ifdef CONFIG_AUFS_HNOTIFY
-+ {AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
-+#ifdef CONFIG_AUFS_HFSNOTIFY
-+ {AuOpt_UDBA_HNOTIFY, "fsnotify"},
-+#endif
-+#endif
-+ {-1, NULL}
-+};
-+
-+static int noinline_for_stack udba_val(char *str)
-+{
-+ substring_t args[MAX_OPT_ARGS];
-+
-+ return match_token(str, udbalevel, args);
-+}
-+
-+const char *au_optstr_udba(int udba)
-+{
-+ return au_parser_pattern(udba, udbalevel);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static match_table_t au_wbr_create_policy = {
-+ {AuWbrCreate_TDP, "tdp"},
-+ {AuWbrCreate_TDP, "top-down-parent"},
-+ {AuWbrCreate_RR, "rr"},
-+ {AuWbrCreate_RR, "round-robin"},
-+ {AuWbrCreate_MFS, "mfs"},
-+ {AuWbrCreate_MFS, "most-free-space"},
-+ {AuWbrCreate_MFSV, "mfs:%d"},
-+ {AuWbrCreate_MFSV, "most-free-space:%d"},
-+
-+ {AuWbrCreate_MFSRR, "mfsrr:%d"},
-+ {AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
-+ {AuWbrCreate_PMFS, "pmfs"},
-+ {AuWbrCreate_PMFSV, "pmfs:%d"},
-+ {AuWbrCreate_PMFSRR, "pmfsrr:%d"},
-+ {AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"},
-+
-+ {-1, NULL}
-+};
-+
-+/*
-+ * cf. linux/lib/parser.c and cmdline.c
-+ * gave up calling memparse() since it uses simple_strtoull() instead of
-+ * kstrto...().
-+ */
-+static int noinline_for_stack
-+au_match_ull(substring_t *s, unsigned long long *result)
-+{
-+ int err;
-+ unsigned int len;
-+ char a[32];
-+
-+ err = -ERANGE;
-+ len = s->to - s->from;
-+ if (len + 1 <= sizeof(a)) {
-+ memcpy(a, s->from, len);
-+ a[len] = '\0';
-+ err = kstrtoull(a, 0, result);
-+ }
-+ return err;
-+}
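
The point of the comment above: kstrtoull() fails on trailing garbage, whereas simple_strtoull() (and therefore memparse()) silently stops parsing at it. A quick illustration, kernel context assumed:

    #include <linux/bug.h>
    #include <linux/kernel.h>

    static void demo(void)
    {
            unsigned long long v;

            WARN_ON(kstrtoull("42", 0, &v) != 0);        /* ok, v == 42 */
            WARN_ON(kstrtoull("42x", 0, &v) != -EINVAL); /* junk rejected */
    }
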
-+
-+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
-+ struct au_opt_wbr_create *create)
-+{
-+ int err;
-+ unsigned long long ull;
-+
-+ err = 0;
-+ if (!au_match_ull(arg, &ull))
-+ create->mfsrr_watermark = ull;
-+ else {
-+ pr_err("bad integer in %s\n", str);
-+ err = -EINVAL;
-+ }
-+
-+ return err;
-+}
-+
-+static int au_wbr_mfs_sec(substring_t *arg, char *str,
-+ struct au_opt_wbr_create *create)
-+{
-+ int n, err;
-+
-+ err = 0;
-+ if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
-+ create->mfs_second = n;
-+ else {
-+ pr_err("bad integer in %s\n", str);
-+ err = -EINVAL;
-+ }
-+
-+ return err;
-+}
-+
-+static int noinline_for_stack
-+au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
-+{
-+ int err, e;
-+ substring_t args[MAX_OPT_ARGS];
-+
-+ err = match_token(str, au_wbr_create_policy, args);
-+ create->wbr_create = err;
-+ switch (err) {
-+ case AuWbrCreate_MFSRRV:
-+ case AuWbrCreate_PMFSRRV:
-+ e = au_wbr_mfs_wmark(&args[0], str, create);
-+ if (!e)
-+ e = au_wbr_mfs_sec(&args[1], str, create);
-+ if (unlikely(e))
-+ err = e;
-+ break;
-+ case AuWbrCreate_MFSRR:
-+ case AuWbrCreate_PMFSRR:
-+ e = au_wbr_mfs_wmark(&args[0], str, create);
-+ if (unlikely(e)) {
-+ err = e;
-+ break;
-+ }
-+ /*FALLTHROUGH*/
-+ case AuWbrCreate_MFS:
-+ case AuWbrCreate_PMFS:
-+ create->mfs_second = AUFS_MFS_DEF_SEC;
-+ break;
-+ case AuWbrCreate_MFSV:
-+ case AuWbrCreate_PMFSV:
-+ e = au_wbr_mfs_sec(&args[0], str, create);
-+ if (unlikely(e))
-+ err = e;
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+const char *au_optstr_wbr_create(int wbr_create)
-+{
-+ return au_parser_pattern(wbr_create, au_wbr_create_policy);
-+}
-+
-+static match_table_t au_wbr_copyup_policy = {
-+ {AuWbrCopyup_TDP, "tdp"},
-+ {AuWbrCopyup_TDP, "top-down-parent"},
-+ {AuWbrCopyup_BUP, "bup"},
-+ {AuWbrCopyup_BUP, "bottom-up-parent"},
-+ {AuWbrCopyup_BU, "bu"},
-+ {AuWbrCopyup_BU, "bottom-up"},
-+ {-1, NULL}
-+};
-+
-+static int noinline_for_stack au_wbr_copyup_val(char *str)
-+{
-+ substring_t args[MAX_OPT_ARGS];
-+
-+ return match_token(str, au_wbr_copyup_policy, args);
-+}
-+
-+const char *au_optstr_wbr_copyup(int wbr_copyup)
-+{
-+ return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
-+
-+static void dump_opts(struct au_opts *opts)
-+{
-+#ifdef CONFIG_AUFS_DEBUG
-+ /* reduce stack space */
-+ union {
-+ struct au_opt_add *add;
-+ struct au_opt_del *del;
-+ struct au_opt_mod *mod;
-+ struct au_opt_xino *xino;
-+ struct au_opt_xino_itrunc *xino_itrunc;
-+ struct au_opt_wbr_create *create;
-+ } u;
-+ struct au_opt *opt;
-+
-+ opt = opts->opt;
-+ while (opt->type != Opt_tail) {
-+ switch (opt->type) {
-+ case Opt_add:
-+ u.add = &opt->add;
-+ AuDbg("add {b%d, %s, 0x%x, %p}\n",
-+ u.add->bindex, u.add->pathname, u.add->perm,
-+ u.add->path.dentry);
-+ break;
-+ case Opt_del:
-+ case Opt_idel:
-+ u.del = &opt->del;
-+ AuDbg("del {%s, %p}\n",
-+ u.del->pathname, u.del->h_path.dentry);
-+ break;
-+ case Opt_mod:
-+ case Opt_imod:
-+ u.mod = &opt->mod;
-+ AuDbg("mod {%s, 0x%x, %p}\n",
-+ u.mod->path, u.mod->perm, u.mod->h_root);
-+ break;
-+ case Opt_append:
-+ u.add = &opt->add;
-+ AuDbg("append {b%d, %s, 0x%x, %p}\n",
-+ u.add->bindex, u.add->pathname, u.add->perm,
-+ u.add->path.dentry);
-+ break;
-+ case Opt_prepend:
-+ u.add = &opt->add;
-+ AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
-+ u.add->bindex, u.add->pathname, u.add->perm,
-+ u.add->path.dentry);
-+ break;
-+ case Opt_dirwh:
-+ AuDbg("dirwh %d\n", opt->dirwh);
-+ break;
-+ case Opt_rdcache:
-+ AuDbg("rdcache %d\n", opt->rdcache);
-+ break;
-+ case Opt_rdblk:
-+ AuDbg("rdblk %u\n", opt->rdblk);
-+ break;
-+ case Opt_rdblk_def:
-+ AuDbg("rdblk_def\n");
-+ break;
-+ case Opt_rdhash:
-+ AuDbg("rdhash %u\n", opt->rdhash);
-+ break;
-+ case Opt_rdhash_def:
-+ AuDbg("rdhash_def\n");
-+ break;
-+ case Opt_xino:
-+ u.xino = &opt->xino;
-+ AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file);
-+ break;
-+ case Opt_trunc_xino:
-+ AuLabel(trunc_xino);
-+ break;
-+ case Opt_notrunc_xino:
-+ AuLabel(notrunc_xino);
-+ break;
-+ case Opt_trunc_xino_path:
-+ case Opt_itrunc_xino:
-+ u.xino_itrunc = &opt->xino_itrunc;
-+ AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
-+ break;
-+ case Opt_noxino:
-+ AuLabel(noxino);
-+ break;
-+ case Opt_trunc_xib:
-+ AuLabel(trunc_xib);
-+ break;
-+ case Opt_notrunc_xib:
-+ AuLabel(notrunc_xib);
-+ break;
-+ case Opt_shwh:
-+ AuLabel(shwh);
-+ break;
-+ case Opt_noshwh:
-+ AuLabel(noshwh);
-+ break;
-+ case Opt_dirperm1:
-+ AuLabel(dirperm1);
-+ break;
-+ case Opt_nodirperm1:
-+ AuLabel(nodirperm1);
-+ break;
-+ case Opt_plink:
-+ AuLabel(plink);
-+ break;
-+ case Opt_noplink:
-+ AuLabel(noplink);
-+ break;
-+ case Opt_list_plink:
-+ AuLabel(list_plink);
-+ break;
-+ case Opt_udba:
-+ AuDbg("udba %d, %s\n",
-+ opt->udba, au_optstr_udba(opt->udba));
-+ break;
-+ case Opt_dio:
-+ AuLabel(dio);
-+ break;
-+ case Opt_nodio:
-+ AuLabel(nodio);
-+ break;
-+ case Opt_diropq_a:
-+ AuLabel(diropq_a);
-+ break;
-+ case Opt_diropq_w:
-+ AuLabel(diropq_w);
-+ break;
-+ case Opt_warn_perm:
-+ AuLabel(warn_perm);
-+ break;
-+ case Opt_nowarn_perm:
-+ AuLabel(nowarn_perm);
-+ break;
-+ case Opt_verbose:
-+ AuLabel(verbose);
-+ break;
-+ case Opt_noverbose:
-+ AuLabel(noverbose);
-+ break;
-+ case Opt_sum:
-+ AuLabel(sum);
-+ break;
-+ case Opt_nosum:
-+ AuLabel(nosum);
-+ break;
-+ case Opt_wsum:
-+ AuLabel(wsum);
-+ break;
-+ case Opt_wbr_create:
-+ u.create = &opt->wbr_create;
-+ AuDbg("create %d, %s\n", u.create->wbr_create,
-+ au_optstr_wbr_create(u.create->wbr_create));
-+ switch (u.create->wbr_create) {
-+ case AuWbrCreate_MFSV:
-+ case AuWbrCreate_PMFSV:
-+ AuDbg("%d sec\n", u.create->mfs_second);
-+ break;
-+ case AuWbrCreate_MFSRR:
-+ AuDbg("%llu watermark\n",
-+ u.create->mfsrr_watermark);
-+ break;
-+ case AuWbrCreate_MFSRRV:
-+ case AuWbrCreate_PMFSRRV:
-+ AuDbg("%llu watermark, %d sec\n",
-+ u.create->mfsrr_watermark,
-+ u.create->mfs_second);
-+ break;
-+ }
-+ break;
-+ case Opt_wbr_copyup:
-+ AuDbg("copyup %d, %s\n", opt->wbr_copyup,
-+ au_optstr_wbr_copyup(opt->wbr_copyup));
-+ break;
-+ case Opt_fhsm_sec:
-+ AuDbg("fhsm_sec %u\n", opt->fhsm_second);
-+ break;
-+ case Opt_acl:
-+ AuLabel(acl);
-+ break;
-+ case Opt_noacl:
-+ AuLabel(noacl);
-+ break;
-+ default:
-+ BUG();
-+ }
-+ opt++;
-+ }
-+#endif
-+}
-+
-+void au_opts_free(struct au_opts *opts)
-+{
-+ struct au_opt *opt;
-+
-+ opt = opts->opt;
-+ while (opt->type != Opt_tail) {
-+ switch (opt->type) {
-+ case Opt_add:
-+ case Opt_append:
-+ case Opt_prepend:
-+ path_put(&opt->add.path);
-+ break;
-+ case Opt_del:
-+ case Opt_idel:
-+ path_put(&opt->del.h_path);
-+ break;
-+ case Opt_mod:
-+ case Opt_imod:
-+ dput(opt->mod.h_root);
-+ break;
-+ case Opt_xino:
-+ fput(opt->xino.file);
-+ break;
-+ }
-+ opt++;
-+ }
-+}
-+
-+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
-+ aufs_bindex_t bindex)
-+{
-+ int err;
-+ struct au_opt_add *add = &opt->add;
-+ char *p;
-+
-+ add->bindex = bindex;
-+ add->perm = AuBrPerm_RO;
-+ add->pathname = opt_str;
-+ p = strchr(opt_str, '=');
-+ if (p) {
-+ *p++ = 0;
-+ if (*p)
-+ add->perm = br_perm_val(p);
-+ }
-+
-+ err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
-+ if (!err) {
-+ if (!p) {
-+ add->perm = AuBrPerm_RO;
-+ if (au_test_fs_rr(add->path.dentry->d_sb))
-+ add->perm = AuBrPerm_RR;
-+ else if (!bindex && !(sb_flags & MS_RDONLY))
-+ add->perm = AuBrPerm_RW;
-+ }
-+ opt->type = Opt_add;
-+ goto out;
-+ }
-+ pr_err("lookup failed %s (%d)\n", add->pathname, err);
-+ err = -EINVAL;
-+
-+out:
-+ return err;
-+}
-+
-+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
-+{
-+ int err;
-+
-+ del->pathname = args[0].from;
-+ AuDbg("del path %s\n", del->pathname);
-+
-+ err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
-+ if (unlikely(err))
-+ pr_err("lookup failed %s (%d)\n", del->pathname, err);
-+
-+ return err;
-+}
-+
-+#if 0 /* reserved for future use */
-+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
-+ struct au_opt_del *del, substring_t args[])
-+{
-+ int err;
-+ struct dentry *root;
-+
-+ err = -EINVAL;
-+ root = sb->s_root;
-+ aufs_read_lock(root, AuLock_FLUSH);
-+ if (bindex < 0 || au_sbend(sb) < bindex) {
-+ pr_err("out of bounds, %d\n", bindex);
-+ goto out;
-+ }
-+
-+ err = 0;
-+ del->h_path.dentry = dget(au_h_dptr(root, bindex));
-+ del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
-+
-+out:
-+ aufs_read_unlock(root, !AuLock_IR);
-+ return err;
-+}
-+#endif
-+
-+static int noinline_for_stack
-+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
-+{
-+ int err;
-+ struct path path;
-+ char *p;
-+
-+ err = -EINVAL;
-+ mod->path = args[0].from;
-+ p = strchr(mod->path, '=');
-+ if (unlikely(!p)) {
-+ pr_err("no permssion %s\n", args[0].from);
-+ goto out;
-+ }
-+
-+ *p++ = 0;
-+ err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
-+ if (unlikely(err)) {
-+ pr_err("lookup failed %s (%d)\n", mod->path, err);
-+ goto out;
-+ }
-+
-+ mod->perm = br_perm_val(p);
-+ AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
-+ mod->h_root = dget(path.dentry);
-+ path_put(&path);
-+
-+out:
-+ return err;
-+}
-+
-+#if 0 /* reserved for future use */
-+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
-+ struct au_opt_mod *mod, substring_t args[])
-+{
-+ int err;
-+ struct dentry *root;
-+
-+ err = -EINVAL;
-+ root = sb->s_root;
-+ aufs_read_lock(root, AuLock_FLUSH);
-+ if (bindex < 0 || au_sbend(sb) < bindex) {
-+ pr_err("out of bounds, %d\n", bindex);
-+ goto out;
-+ }
-+
-+ err = 0;
-+ mod->perm = br_perm_val(args[1].from);
-+ AuDbg("mod path %s, perm 0x%x, %s\n",
-+ mod->path, mod->perm, args[1].from);
-+ mod->h_root = dget(au_h_dptr(root, bindex));
-+
-+out:
-+ aufs_read_unlock(root, !AuLock_IR);
-+ return err;
-+}
-+#endif
-+
-+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
-+ substring_t args[])
-+{
-+ int err;
-+ struct file *file;
-+
-+ file = au_xino_create(sb, args[0].from, /*silent*/0);
-+ err = PTR_ERR(file);
-+ if (IS_ERR(file))
-+ goto out;
-+
-+ err = -EINVAL;
-+ if (unlikely(file->f_path.dentry->d_sb == sb)) {
-+ fput(file);
-+ pr_err("%s must be outside\n", args[0].from);
-+ goto out;
-+ }
-+
-+ err = 0;
-+ xino->file = file;
-+ xino->path = args[0].from;
-+
-+out:
-+ return err;
-+}
-+
-+static int noinline_for_stack
-+au_opts_parse_xino_itrunc_path(struct super_block *sb,
-+ struct au_opt_xino_itrunc *xino_itrunc,
-+ substring_t args[])
-+{
-+ int err;
-+ aufs_bindex_t bend, bindex;
-+ struct path path;
-+ struct dentry *root;
-+
-+ err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
-+ if (unlikely(err)) {
-+ pr_err("lookup failed %s (%d)\n", args[0].from, err);
-+ goto out;
-+ }
-+
-+ xino_itrunc->bindex = -1;
-+ root = sb->s_root;
-+ aufs_read_lock(root, AuLock_FLUSH);
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ if (au_h_dptr(root, bindex) == path.dentry) {
-+ xino_itrunc->bindex = bindex;
-+ break;
-+ }
-+ }
-+ aufs_read_unlock(root, !AuLock_IR);
-+ path_put(&path);
-+
-+ if (unlikely(xino_itrunc->bindex < 0)) {
-+ pr_err("no such branch %s\n", args[0].from);
-+ err = -EINVAL;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* called without aufs lock */
-+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
-+{
-+ int err, n, token;
-+ aufs_bindex_t bindex;
-+ unsigned char skipped;
-+ struct dentry *root;
-+ struct au_opt *opt, *opt_tail;
-+ char *opt_str;
-+ /* reduce the stack space */
-+ union {
-+ struct au_opt_xino_itrunc *xino_itrunc;
-+ struct au_opt_wbr_create *create;
-+ } u;
-+ struct {
-+ substring_t args[MAX_OPT_ARGS];
-+ } *a;
-+
-+ err = -ENOMEM;
-+ a = kmalloc(sizeof(*a), GFP_NOFS);
-+ if (unlikely(!a))
-+ goto out;
-+
-+ root = sb->s_root;
-+ err = 0;
-+ bindex = 0;
-+ opt = opts->opt;
-+ opt_tail = opt + opts->max_opt - 1;
-+ opt->type = Opt_tail;
-+ while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
-+ err = -EINVAL;
-+ skipped = 0;
-+ token = match_token(opt_str, options, a->args);
-+ switch (token) {
-+ case Opt_br:
-+ err = 0;
-+ while (!err && (opt_str = strsep(&a->args[0].from, ":"))
-+ && *opt_str) {
-+ err = opt_add(opt, opt_str, opts->sb_flags,
-+ bindex++);
-+ if (unlikely(!err && ++opt > opt_tail)) {
-+ err = -E2BIG;
-+ break;
-+ }
-+ opt->type = Opt_tail;
-+ skipped = 1;
-+ }
-+ break;
-+ case Opt_add:
-+ if (unlikely(match_int(&a->args[0], &n))) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ bindex = n;
-+ err = opt_add(opt, a->args[1].from, opts->sb_flags,
-+ bindex);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+ case Opt_append:
-+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
-+ /*dummy bindex*/1);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+ case Opt_prepend:
-+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
-+ /*bindex*/0);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+ case Opt_del:
-+ err = au_opts_parse_del(&opt->del, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+#if 0 /* reserved for future use */
-+ case Opt_idel:
-+ del->pathname = "(indexed)";
-+ if (unlikely(match_int(&args[0], &n))) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ err = au_opts_parse_idel(sb, n, &opt->del, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+#endif
-+ case Opt_mod:
-+ err = au_opts_parse_mod(&opt->mod, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+#ifdef IMOD /* reserved for future use */
-+ case Opt_imod:
-+		opt->mod.path = "(indexed)";
-+ if (unlikely(match_int(&a->args[0], &n))) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+#endif
-+ case Opt_xino:
-+ err = au_opts_parse_xino(sb, &opt->xino, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+
-+ case Opt_trunc_xino_path:
-+ err = au_opts_parse_xino_itrunc_path
-+ (sb, &opt->xino_itrunc, a->args);
-+ if (!err)
-+ opt->type = token;
-+ break;
-+
-+ case Opt_itrunc_xino:
-+ u.xino_itrunc = &opt->xino_itrunc;
-+ if (unlikely(match_int(&a->args[0], &n))) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ u.xino_itrunc->bindex = n;
-+ aufs_read_lock(root, AuLock_FLUSH);
-+ if (n < 0 || au_sbend(sb) < n) {
-+ pr_err("out of bounds, %d\n", n);
-+ aufs_read_unlock(root, !AuLock_IR);
-+ break;
-+ }
-+ aufs_read_unlock(root, !AuLock_IR);
-+ err = 0;
-+ opt->type = token;
-+ break;
-+
-+ case Opt_dirwh:
-+ if (unlikely(match_int(&a->args[0], &opt->dirwh)))
-+ break;
-+ err = 0;
-+ opt->type = token;
-+ break;
-+
-+ case Opt_rdcache:
-+ if (unlikely(match_int(&a->args[0], &n))) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ if (unlikely(n > AUFS_RDCACHE_MAX)) {
-+ pr_err("rdcache must be smaller than %d\n",
-+ AUFS_RDCACHE_MAX);
-+ break;
-+ }
-+ opt->rdcache = n;
-+ err = 0;
-+ opt->type = token;
-+ break;
-+ case Opt_rdblk:
-+ if (unlikely(match_int(&a->args[0], &n)
-+ || n < 0
-+ || n > KMALLOC_MAX_SIZE)) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ if (unlikely(n && n < NAME_MAX)) {
-+ pr_err("rdblk must be larger than %d\n",
-+ NAME_MAX);
-+ break;
-+ }
-+ opt->rdblk = n;
-+ err = 0;
-+ opt->type = token;
-+ break;
-+ case Opt_rdhash:
-+ if (unlikely(match_int(&a->args[0], &n)
-+ || n < 0
-+ || n * sizeof(struct hlist_head)
-+ > KMALLOC_MAX_SIZE)) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ opt->rdhash = n;
-+ err = 0;
-+ opt->type = token;
-+ break;
-+
-+ case Opt_trunc_xino:
-+ case Opt_notrunc_xino:
-+ case Opt_noxino:
-+ case Opt_trunc_xib:
-+ case Opt_notrunc_xib:
-+ case Opt_shwh:
-+ case Opt_noshwh:
-+ case Opt_dirperm1:
-+ case Opt_nodirperm1:
-+ case Opt_plink:
-+ case Opt_noplink:
-+ case Opt_list_plink:
-+ case Opt_dio:
-+ case Opt_nodio:
-+ case Opt_diropq_a:
-+ case Opt_diropq_w:
-+ case Opt_warn_perm:
-+ case Opt_nowarn_perm:
-+ case Opt_verbose:
-+ case Opt_noverbose:
-+ case Opt_sum:
-+ case Opt_nosum:
-+ case Opt_wsum:
-+ case Opt_rdblk_def:
-+ case Opt_rdhash_def:
-+ case Opt_acl:
-+ case Opt_noacl:
-+ err = 0;
-+ opt->type = token;
-+ break;
-+
-+ case Opt_udba:
-+ opt->udba = udba_val(a->args[0].from);
-+ if (opt->udba >= 0) {
-+ err = 0;
-+ opt->type = token;
-+ } else
-+ pr_err("wrong value, %s\n", opt_str);
-+ break;
-+
-+ case Opt_wbr_create:
-+ u.create = &opt->wbr_create;
-+ u.create->wbr_create
-+ = au_wbr_create_val(a->args[0].from, u.create);
-+ if (u.create->wbr_create >= 0) {
-+ err = 0;
-+ opt->type = token;
-+ } else
-+ pr_err("wrong value, %s\n", opt_str);
-+ break;
-+ case Opt_wbr_copyup:
-+ opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
-+ if (opt->wbr_copyup >= 0) {
-+ err = 0;
-+ opt->type = token;
-+ } else
-+ pr_err("wrong value, %s\n", opt_str);
-+ break;
-+
-+ case Opt_fhsm_sec:
-+ if (unlikely(match_int(&a->args[0], &n)
-+ || n < 0)) {
-+ pr_err("bad integer in %s\n", opt_str);
-+ break;
-+ }
-+ if (sysaufs_brs) {
-+ opt->fhsm_second = n;
-+ opt->type = token;
-+ } else
-+ pr_warn("ignored %s\n", opt_str);
-+ err = 0;
-+ break;
-+
-+ case Opt_ignore:
-+ pr_warn("ignored %s\n", opt_str);
-+ /*FALLTHROUGH*/
-+ case Opt_ignore_silent:
-+ skipped = 1;
-+ err = 0;
-+ break;
-+ case Opt_err:
-+ pr_err("unknown option %s\n", opt_str);
-+ break;
-+ }
-+
-+ if (!err && !skipped) {
-+ if (unlikely(++opt > opt_tail)) {
-+ err = -E2BIG;
-+ opt--;
-+ opt->type = Opt_tail;
-+ break;
-+ }
-+ opt->type = Opt_tail;
-+ }
-+ }
-+
-+ kfree(a);
-+ dump_opts(opts);
-+ if (unlikely(err))
-+ au_opts_free(opts);
-+
-+out:
-+ return err;
-+}
-+
-+static int au_opt_wbr_create(struct super_block *sb,
-+ struct au_opt_wbr_create *create)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = 1; /* handled */
-+ sbinfo = au_sbi(sb);
-+ if (sbinfo->si_wbr_create_ops->fin) {
-+ err = sbinfo->si_wbr_create_ops->fin(sb);
-+ if (!err)
-+ err = 1;
-+ }
-+
-+ sbinfo->si_wbr_create = create->wbr_create;
-+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
-+ switch (create->wbr_create) {
-+ case AuWbrCreate_MFSRRV:
-+ case AuWbrCreate_MFSRR:
-+ case AuWbrCreate_PMFSRR:
-+ case AuWbrCreate_PMFSRRV:
-+ sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
-+ /*FALLTHROUGH*/
-+ case AuWbrCreate_MFS:
-+ case AuWbrCreate_MFSV:
-+ case AuWbrCreate_PMFS:
-+ case AuWbrCreate_PMFSV:
-+ sbinfo->si_wbr_mfs.mfs_expire
-+ = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
-+ break;
-+ }
-+
-+ if (sbinfo->si_wbr_create_ops->init)
-+ sbinfo->si_wbr_create_ops->init(sb); /* ignore */
-+
-+ return err;
-+}
-+
-+/*
-+ * returns tri-state.
-+ * plus: processed without an error
-+ * zero: unprocessed
-+ * minus: error
-+ */
-+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
-+ struct au_opts *opts)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = 1; /* handled */
-+ sbinfo = au_sbi(sb);
-+ switch (opt->type) {
-+ case Opt_udba:
-+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
-+ sbinfo->si_mntflags |= opt->udba;
-+ opts->given_udba |= opt->udba;
-+ break;
-+
-+ case Opt_plink:
-+ au_opt_set(sbinfo->si_mntflags, PLINK);
-+ break;
-+ case Opt_noplink:
-+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
-+ au_plink_put(sb, /*verbose*/1);
-+ au_opt_clr(sbinfo->si_mntflags, PLINK);
-+ break;
-+ case Opt_list_plink:
-+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
-+ au_plink_list(sb);
-+ break;
-+
-+ case Opt_dio:
-+ au_opt_set(sbinfo->si_mntflags, DIO);
-+ au_fset_opts(opts->flags, REFRESH_DYAOP);
-+ break;
-+ case Opt_nodio:
-+ au_opt_clr(sbinfo->si_mntflags, DIO);
-+ au_fset_opts(opts->flags, REFRESH_DYAOP);
-+ break;
-+
-+ case Opt_fhsm_sec:
-+ au_fhsm_set(sbinfo, opt->fhsm_second);
-+ break;
-+
-+ case Opt_diropq_a:
-+ au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
-+ break;
-+ case Opt_diropq_w:
-+ au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
-+ break;
-+
-+ case Opt_warn_perm:
-+ au_opt_set(sbinfo->si_mntflags, WARN_PERM);
-+ break;
-+ case Opt_nowarn_perm:
-+ au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
-+ break;
-+
-+ case Opt_verbose:
-+ au_opt_set(sbinfo->si_mntflags, VERBOSE);
-+ break;
-+ case Opt_noverbose:
-+ au_opt_clr(sbinfo->si_mntflags, VERBOSE);
-+ break;
-+
-+ case Opt_sum:
-+ au_opt_set(sbinfo->si_mntflags, SUM);
-+ break;
-+ case Opt_wsum:
-+ au_opt_clr(sbinfo->si_mntflags, SUM);
-+		au_opt_set(sbinfo->si_mntflags, SUM_W);
-+		break;
-+ case Opt_nosum:
-+ au_opt_clr(sbinfo->si_mntflags, SUM);
-+ au_opt_clr(sbinfo->si_mntflags, SUM_W);
-+ break;
-+
-+ case Opt_wbr_create:
-+ err = au_opt_wbr_create(sb, &opt->wbr_create);
-+ break;
-+ case Opt_wbr_copyup:
-+ sbinfo->si_wbr_copyup = opt->wbr_copyup;
-+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
-+ break;
-+
-+ case Opt_dirwh:
-+ sbinfo->si_dirwh = opt->dirwh;
-+ break;
-+
-+ case Opt_rdcache:
-+ sbinfo->si_rdcache
-+ = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
-+ break;
-+ case Opt_rdblk:
-+ sbinfo->si_rdblk = opt->rdblk;
-+ break;
-+ case Opt_rdblk_def:
-+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
-+ break;
-+ case Opt_rdhash:
-+ sbinfo->si_rdhash = opt->rdhash;
-+ break;
-+ case Opt_rdhash_def:
-+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
-+ break;
-+
-+ case Opt_shwh:
-+ au_opt_set(sbinfo->si_mntflags, SHWH);
-+ break;
-+ case Opt_noshwh:
-+ au_opt_clr(sbinfo->si_mntflags, SHWH);
-+ break;
-+
-+ case Opt_dirperm1:
-+ au_opt_set(sbinfo->si_mntflags, DIRPERM1);
-+ break;
-+ case Opt_nodirperm1:
-+ au_opt_clr(sbinfo->si_mntflags, DIRPERM1);
-+ break;
-+
-+ case Opt_trunc_xino:
-+ au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
-+ break;
-+ case Opt_notrunc_xino:
-+ au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
-+ break;
-+
-+ case Opt_trunc_xino_path:
-+ case Opt_itrunc_xino:
-+ err = au_xino_trunc(sb, opt->xino_itrunc.bindex);
-+ if (!err)
-+ err = 1;
-+ break;
-+
-+ case Opt_trunc_xib:
-+ au_fset_opts(opts->flags, TRUNC_XIB);
-+ break;
-+ case Opt_notrunc_xib:
-+ au_fclr_opts(opts->flags, TRUNC_XIB);
-+ break;
-+
-+ case Opt_acl:
-+ sb->s_flags |= MS_POSIXACL;
-+ break;
-+ case Opt_noacl:
-+ sb->s_flags &= ~MS_POSIXACL;
-+ break;
-+
-+ default:
-+ err = 0;
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+/*
-+ * returns tri-state.
-+ * plus: processed without an error
-+ * zero: unprocessed
-+ * minus: error
-+ */
-+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
-+ struct au_opts *opts)
-+{
-+ int err, do_refresh;
-+
-+ err = 0;
-+ switch (opt->type) {
-+ case Opt_append:
-+ opt->add.bindex = au_sbend(sb) + 1;
-+ if (opt->add.bindex < 0)
-+ opt->add.bindex = 0;
-+ goto add;
-+ case Opt_prepend:
-+ opt->add.bindex = 0;
-+ add: /* indented label */
-+ case Opt_add:
-+ err = au_br_add(sb, &opt->add,
-+ au_ftest_opts(opts->flags, REMOUNT));
-+ if (!err) {
-+ err = 1;
-+ au_fset_opts(opts->flags, REFRESH);
-+ }
-+ break;
-+
-+ case Opt_del:
-+ case Opt_idel:
-+ err = au_br_del(sb, &opt->del,
-+ au_ftest_opts(opts->flags, REMOUNT));
-+ if (!err) {
-+ err = 1;
-+ au_fset_opts(opts->flags, TRUNC_XIB);
-+ au_fset_opts(opts->flags, REFRESH);
-+ }
-+ break;
-+
-+ case Opt_mod:
-+ case Opt_imod:
-+ err = au_br_mod(sb, &opt->mod,
-+ au_ftest_opts(opts->flags, REMOUNT),
-+ &do_refresh);
-+ if (!err) {
-+ err = 1;
-+ if (do_refresh)
-+ au_fset_opts(opts->flags, REFRESH);
-+ }
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
-+ struct au_opt_xino **opt_xino,
-+ struct au_opts *opts)
-+{
-+ int err;
-+ aufs_bindex_t bend, bindex;
-+ struct dentry *root, *parent, *h_root;
-+
-+ err = 0;
-+ switch (opt->type) {
-+ case Opt_xino:
-+ err = au_xino_set(sb, &opt->xino,
-+ !!au_ftest_opts(opts->flags, REMOUNT));
-+ if (unlikely(err))
-+ break;
-+
-+ *opt_xino = &opt->xino;
-+ au_xino_brid_set(sb, -1);
-+
-+ /* safe d_parent access */
-+ parent = opt->xino.file->f_path.dentry->d_parent;
-+ root = sb->s_root;
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ h_root = au_h_dptr(root, bindex);
-+ if (h_root == parent) {
-+ au_xino_brid_set(sb, au_sbr_id(sb, bindex));
-+ break;
-+ }
-+ }
-+ break;
-+
-+ case Opt_noxino:
-+ au_xino_clr(sb);
-+ au_xino_brid_set(sb, -1);
-+ *opt_xino = (void *)-1;
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
-+ unsigned int pending)
-+{
-+ int err, fhsm;
-+ aufs_bindex_t bindex, bend;
-+ unsigned char do_plink, skip, do_free;
-+ struct au_branch *br;
-+ struct au_wbr *wbr;
-+ struct dentry *root;
-+ struct inode *dir, *h_dir;
-+ struct au_sbinfo *sbinfo;
-+ struct au_hinode *hdir;
-+
-+ SiMustAnyLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
-+
-+ if (!(sb_flags & MS_RDONLY)) {
-+ if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
-+ pr_warn("first branch should be rw\n");
-+ if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
-+ pr_warn("shwh should be used with ro\n");
-+ }
-+
-+ if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
-+ && !au_opt_test(sbinfo->si_mntflags, XINO))
-+ pr_warn("udba=*notify requires xino\n");
-+
-+ if (au_opt_test(sbinfo->si_mntflags, DIRPERM1))
-+ pr_warn("dirperm1 breaks the protection"
-+ " by the permission bits on the lower branch\n");
-+
-+ err = 0;
-+ fhsm = 0;
-+ root = sb->s_root;
-+ dir = d_inode(root);
-+ do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
-+ bend = au_sbend(sb);
-+ for (bindex = 0; !err && bindex <= bend; bindex++) {
-+ skip = 0;
-+ h_dir = au_h_iptr(dir, bindex);
-+ br = au_sbr(sb, bindex);
-+
-+ if ((br->br_perm & AuBrAttr_ICEX)
-+ && !h_dir->i_op->listxattr)
-+ br->br_perm &= ~AuBrAttr_ICEX;
-+#if 0
-+ if ((br->br_perm & AuBrAttr_ICEX_SEC)
-+ && (au_br_sb(br)->s_flags & MS_NOSEC))
-+ br->br_perm &= ~AuBrAttr_ICEX_SEC;
-+#endif
-+
-+ do_free = 0;
-+ wbr = br->br_wbr;
-+ if (wbr)
-+ wbr_wh_read_lock(wbr);
-+
-+ if (!au_br_writable(br->br_perm)) {
-+ do_free = !!wbr;
-+ skip = (!wbr
-+ || (!wbr->wbr_whbase
-+ && !wbr->wbr_plink
-+ && !wbr->wbr_orph));
-+ } else if (!au_br_wh_linkable(br->br_perm)) {
-+ /* skip = (!br->br_whbase && !br->br_orph); */
-+ skip = (!wbr || !wbr->wbr_whbase);
-+ if (skip && wbr) {
-+ if (do_plink)
-+ skip = !!wbr->wbr_plink;
-+ else
-+ skip = !wbr->wbr_plink;
-+ }
-+ } else {
-+			/* skip = (br->br_whbase && br->br_orph); */
-+ skip = (wbr && wbr->wbr_whbase);
-+ if (skip) {
-+ if (do_plink)
-+ skip = !!wbr->wbr_plink;
-+ else
-+ skip = !wbr->wbr_plink;
-+ }
-+ }
-+ if (wbr)
-+ wbr_wh_read_unlock(wbr);
-+
-+ if (au_br_fhsm(br->br_perm)) {
-+ fhsm++;
-+ AuDebugOn(!br->br_fhsm);
-+ }
-+
-+ if (skip)
-+ continue;
-+
-+ hdir = au_hi(dir, bindex);
-+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ if (wbr)
-+ wbr_wh_write_lock(wbr);
-+ err = au_wh_init(br, sb);
-+ if (wbr)
-+ wbr_wh_write_unlock(wbr);
-+ au_hn_imtx_unlock(hdir);
-+
-+ if (!err && do_free) {
-+ kfree(wbr);
-+ br->br_wbr = NULL;
-+ }
-+ }
-+
-+ if (fhsm >= 2) {
-+ au_fset_si(sbinfo, FHSM);
-+ for (bindex = bend; bindex >= 0; bindex--) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_fhsm(br->br_perm)) {
-+ au_fhsm_set_bottom(sb, bindex);
-+ break;
-+ }
-+ }
-+ } else {
-+ au_fclr_si(sbinfo, FHSM);
-+ au_fhsm_set_bottom(sb, -1);
-+ }
-+
-+ return err;
-+}
-+
-+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
-+{
-+ int err;
-+ unsigned int tmp;
-+ aufs_bindex_t bindex, bend;
-+ struct au_opt *opt;
-+ struct au_opt_xino *opt_xino, xino;
-+ struct au_sbinfo *sbinfo;
-+ struct au_branch *br;
-+ struct inode *dir;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = 0;
-+ opt_xino = NULL;
-+ opt = opts->opt;
-+ while (err >= 0 && opt->type != Opt_tail)
-+ err = au_opt_simple(sb, opt++, opts);
-+ if (err > 0)
-+ err = 0;
-+ else if (unlikely(err < 0))
-+ goto out;
-+
-+	/* disable xino and udba temporarily */
-+ sbinfo = au_sbi(sb);
-+ tmp = sbinfo->si_mntflags;
-+ au_opt_clr(sbinfo->si_mntflags, XINO);
-+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
-+
-+ opt = opts->opt;
-+ while (err >= 0 && opt->type != Opt_tail)
-+ err = au_opt_br(sb, opt++, opts);
-+ if (err > 0)
-+ err = 0;
-+ else if (unlikely(err < 0))
-+ goto out;
-+
-+ bend = au_sbend(sb);
-+ if (unlikely(bend < 0)) {
-+ err = -EINVAL;
-+ pr_err("no branches\n");
-+ goto out;
-+ }
-+
-+ if (au_opt_test(tmp, XINO))
-+ au_opt_set(sbinfo->si_mntflags, XINO);
-+ opt = opts->opt;
-+ while (!err && opt->type != Opt_tail)
-+ err = au_opt_xino(sb, opt++, &opt_xino, opts);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_opts_verify(sb, sb->s_flags, tmp);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /* restore xino */
-+ if (au_opt_test(tmp, XINO) && !opt_xino) {
-+ xino.file = au_xino_def(sb);
-+ err = PTR_ERR(xino.file);
-+ if (IS_ERR(xino.file))
-+ goto out;
-+
-+ err = au_xino_set(sb, &xino, /*remount*/0);
-+ fput(xino.file);
-+ if (unlikely(err))
-+ goto out;
-+ }
-+
-+ /* restore udba */
-+ tmp &= AuOptMask_UDBA;
-+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
-+ sbinfo->si_mntflags |= tmp;
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ err = au_hnotify_reset_br(tmp, br, br->br_perm);
-+ if (unlikely(err))
-+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
-+ bindex, err);
-+ /* go on even if err */
-+ }
-+ if (au_opt_test(tmp, UDBA_HNOTIFY)) {
-+ dir = d_inode(sb->s_root);
-+ au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
-+{
-+ int err, rerr;
-+ struct inode *dir;
-+ struct au_opt_xino *opt_xino;
-+ struct au_opt *opt;
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ dir = d_inode(sb->s_root);
-+ sbinfo = au_sbi(sb);
-+ err = 0;
-+ opt_xino = NULL;
-+ opt = opts->opt;
-+ while (err >= 0 && opt->type != Opt_tail) {
-+ err = au_opt_simple(sb, opt, opts);
-+ if (!err)
-+ err = au_opt_br(sb, opt, opts);
-+ if (!err)
-+ err = au_opt_xino(sb, opt, &opt_xino, opts);
-+ opt++;
-+ }
-+ if (err > 0)
-+ err = 0;
-+ AuTraceErr(err);
-+	/* go on even if err */
-+
-+ rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
-+ if (unlikely(rerr && !err))
-+ err = rerr;
-+
-+ if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
-+ rerr = au_xib_trunc(sb);
-+ if (unlikely(rerr && !err))
-+ err = rerr;
-+ }
-+
-+ /* will be handled by the caller */
-+ if (!au_ftest_opts(opts->flags, REFRESH)
-+ && (opts->given_udba || au_opt_test(sbinfo->si_mntflags, XINO)))
-+ au_fset_opts(opts->flags, REFRESH);
-+
-+ AuDbg("status 0x%x\n", opts->flags);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+unsigned int au_opt_udba(struct super_block *sb)
-+{
-+ return au_mntflags(sb) & AuOptMask_UDBA;
-+}
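
[editor's note: a minimal userspace sketch, not aufs code. au_opts_parse() above tokenizes the mount string in two passes with strsep(3): commas separate options, and the branch option's value is split again on colons. Here a plain strncmp stands in for the kernel's match_token(), and the option names are illustrative.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical aufs-style mount data */
	char buf[] = "br:/rw=rw:/ro=ro,udba=reval,trunc_xino";
	char *str = buf, *opt, *br;

	/* outer loop: one option per comma, as in au_opts_parse() */
	while ((opt = strsep(&str, ",")) && *opt) {
		if (!strncmp(opt, "br:", 3)) {
			opt += 3;
			/* inner loop: one branch per colon (the Opt_br case) */
			while ((br = strsep(&opt, ":")) && *br)
				printf("branch: %s\n", br);
		} else {
			printf("option: %s\n", opt);
		}
	}
	return 0;
}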
-diff -Nur linux-4.1.10.orig/fs/aufs/opts.h linux-4.1.10/fs/aufs/opts.h
---- linux-4.1.10.orig/fs/aufs/opts.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/opts.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,210 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * mount options/flags
-+ */
-+
-+#ifndef __AUFS_OPTS_H__
-+#define __AUFS_OPTS_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/path.h>
-+
-+struct file;
-+struct super_block;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* mount flags */
-+#define AuOpt_XINO 1 /* external inode number bitmap
-+ and translation table */
-+#define AuOpt_TRUNC_XINO (1 << 1) /* truncate xino files */
-+#define AuOpt_UDBA_NONE (1 << 2) /* users direct branch access */
-+#define AuOpt_UDBA_REVAL (1 << 3)
-+#define AuOpt_UDBA_HNOTIFY (1 << 4)
-+#define AuOpt_SHWH (1 << 5) /* show whiteout */
-+#define AuOpt_PLINK (1 << 6) /* pseudo-link */
-+#define AuOpt_DIRPERM1 (1 << 7) /* ignore the lower dir's perm
-+ bits */
-+#define AuOpt_ALWAYS_DIROPQ (1 << 9) /* policy to creating diropq */
-+#define AuOpt_SUM (1 << 10) /* summation for statfs(2) */
-+#define AuOpt_SUM_W (1 << 11) /* unimplemented */
-+#define AuOpt_WARN_PERM (1 << 12) /* warn when add-branch */
-+#define AuOpt_VERBOSE (1 << 13) /* busy inode when del-branch */
-+#define AuOpt_DIO (1 << 14) /* direct io */
-+
-+#ifndef CONFIG_AUFS_HNOTIFY
-+#undef AuOpt_UDBA_HNOTIFY
-+#define AuOpt_UDBA_HNOTIFY 0
-+#endif
-+#ifndef CONFIG_AUFS_SHWH
-+#undef AuOpt_SHWH
-+#define AuOpt_SHWH 0
-+#endif
-+
-+#define AuOpt_Def (AuOpt_XINO \
-+ | AuOpt_UDBA_REVAL \
-+ | AuOpt_PLINK \
-+ /* | AuOpt_DIRPERM1 */ \
-+ | AuOpt_WARN_PERM)
-+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \
-+ | AuOpt_UDBA_REVAL \
-+ | AuOpt_UDBA_HNOTIFY)
-+
-+#define au_opt_test(flags, name) (flags & AuOpt_##name)
-+#define au_opt_set(flags, name) do { \
-+ BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
-+ ((flags) |= AuOpt_##name); \
-+} while (0)
-+#define au_opt_set_udba(flags, name) do { \
-+ (flags) &= ~AuOptMask_UDBA; \
-+ ((flags) |= AuOpt_##name); \
-+} while (0)
-+#define au_opt_clr(flags, name) do { \
-+ ((flags) &= ~AuOpt_##name); \
-+} while (0)
-+
-+static inline unsigned int au_opts_plink(unsigned int mntflags)
-+{
-+#ifdef CONFIG_PROC_FS
-+ return mntflags;
-+#else
-+ return mntflags & ~AuOpt_PLINK;
-+#endif
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* policies to select one among multiple writable branches */
-+enum {
-+ AuWbrCreate_TDP, /* top down parent */
-+ AuWbrCreate_RR, /* round robin */
-+ AuWbrCreate_MFS, /* most free space */
-+ AuWbrCreate_MFSV, /* mfs with seconds */
-+ AuWbrCreate_MFSRR, /* mfs then rr */
-+ AuWbrCreate_MFSRRV, /* mfs then rr with seconds */
-+ AuWbrCreate_PMFS, /* parent and mfs */
-+ AuWbrCreate_PMFSV, /* parent and mfs with seconds */
-+ AuWbrCreate_PMFSRR, /* parent, mfs and round-robin */
-+ AuWbrCreate_PMFSRRV, /* plus seconds */
-+
-+ AuWbrCreate_Def = AuWbrCreate_TDP
-+};
-+
-+enum {
-+ AuWbrCopyup_TDP, /* top down parent */
-+ AuWbrCopyup_BUP, /* bottom up parent */
-+ AuWbrCopyup_BU, /* bottom up */
-+
-+ AuWbrCopyup_Def = AuWbrCopyup_TDP
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_opt_add {
-+ aufs_bindex_t bindex;
-+ char *pathname;
-+ int perm;
-+ struct path path;
-+};
-+
-+struct au_opt_del {
-+ char *pathname;
-+ struct path h_path;
-+};
-+
-+struct au_opt_mod {
-+ char *path;
-+ int perm;
-+ struct dentry *h_root;
-+};
-+
-+struct au_opt_xino {
-+ char *path;
-+ struct file *file;
-+};
-+
-+struct au_opt_xino_itrunc {
-+ aufs_bindex_t bindex;
-+};
-+
-+struct au_opt_wbr_create {
-+ int wbr_create;
-+ int mfs_second;
-+ unsigned long long mfsrr_watermark;
-+};
-+
-+struct au_opt {
-+ int type;
-+ union {
-+ struct au_opt_xino xino;
-+ struct au_opt_xino_itrunc xino_itrunc;
-+ struct au_opt_add add;
-+ struct au_opt_del del;
-+ struct au_opt_mod mod;
-+ int dirwh;
-+ int rdcache;
-+ unsigned int rdblk;
-+ unsigned int rdhash;
-+ int udba;
-+ struct au_opt_wbr_create wbr_create;
-+ int wbr_copyup;
-+ unsigned int fhsm_second;
-+ };
-+};
-+
-+/* opts flags */
-+#define AuOpts_REMOUNT 1
-+#define AuOpts_REFRESH (1 << 1)
-+#define AuOpts_TRUNC_XIB (1 << 2)
-+#define AuOpts_REFRESH_DYAOP (1 << 3)
-+#define au_ftest_opts(flags, name) ((flags) & AuOpts_##name)
-+#define au_fset_opts(flags, name) \
-+ do { (flags) |= AuOpts_##name; } while (0)
-+#define au_fclr_opts(flags, name) \
-+ do { (flags) &= ~AuOpts_##name; } while (0)
-+
-+struct au_opts {
-+ struct au_opt *opt;
-+ int max_opt;
-+
-+ unsigned int given_udba;
-+ unsigned int flags;
-+ unsigned long sb_flags;
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* opts.c */
-+void au_optstr_br_perm(au_br_perm_str_t *str, int perm);
-+const char *au_optstr_udba(int udba);
-+const char *au_optstr_wbr_copyup(int wbr_copyup);
-+const char *au_optstr_wbr_create(int wbr_create);
-+
-+void au_opts_free(struct au_opts *opts);
-+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
-+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
-+ unsigned int pending);
-+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
-+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
-+
-+unsigned int au_opt_udba(struct super_block *sb);
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_OPTS_H__ */
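
[editor's note: a compile-time sketch of the flag helpers above, runnable outside the kernel. Token pasting turns a short name into an AuOpt_* bit, and the UDBA setter clears the whole mask first so the UDBA modes stay mutually exclusive. All names here are renamed stand-ins; the kernel versions also carry a BUILD_BUG_ON guard.]

#include <stdio.h>

#define MyOpt_XINO        1
#define MyOpt_UDBA_NONE   (1 << 2)
#define MyOpt_UDBA_REVAL  (1 << 3)
#define MyOptMask_UDBA    (MyOpt_UDBA_NONE | MyOpt_UDBA_REVAL)

#define my_opt_test(flags, name)  ((flags) & MyOpt_##name)
#define my_opt_set(flags, name)   ((flags) |= MyOpt_##name)
#define my_opt_set_udba(flags, name) do { \
	(flags) &= ~MyOptMask_UDBA;       \
	(flags) |= MyOpt_##name;          \
} while (0)

int main(void)
{
	unsigned int mntflags = 0;

	my_opt_set(mntflags, XINO);
	my_opt_set_udba(mntflags, UDBA_NONE);
	my_opt_set_udba(mntflags, UDBA_REVAL); /* replaces NONE */
	printf("xino=%d none=%d reval=%d\n",
	       !!my_opt_test(mntflags, XINO),
	       !!my_opt_test(mntflags, UDBA_NONE),
	       !!my_opt_test(mntflags, UDBA_REVAL));
	return 0;
}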
-diff -Nur linux-4.1.10.orig/fs/aufs/plink.c linux-4.1.10/fs/aufs/plink.c
---- linux-4.1.10.orig/fs/aufs/plink.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/plink.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,528 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * pseudo-link
-+ */
-+
-+#include "aufs.h"
-+
-+/*
-+ * the pseudo-link maintenance mode.
-+ * while a user process maintains the pseudo-links,
-+ * adding a new plink and manipulating branches are prohibited.
-+ *
-+ * Flags
-+ * NOPLM:
-+ * For entry functions which will handle plink, and i_mutex is already held
-+ * in VFS.
-+ * They cannot wait and should return an error at once.
-+ *	Callers have to check the error.
-+ * NOPLMW:
-+ * For entry functions which will handle plink, but i_mutex is not held
-+ * in VFS.
-+ *	They can wait for the plink maintenance mode to finish.
-+ *
-+ * They behave like F_SETLK and F_SETLKW.
-+ * If the caller never handle plink, then both flags are unnecessary.
-+ */
-+
-+int au_plink_maint(struct super_block *sb, int flags)
-+{
-+ int err;
-+ pid_t pid, ppid;
-+ struct au_sbinfo *sbi;
-+
-+ SiMustAnyLock(sb);
-+
-+ err = 0;
-+ if (!au_opt_test(au_mntflags(sb), PLINK))
-+ goto out;
-+
-+ sbi = au_sbi(sb);
-+ pid = sbi->si_plink_maint_pid;
-+ if (!pid || pid == current->pid)
-+ goto out;
-+
-+ /* todo: it highly depends upon /sbin/mount.aufs */
-+ rcu_read_lock();
-+ ppid = task_pid_vnr(rcu_dereference(current->real_parent));
-+ rcu_read_unlock();
-+ if (pid == ppid)
-+ goto out;
-+
-+ if (au_ftest_lock(flags, NOPLMW)) {
-+ /* if there is no i_mutex lock in VFS, we don't need to wait */
-+ /* AuDebugOn(!lockdep_depth(current)); */
-+ while (sbi->si_plink_maint_pid) {
-+ si_read_unlock(sb);
-+ /* gave up wake_up_bit() */
-+ wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
-+
-+ if (au_ftest_lock(flags, FLUSH))
-+ au_nwt_flush(&sbi->si_nowait);
-+ si_noflush_read_lock(sb);
-+ }
-+ } else if (au_ftest_lock(flags, NOPLM)) {
-+ AuDbg("ppid %d, pid %d\n", ppid, pid);
-+ err = -EAGAIN;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
-+{
-+ spin_lock(&sbinfo->si_plink_maint_lock);
-+ sbinfo->si_plink_maint_pid = 0;
-+ spin_unlock(&sbinfo->si_plink_maint_lock);
-+ wake_up_all(&sbinfo->si_plink_wq);
-+}
-+
-+int au_plink_maint_enter(struct super_block *sb)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ /* make sure i am the only one in this fs */
-+ si_write_lock(sb, AuLock_FLUSH);
-+ if (au_opt_test(au_mntflags(sb), PLINK)) {
-+ spin_lock(&sbinfo->si_plink_maint_lock);
-+ if (!sbinfo->si_plink_maint_pid)
-+ sbinfo->si_plink_maint_pid = current->pid;
-+ else
-+ err = -EBUSY;
-+ spin_unlock(&sbinfo->si_plink_maint_lock);
-+ }
-+ si_write_unlock(sb);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+void au_plink_list(struct super_block *sb)
-+{
-+ int i;
-+ struct au_sbinfo *sbinfo;
-+ struct hlist_head *plink_hlist;
-+ struct pseudo_link *plink;
-+
-+ SiMustAnyLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
-+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
-+
-+ for (i = 0; i < AuPlink_NHASH; i++) {
-+ plink_hlist = &sbinfo->si_plink[i].head;
-+ rcu_read_lock();
-+ hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
-+ AuDbg("%lu\n", plink->inode->i_ino);
-+ rcu_read_unlock();
-+ }
-+}
-+#endif
-+
-+/* is the inode pseudo-linked? */
-+int au_plink_test(struct inode *inode)
-+{
-+ int found, i;
-+ struct au_sbinfo *sbinfo;
-+ struct hlist_head *plink_hlist;
-+ struct pseudo_link *plink;
-+
-+ sbinfo = au_sbi(inode->i_sb);
-+ AuRwMustAnyLock(&sbinfo->si_rwsem);
-+ AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
-+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
-+
-+ found = 0;
-+ i = au_plink_hash(inode->i_ino);
-+ plink_hlist = &sbinfo->si_plink[i].head;
-+ rcu_read_lock();
-+ hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
-+ if (plink->inode == inode) {
-+ found = 1;
-+ break;
-+ }
-+ rcu_read_unlock();
-+ return found;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * generate a name for plink.
-+ * the file will be stored under AUFS_WH_PLINKDIR.
-+ */
-+/* 20 is the maximum number of digits in a 64-bit ulong */
-+#define PLINK_NAME_LEN ((20 + 1) * 2)
-+
-+static int plink_name(char *name, int len, struct inode *inode,
-+ aufs_bindex_t bindex)
-+{
-+ int rlen;
-+ struct inode *h_inode;
-+
-+ h_inode = au_h_iptr(inode, bindex);
-+ rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
-+ return rlen;
-+}
-+
-+struct au_do_plink_lkup_args {
-+ struct dentry **errp;
-+ struct qstr *tgtname;
-+ struct dentry *h_parent;
-+ struct au_branch *br;
-+};
-+
-+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
-+ struct dentry *h_parent,
-+ struct au_branch *br)
-+{
-+ struct dentry *h_dentry;
-+ struct mutex *h_mtx;
-+
-+ h_mtx = &d_inode(h_parent)->i_mutex;
-+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
-+ h_dentry = vfsub_lkup_one(tgtname, h_parent);
-+ mutex_unlock(h_mtx);
-+ return h_dentry;
-+}
-+
-+static void au_call_do_plink_lkup(void *args)
-+{
-+ struct au_do_plink_lkup_args *a = args;
-+ *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
-+}
-+
-+/* lookup the plink-ed @inode under the branch at @bindex */
-+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
-+{
-+ struct dentry *h_dentry, *h_parent;
-+ struct au_branch *br;
-+ int wkq_err;
-+ char a[PLINK_NAME_LEN];
-+ struct qstr tgtname = QSTR_INIT(a, 0);
-+
-+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
-+
-+ br = au_sbr(inode->i_sb, bindex);
-+ h_parent = br->br_wbr->wbr_plink;
-+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
-+
-+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
-+ struct au_do_plink_lkup_args args = {
-+ .errp = &h_dentry,
-+ .tgtname = &tgtname,
-+ .h_parent = h_parent,
-+ .br = br
-+ };
-+
-+ wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
-+ if (unlikely(wkq_err))
-+ h_dentry = ERR_PTR(wkq_err);
-+ } else
-+ h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
-+
-+ return h_dentry;
-+}
-+
-+/* create a pseudo-link */
-+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
-+ struct dentry *h_dentry, struct au_branch *br)
-+{
-+ int err;
-+ struct path h_path = {
-+ .mnt = au_br_mnt(br)
-+ };
-+ struct inode *h_dir, *delegated;
-+
-+ h_dir = d_inode(h_parent);
-+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2);
-+again:
-+ h_path.dentry = vfsub_lkup_one(tgt, h_parent);
-+ err = PTR_ERR(h_path.dentry);
-+ if (IS_ERR(h_path.dentry))
-+ goto out;
-+
-+ err = 0;
-+ /* wh.plink dir is not monitored */
-+ /* todo: is it really safe? */
-+ if (d_is_positive(h_path.dentry)
-+ && d_inode(h_path.dentry) != d_inode(h_dentry)) {
-+ delegated = NULL;
-+ err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ dput(h_path.dentry);
-+ h_path.dentry = NULL;
-+ if (!err)
-+ goto again;
-+ }
-+ if (!err && d_is_negative(h_path.dentry)) {
-+ delegated = NULL;
-+ err = vfsub_link(h_dentry, h_dir, &h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ }
-+ dput(h_path.dentry);
-+
-+out:
-+ mutex_unlock(&h_dir->i_mutex);
-+ return err;
-+}
-+
-+struct do_whplink_args {
-+ int *errp;
-+ struct qstr *tgt;
-+ struct dentry *h_parent;
-+ struct dentry *h_dentry;
-+ struct au_branch *br;
-+};
-+
-+static void call_do_whplink(void *args)
-+{
-+ struct do_whplink_args *a = args;
-+ *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
-+}
-+
-+static int whplink(struct dentry *h_dentry, struct inode *inode,
-+ aufs_bindex_t bindex, struct au_branch *br)
-+{
-+ int err, wkq_err;
-+ struct au_wbr *wbr;
-+ struct dentry *h_parent;
-+ char a[PLINK_NAME_LEN];
-+ struct qstr tgtname = QSTR_INIT(a, 0);
-+
-+ wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
-+ h_parent = wbr->wbr_plink;
-+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
-+
-+ /* always superio. */
-+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
-+ struct do_whplink_args args = {
-+ .errp = &err,
-+ .tgt = &tgtname,
-+ .h_parent = h_parent,
-+ .h_dentry = h_dentry,
-+ .br = br
-+ };
-+ wkq_err = au_wkq_wait(call_do_whplink, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ } else
-+ err = do_whplink(&tgtname, h_parent, h_dentry, br);
-+
-+ return err;
-+}
-+
-+/* free a single plink */
-+static void do_put_plink(struct pseudo_link *plink, int do_del)
-+{
-+ if (do_del)
-+ hlist_del(&plink->hlist);
-+ iput(plink->inode);
-+ kfree(plink);
-+}
-+
-+static void do_put_plink_rcu(struct rcu_head *rcu)
-+{
-+ struct pseudo_link *plink;
-+
-+ plink = container_of(rcu, struct pseudo_link, rcu);
-+ iput(plink->inode);
-+ kfree(plink);
-+}
-+
-+/*
-+ * create a new pseudo-link for @h_dentry on @bindex.
-+ * the linked inode is held in aufs @inode.
-+ */
-+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
-+ struct dentry *h_dentry)
-+{
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+ struct hlist_head *plink_hlist;
-+ struct pseudo_link *plink, *tmp;
-+ struct au_sphlhead *sphl;
-+ int found, err, cnt, i;
-+
-+ sb = inode->i_sb;
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
-+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
-+
-+ found = au_plink_test(inode);
-+ if (found)
-+ return;
-+
-+ i = au_plink_hash(inode->i_ino);
-+ sphl = sbinfo->si_plink + i;
-+ plink_hlist = &sphl->head;
-+ tmp = kmalloc(sizeof(*plink), GFP_NOFS);
-+ if (tmp)
-+ tmp->inode = au_igrab(inode);
-+ else {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ spin_lock(&sphl->spin);
-+ hlist_for_each_entry(plink, plink_hlist, hlist) {
-+ if (plink->inode == inode) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ hlist_add_head_rcu(&tmp->hlist, plink_hlist);
-+ spin_unlock(&sphl->spin);
-+ if (!found) {
-+ cnt = au_sphl_count(sphl);
-+#define msg "unexpectedly unbalanced or too many pseudo-links"
-+ if (cnt > AUFS_PLINK_WARN)
-+ AuWarn1(msg ", %d\n", cnt);
-+#undef msg
-+ err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
-+ } else {
-+ do_put_plink(tmp, 0);
-+ return;
-+ }
-+
-+out:
-+ if (unlikely(err)) {
-+ pr_warn("err %d, damaged pseudo link.\n", err);
-+ if (tmp) {
-+ au_sphl_del_rcu(&tmp->hlist, sphl);
-+ call_rcu(&tmp->rcu, do_put_plink_rcu);
-+ }
-+ }
-+}
-+
-+/* free all plinks */
-+void au_plink_put(struct super_block *sb, int verbose)
-+{
-+ int i, warned;
-+ struct au_sbinfo *sbinfo;
-+ struct hlist_head *plink_hlist;
-+ struct hlist_node *tmp;
-+ struct pseudo_link *plink;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
-+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
-+
-+ /* no spin_lock since sbinfo is write-locked */
-+ warned = 0;
-+ for (i = 0; i < AuPlink_NHASH; i++) {
-+ plink_hlist = &sbinfo->si_plink[i].head;
-+ if (!warned && verbose && !hlist_empty(plink_hlist)) {
-+			pr_warn("pseudo-link is not flushed\n");
-+ warned = 1;
-+ }
-+ hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist)
-+ do_put_plink(plink, 0);
-+ INIT_HLIST_HEAD(plink_hlist);
-+ }
-+}
-+
-+void au_plink_clean(struct super_block *sb, int verbose)
-+{
-+ struct dentry *root;
-+
-+ root = sb->s_root;
-+ aufs_write_lock(root);
-+ if (au_opt_test(au_mntflags(sb), PLINK))
-+ au_plink_put(sb, verbose);
-+ aufs_write_unlock(root);
-+}
-+
-+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
-+{
-+ int do_put;
-+ aufs_bindex_t bstart, bend, bindex;
-+
-+ do_put = 0;
-+ bstart = au_ibstart(inode);
-+ bend = au_ibend(inode);
-+ if (bstart >= 0) {
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ if (!au_h_iptr(inode, bindex)
-+ || au_ii_br_id(inode, bindex) != br_id)
-+ continue;
-+ au_set_h_iptr(inode, bindex, NULL, 0);
-+ do_put = 1;
-+ break;
-+ }
-+ if (do_put)
-+ for (bindex = bstart; bindex <= bend; bindex++)
-+ if (au_h_iptr(inode, bindex)) {
-+ do_put = 0;
-+ break;
-+ }
-+ } else
-+ do_put = 1;
-+
-+ return do_put;
-+}
-+
-+/* free the plinks on a branch specified by @br_id */
-+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
-+{
-+ struct au_sbinfo *sbinfo;
-+ struct hlist_head *plink_hlist;
-+ struct hlist_node *tmp;
-+ struct pseudo_link *plink;
-+ struct inode *inode;
-+ int i, do_put;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
-+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
-+
-+ /* no spin_lock since sbinfo is write-locked */
-+ for (i = 0; i < AuPlink_NHASH; i++) {
-+ plink_hlist = &sbinfo->si_plink[i].head;
-+ hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist) {
-+ inode = au_igrab(plink->inode);
-+ ii_write_lock_child(inode);
-+ do_put = au_plink_do_half_refresh(inode, br_id);
-+ if (do_put)
-+ do_put_plink(plink, 1);
-+ ii_write_unlock(inode);
-+ iput(inode);
-+ }
-+ }
-+}
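
[editor's note: to make plink_name() above concrete: each pseudo-link is a hard link named "<aufs inode>.<branch inode>" under the hidden plink directory, and its in-memory record is filed into one of AuPlink_NHASH buckets keyed by the aufs inode number. A userspace sketch follows; the bucket count and the modulo hash are assumptions standing in for AuPlink_NHASH and au_plink_hash().]

#include <stdio.h>

#define NHASH 100	/* stand-in for AuPlink_NHASH, value assumed */

/* assumed hash; the real au_plink_hash() is defined elsewhere */
static int plink_hash(unsigned long ino)
{
	return (int)(ino % NHASH);
}

int main(void)
{
	unsigned long aufs_ino = 1234, h_ino = 5678;
	/* 20 digits per 64-bit number, plus '.' and NUL: (20 + 1) * 2 */
	char name[(20 + 1) * 2];

	snprintf(name, sizeof(name), "%lu.%lu", aufs_ino, h_ino);
	printf("plink name: %s, bucket: %d\n", name, plink_hash(aufs_ino));
	return 0;
}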
-diff -Nur linux-4.1.10.orig/fs/aufs/poll.c linux-4.1.10/fs/aufs/poll.c
---- linux-4.1.10.orig/fs/aufs/poll.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/poll.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,52 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * poll operation
-+ * Currently, only one filesystem implements the ->poll operation.
-+ */
-+
-+#include "aufs.h"
-+
-+unsigned int aufs_poll(struct file *file, poll_table *wait)
-+{
-+ unsigned int mask;
-+ int err;
-+ struct file *h_file;
-+ struct super_block *sb;
-+
-+ /* We should pretend an error happened. */
-+ mask = POLLERR /* | POLLIN | POLLOUT */;
-+ sb = file->f_path.dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
-+
-+ h_file = au_read_pre(file, /*keep_fi*/0);
-+ err = PTR_ERR(h_file);
-+ if (IS_ERR(h_file))
-+ goto out;
-+
-+ /* it is not an error if h_file has no operation */
-+ mask = DEFAULT_POLLMASK;
-+ if (h_file->f_op->poll)
-+ mask = h_file->f_op->poll(h_file, wait);
-+ fput(h_file); /* instead of au_read_post() */
-+
-+out:
-+ si_read_unlock(sb);
-+ AuTraceErr((int)mask);
-+ return mask;
-+}
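
[editor's note: the pattern aufs_poll() follows is plain delegation with a fallback: call the branch file's ->poll when it exists, otherwise report DEFAULT_POLLMASK. A self-contained sketch, with bare function pointers standing in for struct file_operations; the mask value is illustrative, not the kernel's.]

#include <stdio.h>

#define DEMO_DEFAULT_POLLMASK 0x0145	/* stand-in for DEFAULT_POLLMASK */

struct demo_fops {
	unsigned int (*poll)(void *file);
};

static unsigned int do_poll(const struct demo_fops *f_op, void *file)
{
	/* it is not an error if the lower file has no ->poll */
	if (f_op->poll)
		return f_op->poll(file);
	return DEMO_DEFAULT_POLLMASK;
}

int main(void)
{
	struct demo_fops none = { .poll = NULL };

	printf("mask: 0x%x\n", do_poll(&none, NULL));
	return 0;
}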
-diff -Nur linux-4.1.10.orig/fs/aufs/posix_acl.c linux-4.1.10/fs/aufs/posix_acl.c
---- linux-4.1.10.orig/fs/aufs/posix_acl.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/posix_acl.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,99 @@
-+/*
-+ * Copyright (C) 2014-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * posix acl operations
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/posix_acl.h>
-+#include "aufs.h"
-+
-+struct posix_acl *aufs_get_acl(struct inode *inode, int type)
-+{
-+ struct posix_acl *acl;
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct inode *h_inode;
-+ struct super_block *sb;
-+
-+ acl = NULL;
-+ sb = inode->i_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ ii_read_lock_child(inode);
-+ if (!(sb->s_flags & MS_POSIXACL))
-+ goto out;
-+
-+ bindex = au_ibstart(inode);
-+ h_inode = au_h_iptr(inode, bindex);
-+ if (unlikely(!h_inode
-+ || ((h_inode->i_mode & S_IFMT)
-+ != (inode->i_mode & S_IFMT)))) {
-+ err = au_busy_or_stale();
-+ acl = ERR_PTR(err);
-+ goto out;
-+ }
-+
-+ /* always topmost only */
-+ acl = get_acl(h_inode, type);
-+
-+out:
-+ ii_read_unlock(inode);
-+ si_read_unlock(sb);
-+
-+ AuTraceErrPtr(acl);
-+ return acl;
-+}
-+
-+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-+{
-+ int err;
-+ ssize_t ssz;
-+ struct dentry *dentry;
-+ struct au_srxattr arg = {
-+ .type = AU_ACL_SET,
-+ .u.acl_set = {
-+ .acl = acl,
-+ .type = type
-+ },
-+ };
-+
-+ mutex_lock(&inode->i_mutex);
-+ if (inode->i_ino == AUFS_ROOT_INO)
-+ dentry = dget(inode->i_sb->s_root);
-+ else {
-+ dentry = d_find_alias(inode);
-+ if (!dentry)
-+ dentry = d_find_any_alias(inode);
-+ if (!dentry) {
-+ pr_warn("cannot handle this inode, "
-+ "please report to aufs-users ML\n");
-+ err = -ENOENT;
-+ goto out;
-+ }
-+ }
-+
-+ ssz = au_srxattr(dentry, &arg);
-+ dput(dentry);
-+ err = ssz;
-+ if (ssz >= 0)
-+ err = 0;
-+
-+out:
-+ mutex_unlock(&inode->i_mutex);
-+ return err;
-+}
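
[editor's note: since aufs_get_acl() above always answers from the topmost branch, an ordinary userspace query sees that branch's ACL. POSIX ACLs travel over the xattr interface, so a quick check can use getxattr(2) with the standard name "system.posix_acl_access". A sketch; the path is a placeholder.]

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* query the size only: pass a NULL buffer */
	ssize_t sz = getxattr("/mnt/aufs/file", "system.posix_acl_access",
			      NULL, 0);

	if (sz < 0)
		perror("getxattr");
	else
		printf("acl xattr is %zd bytes\n", sz);
	return 0;
}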
-diff -Nur linux-4.1.10.orig/fs/aufs/procfs.c linux-4.1.10/fs/aufs/procfs.c
---- linux-4.1.10.orig/fs/aufs/procfs.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/procfs.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,169 @@
-+/*
-+ * Copyright (C) 2010-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * procfs interfaces
-+ */
-+
-+#include <linux/proc_fs.h>
-+#include "aufs.h"
-+
-+static int au_procfs_plm_release(struct inode *inode, struct file *file)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ sbinfo = file->private_data;
-+ if (sbinfo) {
-+ au_plink_maint_leave(sbinfo);
-+ kobject_put(&sbinfo->si_kobj);
-+ }
-+
-+ return 0;
-+}
-+
-+static void au_procfs_plm_write_clean(struct file *file)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ sbinfo = file->private_data;
-+ if (sbinfo)
-+ au_plink_clean(sbinfo->si_sb, /*verbose*/0);
-+}
-+
-+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+
-+ err = -EBUSY;
-+ if (unlikely(file->private_data))
-+ goto out;
-+
-+ sb = NULL;
-+ /* don't use au_sbilist_lock() here */
-+ spin_lock(&au_sbilist.spin);
-+ list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
-+ if (id == sysaufs_si_id(sbinfo)) {
-+ kobject_get(&sbinfo->si_kobj);
-+ sb = sbinfo->si_sb;
-+ break;
-+ }
-+ spin_unlock(&au_sbilist.spin);
-+
-+ err = -EINVAL;
-+ if (unlikely(!sb))
-+ goto out;
-+
-+ err = au_plink_maint_enter(sb);
-+ if (!err)
-+ /* keep kobject_get() */
-+ file->private_data = sbinfo;
-+ else
-+ kobject_put(&sbinfo->si_kobj);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * Accept a valid "si=xxxx" only.
-+ * Once it is accepted successfully, accept "clean" too.
-+ */
-+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
-+ size_t count, loff_t *ppos)
-+{
-+ ssize_t err;
-+ unsigned long id;
-+	/* "si=" + hex id + optional last newline + NUL */
-+	char buf[3 + sizeof(unsigned long) * 2 + 1 + 1];
-+
-+ err = -EACCES;
-+ if (unlikely(!capable(CAP_SYS_ADMIN)))
-+ goto out;
-+
-+ err = -EINVAL;
-+	if (unlikely(count >= sizeof(buf)))
-+ goto out;
-+
-+ err = copy_from_user(buf, ubuf, count);
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ goto out;
-+ }
-+ buf[count] = 0;
-+
-+ err = -EINVAL;
-+ if (!strcmp("clean", buf)) {
-+ au_procfs_plm_write_clean(file);
-+ goto out_success;
-+ } else if (unlikely(strncmp("si=", buf, 3)))
-+ goto out;
-+
-+ err = kstrtoul(buf + 3, 16, &id);
-+ if (unlikely(err))
-+ goto out;
-+
-+ err = au_procfs_plm_write_si(file, id);
-+ if (unlikely(err))
-+ goto out;
-+
-+out_success:
-+ err = count; /* success */
-+out:
-+ return err;
-+}
-+
-+static const struct file_operations au_procfs_plm_fop = {
-+ .write = au_procfs_plm_write,
-+ .release = au_procfs_plm_release,
-+ .owner = THIS_MODULE
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct proc_dir_entry *au_procfs_dir;
-+
-+void au_procfs_fin(void)
-+{
-+ remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
-+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
-+}
-+
-+int __init au_procfs_init(void)
-+{
-+ int err;
-+ struct proc_dir_entry *entry;
-+
-+ err = -ENOMEM;
-+ au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
-+ if (unlikely(!au_procfs_dir))
-+ goto out;
-+
-+ entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR,
-+ au_procfs_dir, &au_procfs_plm_fop);
-+ if (unlikely(!entry))
-+ goto out_dir;
-+
-+ err = 0;
-+ goto out; /* success */
-+
-+
-+out_dir:
-+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
-+out:
-+ return err;
-+}
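
[editor's note: the write handler above defines a small two-step protocol: a process writes "si=<hex id>" to enter pseudo-link maintenance mode for one mount, may then write "clean" to flush its plinks, and leaves the mode when it closes the file (the release hook). A hedged userspace sketch; the proc path and the id are placeholders, and a real run must take the id from the running system.]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed location built from AUFS_PLINK_MAINT_DIR/_NAME */
	const char *path = "/proc/fs/aufs/plink_maint";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* step 1: a valid "si=<hex id>" must be accepted first */
	if (write(fd, "si=deadbeef", 11) < 0)
		perror("si=");
	/* step 2: "clean" flushes the pseudo-links of that mount */
	else if (write(fd, "clean", 5) < 0)
		perror("clean");
	close(fd);	/* the release hook leaves maintenance mode */
	return 0;
}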
-diff -Nur linux-4.1.10.orig/fs/aufs/rdu.c linux-4.1.10/fs/aufs/rdu.c
---- linux-4.1.10.orig/fs/aufs/rdu.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/rdu.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,388 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * readdir in userspace.
-+ */
-+
-+#include <linux/compat.h>
-+#include <linux/fs_stack.h>
-+#include <linux/security.h>
-+#include "aufs.h"
-+
-+/* bits for struct aufs_rdu.flags */
-+#define AuRdu_CALLED 1
-+#define AuRdu_CONT (1 << 1)
-+#define AuRdu_FULL (1 << 2)
-+#define au_ftest_rdu(flags, name) ((flags) & AuRdu_##name)
-+#define au_fset_rdu(flags, name) \
-+ do { (flags) |= AuRdu_##name; } while (0)
-+#define au_fclr_rdu(flags, name) \
-+ do { (flags) &= ~AuRdu_##name; } while (0)
-+
-+struct au_rdu_arg {
-+ struct dir_context ctx;
-+ struct aufs_rdu *rdu;
-+ union au_rdu_ent_ul ent;
-+ unsigned long end;
-+
-+ struct super_block *sb;
-+ int err;
-+};
-+
-+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen,
-+ loff_t offset, u64 h_ino, unsigned int d_type)
-+{
-+ int err, len;
-+ struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx);
-+ struct aufs_rdu *rdu = arg->rdu;
-+ struct au_rdu_ent ent;
-+
-+ err = 0;
-+ arg->err = 0;
-+ au_fset_rdu(rdu->cookie.flags, CALLED);
-+ len = au_rdu_len(nlen);
-+ if (arg->ent.ul + len < arg->end) {
-+ ent.ino = h_ino;
-+ ent.bindex = rdu->cookie.bindex;
-+ ent.type = d_type;
-+ ent.nlen = nlen;
-+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
-+ ent.type = DT_UNKNOWN;
-+
-+ /* unnecessary to support mmap_sem since this is a dir */
-+ err = -EFAULT;
-+ if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
-+ goto out;
-+ if (copy_to_user(arg->ent.e->name, name, nlen))
-+ goto out;
-+ /* the terminating NULL */
-+ if (__put_user(0, arg->ent.e->name + nlen))
-+ goto out;
-+ err = 0;
-+ /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
-+ arg->ent.ul += len;
-+ rdu->rent++;
-+ } else {
-+ err = -EFAULT;
-+ au_fset_rdu(rdu->cookie.flags, FULL);
-+ rdu->full = 1;
-+ rdu->tail = arg->ent;
-+ }
-+
-+out:
-+ /* AuTraceErr(err); */
-+ return err;
-+}
-+
-+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
-+{
-+ int err;
-+ loff_t offset;
-+ struct au_rdu_cookie *cookie = &arg->rdu->cookie;
-+
-+ /* we don't have to care (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */
-+ offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
-+ err = offset;
-+ if (unlikely(offset != cookie->h_pos))
-+ goto out;
-+
-+ err = 0;
-+ do {
-+ arg->err = 0;
-+ au_fclr_rdu(cookie->flags, CALLED);
-+ /* smp_mb(); */
-+ err = vfsub_iterate_dir(h_file, &arg->ctx);
-+ if (err >= 0)
-+ err = arg->err;
-+ } while (!err
-+ && au_ftest_rdu(cookie->flags, CALLED)
-+ && !au_ftest_rdu(cookie->flags, FULL));
-+ cookie->h_pos = h_file->f_pos;
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
-+{
-+ int err;
-+ aufs_bindex_t bend;
-+ struct au_rdu_arg arg = {
-+ .ctx = {
-+ .actor = au_rdu_fill
-+ }
-+ };
-+ struct dentry *dentry;
-+ struct inode *inode;
-+ struct file *h_file;
-+ struct au_rdu_cookie *cookie = &rdu->cookie;
-+
-+ err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz);
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ goto out;
-+ }
-+ rdu->rent = 0;
-+ rdu->tail = rdu->ent;
-+ rdu->full = 0;
-+ arg.rdu = rdu;
-+ arg.ent = rdu->ent;
-+ arg.end = arg.ent.ul;
-+ arg.end += rdu->sz;
-+
-+ err = -ENOTDIR;
-+ if (unlikely(!file->f_op->iterate))
-+ goto out;
-+
-+ err = security_file_permission(file, MAY_READ);
-+ AuTraceErr(err);
-+ if (unlikely(err))
-+ goto out;
-+
-+ dentry = file->f_path.dentry;
-+ inode = d_inode(dentry);
-+#if 1
-+ mutex_lock(&inode->i_mutex);
-+#else
-+ err = mutex_lock_killable(&inode->i_mutex);
-+ AuTraceErr(err);
-+ if (unlikely(err))
-+ goto out;
-+#endif
-+
-+ arg.sb = inode->i_sb;
-+ err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out_mtx;
-+ err = au_alive_dir(dentry);
-+ if (unlikely(err))
-+ goto out_si;
-+ /* todo: reval? */
-+ fi_read_lock(file);
-+
-+ err = -EAGAIN;
-+ if (unlikely(au_ftest_rdu(cookie->flags, CONT)
-+ && cookie->generation != au_figen(file)))
-+ goto out_unlock;
-+
-+ err = 0;
-+ if (!rdu->blk) {
-+ rdu->blk = au_sbi(arg.sb)->si_rdblk;
-+ if (!rdu->blk)
-+ rdu->blk = au_dir_size(file, /*dentry*/NULL);
-+ }
-+ bend = au_fbstart(file);
-+ if (cookie->bindex < bend)
-+ cookie->bindex = bend;
-+ bend = au_fbend_dir(file);
-+ /* AuDbg("b%d, b%d\n", cookie->bindex, bend); */
-+ for (; !err && cookie->bindex <= bend;
-+ cookie->bindex++, cookie->h_pos = 0) {
-+ h_file = au_hf_dir(file, cookie->bindex);
-+ if (!h_file)
-+ continue;
-+
-+ au_fclr_rdu(cookie->flags, FULL);
-+ err = au_rdu_do(h_file, &arg);
-+ AuTraceErr(err);
-+ if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
-+ break;
-+ }
-+ AuDbg("rent %llu\n", rdu->rent);
-+
-+ if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
-+ rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
-+ au_fset_rdu(cookie->flags, CONT);
-+ cookie->generation = au_figen(file);
-+ }
-+
-+ ii_read_lock_child(inode);
-+ fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode)));
-+ ii_read_unlock(inode);
-+
-+out_unlock:
-+ fi_read_unlock(file);
-+out_si:
-+ si_read_unlock(arg.sb);
-+out_mtx:
-+ mutex_unlock(&inode->i_mutex);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
-+{
-+ int err;
-+ ino_t ino;
-+ unsigned long long nent;
-+ union au_rdu_ent_ul *u;
-+ struct au_rdu_ent ent;
-+ struct super_block *sb;
-+
-+ err = 0;
-+ nent = rdu->nent;
-+ u = &rdu->ent;
-+ sb = file->f_path.dentry->d_sb;
-+ si_read_lock(sb, AuLock_FLUSH);
-+ while (nent-- > 0) {
-+ /* unnecessary to support mmap_sem since this is a dir */
-+ err = copy_from_user(&ent, u->e, sizeof(ent));
-+ if (!err)
-+ err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ break;
-+ }
-+
-+ /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
-+ if (!ent.wh)
-+ err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
-+ else
-+ err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
-+ &ino);
-+ if (unlikely(err)) {
-+ AuTraceErr(err);
-+ break;
-+ }
-+
-+ err = __put_user(ino, &u->e->ino);
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ break;
-+ }
-+ u->ul += au_rdu_len(ent.nlen);
-+ }
-+ si_read_unlock(sb);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_rdu_verify(struct aufs_rdu *rdu)
-+{
-+ AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
-+ "%llu, b%d, 0x%x, g%u}\n",
-+ rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
-+ rdu->blk,
-+ rdu->rent, rdu->shwh, rdu->full,
-+ rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
-+ rdu->cookie.generation);
-+
-+ if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
-+ return 0;
-+
-+ AuDbg("%u:%u\n",
-+ rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
-+ return -EINVAL;
-+}
-+
-+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err, e;
-+ struct aufs_rdu rdu;
-+ void __user *p = (void __user *)arg;
-+
-+ err = copy_from_user(&rdu, p, sizeof(rdu));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ goto out;
-+ }
-+ err = au_rdu_verify(&rdu);
-+ if (unlikely(err))
-+ goto out;
-+
-+ switch (cmd) {
-+ case AUFS_CTL_RDU:
-+ err = au_rdu(file, &rdu);
-+ if (unlikely(err))
-+ break;
-+
-+ e = copy_to_user(p, &rdu, sizeof(rdu));
-+ if (unlikely(e)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ }
-+ break;
-+ case AUFS_CTL_RDU_INO:
-+ err = au_rdu_ino(file, &rdu);
-+ break;
-+
-+ default:
-+ /* err = -ENOTTY; */
-+ err = -EINVAL;
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+#ifdef CONFIG_COMPAT
-+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err, e;
-+ struct aufs_rdu rdu;
-+ void __user *p = compat_ptr(arg);
-+
-+ /* todo: get_user()? */
-+ err = copy_from_user(&rdu, p, sizeof(rdu));
-+ if (unlikely(err)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ goto out;
-+ }
-+ rdu.ent.e = compat_ptr(rdu.ent.ul);
-+ err = au_rdu_verify(&rdu);
-+ if (unlikely(err))
-+ goto out;
-+
-+ switch (cmd) {
-+ case AUFS_CTL_RDU:
-+ err = au_rdu(file, &rdu);
-+ if (unlikely(err))
-+ break;
-+
-+ rdu.ent.ul = ptr_to_compat(rdu.ent.e);
-+ rdu.tail.ul = ptr_to_compat(rdu.tail.e);
-+ e = copy_to_user(p, &rdu, sizeof(rdu));
-+ if (unlikely(e)) {
-+ err = -EFAULT;
-+ AuTraceErr(err);
-+ }
-+ break;
-+ case AUFS_CTL_RDU_INO:
-+ err = au_rdu_ino(file, &rdu);
-+ break;
-+
-+ default:
-+ /* err = -ENOTTY; */
-+ err = -EINVAL;
-+ }
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+#endif
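
[editor's note: one detail worth calling out from au_rdu_verify() above: the ioctl ABI is guarded by having userspace stamp sizeof(struct aufs_rdu) into a verify slot, which the kernel compares against its own sizeof, catching builds made against mismatched headers. A sketch of that size-handshake idiom; the struct is a cut-down stand-in, not the real aufs_rdu layout.]

#include <errno.h>
#include <stdio.h>

struct demo_rdu {
	unsigned long long sz;
	unsigned int verify[1];	/* [0] plays the AufsCtlRduV_SZ slot */
};

/* mirrors au_rdu_verify(): reject a caller built with other headers */
static int demo_verify(const struct demo_rdu *rdu)
{
	return rdu->verify[0] == sizeof(*rdu) ? 0 : -EINVAL;
}

int main(void)
{
	struct demo_rdu rdu = { .sz = 4096 };

	rdu.verify[0] = sizeof(rdu);	/* what a matched build sets */
	printf("verify: %d\n", demo_verify(&rdu));
	return 0;
}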
-diff -Nur linux-4.1.10.orig/fs/aufs/rwsem.h linux-4.1.10/fs/aufs/rwsem.h
---- linux-4.1.10.orig/fs/aufs/rwsem.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/rwsem.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,191 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * simple read-write semaphore wrappers
-+ */
-+
-+#ifndef __AUFS_RWSEM_H__
-+#define __AUFS_RWSEM_H__
-+
-+#ifdef __KERNEL__
-+
-+#include "debug.h"
-+
-+struct au_rwsem {
-+ struct rw_semaphore rwsem;
-+#ifdef CONFIG_AUFS_DEBUG
-+	/* just for debugging, not an almighty counter */
-+ atomic_t rcnt, wcnt;
-+#endif
-+};
-+
-+#ifdef CONFIG_AUFS_DEBUG
-+#define AuDbgCntInit(rw) do { \
-+ atomic_set(&(rw)->rcnt, 0); \
-+ atomic_set(&(rw)->wcnt, 0); \
-+ smp_mb(); /* atomic set */ \
-+} while (0)
-+
-+#define AuDbgRcntInc(rw) atomic_inc(&(rw)->rcnt)
-+#define AuDbgRcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0)
-+#define AuDbgWcntInc(rw) atomic_inc(&(rw)->wcnt)
-+#define AuDbgWcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0)
-+#else
-+#define AuDbgCntInit(rw) do {} while (0)
-+#define AuDbgRcntInc(rw) do {} while (0)
-+#define AuDbgRcntDec(rw) do {} while (0)
-+#define AuDbgWcntInc(rw) do {} while (0)
-+#define AuDbgWcntDec(rw) do {} while (0)
-+#endif /* CONFIG_AUFS_DEBUG */
-+
-+/* to make debugging easier, do not turn these into inline functions */
-+#define AuRwMustNoWaiters(rw) AuDebugOn(!list_empty(&(rw)->rwsem.wait_list))
-+/* rwsem_is_locked() is unusable */
-+#define AuRwMustReadLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0)
-+#define AuRwMustWriteLock(rw) AuDebugOn(atomic_read(&(rw)->wcnt) <= 0)
-+#define AuRwMustAnyLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \
-+ && atomic_read(&(rw)->wcnt) <= 0)
-+#define AuRwDestroy(rw) AuDebugOn(atomic_read(&(rw)->rcnt) \
-+ || atomic_read(&(rw)->wcnt))
-+
-+#define au_rw_class(rw, key) lockdep_set_class(&(rw)->rwsem, key)
-+
-+static inline void au_rw_init(struct au_rwsem *rw)
-+{
-+ AuDbgCntInit(rw);
-+ init_rwsem(&rw->rwsem);
-+}
-+
-+static inline void au_rw_init_wlock(struct au_rwsem *rw)
-+{
-+ au_rw_init(rw);
-+ down_write(&rw->rwsem);
-+ AuDbgWcntInc(rw);
-+}
-+
-+static inline void au_rw_init_wlock_nested(struct au_rwsem *rw,
-+ unsigned int lsc)
-+{
-+ au_rw_init(rw);
-+ down_write_nested(&rw->rwsem, lsc);
-+ AuDbgWcntInc(rw);
-+}
-+
-+static inline void au_rw_read_lock(struct au_rwsem *rw)
-+{
-+ down_read(&rw->rwsem);
-+ AuDbgRcntInc(rw);
-+}
-+
-+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc)
-+{
-+ down_read_nested(&rw->rwsem, lsc);
-+ AuDbgRcntInc(rw);
-+}
-+
-+static inline void au_rw_read_unlock(struct au_rwsem *rw)
-+{
-+ AuRwMustReadLock(rw);
-+ AuDbgRcntDec(rw);
-+ up_read(&rw->rwsem);
-+}
-+
-+static inline void au_rw_dgrade_lock(struct au_rwsem *rw)
-+{
-+ AuRwMustWriteLock(rw);
-+ AuDbgRcntInc(rw);
-+ AuDbgWcntDec(rw);
-+ downgrade_write(&rw->rwsem);
-+}
-+
-+static inline void au_rw_write_lock(struct au_rwsem *rw)
-+{
-+ down_write(&rw->rwsem);
-+ AuDbgWcntInc(rw);
-+}
-+
-+static inline void au_rw_write_lock_nested(struct au_rwsem *rw,
-+ unsigned int lsc)
-+{
-+ down_write_nested(&rw->rwsem, lsc);
-+ AuDbgWcntInc(rw);
-+}
-+
-+static inline void au_rw_write_unlock(struct au_rwsem *rw)
-+{
-+ AuRwMustWriteLock(rw);
-+ AuDbgWcntDec(rw);
-+ up_write(&rw->rwsem);
-+}
-+
-+/* why is the _nested version not defined? */
-+static inline int au_rw_read_trylock(struct au_rwsem *rw)
-+{
-+ int ret;
-+
-+ ret = down_read_trylock(&rw->rwsem);
-+ if (ret)
-+ AuDbgRcntInc(rw);
-+ return ret;
-+}
-+
-+static inline int au_rw_write_trylock(struct au_rwsem *rw)
-+{
-+ int ret;
-+
-+ ret = down_write_trylock(&rw->rwsem);
-+ if (ret)
-+ AuDbgWcntInc(rw);
-+ return ret;
-+}
-+
-+#undef AuDbgCntInit
-+#undef AuDbgRcntInc
-+#undef AuDbgRcntDec
-+#undef AuDbgWcntInc
-+#undef AuDbgWcntDec
-+
-+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
-+static inline void prefix##_read_lock(param) \
-+{ au_rw_read_lock(rwsem); } \
-+static inline void prefix##_write_lock(param) \
-+{ au_rw_write_lock(rwsem); } \
-+static inline int prefix##_read_trylock(param) \
-+{ return au_rw_read_trylock(rwsem); } \
-+static inline int prefix##_write_trylock(param) \
-+{ return au_rw_write_trylock(rwsem); }
-+/* why is the _nested version not defined? */
-+/* static inline void prefix##_read_trylock_nested(param, lsc)
-+{ au_rw_read_trylock_nested(rwsem, lsc); }
-+static inline void prefix##_write_trylock_nested(param, lsc)
-+{ au_rw_write_trylock_nested(rwsem, lsc); } */
-+
-+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \
-+static inline void prefix##_read_unlock(param) \
-+{ au_rw_read_unlock(rwsem); } \
-+static inline void prefix##_write_unlock(param) \
-+{ au_rw_write_unlock(rwsem); } \
-+static inline void prefix##_downgrade_lock(param) \
-+{ au_rw_dgrade_lock(rwsem); }
-+
-+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \
-+ AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
-+ AuSimpleUnlockRwsemFuncs(prefix, param, rwsem)
-+
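-+/*
-+ * Illustrative sketch, not part of the original aufs code: super.h
-+ * instantiates these macros as
-+ *	AuSimpleRwsemFuncs(__si, struct super_block *sb,
-+ *			   &au_sbi(sb)->si_rwsem);
-+ * which generates __si_read_lock(sb), __si_write_lock(sb),
-+ * __si_read_trylock(sb), __si_write_trylock(sb), __si_read_unlock(sb),
-+ * __si_write_unlock(sb) and __si_downgrade_lock(sb), each a thin
-+ * forwarder to the au_rw_*() wrappers above.
-+ */
-+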
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_RWSEM_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/sbinfo.c linux-4.1.10/fs/aufs/sbinfo.c
---- linux-4.1.10.orig/fs/aufs/sbinfo.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/sbinfo.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,356 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * superblock private data
-+ */
-+
-+#include "aufs.h"
-+
-+/*
-+ * they are necessary regardless of whether sysfs is disabled.
-+ */
-+void au_si_free(struct kobject *kobj)
-+{
-+ int i;
-+ struct au_sbinfo *sbinfo;
-+ char *locked __maybe_unused; /* debug only */
-+
-+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
-+ for (i = 0; i < AuPlink_NHASH; i++)
-+ AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head));
-+ AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
-+
-+ au_rw_write_lock(&sbinfo->si_rwsem);
-+ au_br_free(sbinfo);
-+ au_rw_write_unlock(&sbinfo->si_rwsem);
-+
-+ AuDebugOn(radix_tree_gang_lookup
-+ (&sbinfo->au_si_pid.tree, (void **)&locked,
-+ /*first_index*/PID_MAX_DEFAULT - 1,
-+ /*max_items*/sizeof(locked)/sizeof(*locked)));
-+
-+ kfree(sbinfo->si_branch);
-+ kfree(sbinfo->au_si_pid.bitmap);
-+ mutex_destroy(&sbinfo->si_xib_mtx);
-+ AuRwDestroy(&sbinfo->si_rwsem);
-+
-+ kfree(sbinfo);
-+}
-+
-+int au_si_alloc(struct super_block *sb)
-+{
-+ int err, i;
-+ struct au_sbinfo *sbinfo;
-+ static struct lock_class_key aufs_si;
-+
-+ err = -ENOMEM;
-+ sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
-+ if (unlikely(!sbinfo))
-+ goto out;
-+
-+ BUILD_BUG_ON(sizeof(unsigned long) !=
-+ sizeof(*sbinfo->au_si_pid.bitmap));
-+ sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
-+ sizeof(*sbinfo->au_si_pid.bitmap),
-+ GFP_NOFS);
-+ if (unlikely(!sbinfo->au_si_pid.bitmap))
-+ goto out_sbinfo;
-+
-+ /* will be reallocated separately */
-+ sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
-+ if (unlikely(!sbinfo->si_branch))
-+ goto out_pidmap;
-+
-+ err = sysaufs_si_init(sbinfo);
-+ if (unlikely(err))
-+ goto out_br;
-+
-+ au_nwt_init(&sbinfo->si_nowait);
-+ au_rw_init_wlock(&sbinfo->si_rwsem);
-+ au_rw_class(&sbinfo->si_rwsem, &aufs_si);
-+ spin_lock_init(&sbinfo->au_si_pid.tree_lock);
-+ INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);
-+
-+ atomic_long_set(&sbinfo->si_ninodes, 0);
-+ atomic_long_set(&sbinfo->si_nfiles, 0);
-+
-+ sbinfo->si_bend = -1;
-+ sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2;
-+
-+ sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
-+ sbinfo->si_wbr_create = AuWbrCreate_Def;
-+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
-+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
-+
-+ au_fhsm_init(sbinfo);
-+
-+ sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
-+
-+ sbinfo->si_xino_jiffy = jiffies;
-+ sbinfo->si_xino_expire
-+ = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC);
-+ mutex_init(&sbinfo->si_xib_mtx);
-+ sbinfo->si_xino_brid = -1;
-+ /* leave si_xib_last_pindex and si_xib_next_bit */
-+
-+ au_sphl_init(&sbinfo->si_aopen);
-+
-+ sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
-+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
-+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
-+ sbinfo->si_dirwh = AUFS_DIRWH_DEF;
-+
-+ for (i = 0; i < AuPlink_NHASH; i++)
-+ au_sphl_init(sbinfo->si_plink + i);
-+ init_waitqueue_head(&sbinfo->si_plink_wq);
-+ spin_lock_init(&sbinfo->si_plink_maint_lock);
-+
-+ au_sphl_init(&sbinfo->si_files);
-+
-+ /* leave other members for sysaufs and si_mnt. */
-+ sbinfo->si_sb = sb;
-+ sb->s_fs_info = sbinfo;
-+ si_pid_set(sb);
-+ return 0; /* success */
-+
-+out_br:
-+ kfree(sbinfo->si_branch);
-+out_pidmap:
-+ kfree(sbinfo->au_si_pid.bitmap);
-+out_sbinfo:
-+ kfree(sbinfo);
-+out:
-+ return err;
-+}
-+
-+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
-+{
-+ int err, sz;
-+ struct au_branch **brp;
-+
-+ AuRwMustWriteLock(&sbinfo->si_rwsem);
-+
-+ err = -ENOMEM;
-+ sz = sizeof(*brp) * (sbinfo->si_bend + 1);
-+ if (unlikely(!sz))
-+ sz = sizeof(*brp);
-+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
-+ if (brp) {
-+ sbinfo->si_branch = brp;
-+ err = 0;
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+unsigned int au_sigen_inc(struct super_block *sb)
-+{
-+ unsigned int gen;
-+ struct inode *inode;
-+
-+ SiMustWriteLock(sb);
-+
-+ gen = ++au_sbi(sb)->si_generation;
-+ au_update_digen(sb->s_root);
-+ inode = d_inode(sb->s_root);
-+ au_update_iigen(inode, /*half*/0);
-+ inode->i_version++;
-+ return gen;
-+}
-+
-+aufs_bindex_t au_new_br_id(struct super_block *sb)
-+{
-+ aufs_bindex_t br_id;
-+ int i;
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
-+ br_id = ++sbinfo->si_last_br_id;
-+ AuDebugOn(br_id < 0);
-+ if (br_id && au_br_index(sb, br_id) < 0)
-+ return br_id;
-+ }
-+
-+ return -1;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* it is ok for new 'nwt' tasks to be appended while we are sleeping */
-+int si_read_lock(struct super_block *sb, int flags)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (au_ftest_lock(flags, FLUSH))
-+ au_nwt_flush(&au_sbi(sb)->si_nowait);
-+
-+ si_noflush_read_lock(sb);
-+ err = au_plink_maint(sb, flags);
-+ if (unlikely(err))
-+ si_read_unlock(sb);
-+
-+ return err;
-+}
-+
-+int si_write_lock(struct super_block *sb, int flags)
-+{
-+ int err;
-+
-+ if (au_ftest_lock(flags, FLUSH))
-+ au_nwt_flush(&au_sbi(sb)->si_nowait);
-+
-+ si_noflush_write_lock(sb);
-+ err = au_plink_maint(sb, flags);
-+ if (unlikely(err))
-+ si_write_unlock(sb);
-+
-+ return err;
-+}
-+
-+/* dentry and super_block lock. call at entry point */
-+int aufs_read_lock(struct dentry *dentry, int flags)
-+{
-+ int err;
-+ struct super_block *sb;
-+
-+ sb = dentry->d_sb;
-+ err = si_read_lock(sb, flags);
-+ if (unlikely(err))
-+ goto out;
-+
-+ if (au_ftest_lock(flags, DW))
-+ di_write_lock_child(dentry);
-+ else
-+ di_read_lock_child(dentry, flags);
-+
-+ if (au_ftest_lock(flags, GEN)) {
-+ err = au_digen_test(dentry, au_sigen(sb));
-+ AuDebugOn(!err && au_dbrange_test(dentry));
-+ if (unlikely(err))
-+ aufs_read_unlock(dentry, flags);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+void aufs_read_unlock(struct dentry *dentry, int flags)
-+{
-+ if (au_ftest_lock(flags, DW))
-+ di_write_unlock(dentry);
-+ else
-+ di_read_unlock(dentry, flags);
-+ si_read_unlock(dentry->d_sb);
-+}
-+
-+void aufs_write_lock(struct dentry *dentry)
-+{
-+ si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
-+ di_write_lock_child(dentry);
-+}
-+
-+void aufs_write_unlock(struct dentry *dentry)
-+{
-+ di_write_unlock(dentry);
-+ si_write_unlock(dentry->d_sb);
-+}
-+
-+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
-+{
-+ int err;
-+ unsigned int sigen;
-+ struct super_block *sb;
-+
-+ sb = d1->d_sb;
-+ err = si_read_lock(sb, flags);
-+ if (unlikely(err))
-+ goto out;
-+
-+ di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIR));
-+
-+ if (au_ftest_lock(flags, GEN)) {
-+ sigen = au_sigen(sb);
-+ err = au_digen_test(d1, sigen);
-+ AuDebugOn(!err && au_dbrange_test(d1));
-+ if (!err) {
-+ err = au_digen_test(d2, sigen);
-+ AuDebugOn(!err && au_dbrange_test(d2));
-+ }
-+ if (unlikely(err))
-+ aufs_read_and_write_unlock2(d1, d2);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
-+{
-+ di_write_unlock2(d1, d2);
-+ si_read_unlock(d1->d_sb);
-+}
-+
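-+/*
-+ * Illustrative sketch, not part of the original aufs code: a typical
-+ * entry point takes the superblock lock first and the dentry lock
-+ * second, and drops them in reverse order, e.g.
-+ *
-+ *	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
-+ *	if (!err) {
-+ *		... operate on the write-locked dentry ...
-+ *		aufs_read_unlock(dentry, AuLock_DW);
-+ *	}
-+ */
-+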
-+/* ---------------------------------------------------------------------- */
-+
-+int si_pid_test_slow(struct super_block *sb)
-+{
-+ void *p;
-+
-+ rcu_read_lock();
-+ p = radix_tree_lookup(&au_sbi(sb)->au_si_pid.tree, current->pid);
-+ rcu_read_unlock();
-+
-+ return (long)!!p;
-+}
-+
-+void si_pid_set_slow(struct super_block *sb)
-+{
-+ int err;
-+ struct au_sbinfo *sbinfo;
-+
-+ AuDebugOn(si_pid_test_slow(sb));
-+
-+ sbinfo = au_sbi(sb);
-+ err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
-+ AuDebugOn(err);
-+ spin_lock(&sbinfo->au_si_pid.tree_lock);
-+ err = radix_tree_insert(&sbinfo->au_si_pid.tree, current->pid,
-+ /*any valid ptr*/sb);
-+ spin_unlock(&sbinfo->au_si_pid.tree_lock);
-+ AuDebugOn(err);
-+ radix_tree_preload_end();
-+}
-+
-+void si_pid_clr_slow(struct super_block *sb)
-+{
-+ void *p;
-+ struct au_sbinfo *sbinfo;
-+
-+ AuDebugOn(!si_pid_test_slow(sb));
-+
-+ sbinfo = au_sbi(sb);
-+ spin_lock(&sbinfo->au_si_pid.tree_lock);
-+ p = radix_tree_delete(&sbinfo->au_si_pid.tree, current->pid);
-+ spin_unlock(&sbinfo->au_si_pid.tree_lock);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/spl.h linux-4.1.10/fs/aufs/spl.h
---- linux-4.1.10.orig/fs/aufs/spl.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/spl.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,111 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * simple list protected by a spinlock
-+ */
-+
-+#ifndef __AUFS_SPL_H__
-+#define __AUFS_SPL_H__
-+
-+#ifdef __KERNEL__
-+
-+struct au_splhead {
-+ spinlock_t spin;
-+ struct list_head head;
-+};
-+
-+static inline void au_spl_init(struct au_splhead *spl)
-+{
-+ spin_lock_init(&spl->spin);
-+ INIT_LIST_HEAD(&spl->head);
-+}
-+
-+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl)
-+{
-+ spin_lock(&spl->spin);
-+ list_add(list, &spl->head);
-+ spin_unlock(&spl->spin);
-+}
-+
-+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl)
-+{
-+ spin_lock(&spl->spin);
-+ list_del(list);
-+ spin_unlock(&spl->spin);
-+}
-+
-+static inline void au_spl_del_rcu(struct list_head *list,
-+ struct au_splhead *spl)
-+{
-+ spin_lock(&spl->spin);
-+ list_del_rcu(list);
-+ spin_unlock(&spl->spin);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_sphlhead {
-+ spinlock_t spin;
-+ struct hlist_head head;
-+};
-+
-+static inline void au_sphl_init(struct au_sphlhead *sphl)
-+{
-+ spin_lock_init(&sphl->spin);
-+ INIT_HLIST_HEAD(&sphl->head);
-+}
-+
-+static inline void au_sphl_add(struct hlist_node *hlist,
-+ struct au_sphlhead *sphl)
-+{
-+ spin_lock(&sphl->spin);
-+ hlist_add_head(hlist, &sphl->head);
-+ spin_unlock(&sphl->spin);
-+}
-+
-+static inline void au_sphl_del(struct hlist_node *hlist,
-+ struct au_sphlhead *sphl)
-+{
-+ spin_lock(&sphl->spin);
-+ hlist_del(hlist);
-+ spin_unlock(&sphl->spin);
-+}
-+
-+static inline void au_sphl_del_rcu(struct hlist_node *hlist,
-+ struct au_sphlhead *sphl)
-+{
-+ spin_lock(&sphl->spin);
-+ hlist_del_rcu(hlist);
-+ spin_unlock(&sphl->spin);
-+}
-+
-+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl)
-+{
-+ unsigned long cnt;
-+ struct hlist_node *pos;
-+
-+ cnt = 0;
-+ spin_lock(&sphl->spin);
-+ hlist_for_each(pos, &sphl->head)
-+ cnt++;
-+ spin_unlock(&sphl->spin);
-+ return cnt;
-+}
-+
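-+/*
-+ * Illustrative sketch, not part of the original aufs code: a typical
-+ * user embeds the head in a long-lived object and a node in each
-+ * member, e.g.
-+ *	au_sphl_init(&sbinfo->si_files);
-+ *	au_sphl_add(&node, &sbinfo->si_files);
-+ *	...
-+ *	au_sphl_del(&node, &sbinfo->si_files);
-+ * (si_files is a real au_sbinfo member; 'node' here stands for a
-+ * hypothetical struct hlist_node embedded in the member object.)
-+ */
-+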
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_SPL_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/super.c linux-4.1.10/fs/aufs/super.c
---- linux-4.1.10.orig/fs/aufs/super.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/super.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1004 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * mount and super_block operations
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/seq_file.h>
-+#include <linux/statfs.h>
-+#include <linux/vmalloc.h>
-+#include "aufs.h"
-+
-+/*
-+ * super_operations
-+ */
-+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
-+{
-+ struct au_icntnr *c;
-+
-+ c = au_cache_alloc_icntnr();
-+ if (c) {
-+ au_icntnr_init(c);
-+ c->vfs_inode.i_version = 1; /* sigen(sb); */
-+ c->iinfo.ii_hinode = NULL;
-+ return &c->vfs_inode;
-+ }
-+ return NULL;
-+}
-+
-+static void aufs_destroy_inode_cb(struct rcu_head *head)
-+{
-+ struct inode *inode = container_of(head, struct inode, i_rcu);
-+
-+ INIT_HLIST_HEAD(&inode->i_dentry);
-+ au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
-+}
-+
-+static void aufs_destroy_inode(struct inode *inode)
-+{
-+ au_iinfo_fin(inode);
-+ call_rcu(&inode->i_rcu, aufs_destroy_inode_cb);
-+}
-+
-+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
-+{
-+ struct inode *inode;
-+ int err;
-+
-+ inode = iget_locked(sb, ino);
-+ if (unlikely(!inode)) {
-+ inode = ERR_PTR(-ENOMEM);
-+ goto out;
-+ }
-+ if (!(inode->i_state & I_NEW))
-+ goto out;
-+
-+ err = au_xigen_new(inode);
-+ if (!err)
-+ err = au_iinfo_init(inode);
-+ if (!err)
-+ inode->i_version++;
-+ else {
-+ iget_failed(inode);
-+ inode = ERR_PTR(err);
-+ }
-+
-+out:
-+ /* never return NULL */
-+ AuDebugOn(!inode);
-+ AuTraceErrPtr(inode);
-+ return inode;
-+}
-+
-+/* lock free root dinfo */
-+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend;
-+ struct path path;
-+ struct au_hdentry *hdp;
-+ struct au_branch *br;
-+ au_br_perm_str_t perm;
-+
-+ err = 0;
-+ bend = au_sbend(sb);
-+ hdp = au_di(sb->s_root)->di_hdentry;
-+ for (bindex = 0; !err && bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ path.mnt = au_br_mnt(br);
-+ path.dentry = hdp[bindex].hd_dentry;
-+ err = au_seq_path(seq, &path);
-+ if (!err) {
-+ au_optstr_br_perm(&perm, br->br_perm);
-+ err = seq_printf(seq, "=%s", perm.a);
-+ if (err == -1)
-+ err = -E2BIG;
-+ }
-+ if (!err && bindex != bend)
-+ err = seq_putc(seq, ':');
-+ }
-+
-+ return err;
-+}
-+
-+static void au_show_wbr_create(struct seq_file *m, int v,
-+ struct au_sbinfo *sbinfo)
-+{
-+ const char *pat;
-+
-+ AuRwMustAnyLock(&sbinfo->si_rwsem);
-+
-+ seq_puts(m, ",create=");
-+ pat = au_optstr_wbr_create(v);
-+ switch (v) {
-+ case AuWbrCreate_TDP:
-+ case AuWbrCreate_RR:
-+ case AuWbrCreate_MFS:
-+ case AuWbrCreate_PMFS:
-+ seq_puts(m, pat);
-+ break;
-+ case AuWbrCreate_MFSV:
-+ seq_printf(m, /*pat*/"mfs:%lu",
-+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
-+ / MSEC_PER_SEC);
-+ break;
-+ case AuWbrCreate_PMFSV:
-+ seq_printf(m, /*pat*/"pmfs:%lu",
-+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
-+ / MSEC_PER_SEC);
-+ break;
-+ case AuWbrCreate_MFSRR:
-+ seq_printf(m, /*pat*/"mfsrr:%llu",
-+ sbinfo->si_wbr_mfs.mfsrr_watermark);
-+ break;
-+ case AuWbrCreate_MFSRRV:
-+ seq_printf(m, /*pat*/"mfsrr:%llu:%lu",
-+ sbinfo->si_wbr_mfs.mfsrr_watermark,
-+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
-+ / MSEC_PER_SEC);
-+ break;
-+ case AuWbrCreate_PMFSRR:
-+ seq_printf(m, /*pat*/"pmfsrr:%llu",
-+ sbinfo->si_wbr_mfs.mfsrr_watermark);
-+ break;
-+ case AuWbrCreate_PMFSRRV:
-+ seq_printf(m, /*pat*/"pmfsrr:%llu:%lu",
-+ sbinfo->si_wbr_mfs.mfsrr_watermark,
-+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
-+ / MSEC_PER_SEC);
-+ break;
-+ }
-+}
-+
-+static int au_show_xino(struct seq_file *seq, struct super_block *sb)
-+{
-+#ifdef CONFIG_SYSFS
-+ return 0;
-+#else
-+ int err;
-+ const int len = sizeof(AUFS_XINO_FNAME) - 1;
-+ aufs_bindex_t bindex, brid;
-+ struct qstr *name;
-+ struct file *f;
-+ struct dentry *d, *h_root;
-+ struct au_hdentry *hdp;
-+
-+	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem);
-+
-+ err = 0;
-+ f = au_sbi(sb)->si_xib;
-+ if (!f)
-+ goto out;
-+
-+ /* stop printing the default xino path on the first writable branch */
-+ h_root = NULL;
-+ brid = au_xino_brid(sb);
-+ if (brid >= 0) {
-+ bindex = au_br_index(sb, brid);
-+ hdp = au_di(sb->s_root)->di_hdentry;
-+ h_root = hdp[0 + bindex].hd_dentry;
-+ }
-+ d = f->f_path.dentry;
-+ name = &d->d_name;
-+ /* safe ->d_parent because the file is unlinked */
-+ if (d->d_parent == h_root
-+ && name->len == len
-+ && !memcmp(name->name, AUFS_XINO_FNAME, len))
-+ goto out;
-+
-+ seq_puts(seq, ",xino=");
-+ err = au_xino_path(seq, f);
-+
-+out:
-+ return err;
-+#endif
-+}
-+
-+/* seq_file will re-call me in case of too long string */
-+static int aufs_show_options(struct seq_file *m, struct dentry *dentry)
-+{
-+ int err;
-+ unsigned int mnt_flags, v;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+
-+#define AuBool(name, str) do { \
-+ v = au_opt_test(mnt_flags, name); \
-+ if (v != au_opt_test(AuOpt_Def, name)) \
-+ seq_printf(m, ",%s" #str, v ? "" : "no"); \
-+} while (0)
-+
-+#define AuStr(name, str) do { \
-+ v = mnt_flags & AuOptMask_##name; \
-+ if (v != (AuOpt_Def & AuOptMask_##name)) \
-+ seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
-+} while (0)
-+
-+#define AuUInt(name, str, val) do { \
-+ if (val != AUFS_##name##_DEF) \
-+ seq_printf(m, "," #str "=%u", val); \
-+} while (0)
-+
-+ sb = dentry->d_sb;
-+ if (sb->s_flags & MS_POSIXACL)
-+ seq_puts(m, ",acl");
-+
-+ /* lock free root dinfo */
-+ si_noflush_read_lock(sb);
-+ sbinfo = au_sbi(sb);
-+ seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
-+
-+ mnt_flags = au_mntflags(sb);
-+ if (au_opt_test(mnt_flags, XINO)) {
-+ err = au_show_xino(m, sb);
-+ if (unlikely(err))
-+ goto out;
-+ } else
-+ seq_puts(m, ",noxino");
-+
-+ AuBool(TRUNC_XINO, trunc_xino);
-+ AuStr(UDBA, udba);
-+ AuBool(SHWH, shwh);
-+ AuBool(PLINK, plink);
-+ AuBool(DIO, dio);
-+ AuBool(DIRPERM1, dirperm1);
-+
-+ v = sbinfo->si_wbr_create;
-+ if (v != AuWbrCreate_Def)
-+ au_show_wbr_create(m, v, sbinfo);
-+
-+ v = sbinfo->si_wbr_copyup;
-+ if (v != AuWbrCopyup_Def)
-+ seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
-+
-+ v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
-+ if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
-+ seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
-+
-+ AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
-+
-+ v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
-+ AuUInt(RDCACHE, rdcache, v);
-+
-+ AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
-+ AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
-+
-+ au_fhsm_show(m, sbinfo);
-+
-+ AuBool(SUM, sum);
-+ /* AuBool(SUM_W, wsum); */
-+ AuBool(WARN_PERM, warn_perm);
-+ AuBool(VERBOSE, verbose);
-+
-+out:
-+ /* be sure to print "br:" last */
-+ if (!sysaufs_brs) {
-+ seq_puts(m, ",br:");
-+ au_show_brs(m, sb);
-+ }
-+ si_read_unlock(sb);
-+ return 0;
-+
-+#undef AuBool
-+#undef AuStr
-+#undef AuUInt
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* sum mode which returns the summation for statfs(2) */
-+
-+static u64 au_add_till_max(u64 a, u64 b)
-+{
-+ u64 old;
-+
-+ old = a;
-+ a += b;
-+ if (old <= a)
-+ return a;
-+ return ULLONG_MAX;
-+}
-+
-+static u64 au_mul_till_max(u64 a, long mul)
-+{
-+ u64 old;
-+
-+ old = a;
-+ a *= mul;
-+ if (old <= a)
-+ return a;
-+ return ULLONG_MAX;
-+}
-+
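-+/*
-+ * Illustrative note, not part of the original aufs code: these helpers
-+ * saturate instead of wrapping, e.g.
-+ *	au_add_till_max(ULLONG_MAX - 1, 2) == ULLONG_MAX
-+ *	au_mul_till_max(10, 3) == 30
-+ * so the per-branch sums below can never wrap around to a small value.
-+ */
-+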
-+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
-+{
-+ int err;
-+ long bsize, factor;
-+ u64 blocks, bfree, bavail, files, ffree;
-+ aufs_bindex_t bend, bindex, i;
-+ unsigned char shared;
-+ struct path h_path;
-+ struct super_block *h_sb;
-+
-+ err = 0;
-+ bsize = LONG_MAX;
-+ files = 0;
-+ ffree = 0;
-+ blocks = 0;
-+ bfree = 0;
-+ bavail = 0;
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ h_path.mnt = au_sbr_mnt(sb, bindex);
-+ h_sb = h_path.mnt->mnt_sb;
-+ shared = 0;
-+ for (i = 0; !shared && i < bindex; i++)
-+ shared = (au_sbr_sb(sb, i) == h_sb);
-+ if (shared)
-+ continue;
-+
-+ /* sb->s_root for NFS is unreliable */
-+ h_path.dentry = h_path.mnt->mnt_root;
-+ err = vfs_statfs(&h_path, buf);
-+ if (unlikely(err))
-+ goto out;
-+
-+ if (bsize > buf->f_bsize) {
-+ /*
-+ * we will reduce bsize, so we have to expand blocks
-+ * etc. to match them again
-+ */
-+ factor = (bsize / buf->f_bsize);
-+ blocks = au_mul_till_max(blocks, factor);
-+ bfree = au_mul_till_max(bfree, factor);
-+ bavail = au_mul_till_max(bavail, factor);
-+ bsize = buf->f_bsize;
-+ }
-+
-+ factor = (buf->f_bsize / bsize);
-+ blocks = au_add_till_max(blocks,
-+ au_mul_till_max(buf->f_blocks, factor));
-+ bfree = au_add_till_max(bfree,
-+ au_mul_till_max(buf->f_bfree, factor));
-+ bavail = au_add_till_max(bavail,
-+ au_mul_till_max(buf->f_bavail, factor));
-+ files = au_add_till_max(files, buf->f_files);
-+ ffree = au_add_till_max(ffree, buf->f_ffree);
-+ }
-+
-+ buf->f_bsize = bsize;
-+ buf->f_blocks = blocks;
-+ buf->f_bfree = bfree;
-+ buf->f_bavail = bavail;
-+ buf->f_files = files;
-+ buf->f_ffree = ffree;
-+ buf->f_frsize = 0;
-+
-+out:
-+ return err;
-+}
-+
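-+/*
-+ * Illustrative example, not part of the original aufs code: with two
-+ * branches whose f_bsize are 4096 and 1024, au_statfs_sum() reduces
-+ * bsize to 1024 and rescales the 4096-based counts by factor 4, so both
-+ * branches contribute in the same 1024-byte units before being summed.
-+ */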
-+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+ int err;
-+ struct path h_path;
-+ struct super_block *sb;
-+
-+ /* lock free root dinfo */
-+ sb = dentry->d_sb;
-+ si_noflush_read_lock(sb);
-+ if (!au_opt_test(au_mntflags(sb), SUM)) {
-+ /* sb->s_root for NFS is unreliable */
-+ h_path.mnt = au_sbr_mnt(sb, 0);
-+ h_path.dentry = h_path.mnt->mnt_root;
-+ err = vfs_statfs(&h_path, buf);
-+ } else
-+ err = au_statfs_sum(sb, buf);
-+ si_read_unlock(sb);
-+
-+ if (!err) {
-+ buf->f_type = AUFS_SUPER_MAGIC;
-+ buf->f_namelen = AUFS_MAX_NAMELEN;
-+ memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
-+ }
-+ /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int aufs_sync_fs(struct super_block *sb, int wait)
-+{
-+ int err, e;
-+ aufs_bindex_t bend, bindex;
-+ struct au_branch *br;
-+ struct super_block *h_sb;
-+
-+ err = 0;
-+ si_noflush_read_lock(sb);
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (!au_br_writable(br->br_perm))
-+ continue;
-+
-+ h_sb = au_sbr_sb(sb, bindex);
-+ if (h_sb->s_op->sync_fs) {
-+ e = h_sb->s_op->sync_fs(h_sb, wait);
-+ if (unlikely(e && !err))
-+ err = e;
-+ /* go on even if an error happens */
-+ }
-+ }
-+ si_read_unlock(sb);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* final actions when unmounting a file system */
-+static void aufs_put_super(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ sbinfo = au_sbi(sb);
-+ if (!sbinfo)
-+ return;
-+
-+ dbgaufs_si_fin(sbinfo);
-+ kobject_put(&sbinfo->si_kobj);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_array_free(void *array)
-+{
-+ if (array) {
-+ if (!is_vmalloc_addr(array))
-+ kfree(array);
-+ else
-+ vfree(array);
-+ }
-+}
-+
-+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg)
-+{
-+ void *array;
-+ unsigned long long n, sz;
-+
-+ array = NULL;
-+ n = 0;
-+ if (!*hint)
-+ goto out;
-+
-+ if (*hint > ULLONG_MAX / sizeof(array)) {
-+ array = ERR_PTR(-EMFILE);
-+ pr_err("hint %llu\n", *hint);
-+ goto out;
-+ }
-+
-+ sz = sizeof(array) * *hint;
-+ array = kzalloc(sz, GFP_NOFS);
-+ if (unlikely(!array))
-+ array = vzalloc(sz);
-+ if (unlikely(!array)) {
-+ array = ERR_PTR(-ENOMEM);
-+ goto out;
-+ }
-+
-+ n = cb(array, *hint, arg);
-+ AuDebugOn(n > *hint);
-+
-+out:
-+ *hint = n;
-+ return array;
-+}
-+
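-+/*
-+ * Illustrative note, not part of the original aufs code: callers pass
-+ * an element-count hint and a callback that fills the array; e.g.
-+ * au_iarray_alloc() below uses si_ninodes as the hint and au_iarray_cb
-+ * to collect the live inodes, and au_array_free() transparently handles
-+ * both the kzalloc() and the vzalloc() fallback cases.
-+ */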
-+static unsigned long long au_iarray_cb(void *a,
-+ unsigned long long max __maybe_unused,
-+ void *arg)
-+{
-+ unsigned long long n;
-+ struct inode **p, *inode;
-+ struct list_head *head;
-+
-+ n = 0;
-+ p = a;
-+ head = arg;
-+ spin_lock(&inode_sb_list_lock);
-+ list_for_each_entry(inode, head, i_sb_list) {
-+ if (!is_bad_inode(inode)
-+ && au_ii(inode)->ii_bstart >= 0) {
-+ spin_lock(&inode->i_lock);
-+ if (atomic_read(&inode->i_count)) {
-+ au_igrab(inode);
-+ *p++ = inode;
-+ n++;
-+ AuDebugOn(n > max);
-+ }
-+ spin_unlock(&inode->i_lock);
-+ }
-+ }
-+ spin_unlock(&inode_sb_list_lock);
-+
-+ return n;
-+}
-+
-+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
-+{
-+ *max = atomic_long_read(&au_sbi(sb)->si_ninodes);
-+ return au_array_alloc(max, au_iarray_cb, &sb->s_inodes);
-+}
-+
-+void au_iarray_free(struct inode **a, unsigned long long max)
-+{
-+ unsigned long long ull;
-+
-+ for (ull = 0; ull < max; ull++)
-+ iput(a[ull]);
-+ au_array_free(a);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * refresh dentry and inode at remount time.
-+ */
-+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
-+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
-+ struct dentry *parent)
-+{
-+ int err;
-+
-+ di_write_lock_child(dentry);
-+ di_read_lock_parent(parent, AuLock_IR);
-+ err = au_refresh_dentry(dentry, parent);
-+ if (!err && dir_flags)
-+ au_hn_reset(d_inode(dentry), dir_flags);
-+ di_read_unlock(parent, AuLock_IR);
-+ di_write_unlock(dentry);
-+
-+ return err;
-+}
-+
-+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
-+ struct au_sbinfo *sbinfo,
-+ const unsigned int dir_flags)
-+{
-+ int err;
-+ struct dentry *parent;
-+
-+ err = 0;
-+ parent = dget_parent(dentry);
-+ if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
-+ if (d_really_is_positive(dentry)) {
-+ if (!d_is_dir(dentry))
-+ err = au_do_refresh(dentry, /*dir_flags*/0,
-+ parent);
-+ else {
-+ err = au_do_refresh(dentry, dir_flags, parent);
-+ if (unlikely(err))
-+ au_fset_si(sbinfo, FAILED_REFRESH_DIR);
-+ }
-+ } else
-+ err = au_do_refresh(dentry, /*dir_flags*/0, parent);
-+ AuDbgDentry(dentry);
-+ }
-+ dput(parent);
-+
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static int au_refresh_d(struct super_block *sb)
-+{
-+ int err, i, j, ndentry, e;
-+ unsigned int sigen;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry **dentries, *d;
-+ struct au_sbinfo *sbinfo;
-+ struct dentry *root = sb->s_root;
-+ const unsigned int dir_flags = au_hi_flags(d_inode(root), /*isdir*/1);
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
-+ if (unlikely(err))
-+ goto out_dpages;
-+
-+ sigen = au_sigen(sb);
-+ sbinfo = au_sbi(sb);
-+ for (i = 0; i < dpages.ndpage; i++) {
-+ dpage = dpages.dpages + i;
-+ dentries = dpage->dentries;
-+ ndentry = dpage->ndentry;
-+ for (j = 0; j < ndentry; j++) {
-+ d = dentries[j];
-+ e = au_do_refresh_d(d, sigen, sbinfo, dir_flags);
-+ if (unlikely(e && !err))
-+ err = e;
-+			/* go on even if err */
-+ }
-+ }
-+
-+out_dpages:
-+ au_dpages_free(&dpages);
-+out:
-+ return err;
-+}
-+
-+static int au_refresh_i(struct super_block *sb)
-+{
-+ int err, e;
-+ unsigned int sigen;
-+ unsigned long long max, ull;
-+ struct inode *inode, **array;
-+
-+ array = au_iarray_alloc(sb, &max);
-+ err = PTR_ERR(array);
-+ if (IS_ERR(array))
-+ goto out;
-+
-+ err = 0;
-+ sigen = au_sigen(sb);
-+ for (ull = 0; ull < max; ull++) {
-+ inode = array[ull];
-+ if (unlikely(!inode))
-+ break;
-+ if (au_iigen(inode, NULL) != sigen) {
-+ ii_write_lock_child(inode);
-+ e = au_refresh_hinode_self(inode);
-+ ii_write_unlock(inode);
-+ if (unlikely(e)) {
-+ pr_err("error %d, i%lu\n", e, inode->i_ino);
-+ if (!err)
-+ err = e;
-+ /* go on even if err */
-+ }
-+ }
-+ }
-+
-+ au_iarray_free(array, max);
-+
-+out:
-+ return err;
-+}
-+
-+static void au_remount_refresh(struct super_block *sb)
-+{
-+ int err, e;
-+ unsigned int udba;
-+ aufs_bindex_t bindex, bend;
-+ struct dentry *root;
-+ struct inode *inode;
-+ struct au_branch *br;
-+
-+ au_sigen_inc(sb);
-+ au_fclr_si(au_sbi(sb), FAILED_REFRESH_DIR);
-+
-+ root = sb->s_root;
-+ DiMustNoWaiters(root);
-+ inode = d_inode(root);
-+ IiMustNoWaiters(inode);
-+
-+ udba = au_opt_udba(sb);
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ err = au_hnotify_reset_br(udba, br, br->br_perm);
-+ if (unlikely(err))
-+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
-+ bindex, err);
-+ /* go on even if err */
-+ }
-+ au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
-+
-+ di_write_unlock(root);
-+ err = au_refresh_d(sb);
-+ e = au_refresh_i(sb);
-+ if (unlikely(e && !err))
-+ err = e;
-+ /* aufs_write_lock() calls ..._child() */
-+ di_write_lock_child(root);
-+
-+ au_cpup_attr_all(inode, /*force*/1);
-+
-+ if (unlikely(err))
-+ AuIOErr("refresh failed, ignored, %d\n", err);
-+}
-+
-+/* stop mount(8)'s extra interpretation of errno and its strange error messages */
-+static int cvt_err(int err)
-+{
-+ AuTraceErr(err);
-+
-+ switch (err) {
-+ case -ENOENT:
-+ case -ENOTDIR:
-+ case -EEXIST:
-+ case -EIO:
-+ err = -EINVAL;
-+ }
-+ return err;
-+}
-+
-+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
-+{
-+ int err, do_dx;
-+ unsigned int mntflags;
-+ struct au_opts opts;
-+ struct dentry *root;
-+ struct inode *inode;
-+ struct au_sbinfo *sbinfo;
-+
-+ err = 0;
-+ root = sb->s_root;
-+ if (!data || !*data) {
-+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (!err) {
-+ di_write_lock_child(root);
-+ err = au_opts_verify(sb, *flags, /*pending*/0);
-+ aufs_write_unlock(root);
-+ }
-+ goto out;
-+ }
-+
-+ err = -ENOMEM;
-+ memset(&opts, 0, sizeof(opts));
-+ opts.opt = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!opts.opt))
-+ goto out;
-+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
-+ opts.flags = AuOpts_REMOUNT;
-+ opts.sb_flags = *flags;
-+
-+ /* parse it before aufs lock */
-+ err = au_opts_parse(sb, data, &opts);
-+ if (unlikely(err))
-+ goto out_opts;
-+
-+ sbinfo = au_sbi(sb);
-+ inode = d_inode(root);
-+ mutex_lock(&inode->i_mutex);
-+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out_mtx;
-+ di_write_lock_child(root);
-+
-+ /* au_opts_remount() may return an error */
-+ err = au_opts_remount(sb, &opts);
-+ au_opts_free(&opts);
-+
-+ if (au_ftest_opts(opts.flags, REFRESH))
-+ au_remount_refresh(sb);
-+
-+ if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
-+ mntflags = au_mntflags(sb);
-+ do_dx = !!au_opt_test(mntflags, DIO);
-+ au_dy_arefresh(do_dx);
-+ }
-+
-+ au_fhsm_wrote_all(sb, /*force*/1); /* ?? */
-+ aufs_write_unlock(root);
-+
-+out_mtx:
-+ mutex_unlock(&inode->i_mutex);
-+out_opts:
-+ free_page((unsigned long)opts.opt);
-+out:
-+ err = cvt_err(err);
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+static const struct super_operations aufs_sop = {
-+ .alloc_inode = aufs_alloc_inode,
-+ .destroy_inode = aufs_destroy_inode,
-+ /* always deleting, no clearing */
-+ .drop_inode = generic_delete_inode,
-+ .show_options = aufs_show_options,
-+ .statfs = aufs_statfs,
-+ .put_super = aufs_put_super,
-+ .sync_fs = aufs_sync_fs,
-+ .remount_fs = aufs_remount_fs
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int alloc_root(struct super_block *sb)
-+{
-+ int err;
-+ struct inode *inode;
-+ struct dentry *root;
-+
-+ err = -ENOMEM;
-+ inode = au_iget_locked(sb, AUFS_ROOT_INO);
-+ err = PTR_ERR(inode);
-+ if (IS_ERR(inode))
-+ goto out;
-+
-+ inode->i_op = &aufs_dir_iop;
-+ inode->i_fop = &aufs_dir_fop;
-+ inode->i_mode = S_IFDIR;
-+ set_nlink(inode, 2);
-+ unlock_new_inode(inode);
-+
-+ root = d_make_root(inode);
-+	if (unlikely(!root)) {
-+		err = -ENOMEM;
-+		goto out;
-+	}
-+ err = PTR_ERR(root);
-+ if (IS_ERR(root))
-+ goto out;
-+
-+ err = au_di_init(root);
-+ if (!err) {
-+ sb->s_root = root;
-+ return 0; /* success */
-+ }
-+ dput(root);
-+
-+out:
-+ return err;
-+}
-+
-+static int aufs_fill_super(struct super_block *sb, void *raw_data,
-+ int silent __maybe_unused)
-+{
-+ int err;
-+ struct au_opts opts;
-+ struct dentry *root;
-+ struct inode *inode;
-+ char *arg = raw_data;
-+
-+ if (unlikely(!arg || !*arg)) {
-+ err = -EINVAL;
-+ pr_err("no arg\n");
-+ goto out;
-+ }
-+
-+ err = -ENOMEM;
-+ memset(&opts, 0, sizeof(opts));
-+ opts.opt = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!opts.opt))
-+ goto out;
-+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
-+ opts.sb_flags = sb->s_flags;
-+
-+ err = au_si_alloc(sb);
-+ if (unlikely(err))
-+ goto out_opts;
-+
-+ /* all timestamps always follow the ones on the branch */
-+ sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
-+ sb->s_op = &aufs_sop;
-+ sb->s_d_op = &aufs_dop;
-+ sb->s_magic = AUFS_SUPER_MAGIC;
-+ sb->s_maxbytes = 0;
-+ sb->s_stack_depth = 1;
-+ au_export_init(sb);
-+ /* au_xattr_init(sb); */
-+
-+ err = alloc_root(sb);
-+ if (unlikely(err)) {
-+ si_write_unlock(sb);
-+ goto out_info;
-+ }
-+ root = sb->s_root;
-+ inode = d_inode(root);
-+
-+ /*
-+	 * actually we can parse the options here regardless of the aufs lock.
-+ * but at remount time, parsing must be done before aufs lock.
-+ * so we follow the same rule.
-+ */
-+ ii_write_lock_parent(inode);
-+ aufs_write_unlock(root);
-+ err = au_opts_parse(sb, arg, &opts);
-+ if (unlikely(err))
-+ goto out_root;
-+
-+ /* lock vfs_inode first, then aufs. */
-+ mutex_lock(&inode->i_mutex);
-+ aufs_write_lock(root);
-+ err = au_opts_mount(sb, &opts);
-+ au_opts_free(&opts);
-+ aufs_write_unlock(root);
-+ mutex_unlock(&inode->i_mutex);
-+ if (!err)
-+ goto out_opts; /* success */
-+
-+out_root:
-+ dput(root);
-+ sb->s_root = NULL;
-+out_info:
-+ dbgaufs_si_fin(au_sbi(sb));
-+ kobject_put(&au_sbi(sb)->si_kobj);
-+ sb->s_fs_info = NULL;
-+out_opts:
-+ free_page((unsigned long)opts.opt);
-+out:
-+ AuTraceErr(err);
-+ err = cvt_err(err);
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
-+ const char *dev_name __maybe_unused,
-+ void *raw_data)
-+{
-+ struct dentry *root;
-+ struct super_block *sb;
-+
-+ /* all timestamps always follow the ones on the branch */
-+ /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
-+ root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
-+ if (IS_ERR(root))
-+ goto out;
-+
-+ sb = root->d_sb;
-+ si_write_lock(sb, !AuLock_FLUSH);
-+ sysaufs_brs_add(sb, 0);
-+ si_write_unlock(sb);
-+ au_sbilist_add(sb);
-+
-+out:
-+ return root;
-+}
-+
-+static void aufs_kill_sb(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ sbinfo = au_sbi(sb);
-+ if (sbinfo) {
-+ au_sbilist_del(sb);
-+ aufs_write_lock(sb->s_root);
-+ au_fhsm_fin(sb);
-+ if (sbinfo->si_wbr_create_ops->fin)
-+ sbinfo->si_wbr_create_ops->fin(sb);
-+ if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
-+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
-+ au_remount_refresh(sb);
-+ }
-+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
-+ au_plink_put(sb, /*verbose*/1);
-+ au_xino_clr(sb);
-+ sbinfo->si_sb = NULL;
-+ aufs_write_unlock(sb->s_root);
-+ au_nwt_flush(&sbinfo->si_nowait);
-+ }
-+ kill_anon_super(sb);
-+}
-+
-+struct file_system_type aufs_fs_type = {
-+ .name = AUFS_FSTYPE,
-+ /* a race between rename and others */
-+ .fs_flags = FS_RENAME_DOES_D_MOVE,
-+ .mount = aufs_mount,
-+ .kill_sb = aufs_kill_sb,
-+ /* no need to __module_get() and module_put(). */
-+ .owner = THIS_MODULE,
-+};
-diff -Nur linux-4.1.10.orig/fs/aufs/super.h linux-4.1.10/fs/aufs/super.h
---- linux-4.1.10.orig/fs/aufs/super.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/super.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,635 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * super_block operations
-+ */
-+
-+#ifndef __AUFS_SUPER_H__
-+#define __AUFS_SUPER_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fs.h>
-+#include <linux/kobject.h>
-+#include "rwsem.h"
-+#include "spl.h"
-+#include "wkq.h"
-+
-+/* policies to select one among multiple writable branches */
-+struct au_wbr_copyup_operations {
-+ int (*copyup)(struct dentry *dentry);
-+};
-+
-+#define AuWbr_DIR 1 /* target is a dir */
-+#define AuWbr_PARENT (1 << 1) /* always require a parent */
-+
-+#define au_ftest_wbr(flags, name) ((flags) & AuWbr_##name)
-+#define au_fset_wbr(flags, name) { (flags) |= AuWbr_##name; }
-+#define au_fclr_wbr(flags, name) { (flags) &= ~AuWbr_##name; }
-+
-+struct au_wbr_create_operations {
-+ int (*create)(struct dentry *dentry, unsigned int flags);
-+ int (*init)(struct super_block *sb);
-+ int (*fin)(struct super_block *sb);
-+};
-+
-+struct au_wbr_mfs {
-+ struct mutex mfs_lock; /* protect this structure */
-+ unsigned long mfs_jiffy;
-+ unsigned long mfs_expire;
-+ aufs_bindex_t mfs_bindex;
-+
-+ unsigned long long mfsrr_bytes;
-+ unsigned long long mfsrr_watermark;
-+};
-+
-+struct pseudo_link {
-+ union {
-+ struct hlist_node hlist;
-+ struct rcu_head rcu;
-+ };
-+ struct inode *inode;
-+};
-+
-+#define AuPlink_NHASH 100
-+static inline int au_plink_hash(ino_t ino)
-+{
-+ return ino % AuPlink_NHASH;
-+}
-+
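-+/*
-+ * Illustrative example, not part of the original aufs code: with
-+ * AuPlink_NHASH == 100, inode number 12345 hashes to bucket
-+ * 12345 % 100 == 45 of si_plink[].
-+ */
-+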
-+/* File-based Hierarchical Storage Management */
-+struct au_fhsm {
-+#ifdef CONFIG_AUFS_FHSM
-+ /* allow only one process who can receive the notification */
-+ spinlock_t fhsm_spin;
-+ pid_t fhsm_pid;
-+ wait_queue_head_t fhsm_wqh;
-+ atomic_t fhsm_readable;
-+
-+ /* these are protected by si_rwsem */
-+ unsigned long fhsm_expire;
-+ aufs_bindex_t fhsm_bottom;
-+#endif
-+};
-+
-+struct au_branch;
-+struct au_sbinfo {
-+ /* nowait tasks in the system-wide workqueue */
-+ struct au_nowait_tasks si_nowait;
-+
-+ /*
-+	 * tried sb->s_umount, but failed due to the lock dependency with i_mutex.
-+ * rwsem for au_sbinfo is necessary.
-+ */
-+ struct au_rwsem si_rwsem;
-+
-+ /* prevent recursive locking in deleting inode */
-+ struct {
-+ unsigned long *bitmap;
-+ spinlock_t tree_lock;
-+ struct radix_tree_root tree;
-+ } au_si_pid;
-+
-+ /*
-+	 * dirty approach to protect sb->s_inodes and ->s_files (gone) from
-+ * remount.
-+ */
-+ atomic_long_t si_ninodes, si_nfiles;
-+
-+ /* branch management */
-+ unsigned int si_generation;
-+
-+ /* see AuSi_ flags */
-+ unsigned char au_si_status;
-+
-+ aufs_bindex_t si_bend;
-+
-+	/* dirty trick to keep br_id positive */
-+ unsigned int si_last_br_id :
-+ sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
-+ struct au_branch **si_branch;
-+
-+ /* policy to select a writable branch */
-+ unsigned char si_wbr_copyup;
-+ unsigned char si_wbr_create;
-+ struct au_wbr_copyup_operations *si_wbr_copyup_ops;
-+ struct au_wbr_create_operations *si_wbr_create_ops;
-+
-+ /* round robin */
-+ atomic_t si_wbr_rr_next;
-+
-+ /* most free space */
-+ struct au_wbr_mfs si_wbr_mfs;
-+
-+ /* File-based Hierarchical Storage Management */
-+ struct au_fhsm si_fhsm;
-+
-+ /* mount flags */
-+ /* include/asm-ia64/siginfo.h defines a macro named si_flags */
-+ unsigned int si_mntflags;
-+
-+ /* external inode number (bitmap and translation table) */
-+ vfs_readf_t si_xread;
-+ vfs_writef_t si_xwrite;
-+ struct file *si_xib;
-+ struct mutex si_xib_mtx; /* protect xib members */
-+ unsigned long *si_xib_buf;
-+ unsigned long si_xib_last_pindex;
-+ int si_xib_next_bit;
-+ aufs_bindex_t si_xino_brid;
-+ unsigned long si_xino_jiffy;
-+ unsigned long si_xino_expire;
-+ /* reserved for future use */
-+ /* unsigned long long si_xib_limit; */ /* Max xib file size */
-+
-+#ifdef CONFIG_AUFS_EXPORT
-+ /* i_generation */
-+ struct file *si_xigen;
-+ atomic_t si_xigen_next;
-+#endif
-+
-+	/* dirty trick to support atomic_open */
-+ struct au_sphlhead si_aopen;
-+
-+ /* vdir parameters */
-+ unsigned long si_rdcache; /* max cache time in jiffies */
-+ unsigned int si_rdblk; /* deblk size */
-+ unsigned int si_rdhash; /* hash size */
-+
-+ /*
-+	 * If the number of whiteouts is larger than si_dirwh, leave all of
-+	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
-+	 * A future fsck.aufs or a kernel thread will remove them later.
-+ * Otherwise, remove all whiteouts and the dir in rmdir(2).
-+ */
-+ unsigned int si_dirwh;
-+
-+ /* pseudo_link list */
-+ struct au_sphlhead si_plink[AuPlink_NHASH];
-+ wait_queue_head_t si_plink_wq;
-+ spinlock_t si_plink_maint_lock;
-+ pid_t si_plink_maint_pid;
-+
-+ /* file list */
-+ struct au_sphlhead si_files;
-+
-+ /*
-+ * sysfs and lifetime management.
-+	 * this is not a small structure and it may be a waste of memory when
-+	 * sysfs is disabled, particularly when many aufs instances are mounted.
-+	 * but most configurations enable sysfs.
-+ */
-+ struct kobject si_kobj;
-+#ifdef CONFIG_DEBUG_FS
-+ struct dentry *si_dbgaufs;
-+ struct dentry *si_dbgaufs_plink;
-+ struct dentry *si_dbgaufs_xib;
-+#ifdef CONFIG_AUFS_EXPORT
-+ struct dentry *si_dbgaufs_xigen;
-+#endif
-+#endif
-+
-+#ifdef CONFIG_AUFS_SBILIST
-+ struct list_head si_list;
-+#endif
-+
-+ /* dirty, necessary for unmounting, sysfs and sysrq */
-+ struct super_block *si_sb;
-+};
-+
-+/* sbinfo status flags */
-+/*
-+ * set true when refresh_dirs() failed at remount time.
-+ * then try refreshing dirs at access time again.
-+ * if it is false, refreshing dirs at access time is unnecessary.
-+ */
-+#define AuSi_FAILED_REFRESH_DIR 1
-+
-+#define AuSi_FHSM (1 << 1) /* fhsm is active now */
-+
-+#ifndef CONFIG_AUFS_FHSM
-+#undef AuSi_FHSM
-+#define AuSi_FHSM 0
-+#endif
-+
-+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
-+ unsigned int flag)
-+{
-+ AuRwMustAnyLock(&sbi->si_rwsem);
-+ return sbi->au_si_status & flag;
-+}
-+#define au_ftest_si(sbinfo, name) au_do_ftest_si(sbinfo, AuSi_##name)
-+#define au_fset_si(sbinfo, name) do { \
-+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
-+ (sbinfo)->au_si_status |= AuSi_##name; \
-+} while (0)
-+#define au_fclr_si(sbinfo, name) do { \
-+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
-+ (sbinfo)->au_si_status &= ~AuSi_##name; \
-+} while (0)
-+
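-+/*
-+ * Illustrative sketch, not part of the original aufs code: remount uses
-+ * these as, e.g.
-+ *	au_fclr_si(au_sbi(sb), FAILED_REFRESH_DIR);
-+ *	...
-+ *	au_fset_si(sbinfo, FAILED_REFRESH_DIR);
-+ * with si_rwsem write-held, as the assertions embedded above require.
-+ */
-+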
-+/* ---------------------------------------------------------------------- */
-+
-+/* policy to select one among writable branches */
-+#define AuWbrCopyup(sbinfo, ...) \
-+ ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
-+#define AuWbrCreate(sbinfo, ...) \
-+ ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
-+
-+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
-+#define AuLock_DW 1 /* write-lock dentry */
-+#define AuLock_IR (1 << 1) /* read-lock inode */
-+#define AuLock_IW (1 << 2) /* write-lock inode */
-+#define AuLock_FLUSH (1 << 3) /* wait for 'nowait' tasks */
-+#define AuLock_DIR (1 << 4) /* target is a dir */
-+#define AuLock_NOPLM (1 << 5) /* return err in plm mode */
-+#define AuLock_NOPLMW (1 << 6) /* wait for plm mode ends */
-+#define AuLock_GEN (1 << 7) /* test digen/iigen */
-+#define au_ftest_lock(flags, name) ((flags) & AuLock_##name)
-+#define au_fset_lock(flags, name) \
-+ do { (flags) |= AuLock_##name; } while (0)
-+#define au_fclr_lock(flags, name) \
-+ do { (flags) &= ~AuLock_##name; } while (0)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* super.c */
-+extern struct file_system_type aufs_fs_type;
-+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
-+typedef unsigned long long (*au_arraycb_t)(void *array, unsigned long long max,
-+ void *arg);
-+void au_array_free(void *array);
-+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg);
-+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
-+void au_iarray_free(struct inode **a, unsigned long long max);
-+
-+/* sbinfo.c */
-+void au_si_free(struct kobject *kobj);
-+int au_si_alloc(struct super_block *sb);
-+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
-+
-+unsigned int au_sigen_inc(struct super_block *sb);
-+aufs_bindex_t au_new_br_id(struct super_block *sb);
-+
-+int si_read_lock(struct super_block *sb, int flags);
-+int si_write_lock(struct super_block *sb, int flags);
-+int aufs_read_lock(struct dentry *dentry, int flags);
-+void aufs_read_unlock(struct dentry *dentry, int flags);
-+void aufs_write_lock(struct dentry *dentry);
-+void aufs_write_unlock(struct dentry *dentry);
-+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
-+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
-+
-+int si_pid_test_slow(struct super_block *sb);
-+void si_pid_set_slow(struct super_block *sb);
-+void si_pid_clr_slow(struct super_block *sb);
-+
-+/* wbr_policy.c */
-+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
-+extern struct au_wbr_create_operations au_wbr_create_ops[];
-+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
-+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex);
-+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart);
-+
-+/* mvdown.c */
-+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg);
-+
-+#ifdef CONFIG_AUFS_FHSM
-+/* fhsm.c */
-+
-+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm)
-+{
-+ pid_t pid;
-+
-+ spin_lock(&fhsm->fhsm_spin);
-+ pid = fhsm->fhsm_pid;
-+ spin_unlock(&fhsm->fhsm_spin);
-+
-+ return pid;
-+}
-+
-+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force);
-+void au_fhsm_wrote_all(struct super_block *sb, int force);
-+int au_fhsm_fd(struct super_block *sb, int oflags);
-+int au_fhsm_br_alloc(struct au_branch *br);
-+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex);
-+void au_fhsm_fin(struct super_block *sb);
-+void au_fhsm_init(struct au_sbinfo *sbinfo);
-+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec);
-+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo);
-+#else
-+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex,
-+ int force)
-+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force)
-+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags)
-+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm)
-+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br)
-+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex)
-+AuStubVoid(au_fhsm_fin, struct super_block *sb)
-+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo)
-+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec)
-+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo *sbinfo)
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
-+{
-+ return sb->s_fs_info;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_EXPORT
-+int au_test_nfsd(void);
-+void au_export_init(struct super_block *sb);
-+void au_xigen_inc(struct inode *inode);
-+int au_xigen_new(struct inode *inode);
-+int au_xigen_set(struct super_block *sb, struct file *base);
-+void au_xigen_clr(struct super_block *sb);
-+
-+static inline int au_busy_or_stale(void)
-+{
-+ if (!au_test_nfsd())
-+ return -EBUSY;
-+ return -ESTALE;
-+}
-+#else
-+AuStubInt0(au_test_nfsd, void)
-+AuStubVoid(au_export_init, struct super_block *sb)
-+AuStubVoid(au_xigen_inc, struct inode *inode)
-+AuStubInt0(au_xigen_new, struct inode *inode)
-+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base)
-+AuStubVoid(au_xigen_clr, struct super_block *sb)
-+AuStub(int, au_busy_or_stale, return -EBUSY, void)
-+#endif /* CONFIG_AUFS_EXPORT */
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_SBILIST
-+/* module.c */
-+extern struct au_splhead au_sbilist;
-+
-+static inline void au_sbilist_init(void)
-+{
-+ au_spl_init(&au_sbilist);
-+}
-+
-+static inline void au_sbilist_add(struct super_block *sb)
-+{
-+ au_spl_add(&au_sbi(sb)->si_list, &au_sbilist);
-+}
-+
-+static inline void au_sbilist_del(struct super_block *sb)
-+{
-+ au_spl_del(&au_sbi(sb)->si_list, &au_sbilist);
-+}
-+
-+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
-+static inline void au_sbilist_lock(void)
-+{
-+ spin_lock(&au_sbilist.spin);
-+}
-+
-+static inline void au_sbilist_unlock(void)
-+{
-+ spin_unlock(&au_sbilist.spin);
-+}
-+#define AuGFP_SBILIST GFP_ATOMIC
-+#else
-+AuStubVoid(au_sbilist_lock, void)
-+AuStubVoid(au_sbilist_unlock, void)
-+#define AuGFP_SBILIST GFP_NOFS
-+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
-+#else
-+AuStubVoid(au_sbilist_init, void)
-+AuStubVoid(au_sbilist_add, struct super_block *sb)
-+AuStubVoid(au_sbilist_del, struct super_block *sb)
-+AuStubVoid(au_sbilist_lock, void)
-+AuStubVoid(au_sbilist_unlock, void)
-+#define AuGFP_SBILIST GFP_NOFS
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
-+{
-+ /*
-+	 * This function is effectively a dynamic '__init' function,
-+	 * so the tiny check for si_rwsem is unnecessary.
-+ */
-+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
-+#ifdef CONFIG_DEBUG_FS
-+ sbinfo->si_dbgaufs = NULL;
-+ sbinfo->si_dbgaufs_plink = NULL;
-+ sbinfo->si_dbgaufs_xib = NULL;
-+#ifdef CONFIG_AUFS_EXPORT
-+ sbinfo->si_dbgaufs_xigen = NULL;
-+#endif
-+#endif
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline pid_t si_pid_bit(void)
-+{
-+	/* pids start at 1, but the bitmap index starts at 0 */
-+ return current->pid - 1;
-+}
-+
-+static inline int si_pid_test(struct super_block *sb)
-+{
-+ pid_t bit;
-+
-+ bit = si_pid_bit();
-+ if (bit < PID_MAX_DEFAULT)
-+ return test_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
-+ return si_pid_test_slow(sb);
-+}
-+
-+static inline void si_pid_set(struct super_block *sb)
-+{
-+ pid_t bit;
-+
-+ bit = si_pid_bit();
-+ if (bit < PID_MAX_DEFAULT) {
-+ AuDebugOn(test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
-+ set_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
-+ /* smp_mb(); */
-+ } else
-+ si_pid_set_slow(sb);
-+}
-+
-+static inline void si_pid_clr(struct super_block *sb)
-+{
-+ pid_t bit;
-+
-+ bit = si_pid_bit();
-+ if (bit < PID_MAX_DEFAULT) {
-+ AuDebugOn(!test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
-+ clear_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
-+ /* smp_mb(); */
-+ } else
-+ si_pid_clr_slow(sb);
-+}
-+
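-+/*
-+ * Illustrative note, not part of the original aufs code: pids below
-+ * PID_MAX_DEFAULT take the lock-free bitmap fast path above, e.g.
-+ * si_pid_test() for pid 100 is test_bit(99, bitmap); larger pids fall
-+ * back to the radix tree via the *_slow() variants in sbinfo.c.
-+ */
-+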
-+/* ---------------------------------------------------------------------- */
-+
-+/* lock superblock. mainly for entry point functions */
-+/*
-+ * __si_read_lock, __si_write_lock,
-+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock
-+ */
-+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem);
-+
-+#define SiMustNoWaiters(sb) AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
-+#define SiMustAnyLock(sb) AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
-+#define SiMustWriteLock(sb) AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
-+
-+static inline void si_noflush_read_lock(struct super_block *sb)
-+{
-+ __si_read_lock(sb);
-+ si_pid_set(sb);
-+}
-+
-+static inline int si_noflush_read_trylock(struct super_block *sb)
-+{
-+ int locked;
-+
-+ locked = __si_read_trylock(sb);
-+ if (locked)
-+ si_pid_set(sb);
-+ return locked;
-+}
-+
-+static inline void si_noflush_write_lock(struct super_block *sb)
-+{
-+ __si_write_lock(sb);
-+ si_pid_set(sb);
-+}
-+
-+static inline int si_noflush_write_trylock(struct super_block *sb)
-+{
-+ int locked;
-+
-+ locked = __si_write_trylock(sb);
-+ if (locked)
-+ si_pid_set(sb);
-+ return locked;
-+}
-+
-+#if 0 /* reserved */
-+static inline int si_read_trylock(struct super_block *sb, int flags)
-+{
-+ if (au_ftest_lock(flags, FLUSH))
-+ au_nwt_flush(&au_sbi(sb)->si_nowait);
-+ return si_noflush_read_trylock(sb);
-+}
-+#endif
-+
-+static inline void si_read_unlock(struct super_block *sb)
-+{
-+ si_pid_clr(sb);
-+ __si_read_unlock(sb);
-+}
-+
-+#if 0 /* reserved */
-+static inline int si_write_trylock(struct super_block *sb, int flags)
-+{
-+ if (au_ftest_lock(flags, FLUSH))
-+ au_nwt_flush(&au_sbi(sb)->si_nowait);
-+ return si_noflush_write_trylock(sb);
-+}
-+#endif
-+
-+static inline void si_write_unlock(struct super_block *sb)
-+{
-+ si_pid_clr(sb);
-+ __si_write_unlock(sb);
-+}
-+
-+#if 0 /* reserved */
-+static inline void si_downgrade_lock(struct super_block *sb)
-+{
-+ __si_downgrade_lock(sb);
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline aufs_bindex_t au_sbend(struct super_block *sb)
-+{
-+ SiMustAnyLock(sb);
-+ return au_sbi(sb)->si_bend;
-+}
-+
-+static inline unsigned int au_mntflags(struct super_block *sb)
-+{
-+ SiMustAnyLock(sb);
-+ return au_sbi(sb)->si_mntflags;
-+}
-+
-+static inline unsigned int au_sigen(struct super_block *sb)
-+{
-+ SiMustAnyLock(sb);
-+ return au_sbi(sb)->si_generation;
-+}
-+
-+static inline void au_ninodes_inc(struct super_block *sb)
-+{
-+ atomic_long_inc(&au_sbi(sb)->si_ninodes);
-+}
-+
-+static inline void au_ninodes_dec(struct super_block *sb)
-+{
-+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes));
-+ atomic_long_dec(&au_sbi(sb)->si_ninodes);
-+}
-+
-+static inline void au_nfiles_inc(struct super_block *sb)
-+{
-+ atomic_long_inc(&au_sbi(sb)->si_nfiles);
-+}
-+
-+static inline void au_nfiles_dec(struct super_block *sb)
-+{
-+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles));
-+ atomic_long_dec(&au_sbi(sb)->si_nfiles);
-+}
-+
-+static inline struct au_branch *au_sbr(struct super_block *sb,
-+ aufs_bindex_t bindex)
-+{
-+ SiMustAnyLock(sb);
-+ return au_sbi(sb)->si_branch[0 + bindex];
-+}
-+
-+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid)
-+{
-+ SiMustWriteLock(sb);
-+ au_sbi(sb)->si_xino_brid = brid;
-+}
-+
-+static inline aufs_bindex_t au_xino_brid(struct super_block *sb)
-+{
-+ SiMustAnyLock(sb);
-+ return au_sbi(sb)->si_xino_brid;
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_SUPER_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/sysaufs.c linux-4.1.10/fs/aufs/sysaufs.c
---- linux-4.1.10.orig/fs/aufs/sysaufs.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/sysaufs.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,104 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sysfs interface and lifetime management
-+ * they are necessary regardless of whether sysfs is disabled.
-+ */
-+
-+#include <linux/random.h>
-+#include "aufs.h"
-+
-+unsigned long sysaufs_si_mask;
-+struct kset *sysaufs_kset;
-+
-+#define AuSiAttr(_name) { \
-+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
-+ .show = sysaufs_si_##_name, \
-+}
-+
-+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
-+struct attribute *sysaufs_si_attrs[] = {
-+ &sysaufs_si_attr_xi_path.attr,
-+ NULL,
-+};
-+
-+static const struct sysfs_ops au_sbi_ops = {
-+ .show = sysaufs_si_show
-+};
-+
-+static struct kobj_type au_sbi_ktype = {
-+ .release = au_si_free,
-+ .sysfs_ops = &au_sbi_ops,
-+ .default_attrs = sysaufs_si_attrs
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int sysaufs_si_init(struct au_sbinfo *sbinfo)
-+{
-+ int err;
-+
-+ sbinfo->si_kobj.kset = sysaufs_kset;
-+ /* cf. sysaufs_name() */
-+ err = kobject_init_and_add
-+ (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
-+ SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
-+
-+ dbgaufs_si_null(sbinfo);
-+ if (!err) {
-+ err = dbgaufs_si_init(sbinfo);
-+ if (unlikely(err))
-+ kobject_put(&sbinfo->si_kobj);
-+ }
-+ return err;
-+}
-+
-+void sysaufs_fin(void)
-+{
-+ dbgaufs_fin();
-+ sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
-+ kset_unregister(sysaufs_kset);
-+}
-+
-+int __init sysaufs_init(void)
-+{
-+ int err;
-+
-+ do {
-+ get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
-+ } while (!sysaufs_si_mask);
-+
-+ err = -EINVAL;
-+ sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
-+ if (unlikely(!sysaufs_kset))
-+ goto out;
-+ err = PTR_ERR(sysaufs_kset);
-+ if (IS_ERR(sysaufs_kset))
-+ goto out;
-+ err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
-+ if (unlikely(err)) {
-+ kset_unregister(sysaufs_kset);
-+ goto out;
-+ }
-+
-+ err = dbgaufs_init();
-+ if (unlikely(err))
-+ sysaufs_fin();
-+out:
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/sysaufs.h linux-4.1.10/fs/aufs/sysaufs.h
---- linux-4.1.10.orig/fs/aufs/sysaufs.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/sysaufs.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,101 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sysfs interface and mount lifetime management
-+ */
-+
-+#ifndef __SYSAUFS_H__
-+#define __SYSAUFS_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/sysfs.h>
-+#include "module.h"
-+
-+struct super_block;
-+struct au_sbinfo;
-+
-+struct sysaufs_si_attr {
-+ struct attribute attr;
-+ int (*show)(struct seq_file *seq, struct super_block *sb);
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* sysaufs.c */
-+extern unsigned long sysaufs_si_mask;
-+extern struct kset *sysaufs_kset;
-+extern struct attribute *sysaufs_si_attrs[];
-+int sysaufs_si_init(struct au_sbinfo *sbinfo);
-+int __init sysaufs_init(void);
-+void sysaufs_fin(void);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* some people don't like to show a pointer in the kernel */
-+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
-+{
-+ return sysaufs_si_mask ^ (unsigned long)sbinfo;
-+}
-+
-+#define SysaufsSiNamePrefix "si_"
-+#define SysaufsSiNameLen (sizeof(SysaufsSiNamePrefix) + 16)
-+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
-+{
-+ snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
-+ sysaufs_si_id(sbinfo));
-+}
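-+
-+/*
-+ * Illustrative user-space reduction of the id scheme above (not aufs
-+ * code): the pointer is XOR-ed with a boot-time random mask, so the
-+ * raw kernel address never shows up in the si_<id> name, yet the name
-+ * stays stable for the life of the mount and the mapping is trivially
-+ * reversible for whoever knows the mask.
-+ */
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <time.h>
-+
-+int main(void)
-+{
-+	unsigned long mask, id;
-+	struct { int dummy; } sbinfo, *p = &sbinfo;
-+
-+	srandom(time(NULL));
-+	do {
-+		mask = (unsigned long)random();
-+	} while (!mask);
-+
-+	id = mask ^ (unsigned long)p;	/* what the si_<id> name shows */
-+	printf("si_%lx\n", id);
-+	printf("recovered %p\n", (void *)(mask ^ id));
-+	return 0;
-+}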
-+
-+struct au_branch;
-+#ifdef CONFIG_SYSFS
-+/* sysfs.c */
-+extern struct attribute_group *sysaufs_attr_group;
-+
-+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
-+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
-+ char *buf);
-+long au_brinfo_ioctl(struct file *file, unsigned long arg);
-+#ifdef CONFIG_COMPAT
-+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg);
-+#endif
-+
-+void sysaufs_br_init(struct au_branch *br);
-+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
-+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
-+
-+#define sysaufs_brs_init() do {} while (0)
-+
-+#else
-+#define sysaufs_attr_group NULL
-+
-+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
-+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj,
-+ struct attribute *attr, char *buf)
-+AuStubVoid(sysaufs_br_init, struct au_branch *br)
-+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
-+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
-+
-+static inline void sysaufs_brs_init(void)
-+{
-+ sysaufs_brs = 0;
-+}
-+
-+#endif /* CONFIG_SYSFS */
-+
-+#endif /* __KERNEL__ */
-+#endif /* __SYSAUFS_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/sysfs.c linux-4.1.10/fs/aufs/sysfs.c
---- linux-4.1.10.orig/fs/aufs/sysfs.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/sysfs.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,376 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sysfs interface
-+ */
-+
-+#include <linux/compat.h>
-+#include <linux/seq_file.h>
-+#include "aufs.h"
-+
-+#ifdef CONFIG_AUFS_FS_MODULE
-+/* this entry violates the "one line per file" policy of sysfs */
-+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t err;
-+ static char *conf =
-+/* this file is generated at compile time */
-+#include "conf.str"
-+ ;
-+
-+ err = snprintf(buf, PAGE_SIZE, conf);
-+ if (unlikely(err >= PAGE_SIZE))
-+ err = -EFBIG;
-+ return err;
-+}
-+
-+static struct kobj_attribute au_config_attr = __ATTR_RO(config);
-+#endif
-+
-+static struct attribute *au_attr[] = {
-+#ifdef CONFIG_AUFS_FS_MODULE
-+ &au_config_attr.attr,
-+#endif
-+ NULL, /* need to NULL terminate the list of attributes */
-+};
-+
-+static struct attribute_group sysaufs_attr_group_body = {
-+ .attrs = au_attr
-+};
-+
-+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
-+{
-+ int err;
-+
-+ SiMustAnyLock(sb);
-+
-+ err = 0;
-+ if (au_opt_test(au_mntflags(sb), XINO)) {
-+ err = au_xino_path(seq, au_sbi(sb)->si_xib);
-+ seq_putc(seq, '\n');
-+ }
-+ return err;
-+}
-+
-+/*
-+ * the lifetime of a branch is independent of its entry under sysfs.
-+ * sysfs handles the lifetime of the entry, and never calls ->show() after it
-+ * is unlinked.
-+ */
-+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
-+ aufs_bindex_t bindex, int idx)
-+{
-+ int err;
-+ struct path path;
-+ struct dentry *root;
-+ struct au_branch *br;
-+ au_br_perm_str_t perm;
-+
-+ AuDbg("b%d\n", bindex);
-+
-+ err = 0;
-+ root = sb->s_root;
-+ di_read_lock_parent(root, !AuLock_IR);
-+ br = au_sbr(sb, bindex);
-+
-+ switch (idx) {
-+ case AuBrSysfs_BR:
-+ path.mnt = au_br_mnt(br);
-+ path.dentry = au_h_dptr(root, bindex);
-+ err = au_seq_path(seq, &path);
-+ if (!err) {
-+ au_optstr_br_perm(&perm, br->br_perm);
-+ err = seq_printf(seq, "=%s\n", perm.a);
-+ }
-+ break;
-+ case AuBrSysfs_BRID:
-+ err = seq_printf(seq, "%d\n", br->br_id);
-+ break;
-+ }
-+ di_read_unlock(root, !AuLock_IR);
-+ if (err == -1)
-+ err = -E2BIG;
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static struct seq_file *au_seq(char *p, ssize_t len)
-+{
-+ struct seq_file *seq;
-+
-+ seq = kzalloc(sizeof(*seq), GFP_NOFS);
-+ if (seq) {
-+ /* mutex_init(&seq.lock); */
-+ seq->buf = p;
-+ seq->size = len;
-+ return seq; /* success */
-+ }
-+
-+ seq = ERR_PTR(-ENOMEM);
-+ return seq;
-+}
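-+
-+/*
-+ * User-space analogue of the hand-rolled seq_file above (illustrative
-+ * only, not aufs code): a bounded append buffer whose overflow is
-+ * detected afterwards by comparing count against the buffer size,
-+ * just like the -EFBIG check in sysaufs_si_show().
-+ */
-+#include <stdio.h>
-+#include <string.h>
-+
-+struct mini_seq {
-+	char	*buf;
-+	size_t	size;
-+	size_t	count;
-+};
-+
-+static void mini_seq_puts(struct mini_seq *s, const char *str)
-+{
-+	size_t len = strlen(str);
-+
-+	if (s->count + len <= s->size) {
-+		memcpy(s->buf + s->count, str, len);
-+		s->count += len;
-+	} else
-+		s->count = s->size;	/* mark as overflowed */
-+}
-+
-+int main(void)
-+{
-+	char page[32];
-+	struct mini_seq s = { .buf = page, .size = sizeof(page) };
-+
-+	mini_seq_puts(&s, "br0=/rw\n");
-+	if (s.count == s.size)
-+		puts("would return -EFBIG");
-+	else
-+		printf("%.*s", (int)s.count, s.buf);
-+	return 0;
-+}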
-+
-+#define SysaufsBr_PREFIX "br"
-+#define SysaufsBrid_PREFIX "brid"
-+
-+/* todo: file size may exceed PAGE_SIZE */
-+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
-+ char *buf)
-+{
-+ ssize_t err;
-+ int idx;
-+ long l;
-+ aufs_bindex_t bend;
-+ struct au_sbinfo *sbinfo;
-+ struct super_block *sb;
-+ struct seq_file *seq;
-+ char *name;
-+ struct attribute **cattr;
-+
-+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
-+ sb = sbinfo->si_sb;
-+
-+ /*
-+ * prevent a race condition between sysfs and aufs.
-+ * for instance, sysfs_file_read() calls sysfs_get_active_two() which
-+ * prohibits maintaining the sysfs entries.
-+	 * here we acquire the read lock after sysfs_get_active_two().
-+	 * on the other hand, the remount process may maintain the sysfs/aufs
-+	 * entries after acquiring the write lock.
-+	 * that combination can cause a deadlock.
-+	 * so we simply give up processing the read here.
-+ */
-+ err = -EBUSY;
-+ if (unlikely(!si_noflush_read_trylock(sb)))
-+ goto out;
-+
-+ seq = au_seq(buf, PAGE_SIZE);
-+ err = PTR_ERR(seq);
-+ if (IS_ERR(seq))
-+ goto out_unlock;
-+
-+ name = (void *)attr->name;
-+ cattr = sysaufs_si_attrs;
-+ while (*cattr) {
-+ if (!strcmp(name, (*cattr)->name)) {
-+ err = container_of(*cattr, struct sysaufs_si_attr, attr)
-+ ->show(seq, sb);
-+ goto out_seq;
-+ }
-+ cattr++;
-+ }
-+
-+ if (!strncmp(name, SysaufsBrid_PREFIX,
-+ sizeof(SysaufsBrid_PREFIX) - 1)) {
-+ idx = AuBrSysfs_BRID;
-+ name += sizeof(SysaufsBrid_PREFIX) - 1;
-+ } else if (!strncmp(name, SysaufsBr_PREFIX,
-+ sizeof(SysaufsBr_PREFIX) - 1)) {
-+ idx = AuBrSysfs_BR;
-+ name += sizeof(SysaufsBr_PREFIX) - 1;
-+ } else
-+ BUG();
-+
-+ err = kstrtol(name, 10, &l);
-+ if (!err) {
-+ bend = au_sbend(sb);
-+ if (l <= bend)
-+ err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx);
-+ else
-+ err = -ENOENT;
-+ }
-+
-+out_seq:
-+ if (!err) {
-+ err = seq->count;
-+ /* sysfs limit */
-+ if (unlikely(err == PAGE_SIZE))
-+ err = -EFBIG;
-+ }
-+ kfree(seq);
-+out_unlock:
-+ si_read_unlock(sb);
-+out:
-+ return err;
-+}
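-+
-+/*
-+ * The deadlock-avoidance pattern above, reduced to user space
-+ * (illustrative, not aufs code): never block on the lock from the
-+ * ->show() side, fail with EBUSY instead and let the reader retry.
-+ */
-+#include <errno.h>
-+#include <pthread.h>
-+#include <stdio.h>
-+
-+static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
-+
-+static int show(void)
-+{
-+	if (pthread_rwlock_tryrdlock(&lock))
-+		return -EBUSY;	/* a writer (think: remount) is active */
-+	puts("rendered attribute");
-+	pthread_rwlock_unlock(&lock);
-+	return 0;
-+}
-+
-+int main(void)
-+{
-+	pthread_rwlock_wrlock(&lock);	/* simulate a remount in progress */
-+	printf("show() = %d\n", show());
-+	pthread_rwlock_unlock(&lock);
-+	printf("show() = %d\n", show());
-+	return 0;
-+}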
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg)
-+{
-+ int err;
-+ int16_t brid;
-+ aufs_bindex_t bindex, bend;
-+ size_t sz;
-+ char *buf;
-+ struct seq_file *seq;
-+ struct au_branch *br;
-+
-+ si_read_lock(sb, AuLock_FLUSH);
-+ bend = au_sbend(sb);
-+ err = bend + 1;
-+ if (!arg)
-+ goto out;
-+
-+ err = -ENOMEM;
-+ buf = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!buf))
-+ goto out;
-+
-+ seq = au_seq(buf, PAGE_SIZE);
-+ err = PTR_ERR(seq);
-+ if (IS_ERR(seq))
-+ goto out_buf;
-+
-+ sz = sizeof(*arg) - offsetof(union aufs_brinfo, path);
-+ for (bindex = 0; bindex <= bend; bindex++, arg++) {
-+ err = !access_ok(VERIFY_WRITE, arg, sizeof(*arg));
-+ if (unlikely(err))
-+ break;
-+
-+ br = au_sbr(sb, bindex);
-+ brid = br->br_id;
-+ BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id));
-+ err = __put_user(brid, &arg->id);
-+ if (unlikely(err))
-+ break;
-+
-+ BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm));
-+ err = __put_user(br->br_perm, &arg->perm);
-+ if (unlikely(err))
-+ break;
-+
-+ err = au_seq_path(seq, &br->br_path);
-+ if (unlikely(err))
-+ break;
-+ err = seq_putc(seq, '\0');
-+ if (!err && seq->count <= sz) {
-+ err = copy_to_user(arg->path, seq->buf, seq->count);
-+ seq->count = 0;
-+ if (unlikely(err))
-+ break;
-+ } else {
-+ err = -E2BIG;
-+ goto out_seq;
-+ }
-+ }
-+ if (unlikely(err))
-+ err = -EFAULT;
-+
-+out_seq:
-+ kfree(seq);
-+out_buf:
-+ free_page((unsigned long)buf);
-+out:
-+ si_read_unlock(sb);
-+ return err;
-+}
-+
-+long au_brinfo_ioctl(struct file *file, unsigned long arg)
-+{
-+ return au_brinfo(file->f_path.dentry->d_sb, (void __user *)arg);
-+}
-+
-+#ifdef CONFIG_COMPAT
-+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg)
-+{
-+ return au_brinfo(file->f_path.dentry->d_sb, compat_ptr(arg));
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void sysaufs_br_init(struct au_branch *br)
-+{
-+ int i;
-+ struct au_brsysfs *br_sysfs;
-+ struct attribute *attr;
-+
-+ br_sysfs = br->br_sysfs;
-+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
-+ attr = &br_sysfs->attr;
-+ sysfs_attr_init(attr);
-+ attr->name = br_sysfs->name;
-+ attr->mode = S_IRUGO;
-+ br_sysfs++;
-+ }
-+}
-+
-+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ struct au_branch *br;
-+ struct kobject *kobj;
-+ struct au_brsysfs *br_sysfs;
-+ int i;
-+ aufs_bindex_t bend;
-+
-+ dbgaufs_brs_del(sb, bindex);
-+
-+ if (!sysaufs_brs)
-+ return;
-+
-+ kobj = &au_sbi(sb)->si_kobj;
-+ bend = au_sbend(sb);
-+ for (; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ br_sysfs = br->br_sysfs;
-+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
-+ sysfs_remove_file(kobj, &br_sysfs->attr);
-+ br_sysfs++;
-+ }
-+ }
-+}
-+
-+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ int err, i;
-+ aufs_bindex_t bend;
-+ struct kobject *kobj;
-+ struct au_branch *br;
-+ struct au_brsysfs *br_sysfs;
-+
-+ dbgaufs_brs_add(sb, bindex);
-+
-+ if (!sysaufs_brs)
-+ return;
-+
-+ kobj = &au_sbi(sb)->si_kobj;
-+ bend = au_sbend(sb);
-+ for (; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ br_sysfs = br->br_sysfs;
-+ snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name),
-+ SysaufsBr_PREFIX "%d", bindex);
-+ snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name),
-+ SysaufsBrid_PREFIX "%d", bindex);
-+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
-+ err = sysfs_create_file(kobj, &br_sysfs->attr);
-+ if (unlikely(err))
-+ pr_warn("failed %s under sysfs(%d)\n",
-+ br_sysfs->name, err);
-+ br_sysfs++;
-+ }
-+ }
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/sysrq.c linux-4.1.10/fs/aufs/sysrq.c
---- linux-4.1.10.orig/fs/aufs/sysrq.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/sysrq.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,157 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * magic sysrq handler
-+ */
-+
-+/* #include <linux/sysrq.h> */
-+#include <linux/writeback.h>
-+#include "aufs.h"
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void sysrq_sb(struct super_block *sb)
-+{
-+ char *plevel;
-+ struct au_sbinfo *sbinfo;
-+ struct file *file;
-+ struct au_sphlhead *files;
-+ struct au_finfo *finfo;
-+
-+ plevel = au_plevel;
-+ au_plevel = KERN_WARNING;
-+
-+ /* since we define pr_fmt, call printk directly */
-+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
-+
-+ sbinfo = au_sbi(sb);
-+ printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
-+ pr("superblock\n");
-+ au_dpri_sb(sb);
-+
-+#if 0
-+ pr("root dentry\n");
-+ au_dpri_dentry(sb->s_root);
-+ pr("root inode\n");
-+ au_dpri_inode(d_inode(sb->s_root));
-+#endif
-+
-+#if 0
-+ do {
-+ int err, i, j, ndentry;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+
-+ err = au_dpages_init(&dpages, GFP_ATOMIC);
-+ if (unlikely(err))
-+ break;
-+ err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
-+ if (!err)
-+ for (i = 0; i < dpages.ndpage; i++) {
-+ dpage = dpages.dpages + i;
-+ ndentry = dpage->ndentry;
-+ for (j = 0; j < ndentry; j++)
-+ au_dpri_dentry(dpage->dentries[j]);
-+ }
-+ au_dpages_free(&dpages);
-+ } while (0);
-+#endif
-+
-+#if 1
-+ {
-+ struct inode *i;
-+
-+ pr("isolated inode\n");
-+ spin_lock(&inode_sb_list_lock);
-+ list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
-+ spin_lock(&i->i_lock);
-+ if (1 || hlist_empty(&i->i_dentry))
-+ au_dpri_inode(i);
-+ spin_unlock(&i->i_lock);
-+ }
-+ spin_unlock(&inode_sb_list_lock);
-+ }
-+#endif
-+ pr("files\n");
-+ files = &au_sbi(sb)->si_files;
-+ spin_lock(&files->spin);
-+ hlist_for_each_entry(finfo, &files->head, fi_hlist) {
-+ umode_t mode;
-+
-+ file = finfo->fi_file;
-+ mode = file_inode(file)->i_mode;
-+ if (!special_file(mode))
-+ au_dpri_file(file);
-+ }
-+ spin_unlock(&files->spin);
-+ pr("done\n");
-+
-+#undef pr
-+ au_plevel = plevel;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* module parameter */
-+static char *aufs_sysrq_key = "a";
-+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO);
-+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
-+
-+static void au_sysrq(int key __maybe_unused)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ lockdep_off();
-+ au_sbilist_lock();
-+ list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
-+ sysrq_sb(sbinfo->si_sb);
-+ au_sbilist_unlock();
-+ lockdep_on();
-+}
-+
-+static struct sysrq_key_op au_sysrq_op = {
-+ .handler = au_sysrq,
-+ .help_msg = "Aufs",
-+ .action_msg = "Aufs",
-+ .enable_mask = SYSRQ_ENABLE_DUMP
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int __init au_sysrq_init(void)
-+{
-+ int err;
-+ char key;
-+
-+ err = -1;
-+ key = *aufs_sysrq_key;
-+ if ('a' <= key && key <= 'z')
-+ err = register_sysrq_key(key, &au_sysrq_op);
-+ if (unlikely(err))
-+ pr_err("err %d, sysrq=%c\n", err, key);
-+ return err;
-+}
-+
-+void au_sysrq_fin(void)
-+{
-+ int err;
-+
-+ err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
-+ if (unlikely(err))
-+ pr_err("err %d (ignored)\n", err);
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/vdir.c linux-4.1.10/fs/aufs/vdir.c
---- linux-4.1.10.orig/fs/aufs/vdir.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/vdir.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,888 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * virtual or vertical directory
-+ */
-+
-+#include "aufs.h"
-+
-+static unsigned int calc_size(int nlen)
-+{
-+ return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
-+}
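-+
-+/*
-+ * The rounding above in isolation (illustrative, not aufs code):
-+ * ALIGN(x, a) for power-of-two a is (x + a - 1) & ~(a - 1), so every
-+ * variable-length entry ends on an ino_t boundary and the next record
-+ * header stays naturally aligned.  The sizes below are made up.
-+ */
-+#include <stdio.h>
-+
-+#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
-+
-+int main(void)
-+{
-+	unsigned int hdr = 12;			/* stand-in header size */
-+	unsigned int a = sizeof(unsigned long);	/* stand-in for ino_t */
-+	unsigned int nlen;
-+
-+	for (nlen = 1; nlen <= 4; nlen++)
-+		printf("nlen=%u -> %u bytes\n", nlen, ALIGN(hdr + nlen, a));
-+	return 0;
-+}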
-+
-+static int set_deblk_end(union au_vdir_deblk_p *p,
-+ union au_vdir_deblk_p *deblk_end)
-+{
-+ if (calc_size(0) <= deblk_end->deblk - p->deblk) {
-+ p->de->de_str.len = 0;
-+ /* smp_mb(); */
-+ return 0;
-+ }
-+ return -1; /* error */
-+}
-+
-+/* returns true or false */
-+static int is_deblk_end(union au_vdir_deblk_p *p,
-+ union au_vdir_deblk_p *deblk_end)
-+{
-+ if (calc_size(0) <= deblk_end->deblk - p->deblk)
-+ return !p->de->de_str.len;
-+ return 1;
-+}
-+
-+static unsigned char *last_deblk(struct au_vdir *vdir)
-+{
-+ return vdir->vd_deblk[vdir->vd_nblk - 1];
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* estimate the appropriate size for name hash table */
-+unsigned int au_rdhash_est(loff_t sz)
-+{
-+ unsigned int n;
-+
-+ n = UINT_MAX;
-+ sz >>= 10;
-+ if (sz < n)
-+ n = sz;
-+ if (sz < AUFS_RDHASH_DEF)
-+ n = AUFS_RDHASH_DEF;
-+ /* pr_info("n %u\n", n); */
-+ return n;
-+}
-+
-+/*
-+ * the allocated memory has to be freed by
-+ * au_nhash_wh_free() or au_nhash_de_free().
-+ */
-+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
-+{
-+ struct hlist_head *head;
-+ unsigned int u;
-+ size_t sz;
-+
-+ sz = sizeof(*nhash->nh_head) * num_hash;
-+ head = kmalloc(sz, gfp);
-+ if (head) {
-+ nhash->nh_num = num_hash;
-+ nhash->nh_head = head;
-+ for (u = 0; u < num_hash; u++)
-+ INIT_HLIST_HEAD(head++);
-+ return 0; /* success */
-+ }
-+
-+ return -ENOMEM;
-+}
-+
-+static void nhash_count(struct hlist_head *head)
-+{
-+#if 0
-+ unsigned long n;
-+ struct hlist_node *pos;
-+
-+ n = 0;
-+ hlist_for_each(pos, head)
-+ n++;
-+ pr_info("%lu\n", n);
-+#endif
-+}
-+
-+static void au_nhash_wh_do_free(struct hlist_head *head)
-+{
-+ struct au_vdir_wh *pos;
-+ struct hlist_node *node;
-+
-+ hlist_for_each_entry_safe(pos, node, head, wh_hash)
-+ kfree(pos);
-+}
-+
-+static void au_nhash_de_do_free(struct hlist_head *head)
-+{
-+ struct au_vdir_dehstr *pos;
-+ struct hlist_node *node;
-+
-+ hlist_for_each_entry_safe(pos, node, head, hash)
-+ au_cache_free_vdir_dehstr(pos);
-+}
-+
-+static void au_nhash_do_free(struct au_nhash *nhash,
-+ void (*free)(struct hlist_head *head))
-+{
-+ unsigned int n;
-+ struct hlist_head *head;
-+
-+ n = nhash->nh_num;
-+ if (!n)
-+ return;
-+
-+ head = nhash->nh_head;
-+ while (n-- > 0) {
-+ nhash_count(head);
-+ free(head++);
-+ }
-+ kfree(nhash->nh_head);
-+}
-+
-+void au_nhash_wh_free(struct au_nhash *whlist)
-+{
-+ au_nhash_do_free(whlist, au_nhash_wh_do_free);
-+}
-+
-+static void au_nhash_de_free(struct au_nhash *delist)
-+{
-+ au_nhash_do_free(delist, au_nhash_de_do_free);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
-+ int limit)
-+{
-+ int num;
-+ unsigned int u, n;
-+ struct hlist_head *head;
-+ struct au_vdir_wh *pos;
-+
-+ num = 0;
-+ n = whlist->nh_num;
-+ head = whlist->nh_head;
-+ for (u = 0; u < n; u++, head++)
-+ hlist_for_each_entry(pos, head, wh_hash)
-+ if (pos->wh_bindex == btgt && ++num > limit)
-+ return 1;
-+ return 0;
-+}
-+
-+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
-+ unsigned char *name,
-+ unsigned int len)
-+{
-+ unsigned int v;
-+ /* const unsigned int magic_bit = 12; */
-+
-+ AuDebugOn(!nhash->nh_num || !nhash->nh_head);
-+
-+ v = 0;
-+ while (len--)
-+ v += *name++;
-+ /* v = hash_long(v, magic_bit); */
-+ v %= nhash->nh_num;
-+ return nhash->nh_head + v;
-+}
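-+
-+/*
-+ * The byte-sum bucket selection above, runnable stand-alone
-+ * (illustrative, not aufs code).  Cheap, but order-insensitive:
-+ * anagrams land in the same bucket, which is presumably why a
-+ * hash_long() variant is left commented out above.
-+ */
-+#include <stdio.h>
-+#include <string.h>
-+
-+static unsigned int bucket(const char *name, unsigned int nbuckets)
-+{
-+	unsigned int v = 0;
-+	size_t len = strlen(name);
-+
-+	while (len--)
-+		v += (unsigned char)*name++;
-+	return v % nbuckets;
-+}
-+
-+int main(void)
-+{
-+	const char *names[] = { "etc", "usr", "var", "rsu" };
-+	unsigned int i;
-+
-+	for (i = 0; i < 4; i++)
-+		printf("%s -> bucket %u\n", names[i], bucket(names[i], 64));
-+	/* note: "usr" and "rsu" collide because byte order is ignored */
-+	return 0;
-+}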
-+
-+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
-+ int nlen)
-+{
-+ return str->len == nlen && !memcmp(str->name, name, nlen);
-+}
-+
-+/* returns found or not */
-+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
-+{
-+ struct hlist_head *head;
-+ struct au_vdir_wh *pos;
-+ struct au_vdir_destr *str;
-+
-+ head = au_name_hash(whlist, name, nlen);
-+ hlist_for_each_entry(pos, head, wh_hash) {
-+ str = &pos->wh_str;
-+ AuDbg("%.*s\n", str->len, str->name);
-+ if (au_nhash_test_name(str, name, nlen))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/* returns found(true) or not */
-+static int test_known(struct au_nhash *delist, char *name, int nlen)
-+{
-+ struct hlist_head *head;
-+ struct au_vdir_dehstr *pos;
-+ struct au_vdir_destr *str;
-+
-+ head = au_name_hash(delist, name, nlen);
-+ hlist_for_each_entry(pos, head, hash) {
-+ str = pos->str;
-+ AuDbg("%.*s\n", str->len, str->name);
-+ if (au_nhash_test_name(str, name, nlen))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
-+ unsigned char d_type)
-+{
-+#ifdef CONFIG_AUFS_SHWH
-+ wh->wh_ino = ino;
-+ wh->wh_type = d_type;
-+#endif
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
-+ unsigned int d_type, aufs_bindex_t bindex,
-+ unsigned char shwh)
-+{
-+ int err;
-+ struct au_vdir_destr *str;
-+ struct au_vdir_wh *wh;
-+
-+ AuDbg("%.*s\n", nlen, name);
-+ AuDebugOn(!whlist->nh_num || !whlist->nh_head);
-+
-+ err = -ENOMEM;
-+ wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
-+ if (unlikely(!wh))
-+ goto out;
-+
-+ err = 0;
-+ wh->wh_bindex = bindex;
-+ if (shwh)
-+ au_shwh_init_wh(wh, ino, d_type);
-+ str = &wh->wh_str;
-+ str->len = nlen;
-+ memcpy(str->name, name, nlen);
-+ hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
-+ /* smp_mb(); */
-+
-+out:
-+ return err;
-+}
-+
-+static int append_deblk(struct au_vdir *vdir)
-+{
-+ int err;
-+ unsigned long ul;
-+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
-+ union au_vdir_deblk_p p, deblk_end;
-+ unsigned char **o;
-+
-+ err = -ENOMEM;
-+ o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
-+ GFP_NOFS);
-+ if (unlikely(!o))
-+ goto out;
-+
-+ vdir->vd_deblk = o;
-+ p.deblk = kmalloc(deblk_sz, GFP_NOFS);
-+ if (p.deblk) {
-+ ul = vdir->vd_nblk++;
-+ vdir->vd_deblk[ul] = p.deblk;
-+ vdir->vd_last.ul = ul;
-+ vdir->vd_last.p.deblk = p.deblk;
-+ deblk_end.deblk = p.deblk + deblk_sz;
-+ err = set_deblk_end(&p, &deblk_end);
-+ }
-+
-+out:
-+ return err;
-+}
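-+
-+/*
-+ * The grow-by-one idiom above (krealloc the pointer array, then
-+ * kmalloc the new block), shown with the libc equivalents; this is an
-+ * illustrative reduction, not aufs code.  As in append_deblk(), the
-+ * enlarged array is kept even when allocating the new block fails.
-+ */
-+#include <stdio.h>
-+#include <stdlib.h>
-+
-+static int append_block(unsigned char ***blocks, unsigned long *nblk,
-+			size_t blk_sz)
-+{
-+	unsigned char **o, *blk;
-+
-+	o = realloc(*blocks, sizeof(*o) * (*nblk + 1));
-+	if (!o)
-+		return -1;
-+	*blocks = o;
-+
-+	blk = malloc(blk_sz);
-+	if (!blk)
-+		return -1;
-+	o[(*nblk)++] = blk;
-+	return 0;
-+}
-+
-+int main(void)
-+{
-+	unsigned char **blocks = NULL;
-+	unsigned long nblk = 0;
-+
-+	if (!append_block(&blocks, &nblk, 512))
-+		printf("nblk=%lu\n", nblk);
-+	while (nblk)
-+		free(blocks[--nblk]);
-+	free(blocks);
-+	return 0;
-+}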
-+
-+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
-+ unsigned int d_type, struct au_nhash *delist)
-+{
-+ int err;
-+ unsigned int sz;
-+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
-+ union au_vdir_deblk_p p, *room, deblk_end;
-+ struct au_vdir_dehstr *dehstr;
-+
-+ p.deblk = last_deblk(vdir);
-+ deblk_end.deblk = p.deblk + deblk_sz;
-+ room = &vdir->vd_last.p;
-+ AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
-+ || !is_deblk_end(room, &deblk_end));
-+
-+ sz = calc_size(nlen);
-+ if (unlikely(sz > deblk_end.deblk - room->deblk)) {
-+ err = append_deblk(vdir);
-+ if (unlikely(err))
-+ goto out;
-+
-+ p.deblk = last_deblk(vdir);
-+ deblk_end.deblk = p.deblk + deblk_sz;
-+ /* smp_mb(); */
-+ AuDebugOn(room->deblk != p.deblk);
-+ }
-+
-+ err = -ENOMEM;
-+ dehstr = au_cache_alloc_vdir_dehstr();
-+ if (unlikely(!dehstr))
-+ goto out;
-+
-+ dehstr->str = &room->de->de_str;
-+ hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
-+ room->de->de_ino = ino;
-+ room->de->de_type = d_type;
-+ room->de->de_str.len = nlen;
-+ memcpy(room->de->de_str.name, name, nlen);
-+
-+ err = 0;
-+ room->deblk += sz;
-+ if (unlikely(set_deblk_end(room, &deblk_end)))
-+ err = append_deblk(vdir);
-+ /* smp_mb(); */
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_vdir_free(struct au_vdir *vdir)
-+{
-+ unsigned char **deblk;
-+
-+ deblk = vdir->vd_deblk;
-+ while (vdir->vd_nblk--)
-+ kfree(*deblk++);
-+ kfree(vdir->vd_deblk);
-+ au_cache_free_vdir(vdir);
-+}
-+
-+static struct au_vdir *alloc_vdir(struct file *file)
-+{
-+ struct au_vdir *vdir;
-+ struct super_block *sb;
-+ int err;
-+
-+ sb = file->f_path.dentry->d_sb;
-+ SiMustAnyLock(sb);
-+
-+ err = -ENOMEM;
-+ vdir = au_cache_alloc_vdir();
-+ if (unlikely(!vdir))
-+ goto out;
-+
-+ vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
-+ if (unlikely(!vdir->vd_deblk))
-+ goto out_free;
-+
-+ vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
-+ if (!vdir->vd_deblk_sz) {
-+ /* estimate the appropriate size for deblk */
-+ vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
-+ /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
-+ }
-+ vdir->vd_nblk = 0;
-+ vdir->vd_version = 0;
-+ vdir->vd_jiffy = 0;
-+ err = append_deblk(vdir);
-+ if (!err)
-+ return vdir; /* success */
-+
-+ kfree(vdir->vd_deblk);
-+
-+out_free:
-+ au_cache_free_vdir(vdir);
-+out:
-+ vdir = ERR_PTR(err);
-+ return vdir;
-+}
-+
-+static int reinit_vdir(struct au_vdir *vdir)
-+{
-+ int err;
-+ union au_vdir_deblk_p p, deblk_end;
-+
-+ while (vdir->vd_nblk > 1) {
-+ kfree(vdir->vd_deblk[vdir->vd_nblk - 1]);
-+ /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
-+ vdir->vd_nblk--;
-+ }
-+ p.deblk = vdir->vd_deblk[0];
-+ deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
-+ err = set_deblk_end(&p, &deblk_end);
-+	/* keep vd_deblk_sz */
-+ vdir->vd_last.ul = 0;
-+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
-+ vdir->vd_version = 0;
-+ vdir->vd_jiffy = 0;
-+ /* smp_mb(); */
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define AuFillVdir_CALLED 1
-+#define AuFillVdir_WHABLE (1 << 1)
-+#define AuFillVdir_SHWH (1 << 2)
-+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name)
-+#define au_fset_fillvdir(flags, name) \
-+ do { (flags) |= AuFillVdir_##name; } while (0)
-+#define au_fclr_fillvdir(flags, name) \
-+ do { (flags) &= ~AuFillVdir_##name; } while (0)
-+
-+#ifndef CONFIG_AUFS_SHWH
-+#undef AuFillVdir_SHWH
-+#define AuFillVdir_SHWH 0
-+#endif
-+
-+struct fillvdir_arg {
-+ struct dir_context ctx;
-+ struct file *file;
-+ struct au_vdir *vdir;
-+ struct au_nhash delist;
-+ struct au_nhash whlist;
-+ aufs_bindex_t bindex;
-+ unsigned int flags;
-+ int err;
-+};
-+
-+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen,
-+ loff_t offset __maybe_unused, u64 h_ino,
-+ unsigned int d_type)
-+{
-+ struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx);
-+ char *name = (void *)__name;
-+ struct super_block *sb;
-+ ino_t ino;
-+ const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
-+
-+ arg->err = 0;
-+ sb = arg->file->f_path.dentry->d_sb;
-+ au_fset_fillvdir(arg->flags, CALLED);
-+ /* smp_mb(); */
-+ if (nlen <= AUFS_WH_PFX_LEN
-+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
-+ if (test_known(&arg->delist, name, nlen)
-+ || au_nhash_test_known_wh(&arg->whlist, name, nlen))
-+ goto out; /* already exists or whiteouted */
-+
-+ arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
-+ if (!arg->err) {
-+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
-+ d_type = DT_UNKNOWN;
-+ arg->err = append_de(arg->vdir, name, nlen, ino,
-+ d_type, &arg->delist);
-+ }
-+ } else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
-+ name += AUFS_WH_PFX_LEN;
-+ nlen -= AUFS_WH_PFX_LEN;
-+ if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
-+ goto out; /* already whiteouted */
-+
-+ if (shwh)
-+ arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
-+ &ino);
-+ if (!arg->err) {
-+ if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN)
-+ d_type = DT_UNKNOWN;
-+ arg->err = au_nhash_append_wh
-+ (&arg->whlist, name, nlen, ino, d_type,
-+ arg->bindex, shwh);
-+ }
-+ }
-+
-+out:
-+ if (!arg->err)
-+ arg->vdir->vd_jiffy = jiffies;
-+ /* smp_mb(); */
-+ AuTraceErr(arg->err);
-+ return arg->err;
-+}
-+
-+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
-+ struct au_nhash *whlist, struct au_nhash *delist)
-+{
-+#ifdef CONFIG_AUFS_SHWH
-+ int err;
-+ unsigned int nh, u;
-+ struct hlist_head *head;
-+ struct au_vdir_wh *pos;
-+ struct hlist_node *n;
-+ char *p, *o;
-+ struct au_vdir_destr *destr;
-+
-+ AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
-+
-+ err = -ENOMEM;
-+ o = p = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!p))
-+ goto out;
-+
-+ err = 0;
-+ nh = whlist->nh_num;
-+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
-+ p += AUFS_WH_PFX_LEN;
-+ for (u = 0; u < nh; u++) {
-+ head = whlist->nh_head + u;
-+ hlist_for_each_entry_safe(pos, n, head, wh_hash) {
-+ destr = &pos->wh_str;
-+ memcpy(p, destr->name, destr->len);
-+ err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
-+ pos->wh_ino, pos->wh_type, delist);
-+ if (unlikely(err))
-+ break;
-+ }
-+ }
-+
-+ free_page((unsigned long)o);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+static int au_do_read_vdir(struct fillvdir_arg *arg)
-+{
-+ int err;
-+ unsigned int rdhash;
-+ loff_t offset;
-+ aufs_bindex_t bend, bindex, bstart;
-+ unsigned char shwh;
-+ struct file *hf, *file;
-+ struct super_block *sb;
-+
-+ file = arg->file;
-+ sb = file->f_path.dentry->d_sb;
-+ SiMustAnyLock(sb);
-+
-+ rdhash = au_sbi(sb)->si_rdhash;
-+ if (!rdhash)
-+ rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
-+ err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out_delist;
-+
-+ err = 0;
-+ arg->flags = 0;
-+ shwh = 0;
-+ if (au_opt_test(au_mntflags(sb), SHWH)) {
-+ shwh = 1;
-+ au_fset_fillvdir(arg->flags, SHWH);
-+ }
-+ bstart = au_fbstart(file);
-+ bend = au_fbend_dir(file);
-+ for (bindex = bstart; !err && bindex <= bend; bindex++) {
-+ hf = au_hf_dir(file, bindex);
-+ if (!hf)
-+ continue;
-+
-+ offset = vfsub_llseek(hf, 0, SEEK_SET);
-+ err = offset;
-+ if (unlikely(offset))
-+ break;
-+
-+ arg->bindex = bindex;
-+ au_fclr_fillvdir(arg->flags, WHABLE);
-+ if (shwh
-+ || (bindex != bend
-+ && au_br_whable(au_sbr_perm(sb, bindex))))
-+ au_fset_fillvdir(arg->flags, WHABLE);
-+ do {
-+ arg->err = 0;
-+ au_fclr_fillvdir(arg->flags, CALLED);
-+ /* smp_mb(); */
-+ err = vfsub_iterate_dir(hf, &arg->ctx);
-+ if (err >= 0)
-+ err = arg->err;
-+ } while (!err && au_ftest_fillvdir(arg->flags, CALLED));
-+
-+ /*
-+ * dir_relax() may be good for concurrency, but aufs should not
-+ * use it since it will cause a lockdep problem.
-+ */
-+ }
-+
-+ if (!err && shwh)
-+ err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
-+
-+ au_nhash_wh_free(&arg->whlist);
-+
-+out_delist:
-+ au_nhash_de_free(&arg->delist);
-+out:
-+ return err;
-+}
-+
-+static int read_vdir(struct file *file, int may_read)
-+{
-+ int err;
-+ unsigned long expire;
-+ unsigned char do_read;
-+ struct fillvdir_arg arg = {
-+ .ctx = {
-+ .actor = fillvdir
-+ }
-+ };
-+ struct inode *inode;
-+ struct au_vdir *vdir, *allocated;
-+
-+ err = 0;
-+ inode = file_inode(file);
-+ IMustLock(inode);
-+ SiMustAnyLock(inode->i_sb);
-+
-+ allocated = NULL;
-+ do_read = 0;
-+ expire = au_sbi(inode->i_sb)->si_rdcache;
-+ vdir = au_ivdir(inode);
-+ if (!vdir) {
-+ do_read = 1;
-+ vdir = alloc_vdir(file);
-+ err = PTR_ERR(vdir);
-+ if (IS_ERR(vdir))
-+ goto out;
-+ err = 0;
-+ allocated = vdir;
-+ } else if (may_read
-+ && (inode->i_version != vdir->vd_version
-+ || time_after(jiffies, vdir->vd_jiffy + expire))) {
-+ do_read = 1;
-+ err = reinit_vdir(vdir);
-+ if (unlikely(err))
-+ goto out;
-+ }
-+
-+ if (!do_read)
-+ return 0; /* success */
-+
-+ arg.file = file;
-+ arg.vdir = vdir;
-+ err = au_do_read_vdir(&arg);
-+ if (!err) {
-+ /* file->f_pos = 0; */ /* todo: ctx->pos? */
-+ vdir->vd_version = inode->i_version;
-+ vdir->vd_last.ul = 0;
-+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
-+ if (allocated)
-+ au_set_ivdir(inode, allocated);
-+ } else if (allocated)
-+ au_vdir_free(allocated);
-+
-+out:
-+ return err;
-+}
-+
-+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
-+{
-+ int err, rerr;
-+ unsigned long ul, n;
-+ const unsigned int deblk_sz = src->vd_deblk_sz;
-+
-+ AuDebugOn(tgt->vd_nblk != 1);
-+
-+ err = -ENOMEM;
-+ if (tgt->vd_nblk < src->vd_nblk) {
-+ unsigned char **p;
-+
-+ p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
-+ GFP_NOFS);
-+ if (unlikely(!p))
-+ goto out;
-+ tgt->vd_deblk = p;
-+ }
-+
-+ if (tgt->vd_deblk_sz != deblk_sz) {
-+ unsigned char *p;
-+
-+ tgt->vd_deblk_sz = deblk_sz;
-+ p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS);
-+ if (unlikely(!p))
-+ goto out;
-+ tgt->vd_deblk[0] = p;
-+ }
-+ memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
-+ tgt->vd_version = src->vd_version;
-+ tgt->vd_jiffy = src->vd_jiffy;
-+
-+ n = src->vd_nblk;
-+ for (ul = 1; ul < n; ul++) {
-+ tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
-+ GFP_NOFS);
-+ if (unlikely(!tgt->vd_deblk[ul]))
-+ goto out;
-+ tgt->vd_nblk++;
-+ }
-+ tgt->vd_nblk = n;
-+	tgt->vd_last.ul = src->vd_last.ul;
-+ tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
-+ tgt->vd_last.p.deblk += src->vd_last.p.deblk
-+ - src->vd_deblk[src->vd_last.ul];
-+ /* smp_mb(); */
-+ return 0; /* success */
-+
-+out:
-+ rerr = reinit_vdir(tgt);
-+ BUG_ON(rerr);
-+ return err;
-+}
-+
-+int au_vdir_init(struct file *file)
-+{
-+ int err;
-+ struct inode *inode;
-+ struct au_vdir *vdir_cache, *allocated;
-+
-+ /* test file->f_pos here instead of ctx->pos */
-+ err = read_vdir(file, !file->f_pos);
-+ if (unlikely(err))
-+ goto out;
-+
-+ allocated = NULL;
-+ vdir_cache = au_fvdir_cache(file);
-+ if (!vdir_cache) {
-+ vdir_cache = alloc_vdir(file);
-+ err = PTR_ERR(vdir_cache);
-+ if (IS_ERR(vdir_cache))
-+ goto out;
-+ allocated = vdir_cache;
-+ } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
-+ /* test file->f_pos here instead of ctx->pos */
-+ err = reinit_vdir(vdir_cache);
-+ if (unlikely(err))
-+ goto out;
-+ } else
-+ return 0; /* success */
-+
-+ inode = file_inode(file);
-+ err = copy_vdir(vdir_cache, au_ivdir(inode));
-+ if (!err) {
-+ file->f_version = inode->i_version;
-+ if (allocated)
-+ au_set_fvdir_cache(file, allocated);
-+ } else if (allocated)
-+ au_vdir_free(allocated);
-+
-+out:
-+ return err;
-+}
-+
-+static loff_t calc_offset(struct au_vdir *vdir)
-+{
-+ loff_t offset;
-+ union au_vdir_deblk_p p;
-+
-+ p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
-+ offset = vdir->vd_last.p.deblk - p.deblk;
-+ offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
-+ return offset;
-+}
-+
-+/* returns true or false */
-+static int seek_vdir(struct file *file, struct dir_context *ctx)
-+{
-+ int valid;
-+ unsigned int deblk_sz;
-+ unsigned long ul, n;
-+ loff_t offset;
-+ union au_vdir_deblk_p p, deblk_end;
-+ struct au_vdir *vdir_cache;
-+
-+ valid = 1;
-+ vdir_cache = au_fvdir_cache(file);
-+ offset = calc_offset(vdir_cache);
-+ AuDbg("offset %lld\n", offset);
-+ if (ctx->pos == offset)
-+ goto out;
-+
-+ vdir_cache->vd_last.ul = 0;
-+ vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
-+ if (!ctx->pos)
-+ goto out;
-+
-+ valid = 0;
-+ deblk_sz = vdir_cache->vd_deblk_sz;
-+ ul = div64_u64(ctx->pos, deblk_sz);
-+ AuDbg("ul %lu\n", ul);
-+ if (ul >= vdir_cache->vd_nblk)
-+ goto out;
-+
-+ n = vdir_cache->vd_nblk;
-+ for (; ul < n; ul++) {
-+ p.deblk = vdir_cache->vd_deblk[ul];
-+ deblk_end.deblk = p.deblk + deblk_sz;
-+ offset = ul;
-+ offset *= deblk_sz;
-+ while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) {
-+ unsigned int l;
-+
-+ l = calc_size(p.de->de_str.len);
-+ offset += l;
-+ p.deblk += l;
-+ }
-+ if (!is_deblk_end(&p, &deblk_end)) {
-+ valid = 1;
-+ vdir_cache->vd_last.ul = ul;
-+ vdir_cache->vd_last.p = p;
-+ break;
-+ }
-+ }
-+
-+out:
-+ /* smp_mb(); */
-+ AuTraceErr(!valid);
-+ return valid;
-+}
-+
-+int au_vdir_fill_de(struct file *file, struct dir_context *ctx)
-+{
-+ unsigned int l, deblk_sz;
-+ union au_vdir_deblk_p deblk_end;
-+ struct au_vdir *vdir_cache;
-+ struct au_vdir_de *de;
-+
-+ vdir_cache = au_fvdir_cache(file);
-+ if (!seek_vdir(file, ctx))
-+ return 0;
-+
-+ deblk_sz = vdir_cache->vd_deblk_sz;
-+ while (1) {
-+ deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
-+ deblk_end.deblk += deblk_sz;
-+ while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
-+ de = vdir_cache->vd_last.p.de;
-+ AuDbg("%.*s, off%lld, i%lu, dt%d\n",
-+ de->de_str.len, de->de_str.name, ctx->pos,
-+ (unsigned long)de->de_ino, de->de_type);
-+ if (unlikely(!dir_emit(ctx, de->de_str.name,
-+ de->de_str.len, de->de_ino,
-+ de->de_type))) {
-+ /* todo: ignore the error caused by udba? */
-+ /* return err; */
-+ return 0;
-+ }
-+
-+ l = calc_size(de->de_str.len);
-+ vdir_cache->vd_last.p.deblk += l;
-+ ctx->pos += l;
-+ }
-+ if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
-+ vdir_cache->vd_last.ul++;
-+ vdir_cache->vd_last.p.deblk
-+ = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
-+ ctx->pos = deblk_sz * vdir_cache->vd_last.ul;
-+ continue;
-+ }
-+ break;
-+ }
-+
-+ /* smp_mb(); */
-+ return 0;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/vfsub.c linux-4.1.10/fs/aufs/vfsub.c
---- linux-4.1.10.orig/fs/aufs/vfsub.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/vfsub.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,848 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sub-routines for VFS
-+ */
-+
-+#include <linux/namei.h>
-+#include <linux/security.h>
-+#include <linux/splice.h>
-+#include "aufs.h"
-+
-+int vfsub_update_h_iattr(struct path *h_path, int *did)
-+{
-+ int err;
-+ struct kstat st;
-+ struct super_block *h_sb;
-+
-+ /* for remote fs, leave work for its getattr or d_revalidate */
-+ /* for bad i_attr fs, handle them in aufs_getattr() */
-+	/* still, some fs may acquire i_mutex; we need to skip them */
-+ err = 0;
-+ if (!did)
-+ did = &err;
-+ h_sb = h_path->dentry->d_sb;
-+ *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
-+ if (*did)
-+ err = vfs_getattr(h_path, &st);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct file *vfsub_dentry_open(struct path *path, int flags)
-+{
-+ struct file *file;
-+
-+ file = dentry_open(path, flags /* | __FMODE_NONOTIFY */,
-+ current_cred());
-+ if (!IS_ERR_OR_NULL(file)
-+ && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
-+ i_readcount_inc(d_inode(path->dentry));
-+
-+ return file;
-+}
-+
-+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
-+{
-+ struct file *file;
-+
-+ lockdep_off();
-+ file = filp_open(path,
-+ oflags /* | __FMODE_NONOTIFY */,
-+ mode);
-+ lockdep_on();
-+ if (IS_ERR(file))
-+ goto out;
-+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
-+
-+out:
-+ return file;
-+}
-+
-+/*
-+ * Ideally this function should call VFS:do_last() in order to keep all its
-+ * checkings. But it is very hard for aufs to regenerate several VFS internal
-+ * structure such as nameidata. This is a second (or third) best approach.
-+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open().
-+ */
-+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
-+ struct vfsub_aopen_args *args, struct au_branch *br)
-+{
-+ int err;
-+ struct file *file = args->file;
-+ /* copied from linux/fs/namei.c:atomic_open() */
-+ struct dentry *const DENTRY_NOT_SET = (void *)-1UL;
-+
-+ IMustLock(dir);
-+ AuDebugOn(!dir->i_op->atomic_open);
-+
-+ err = au_br_test_oflag(args->open_flag, br);
-+ if (unlikely(err))
-+ goto out;
-+
-+ args->file->f_path.dentry = DENTRY_NOT_SET;
-+ args->file->f_path.mnt = au_br_mnt(br);
-+ err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag,
-+ args->create_mode, args->opened);
-+ if (err >= 0) {
-+		/* some filesystems don't set FILE_CREATED even on success? */
-+ if (*args->opened & FILE_CREATED)
-+ fsnotify_create(dir, dentry);
-+ } else
-+ goto out;
-+
-+
-+ if (!err) {
-+ /* todo: call VFS:may_open() here */
-+ err = open_check_o_direct(file);
-+ /* todo: ima_file_check() too? */
-+ if (!err && (args->open_flag & __FMODE_EXEC))
-+ err = deny_write_access(file);
-+ if (unlikely(err))
-+ /* note that the file is created and still opened */
-+ goto out;
-+ }
-+
-+ atomic_inc(&br->br_count);
-+ fsnotify_open(file);
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
-+{
-+ int err;
-+
-+ err = kern_path(name, flags, path);
-+ if (!err && d_is_positive(path->dentry))
-+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
-+ int len)
-+{
-+ struct path path = {
-+ .mnt = NULL
-+ };
-+
-+ /* VFS checks it too, but by WARN_ON_ONCE() */
-+ IMustLock(d_inode(parent));
-+
-+ path.dentry = lookup_one_len(name, parent, len);
-+ if (IS_ERR(path.dentry))
-+ goto out;
-+ if (d_is_positive(path.dentry))
-+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
-+
-+out:
-+ AuTraceErrPtr(path.dentry);
-+ return path.dentry;
-+}
-+
-+void vfsub_call_lkup_one(void *args)
-+{
-+ struct vfsub_lkup_one_args *a = args;
-+ *a->errp = vfsub_lkup_one(a->name, a->parent);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
-+ struct dentry *d2, struct au_hinode *hdir2)
-+{
-+ struct dentry *d;
-+
-+ lockdep_off();
-+ d = lock_rename(d1, d2);
-+ lockdep_on();
-+ au_hn_suspend(hdir1);
-+ if (hdir1 != hdir2)
-+ au_hn_suspend(hdir2);
-+
-+ return d;
-+}
-+
-+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
-+ struct dentry *d2, struct au_hinode *hdir2)
-+{
-+ au_hn_resume(hdir1);
-+ if (hdir1 != hdir2)
-+ au_hn_resume(hdir2);
-+ lockdep_off();
-+ unlock_rename(d1, d2);
-+ lockdep_on();
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_mknod(path, d, mode, 0);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_create(dir, path->dentry, mode, want_excl);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = *path;
-+ int did;
-+
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = path->dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_symlink(path, d, symname);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_symlink(dir, path->dentry, symname);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = *path;
-+ int did;
-+
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = path->dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_mknod(path, d, mode, new_encode_dev(dev));
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_mknod(dir, path->dentry, mode, dev);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = *path;
-+ int did;
-+
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = path->dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int au_test_nlink(struct inode *inode)
-+{
-+ const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
-+
-+ if (!au_test_fs_no_limit_nlink(inode->i_sb)
-+ || inode->i_nlink < link_max)
-+ return 0;
-+ return -EMLINK;
-+}
-+
-+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path,
-+ struct inode **delegated_inode)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ err = au_test_nlink(d_inode(src_dentry));
-+ if (unlikely(err))
-+ return err;
-+
-+ /* we don't call may_linkat() */
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_link(src_dentry, path, d);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_link(src_dentry, dir, path->dentry, delegated_inode);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = *path;
-+ int did;
-+
-+		/* fuse has a different in-memory inode for the same inode number */
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = path->dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ tmp.dentry = src_dentry;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
-+ struct inode *dir, struct path *path,
-+ struct inode **delegated_inode)
-+{
-+ int err;
-+ struct path tmp = {
-+ .mnt = path->mnt
-+ };
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+ IMustLock(src_dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ tmp.dentry = src_dentry->d_parent;
-+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
-+ delegated_inode, /*flags*/0);
-+ lockdep_on();
-+ if (!err) {
-+ int did;
-+
-+ tmp.dentry = d->d_parent;
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = src_dentry;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ tmp.dentry = src_dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_mkdir(path, d, mode);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_mkdir(dir, path->dentry, mode);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = *path;
-+ int did;
-+
-+ vfsub_update_h_iattr(&tmp, &did);
-+ if (did) {
-+ tmp.dentry = path->dentry->d_parent;
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
-+ }
-+ /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int vfsub_rmdir(struct inode *dir, struct path *path)
-+{
-+ int err;
-+ struct dentry *d;
-+
-+ IMustLock(dir);
-+
-+ d = path->dentry;
-+ path->dentry = d->d_parent;
-+ err = security_path_rmdir(path, d);
-+ path->dentry = d;
-+ if (unlikely(err))
-+ goto out;
-+
-+ lockdep_off();
-+ err = vfs_rmdir(dir, path->dentry);
-+ lockdep_on();
-+ if (!err) {
-+ struct path tmp = {
-+ .dentry = path->dentry->d_parent,
-+ .mnt = path->mnt
-+ };
-+
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* todo: support mmap_sem? */
-+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
-+ loff_t *ppos)
-+{
-+ ssize_t err;
-+
-+ lockdep_off();
-+ err = vfs_read(file, ubuf, count, ppos);
-+ lockdep_on();
-+ if (err >= 0)
-+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+/* todo: kernel_read()? */
-+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
-+ loff_t *ppos)
-+{
-+ ssize_t err;
-+ mm_segment_t oldfs;
-+ union {
-+ void *k;
-+ char __user *u;
-+ } buf;
-+
-+ buf.k = kbuf;
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ err = vfsub_read_u(file, buf.u, count, ppos);
-+ set_fs(oldfs);
-+ return err;
-+}
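-+
-+/*
-+ * The union above exists only to cast away the __user annotation
-+ * without a sparse warning; set_fs(KERNEL_DS) then widens the
-+ * address-limit check so the kernel buffer passes access_ok() inside
-+ * vfs_read().  An illustrative user-space reduction of the type-pun
-+ * half (not aufs code):
-+ */
-+#include <stdio.h>
-+
-+#define __user	/* empty outside the kernel; a sparse-only annotation */
-+
-+int main(void)
-+{
-+	char kbuf[8] = "hello";
-+	union {
-+		void *k;
-+		char __user *u;
-+	} buf;
-+
-+	buf.k = kbuf;
-+	printf("same address: %p == %p\n", buf.k, (void *)buf.u);
-+	return 0;
-+}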
-+
-+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
-+ loff_t *ppos)
-+{
-+ ssize_t err;
-+
-+ lockdep_off();
-+ err = vfs_write(file, ubuf, count, ppos);
-+ lockdep_on();
-+ if (err >= 0)
-+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
-+{
-+ ssize_t err;
-+ mm_segment_t oldfs;
-+ union {
-+ void *k;
-+ const char __user *u;
-+ } buf;
-+
-+ buf.k = kbuf;
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ err = vfsub_write_u(file, buf.u, count, ppos);
-+ set_fs(oldfs);
-+ return err;
-+}
-+
-+int vfsub_flush(struct file *file, fl_owner_t id)
-+{
-+ int err;
-+
-+ err = 0;
-+ if (file->f_op->flush) {
-+ if (!au_test_nfs(file->f_path.dentry->d_sb))
-+ err = file->f_op->flush(file, id);
-+ else {
-+ lockdep_off();
-+ err = file->f_op->flush(file, id);
-+ lockdep_on();
-+ }
-+ if (!err)
-+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
-+ /*ignore*/
-+ }
-+ return err;
-+}
-+
-+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx)
-+{
-+ int err;
-+
-+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos);
-+
-+ lockdep_off();
-+ err = iterate_dir(file, ctx);
-+ lockdep_on();
-+ if (err >= 0)
-+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+long vfsub_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
-+{
-+ long err;
-+
-+ lockdep_off();
-+ err = do_splice_to(in, ppos, pipe, len, flags);
-+ lockdep_on();
-+ file_accessed(in);
-+ if (err >= 0)
-+ vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags)
-+{
-+ long err;
-+
-+ lockdep_off();
-+ err = do_splice_from(pipe, out, ppos, len, flags);
-+ lockdep_on();
-+ if (err >= 0)
-+ vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
-+ return err;
-+}
-+
-+int vfsub_fsync(struct file *file, struct path *path, int datasync)
-+{
-+ int err;
-+
-+ /* file can be NULL */
-+ lockdep_off();
-+ err = vfs_fsync(file, datasync);
-+ lockdep_on();
-+ if (!err) {
-+ if (!path) {
-+ AuDebugOn(!file);
-+ path = &file->f_path;
-+ }
-+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
-+ }
-+ return err;
-+}
-+
-+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
-+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
-+ struct file *h_file)
-+{
-+ int err;
-+ struct inode *h_inode;
-+ struct super_block *h_sb;
-+
-+ if (!h_file) {
-+ err = vfsub_truncate(h_path, length);
-+ goto out;
-+ }
-+
-+ h_inode = d_inode(h_path->dentry);
-+ h_sb = h_inode->i_sb;
-+ lockdep_off();
-+ sb_start_write(h_sb);
-+ lockdep_on();
-+ err = locks_verify_truncate(h_inode, h_file, length);
-+ if (!err)
-+ err = security_path_truncate(h_path);
-+ if (!err) {
-+ lockdep_off();
-+ err = do_truncate(h_path->dentry, length, attr, h_file);
-+ lockdep_on();
-+ }
-+ lockdep_off();
-+ sb_end_write(h_sb);
-+ lockdep_on();
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_vfsub_mkdir_args {
-+ int *errp;
-+ struct inode *dir;
-+ struct path *path;
-+ int mode;
-+};
-+
-+static void au_call_vfsub_mkdir(void *args)
-+{
-+ struct au_vfsub_mkdir_args *a = args;
-+ *a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
-+}
-+
-+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
-+{
-+ int err, do_sio, wkq_err;
-+
-+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
-+ if (!do_sio) {
-+ lockdep_off();
-+ err = vfsub_mkdir(dir, path, mode);
-+ lockdep_on();
-+ } else {
-+ struct au_vfsub_mkdir_args args = {
-+ .errp = &err,
-+ .dir = dir,
-+ .path = path,
-+ .mode = mode
-+ };
-+ wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ return err;
-+}
-+
-+struct au_vfsub_rmdir_args {
-+ int *errp;
-+ struct inode *dir;
-+ struct path *path;
-+};
-+
-+static void au_call_vfsub_rmdir(void *args)
-+{
-+ struct au_vfsub_rmdir_args *a = args;
-+ *a->errp = vfsub_rmdir(a->dir, a->path);
-+}
-+
-+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
-+{
-+ int err, do_sio, wkq_err;
-+
-+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
-+ if (!do_sio) {
-+ lockdep_off();
-+ err = vfsub_rmdir(dir, path);
-+ lockdep_on();
-+ } else {
-+ struct au_vfsub_rmdir_args args = {
-+ .errp = &err,
-+ .dir = dir,
-+ .path = path
-+ };
-+ wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct notify_change_args {
-+ int *errp;
-+ struct path *path;
-+ struct iattr *ia;
-+ struct inode **delegated_inode;
-+};
-+
-+static void call_notify_change(void *args)
-+{
-+ struct notify_change_args *a = args;
-+ struct inode *h_inode;
-+
-+ h_inode = d_inode(a->path->dentry);
-+ IMustLock(h_inode);
-+
-+ *a->errp = -EPERM;
-+ if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
-+ lockdep_off();
-+ *a->errp = notify_change(a->path->dentry, a->ia,
-+ a->delegated_inode);
-+ lockdep_on();
-+ if (!*a->errp)
-+ vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
-+ }
-+ AuTraceErr(*a->errp);
-+}
-+
-+int vfsub_notify_change(struct path *path, struct iattr *ia,
-+ struct inode **delegated_inode)
-+{
-+ int err;
-+ struct notify_change_args args = {
-+ .errp = &err,
-+ .path = path,
-+ .ia = ia,
-+ .delegated_inode = delegated_inode
-+ };
-+
-+ call_notify_change(&args);
-+
-+ return err;
-+}
-+
-+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
-+ struct inode **delegated_inode)
-+{
-+ int err, wkq_err;
-+ struct notify_change_args args = {
-+ .errp = &err,
-+ .path = path,
-+ .ia = ia,
-+ .delegated_inode = delegated_inode
-+ };
-+
-+ wkq_err = au_wkq_wait(call_notify_change, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct unlink_args {
-+ int *errp;
-+ struct inode *dir;
-+ struct path *path;
-+ struct inode **delegated_inode;
-+};
-+
-+static void call_unlink(void *args)
-+{
-+ struct unlink_args *a = args;
-+ struct dentry *d = a->path->dentry;
-+ struct inode *h_inode;
-+ const int stop_sillyrename = (au_test_nfs(d->d_sb)
-+ && au_dcount(d) == 1);
-+
-+ IMustLock(a->dir);
-+
-+ a->path->dentry = d->d_parent;
-+ *a->errp = security_path_unlink(a->path, d);
-+ a->path->dentry = d;
-+ if (unlikely(*a->errp))
-+ return;
-+
-+ if (!stop_sillyrename)
-+ dget(d);
-+ h_inode = NULL;
-+ if (d_is_positive(d)) {
-+ h_inode = d_inode(d);
-+ ihold(h_inode);
-+ }
-+
-+ lockdep_off();
-+ *a->errp = vfs_unlink(a->dir, d, a->delegated_inode);
-+ lockdep_on();
-+ if (!*a->errp) {
-+ struct path tmp = {
-+ .dentry = d->d_parent,
-+ .mnt = a->path->mnt
-+ };
-+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
-+ }
-+
-+ if (!stop_sillyrename)
-+ dput(d);
-+ if (h_inode)
-+ iput(h_inode);
-+
-+ AuTraceErr(*a->errp);
-+}
-+
-+/*
-+ * @dir: must be locked.
-+ * @dentry: target dentry.
-+ */
-+int vfsub_unlink(struct inode *dir, struct path *path,
-+ struct inode **delegated_inode, int force)
-+{
-+ int err;
-+ struct unlink_args args = {
-+ .errp = &err,
-+ .dir = dir,
-+ .path = path,
-+ .delegated_inode = delegated_inode
-+ };
-+
-+ if (!force)
-+ call_unlink(&args);
-+ else {
-+ int wkq_err;
-+
-+ wkq_err = au_wkq_wait(call_unlink, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ return err;
-+}
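
The vfsub_* helpers in the removed fs/aufs/vfsub.c above share one pattern: run the real VFS operation with lockdep temporarily disabled (lockdep_off()/lockdep_on()), then on success refresh aufs's cached attributes for the affected lower path via vfsub_update_h_iattr(), deliberately ignoring the result. The following is a minimal userspace sketch of that wrap-then-refresh idea only; struct cached_file, refresh_cached_attr() and wrapped_append() are invented stand-ins for illustration, not aufs or kernel APIs.

#include <stdio.h>
#include <sys/stat.h>

struct cached_file {
	const char *path;
	struct stat attr;		/* cached attributes; may go stale */
};

/* stand-in for vfsub_update_h_iattr(): re-read attributes after a change */
static int refresh_cached_attr(struct cached_file *f)
{
	return stat(f->path, &f->attr);
}

/* stand-in for a vfsub_* wrapper: do the real work, then refresh the cache */
static long wrapped_append(struct cached_file *f, const void *buf, size_t len)
{
	FILE *fp = fopen(f->path, "ab");
	size_t n;

	if (!fp)
		return -1;
	n = fwrite(buf, 1, len, fp);
	fclose(fp);
	if (n == len)
		refresh_cached_attr(f);	/* result ignored, as the helpers above do */
	return (long)n;
}

int main(void)
{
	struct cached_file f = { .path = "demo.txt" };

	if (wrapped_append(&f, "hello\n", 6) == 6)
		printf("cached size: %lld\n", (long long)f.attr.st_size);
	return 0;
}
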
-diff -Nur linux-4.1.10.orig/fs/aufs/vfsub.h linux-4.1.10/fs/aufs/vfsub.h
---- linux-4.1.10.orig/fs/aufs/vfsub.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/vfsub.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,286 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * sub-routines for VFS
-+ */
-+
-+#ifndef __AUFS_VFSUB_H__
-+#define __AUFS_VFSUB_H__
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/fs.h>
-+#include <linux/mount.h>
-+#include <linux/xattr.h>
-+#include "debug.h"
-+
-+/* copied from linux/fs/internal.h */
-+/* todo: BAD approach!! */
-+extern void __mnt_drop_write(struct vfsmount *);
-+extern spinlock_t inode_sb_list_lock;
-+extern int open_check_o_direct(struct file *f);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* lock subclass for lower inode */
-+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
-+/* reduce? gave up. */
-+enum {
-+ AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */
-+ AuLsc_I_PARENT, /* lower inode, parent first */
-+ AuLsc_I_PARENT2, /* copyup dirs */
-+ AuLsc_I_PARENT3, /* copyup wh */
-+ AuLsc_I_CHILD,
-+ AuLsc_I_CHILD2,
-+ AuLsc_I_End
-+};
-+
-+/* to make debugging easier, do not make these inline functions */
-+#define MtxMustLock(mtx) AuDebugOn(!mutex_is_locked(mtx))
-+#define IMustLock(i) MtxMustLock(&(i)->i_mutex)
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline void vfsub_drop_nlink(struct inode *inode)
-+{
-+ AuDebugOn(!inode->i_nlink);
-+ drop_nlink(inode);
-+}
-+
-+static inline void vfsub_dead_dir(struct inode *inode)
-+{
-+ AuDebugOn(!S_ISDIR(inode->i_mode));
-+ inode->i_flags |= S_DEAD;
-+ clear_nlink(inode);
-+}
-+
-+static inline int vfsub_native_ro(struct inode *inode)
-+{
-+ return (inode->i_sb->s_flags & MS_RDONLY)
-+ || IS_RDONLY(inode)
-+ /* || IS_APPEND(inode) */
-+ || IS_IMMUTABLE(inode);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int vfsub_update_h_iattr(struct path *h_path, int *did);
-+struct file *vfsub_dentry_open(struct path *path, int flags);
-+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
-+struct vfsub_aopen_args {
-+ struct file *file;
-+ unsigned int open_flag;
-+ umode_t create_mode;
-+ int *opened;
-+};
-+struct au_branch;
-+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
-+ struct vfsub_aopen_args *args, struct au_branch *br);
-+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
-+
-+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
-+ int len);
-+
-+struct vfsub_lkup_one_args {
-+ struct dentry **errp;
-+ struct qstr *name;
-+ struct dentry *parent;
-+};
-+
-+static inline struct dentry *vfsub_lkup_one(struct qstr *name,
-+ struct dentry *parent)
-+{
-+ return vfsub_lookup_one_len(name->name, parent, name->len);
-+}
-+
-+void vfsub_call_lkup_one(void *args);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline int vfsub_mnt_want_write(struct vfsmount *mnt)
-+{
-+ int err;
-+
-+ lockdep_off();
-+ err = mnt_want_write(mnt);
-+ lockdep_on();
-+ return err;
-+}
-+
-+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt)
-+{
-+ lockdep_off();
-+ mnt_drop_write(mnt);
-+ lockdep_on();
-+}
-+
-+#if 0 /* reserved */
-+static inline void vfsub_mnt_drop_write_file(struct file *file)
-+{
-+ lockdep_off();
-+ mnt_drop_write_file(file);
-+ lockdep_on();
-+}
-+#endif
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_hinode;
-+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
-+ struct dentry *d2, struct au_hinode *hdir2);
-+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
-+ struct dentry *d2, struct au_hinode *hdir2);
-+
-+int vfsub_create(struct inode *dir, struct path *path, int mode,
-+ bool want_excl);
-+int vfsub_symlink(struct inode *dir, struct path *path,
-+ const char *symname);
-+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
-+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
-+ struct path *path, struct inode **delegated_inode);
-+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
-+ struct inode *hdir, struct path *path,
-+ struct inode **delegated_inode);
-+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
-+int vfsub_rmdir(struct inode *dir, struct path *path);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
-+ loff_t *ppos);
-+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
-+ loff_t *ppos);
-+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
-+ loff_t *ppos);
-+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
-+ loff_t *ppos);
-+int vfsub_flush(struct file *file, fl_owner_t id);
-+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx);
-+
-+static inline loff_t vfsub_f_size_read(struct file *file)
-+{
-+ return i_size_read(file_inode(file));
-+}
-+
-+static inline unsigned int vfsub_file_flags(struct file *file)
-+{
-+ unsigned int flags;
-+
-+ spin_lock(&file->f_lock);
-+ flags = file->f_flags;
-+ spin_unlock(&file->f_lock);
-+
-+ return flags;
-+}
-+
-+#if 0 /* reserved */
-+static inline void vfsub_file_accessed(struct file *h_file)
-+{
-+ file_accessed(h_file);
-+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
-+}
-+#endif
-+
-+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
-+ struct dentry *h_dentry)
-+{
-+ struct path h_path = {
-+ .dentry = h_dentry,
-+ .mnt = h_mnt
-+ };
-+ touch_atime(&h_path);
-+ vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
-+}
-+
-+static inline int vfsub_update_time(struct inode *h_inode, struct timespec *ts,
-+ int flags)
-+{
-+ return generic_update_time(h_inode, ts, flags);
-+ /* no vfsub_update_h_iattr() since we don't have struct path */
-+}
-+
-+long vfsub_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags);
-+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags);
-+
-+static inline long vfsub_truncate(struct path *path, loff_t length)
-+{
-+ long err;
-+
-+ lockdep_off();
-+ err = vfs_truncate(path, length);
-+ lockdep_on();
-+ return err;
-+}
-+
-+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
-+ struct file *h_file);
-+int vfsub_fsync(struct file *file, struct path *path, int datasync);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
-+{
-+ loff_t err;
-+
-+ lockdep_off();
-+ err = vfs_llseek(file, offset, origin);
-+ lockdep_on();
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
-+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
-+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
-+ struct inode **delegated_inode);
-+int vfsub_notify_change(struct path *path, struct iattr *ia,
-+ struct inode **delegated_inode);
-+int vfsub_unlink(struct inode *dir, struct path *path,
-+ struct inode **delegated_inode, int force);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline int vfsub_setxattr(struct dentry *dentry, const char *name,
-+ const void *value, size_t size, int flags)
-+{
-+ int err;
-+
-+ lockdep_off();
-+ err = vfs_setxattr(dentry, name, value, size, flags);
-+ lockdep_on();
-+
-+ return err;
-+}
-+
-+static inline int vfsub_removexattr(struct dentry *dentry, const char *name)
-+{
-+ int err;
-+
-+ lockdep_off();
-+ err = vfs_removexattr(dentry, name);
-+ lockdep_on();
-+
-+ return err;
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_VFSUB_H__ */
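
Among the inlines in the removed fs/aufs/vfsub.h above, vfsub_file_flags() snapshots file->f_flags under file->f_lock rather than reading the field bare, since another task may update the flags concurrently. A rough userspace analogue of that snapshot-under-lock idiom follows; a pthread spinlock stands in for f_lock, and struct demo_file and its fields are invented for illustration.

#include <pthread.h>
#include <stdio.h>

struct demo_file {
	pthread_spinlock_t lock;	/* stands in for file->f_lock */
	unsigned int flags;		/* stands in for file->f_flags */
};

/* like vfsub_file_flags(): take a consistent snapshot under the lock */
static unsigned int demo_file_flags(struct demo_file *f)
{
	unsigned int flags;

	pthread_spin_lock(&f->lock);
	flags = f->flags;
	pthread_spin_unlock(&f->lock);
	return flags;
}

int main(void)
{
	struct demo_file f;

	pthread_spin_init(&f.lock, PTHREAD_PROCESS_PRIVATE);
	f.flags = 0x8000;		/* arbitrary demo value */
	printf("flags: %#x\n", demo_file_flags(&f));
	pthread_spin_destroy(&f.lock);
	return 0;
}
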
-diff -Nur linux-4.1.10.orig/fs/aufs/wbr_policy.c linux-4.1.10/fs/aufs/wbr_policy.c
---- linux-4.1.10.orig/fs/aufs/wbr_policy.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/wbr_policy.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,765 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * policies for selecting one among multiple writable branches
-+ */
-+
-+#include <linux/statfs.h>
-+#include "aufs.h"
-+
-+/* subset of cpup_attr() */
-+static noinline_for_stack
-+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
-+{
-+ int err, sbits;
-+ struct iattr ia;
-+ struct inode *h_isrc;
-+
-+ h_isrc = d_inode(h_src);
-+ ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
-+ ia.ia_mode = h_isrc->i_mode;
-+ ia.ia_uid = h_isrc->i_uid;
-+ ia.ia_gid = h_isrc->i_gid;
-+ sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
-+ au_cpup_attr_flags(d_inode(h_path->dentry), h_isrc->i_flags);
-+ /* no delegation since it is just created */
-+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
-+
-+ /* is this nfs only? */
-+ if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
-+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
-+ ia.ia_mode = h_isrc->i_mode;
-+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
-+ }
-+
-+ return err;
-+}
-+
-+#define AuCpdown_PARENT_OPQ 1
-+#define AuCpdown_WHED (1 << 1)
-+#define AuCpdown_MADE_DIR (1 << 2)
-+#define AuCpdown_DIROPQ (1 << 3)
-+#define au_ftest_cpdown(flags, name) ((flags) & AuCpdown_##name)
-+#define au_fset_cpdown(flags, name) \
-+ do { (flags) |= AuCpdown_##name; } while (0)
-+#define au_fclr_cpdown(flags, name) \
-+ do { (flags) &= ~AuCpdown_##name; } while (0)
-+
-+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
-+ unsigned int *flags)
-+{
-+ int err;
-+ struct dentry *opq_dentry;
-+
-+ opq_dentry = au_diropq_create(dentry, bdst);
-+ err = PTR_ERR(opq_dentry);
-+ if (IS_ERR(opq_dentry))
-+ goto out;
-+ dput(opq_dentry);
-+ au_fset_cpdown(*flags, DIROPQ);
-+
-+out:
-+ return err;
-+}
-+
-+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
-+ struct inode *dir, aufs_bindex_t bdst)
-+{
-+ int err;
-+ struct path h_path;
-+ struct au_branch *br;
-+
-+ br = au_sbr(dentry->d_sb, bdst);
-+ h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
-+ err = PTR_ERR(h_path.dentry);
-+ if (IS_ERR(h_path.dentry))
-+ goto out;
-+
-+ err = 0;
-+ if (d_is_positive(h_path.dentry)) {
-+ h_path.mnt = au_br_mnt(br);
-+ err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
-+ dentry);
-+ }
-+ dput(h_path.dentry);
-+
-+out:
-+ return err;
-+}
-+
-+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
-+ struct au_pin *pin,
-+ struct dentry *h_parent, void *arg)
-+{
-+ int err, rerr;
-+ aufs_bindex_t bopq, bstart;
-+ struct path h_path;
-+ struct dentry *parent;
-+ struct inode *h_dir, *h_inode, *inode, *dir;
-+ unsigned int *flags = arg;
-+
-+ bstart = au_dbstart(dentry);
-+ /* dentry is di-locked */
-+ parent = dget_parent(dentry);
-+ dir = d_inode(parent);
-+ h_dir = d_inode(h_parent);
-+ AuDebugOn(h_dir != au_h_iptr(dir, bdst));
-+ IMustLock(h_dir);
-+
-+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
-+ if (unlikely(err < 0))
-+ goto out;
-+ h_path.dentry = au_h_dptr(dentry, bdst);
-+ h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
-+ err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path,
-+ S_IRWXU | S_IRUGO | S_IXUGO);
-+ if (unlikely(err))
-+ goto out_put;
-+ au_fset_cpdown(*flags, MADE_DIR);
-+
-+ bopq = au_dbdiropq(dentry);
-+ au_fclr_cpdown(*flags, WHED);
-+ au_fclr_cpdown(*flags, DIROPQ);
-+ if (au_dbwh(dentry) == bdst)
-+ au_fset_cpdown(*flags, WHED);
-+ if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
-+ au_fset_cpdown(*flags, PARENT_OPQ);
-+ h_inode = d_inode(h_path.dentry);
-+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
-+ if (au_ftest_cpdown(*flags, WHED)) {
-+ err = au_cpdown_dir_opq(dentry, bdst, flags);
-+ if (unlikely(err)) {
-+ mutex_unlock(&h_inode->i_mutex);
-+ goto out_dir;
-+ }
-+ }
-+
-+ err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart));
-+ mutex_unlock(&h_inode->i_mutex);
-+ if (unlikely(err))
-+ goto out_opq;
-+
-+ if (au_ftest_cpdown(*flags, WHED)) {
-+ err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
-+ if (unlikely(err))
-+ goto out_opq;
-+ }
-+
-+ inode = d_inode(dentry);
-+ if (au_ibend(inode) < bdst)
-+ au_set_ibend(inode, bdst);
-+ au_set_h_iptr(inode, bdst, au_igrab(h_inode),
-+ au_hi_flags(inode, /*isdir*/1));
-+ au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0);
-+ goto out; /* success */
-+
-+ /* revert */
-+out_opq:
-+ if (au_ftest_cpdown(*flags, DIROPQ)) {
-+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
-+ rerr = au_diropq_remove(dentry, bdst);
-+ mutex_unlock(&h_inode->i_mutex);
-+ if (unlikely(rerr)) {
-+ AuIOErr("failed removing diropq for %pd b%d (%d)\n",
-+ dentry, bdst, rerr);
-+ err = -EIO;
-+ goto out;
-+ }
-+ }
-+out_dir:
-+ if (au_ftest_cpdown(*flags, MADE_DIR)) {
-+ rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
-+ if (unlikely(rerr)) {
-+ AuIOErr("failed removing %pd b%d (%d)\n",
-+ dentry, bdst, rerr);
-+ err = -EIO;
-+ }
-+ }
-+out_put:
-+ au_set_h_dptr(dentry, bdst, NULL);
-+ if (au_dbend(dentry) == bdst)
-+ au_update_dbend(dentry);
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
-+{
-+ int err;
-+ unsigned int flags;
-+
-+ flags = 0;
-+ err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* policies for create */
-+
-+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ int err, i, j, ndentry;
-+ aufs_bindex_t bopq;
-+ struct au_dcsub_pages dpages;
-+ struct au_dpage *dpage;
-+ struct dentry **dentries, *parent, *d;
-+
-+ err = au_dpages_init(&dpages, GFP_NOFS);
-+ if (unlikely(err))
-+ goto out;
-+ parent = dget_parent(dentry);
-+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
-+ if (unlikely(err))
-+ goto out_free;
-+
-+ err = bindex;
-+ for (i = 0; i < dpages.ndpage; i++) {
-+ dpage = dpages.dpages + i;
-+ dentries = dpage->dentries;
-+ ndentry = dpage->ndentry;
-+ for (j = 0; j < ndentry; j++) {
-+ d = dentries[j];
-+ di_read_lock_parent2(d, !AuLock_IR);
-+ bopq = au_dbdiropq(d);
-+ di_read_unlock(d, !AuLock_IR);
-+ if (bopq >= 0 && bopq < err)
-+ err = bopq;
-+ }
-+ }
-+
-+out_free:
-+ dput(parent);
-+ au_dpages_free(&dpages);
-+out:
-+ return err;
-+}
-+
-+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ for (; bindex >= 0; bindex--)
-+ if (!au_br_rdonly(au_sbr(sb, bindex)))
-+ return bindex;
-+ return -EROFS;
-+}
-+
-+/* top down parent */
-+static int au_wbr_create_tdp(struct dentry *dentry,
-+ unsigned int flags __maybe_unused)
-+{
-+ int err;
-+ aufs_bindex_t bstart, bindex;
-+ struct super_block *sb;
-+ struct dentry *parent, *h_parent;
-+
-+ sb = dentry->d_sb;
-+ bstart = au_dbstart(dentry);
-+ err = bstart;
-+ if (!au_br_rdonly(au_sbr(sb, bstart)))
-+ goto out;
-+
-+ err = -EROFS;
-+ parent = dget_parent(dentry);
-+ for (bindex = au_dbstart(parent); bindex < bstart; bindex++) {
-+ h_parent = au_h_dptr(parent, bindex);
-+ if (!h_parent || d_is_negative(h_parent))
-+ continue;
-+
-+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
-+ err = bindex;
-+ break;
-+ }
-+ }
-+ dput(parent);
-+
-+ /* bottom up here */
-+ if (unlikely(err < 0)) {
-+ err = au_wbr_bu(sb, bstart - 1);
-+ if (err >= 0)
-+ err = au_wbr_nonopq(dentry, err);
-+ }
-+
-+out:
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* an exception for the policy other than tdp */
-+static int au_wbr_create_exp(struct dentry *dentry)
-+{
-+ int err;
-+ aufs_bindex_t bwh, bdiropq;
-+ struct dentry *parent;
-+
-+ err = -1;
-+ bwh = au_dbwh(dentry);
-+ parent = dget_parent(dentry);
-+ bdiropq = au_dbdiropq(parent);
-+ if (bwh >= 0) {
-+ if (bdiropq >= 0)
-+ err = min(bdiropq, bwh);
-+ else
-+ err = bwh;
-+ AuDbg("%d\n", err);
-+ } else if (bdiropq >= 0) {
-+ err = bdiropq;
-+ AuDbg("%d\n", err);
-+ }
-+ dput(parent);
-+
-+ if (err >= 0)
-+ err = au_wbr_nonopq(dentry, err);
-+
-+ if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
-+ err = -1;
-+
-+ AuDbg("%d\n", err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* round robin */
-+static int au_wbr_create_init_rr(struct super_block *sb)
-+{
-+ int err;
-+
-+ err = au_wbr_bu(sb, au_sbend(sb));
-+ atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
-+ /* smp_mb(); */
-+
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags)
-+{
-+ int err, nbr;
-+ unsigned int u;
-+ aufs_bindex_t bindex, bend;
-+ struct super_block *sb;
-+ atomic_t *next;
-+
-+ err = au_wbr_create_exp(dentry);
-+ if (err >= 0)
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ next = &au_sbi(sb)->si_wbr_rr_next;
-+ bend = au_sbend(sb);
-+ nbr = bend + 1;
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ if (!au_ftest_wbr(flags, DIR)) {
-+ err = atomic_dec_return(next) + 1;
-+ /* modulo for 0 is meaningless */
-+ if (unlikely(!err))
-+ err = atomic_dec_return(next) + 1;
-+ } else
-+ err = atomic_read(next);
-+ AuDbg("%d\n", err);
-+ u = err;
-+ err = u % nbr;
-+ AuDbg("%d\n", err);
-+ if (!au_br_rdonly(au_sbr(sb, err)))
-+ break;
-+ err = -EROFS;
-+ }
-+
-+ if (err >= 0)
-+ err = au_wbr_nonopq(dentry, err);
-+
-+out:
-+ AuDbg("%d\n", err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* most free space */
-+static void au_mfs(struct dentry *dentry, struct dentry *parent)
-+{
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct au_wbr_mfs *mfs;
-+ struct dentry *h_parent;
-+ aufs_bindex_t bindex, bend;
-+ int err;
-+ unsigned long long b, bavail;
-+ struct path h_path;
-+ /* reduce the stack usage */
-+ struct kstatfs *st;
-+
-+ st = kmalloc(sizeof(*st), GFP_NOFS);
-+ if (unlikely(!st)) {
-+ AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
-+ return;
-+ }
-+
-+ bavail = 0;
-+ sb = dentry->d_sb;
-+ mfs = &au_sbi(sb)->si_wbr_mfs;
-+ MtxMustLock(&mfs->mfs_lock);
-+ mfs->mfs_bindex = -EROFS;
-+ mfs->mfsrr_bytes = 0;
-+ if (!parent) {
-+ bindex = 0;
-+ bend = au_sbend(sb);
-+ } else {
-+ bindex = au_dbstart(parent);
-+ bend = au_dbtaildir(parent);
-+ }
-+
-+ for (; bindex <= bend; bindex++) {
-+ if (parent) {
-+ h_parent = au_h_dptr(parent, bindex);
-+ if (!h_parent || d_is_negative(h_parent))
-+ continue;
-+ }
-+ br = au_sbr(sb, bindex);
-+ if (au_br_rdonly(br))
-+ continue;
-+
-+ /* sb->s_root for NFS is unreliable */
-+ h_path.mnt = au_br_mnt(br);
-+ h_path.dentry = h_path.mnt->mnt_root;
-+ err = vfs_statfs(&h_path, st);
-+ if (unlikely(err)) {
-+ AuWarn1("failed statfs, b%d, %d\n", bindex, err);
-+ continue;
-+ }
-+
-+ /* when the available size is equal, select the lower one */
-+ BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
-+ || sizeof(b) < sizeof(st->f_bsize));
-+ b = st->f_bavail * st->f_bsize;
-+ br->br_wbr->wbr_bytes = b;
-+ if (b >= bavail) {
-+ bavail = b;
-+ mfs->mfs_bindex = bindex;
-+ mfs->mfs_jiffy = jiffies;
-+ }
-+ }
-+
-+ mfs->mfsrr_bytes = bavail;
-+ AuDbg("b%d\n", mfs->mfs_bindex);
-+ kfree(st);
-+}
-+
-+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags)
-+{
-+ int err;
-+ struct dentry *parent;
-+ struct super_block *sb;
-+ struct au_wbr_mfs *mfs;
-+
-+ err = au_wbr_create_exp(dentry);
-+ if (err >= 0)
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ parent = NULL;
-+ if (au_ftest_wbr(flags, PARENT))
-+ parent = dget_parent(dentry);
-+ mfs = &au_sbi(sb)->si_wbr_mfs;
-+ mutex_lock(&mfs->mfs_lock);
-+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
-+ || mfs->mfs_bindex < 0
-+ || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
-+ au_mfs(dentry, parent);
-+ mutex_unlock(&mfs->mfs_lock);
-+ err = mfs->mfs_bindex;
-+ dput(parent);
-+
-+ if (err >= 0)
-+ err = au_wbr_nonopq(dentry, err);
-+
-+out:
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+static int au_wbr_create_init_mfs(struct super_block *sb)
-+{
-+ struct au_wbr_mfs *mfs;
-+
-+ mfs = &au_sbi(sb)->si_wbr_mfs;
-+ mutex_init(&mfs->mfs_lock);
-+ mfs->mfs_jiffy = 0;
-+ mfs->mfs_bindex = -EROFS;
-+
-+ return 0;
-+}
-+
-+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
-+{
-+ mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
-+ return 0;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* most free space and then round robin */
-+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags)
-+{
-+ int err;
-+ struct au_wbr_mfs *mfs;
-+
-+ err = au_wbr_create_mfs(dentry, flags);
-+ if (err >= 0) {
-+ mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
-+ mutex_lock(&mfs->mfs_lock);
-+ if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
-+ err = au_wbr_create_rr(dentry, flags);
-+ mutex_unlock(&mfs->mfs_lock);
-+ }
-+
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+static int au_wbr_create_init_mfsrr(struct super_block *sb)
-+{
-+ int err;
-+
-+ au_wbr_create_init_mfs(sb); /* ignore */
-+ err = au_wbr_create_init_rr(sb);
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* top down parent and most free space */
-+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags)
-+{
-+ int err, e2;
-+ unsigned long long b;
-+ aufs_bindex_t bindex, bstart, bend;
-+ struct super_block *sb;
-+ struct dentry *parent, *h_parent;
-+ struct au_branch *br;
-+
-+ err = au_wbr_create_tdp(dentry, flags);
-+ if (unlikely(err < 0))
-+ goto out;
-+ parent = dget_parent(dentry);
-+ bstart = au_dbstart(parent);
-+ bend = au_dbtaildir(parent);
-+ if (bstart == bend)
-+ goto out_parent; /* success */
-+
-+ e2 = au_wbr_create_mfs(dentry, flags);
-+ if (e2 < 0)
-+ goto out_parent; /* success */
-+
-+	/* when the available size is equal, select the upper one */
-+ sb = dentry->d_sb;
-+ br = au_sbr(sb, err);
-+ b = br->br_wbr->wbr_bytes;
-+ AuDbg("b%d, %llu\n", err, b);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ h_parent = au_h_dptr(parent, bindex);
-+ if (!h_parent || d_is_negative(h_parent))
-+ continue;
-+
-+ br = au_sbr(sb, bindex);
-+ if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
-+ b = br->br_wbr->wbr_bytes;
-+ err = bindex;
-+ AuDbg("b%d, %llu\n", err, b);
-+ }
-+ }
-+
-+ if (err >= 0)
-+ err = au_wbr_nonopq(dentry, err);
-+
-+out_parent:
-+ dput(parent);
-+out:
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * - top down parent
-+ * - most free space with parent
-+ * - most free space round-robin regardless of the parent
-+ */
-+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags)
-+{
-+ int err;
-+ unsigned long long watermark;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct au_wbr_mfs *mfs;
-+
-+ err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT);
-+ if (unlikely(err < 0))
-+ goto out;
-+
-+ sb = dentry->d_sb;
-+ br = au_sbr(sb, err);
-+ mfs = &au_sbi(sb)->si_wbr_mfs;
-+ mutex_lock(&mfs->mfs_lock);
-+ watermark = mfs->mfsrr_watermark;
-+ mutex_unlock(&mfs->mfs_lock);
-+ if (br->br_wbr->wbr_bytes < watermark)
-+		/* regardless of the parent dir */
-+ err = au_wbr_create_mfsrr(dentry, flags);
-+
-+out:
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* policies for copyup */
-+
-+/* top down parent */
-+static int au_wbr_copyup_tdp(struct dentry *dentry)
-+{
-+ return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0);
-+}
-+
-+/* bottom up parent */
-+static int au_wbr_copyup_bup(struct dentry *dentry)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bstart;
-+ struct dentry *parent, *h_parent;
-+ struct super_block *sb;
-+
-+ err = -EROFS;
-+ sb = dentry->d_sb;
-+ parent = dget_parent(dentry);
-+ bstart = au_dbstart(parent);
-+ for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) {
-+ h_parent = au_h_dptr(parent, bindex);
-+ if (!h_parent || d_is_negative(h_parent))
-+ continue;
-+
-+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
-+ err = bindex;
-+ break;
-+ }
-+ }
-+ dput(parent);
-+
-+ /* bottom up here */
-+ if (unlikely(err < 0))
-+ err = au_wbr_bu(sb, bstart - 1);
-+
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+/* bottom up */
-+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart)
-+{
-+ int err;
-+
-+ err = au_wbr_bu(dentry->d_sb, bstart);
-+ AuDbg("b%d\n", err);
-+ if (err > bstart)
-+ err = au_wbr_nonopq(dentry, err);
-+
-+ AuDbg("b%d\n", err);
-+ return err;
-+}
-+
-+static int au_wbr_copyup_bu(struct dentry *dentry)
-+{
-+ int err;
-+ aufs_bindex_t bstart;
-+
-+ bstart = au_dbstart(dentry);
-+ err = au_wbr_do_copyup_bu(dentry, bstart);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
-+ [AuWbrCopyup_TDP] = {
-+ .copyup = au_wbr_copyup_tdp
-+ },
-+ [AuWbrCopyup_BUP] = {
-+ .copyup = au_wbr_copyup_bup
-+ },
-+ [AuWbrCopyup_BU] = {
-+ .copyup = au_wbr_copyup_bu
-+ }
-+};
-+
-+struct au_wbr_create_operations au_wbr_create_ops[] = {
-+ [AuWbrCreate_TDP] = {
-+ .create = au_wbr_create_tdp
-+ },
-+ [AuWbrCreate_RR] = {
-+ .create = au_wbr_create_rr,
-+ .init = au_wbr_create_init_rr
-+ },
-+ [AuWbrCreate_MFS] = {
-+ .create = au_wbr_create_mfs,
-+ .init = au_wbr_create_init_mfs,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_MFSV] = {
-+ .create = au_wbr_create_mfs,
-+ .init = au_wbr_create_init_mfs,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_MFSRR] = {
-+ .create = au_wbr_create_mfsrr,
-+ .init = au_wbr_create_init_mfsrr,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_MFSRRV] = {
-+ .create = au_wbr_create_mfsrr,
-+ .init = au_wbr_create_init_mfsrr,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_PMFS] = {
-+ .create = au_wbr_create_pmfs,
-+ .init = au_wbr_create_init_mfs,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_PMFSV] = {
-+ .create = au_wbr_create_pmfs,
-+ .init = au_wbr_create_init_mfs,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_PMFSRR] = {
-+ .create = au_wbr_create_pmfsrr,
-+ .init = au_wbr_create_init_mfsrr,
-+ .fin = au_wbr_create_fin_mfs
-+ },
-+ [AuWbrCreate_PMFSRRV] = {
-+ .create = au_wbr_create_pmfsrr,
-+ .init = au_wbr_create_init_mfsrr,
-+ .fin = au_wbr_create_fin_mfs
-+ }
-+};
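
The create policies in the removed fs/aufs/wbr_policy.c above all reduce to picking a writable branch index: tdp walks the parents top-down, rr advances an atomic counter modulo the branch count while skipping read-only branches, and the mfs family statfs()es each writable branch and keeps the roomiest one. Below is a simplified userspace sketch of the round-robin case only, in the spirit of au_wbr_create_rr(); the branch table, the rdonly flags and the counter direction are invented for illustration (the kernel code decrements the counter instead).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NBR 4

/* hypothetical branch table; true means the branch is read-only */
static const bool br_rdonly[NBR] = { true, false, true, false };
static atomic_uint rr_next;

/* pick the next writable branch round-robin, or -1 when all are read-only */
static int pick_rr(void)
{
	for (int i = 0; i < NBR; i++) {
		unsigned int u = atomic_fetch_add(&rr_next, 1);
		int bindex = (int)(u % NBR);

		if (!br_rdonly[bindex])
			return bindex;	/* the kernel returns this as err >= 0 */
	}
	return -1;			/* the kernel returns -EROFS here */
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("picked branch %d\n", pick_rr());
	return 0;
}
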
-diff -Nur linux-4.1.10.orig/fs/aufs/whout.c linux-4.1.10/fs/aufs/whout.c
---- linux-4.1.10.orig/fs/aufs/whout.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/whout.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1063 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * whiteout for logical deletion and opaque directory
-+ */
-+
-+#include "aufs.h"
-+
-+#define WH_MASK S_IRUGO
-+
-+/*
-+ * If a directory contains this file, then it is opaque. The name starts
-+ * with the .wh. prefix so that it is hidden from lookup.
-+ */
-+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ,
-+ sizeof(AUFS_WH_DIROPQ) - 1);
-+
-+/*
-+ * generate a whiteout name, which is NOT NUL-terminated.
-+ * @wh: whiteout qstr to be filled
-+ * @name: original qstr (d_name)
-+ * returns zero on success, otherwise an error.
-+ * on success, wh->name should be freed by kfree().
-+ */
-+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
-+{
-+ char *p;
-+
-+ if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
-+ return -ENAMETOOLONG;
-+
-+ wh->len = name->len + AUFS_WH_PFX_LEN;
-+ p = kmalloc(wh->len, GFP_NOFS);
-+ wh->name = p;
-+ if (p) {
-+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
-+ memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
-+ /* smp_mb(); */
-+ return 0;
-+ }
-+ return -ENOMEM;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * test if the @wh_name exists under @h_parent.
-+ * @try_sio specifies whether super-io is necessary.
-+ */
-+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio)
-+{
-+ int err;
-+ struct dentry *wh_dentry;
-+
-+ if (!try_sio)
-+ wh_dentry = vfsub_lkup_one(wh_name, h_parent);
-+ else
-+ wh_dentry = au_sio_lkup_one(wh_name, h_parent);
-+ err = PTR_ERR(wh_dentry);
-+ if (IS_ERR(wh_dentry)) {
-+ if (err == -ENAMETOOLONG)
-+ err = 0;
-+ goto out;
-+ }
-+
-+ err = 0;
-+ if (d_is_negative(wh_dentry))
-+ goto out_wh; /* success */
-+
-+ err = 1;
-+ if (d_is_reg(wh_dentry))
-+ goto out_wh; /* success */
-+
-+ err = -EIO;
-+ AuIOErr("%pd Invalid whiteout entry type 0%o.\n",
-+ wh_dentry, d_inode(wh_dentry)->i_mode);
-+
-+out_wh:
-+ dput(wh_dentry);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * test if @h_dentry is marked opaque or not.
-+ */
-+int au_diropq_test(struct dentry *h_dentry)
-+{
-+ int err;
-+ struct inode *h_dir;
-+
-+ h_dir = d_inode(h_dentry);
-+ err = au_wh_test(h_dentry, &diropq_name,
-+ au_test_h_perm_sio(h_dir, MAY_EXEC));
-+ return err;
-+}
-+
-+/*
-+ * returns a negative dentry whose name is unique and temporary.
-+ */
-+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
-+ struct qstr *prefix)
-+{
-+ struct dentry *dentry;
-+ int i;
-+ char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
-+ *name, *p;
-+ /* strict atomic_t is unnecessary here */
-+ static unsigned short cnt;
-+ struct qstr qs;
-+
-+ BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
-+
-+ name = defname;
-+ qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
-+ if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
-+ dentry = ERR_PTR(-ENAMETOOLONG);
-+ if (unlikely(qs.len > NAME_MAX))
-+ goto out;
-+ dentry = ERR_PTR(-ENOMEM);
-+ name = kmalloc(qs.len + 1, GFP_NOFS);
-+ if (unlikely(!name))
-+ goto out;
-+ }
-+
-+ /* doubly whiteout-ed */
-+ memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
-+ p = name + AUFS_WH_PFX_LEN * 2;
-+ memcpy(p, prefix->name, prefix->len);
-+ p += prefix->len;
-+ *p++ = '.';
-+ AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
-+
-+ qs.name = name;
-+ for (i = 0; i < 3; i++) {
-+ sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
-+ dentry = au_sio_lkup_one(&qs, h_parent);
-+ if (IS_ERR(dentry) || d_is_negative(dentry))
-+ goto out_name;
-+ dput(dentry);
-+ }
-+ /* pr_warn("could not get random name\n"); */
-+ dentry = ERR_PTR(-EEXIST);
-+ AuDbg("%.*s\n", AuLNPair(&qs));
-+ BUG();
-+
-+out_name:
-+ if (name != defname)
-+ kfree(name);
-+out:
-+ AuTraceErrPtr(dentry);
-+ return dentry;
-+}
-+
-+/*
-+ * rename the @h_dentry on @br to the whiteouted temporary name.
-+ */
-+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
-+{
-+ int err;
-+ struct path h_path = {
-+ .mnt = au_br_mnt(br)
-+ };
-+ struct inode *h_dir, *delegated;
-+ struct dentry *h_parent;
-+
-+ h_parent = h_dentry->d_parent; /* dir inode is locked */
-+ h_dir = d_inode(h_parent);
-+ IMustLock(h_dir);
-+
-+ h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
-+ err = PTR_ERR(h_path.dentry);
-+ if (IS_ERR(h_path.dentry))
-+ goto out;
-+
-+ /* under the same dir, no need to lock_rename() */
-+ delegated = NULL;
-+ err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated);
-+ AuTraceErr(err);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal rename\n");
-+ iput(delegated);
-+ }
-+ dput(h_path.dentry);
-+
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * functions for removing a whiteout
-+ */
-+
-+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
-+{
-+ int err, force;
-+ struct inode *delegated;
-+
-+ /*
-+ * forces superio when the dir has a sticky bit.
-+ * this may be a violation of unix fs semantics.
-+ */
-+ force = (h_dir->i_mode & S_ISVTX)
-+ && !uid_eq(current_fsuid(), d_inode(h_path->dentry)->i_uid);
-+ delegated = NULL;
-+ err = vfsub_unlink(h_dir, h_path, &delegated, force);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ return err;
-+}
-+
-+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
-+ struct dentry *dentry)
-+{
-+ int err;
-+
-+ err = do_unlink_wh(h_dir, h_path);
-+ if (!err && dentry)
-+ au_set_dbwh(dentry, -1);
-+
-+ return err;
-+}
-+
-+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
-+ struct au_branch *br)
-+{
-+ int err;
-+ struct path h_path = {
-+ .mnt = au_br_mnt(br)
-+ };
-+
-+ err = 0;
-+ h_path.dentry = vfsub_lkup_one(wh, h_parent);
-+ if (IS_ERR(h_path.dentry))
-+ err = PTR_ERR(h_path.dentry);
-+ else {
-+ if (d_is_reg(h_path.dentry))
-+ err = do_unlink_wh(d_inode(h_parent), &h_path);
-+ dput(h_path.dentry);
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * initialize/clean whiteout for a branch
-+ */
-+
-+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
-+ const int isdir)
-+{
-+ int err;
-+ struct inode *delegated;
-+
-+ if (d_is_negative(whpath->dentry))
-+ return;
-+
-+ if (isdir)
-+ err = vfsub_rmdir(h_dir, whpath);
-+ else {
-+ delegated = NULL;
-+ err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ }
-+ if (unlikely(err))
-+ pr_warn("failed removing %pd (%d), ignored.\n",
-+ whpath->dentry, err);
-+}
-+
-+static int test_linkable(struct dentry *h_root)
-+{
-+ struct inode *h_dir = d_inode(h_root);
-+
-+ if (h_dir->i_op->link)
-+ return 0;
-+
-+ pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n",
-+ h_root, au_sbtype(h_root->d_sb));
-+ return -ENOSYS;
-+}
-+
-+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
-+static int au_whdir(struct inode *h_dir, struct path *path)
-+{
-+ int err;
-+
-+ err = -EEXIST;
-+ if (d_is_negative(path->dentry)) {
-+ int mode = S_IRWXU;
-+
-+ if (au_test_nfs(path->dentry->d_sb))
-+ mode |= S_IXUGO;
-+ err = vfsub_mkdir(h_dir, path, mode);
-+ } else if (d_is_dir(path->dentry))
-+ err = 0;
-+ else
-+ pr_err("unknown %pd exists\n", path->dentry);
-+
-+ return err;
-+}
-+
-+struct au_wh_base {
-+ const struct qstr *name;
-+ struct dentry *dentry;
-+};
-+
-+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
-+ struct path *h_path)
-+{
-+ h_path->dentry = base[AuBrWh_BASE].dentry;
-+ au_wh_clean(h_dir, h_path, /*isdir*/0);
-+ h_path->dentry = base[AuBrWh_PLINK].dentry;
-+ au_wh_clean(h_dir, h_path, /*isdir*/1);
-+ h_path->dentry = base[AuBrWh_ORPH].dentry;
-+ au_wh_clean(h_dir, h_path, /*isdir*/1);
-+}
-+
-+/*
-+ * returns tri-state,
-+ * minus: error, caller should print the message
-+ * zero: success
-+ * plus: error, caller should NOT print the message
-+ */
-+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
-+ int do_plink, struct au_wh_base base[],
-+ struct path *h_path)
-+{
-+ int err;
-+ struct inode *h_dir;
-+
-+ h_dir = d_inode(h_root);
-+ h_path->dentry = base[AuBrWh_BASE].dentry;
-+ au_wh_clean(h_dir, h_path, /*isdir*/0);
-+ h_path->dentry = base[AuBrWh_PLINK].dentry;
-+ if (do_plink) {
-+ err = test_linkable(h_root);
-+ if (unlikely(err)) {
-+ err = 1;
-+ goto out;
-+ }
-+
-+ err = au_whdir(h_dir, h_path);
-+ if (unlikely(err))
-+ goto out;
-+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
-+ } else
-+ au_wh_clean(h_dir, h_path, /*isdir*/1);
-+ h_path->dentry = base[AuBrWh_ORPH].dentry;
-+ err = au_whdir(h_dir, h_path);
-+ if (unlikely(err))
-+ goto out;
-+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * for the moment, aufs supports branch filesystems which do not support
-+ * link(2). in testing on FAT, which does not fully support i_op->setattr()
-+ * either, copyup failed. ultimately, such a filesystem will not be used as
-+ * a writable branch.
-+ *
-+ * returns tri-state, see above.
-+ */
-+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
-+ int do_plink, struct au_wh_base base[],
-+ struct path *h_path)
-+{
-+ int err;
-+ struct inode *h_dir;
-+
-+ WbrWhMustWriteLock(wbr);
-+
-+ err = test_linkable(h_root);
-+ if (unlikely(err)) {
-+ err = 1;
-+ goto out;
-+ }
-+
-+ /*
-+ * todo: should this create be done in /sbin/mount.aufs helper?
-+ */
-+ err = -EEXIST;
-+ h_dir = d_inode(h_root);
-+ if (d_is_negative(base[AuBrWh_BASE].dentry)) {
-+ h_path->dentry = base[AuBrWh_BASE].dentry;
-+ err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true);
-+ } else if (d_is_reg(base[AuBrWh_BASE].dentry))
-+ err = 0;
-+ else
-+ pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry);
-+ if (unlikely(err))
-+ goto out;
-+
-+ h_path->dentry = base[AuBrWh_PLINK].dentry;
-+ if (do_plink) {
-+ err = au_whdir(h_dir, h_path);
-+ if (unlikely(err))
-+ goto out;
-+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
-+ } else
-+ au_wh_clean(h_dir, h_path, /*isdir*/1);
-+ wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
-+
-+ h_path->dentry = base[AuBrWh_ORPH].dentry;
-+ err = au_whdir(h_dir, h_path);
-+ if (unlikely(err))
-+ goto out;
-+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * initialize the whiteout base file/dir for @br.
-+ */
-+int au_wh_init(struct au_branch *br, struct super_block *sb)
-+{
-+ int err, i;
-+ const unsigned char do_plink
-+ = !!au_opt_test(au_mntflags(sb), PLINK);
-+ struct inode *h_dir;
-+ struct path path = br->br_path;
-+ struct dentry *h_root = path.dentry;
-+ struct au_wbr *wbr = br->br_wbr;
-+ static const struct qstr base_name[] = {
-+ [AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME,
-+ sizeof(AUFS_BASE_NAME) - 1),
-+ [AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME,
-+ sizeof(AUFS_PLINKDIR_NAME) - 1),
-+ [AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME,
-+ sizeof(AUFS_ORPHDIR_NAME) - 1)
-+ };
-+ struct au_wh_base base[] = {
-+ [AuBrWh_BASE] = {
-+ .name = base_name + AuBrWh_BASE,
-+ .dentry = NULL
-+ },
-+ [AuBrWh_PLINK] = {
-+ .name = base_name + AuBrWh_PLINK,
-+ .dentry = NULL
-+ },
-+ [AuBrWh_ORPH] = {
-+ .name = base_name + AuBrWh_ORPH,
-+ .dentry = NULL
-+ }
-+ };
-+
-+ if (wbr)
-+ WbrWhMustWriteLock(wbr);
-+
-+ for (i = 0; i < AuBrWh_Last; i++) {
-+ /* doubly whiteouted */
-+ struct dentry *d;
-+
-+ d = au_wh_lkup(h_root, (void *)base[i].name, br);
-+ err = PTR_ERR(d);
-+ if (IS_ERR(d))
-+ goto out;
-+
-+ base[i].dentry = d;
-+ AuDebugOn(wbr
-+ && wbr->wbr_wh[i]
-+ && wbr->wbr_wh[i] != base[i].dentry);
-+ }
-+
-+ if (wbr)
-+ for (i = 0; i < AuBrWh_Last; i++) {
-+ dput(wbr->wbr_wh[i]);
-+ wbr->wbr_wh[i] = NULL;
-+ }
-+
-+ err = 0;
-+ if (!au_br_writable(br->br_perm)) {
-+ h_dir = d_inode(h_root);
-+ au_wh_init_ro(h_dir, base, &path);
-+ } else if (!au_br_wh_linkable(br->br_perm)) {
-+ err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
-+ if (err > 0)
-+ goto out;
-+ else if (err)
-+ goto out_err;
-+ } else {
-+ err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
-+ if (err > 0)
-+ goto out;
-+ else if (err)
-+ goto out_err;
-+ }
-+ goto out; /* success */
-+
-+out_err:
-+ pr_err("an error(%d) on the writable branch %pd(%s)\n",
-+ err, h_root, au_sbtype(h_root->d_sb));
-+out:
-+ for (i = 0; i < AuBrWh_Last; i++)
-+ dput(base[i].dentry);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+/*
-+ * usually, whiteouts are all hard-linked to a single base file.
-+ * when the base's link count reaches a ceiling, we create a new whiteout
-+ * base asynchronously.
-+ */
-+
-+struct reinit_br_wh {
-+ struct super_block *sb;
-+ struct au_branch *br;
-+};
-+
-+static void reinit_br_wh(void *arg)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct path h_path;
-+ struct reinit_br_wh *a = arg;
-+ struct au_wbr *wbr;
-+ struct inode *dir, *delegated;
-+ struct dentry *h_root;
-+ struct au_hinode *hdir;
-+
-+ err = 0;
-+ wbr = a->br->br_wbr;
-+ /* big aufs lock */
-+ si_noflush_write_lock(a->sb);
-+ if (!au_br_writable(a->br->br_perm))
-+ goto out;
-+ bindex = au_br_index(a->sb, a->br->br_id);
-+ if (unlikely(bindex < 0))
-+ goto out;
-+
-+ di_read_lock_parent(a->sb->s_root, AuLock_IR);
-+ dir = d_inode(a->sb->s_root);
-+ hdir = au_hi(dir, bindex);
-+ h_root = au_h_dptr(a->sb->s_root, bindex);
-+ AuDebugOn(h_root != au_br_dentry(a->br));
-+
-+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ wbr_wh_write_lock(wbr);
-+ err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
-+ h_root, a->br);
-+ if (!err) {
-+ h_path.dentry = wbr->wbr_whbase;
-+ h_path.mnt = au_br_mnt(a->br);
-+ delegated = NULL;
-+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated,
-+ /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ } else {
-+ pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase);
-+ err = 0;
-+ }
-+ dput(wbr->wbr_whbase);
-+ wbr->wbr_whbase = NULL;
-+ if (!err)
-+ err = au_wh_init(a->br, a->sb);
-+ wbr_wh_write_unlock(wbr);
-+ au_hn_imtx_unlock(hdir);
-+ di_read_unlock(a->sb->s_root, AuLock_IR);
-+ if (!err)
-+ au_fhsm_wrote(a->sb, bindex, /*force*/0);
-+
-+out:
-+ if (wbr)
-+ atomic_dec(&wbr->wbr_wh_running);
-+ atomic_dec(&a->br->br_count);
-+ si_write_unlock(a->sb);
-+ au_nwt_done(&au_sbi(a->sb)->si_nowait);
-+ kfree(arg);
-+ if (unlikely(err))
-+ AuIOErr("err %d\n", err);
-+}
-+
-+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
-+{
-+ int do_dec, wkq_err;
-+ struct reinit_br_wh *arg;
-+
-+ do_dec = 1;
-+ if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
-+ goto out;
-+
-+ /* ignore ENOMEM */
-+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
-+ if (arg) {
-+ /*
-+ * dec(wh_running), kfree(arg) and dec(br_count)
-+ * in reinit function
-+ */
-+ arg->sb = sb;
-+ arg->br = br;
-+ atomic_inc(&br->br_count);
-+ wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
-+ if (unlikely(wkq_err)) {
-+ atomic_dec(&br->br_wbr->wbr_wh_running);
-+ atomic_dec(&br->br_count);
-+ kfree(arg);
-+ }
-+ do_dec = 0;
-+ }
-+
-+out:
-+ if (do_dec)
-+ atomic_dec(&br->br_wbr->wbr_wh_running);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * create the whiteout @wh.
-+ */
-+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
-+ struct dentry *wh)
-+{
-+ int err;
-+ struct path h_path = {
-+ .dentry = wh
-+ };
-+ struct au_branch *br;
-+ struct au_wbr *wbr;
-+ struct dentry *h_parent;
-+ struct inode *h_dir, *delegated;
-+
-+ h_parent = wh->d_parent; /* dir inode is locked */
-+ h_dir = d_inode(h_parent);
-+ IMustLock(h_dir);
-+
-+ br = au_sbr(sb, bindex);
-+ h_path.mnt = au_br_mnt(br);
-+ wbr = br->br_wbr;
-+ wbr_wh_read_lock(wbr);
-+ if (wbr->wbr_whbase) {
-+ delegated = NULL;
-+ err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal link\n");
-+ iput(delegated);
-+ }
-+ if (!err || err != -EMLINK)
-+ goto out;
-+
-+ /* link count full. re-initialize br_whbase. */
-+ kick_reinit_br_wh(sb, br);
-+ }
-+
-+ /* return this error in this context */
-+ err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true);
-+ if (!err)
-+ au_fhsm_wrote(sb, bindex, /*force*/0);
-+
-+out:
-+ wbr_wh_read_unlock(wbr);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * create or remove the diropq.
-+ */
-+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
-+ unsigned int flags)
-+{
-+ struct dentry *opq_dentry, *h_dentry;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ int err;
-+
-+ sb = dentry->d_sb;
-+ br = au_sbr(sb, bindex);
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry);
-+ if (IS_ERR(opq_dentry))
-+ goto out;
-+
-+ if (au_ftest_diropq(flags, CREATE)) {
-+ err = link_or_create_wh(sb, bindex, opq_dentry);
-+ if (!err) {
-+ au_set_dbdiropq(dentry, bindex);
-+ goto out; /* success */
-+ }
-+ } else {
-+ struct path tmp = {
-+ .dentry = opq_dentry,
-+ .mnt = au_br_mnt(br)
-+ };
-+ err = do_unlink_wh(au_h_iptr(d_inode(dentry), bindex), &tmp);
-+ if (!err)
-+ au_set_dbdiropq(dentry, -1);
-+ }
-+ dput(opq_dentry);
-+ opq_dentry = ERR_PTR(err);
-+
-+out:
-+ return opq_dentry;
-+}
-+
-+struct do_diropq_args {
-+ struct dentry **errp;
-+ struct dentry *dentry;
-+ aufs_bindex_t bindex;
-+ unsigned int flags;
-+};
-+
-+static void call_do_diropq(void *args)
-+{
-+ struct do_diropq_args *a = args;
-+ *a->errp = do_diropq(a->dentry, a->bindex, a->flags);
-+}
-+
-+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
-+ unsigned int flags)
-+{
-+ struct dentry *diropq, *h_dentry;
-+
-+ h_dentry = au_h_dptr(dentry, bindex);
-+ if (!au_test_h_perm_sio(d_inode(h_dentry), MAY_EXEC | MAY_WRITE))
-+ diropq = do_diropq(dentry, bindex, flags);
-+ else {
-+ int wkq_err;
-+ struct do_diropq_args args = {
-+ .errp = &diropq,
-+ .dentry = dentry,
-+ .bindex = bindex,
-+ .flags = flags
-+ };
-+
-+ wkq_err = au_wkq_wait(call_do_diropq, &args);
-+ if (unlikely(wkq_err))
-+ diropq = ERR_PTR(wkq_err);
-+ }
-+
-+ return diropq;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * lookup whiteout dentry.
-+ * @h_parent: lower parent dentry which must exist and be locked
-+ * @base_name: name of dentry which will be whiteouted
-+ * returns dentry for whiteout.
-+ */
-+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
-+ struct au_branch *br)
-+{
-+ int err;
-+ struct qstr wh_name;
-+ struct dentry *wh_dentry;
-+
-+ err = au_wh_name_alloc(&wh_name, base_name);
-+ wh_dentry = ERR_PTR(err);
-+ if (!err) {
-+ wh_dentry = vfsub_lkup_one(&wh_name, h_parent);
-+ kfree(wh_name.name);
-+ }
-+ return wh_dentry;
-+}
-+
-+/*
-+ * link/create a whiteout for @dentry on @bindex.
-+ */
-+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent)
-+{
-+ struct dentry *wh_dentry;
-+ struct super_block *sb;
-+ int err;
-+
-+ sb = dentry->d_sb;
-+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
-+ if (!IS_ERR(wh_dentry) && d_is_negative(wh_dentry)) {
-+ err = link_or_create_wh(sb, bindex, wh_dentry);
-+ if (!err) {
-+ au_set_dbwh(dentry, bindex);
-+ au_fhsm_wrote(sb, bindex, /*force*/0);
-+ } else {
-+ dput(wh_dentry);
-+ wh_dentry = ERR_PTR(err);
-+ }
-+ }
-+
-+ return wh_dentry;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* Delete all whiteouts in this directory on branch bindex. */
-+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
-+ aufs_bindex_t bindex, struct au_branch *br)
-+{
-+ int err;
-+ unsigned long ul, n;
-+ struct qstr wh_name;
-+ char *p;
-+ struct hlist_head *head;
-+ struct au_vdir_wh *pos;
-+ struct au_vdir_destr *str;
-+
-+ err = -ENOMEM;
-+ p = (void *)__get_free_page(GFP_NOFS);
-+ wh_name.name = p;
-+ if (unlikely(!wh_name.name))
-+ goto out;
-+
-+ err = 0;
-+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
-+ p += AUFS_WH_PFX_LEN;
-+ n = whlist->nh_num;
-+ head = whlist->nh_head;
-+ for (ul = 0; !err && ul < n; ul++, head++) {
-+ hlist_for_each_entry(pos, head, wh_hash) {
-+ if (pos->wh_bindex != bindex)
-+ continue;
-+
-+ str = &pos->wh_str;
-+ if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
-+ memcpy(p, str->name, str->len);
-+ wh_name.len = AUFS_WH_PFX_LEN + str->len;
-+ err = unlink_wh_name(h_dentry, &wh_name, br);
-+ if (!err)
-+ continue;
-+ break;
-+ }
-+ AuIOErr("whiteout name too long %.*s\n",
-+ str->len, str->name);
-+ err = -EIO;
-+ break;
-+ }
-+ }
-+ free_page((unsigned long)wh_name.name);
-+
-+out:
-+ return err;
-+}
-+
-+struct del_wh_children_args {
-+ int *errp;
-+ struct dentry *h_dentry;
-+ struct au_nhash *whlist;
-+ aufs_bindex_t bindex;
-+ struct au_branch *br;
-+};
-+
-+static void call_del_wh_children(void *args)
-+{
-+ struct del_wh_children_args *a = args;
-+ *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
-+{
-+ struct au_whtmp_rmdir *whtmp;
-+ int err;
-+ unsigned int rdhash;
-+
-+ SiMustAnyLock(sb);
-+
-+ whtmp = kmalloc(sizeof(*whtmp), gfp);
-+ if (unlikely(!whtmp)) {
-+ whtmp = ERR_PTR(-ENOMEM);
-+ goto out;
-+ }
-+
-+ whtmp->dir = NULL;
-+ whtmp->br = NULL;
-+ whtmp->wh_dentry = NULL;
-+ /* no estimation for dir size */
-+ rdhash = au_sbi(sb)->si_rdhash;
-+ if (!rdhash)
-+ rdhash = AUFS_RDHASH_DEF;
-+ err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
-+ if (unlikely(err)) {
-+ kfree(whtmp);
-+ whtmp = ERR_PTR(err);
-+ }
-+
-+out:
-+ return whtmp;
-+}
-+
-+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
-+{
-+ if (whtmp->br)
-+ atomic_dec(&whtmp->br->br_count);
-+ dput(whtmp->wh_dentry);
-+ iput(whtmp->dir);
-+ au_nhash_wh_free(&whtmp->whlist);
-+ kfree(whtmp);
-+}
-+
-+/*
-+ * rmdir the whiteouted temporary named dir @h_dentry.
-+ * @whlist: whiteouted children.
-+ */
-+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
-+ struct dentry *wh_dentry, struct au_nhash *whlist)
-+{
-+ int err;
-+ unsigned int h_nlink;
-+ struct path h_tmp;
-+ struct inode *wh_inode, *h_dir;
-+ struct au_branch *br;
-+
-+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
-+ IMustLock(h_dir);
-+
-+ br = au_sbr(dir->i_sb, bindex);
-+ wh_inode = d_inode(wh_dentry);
-+ mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD);
-+
-+ /*
-+ * someone else might change some whiteouts while we were sleeping.
-+ * it means this whlist may have an obsoleted entry.
-+ */
-+ if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
-+ err = del_wh_children(wh_dentry, whlist, bindex, br);
-+ else {
-+ int wkq_err;
-+ struct del_wh_children_args args = {
-+ .errp = &err,
-+ .h_dentry = wh_dentry,
-+ .whlist = whlist,
-+ .bindex = bindex,
-+ .br = br
-+ };
-+
-+ wkq_err = au_wkq_wait(call_del_wh_children, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+ mutex_unlock(&wh_inode->i_mutex);
-+
-+ if (!err) {
-+ h_tmp.dentry = wh_dentry;
-+ h_tmp.mnt = au_br_mnt(br);
-+ h_nlink = h_dir->i_nlink;
-+ err = vfsub_rmdir(h_dir, &h_tmp);
-+ /* some filesystems don't change the parent nlink in some cases */
-+ h_nlink -= h_dir->i_nlink;
-+ }
-+
-+ if (!err) {
-+ if (au_ibstart(dir) == bindex) {
-+ /* todo: dir->i_mutex is necessary */
-+ au_cpup_attr_timesizes(dir);
-+ if (h_nlink)
-+ vfsub_drop_nlink(dir);
-+ }
-+ return 0; /* success */
-+ }
-+
-+ pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err);
-+ return err;
-+}
-+
-+static void call_rmdir_whtmp(void *args)
-+{
-+ int err;
-+ aufs_bindex_t bindex;
-+ struct au_whtmp_rmdir *a = args;
-+ struct super_block *sb;
-+ struct dentry *h_parent;
-+ struct inode *h_dir;
-+ struct au_hinode *hdir;
-+
-+ /* rmdir by nfsd may cause deadlock with this i_mutex */
-+ /* mutex_lock(&a->dir->i_mutex); */
-+ err = -EROFS;
-+ sb = a->dir->i_sb;
-+ si_read_lock(sb, !AuLock_FLUSH);
-+ if (!au_br_writable(a->br->br_perm))
-+ goto out;
-+ bindex = au_br_index(sb, a->br->br_id);
-+ if (unlikely(bindex < 0))
-+ goto out;
-+
-+ err = -EIO;
-+ ii_write_lock_parent(a->dir);
-+ h_parent = dget_parent(a->wh_dentry);
-+ h_dir = d_inode(h_parent);
-+ hdir = au_hi(a->dir, bindex);
-+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
-+ if (unlikely(err))
-+ goto out_mnt;
-+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
-+ err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
-+ a->br);
-+ if (!err)
-+ err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist);
-+ au_hn_imtx_unlock(hdir);
-+ vfsub_mnt_drop_write(au_br_mnt(a->br));
-+
-+out_mnt:
-+ dput(h_parent);
-+ ii_write_unlock(a->dir);
-+out:
-+ /* mutex_unlock(&a->dir->i_mutex); */
-+ au_whtmp_rmdir_free(a);
-+ si_read_unlock(sb);
-+ au_nwt_done(&au_sbi(sb)->si_nowait);
-+ if (unlikely(err))
-+ AuIOErr("err %d\n", err);
-+}
-+
-+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
-+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
-+{
-+ int wkq_err;
-+ struct super_block *sb;
-+
-+ IMustLock(dir);
-+
-+ /* all post-process will be done in call_rmdir_whtmp(). */
-+ sb = dir->i_sb;
-+ args->dir = au_igrab(dir);
-+ args->br = au_sbr(sb, bindex);
-+ atomic_inc(&args->br->br_count);
-+ args->wh_dentry = dget(wh_dentry);
-+ wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
-+ if (unlikely(wkq_err)) {
-+ pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err);
-+ au_whtmp_rmdir_free(args);
-+ }
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/whout.h linux-4.1.10/fs/aufs/whout.h
---- linux-4.1.10.orig/fs/aufs/whout.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/whout.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,85 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * whiteout for logical deletion and opaque directory
-+ */
-+
-+#ifndef __AUFS_WHOUT_H__
-+#define __AUFS_WHOUT_H__
-+
-+#ifdef __KERNEL__
-+
-+#include "dir.h"
-+
-+/* whout.c */
-+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
-+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio);
-+int au_diropq_test(struct dentry *h_dentry);
-+struct au_branch;
-+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
-+ struct qstr *prefix);
-+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
-+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
-+ struct dentry *dentry);
-+int au_wh_init(struct au_branch *br, struct super_block *sb);
-+
-+/* diropq flags */
-+#define AuDiropq_CREATE 1
-+#define au_ftest_diropq(flags, name) ((flags) & AuDiropq_##name)
-+#define au_fset_diropq(flags, name) \
-+ do { (flags) |= AuDiropq_##name; } while (0)
-+#define au_fclr_diropq(flags, name) \
-+ do { (flags) &= ~AuDiropq_##name; } while (0)
-+
-+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
-+ unsigned int flags);
-+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
-+ struct au_branch *br);
-+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
-+ struct dentry *h_parent);
-+
-+/* real rmdir for the whiteout-ed dir */
-+struct au_whtmp_rmdir {
-+ struct inode *dir;
-+ struct au_branch *br;
-+ struct dentry *wh_dentry;
-+ struct au_nhash whlist;
-+};
-+
-+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
-+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
-+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
-+ struct dentry *wh_dentry, struct au_nhash *whlist);
-+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
-+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline struct dentry *au_diropq_create(struct dentry *dentry,
-+ aufs_bindex_t bindex)
-+{
-+ return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
-+}
-+
-+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
-+{
-+ return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_WHOUT_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/wkq.c linux-4.1.10/fs/aufs/wkq.c
---- linux-4.1.10.orig/fs/aufs/wkq.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/wkq.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,213 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * workqueue for asynchronous/super-io operations
-+ * todo: try new dredential scheme
-+ */
-+
-+#include <linux/module.h>
-+#include "aufs.h"
-+
-+/* internal workqueue named AUFS_WKQ_NAME */
-+
-+static struct workqueue_struct *au_wkq;
-+
-+struct au_wkinfo {
-+ struct work_struct wk;
-+ struct kobject *kobj;
-+
-+ unsigned int flags; /* see wkq.h */
-+
-+ au_wkq_func_t func;
-+ void *args;
-+
-+ struct completion *comp;
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void wkq_func(struct work_struct *wk)
-+{
-+ struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
-+
-+ AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
-+ AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
-+
-+ wkinfo->func(wkinfo->args);
-+ if (au_ftest_wkq(wkinfo->flags, WAIT))
-+ complete(wkinfo->comp);
-+ else {
-+ kobject_put(wkinfo->kobj);
-+ module_put(THIS_MODULE); /* todo: ?? */
-+ kfree(wkinfo);
-+ }
-+}
-+
-+/*
-+ * Since struct completion is large, try allocating it dynamically.
-+ */
-+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
-+#define AuWkqCompDeclare(name) struct completion *comp = NULL
-+
-+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
-+{
-+ *comp = kmalloc(sizeof(**comp), GFP_NOFS);
-+ if (*comp) {
-+ init_completion(*comp);
-+ wkinfo->comp = *comp;
-+ return 0;
-+ }
-+ return -ENOMEM;
-+}
-+
-+static void au_wkq_comp_free(struct completion *comp)
-+{
-+ kfree(comp);
-+}
-+
-+#else
-+
-+/* no braces */
-+#define AuWkqCompDeclare(name) \
-+ DECLARE_COMPLETION_ONSTACK(_ ## name); \
-+ struct completion *comp = &_ ## name
-+
-+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
-+{
-+ wkinfo->comp = *comp;
-+ return 0;
-+}
-+
-+static void au_wkq_comp_free(struct completion *comp __maybe_unused)
-+{
-+ /* empty */
-+}
-+#endif /* 4KSTACKS */
-+
-+static void au_wkq_run(struct au_wkinfo *wkinfo)
-+{
-+ if (au_ftest_wkq(wkinfo->flags, NEST)) {
-+ if (au_wkq_test()) {
-+ AuWarn1("wkq from wkq, unless silly-rename on NFS,"
-+ " due to a dead dir by UDBA?\n");
-+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
-+ }
-+ } else
-+ au_dbg_verify_kthread();
-+
-+ if (au_ftest_wkq(wkinfo->flags, WAIT)) {
-+ INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
-+ queue_work(au_wkq, &wkinfo->wk);
-+ } else {
-+ INIT_WORK(&wkinfo->wk, wkq_func);
-+ schedule_work(&wkinfo->wk);
-+ }
-+}
-+
-+/*
-+ * Be careful. It is easy to make deadlock happen.
-+ * processA: lock, wkq and wait
-+ * processB: wkq and wait, lock in wkq
-+ * --> deadlock
-+ */
-+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
-+{
-+ int err;
-+ AuWkqCompDeclare(comp);
-+ struct au_wkinfo wkinfo = {
-+ .flags = flags,
-+ .func = func,
-+ .args = args
-+ };
-+
-+ err = au_wkq_comp_alloc(&wkinfo, &comp);
-+ if (!err) {
-+ au_wkq_run(&wkinfo);
-+ /* no timeout, no interrupt */
-+ wait_for_completion(wkinfo.comp);
-+ au_wkq_comp_free(comp);
-+ destroy_work_on_stack(&wkinfo.wk);
-+ }
-+
-+ return err;
-+
-+}
-+
-+/*
-+ * Note: dget/dput() for aufs dentries in @func are not supported. It would
-+ * be a problem during a concurrent umount.
-+ */
-+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
-+ unsigned int flags)
-+{
-+ int err;
-+ struct au_wkinfo *wkinfo;
-+
-+ atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
-+
-+ /*
-+ * wkq_func() must free this wkinfo.
-+ * it highly depends upon the implementation of workqueue.
-+ */
-+ err = 0;
-+ wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
-+ if (wkinfo) {
-+ wkinfo->kobj = &au_sbi(sb)->si_kobj;
-+ wkinfo->flags = flags & ~AuWkq_WAIT;
-+ wkinfo->func = func;
-+ wkinfo->args = args;
-+ wkinfo->comp = NULL;
-+ kobject_get(wkinfo->kobj);
-+ __module_get(THIS_MODULE); /* todo: ?? */
-+
-+ au_wkq_run(wkinfo);
-+ } else {
-+ err = -ENOMEM;
-+ au_nwt_done(&au_sbi(sb)->si_nowait);
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+void au_nwt_init(struct au_nowait_tasks *nwt)
-+{
-+ atomic_set(&nwt->nw_len, 0);
-+ /* smp_mb(); */ /* atomic_set */
-+ init_waitqueue_head(&nwt->nw_wq);
-+}
-+
-+void au_wkq_fin(void)
-+{
-+ destroy_workqueue(au_wkq);
-+}
-+
-+int __init au_wkq_init(void)
-+{
-+ int err;
-+
-+ err = 0;
-+ au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
-+ if (IS_ERR(au_wkq))
-+ err = PTR_ERR(au_wkq);
-+ else if (!au_wkq)
-+ err = -ENOMEM;
-+
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/aufs/wkq.h linux-4.1.10/fs/aufs/wkq.h
---- linux-4.1.10.orig/fs/aufs/wkq.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/wkq.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,91 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * workqueue for asynchronous/super-io operations
-+ * todo: try new credentials management scheme
-+ */
-+
-+#ifndef __AUFS_WKQ_H__
-+#define __AUFS_WKQ_H__
-+
-+#ifdef __KERNEL__
-+
-+struct super_block;
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * in the next operation, wait for the 'nowait' tasks in system-wide workqueue
-+ */
-+struct au_nowait_tasks {
-+ atomic_t nw_len;
-+ wait_queue_head_t nw_wq;
-+};
-+
-+/* ---------------------------------------------------------------------- */
-+
-+typedef void (*au_wkq_func_t)(void *args);
-+
-+/* wkq flags */
-+#define AuWkq_WAIT 1
-+#define AuWkq_NEST (1 << 1)
-+#define au_ftest_wkq(flags, name) ((flags) & AuWkq_##name)
-+#define au_fset_wkq(flags, name) \
-+ do { (flags) |= AuWkq_##name; } while (0)
-+#define au_fclr_wkq(flags, name) \
-+ do { (flags) &= ~AuWkq_##name; } while (0)
-+
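-+/* NEST marks nested wkq requests, which only hnotify issues; without
-+ * hnotify support the flag degenerates to zero (an assumption drawn from
-+ * the #ifndef below) */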
-+#ifndef CONFIG_AUFS_HNOTIFY
-+#undef AuWkq_NEST
-+#define AuWkq_NEST 0
-+#endif
-+
-+/* wkq.c */
-+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
-+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
-+ unsigned int flags);
-+void au_nwt_init(struct au_nowait_tasks *nwt);
-+int __init au_wkq_init(void);
-+void au_wkq_fin(void);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static inline int au_wkq_test(void)
-+{
-+ return current->flags & PF_WQ_WORKER;
-+}
-+
-+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
-+{
-+ return au_wkq_do_wait(AuWkq_WAIT, func, args);
-+}
-+
-+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
-+{
-+ if (atomic_dec_and_test(&nwt->nw_len))
-+ wake_up_all(&nwt->nw_wq);
-+}
-+
-+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
-+{
-+ wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
-+ return 0;
-+}
-+
-+#endif /* __KERNEL__ */
-+#endif /* __AUFS_WKQ_H__ */
-diff -Nur linux-4.1.10.orig/fs/aufs/xattr.c linux-4.1.10/fs/aufs/xattr.c
---- linux-4.1.10.orig/fs/aufs/xattr.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/xattr.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,344 @@
-+/*
-+ * Copyright (C) 2014-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * handling xattr functions
-+ */
-+
-+#include <linux/xattr.h>
-+#include "aufs.h"
-+
-+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags)
-+{
-+ if (!ignore_flags)
-+ goto out;
-+ switch (err) {
-+ case -ENOMEM:
-+ case -EDQUOT:
-+ goto out;
-+ }
-+
-+ if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) {
-+ err = 0;
-+ goto out;
-+ }
-+
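-+/*
-+ * map each xattr namespace prefix to its AuBrAttr_ICEX_* flag; when the
-+ * branch attribute is set, the error for names in that namespace is ignored.
-+ */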
-+#define cmp(brattr, prefix) do { \
-+ if (!strncmp(name, XATTR_##prefix##_PREFIX, \
-+ XATTR_##prefix##_PREFIX_LEN)) { \
-+ if (ignore_flags & AuBrAttr_ICEX_##brattr) \
-+ err = 0; \
-+ goto out; \
-+ } \
-+ } while (0)
-+
-+ cmp(SEC, SECURITY);
-+ cmp(SYS, SYSTEM);
-+ cmp(TR, TRUSTED);
-+ cmp(USR, USER);
-+#undef cmp
-+
-+ if (ignore_flags & AuBrAttr_ICEX_OTH)
-+ err = 0;
-+
-+out:
-+ return err;
-+}
-+
-+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1;
-+
-+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src,
-+ char *name, char **buf, unsigned int ignore_flags,
-+ unsigned int verbose)
-+{
-+ int err;
-+ ssize_t ssz;
-+ struct inode *h_idst;
-+
-+ ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS);
-+ err = ssz;
-+ if (unlikely(err <= 0)) {
-+ if (err == -ENODATA
-+ || (err == -EOPNOTSUPP
-+ && ((ignore_flags & au_xattr_out_of_list)
-+ || (au_test_nfs_noacl(d_inode(h_src))
-+ && (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)
-+ || !strcmp(name,
-+ XATTR_NAME_POSIX_ACL_DEFAULT))))
-+ ))
-+ err = 0;
-+ if (err && (verbose || au_debug_test()))
-+ pr_err("%s, err %d\n", name, err);
-+ goto out;
-+ }
-+
-+ /* unlock it temporarily */
-+ h_idst = d_inode(h_dst);
-+ mutex_unlock(&h_idst->i_mutex);
-+ err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0);
-+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2);
-+ if (unlikely(err)) {
-+ if (verbose || au_debug_test())
-+ pr_err("%s, err %d\n", name, err);
-+ err = au_xattr_ignore(err, name, ignore_flags);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
-+ unsigned int verbose)
-+{
-+ int err, unlocked, acl_access, acl_default;
-+ ssize_t ssz;
-+ struct inode *h_isrc, *h_idst;
-+ char *value, *p, *o, *e;
-+
-+ /* try to stop updates to the source inode while we reference it */
-+ /* there should not be a parent-child relationship between them */
-+ h_isrc = d_inode(h_src);
-+ h_idst = d_inode(h_dst);
-+ mutex_unlock(&h_idst->i_mutex);
-+ mutex_lock_nested(&h_isrc->i_mutex, AuLsc_I_CHILD);
-+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2);
-+ unlocked = 0;
-+
-+ /* some filesystems don't list POSIX ACL, for example tmpfs */
-+ ssz = vfs_listxattr(h_src, NULL, 0);
-+ err = ssz;
-+ if (unlikely(err < 0)) {
-+ AuTraceErr(err);
-+ if (err == -ENODATA
-+ || err == -EOPNOTSUPP)
-+ err = 0; /* ignore */
-+ goto out;
-+ }
-+
-+ err = 0;
-+ p = NULL;
-+ o = NULL;
-+ if (ssz) {
-+ err = -ENOMEM;
-+ p = kmalloc(ssz, GFP_NOFS);
-+ o = p;
-+ if (unlikely(!p))
-+ goto out;
-+ err = vfs_listxattr(h_src, p, ssz);
-+ }
-+ mutex_unlock(&h_isrc->i_mutex);
-+ unlocked = 1;
-+ AuDbg("err %d, ssz %zd\n", err, ssz);
-+ if (unlikely(err < 0))
-+ goto out_free;
-+
-+ err = 0;
-+ e = p + ssz;
-+ value = NULL;
-+ acl_access = 0;
-+ acl_default = 0;
-+ while (!err && p < e) {
-+ acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS,
-+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1);
-+ acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT,
-+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)
-+ - 1);
-+ err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags,
-+ verbose);
-+ p += strlen(p) + 1;
-+ }
-+ AuTraceErr(err);
-+ ignore_flags |= au_xattr_out_of_list;
-+ if (!err && !acl_access) {
-+ err = au_do_cpup_xattr(h_dst, h_src,
-+ XATTR_NAME_POSIX_ACL_ACCESS, &value,
-+ ignore_flags, verbose);
-+ AuTraceErr(err);
-+ }
-+ if (!err && !acl_default) {
-+ err = au_do_cpup_xattr(h_dst, h_src,
-+ XATTR_NAME_POSIX_ACL_DEFAULT, &value,
-+ ignore_flags, verbose);
-+ AuTraceErr(err);
-+ }
-+
-+ kfree(value);
-+
-+out_free:
-+ kfree(o);
-+out:
-+ if (!unlocked)
-+ mutex_unlock(&h_isrc->i_mutex);
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+enum {
-+ AU_XATTR_LIST,
-+ AU_XATTR_GET
-+};
-+
-+struct au_lgxattr {
-+ int type;
-+ union {
-+ struct {
-+ char *list;
-+ size_t size;
-+ } list;
-+ struct {
-+ const char *name;
-+ void *value;
-+ size_t size;
-+ } get;
-+ } u;
-+};
-+
-+static ssize_t au_lgxattr(struct dentry *dentry, struct au_lgxattr *arg)
-+{
-+ ssize_t err;
-+ struct path h_path;
-+ struct super_block *sb;
-+
-+ sb = dentry->d_sb;
-+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
-+ if (unlikely(err))
-+ goto out;
-+ err = au_h_path_getattr(dentry, /*force*/1, &h_path);
-+ if (unlikely(err))
-+ goto out_si;
-+ if (unlikely(!h_path.dentry))
-+ /* illegally overlapped or something */
-+ goto out_di; /* pretending success */
-+
-+ /* always topmost entry only */
-+ switch (arg->type) {
-+ case AU_XATTR_LIST:
-+ err = vfs_listxattr(h_path.dentry,
-+ arg->u.list.list, arg->u.list.size);
-+ break;
-+ case AU_XATTR_GET:
-+ err = vfs_getxattr(h_path.dentry,
-+ arg->u.get.name, arg->u.get.value,
-+ arg->u.get.size);
-+ break;
-+ }
-+
-+out_di:
-+ di_read_unlock(dentry, AuLock_IR);
-+out_si:
-+ si_read_unlock(sb);
-+out:
-+ AuTraceErr(err);
-+ return err;
-+}
-+
-+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size)
-+{
-+ struct au_lgxattr arg = {
-+ .type = AU_XATTR_LIST,
-+ .u.list = {
-+ .list = list,
-+ .size = size
-+ },
-+ };
-+
-+ return au_lgxattr(dentry, &arg);
-+}
-+
-+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value,
-+ size_t size)
-+{
-+ struct au_lgxattr arg = {
-+ .type = AU_XATTR_GET,
-+ .u.get = {
-+ .name = name,
-+ .value = value,
-+ .size = size
-+ },
-+ };
-+
-+ return au_lgxattr(dentry, &arg);
-+}
-+
-+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value,
-+ size_t size, int flags)
-+{
-+ struct au_srxattr arg = {
-+ .type = AU_XATTR_SET,
-+ .u.set = {
-+ .name = name,
-+ .value = value,
-+ .size = size,
-+ .flags = flags
-+ },
-+ };
-+
-+ return au_srxattr(dentry, &arg);
-+}
-+
-+int aufs_removexattr(struct dentry *dentry, const char *name)
-+{
-+ struct au_srxattr arg = {
-+ .type = AU_XATTR_REMOVE,
-+ .u.remove = {
-+ .name = name
-+ },
-+ };
-+
-+ return au_srxattr(dentry, &arg);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#if 0
-+static size_t au_xattr_list(struct dentry *dentry, char *list, size_t list_size,
-+ const char *name, size_t name_len, int type)
-+{
-+ return aufs_listxattr(dentry, list, list_size);
-+}
-+
-+static int au_xattr_get(struct dentry *dentry, const char *name, void *buffer,
-+ size_t size, int type)
-+{
-+ return aufs_getxattr(dentry, name, buffer, size);
-+}
-+
-+static int au_xattr_set(struct dentry *dentry, const char *name,
-+ const void *value, size_t size, int flags, int type)
-+{
-+ return aufs_setxattr(dentry, name, value, size, flags);
-+}
-+
-+static const struct xattr_handler au_xattr_handler = {
-+ /* no prefix, no flags */
-+ .list = au_xattr_list,
-+ .get = au_xattr_get,
-+ .set = au_xattr_set
-+ /* why no remove? */
-+};
-+
-+static const struct xattr_handler *au_xattr_handlers[] = {
-+ &au_xattr_handler
-+};
-+
-+void au_xattr_init(struct super_block *sb)
-+{
-+ /* sb->s_xattr = au_xattr_handlers; */
-+}
-+#endif
-diff -Nur linux-4.1.10.orig/fs/aufs/xino.c linux-4.1.10/fs/aufs/xino.c
---- linux-4.1.10.orig/fs/aufs/xino.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/fs/aufs/xino.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,1296 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+/*
-+ * external inode number translation table and bitmap
-+ */
-+
-+#include <linux/seq_file.h>
-+#include <linux/statfs.h>
-+#include "aufs.h"
-+
-+/* todo: unnecessary to support mmap_sem since kernel-space? */
-+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *kbuf, size_t size,
-+ loff_t *pos)
-+{
-+ ssize_t err;
-+ mm_segment_t oldfs;
-+ union {
-+ void *k;
-+ char __user *u;
-+ } buf;
-+
-+ buf.k = kbuf;
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ do {
-+ /* todo: signal_pending? */
-+ err = func(file, buf.u, size, pos);
-+ } while (err == -EAGAIN || err == -EINTR);
-+ set_fs(oldfs);
-+
-+#if 0 /* reserved for future use */
-+ if (err > 0)
-+ fsnotify_access(file->f_path.dentry);
-+#endif
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static ssize_t do_xino_fwrite(vfs_writef_t func, struct file *file, void *kbuf,
-+ size_t size, loff_t *pos)
-+{
-+ ssize_t err;
-+ mm_segment_t oldfs;
-+ union {
-+ void *k;
-+ const char __user *u;
-+ } buf;
-+
-+ buf.k = kbuf;
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ do {
-+ /* todo: signal_pending? */
-+ err = func(file, buf.u, size, pos);
-+ } while (err == -EAGAIN || err == -EINTR);
-+ set_fs(oldfs);
-+
-+#if 0 /* reserved for future use */
-+ if (err > 0)
-+ fsnotify_modify(file->f_path.dentry);
-+#endif
-+
-+ return err;
-+}
-+
-+struct do_xino_fwrite_args {
-+ ssize_t *errp;
-+ vfs_writef_t func;
-+ struct file *file;
-+ void *buf;
-+ size_t size;
-+ loff_t *pos;
-+};
-+
-+static void call_do_xino_fwrite(void *args)
-+{
-+ struct do_xino_fwrite_args *a = args;
-+ *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
-+}
-+
-+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
-+ size_t size, loff_t *pos)
-+{
-+ ssize_t err;
-+
-+ /* todo: signal block and no wkq? */
-+ if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
-+ lockdep_off();
-+ err = do_xino_fwrite(func, file, buf, size, pos);
-+ lockdep_on();
-+ } else {
-+ /*
-+ * it breaks RLIMIT_FSIZE and normal user's limit,
-+ * users should care about quota and real 'filesystem full.'
-+ */
-+ int wkq_err;
-+ struct do_xino_fwrite_args args = {
-+ .errp = &err,
-+ .func = func,
-+ .file = file,
-+ .buf = buf,
-+ .size = size,
-+ .pos = pos
-+ };
-+
-+ wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
-+ if (unlikely(wkq_err))
-+ err = wkq_err;
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * create a new xinofile at the same place/path as @base_file.
-+ */
-+struct file *au_xino_create2(struct file *base_file, struct file *copy_src)
-+{
-+ struct file *file;
-+ struct dentry *base, *parent;
-+ struct inode *dir, *delegated;
-+ struct qstr *name;
-+ struct path path;
-+ int err;
-+
-+ base = base_file->f_path.dentry;
-+ parent = base->d_parent; /* dir inode is locked */
-+ dir = d_inode(parent);
-+ IMustLock(dir);
-+
-+ file = ERR_PTR(-EINVAL);
-+ name = &base->d_name;
-+ path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
-+ if (IS_ERR(path.dentry)) {
-+ file = (void *)path.dentry;
-+ pr_err("%pd lookup err %ld\n",
-+ base, PTR_ERR(path.dentry));
-+ goto out;
-+ }
-+
-+ /* no need to mnt_want_write() since we call dentry_open() later */
-+ err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL);
-+ if (unlikely(err)) {
-+ file = ERR_PTR(err);
-+ pr_err("%pd create err %d\n", base, err);
-+ goto out_dput;
-+ }
-+
-+ path.mnt = base_file->f_path.mnt;
-+ file = vfsub_dentry_open(&path,
-+ O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
-+ /* | __FMODE_NONOTIFY */);
-+ if (IS_ERR(file)) {
-+ pr_err("%pd open err %ld\n", base, PTR_ERR(file));
-+ goto out_dput;
-+ }
-+
-+ delegated = NULL;
-+ err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0);
-+ if (unlikely(err == -EWOULDBLOCK)) {
-+ pr_warn("cannot retry for NFSv4 delegation"
-+ " for an internal unlink\n");
-+ iput(delegated);
-+ }
-+ if (unlikely(err)) {
-+ pr_err("%pd unlink err %d\n", base, err);
-+ goto out_fput;
-+ }
-+
-+ if (copy_src) {
-+ /* no one can touch copy_src xino */
-+ err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src));
-+ if (unlikely(err)) {
-+ pr_err("%pd copy err %d\n", base, err);
-+ goto out_fput;
-+ }
-+ }
-+ goto out_dput; /* success */
-+
-+out_fput:
-+ fput(file);
-+ file = ERR_PTR(err);
-+out_dput:
-+ dput(path.dentry);
-+out:
-+ return file;
-+}
-+
-+struct au_xino_lock_dir {
-+ struct au_hinode *hdir;
-+ struct dentry *parent;
-+ struct mutex *mtx;
-+};
-+
-+static void au_xino_lock_dir(struct super_block *sb, struct file *xino,
-+ struct au_xino_lock_dir *ldir)
-+{
-+ aufs_bindex_t brid, bindex;
-+
-+ ldir->hdir = NULL;
-+ bindex = -1;
-+ brid = au_xino_brid(sb);
-+ if (brid >= 0)
-+ bindex = au_br_index(sb, brid);
-+ if (bindex >= 0) {
-+ ldir->hdir = au_hi(d_inode(sb->s_root), bindex);
-+ au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT);
-+ } else {
-+ ldir->parent = dget_parent(xino->f_path.dentry);
-+ ldir->mtx = &d_inode(ldir->parent)->i_mutex;
-+ mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT);
-+ }
-+}
-+
-+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
-+{
-+ if (ldir->hdir)
-+ au_hn_imtx_unlock(ldir->hdir);
-+ else {
-+ mutex_unlock(ldir->mtx);
-+ dput(ldir->parent);
-+ }
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* truncate xino files asynchronously */
-+
-+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex)
-+{
-+ int err;
-+ unsigned long jiffy;
-+ blkcnt_t blocks;
-+ aufs_bindex_t bi, bend;
-+ struct kstatfs *st;
-+ struct au_branch *br;
-+ struct file *new_xino, *file;
-+ struct super_block *h_sb;
-+ struct au_xino_lock_dir ldir;
-+
-+ err = -ENOMEM;
-+ st = kzalloc(sizeof(*st), GFP_NOFS);
-+ if (unlikely(!st))
-+ goto out;
-+
-+ err = -EINVAL;
-+ bend = au_sbend(sb);
-+ if (unlikely(bindex < 0 || bend < bindex))
-+ goto out_st;
-+ br = au_sbr(sb, bindex);
-+ file = br->br_xino.xi_file;
-+ if (!file)
-+ goto out_st;
-+
-+ err = vfs_statfs(&file->f_path, st);
-+ if (unlikely(err))
-+ AuErr1("statfs err %d, ignored\n", err);
-+ jiffy = jiffies;
-+ blocks = file_inode(file)->i_blocks;
-+ pr_info("begin truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
-+ bindex, (u64)blocks, st->f_bfree, st->f_blocks);
-+
-+ au_xino_lock_dir(sb, file, &ldir);
-+ /* mnt_want_write() is unnecessary here */
-+ new_xino = au_xino_create2(file, file);
-+ au_xino_unlock_dir(&ldir);
-+ err = PTR_ERR(new_xino);
-+ if (IS_ERR(new_xino)) {
-+ pr_err("err %d, ignored\n", err);
-+ goto out_st;
-+ }
-+ err = 0;
-+ fput(file);
-+ br->br_xino.xi_file = new_xino;
-+
-+ h_sb = au_br_sb(br);
-+ for (bi = 0; bi <= bend; bi++) {
-+ if (unlikely(bi == bindex))
-+ continue;
-+ br = au_sbr(sb, bi);
-+ if (au_br_sb(br) != h_sb)
-+ continue;
-+
-+ fput(br->br_xino.xi_file);
-+ br->br_xino.xi_file = new_xino;
-+ get_file(new_xino);
-+ }
-+
-+ err = vfs_statfs(&new_xino->f_path, st);
-+ if (!err) {
-+ pr_info("end truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
-+ bindex, (u64)file_inode(new_xino)->i_blocks,
-+ st->f_bfree, st->f_blocks);
-+ if (file_inode(new_xino)->i_blocks < blocks)
-+ au_sbi(sb)->si_xino_jiffy = jiffy;
-+ } else
-+ AuErr1("statfs err %d, ignored\n", err);
-+
-+out_st:
-+ kfree(st);
-+out:
-+ return err;
-+}
-+
-+struct xino_do_trunc_args {
-+ struct super_block *sb;
-+ struct au_branch *br;
-+};
-+
-+static void xino_do_trunc(void *_args)
-+{
-+ struct xino_do_trunc_args *args = _args;
-+ struct super_block *sb;
-+ struct au_branch *br;
-+ struct inode *dir;
-+ int err;
-+ aufs_bindex_t bindex;
-+
-+ err = 0;
-+ sb = args->sb;
-+ dir = d_inode(sb->s_root);
-+ br = args->br;
-+
-+ si_noflush_write_lock(sb);
-+ ii_read_lock_parent(dir);
-+ bindex = au_br_index(sb, br->br_id);
-+ err = au_xino_trunc(sb, bindex);
-+ ii_read_unlock(dir);
-+ if (unlikely(err))
-+ pr_warn("err b%d, (%d)\n", bindex, err);
-+ atomic_dec(&br->br_xino_running);
-+ atomic_dec(&br->br_count);
-+ si_write_unlock(sb);
-+ au_nwt_done(&au_sbi(sb)->si_nowait);
-+ kfree(args);
-+}
-+
-+static int xino_trunc_test(struct super_block *sb, struct au_branch *br)
-+{
-+ int err;
-+ struct kstatfs st;
-+ struct au_sbinfo *sbinfo;
-+
-+ /* todo: si_xino_expire and the ratio should be customizable */
-+ sbinfo = au_sbi(sb);
-+ if (time_before(jiffies,
-+ sbinfo->si_xino_jiffy + sbinfo->si_xino_expire))
-+ return 0;
-+
-+ /* truncation border */
-+ err = vfs_statfs(&br->br_xino.xi_file->f_path, &st);
-+ if (unlikely(err)) {
-+ AuErr1("statfs err %d, ignored\n", err);
-+ return 0;
-+ }
-+ if (div64_u64(st.f_bfree * 100, st.f_blocks) >= AUFS_XINO_DEF_TRUNC)
-+ return 0;
-+
-+ return 1;
-+}
-+
-+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
-+{
-+ struct xino_do_trunc_args *args;
-+ int wkq_err;
-+
-+ if (!xino_trunc_test(sb, br))
-+ return;
-+
-+ if (atomic_inc_return(&br->br_xino_running) > 1)
-+ goto out;
-+
-+ /* lock and kfree() will be called in xino_do_trunc() */
-+ args = kmalloc(sizeof(*args), GFP_NOFS);
-+ if (unlikely(!args)) {
-+ AuErr1("no memory\n");
-+ goto out_args;
-+ }
-+
-+ atomic_inc(&br->br_count);
-+ args->sb = sb;
-+ args->br = br;
-+ wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
-+ if (!wkq_err)
-+ return; /* success */
-+
-+ pr_err("wkq %d\n", wkq_err);
-+ atomic_dec(&br->br_count);
-+
-+out_args:
-+ kfree(args);
-+out:
-+ atomic_dec(&br->br_xino_running);
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static int au_xino_do_write(vfs_writef_t write, struct file *file,
-+ ino_t h_ino, ino_t ino)
-+{
-+ loff_t pos;
-+ ssize_t sz;
-+
-+ pos = h_ino;
-+ if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) {
-+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
-+ return -EFBIG;
-+ }
-+ pos *= sizeof(ino);
-+ sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos);
-+ if (sz == sizeof(ino))
-+ return 0; /* success */
-+
-+ AuIOErr("write failed (%zd)\n", sz);
-+ return -EIO;
-+}
-+
-+/*
-+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
-+ * at the position of @h_ino.
-+ * even if @ino is zero, it is written to the xinofile and means no entry.
-+ * if the size of the xino file on a specific filesystem exceeds the watermark,
-+ * try truncating it.
-+ */
-+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ ino_t ino)
-+{
-+ int err;
-+ unsigned int mnt_flags;
-+ struct au_branch *br;
-+
-+ BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max)
-+ || ((loff_t)-1) > 0);
-+ SiMustAnyLock(sb);
-+
-+ mnt_flags = au_mntflags(sb);
-+ if (!au_opt_test(mnt_flags, XINO))
-+ return 0;
-+
-+ br = au_sbr(sb, bindex);
-+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
-+ h_ino, ino);
-+ if (!err) {
-+ if (au_opt_test(mnt_flags, TRUNC_XINO)
-+ && au_test_fs_trunc_xino(au_br_sb(br)))
-+ xino_try_trunc(sb, br);
-+ return 0; /* success */
-+ }
-+
-+ AuIOErr("write failed (%d)\n", err);
-+ return -EIO;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* aufs inode number bitmap */
-+
-+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
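-+/*
-+ * one page of the bitmap covers page_bits inode numbers; an inode number
-+ * maps to (pindex, bit) as ino = AUFS_FIRST_INO + pindex * page_bits + bit,
-+ * see xib_calc_ino() and xib_calc_bit() below.
-+ */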
-+static ino_t xib_calc_ino(unsigned long pindex, int bit)
-+{
-+ ino_t ino;
-+
-+ AuDebugOn(bit < 0 || page_bits <= bit);
-+ ino = AUFS_FIRST_INO + pindex * page_bits + bit;
-+ return ino;
-+}
-+
-+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
-+{
-+ AuDebugOn(ino < AUFS_FIRST_INO);
-+ ino -= AUFS_FIRST_INO;
-+ *pindex = ino / page_bits;
-+ *bit = ino % page_bits;
-+}
-+
-+static int xib_pindex(struct super_block *sb, unsigned long pindex)
-+{
-+ int err;
-+ loff_t pos;
-+ ssize_t sz;
-+ struct au_sbinfo *sbinfo;
-+ struct file *xib;
-+ unsigned long *p;
-+
-+ sbinfo = au_sbi(sb);
-+ MtxMustLock(&sbinfo->si_xib_mtx);
-+ AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
-+ || !au_opt_test(sbinfo->si_mntflags, XINO));
-+
-+ if (pindex == sbinfo->si_xib_last_pindex)
-+ return 0;
-+
-+ xib = sbinfo->si_xib;
-+ p = sbinfo->si_xib_buf;
-+ pos = sbinfo->si_xib_last_pindex;
-+ pos *= PAGE_SIZE;
-+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
-+ if (unlikely(sz != PAGE_SIZE))
-+ goto out;
-+
-+ pos = pindex;
-+ pos *= PAGE_SIZE;
-+ if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE)
-+ sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
-+ else {
-+ memset(p, 0, PAGE_SIZE);
-+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
-+ }
-+ if (sz == PAGE_SIZE) {
-+ sbinfo->si_xib_last_pindex = pindex;
-+ return 0; /* success */
-+ }
-+
-+out:
-+ AuIOErr1("write failed (%zd)\n", sz);
-+ err = sz;
-+ if (sz >= 0)
-+ err = -EIO;
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+static void au_xib_clear_bit(struct inode *inode)
-+{
-+ int err, bit;
-+ unsigned long pindex;
-+ struct super_block *sb;
-+ struct au_sbinfo *sbinfo;
-+
-+ AuDebugOn(inode->i_nlink);
-+
-+ sb = inode->i_sb;
-+ xib_calc_bit(inode->i_ino, &pindex, &bit);
-+ AuDebugOn(page_bits <= bit);
-+ sbinfo = au_sbi(sb);
-+ mutex_lock(&sbinfo->si_xib_mtx);
-+ err = xib_pindex(sb, pindex);
-+ if (!err) {
-+ clear_bit(bit, sbinfo->si_xib_buf);
-+ sbinfo->si_xib_next_bit = bit;
-+ }
-+ mutex_unlock(&sbinfo->si_xib_mtx);
-+}
-+
-+/* for s_op->delete_inode() */
-+void au_xino_delete_inode(struct inode *inode, const int unlinked)
-+{
-+ int err;
-+ unsigned int mnt_flags;
-+ aufs_bindex_t bindex, bend, bi;
-+ unsigned char try_trunc;
-+ struct au_iinfo *iinfo;
-+ struct super_block *sb;
-+ struct au_hinode *hi;
-+ struct inode *h_inode;
-+ struct au_branch *br;
-+ vfs_writef_t xwrite;
-+
-+ sb = inode->i_sb;
-+ mnt_flags = au_mntflags(sb);
-+ if (!au_opt_test(mnt_flags, XINO)
-+ || inode->i_ino == AUFS_ROOT_INO)
-+ return;
-+
-+ if (unlinked) {
-+ au_xigen_inc(inode);
-+ au_xib_clear_bit(inode);
-+ }
-+
-+ iinfo = au_ii(inode);
-+ if (!iinfo)
-+ return;
-+
-+ bindex = iinfo->ii_bstart;
-+ if (bindex < 0)
-+ return;
-+
-+ xwrite = au_sbi(sb)->si_xwrite;
-+ try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
-+ hi = iinfo->ii_hinode + bindex;
-+ bend = iinfo->ii_bend;
-+ for (; bindex <= bend; bindex++, hi++) {
-+ h_inode = hi->hi_inode;
-+ if (!h_inode
-+ || (!unlinked && h_inode->i_nlink))
-+ continue;
-+
-+ /* inode may not be revalidated */
-+ bi = au_br_index(sb, hi->hi_id);
-+ if (bi < 0)
-+ continue;
-+
-+ br = au_sbr(sb, bi);
-+ err = au_xino_do_write(xwrite, br->br_xino.xi_file,
-+ h_inode->i_ino, /*ino*/0);
-+ if (!err && try_trunc
-+ && au_test_fs_trunc_xino(au_br_sb(br)))
-+ xino_try_trunc(sb, br);
-+ }
-+}
-+
-+/* get an unused inode number from bitmap */
-+ino_t au_xino_new_ino(struct super_block *sb)
-+{
-+ ino_t ino;
-+ unsigned long *p, pindex, ul, pend;
-+ struct au_sbinfo *sbinfo;
-+ struct file *file;
-+ int free_bit, err;
-+
-+ if (!au_opt_test(au_mntflags(sb), XINO))
-+ return iunique(sb, AUFS_FIRST_INO);
-+
-+ sbinfo = au_sbi(sb);
-+ mutex_lock(&sbinfo->si_xib_mtx);
-+ p = sbinfo->si_xib_buf;
-+ free_bit = sbinfo->si_xib_next_bit;
-+ if (free_bit < page_bits && !test_bit(free_bit, p))
-+ goto out; /* success */
-+ free_bit = find_first_zero_bit(p, page_bits);
-+ if (free_bit < page_bits)
-+ goto out; /* success */
-+
-+ pindex = sbinfo->si_xib_last_pindex;
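-+ /*
-+ * scan the pages preceding the last one backwards; 'ul' is unsigned,
-+ * so decrementing past zero wraps to ULONG_MAX and ends the loop.
-+ */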
-+ for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
-+ err = xib_pindex(sb, ul);
-+ if (unlikely(err))
-+ goto out_err;
-+ free_bit = find_first_zero_bit(p, page_bits);
-+ if (free_bit < page_bits)
-+ goto out; /* success */
-+ }
-+
-+ file = sbinfo->si_xib;
-+ pend = vfsub_f_size_read(file) / PAGE_SIZE;
-+ for (ul = pindex + 1; ul <= pend; ul++) {
-+ err = xib_pindex(sb, ul);
-+ if (unlikely(err))
-+ goto out_err;
-+ free_bit = find_first_zero_bit(p, page_bits);
-+ if (free_bit < page_bits)
-+ goto out; /* success */
-+ }
-+ BUG();
-+
-+out:
-+ set_bit(free_bit, p);
-+ sbinfo->si_xib_next_bit = free_bit + 1;
-+ pindex = sbinfo->si_xib_last_pindex;
-+ mutex_unlock(&sbinfo->si_xib_mtx);
-+ ino = xib_calc_ino(pindex, free_bit);
-+ AuDbg("i%lu\n", (unsigned long)ino);
-+ return ino;
-+out_err:
-+ mutex_unlock(&sbinfo->si_xib_mtx);
-+ AuDbg("i0\n");
-+ return 0;
-+}
-+
-+/*
-+ * read @ino from xinofile for the specified branch{@sb, @bindex}
-+ * at the position of @h_ino.
-+ * if @ino does not exist and @do_new is true, get new one.
-+ */
-+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
-+ ino_t *ino)
-+{
-+ int err;
-+ ssize_t sz;
-+ loff_t pos;
-+ struct file *file;
-+ struct au_sbinfo *sbinfo;
-+
-+ *ino = 0;
-+ if (!au_opt_test(au_mntflags(sb), XINO))
-+ return 0; /* no xino */
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ pos = h_ino;
-+ if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) {
-+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
-+ return -EFBIG;
-+ }
-+ pos *= sizeof(*ino);
-+
-+ file = au_sbr(sb, bindex)->br_xino.xi_file;
-+ if (vfsub_f_size_read(file) < pos + sizeof(*ino))
-+ return 0; /* no ino */
-+
-+ sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos);
-+ if (sz == sizeof(*ino))
-+ return 0; /* success */
-+
-+ err = sz;
-+ if (unlikely(sz >= 0)) {
-+ err = -EIO;
-+ AuIOErr("xino read error (%zd)\n", sz);
-+ }
-+
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* create and set a new xino file */
-+
-+struct file *au_xino_create(struct super_block *sb, char *fname, int silent)
-+{
-+ struct file *file;
-+ struct dentry *h_parent, *d;
-+ struct inode *h_dir, *inode;
-+ int err;
-+
-+ /*
-+ * at mount-time, when the xino file is at the default path,
-+ * hnotify is disabled so we have no notify events to ignore.
-+ * when a user specifies the xino, we cannot get au_hdir to be ignored.
-+ */
-+ file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
-+ /* | __FMODE_NONOTIFY */,
-+ S_IRUGO | S_IWUGO);
-+ if (IS_ERR(file)) {
-+ if (!silent)
-+ pr_err("open %s(%ld)\n", fname, PTR_ERR(file));
-+ return file;
-+ }
-+
-+ /* keep file count */
-+ err = 0;
-+ inode = file_inode(file);
-+ h_parent = dget_parent(file->f_path.dentry);
-+ h_dir = d_inode(h_parent);
-+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
-+ /* mnt_want_write() is unnecessary here */
-+ /* no delegation since it is just created */
-+ if (inode->i_nlink)
-+ err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL,
-+ /*force*/0);
-+ mutex_unlock(&h_dir->i_mutex);
-+ dput(h_parent);
-+ if (unlikely(err)) {
-+ if (!silent)
-+ pr_err("unlink %s(%d)\n", fname, err);
-+ goto out;
-+ }
-+
-+ err = -EINVAL;
-+ d = file->f_path.dentry;
-+ if (unlikely(sb == d->d_sb)) {
-+ if (!silent)
-+ pr_err("%s must be outside\n", fname);
-+ goto out;
-+ }
-+ if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
-+ if (!silent)
-+ pr_err("xino doesn't support %s(%s)\n",
-+ fname, au_sbtype(d->d_sb));
-+ goto out;
-+ }
-+ return file; /* success */
-+
-+out:
-+ fput(file);
-+ file = ERR_PTR(err);
-+ return file;
-+}
-+
-+/*
-+ * find another branch that is on the same filesystem as the specified
-+ * branch{@btgt}. search until @bend.
-+ */
-+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
-+ aufs_bindex_t bend)
-+{
-+ aufs_bindex_t bindex;
-+ struct super_block *tgt_sb = au_sbr_sb(sb, btgt);
-+
-+ for (bindex = 0; bindex < btgt; bindex++)
-+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
-+ return bindex;
-+ for (bindex++; bindex <= bend; bindex++)
-+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
-+ return bindex;
-+ return -1;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * initialize the xinofile for the specified branch @br
-+ * at the place/path where @base_file indicates.
-+ * test whether another branch is on the same filesystem or not,
-+ * if @do_test is true.
-+ */
-+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
-+ struct file *base_file, int do_test)
-+{
-+ int err;
-+ ino_t ino;
-+ aufs_bindex_t bend, bindex;
-+ struct au_branch *shared_br, *b;
-+ struct file *file;
-+ struct super_block *tgt_sb;
-+
-+ shared_br = NULL;
-+ bend = au_sbend(sb);
-+ if (do_test) {
-+ tgt_sb = au_br_sb(br);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ b = au_sbr(sb, bindex);
-+ if (tgt_sb == au_br_sb(b)) {
-+ shared_br = b;
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (!shared_br || !shared_br->br_xino.xi_file) {
-+ struct au_xino_lock_dir ldir;
-+
-+ au_xino_lock_dir(sb, base_file, &ldir);
-+ /* mnt_want_write() is unnecessary here */
-+ file = au_xino_create2(base_file, NULL);
-+ au_xino_unlock_dir(&ldir);
-+ err = PTR_ERR(file);
-+ if (IS_ERR(file))
-+ goto out;
-+ br->br_xino.xi_file = file;
-+ } else {
-+ br->br_xino.xi_file = shared_br->br_xino.xi_file;
-+ get_file(br->br_xino.xi_file);
-+ }
-+
-+ ino = AUFS_ROOT_INO;
-+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
-+ h_ino, ino);
-+ if (unlikely(err)) {
-+ fput(br->br_xino.xi_file);
-+ br->br_xino.xi_file = NULL;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* truncate a xino bitmap file */
-+
-+/* todo: slow */
-+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
-+{
-+ int err, bit;
-+ ssize_t sz;
-+ unsigned long pindex;
-+ loff_t pos, pend;
-+ struct au_sbinfo *sbinfo;
-+ vfs_readf_t func;
-+ ino_t *ino;
-+ unsigned long *p;
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ MtxMustLock(&sbinfo->si_xib_mtx);
-+ p = sbinfo->si_xib_buf;
-+ func = sbinfo->si_xread;
-+ pend = vfsub_f_size_read(file);
-+ pos = 0;
-+ while (pos < pend) {
-+ sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
-+ err = sz;
-+ if (unlikely(sz <= 0))
-+ goto out;
-+
-+ err = 0;
-+ for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
-+ if (unlikely(*ino < AUFS_FIRST_INO))
-+ continue;
-+
-+ xib_calc_bit(*ino, &pindex, &bit);
-+ AuDebugOn(page_bits <= bit);
-+ err = xib_pindex(sb, pindex);
-+ if (!err)
-+ set_bit(bit, p);
-+ else
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int xib_restore(struct super_block *sb)
-+{
-+ int err;
-+ aufs_bindex_t bindex, bend;
-+ void *page;
-+
-+ err = -ENOMEM;
-+ page = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!page))
-+ goto out;
-+
-+ err = 0;
-+ bend = au_sbend(sb);
-+ for (bindex = 0; !err && bindex <= bend; bindex++)
-+ if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0)
-+ err = do_xib_restore
-+ (sb, au_sbr(sb, bindex)->br_xino.xi_file, page);
-+ else
-+ AuDbg("b%d\n", bindex);
-+ free_page((unsigned long)page);
-+
-+out:
-+ return err;
-+}
-+
-+int au_xib_trunc(struct super_block *sb)
-+{
-+ int err;
-+ ssize_t sz;
-+ loff_t pos;
-+ struct au_xino_lock_dir ldir;
-+ struct au_sbinfo *sbinfo;
-+ unsigned long *p;
-+ struct file *file;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ if (!au_opt_test(sbinfo->si_mntflags, XINO))
-+ goto out;
-+
-+ file = sbinfo->si_xib;
-+ if (vfsub_f_size_read(file) <= PAGE_SIZE)
-+ goto out;
-+
-+ au_xino_lock_dir(sb, file, &ldir);
-+ /* mnt_want_write() is unnecessary here */
-+ file = au_xino_create2(sbinfo->si_xib, NULL);
-+ au_xino_unlock_dir(&ldir);
-+ err = PTR_ERR(file);
-+ if (IS_ERR(file))
-+ goto out;
-+ fput(sbinfo->si_xib);
-+ sbinfo->si_xib = file;
-+
-+ p = sbinfo->si_xib_buf;
-+ memset(p, 0, PAGE_SIZE);
-+ pos = 0;
-+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
-+ if (unlikely(sz != PAGE_SIZE)) {
-+ err = sz;
-+ AuIOErr("err %d\n", err);
-+ if (sz >= 0)
-+ err = -EIO;
-+ goto out;
-+ }
-+
-+ mutex_lock(&sbinfo->si_xib_mtx);
-+ /* mnt_want_write() is unnecessary here */
-+ err = xib_restore(sb);
-+ mutex_unlock(&sbinfo->si_xib_mtx);
-+
-+out:
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * xino mount option handlers
-+ */
-+
-+/* xino bitmap */
-+static void xino_clear_xib(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ sbinfo->si_xread = NULL;
-+ sbinfo->si_xwrite = NULL;
-+ if (sbinfo->si_xib)
-+ fput(sbinfo->si_xib);
-+ sbinfo->si_xib = NULL;
-+ free_page((unsigned long)sbinfo->si_xib_buf);
-+ sbinfo->si_xib_buf = NULL;
-+}
-+
-+static int au_xino_set_xib(struct super_block *sb, struct file *base)
-+{
-+ int err;
-+ loff_t pos;
-+ struct au_sbinfo *sbinfo;
-+ struct file *file;
-+
-+ SiMustWriteLock(sb);
-+
-+ sbinfo = au_sbi(sb);
-+ file = au_xino_create2(base, sbinfo->si_xib);
-+ err = PTR_ERR(file);
-+ if (IS_ERR(file))
-+ goto out;
-+ if (sbinfo->si_xib)
-+ fput(sbinfo->si_xib);
-+ sbinfo->si_xib = file;
-+ sbinfo->si_xread = vfs_readf(file);
-+ sbinfo->si_xwrite = vfs_writef(file);
-+
-+ err = -ENOMEM;
-+ if (!sbinfo->si_xib_buf)
-+ sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
-+ if (unlikely(!sbinfo->si_xib_buf))
-+ goto out_unset;
-+
-+ sbinfo->si_xib_last_pindex = 0;
-+ sbinfo->si_xib_next_bit = 0;
-+ if (vfsub_f_size_read(file) < PAGE_SIZE) {
-+ pos = 0;
-+ err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
-+ PAGE_SIZE, &pos);
-+ if (unlikely(err != PAGE_SIZE))
-+ goto out_free;
-+ }
-+ err = 0;
-+ goto out; /* success */
-+
-+out_free:
-+ free_page((unsigned long)sbinfo->si_xib_buf);
-+ sbinfo->si_xib_buf = NULL;
-+ if (err >= 0)
-+ err = -EIO;
-+out_unset:
-+ fput(sbinfo->si_xib);
-+ sbinfo->si_xib = NULL;
-+ sbinfo->si_xread = NULL;
-+ sbinfo->si_xwrite = NULL;
-+out:
-+ return err;
-+}
-+
-+/* xino for each branch */
-+static void xino_clear_br(struct super_block *sb)
-+{
-+ aufs_bindex_t bindex, bend;
-+ struct au_branch *br;
-+
-+ bend = au_sbend(sb);
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (!br || !br->br_xino.xi_file)
-+ continue;
-+
-+ fput(br->br_xino.xi_file);
-+ br->br_xino.xi_file = NULL;
-+ }
-+}
-+
-+static int au_xino_set_br(struct super_block *sb, struct file *base)
-+{
-+ int err;
-+ ino_t ino;
-+ aufs_bindex_t bindex, bend, bshared;
-+ struct {
-+ struct file *old, *new;
-+ } *fpair, *p;
-+ struct au_branch *br;
-+ struct inode *inode;
-+ vfs_writef_t writef;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = -ENOMEM;
-+ bend = au_sbend(sb);
-+ fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS);
-+ if (unlikely(!fpair))
-+ goto out;
-+
-+ inode = d_inode(sb->s_root);
-+ ino = AUFS_ROOT_INO;
-+ writef = au_sbi(sb)->si_xwrite;
-+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
-+ br = au_sbr(sb, bindex);
-+ bshared = is_sb_shared(sb, bindex, bindex - 1);
-+ if (bshared >= 0) {
-+ /* shared xino */
-+ *p = fpair[bshared];
-+ get_file(p->new);
-+ }
-+
-+ if (!p->new) {
-+ /* new xino */
-+ p->old = br->br_xino.xi_file;
-+ p->new = au_xino_create2(base, br->br_xino.xi_file);
-+ err = PTR_ERR(p->new);
-+ if (IS_ERR(p->new)) {
-+ p->new = NULL;
-+ goto out_pair;
-+ }
-+ }
-+
-+ err = au_xino_do_write(writef, p->new,
-+ au_h_iptr(inode, bindex)->i_ino, ino);
-+ if (unlikely(err))
-+ goto out_pair;
-+ }
-+
-+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
-+ br = au_sbr(sb, bindex);
-+ if (br->br_xino.xi_file)
-+ fput(br->br_xino.xi_file);
-+ get_file(p->new);
-+ br->br_xino.xi_file = p->new;
-+ }
-+
-+out_pair:
-+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++)
-+ if (p->new)
-+ fput(p->new);
-+ else
-+ break;
-+ kfree(fpair);
-+out:
-+ return err;
-+}
-+
-+void au_xino_clr(struct super_block *sb)
-+{
-+ struct au_sbinfo *sbinfo;
-+
-+ au_xigen_clr(sb);
-+ xino_clear_xib(sb);
-+ xino_clear_br(sb);
-+ sbinfo = au_sbi(sb);
-+ /* lvalue, do not call au_mntflags() */
-+ au_opt_clr(sbinfo->si_mntflags, XINO);
-+}
-+
-+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount)
-+{
-+ int err, skip;
-+ struct dentry *parent, *cur_parent;
-+ struct qstr *dname, *cur_name;
-+ struct file *cur_xino;
-+ struct inode *dir;
-+ struct au_sbinfo *sbinfo;
-+
-+ SiMustWriteLock(sb);
-+
-+ err = 0;
-+ sbinfo = au_sbi(sb);
-+ parent = dget_parent(xino->file->f_path.dentry);
-+ if (remount) {
-+ skip = 0;
-+ dname = &xino->file->f_path.dentry->d_name;
-+ cur_xino = sbinfo->si_xib;
-+ if (cur_xino) {
-+ cur_parent = dget_parent(cur_xino->f_path.dentry);
-+ cur_name = &cur_xino->f_path.dentry->d_name;
-+ skip = (cur_parent == parent
-+ && au_qstreq(dname, cur_name));
-+ dput(cur_parent);
-+ }
-+ if (skip)
-+ goto out;
-+ }
-+
-+ au_opt_set(sbinfo->si_mntflags, XINO);
-+ dir = d_inode(parent);
-+ mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT);
-+ /* mnt_want_write() is unnecessary here */
-+ err = au_xino_set_xib(sb, xino->file);
-+ if (!err)
-+ err = au_xigen_set(sb, xino->file);
-+ if (!err)
-+ err = au_xino_set_br(sb, xino->file);
-+ mutex_unlock(&dir->i_mutex);
-+ if (!err)
-+ goto out; /* success */
-+
-+ /* reset all */
-+ AuIOErr("failed creating xino(%d).\n", err);
-+ au_xigen_clr(sb);
-+ xino_clear_xib(sb);
-+
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/*
-+ * create a xinofile at the default place/path.
-+ */
-+struct file *au_xino_def(struct super_block *sb)
-+{
-+ struct file *file;
-+ char *page, *p;
-+ struct au_branch *br;
-+ struct super_block *h_sb;
-+ struct path path;
-+ aufs_bindex_t bend, bindex, bwr;
-+
-+ br = NULL;
-+ bend = au_sbend(sb);
-+ bwr = -1;
-+ for (bindex = 0; bindex <= bend; bindex++) {
-+ br = au_sbr(sb, bindex);
-+ if (au_br_writable(br->br_perm)
-+ && !au_test_fs_bad_xino(au_br_sb(br))) {
-+ bwr = bindex;
-+ break;
-+ }
-+ }
-+
-+ if (bwr >= 0) {
-+ file = ERR_PTR(-ENOMEM);
-+ page = (void *)__get_free_page(GFP_NOFS);
-+ if (unlikely(!page))
-+ goto out;
-+ path.mnt = au_br_mnt(br);
-+ path.dentry = au_h_dptr(sb->s_root, bwr);
-+ p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
-+ file = (void *)p;
-+ if (!IS_ERR(p)) {
-+ strcat(p, "/" AUFS_XINO_FNAME);
-+ AuDbg("%s\n", p);
-+ file = au_xino_create(sb, p, /*silent*/0);
-+ if (!IS_ERR(file))
-+ au_xino_brid_set(sb, br->br_id);
-+ }
-+ free_page((unsigned long)page);
-+ } else {
-+ file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
-+ if (IS_ERR(file))
-+ goto out;
-+ h_sb = file->f_path.dentry->d_sb;
-+ if (unlikely(au_test_fs_bad_xino(h_sb))) {
-+ pr_err("xino doesn't support %s(%s)\n",
-+ AUFS_XINO_DEFPATH, au_sbtype(h_sb));
-+ fput(file);
-+ file = ERR_PTR(-EINVAL);
-+ }
-+ if (!IS_ERR(file))
-+ au_xino_brid_set(sb, -1);
-+ }
-+
-+out:
-+ return file;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+int au_xino_path(struct seq_file *seq, struct file *file)
-+{
-+ int err;
-+
-+ err = au_seq_path(seq, &file->f_path);
-+ if (unlikely(err))
-+ goto out;
-+
-+#define Deleted "\\040(deleted)"
-+ seq->count -= sizeof(Deleted) - 1;
-+ AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
-+ sizeof(Deleted) - 1));
-+#undef Deleted
-+
-+out:
-+ return err;
-+}
-diff -Nur linux-4.1.10.orig/fs/buffer.c linux-4.1.10/fs/buffer.c
---- linux-4.1.10.orig/fs/buffer.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/buffer.c 2015-10-22 21:35:53.000000000 +0200
-@@ -2450,7 +2450,7 @@
- * Update file times before taking page lock. We may end up failing the
- * fault so this update may be superfluous but who really cares...
- */
-- file_update_time(vma->vm_file);
-+ vma_file_update_time(vma);
-
- ret = __block_page_mkwrite(vma, vmf, get_block);
- sb_end_pagefault(sb);
-diff -Nur linux-4.1.10.orig/fs/dcache.c linux-4.1.10/fs/dcache.c
---- linux-4.1.10.orig/fs/dcache.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/dcache.c 2015-10-22 21:35:53.000000000 +0200
-@@ -1167,7 +1167,7 @@
- *
- * The @enter() and @finish() callbacks are called with d_lock held.
- */
--static void d_walk(struct dentry *parent, void *data,
-+void d_walk(struct dentry *parent, void *data,
- enum d_walk_ret (*enter)(void *, struct dentry *),
- void (*finish)(void *))
- {
-@@ -1272,6 +1272,7 @@
- seq = 1;
- goto again;
- }
-+EXPORT_SYMBOL(d_walk);
-
- /*
- * Search for at least 1 mount point in the dentry's subdirs.
-diff -Nur linux-4.1.10.orig/fs/file_table.c linux-4.1.10/fs/file_table.c
---- linux-4.1.10.orig/fs/file_table.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/file_table.c 2015-10-22 21:35:53.000000000 +0200
-@@ -147,6 +147,7 @@
- }
- return ERR_PTR(-ENFILE);
- }
-+EXPORT_SYMBOL(get_empty_filp);
-
- /**
- * alloc_file - allocate and initialize a 'struct file'
-@@ -308,6 +309,7 @@
- file_free(file);
- }
- }
-+EXPORT_SYMBOL(put_filp);
-
- void __init files_init(unsigned long mempages)
- {
-diff -Nur linux-4.1.10.orig/fs/inode.c linux-4.1.10/fs/inode.c
---- linux-4.1.10.orig/fs/inode.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/inode.c 2015-10-22 21:35:53.000000000 +0200
-@@ -58,6 +58,7 @@
- static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
-
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
-+EXPORT_SYMBOL(inode_sb_list_lock);
-
- /*
- * Empty aops. Can be used for the cases where the user does not
-diff -Nur linux-4.1.10.orig/fs/Kconfig linux-4.1.10/fs/Kconfig
---- linux-4.1.10.orig/fs/Kconfig 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/Kconfig 2015-10-22 21:35:53.000000000 +0200
-@@ -218,6 +218,7 @@
- source "fs/sysv/Kconfig"
- source "fs/ufs/Kconfig"
- source "fs/exofs/Kconfig"
-+source "fs/aufs/Kconfig"
-
- endif # MISC_FILESYSTEMS
-
-diff -Nur linux-4.1.10.orig/fs/Makefile linux-4.1.10/fs/Makefile
---- linux-4.1.10.orig/fs/Makefile 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/Makefile 2015-10-22 21:35:53.000000000 +0200
-@@ -127,3 +127,4 @@
- obj-$(CONFIG_CEPH_FS) += ceph/
- obj-$(CONFIG_PSTORE) += pstore/
- obj-$(CONFIG_EFIVAR_FS) += efivarfs/
-+obj-$(CONFIG_AUFS_FS) += aufs/
-diff -Nur linux-4.1.10.orig/fs/namespace.c linux-4.1.10/fs/namespace.c
---- linux-4.1.10.orig/fs/namespace.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/namespace.c 2015-10-22 21:35:53.000000000 +0200
-@@ -463,6 +463,7 @@
- mnt_dec_writers(real_mount(mnt));
- preempt_enable();
- }
-+EXPORT_SYMBOL_GPL(__mnt_drop_write);
-
- /**
- * mnt_drop_write - give up write access to a mount
-@@ -1792,6 +1793,7 @@
- }
- return 0;
- }
-+EXPORT_SYMBOL(iterate_mounts);
-
- static void cleanup_group_ids(struct mount *mnt, struct mount *end)
- {
-diff -Nur linux-4.1.10.orig/fs/notify/group.c linux-4.1.10/fs/notify/group.c
---- linux-4.1.10.orig/fs/notify/group.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/notify/group.c 2015-10-22 21:35:53.000000000 +0200
-@@ -22,6 +22,7 @@
- #include <linux/srcu.h>
- #include <linux/rculist.h>
- #include <linux/wait.h>
-+#include <linux/module.h>
-
- #include <linux/fsnotify_backend.h>
- #include "fsnotify.h"
-@@ -72,6 +73,7 @@
- {
- atomic_inc(&group->refcnt);
- }
-+EXPORT_SYMBOL(fsnotify_get_group);
-
- /*
- * Drop a reference to a group. Free it if it's through.
-@@ -81,6 +83,7 @@
- if (atomic_dec_and_test(&group->refcnt))
- fsnotify_final_destroy_group(group);
- }
-+EXPORT_SYMBOL(fsnotify_put_group);
-
- /*
- * Create a new fsnotify_group and hold a reference for the group returned.
-@@ -109,6 +112,7 @@
-
- return group;
- }
-+EXPORT_SYMBOL(fsnotify_alloc_group);
-
- int fsnotify_fasync(int fd, struct file *file, int on)
- {
-diff -Nur linux-4.1.10.orig/fs/notify/mark.c linux-4.1.10/fs/notify/mark.c
---- linux-4.1.10.orig/fs/notify/mark.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/notify/mark.c 2015-10-22 21:35:53.000000000 +0200
-@@ -109,6 +109,7 @@
- mark->free_mark(mark);
- }
- }
-+EXPORT_SYMBOL(fsnotify_put_mark);
-
- /* Calculate mask of events for a list of marks */
- u32 fsnotify_recalc_mask(struct hlist_head *head)
-@@ -202,6 +203,7 @@
- fsnotify_destroy_mark_locked(mark, group);
- mutex_unlock(&group->mark_mutex);
- }
-+EXPORT_SYMBOL(fsnotify_destroy_mark);
-
- /*
- * Destroy all marks in the given list. The marks must be already detached from
-@@ -376,6 +378,7 @@
-
- return ret;
- }
-+EXPORT_SYMBOL(fsnotify_add_mark);
-
- int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
- struct inode *inode, struct vfsmount *mnt, int allow_dups)
-@@ -475,6 +478,7 @@
- atomic_set(&mark->refcnt, 1);
- mark->free_mark = free_mark;
- }
-+EXPORT_SYMBOL(fsnotify_init_mark);
-
- static int fsnotify_mark_destroy(void *ignored)
- {
-diff -Nur linux-4.1.10.orig/fs/open.c linux-4.1.10/fs/open.c
---- linux-4.1.10.orig/fs/open.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/open.c 2015-10-22 21:35:53.000000000 +0200
-@@ -62,6 +62,7 @@
- mutex_unlock(&dentry->d_inode->i_mutex);
- return ret;
- }
-+EXPORT_SYMBOL(do_truncate);
-
- long vfs_truncate(struct path *path, loff_t length)
- {
-@@ -676,6 +677,7 @@
- }
- return 0;
- }
-+EXPORT_SYMBOL(open_check_o_direct);
-
- static int do_dentry_open(struct file *f,
- int (*open)(struct inode *, struct file *),
-diff -Nur linux-4.1.10.orig/fs/proc/base.c linux-4.1.10/fs/proc/base.c
---- linux-4.1.10.orig/fs/proc/base.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/proc/base.c 2015-10-22 21:35:53.000000000 +0200
-@@ -1744,7 +1744,7 @@
- down_read(&mm->mmap_sem);
- vma = find_exact_vma(mm, vm_start, vm_end);
- if (vma && vma->vm_file) {
-- *path = vma->vm_file->f_path;
-+ *path = vma_pr_or_file(vma)->f_path;
- path_get(path);
- rc = 0;
- }
-diff -Nur linux-4.1.10.orig/fs/proc/nommu.c linux-4.1.10/fs/proc/nommu.c
---- linux-4.1.10.orig/fs/proc/nommu.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/proc/nommu.c 2015-10-22 21:35:53.000000000 +0200
-@@ -45,7 +45,10 @@
- file = region->vm_file;
-
- if (file) {
-- struct inode *inode = file_inode(region->vm_file);
-+ struct inode *inode;
-+
-+ file = vmr_pr_or_file(region);
-+ inode = file_inode(file);
- dev = inode->i_sb->s_dev;
- ino = inode->i_ino;
- }
-diff -Nur linux-4.1.10.orig/fs/proc/task_mmu.c linux-4.1.10/fs/proc/task_mmu.c
---- linux-4.1.10.orig/fs/proc/task_mmu.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/proc/task_mmu.c 2015-10-22 21:35:53.000000000 +0200
-@@ -279,7 +279,10 @@
- const char *name = NULL;
-
- if (file) {
-- struct inode *inode = file_inode(vma->vm_file);
-+ struct inode *inode;
-+
-+ file = vma_pr_or_file(vma);
-+ inode = file_inode(file);
- dev = inode->i_sb->s_dev;
- ino = inode->i_ino;
- pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
-@@ -1479,7 +1482,7 @@
- struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
- struct vm_area_struct *vma = v;
- struct numa_maps *md = &numa_priv->md;
-- struct file *file = vma->vm_file;
-+ struct file *file = vma_pr_or_file(vma);
- struct mm_struct *mm = vma->vm_mm;
- struct mm_walk walk = {
- .hugetlb_entry = gather_hugetlb_stats,
-diff -Nur linux-4.1.10.orig/fs/proc/task_nommu.c linux-4.1.10/fs/proc/task_nommu.c
---- linux-4.1.10.orig/fs/proc/task_nommu.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/proc/task_nommu.c 2015-10-22 21:35:53.000000000 +0200
-@@ -160,7 +160,10 @@
- file = vma->vm_file;
-
- if (file) {
-- struct inode *inode = file_inode(vma->vm_file);
-+ struct inode *inode;
-+
-+ file = vma_pr_or_file(vma);
-+ inode = file_inode(file);
- dev = inode->i_sb->s_dev;
- ino = inode->i_ino;
- pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
-diff -Nur linux-4.1.10.orig/fs/read_write.c linux-4.1.10/fs/read_write.c
---- linux-4.1.10.orig/fs/read_write.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/read_write.c 2015-10-22 21:35:53.000000000 +0200
-@@ -494,6 +494,30 @@
- }
- EXPORT_SYMBOL(__vfs_write);
-
-+vfs_readf_t vfs_readf(struct file *file)
-+{
-+ const struct file_operations *fop = file->f_op;
-+
-+ if (fop->read)
-+ return fop->read;
-+ if (fop->read_iter)
-+ return new_sync_read;
-+ return ERR_PTR(-ENOSYS);
-+}
-+EXPORT_SYMBOL(vfs_readf);
-+
-+vfs_writef_t vfs_writef(struct file *file)
-+{
-+ const struct file_operations *fop = file->f_op;
-+
-+ if (fop->write)
-+ return fop->write;
-+ if (fop->write_iter)
-+ return new_sync_write;
-+ return ERR_PTR(-ENOSYS);
-+}
-+EXPORT_SYMBOL(vfs_writef);
-+
- ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
- {
- mm_segment_t old_fs;
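The vfs_readf()/vfs_writef() helpers added above return a plain function pointer: the filesystem's own ->read/->write if present, the new_sync_read/new_sync_write wrapper if only ->read_iter/->write_iter exists, or ERR_PTR(-ENOSYS). A minimal caller sketch, assuming only the declarations from this hunk (the lower-file name h_file and the function name are illustrative, not taken from aufs):

	/* sketch only: read through a lower (branch) file the way a stacking fs might */
	static ssize_t example_branch_read(struct file *h_file, char __user *buf,
					   size_t count, loff_t *pos)
	{
		vfs_readf_t readf = vfs_readf(h_file);

		if (IS_ERR(readf))
			return PTR_ERR(readf);	/* -ENOSYS: no read method at all */
		return readf(h_file, buf, count, pos);
	}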
-diff -Nur linux-4.1.10.orig/fs/splice.c linux-4.1.10/fs/splice.c
---- linux-4.1.10.orig/fs/splice.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/splice.c 2015-10-22 21:35:53.000000000 +0200
-@@ -1101,8 +1101,8 @@
- /*
- * Attempt to initiate a splice from pipe to file.
- */
--static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-- loff_t *ppos, size_t len, unsigned int flags)
-+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags)
- {
- ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int);
-@@ -1114,13 +1114,14 @@
-
- return splice_write(pipe, out, ppos, len, flags);
- }
-+EXPORT_SYMBOL(do_splice_from);
-
- /*
- * Attempt to initiate a splice from a file to a pipe.
- */
--static long do_splice_to(struct file *in, loff_t *ppos,
-- struct pipe_inode_info *pipe, size_t len,
-- unsigned int flags)
-+long do_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
- {
- ssize_t (*splice_read)(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
-@@ -1140,6 +1141,7 @@
-
- return splice_read(in, ppos, pipe, len, flags);
- }
-+EXPORT_SYMBOL(do_splice_to);
-
- /**
- * splice_direct_to_actor - splices data directly between two non-pipes
-diff -Nur linux-4.1.10.orig/fs/xattr.c linux-4.1.10/fs/xattr.c
---- linux-4.1.10.orig/fs/xattr.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/fs/xattr.c 2015-10-22 21:35:53.000000000 +0200
-@@ -207,6 +207,7 @@
- *xattr_value = value;
- return error;
- }
-+EXPORT_SYMBOL(vfs_getxattr_alloc);
-
- /* Compare an extended attribute value with the given value */
- int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
-diff -Nur linux-4.1.10.orig/include/linux/file.h linux-4.1.10/include/linux/file.h
---- linux-4.1.10.orig/include/linux/file.h 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/linux/file.h 2015-10-22 21:35:53.000000000 +0200
-@@ -19,6 +19,7 @@
- struct path;
- extern struct file *alloc_file(struct path *, fmode_t mode,
- const struct file_operations *fop);
-+extern struct file *get_empty_filp(void);
-
- static inline void fput_light(struct file *file, int fput_needed)
- {
-diff -Nur linux-4.1.10.orig/include/linux/fs.h linux-4.1.10/include/linux/fs.h
---- linux-4.1.10.orig/include/linux/fs.h 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/linux/fs.h 2015-10-22 21:35:53.000000000 +0200
-@@ -1649,6 +1649,12 @@
- struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
-+typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *);
-+typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t,
-+ loff_t *);
-+vfs_readf_t vfs_readf(struct file *file);
-+vfs_writef_t vfs_writef(struct file *file);
-+
- extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
- extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
- extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
-diff -Nur linux-4.1.10.orig/include/linux/mm.h linux-4.1.10/include/linux/mm.h
---- linux-4.1.10.orig/include/linux/mm.h 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/linux/mm.h 2015-10-22 21:35:53.000000000 +0200
-@@ -1200,6 +1200,28 @@
- }
- #endif
-
-+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int);
-+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[],
-+ int);
-+extern void vma_do_get_file(struct vm_area_struct *, const char[], int);
-+extern void vma_do_fput(struct vm_area_struct *, const char[], int);
-+
-+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \
-+ __LINE__)
-+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \
-+ __LINE__)
-+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__)
-+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__)
-+
-+#ifndef CONFIG_MMU
-+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int);
-+extern void vmr_do_fput(struct vm_region *, const char[], int);
-+
-+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \
-+ __LINE__)
-+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__)
-+#endif /* !CONFIG_MMU */
-+
- extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
- extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write);
-diff -Nur linux-4.1.10.orig/include/linux/mm_types.h linux-4.1.10/include/linux/mm_types.h
---- linux-4.1.10.orig/include/linux/mm_types.h 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/linux/mm_types.h 2015-10-22 21:35:53.000000000 +0200
-@@ -232,6 +232,7 @@
- unsigned long vm_top; /* region allocated to here */
- unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
- struct file *vm_file; /* the backing file or NULL */
-+ struct file *vm_prfile; /* the virtual backing file or NULL */
-
- int vm_usage; /* region usage count (access under nommu_region_sem) */
- bool vm_icache_flushed : 1; /* true if the icache has been flushed for
-@@ -296,6 +297,7 @@
- unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
- struct file * vm_file; /* File we map to (can be NULL). */
-+ struct file *vm_prfile; /* shadow of vm_file */
- void * vm_private_data; /* was vm_pte (shared mem) */
-
- #ifndef CONFIG_MMU
-diff -Nur linux-4.1.10.orig/include/linux/splice.h linux-4.1.10/include/linux/splice.h
---- linux-4.1.10.orig/include/linux/splice.h 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/linux/splice.h 2015-10-22 21:35:53.000000000 +0200
-@@ -83,4 +83,10 @@
- extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
-
- extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
-+
-+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags);
-+extern long do_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags);
- #endif
-diff -Nur linux-4.1.10.orig/include/uapi/linux/aufs_type.h linux-4.1.10/include/uapi/linux/aufs_type.h
---- linux-4.1.10.orig/include/uapi/linux/aufs_type.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/include/uapi/linux/aufs_type.h 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,419 @@
-+/*
-+ * Copyright (C) 2005-2015 Junjiro R. Okajima
-+ *
-+ * This program, aufs is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef __AUFS_TYPE_H__
-+#define __AUFS_TYPE_H__
-+
-+#define AUFS_NAME "aufs"
-+
-+#ifdef __KERNEL__
-+/*
-+ * define it before including all other headers.
-+ * sched.h may use pr_* macros before defining "current", so define the
-+ * no-current version first, and re-define later.
-+ */
-+#define pr_fmt(fmt) AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
-+#include <linux/sched.h>
-+#undef pr_fmt
-+#define pr_fmt(fmt) \
-+ AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
-+ (int)sizeof(current->comm), current->comm, current->pid
-+#else
-+#include <stdint.h>
-+#include <sys/types.h>
-+#endif /* __KERNEL__ */
-+
-+#include <linux/limits.h>
-+
-+#define AUFS_VERSION "4.1-20151012"
-+
-+/* todo? move this to linux-2.6.19/include/magic.h */
-+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_AUFS_BRANCH_MAX_127
-+typedef int8_t aufs_bindex_t;
-+#define AUFS_BRANCH_MAX 127
-+#else
-+typedef int16_t aufs_bindex_t;
-+#ifdef CONFIG_AUFS_BRANCH_MAX_511
-+#define AUFS_BRANCH_MAX 511
-+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
-+#define AUFS_BRANCH_MAX 1023
-+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
-+#define AUFS_BRANCH_MAX 32767
-+#endif
-+#endif
-+
-+#ifdef __KERNEL__
-+#ifndef AUFS_BRANCH_MAX
-+#error unknown CONFIG_AUFS_BRANCH_MAX value
-+#endif
-+#endif /* __KERNEL__ */
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define AUFS_FSTYPE AUFS_NAME
-+
-+#define AUFS_ROOT_INO 2
-+#define AUFS_FIRST_INO 11
-+
-+#define AUFS_WH_PFX ".wh."
-+#define AUFS_WH_PFX_LEN ((int)sizeof(AUFS_WH_PFX) - 1)
-+#define AUFS_WH_TMP_LEN 4
-+/* a limit for rmdir/rename a dir and copyup */
-+#define AUFS_MAX_NAMELEN (NAME_MAX \
-+ - AUFS_WH_PFX_LEN * 2 /* doubly whiteouted */\
-+ - 1 /* dot */\
-+ - AUFS_WH_TMP_LEN) /* hex */
-+#define AUFS_XINO_FNAME "." AUFS_NAME ".xino"
-+#define AUFS_XINO_DEFPATH "/tmp/" AUFS_XINO_FNAME
-+#define AUFS_XINO_DEF_SEC 30 /* seconds */
-+#define AUFS_XINO_DEF_TRUNC 45 /* percentage */
-+#define AUFS_DIRWH_DEF 3
-+#define AUFS_RDCACHE_DEF 10 /* seconds */
-+#define AUFS_RDCACHE_MAX 3600 /* seconds */
-+#define AUFS_RDBLK_DEF 512 /* bytes */
-+#define AUFS_RDHASH_DEF 32
-+#define AUFS_WKQ_NAME AUFS_NAME "d"
-+#define AUFS_MFS_DEF_SEC 30 /* seconds */
-+#define AUFS_MFS_MAX_SEC 3600 /* seconds */
-+#define AUFS_FHSM_CACHE_DEF_SEC 30 /* seconds */
-+#define AUFS_PLINK_WARN 50 /* number of plinks in a single bucket */
-+
-+/* pseudo-link maintenance under /proc */
-+#define AUFS_PLINK_MAINT_NAME "plink_maint"
-+#define AUFS_PLINK_MAINT_DIR "fs/" AUFS_NAME
-+#define AUFS_PLINK_MAINT_PATH AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
-+
-+#define AUFS_DIROPQ_NAME AUFS_WH_PFX ".opq" /* whiteouted doubly */
-+#define AUFS_WH_DIROPQ AUFS_WH_PFX AUFS_DIROPQ_NAME
-+
-+#define AUFS_BASE_NAME AUFS_WH_PFX AUFS_NAME
-+#define AUFS_PLINKDIR_NAME AUFS_WH_PFX "plnk"
-+#define AUFS_ORPHDIR_NAME AUFS_WH_PFX "orph"
-+
-+/* doubly whiteouted */
-+#define AUFS_WH_BASE AUFS_WH_PFX AUFS_BASE_NAME
-+#define AUFS_WH_PLINKDIR AUFS_WH_PFX AUFS_PLINKDIR_NAME
-+#define AUFS_WH_ORPHDIR AUFS_WH_PFX AUFS_ORPHDIR_NAME
-+
-+/* branch permissions and attributes */
-+#define AUFS_BRPERM_RW "rw"
-+#define AUFS_BRPERM_RO "ro"
-+#define AUFS_BRPERM_RR "rr"
-+#define AUFS_BRATTR_COO_REG "coo_reg"
-+#define AUFS_BRATTR_COO_ALL "coo_all"
-+#define AUFS_BRATTR_FHSM "fhsm"
-+#define AUFS_BRATTR_UNPIN "unpin"
-+#define AUFS_BRATTR_ICEX "icex"
-+#define AUFS_BRATTR_ICEX_SEC "icexsec"
-+#define AUFS_BRATTR_ICEX_SYS "icexsys"
-+#define AUFS_BRATTR_ICEX_TR "icextr"
-+#define AUFS_BRATTR_ICEX_USR "icexusr"
-+#define AUFS_BRATTR_ICEX_OTH "icexoth"
-+#define AUFS_BRRATTR_WH "wh"
-+#define AUFS_BRWATTR_NLWH "nolwh"
-+#define AUFS_BRWATTR_MOO "moo"
-+
-+#define AuBrPerm_RW 1 /* writable, hardlinkable wh */
-+#define AuBrPerm_RO (1 << 1) /* readonly */
-+#define AuBrPerm_RR (1 << 2) /* natively readonly */
-+#define AuBrPerm_Mask (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
-+
-+#define AuBrAttr_COO_REG (1 << 3) /* copy-up on open */
-+#define AuBrAttr_COO_ALL (1 << 4)
-+#define AuBrAttr_COO_Mask (AuBrAttr_COO_REG | AuBrAttr_COO_ALL)
-+
-+#define AuBrAttr_FHSM (1 << 5) /* file-based hsm */
-+#define AuBrAttr_UNPIN (1 << 6) /* rename-able top dir of
-+ branch. meaningless since
-+ linux-3.18-rc1 */
-+
-+/* ignore error in copying XATTR */
-+#define AuBrAttr_ICEX_SEC (1 << 7)
-+#define AuBrAttr_ICEX_SYS (1 << 8)
-+#define AuBrAttr_ICEX_TR (1 << 9)
-+#define AuBrAttr_ICEX_USR (1 << 10)
-+#define AuBrAttr_ICEX_OTH (1 << 11)
-+#define AuBrAttr_ICEX (AuBrAttr_ICEX_SEC \
-+ | AuBrAttr_ICEX_SYS \
-+ | AuBrAttr_ICEX_TR \
-+ | AuBrAttr_ICEX_USR \
-+ | AuBrAttr_ICEX_OTH)
-+
-+#define AuBrRAttr_WH (1 << 12) /* whiteout-able */
-+#define AuBrRAttr_Mask AuBrRAttr_WH
-+
-+#define AuBrWAttr_NoLinkWH (1 << 13) /* un-hardlinkable whiteouts */
-+#define AuBrWAttr_MOO (1 << 14) /* move-up on open */
-+#define AuBrWAttr_Mask (AuBrWAttr_NoLinkWH | AuBrWAttr_MOO)
-+
-+#define AuBrAttr_CMOO_Mask (AuBrAttr_COO_Mask | AuBrWAttr_MOO)
-+
-+/* #warning test userspace */
-+#ifdef __KERNEL__
-+#ifndef CONFIG_AUFS_FHSM
-+#undef AuBrAttr_FHSM
-+#define AuBrAttr_FHSM 0
-+#endif
-+#ifndef CONFIG_AUFS_XATTR
-+#undef AuBrAttr_ICEX
-+#define AuBrAttr_ICEX 0
-+#undef AuBrAttr_ICEX_SEC
-+#define AuBrAttr_ICEX_SEC 0
-+#undef AuBrAttr_ICEX_SYS
-+#define AuBrAttr_ICEX_SYS 0
-+#undef AuBrAttr_ICEX_TR
-+#define AuBrAttr_ICEX_TR 0
-+#undef AuBrAttr_ICEX_USR
-+#define AuBrAttr_ICEX_USR 0
-+#undef AuBrAttr_ICEX_OTH
-+#define AuBrAttr_ICEX_OTH 0
-+#endif
-+#endif
-+
-+/* the longest combination */
-+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR have no effect here */
-+#define AuBrPermStrSz sizeof(AUFS_BRPERM_RW \
-+ "+" AUFS_BRATTR_COO_REG \
-+ "+" AUFS_BRATTR_FHSM \
-+ "+" AUFS_BRATTR_UNPIN \
-+ "+" AUFS_BRATTR_ICEX_SEC \
-+ "+" AUFS_BRATTR_ICEX_SYS \
-+ "+" AUFS_BRATTR_ICEX_USR \
-+ "+" AUFS_BRATTR_ICEX_OTH \
-+ "+" AUFS_BRWATTR_NLWH)
-+
-+typedef struct {
-+ char a[AuBrPermStrSz];
-+} au_br_perm_str_t;
-+
-+static inline int au_br_writable(int brperm)
-+{
-+ return brperm & AuBrPerm_RW;
-+}
-+
-+static inline int au_br_whable(int brperm)
-+{
-+ return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
-+}
-+
-+static inline int au_br_wh_linkable(int brperm)
-+{
-+ return !(brperm & AuBrWAttr_NoLinkWH);
-+}
-+
-+static inline int au_br_cmoo(int brperm)
-+{
-+ return brperm & AuBrAttr_CMOO_Mask;
-+}
-+
-+static inline int au_br_fhsm(int brperm)
-+{
-+ return brperm & AuBrAttr_FHSM;
-+}
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* ioctl */
-+enum {
-+ /* readdir in userspace */
-+ AuCtl_RDU,
-+ AuCtl_RDU_INO,
-+
-+ AuCtl_WBR_FD, /* pathconf wrapper */
-+ AuCtl_IBUSY, /* busy inode */
-+ AuCtl_MVDOWN, /* move-down */
-+ AuCtl_BR, /* info about branches */
-+ AuCtl_FHSM_FD /* connection for fhsm */
-+};
-+
-+/* borrowed from linux/include/linux/kernel.h */
-+#ifndef ALIGN
-+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
-+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
-+#endif
-+
-+/* borrowed from linux/include/linux/compiler-gcc3.h */
-+#ifndef __aligned
-+#define __aligned(x) __attribute__((aligned(x)))
-+#endif
-+
-+#ifdef __KERNEL__
-+#ifndef __packed
-+#define __packed __attribute__((packed))
-+#endif
-+#endif
-+
-+struct au_rdu_cookie {
-+ uint64_t h_pos;
-+ int16_t bindex;
-+ uint8_t flags;
-+ uint8_t pad;
-+ uint32_t generation;
-+} __aligned(8);
-+
-+struct au_rdu_ent {
-+ uint64_t ino;
-+ int16_t bindex;
-+ uint8_t type;
-+ uint8_t nlen;
-+ uint8_t wh;
-+ char name[0];
-+} __aligned(8);
-+
-+static inline int au_rdu_len(int nlen)
-+{
-+ /* include the terminating NULL */
-+ return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
-+ sizeof(uint64_t));
-+}
-+
-+union au_rdu_ent_ul {
-+ struct au_rdu_ent __user *e;
-+ uint64_t ul;
-+};
-+
-+enum {
-+ AufsCtlRduV_SZ,
-+ AufsCtlRduV_End
-+};
-+
-+struct aufs_rdu {
-+ /* input */
-+ union {
-+ uint64_t sz; /* AuCtl_RDU */
-+ uint64_t nent; /* AuCtl_RDU_INO */
-+ };
-+ union au_rdu_ent_ul ent;
-+ uint16_t verify[AufsCtlRduV_End];
-+
-+ /* input/output */
-+ uint32_t blk;
-+
-+ /* output */
-+ union au_rdu_ent_ul tail;
-+ /* number of entries which were added in a single call */
-+ uint64_t rent;
-+ uint8_t full;
-+ uint8_t shwh;
-+
-+ struct au_rdu_cookie cookie;
-+} __aligned(8);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct aufs_wbr_fd {
-+ uint32_t oflags;
-+ int16_t brid;
-+} __aligned(8);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+struct aufs_ibusy {
-+ uint64_t ino, h_ino;
-+ int16_t bindex;
-+} __aligned(8);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+/* error code for move-down */
-+/* the actual message strings are implemented in aufs-util.git */
-+enum {
-+ EAU_MVDOWN_OPAQUE = 1,
-+ EAU_MVDOWN_WHITEOUT,
-+ EAU_MVDOWN_UPPER,
-+ EAU_MVDOWN_BOTTOM,
-+ EAU_MVDOWN_NOUPPER,
-+ EAU_MVDOWN_NOLOWERBR,
-+ EAU_Last
-+};
-+
-+/* flags for move-down */
-+#define AUFS_MVDOWN_DMSG 1
-+#define AUFS_MVDOWN_OWLOWER (1 << 1) /* overwrite lower */
-+#define AUFS_MVDOWN_KUPPER (1 << 2) /* keep upper */
-+#define AUFS_MVDOWN_ROLOWER (1 << 3) /* do even if lower is RO */
-+#define AUFS_MVDOWN_ROLOWER_R (1 << 4) /* did on lower RO */
-+#define AUFS_MVDOWN_ROUPPER (1 << 5) /* do even if upper is RO */
-+#define AUFS_MVDOWN_ROUPPER_R (1 << 6) /* did on upper RO */
-+#define AUFS_MVDOWN_BRID_UPPER (1 << 7) /* upper brid */
-+#define AUFS_MVDOWN_BRID_LOWER (1 << 8) /* lower brid */
-+#define AUFS_MVDOWN_FHSM_LOWER (1 << 9) /* find fhsm attr for lower */
-+#define AUFS_MVDOWN_STFS (1 << 10) /* req. stfs */
-+#define AUFS_MVDOWN_STFS_FAILED (1 << 11) /* output: stfs is unusable */
-+#define AUFS_MVDOWN_BOTTOM (1 << 12) /* output: no more lowers */
-+
-+/* index for move-down */
-+enum {
-+ AUFS_MVDOWN_UPPER,
-+ AUFS_MVDOWN_LOWER,
-+ AUFS_MVDOWN_NARRAY
-+};
-+
-+/*
-+ * additional info of move-down
-+ * number of free blocks and inodes.
-+ * subset of struct kstatfs, but smaller and always 64bit.
-+ */
-+struct aufs_stfs {
-+ uint64_t f_blocks;
-+ uint64_t f_bavail;
-+ uint64_t f_files;
-+ uint64_t f_ffree;
-+};
-+
-+struct aufs_stbr {
-+ int16_t brid; /* optional input */
-+ int16_t bindex; /* output */
-+ struct aufs_stfs stfs; /* output when AUFS_MVDOWN_STFS set */
-+} __aligned(8);
-+
-+struct aufs_mvdown {
-+ uint32_t flags; /* input/output */
-+ struct aufs_stbr stbr[AUFS_MVDOWN_NARRAY]; /* input/output */
-+ int8_t au_errno; /* output */
-+} __aligned(8);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+union aufs_brinfo {
-+ /* PATH_MAX may differ between kernel-space and user-space */
-+ char _spacer[4096];
-+ struct {
-+ int16_t id;
-+ int perm;
-+ char path[0];
-+ };
-+} __aligned(8);
-+
-+/* ---------------------------------------------------------------------- */
-+
-+#define AuCtlType 'A'
-+#define AUFS_CTL_RDU _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
-+#define AUFS_CTL_RDU_INO _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
-+#define AUFS_CTL_WBR_FD _IOW(AuCtlType, AuCtl_WBR_FD, \
-+ struct aufs_wbr_fd)
-+#define AUFS_CTL_IBUSY _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
-+#define AUFS_CTL_MVDOWN _IOWR(AuCtlType, AuCtl_MVDOWN, \
-+ struct aufs_mvdown)
-+#define AUFS_CTL_BRINFO _IOW(AuCtlType, AuCtl_BR, union aufs_brinfo)
-+#define AUFS_CTL_FHSM_FD _IOW(AuCtlType, AuCtl_FHSM_FD, int)
-+
-+#endif /* __AUFS_TYPE_H__ */
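The branch permission word is a plain bitmask, so the inline helpers above reduce to single AND operations. A small illustration using only the constants and helpers defined in this header (the function name is made up for the example):

	/* illustration only: a readonly branch that may still carry whiteouts */
	static int example_branch_bits(void)
	{
		int brperm = AuBrPerm_RO | AuBrRAttr_WH;

		if (au_br_writable(brperm))	/* 0: the RW bit is not set */
			return -1;
		if (!au_br_whable(brperm))	/* nonzero: RO + "wh" is whiteout-able */
			return -1;
		return au_br_wh_linkable(brperm); /* nonzero: NoLinkWH is not set */
	}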
-diff -Nur linux-4.1.10.orig/include/uapi/linux/Kbuild linux-4.1.10/include/uapi/linux/Kbuild
---- linux-4.1.10.orig/include/uapi/linux/Kbuild 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/include/uapi/linux/Kbuild 2015-10-22 21:35:53.000000000 +0200
-@@ -59,6 +59,7 @@
- header-y += atm_tcp.h
- header-y += atm_zatm.h
- header-y += audit.h
-+header-y += aufs_type.h
- header-y += auto_fs4.h
- header-y += auto_fs.h
- header-y += auxvec.h
-diff -Nur linux-4.1.10.orig/kernel/fork.c linux-4.1.10/kernel/fork.c
---- linux-4.1.10.orig/kernel/fork.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/kernel/fork.c 2015-10-22 21:35:53.000000000 +0200
-@@ -456,7 +456,7 @@
- struct inode *inode = file_inode(file);
- struct address_space *mapping = file->f_mapping;
-
-- get_file(file);
-+ vma_get_file(tmp);
- if (tmp->vm_flags & VM_DENYWRITE)
- atomic_dec(&inode->i_writecount);
- i_mmap_lock_write(mapping);
-diff -Nur linux-4.1.10.orig/MAINTAINERS linux-4.1.10/MAINTAINERS
---- linux-4.1.10.orig/MAINTAINERS 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/MAINTAINERS 2015-10-22 21:35:53.000000000 +0200
-@@ -1880,6 +1880,19 @@
- F: include/uapi/linux/audit.h
- F: kernel/audit*
-
-+AUFS (advanced multi layered unification filesystem) FILESYSTEM
-+M: "J. R. Okajima" <hooanon05g@gmail.com>
-+L: linux-unionfs@vger.kernel.org
-+L: aufs-users@lists.sourceforge.net (members only)
-+W: http://aufs.sourceforge.net
-+T: git://github.com/sfjro/aufs4-linux.git
-+S: Supported
-+F: Documentation/filesystems/aufs/
-+F: Documentation/ABI/testing/debugfs-aufs
-+F: Documentation/ABI/testing/sysfs-aufs
-+F: fs/aufs/
-+F: include/uapi/linux/aufs_type.h
-+
- AUXILIARY DISPLAY DRIVERS
- M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
- W: http://miguelojeda.es/auxdisplay.htm
-diff -Nur linux-4.1.10.orig/mm/filemap.c linux-4.1.10/mm/filemap.c
---- linux-4.1.10.orig/mm/filemap.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/mm/filemap.c 2015-10-22 21:35:53.000000000 +0200
-@@ -2062,7 +2062,7 @@
- int ret = VM_FAULT_LOCKED;
-
- sb_start_pagefault(inode->i_sb);
-- file_update_time(vma->vm_file);
-+ vma_file_update_time(vma);
- lock_page(page);
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
-diff -Nur linux-4.1.10.orig/mm/Makefile linux-4.1.10/mm/Makefile
---- linux-4.1.10.orig/mm/Makefile 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/mm/Makefile 2015-10-22 21:35:53.000000000 +0200
-@@ -21,7 +21,7 @@
- mm_init.o mmu_context.o percpu.o slab_common.o \
- compaction.o vmacache.o \
- interval_tree.o list_lru.o workingset.o \
-- debug.o $(mmu-y)
-+ prfile.o debug.o $(mmu-y)
-
- obj-y += init-mm.o
-
-diff -Nur linux-4.1.10.orig/mm/memory.c linux-4.1.10/mm/memory.c
---- linux-4.1.10.orig/mm/memory.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/mm/memory.c 2015-10-22 21:35:53.000000000 +0200
-@@ -2034,7 +2034,7 @@
- }
-
- if (!page_mkwrite)
-- file_update_time(vma->vm_file);
-+ vma_file_update_time(vma);
- }
-
- return VM_FAULT_WRITE;
-diff -Nur linux-4.1.10.orig/mm/mmap.c linux-4.1.10/mm/mmap.c
---- linux-4.1.10.orig/mm/mmap.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/mm/mmap.c 2015-10-22 21:35:53.000000000 +0200
-@@ -274,7 +274,7 @@
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
- if (vma->vm_file)
-- fput(vma->vm_file);
-+ vma_fput(vma);
- mpol_put(vma_policy(vma));
- kmem_cache_free(vm_area_cachep, vma);
- return next;
-@@ -886,7 +886,7 @@
- if (remove_next) {
- if (file) {
- uprobe_munmap(next, next->vm_start, next->vm_end);
-- fput(file);
-+ vma_fput(vma);
- }
- if (next->anon_vma)
- anon_vma_merge(vma, next);
-@@ -1671,8 +1671,8 @@
- return addr;
-
- unmap_and_free_vma:
-+ vma_fput(vma);
- vma->vm_file = NULL;
-- fput(file);
-
- /* Undo any partial mapping done by a device driver. */
- unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
-@@ -2473,7 +2473,7 @@
- goto out_free_mpol;
-
- if (new->vm_file)
-- get_file(new->vm_file);
-+ vma_get_file(new);
-
- if (new->vm_ops && new->vm_ops->open)
- new->vm_ops->open(new);
-@@ -2492,7 +2492,7 @@
- if (new->vm_ops && new->vm_ops->close)
- new->vm_ops->close(new);
- if (new->vm_file)
-- fput(new->vm_file);
-+ vma_fput(new);
- unlink_anon_vmas(new);
- out_free_mpol:
- mpol_put(vma_policy(new));
-@@ -2635,7 +2635,6 @@
- struct vm_area_struct *vma;
- unsigned long populate = 0;
- unsigned long ret = -EINVAL;
-- struct file *file;
-
- pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
- "See Documentation/vm/remap_file_pages.txt.\n",
-@@ -2679,10 +2678,10 @@
- munlock_vma_pages_range(vma, start, start + size);
- }
-
-- file = get_file(vma->vm_file);
-+ vma_get_file(vma);
- ret = do_mmap_pgoff(vma->vm_file, start, size,
- prot, flags, pgoff, &populate);
-- fput(file);
-+ vma_fput(vma);
- out:
- up_write(&mm->mmap_sem);
- if (populate)
-@@ -2949,7 +2948,7 @@
- if (anon_vma_clone(new_vma, vma))
- goto out_free_mempol;
- if (new_vma->vm_file)
-- get_file(new_vma->vm_file);
-+ vma_get_file(new_vma);
- if (new_vma->vm_ops && new_vma->vm_ops->open)
- new_vma->vm_ops->open(new_vma);
- vma_link(mm, new_vma, prev, rb_link, rb_parent);
-diff -Nur linux-4.1.10.orig/mm/nommu.c linux-4.1.10/mm/nommu.c
---- linux-4.1.10.orig/mm/nommu.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/mm/nommu.c 2015-10-22 21:35:53.000000000 +0200
-@@ -693,7 +693,7 @@
- up_write(&nommu_region_sem);
-
- if (region->vm_file)
-- fput(region->vm_file);
-+ vmr_fput(region);
-
- /* IO memory and memory shared directly out of the pagecache
- * from ramfs/tmpfs mustn't be released here */
-@@ -858,7 +858,7 @@
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
- if (vma->vm_file)
-- fput(vma->vm_file);
-+ vma_fput(vma);
- put_nommu_region(vma->vm_region);
- kmem_cache_free(vm_area_cachep, vma);
- }
-@@ -1398,7 +1398,7 @@
- goto error_just_free;
- }
- }
-- fput(region->vm_file);
-+ vmr_fput(region);
- kmem_cache_free(vm_region_jar, region);
- region = pregion;
- result = start;
-@@ -1474,10 +1474,10 @@
- up_write(&nommu_region_sem);
- error:
- if (region->vm_file)
-- fput(region->vm_file);
-+ vmr_fput(region);
- kmem_cache_free(vm_region_jar, region);
- if (vma->vm_file)
-- fput(vma->vm_file);
-+ vma_fput(vma);
- kmem_cache_free(vm_area_cachep, vma);
- kleave(" = %d", ret);
- return ret;
-diff -Nur linux-4.1.10.orig/mm/prfile.c linux-4.1.10/mm/prfile.c
---- linux-4.1.10.orig/mm/prfile.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.10/mm/prfile.c 2015-10-22 21:35:53.000000000 +0200
-@@ -0,0 +1,86 @@
-+/*
-+ * Mainly for aufs, which mmap(2)s a different file and wants to print a different path
-+ * in /proc/PID/maps.
-+ * Call these functions via macros defined in linux/mm.h.
-+ *
-+ * See Documentation/filesystems/aufs/design/06mmap.txt
-+ *
-+ * Copyright (c) 2014 Junjiro R. Okajima
-+ * Copyright (c) 2014 Ian Campbell
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+
-+/* #define PRFILE_TRACE */
-+static inline void prfile_trace(struct file *f, struct file *pr,
-+ const char func[], int line, const char func2[])
-+{
-+#ifdef PRFILE_TRACE
-+ if (pr)
-+ pr_info("%s:%d: %s, %s\n", func, line, func2,
-+ f ? (char *)f->f_path.dentry->d_name.name : "(null)");
-+#endif
-+}
-+
-+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[],
-+ int line)
-+{
-+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ file_update_time(f);
-+ if (f && pr)
-+ file_update_time(pr);
-+}
-+
-+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[],
-+ int line)
-+{
-+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ return (f && pr) ? pr : f;
-+}
-+
-+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line)
-+{
-+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ get_file(f);
-+ if (f && pr)
-+ get_file(pr);
-+}
-+
-+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line)
-+{
-+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ fput(f);
-+ if (f && pr)
-+ fput(pr);
-+}
-+
-+#ifndef CONFIG_MMU
-+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[],
-+ int line)
-+{
-+ struct file *f = region->vm_file, *pr = region->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ return (f && pr) ? pr : f;
-+}
-+
-+void vmr_do_fput(struct vm_region *region, const char func[], int line)
-+{
-+ struct file *f = region->vm_file, *pr = region->vm_prfile;
-+
-+ prfile_trace(f, pr, func, line, __func__);
-+ fput(f);
-+ if (f && pr)
-+ fput(pr);
-+}
-+#endif /* !CONFIG_MMU */
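The helpers above only consume vm_prfile; the producer side (aufs' own ->mmap) is not part of this hunk. As a hedged sketch of what a producer has to do — given that vma_do_fput() drops a reference on both files, a reference must be held on the user-visible file when it is stored in vm_prfile, while vm_file keeps pointing at the branch file. All names below are illustrative, not aufs code:

	/* hypothetical producer sketch, not the real aufs mmap */
	static void example_set_prfile(struct vm_area_struct *vma,
				       struct file *visible_file)
	{
		if (!vma->vm_prfile) {
			get_file(visible_file);	/* paired with the fput() in vma_do_fput() */
			vma->vm_prfile = visible_file;
		}
	}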
-diff -Nur linux-4.1.10.orig/security/commoncap.c linux-4.1.10/security/commoncap.c
---- linux-4.1.10.orig/security/commoncap.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/security/commoncap.c 2015-10-22 21:35:53.000000000 +0200
-@@ -975,9 +975,11 @@
- }
- return ret;
- }
-+EXPORT_SYMBOL(cap_mmap_addr);
-
- int cap_mmap_file(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags)
- {
- return 0;
- }
-+EXPORT_SYMBOL(cap_mmap_file);
-diff -Nur linux-4.1.10.orig/security/device_cgroup.c linux-4.1.10/security/device_cgroup.c
---- linux-4.1.10.orig/security/device_cgroup.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/security/device_cgroup.c 2015-10-22 21:35:53.000000000 +0200
-@@ -7,6 +7,7 @@
- #include <linux/device_cgroup.h>
- #include <linux/cgroup.h>
- #include <linux/ctype.h>
-+#include <linux/export.h>
- #include <linux/list.h>
- #include <linux/uaccess.h>
- #include <linux/seq_file.h>
-@@ -849,6 +850,7 @@
- return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
- access);
- }
-+EXPORT_SYMBOL(__devcgroup_inode_permission);
-
- int devcgroup_inode_mknod(int mode, dev_t dev)
- {
-diff -Nur linux-4.1.10.orig/security/security.c linux-4.1.10/security/security.c
---- linux-4.1.10.orig/security/security.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/security/security.c 2015-10-22 21:35:53.000000000 +0200
-@@ -430,6 +430,7 @@
- return 0;
- return security_ops->path_rmdir(dir, dentry);
- }
-+EXPORT_SYMBOL(security_path_rmdir);
-
- int security_path_unlink(struct path *dir, struct dentry *dentry)
- {
-@@ -446,6 +447,7 @@
- return 0;
- return security_ops->path_symlink(dir, dentry, old_name);
- }
-+EXPORT_SYMBOL(security_path_symlink);
-
- int security_path_link(struct dentry *old_dentry, struct path *new_dir,
- struct dentry *new_dentry)
-@@ -454,6 +456,7 @@
- return 0;
- return security_ops->path_link(old_dentry, new_dir, new_dentry);
- }
-+EXPORT_SYMBOL(security_path_link);
-
- int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry,
-@@ -481,6 +484,7 @@
- return 0;
- return security_ops->path_truncate(path);
- }
-+EXPORT_SYMBOL(security_path_truncate);
-
- int security_path_chmod(struct path *path, umode_t mode)
- {
-@@ -488,6 +492,7 @@
- return 0;
- return security_ops->path_chmod(path, mode);
- }
-+EXPORT_SYMBOL(security_path_chmod);
-
- int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
- {
-@@ -495,6 +500,7 @@
- return 0;
- return security_ops->path_chown(path, uid, gid);
- }
-+EXPORT_SYMBOL(security_path_chown);
-
- int security_path_chroot(struct path *path)
- {
-@@ -580,6 +586,7 @@
- return 0;
- return security_ops->inode_readlink(dentry);
- }
-+EXPORT_SYMBOL(security_inode_readlink);
-
- int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
- {
-@@ -594,6 +601,7 @@
- return 0;
- return security_ops->inode_permission(inode, mask);
- }
-+EXPORT_SYMBOL(security_inode_permission);
-
- int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
- {
-@@ -716,6 +724,7 @@
-
- return fsnotify_perm(file, mask);
- }
-+EXPORT_SYMBOL(security_file_permission);
-
- int security_file_alloc(struct file *file)
- {
-@@ -775,6 +784,7 @@
- return ret;
- return ima_file_mmap(file, prot);
- }
-+EXPORT_SYMBOL(security_mmap_file);
-
- int security_mmap_addr(unsigned long addr)
- {
diff --git a/target/linux/patches/4.1.13/cleankernel.patch b/target/linux/patches/4.1.13/cleankernel.patch
deleted file mode 100644
index 59693f426..000000000
--- a/target/linux/patches/4.1.13/cleankernel.patch
+++ /dev/null
@@ -1,11 +0,0 @@
-diff -Nur linux-4.1.10.orig/scripts/Makefile.headersinst linux-4.1.10/scripts/Makefile.headersinst
---- linux-4.1.10.orig/scripts/Makefile.headersinst 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/scripts/Makefile.headersinst 2015-10-15 11:23:35.000000000 +0200
-@@ -107,7 +107,6 @@
-
- targets += $(install-file)
- $(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE
-- $(if $(unwanted),$(call cmd,remove),)
- $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@)))
- $(call if_changed,install)
-
diff --git a/target/linux/patches/4.1.13/cris-header.patch b/target/linux/patches/4.1.13/cris-header.patch
deleted file mode 100644
index 2b5a88461..000000000
--- a/target/linux/patches/4.1.13/cris-header.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -Nur linux-3.16.2.orig/arch/cris/include/arch-v10/arch/Kbuild linux-3.16.2/arch/cris/include/arch-v10/arch/Kbuild
---- linux-3.16.2.orig/arch/cris/include/arch-v10/arch/Kbuild 2014-09-06 01:37:11.000000000 +0200
-+++ linux-3.16.2/arch/cris/include/arch-v10/arch/Kbuild 2014-09-26 19:24:50.000000000 +0200
-@@ -1 +1,2 @@
- # CRISv10 arch
-+header-y += ptrace.h
-diff -Nur linux-3.16.2.orig/arch/cris/include/arch-v32/arch/Kbuild linux-3.16.2/arch/cris/include/arch-v32/arch/Kbuild
---- linux-3.16.2.orig/arch/cris/include/arch-v32/arch/Kbuild 2014-09-06 01:37:11.000000000 +0200
-+++ linux-3.16.2/arch/cris/include/arch-v32/arch/Kbuild 2014-09-26 19:24:31.000000000 +0200
-@@ -1 +1,2 @@
- # CRISv32 arch
-+header-y += ptrace.h
diff --git a/target/linux/patches/4.1.13/initramfs-nosizelimit.patch b/target/linux/patches/4.1.13/initramfs-nosizelimit.patch
deleted file mode 100644
index 40d2f6bd8..000000000
--- a/target/linux/patches/4.1.13/initramfs-nosizelimit.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 9a18df7a71bfa620b1278777d64783a359d7eb4e Mon Sep 17 00:00:00 2001
-From: Thorsten Glaser <tg@mirbsd.org>
-Date: Sun, 4 May 2014 01:37:54 +0200
-Subject: [PATCH] mount tmpfs-as-rootfs (initramfs) with -o
- nr_blocks=0,nr_inodes=0
-
-I would have preferred to write this patch to be able to pass
-rootflags=nr_blocks=0,nr_inodes=0 on the kernel command line,
-and then hand these rootflags over to the initramfs (tmpfs)
-mount in the same way the kernel hands them over to the block
-device rootfs mount. But at least the Debian/m68k initrd also
-parses $rootflags from the environment and adds it to the call
-to the user-space mount for the eventual root device, which
-would make the kernel command line rootflags option be used in
-both places (tmpfs and e.g. ext4) which is guaranteed to error
-out in at least one of them.
-
-This change is intended to aid people in a setup where the
-initrd is the final root filesystem, i.e. not mounted over.
-This is especially useful in automated tests running on qemu
-for boards with constrained memory (e.g. 64 MiB on sh4).
-
-Considering that the initramfs is normally emptied out then
-overmounted, this change is probably safe for setups where
-initramfs just hosts early userspace, too, since the tmpfs
-backing it is not accessible any more later on, AFAICT.
-
-Signed-off-by: Thorsten Glaser <tg@mirbsd.org>
----
- init/do_mounts.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/init/do_mounts.c b/init/do_mounts.c
-index 82f2288..55a4cfe 100644
---- a/init/do_mounts.c
-+++ b/init/do_mounts.c
-@@ -594,6 +594,7 @@ out:
- }
-
- static bool is_tmpfs;
-+static char tmpfs_rootflags[] = "nr_blocks=0,nr_inodes=0";
- static struct dentry *rootfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
- {
-@@ -606,6 +607,9 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
- if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
- fill = shmem_fill_super;
-
-+ if (is_tmpfs)
-+ data = tmpfs_rootflags;
-+
- return mount_nodev(fs_type, flags, data, fill);
- }
-
---
-2.0.0.rc0
-
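For reference, tmpfs treats nr_blocks=0 and nr_inodes=0 as "no limit", which is why passing them here lifts the default cap (normally half of RAM) from the initramfs. The same options work for any tmpfs mount from userspace; a minimal sketch, with an arbitrary target path:

	/* userspace sketch: mount an unlimited tmpfs with the options the patch hardcodes */
	#include <sys/mount.h>

	int example_mount_unlimited_tmpfs(const char *target)
	{
		return mount("tmpfs", target, "tmpfs", 0,
			     "nr_blocks=0,nr_inodes=0");
	}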
diff --git a/target/linux/patches/4.1.13/realtime.patch b/target/linux/patches/4.1.13/realtime.patch
deleted file mode 100644
index 3b65f6148..000000000
--- a/target/linux/patches/4.1.13/realtime.patch
+++ /dev/null
@@ -1,27435 +0,0 @@
-diff -Nur linux-4.1.13.orig/arch/alpha/mm/fault.c linux-4.1.13/arch/alpha/mm/fault.c
---- linux-4.1.13.orig/arch/alpha/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/alpha/mm/fault.c 2015-11-29 09:23:09.477622951 +0100
-@@ -23,8 +23,7 @@
- #include <linux/smp.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
--
--#include <asm/uaccess.h>
-+#include <linux/uaccess.h>
-
- extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
-
-@@ -107,7 +106,7 @@
-
- /* If we're in an interrupt context, or have no user context,
- we must not take the fault. */
-- if (!mm || in_atomic())
-+ if (!mm || faulthandler_disabled())
- goto no_context;
-
- #ifdef CONFIG_ALPHA_LARGE_VMALLOC
-diff -Nur linux-4.1.13.orig/arch/arc/include/asm/futex.h linux-4.1.13/arch/arc/include/asm/futex.h
---- linux-4.1.13.orig/arch/arc/include/asm/futex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arc/include/asm/futex.h 2015-11-29 09:23:09.477622951 +0100
-@@ -53,7 +53,7 @@
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -75,7 +75,7 @@
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- if (!ret) {
- switch (cmp) {
-@@ -104,7 +104,7 @@
- return ret;
- }
-
--/* Compare-xchg with preemption disabled.
-+/* Compare-xchg with pagefaults disabled.
- * Notes:
- * -Best-Effort: Exchg happens only if compare succeeds.
- * If compare fails, returns; leaving retry/looping to upper layers
-@@ -121,7 +121,7 @@
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- /* TBD : can use llock/scond */
- __asm__ __volatile__(
-@@ -142,7 +142,7 @@
- : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
- : "cc", "memory");
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- *uval = val;
- return val;
-diff -Nur linux-4.1.13.orig/arch/arc/mm/fault.c linux-4.1.13/arch/arc/mm/fault.c
---- linux-4.1.13.orig/arch/arc/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arc/mm/fault.c 2015-11-29 09:23:09.477622951 +0100
-@@ -86,7 +86,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.13/arch/arm/include/asm/cmpxchg.h
---- linux-4.1.13.orig/arch/arm/include/asm/cmpxchg.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/include/asm/cmpxchg.h 2015-11-29 09:23:09.477622951 +0100
-@@ -129,6 +129,8 @@
-
- #else /* min ARCH >= ARMv6 */
-
-+#define __HAVE_ARCH_CMPXCHG 1
-+
- extern void __bad_cmpxchg(volatile void *ptr, int size);
-
- /*
-diff -Nur linux-4.1.13.orig/arch/arm/include/asm/futex.h linux-4.1.13/arch/arm/include/asm/futex.h
---- linux-4.1.13.orig/arch/arm/include/asm/futex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/include/asm/futex.h 2015-11-29 09:23:09.477622951 +0100
-@@ -93,6 +93,7 @@
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-+ preempt_disable();
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " TUSER(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
-@@ -104,6 +105,8 @@
- : "cc", "memory");
-
- *uval = val;
-+ preempt_enable();
-+
- return ret;
- }
-
-@@ -124,7 +127,10 @@
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+#ifndef CONFIG_SMP
-+ preempt_disable();
-+#endif
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -146,7 +152,10 @@
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-+#ifndef CONFIG_SMP
-+ preempt_enable();
-+#endif
-
- if (!ret) {
- switch (cmp) {
-diff -Nur linux-4.1.13.orig/arch/arm/include/asm/switch_to.h linux-4.1.13/arch/arm/include/asm/switch_to.h
---- linux-4.1.13.orig/arch/arm/include/asm/switch_to.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/include/asm/switch_to.h 2015-11-29 09:23:09.477622951 +0100
-@@ -3,6 +3,13 @@
-
- #include <linux/thread_info.h>
-
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
- /*
- * For v7 SMP cores running a preemptible kernel we may be pre-empted
- * during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -22,6 +29,7 @@
-
- #define switch_to(prev,next,last) \
- do { \
-+ switch_kmaps(prev, next); \
- last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
- } while (0)
-
-diff -Nur linux-4.1.13.orig/arch/arm/include/asm/thread_info.h linux-4.1.13/arch/arm/include/asm/thread_info.h
---- linux-4.1.13.orig/arch/arm/include/asm/thread_info.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/include/asm/thread_info.h 2015-11-29 09:23:09.477622951 +0100
-@@ -50,6 +50,7 @@
- struct thread_info {
- unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => bug */
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- __u32 cpu; /* cpu */
-@@ -147,6 +148,7 @@
- #define TIF_SIGPENDING 0
- #define TIF_NEED_RESCHED 1
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-+#define TIF_NEED_RESCHED_LAZY 3
- #define TIF_UPROBE 7
- #define TIF_SYSCALL_TRACE 8
- #define TIF_SYSCALL_AUDIT 9
-@@ -160,6 +162,7 @@
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.13.orig/arch/arm/Kconfig linux-4.1.13/arch/arm/Kconfig
---- linux-4.1.13.orig/arch/arm/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/Kconfig 2015-11-29 09:23:09.477622951 +0100
-@@ -31,7 +31,7 @@
- select HARDIRQS_SW_RESEND
- select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
-+ select HAVE_ARCH_JUMP_LABEL if (!XIP_KERNEL && !PREEMPT_RT_BASE)
- select HAVE_ARCH_KGDB
- select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_TRACEHOOK
-@@ -66,6 +66,7 @@
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_SYSCALL_TRACEPOINTS
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/asm-offsets.c linux-4.1.13/arch/arm/kernel/asm-offsets.c
---- linux-4.1.13.orig/arch/arm/kernel/asm-offsets.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/asm-offsets.c 2015-11-29 09:23:09.477622951 +0100
-@@ -65,6 +65,7 @@
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/entry-armv.S linux-4.1.13/arch/arm/kernel/entry-armv.S
---- linux-4.1.13.orig/arch/arm/kernel/entry-armv.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/entry-armv.S 2015-11-29 09:23:09.477622951 +0100
-@@ -208,11 +208,18 @@
- #ifdef CONFIG_PREEMPT
- get_thread_info tsk
- ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
-- ldr r0, [tsk, #TI_FLAGS] @ get flags
- teq r8, #0 @ if preempt count != 0
-+ bne 1f @ return from exception
-+ ldr r0, [tsk, #TI_FLAGS] @ get flags
-+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
-+ blne svc_preempt @ preempt!
-+
-+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r8, #0 @ if preempt lazy count != 0
- movne r0, #0 @ force flags to 0
-- tst r0, #_TIF_NEED_RESCHED
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- blne svc_preempt
-+1:
- #endif
-
- svc_exit r5, irq = 1 @ return from exception
-@@ -227,6 +234,8 @@
- 1: bl preempt_schedule_irq @ irq en/disable is done inside
- ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
- tst r0, #_TIF_NEED_RESCHED
-+ bne 1b
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- reteq r8 @ go again
- b 1b
- #endif
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/process.c linux-4.1.13/arch/arm/kernel/process.c
---- linux-4.1.13.orig/arch/arm/kernel/process.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/process.c 2015-11-29 09:23:09.477622951 +0100
-@@ -290,6 +290,30 @@
- }
-
- #ifdef CONFIG_MMU
-+/*
-+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
-+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
-+ * fail.
-+ */
-+static int __init vectors_user_mapping_init_page(void)
-+{
-+ struct page *page;
-+ unsigned long addr = 0xffff0000;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ page = pmd_page(*(pmd));
-+
-+ pgtable_page_ctor(page);
-+
-+ return 0;
-+}
-+late_initcall(vectors_user_mapping_init_page);
-+
- #ifdef CONFIG_KUSER_HELPERS
- /*
- * The vectors page is always readable from user space for the
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/signal.c linux-4.1.13/arch/arm/kernel/signal.c
---- linux-4.1.13.orig/arch/arm/kernel/signal.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/signal.c 2015-11-29 09:23:09.477622951 +0100
-@@ -568,7 +568,8 @@
- do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- {
- do {
-- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
-+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
-+ _TIF_NEED_RESCHED_LAZY))) {
- schedule();
- } else {
- if (unlikely(!user_mode(regs)))
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/smp.c linux-4.1.13/arch/arm/kernel/smp.c
---- linux-4.1.13.orig/arch/arm/kernel/smp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/smp.c 2015-11-29 09:23:09.481622687 +0100
-@@ -213,8 +213,6 @@
- flush_cache_louis();
- local_flush_tlb_all();
-
-- clear_tasks_mm_cpumask(cpu);
--
- return 0;
- }
-
-@@ -230,6 +228,9 @@
- pr_err("CPU%u: cpu didn't die\n", cpu);
- return;
- }
-+
-+ clear_tasks_mm_cpumask(cpu);
-+
- pr_notice("CPU%u: shutdown\n", cpu);
-
- /*
-diff -Nur linux-4.1.13.orig/arch/arm/kernel/unwind.c linux-4.1.13/arch/arm/kernel/unwind.c
---- linux-4.1.13.orig/arch/arm/kernel/unwind.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kernel/unwind.c 2015-11-29 09:23:09.481622687 +0100
-@@ -93,7 +93,7 @@
- static const struct unwind_idx *__origin_unwind_idx;
- extern const struct unwind_idx __stop_unwind_idx[];
-
--static DEFINE_SPINLOCK(unwind_lock);
-+static DEFINE_RAW_SPINLOCK(unwind_lock);
- static LIST_HEAD(unwind_tables);
-
- /* Convert a prel31 symbol to an absolute address */
-@@ -201,7 +201,7 @@
- /* module unwind tables */
- struct unwind_table *table;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_for_each_entry(table, &unwind_tables, list) {
- if (addr >= table->begin_addr &&
- addr < table->end_addr) {
-@@ -213,7 +213,7 @@
- break;
- }
- }
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
- }
-
- pr_debug("%s: idx = %p\n", __func__, idx);
-@@ -529,9 +529,9 @@
- tab->begin_addr = text_addr;
- tab->end_addr = text_addr + text_size;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_add_tail(&tab->list, &unwind_tables);
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- return tab;
- }
-@@ -543,9 +543,9 @@
- if (!tab)
- return;
-
-- spin_lock_irqsave(&unwind_lock, flags);
-+ raw_spin_lock_irqsave(&unwind_lock, flags);
- list_del(&tab->list);
-- spin_unlock_irqrestore(&unwind_lock, flags);
-+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- kfree(tab);
- }
-diff -Nur linux-4.1.13.orig/arch/arm/kvm/arm.c linux-4.1.13/arch/arm/kvm/arm.c
---- linux-4.1.13.orig/arch/arm/kvm/arm.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kvm/arm.c 2015-11-29 09:23:09.481622687 +0100
-@@ -474,9 +474,9 @@
-
- static void vcpu_pause(struct kvm_vcpu *vcpu)
- {
-- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
-+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-- wait_event_interruptible(*wq, !vcpu->arch.pause);
-+ swait_event_interruptible(*wq, !vcpu->arch.pause);
- }
-
- static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-diff -Nur linux-4.1.13.orig/arch/arm/kvm/psci.c linux-4.1.13/arch/arm/kvm/psci.c
---- linux-4.1.13.orig/arch/arm/kvm/psci.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/kvm/psci.c 2015-11-29 09:23:09.501621354 +0100
-@@ -68,7 +68,7 @@
- {
- struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL;
-- wait_queue_head_t *wq;
-+ struct swait_head *wq;
- unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;
-@@ -117,7 +117,7 @@
- smp_mb(); /* Make sure the above is visible */
-
- wq = kvm_arch_vcpu_wq(vcpu);
-- wake_up_interruptible(wq);
-+ swait_wake_interruptible(wq);
-
- return PSCI_RET_SUCCESS;
- }
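[editor's sketch] The two KVM files above switch to the RT tree's simple waitqueues: struct swait_head with swait_event_interruptible() and swait_wake_interruptible(), whose wakeups do a bounded amount of work under a raw lock. A sketch of the producer/consumer pairing, assuming that RT API (the initialiser name is assumed; init_swait_head() in RT trees of this era):

    static struct swait_head vcpu_wq;   /* init_swait_head(&vcpu_wq) elsewhere */
    static bool paused = true;

    static void waiter_sketch(void)
    {
        swait_event_interruptible(vcpu_wq, !paused); /* sleep until unpaused */
    }

    static void waker_sketch(void)
    {
        paused = false;
        smp_mb();                          /* publish before waking, as psci.c does */
        swait_wake_interruptible(&vcpu_wq);
    }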
-diff -Nur linux-4.1.13.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.13/arch/arm/mach-exynos/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-exynos/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-exynos/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -231,7 +231,7 @@
- return (void __iomem *)(S5P_VA_SCU);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void exynos_secondary_init(unsigned int cpu)
- {
-@@ -244,8 +244,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -259,7 +259,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -286,7 +286,7 @@
-
- if (timeout == 0) {
- printk(KERN_ERR "cpu1 power enable failed");
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- return -ETIMEDOUT;
- }
- }
-@@ -342,7 +342,7 @@
- * calibrations, then wait for it to finish
- */
- fail:
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? ret : 0;
- }
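[editor's sketch] exynos is the first of nine platform SMP files given identical treatment below (hisi, omap2, prima2, qcom, spear, sti, ux500, plat-versatile): boot_lock serialises releasing a secondary CPU from its holding pen, and the secondary side runs with interrupts off and cannot sleep, so the lock must stay a spinning raw_spinlock_t on RT. The handshake, reduced to a sketch:

    static DEFINE_RAW_SPINLOCK(boot_lock);

    /* boot CPU: serialise the release of one secondary */
    static int boot_secondary_sketch(void)
    {
        raw_spin_lock(&boot_lock);
        /* write pen_release / kick the secondary here */
        raw_spin_unlock(&boot_lock);
        return 0;
    }

    /* secondary CPU: rendezvous with the boot thread */
    static void secondary_init_sketch(void)
    {
        raw_spin_lock(&boot_lock);      /* blocks until the boot CPU is done */
        raw_spin_unlock(&boot_lock);
    }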
-diff -Nur linux-4.1.13.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.13/arch/arm/mach-hisi/platmcpm.c
---- linux-4.1.13.orig/arch/arm/mach-hisi/platmcpm.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-hisi/platmcpm.c 2015-11-29 09:23:09.501621354 +0100
-@@ -57,7 +57,7 @@
-
- static void __iomem *sysctrl, *fabric;
- static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- static u32 fabric_phys_addr;
- /*
- * [0]: bootwrapper physical address
-@@ -104,7 +104,7 @@
- if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
- return -EINVAL;
-
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
-
- if (hip04_cpu_table[cluster][cpu])
- goto out;
-@@ -133,7 +133,7 @@
- udelay(20);
- out:
- hip04_cpu_table[cluster][cpu]++;
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
-
- return 0;
- }
-@@ -149,7 +149,7 @@
-
- __mcpm_cpu_going_down(cpu, cluster);
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- hip04_cpu_table[cluster][cpu]--;
- if (hip04_cpu_table[cluster][cpu] == 1) {
-@@ -162,7 +162,7 @@
-
- last_man = hip04_cluster_is_down(cluster);
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- /* Since it's Cortex A15, disable L2 prefetching. */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3 \n\t"
-@@ -173,7 +173,7 @@
- hip04_set_snoop_filter(cluster, 0);
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- v7_exit_coherency_flush(louis);
- }
-
-@@ -192,7 +192,7 @@
- cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
-
- count = TIMEOUT_MSEC / POLL_MSEC;
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
- for (tries = 0; tries < count; tries++) {
- if (hip04_cpu_table[cluster][cpu]) {
- ret = -EBUSY;
-@@ -202,10 +202,10 @@
- data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
- if (data & CORE_WFI_STATUS(cpu))
- break;
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- /* Wait for clean L2 when the whole cluster is down. */
- msleep(POLL_MSEC);
-- spin_lock_irq(&boot_lock);
-+ raw_spin_lock_irq(&boot_lock);
- }
- if (tries >= count)
- goto err;
-@@ -220,10 +220,10 @@
- }
- if (tries >= count)
- goto err;
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- return 0;
- err:
-- spin_unlock_irq(&boot_lock);
-+ raw_spin_unlock_irq(&boot_lock);
- return ret;
- }
-
-@@ -235,10 +235,10 @@
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
- if (!hip04_cpu_table[cluster][cpu])
- hip04_cpu_table[cluster][cpu] = 1;
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-diff -Nur linux-4.1.13.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.13/arch/arm/mach-omap2/omap-smp.c
---- linux-4.1.13.orig/arch/arm/mach-omap2/omap-smp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-omap2/omap-smp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -43,7 +43,7 @@
- /* SCU base address */
- static void __iomem *scu_base;
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void __iomem *omap4_get_scu_base(void)
- {
-@@ -74,8 +74,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -89,7 +89,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -166,7 +166,7 @@
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.13/arch/arm/mach-prima2/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-prima2/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-prima2/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -22,7 +22,7 @@
-
- static void __iomem *clk_base;
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sirfsoc_secondary_init(unsigned int cpu)
- {
-@@ -36,8 +36,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static const struct of_device_id clk_ids[] = {
-@@ -75,7 +75,7 @@
- /* make sure write buffer is drained */
- mb();
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -107,7 +107,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.13/arch/arm/mach-qcom/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-qcom/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-qcom/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -46,7 +46,7 @@
-
- extern void secondary_startup_arm(void);
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- #ifdef CONFIG_HOTPLUG_CPU
- static void __ref qcom_cpu_die(unsigned int cpu)
-@@ -60,8 +60,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int scss_release_secondary(unsigned int cpu)
-@@ -284,7 +284,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * Send the secondary CPU a soft interrupt, thereby causing
-@@ -297,7 +297,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return ret;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mach-spear/platsmp.c linux-4.1.13/arch/arm/mach-spear/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-spear/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-spear/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -32,7 +32,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
-
-@@ -47,8 +47,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -59,7 +59,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -84,7 +84,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mach-sti/platsmp.c linux-4.1.13/arch/arm/mach-sti/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-sti/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-sti/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -34,7 +34,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sti_secondary_init(unsigned int cpu)
- {
-@@ -49,8 +49,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -61,7 +61,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -92,7 +92,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.13/arch/arm/mach-ux500/platsmp.c
---- linux-4.1.13.orig/arch/arm/mach-ux500/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mach-ux500/platsmp.c 2015-11-29 09:23:09.501621354 +0100
-@@ -51,7 +51,7 @@
- return NULL;
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void ux500_secondary_init(unsigned int cpu)
- {
-@@ -64,8 +64,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -76,7 +76,7 @@
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -97,7 +97,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm/mm/fault.c linux-4.1.13/arch/arm/mm/fault.c
---- linux-4.1.13.orig/arch/arm/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mm/fault.c 2015-11-29 09:23:09.501621354 +0100
-@@ -276,7 +276,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-@@ -430,6 +430,9 @@
- if (addr < TASK_SIZE)
- return do_page_fault(addr, fsr, regs);
-
-+ if (interrupts_enabled(regs))
-+ local_irq_enable();
-+
- if (user_mode(regs))
- goto bad_area;
-
-@@ -497,6 +500,9 @@
- static int
- do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- {
-+ if (interrupts_enabled(regs))
-+ local_irq_enable();
-+
- do_bad_area(addr, fsr, regs);
- return 0;
- }
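[editor's sketch] The in_atomic() to faulthandler_disabled() switch above, repeated for every mm/fault.c below, tracks the upstream decoupling of pagefault_disable() from preemption around v4.2. The helper, approximately as that series defines it in include/linux/uaccess.h (quoted from memory, treat as an approximation):

    /*
     * True when the fault handler must not sleep: pagefaults were
     * disabled explicitly, or we are in a genuinely atomic section.
     */
    static inline bool faulthandler_disabled(void)
    {
        return pagefault_disabled() || in_atomic();
    }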
-diff -Nur linux-4.1.13.orig/arch/arm/mm/highmem.c linux-4.1.13/arch/arm/mm/highmem.c
---- linux-4.1.13.orig/arch/arm/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/mm/highmem.c 2015-11-29 09:23:09.505621090 +0100
-@@ -54,11 +54,13 @@
-
- void *kmap_atomic(struct page *page)
- {
-+ pte_t pte = mk_pte(page, kmap_prot);
- unsigned int idx;
- unsigned long vaddr;
- void *kmap;
- int type;
-
-+ preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -92,7 +94,10 @@
- * in place, so the contained TLB flush ensures the TLB is updated
- * with the new mapping.
- */
-- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-@@ -109,27 +114,33 @@
-
- if (cache_is_vivt())
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
-- set_fixmap_pte(idx, __pte(0));
- #else
- (void) idx; /* to kill a warning */
- #endif
-+ set_fixmap_pte(idx, __pte(0));
- kmap_atomic_idx_pop();
- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
- /* this address was obtained through kmap_high_get() */
- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
- }
- pagefault_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
- void *kmap_atomic_pfn(unsigned long pfn)
- {
-+ pte_t pte = pfn_pte(pfn, kmap_prot);
- unsigned long vaddr;
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
-+ preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -140,7 +151,10 @@
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
-- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-@@ -154,3 +168,28 @@
-
- return pte_page(get_fixmap_pte(vaddr));
- }
-+
-+#if defined CONFIG_PREEMPT_RT_FULL
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ set_fixmap_pte(idx, __pte(0));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_fixmap_pte(idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#endif
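[editor's sketch] Two things happen in the highmem hunks above: preempt_disable_nort() keeps kmap_atomic() non-preemptible only on non-RT configs, and because the section stays preemptible on PREEMPT_RT_FULL, each task carries its own kmap_pte[] slots which switch_kmaps() replays on context switch. A sketch of the _nort helpers, assuming the RT tree's definitions:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define preempt_disable_nort()  barrier()   /* stay preemptible on RT */
    # define preempt_enable_nort()   barrier()
    #else
    # define preempt_disable_nort()  preempt_disable()
    # define preempt_enable_nort()   preempt_enable()
    #endif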
-diff -Nur linux-4.1.13.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.13/arch/arm/plat-versatile/platsmp.c
---- linux-4.1.13.orig/arch/arm/plat-versatile/platsmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm/plat-versatile/platsmp.c 2015-11-29 09:23:09.505621090 +0100
-@@ -30,7 +30,7 @@
- sync_cache_w(&pen_release);
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void versatile_secondary_init(unsigned int cpu)
- {
-@@ -43,8 +43,8 @@
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -55,7 +55,7 @@
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * This is really belt and braces; we hold unintended secondary
-@@ -85,7 +85,7 @@
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -Nur linux-4.1.13.orig/arch/arm64/include/asm/futex.h linux-4.1.13/arch/arm64/include/asm/futex.h
---- linux-4.1.13.orig/arch/arm64/include/asm/futex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/include/asm/futex.h 2015-11-29 09:23:09.505621090 +0100
-@@ -58,7 +58,7 @@
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -85,7 +85,7 @@
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- if (!ret) {
- switch (cmp) {
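[editor's sketch] The dropped "implies preempt_disable()" comments above reflect the same v4.2-era rework: pagefault_disable() becomes a per-task counter and no longer touches the preempt count, which is exactly why the kmap_atomic() implementations elsewhere in this patch grow explicit preempt_disable() calls. Approximately, after the rework:

    static inline void pagefault_disable(void)
    {
        current->pagefault_disabled++;  /* pagefault_disabled_inc() upstream */
        barrier();                      /* order the store before any access */
    }

    static inline void pagefault_enable(void)
    {
        barrier();
        current->pagefault_disabled--;  /* no preempt_enable() anymore */
    }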
-diff -Nur linux-4.1.13.orig/arch/arm64/include/asm/thread_info.h linux-4.1.13/arch/arm64/include/asm/thread_info.h
---- linux-4.1.13.orig/arch/arm64/include/asm/thread_info.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/include/asm/thread_info.h 2015-11-29 09:23:09.505621090 +0100
-@@ -47,6 +47,7 @@
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- int preempt_count; /* 0 => preemptable, <0 => bug */
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
- };
-
-@@ -101,6 +102,7 @@
- #define TIF_NEED_RESCHED 1
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
- #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
-+#define TIF_NEED_RESCHED_LAZY 4
- #define TIF_NOHZ 7
- #define TIF_SYSCALL_TRACE 8
- #define TIF_SYSCALL_AUDIT 9
-@@ -117,6 +119,7 @@
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
- #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_NOHZ (1 << TIF_NOHZ)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.13.orig/arch/arm64/Kconfig linux-4.1.13/arch/arm64/Kconfig
---- linux-4.1.13.orig/arch/arm64/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/Kconfig 2015-11-29 09:23:09.505621090 +0100
-@@ -69,8 +69,10 @@
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
- select HAVE_RCU_TABLE_FREE
-+ select HAVE_PREEMPT_LAZY
- select HAVE_SYSCALL_TRACEPOINTS
- select IRQ_DOMAIN
-+ select IRQ_FORCED_THREADING
- select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
- select OF
-@@ -599,7 +601,7 @@
-
- config XEN
- bool "Xen guest support on ARM64"
-- depends on ARM64 && OF
-+ depends on ARM64 && OF && !PREEMPT_RT_FULL
- select SWIOTLB_XEN
- help
- Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
-diff -Nur linux-4.1.13.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.13/arch/arm64/kernel/asm-offsets.c
---- linux-4.1.13.orig/arch/arm64/kernel/asm-offsets.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/kernel/asm-offsets.c 2015-11-29 09:23:09.505621090 +0100
-@@ -35,6 +35,7 @@
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.13.orig/arch/arm64/kernel/debug-monitors.c linux-4.1.13/arch/arm64/kernel/debug-monitors.c
---- linux-4.1.13.orig/arch/arm64/kernel/debug-monitors.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/kernel/debug-monitors.c 2015-11-29 09:23:09.505621090 +0100
-@@ -271,20 +271,21 @@
- * Use reader/writer locks instead of plain spinlock.
- */
- static LIST_HEAD(break_hook);
--static DEFINE_RWLOCK(break_hook_lock);
-+static DEFINE_SPINLOCK(break_hook_lock);
-
- void register_break_hook(struct break_hook *hook)
- {
-- write_lock(&break_hook_lock);
-- list_add(&hook->node, &break_hook);
-- write_unlock(&break_hook_lock);
-+ spin_lock(&break_hook_lock);
-+ list_add_rcu(&hook->node, &break_hook);
-+ spin_unlock(&break_hook_lock);
- }
-
- void unregister_break_hook(struct break_hook *hook)
- {
-- write_lock(&break_hook_lock);
-- list_del(&hook->node);
-- write_unlock(&break_hook_lock);
-+ spin_lock(&break_hook_lock);
-+ list_del_rcu(&hook->node);
-+ spin_unlock(&break_hook_lock);
-+ synchronize_rcu();
- }
-
- static int call_break_hook(struct pt_regs *regs, unsigned int esr)
-@@ -292,11 +293,11 @@
- struct break_hook *hook;
- int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
-
-- read_lock(&break_hook_lock);
-- list_for_each_entry(hook, &break_hook, node)
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(hook, &break_hook, node)
- if ((esr & hook->esr_mask) == hook->esr_val)
- fn = hook->fn;
-- read_unlock(&break_hook_lock);
-+ rcu_read_unlock();
-
- return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
- }
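[editor's sketch] The debug-monitors change above replaces a reader/writer lock with an RCU-protected list: call_break_hook() runs from the brk exception path, where taking a rwlock is not an option on RT, while registration is rare and can afford a plain spinlock plus synchronize_rcu() to guarantee no reader still sees an unlinked hook. The unregister barrier, in its general shape:

    #include <linux/rcupdate.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    static void unregister_hook_sketch(struct list_head *node, spinlock_t *lock)
    {
        spin_lock(lock);
        list_del_rcu(node);   /* unlink; readers may still be traversing */
        spin_unlock(lock);
        synchronize_rcu();    /* wait them out before the entry is reused */
    }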
-diff -Nur linux-4.1.13.orig/arch/arm64/kernel/entry.S linux-4.1.13/arch/arm64/kernel/entry.S
---- linux-4.1.13.orig/arch/arm64/kernel/entry.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/kernel/entry.S 2015-11-29 09:23:09.505621090 +0100
-@@ -367,11 +367,16 @@
- #ifdef CONFIG_PREEMPT
- get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
-- cbnz w24, 1f // preempt count != 0
-+ cbnz w24, 2f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
-- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
-- bl el1_preempt
-+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
-+
-+ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
-+ cbnz w24, 2f // preempt lazy count != 0
-+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
- 1:
-+ bl el1_preempt
-+2:
- #endif
- #ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-@@ -385,6 +390,7 @@
- 1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
-+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
- ret x24
- #endif
-
-@@ -622,6 +628,7 @@
- str x0, [sp, #S_X0] // returned x0
- work_pending:
- tbnz x1, #TIF_NEED_RESCHED, work_resched
-+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched
- /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
- ldr x2, [sp, #S_PSTATE]
- mov x0, sp // 'regs'
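[editor's sketch] The el1_irq return path above reads more easily as C: an interrupted kernel context is preempted when the ordinary conditions hold, and for lazy requests additionally only when preempt_lazy_count is zero. A rough equivalent, assuming the RT lazy-preempt fields:

    /* sketch of the gate the assembly above implements */
    static void el1_maybe_preempt(struct thread_info *ti)
    {
        if (ti->preempt_count)
            return;                        /* branch to 2f: no preemption   */
        if (ti->flags & _TIF_NEED_RESCHED)
            goto preempt;                  /* label 1: immediate request    */
        if (ti->preempt_lazy_count)
            return;                        /* inside a lazy-disabled region */
        if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
            return;
    preempt:
        preempt_schedule_irq();            /* loops while either flag is set */
    }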
-diff -Nur linux-4.1.13.orig/arch/arm64/kernel/insn.c linux-4.1.13/arch/arm64/kernel/insn.c
---- linux-4.1.13.orig/arch/arm64/kernel/insn.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/kernel/insn.c 2015-11-29 09:23:09.505621090 +0100
-@@ -77,7 +77,7 @@
- }
- }
-
--static DEFINE_SPINLOCK(patch_lock);
-+static DEFINE_RAW_SPINLOCK(patch_lock);
-
- static void __kprobes *patch_map(void *addr, int fixmap)
- {
-@@ -124,13 +124,13 @@
- unsigned long flags = 0;
- int ret;
-
-- spin_lock_irqsave(&patch_lock, flags);
-+ raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(addr, FIX_TEXT_POKE0);
-
- ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
-
- patch_unmap(FIX_TEXT_POKE0);
-- spin_unlock_irqrestore(&patch_lock, flags);
-+ raw_spin_unlock_irqrestore(&patch_lock, flags);
-
- return ret;
- }
-diff -Nur linux-4.1.13.orig/arch/arm64/kernel/perf_event.c linux-4.1.13/arch/arm64/kernel/perf_event.c
---- linux-4.1.13.orig/arch/arm64/kernel/perf_event.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/kernel/perf_event.c 2015-11-29 09:23:09.505621090 +0100
-@@ -488,7 +488,7 @@
- }
-
- err = request_irq(irq, armpmu->handle_irq,
-- IRQF_NOBALANCING,
-+ IRQF_NOBALANCING | IRQF_NO_THREAD,
- "arm-pmu", armpmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
-diff -Nur linux-4.1.13.orig/arch/arm64/mm/fault.c linux-4.1.13/arch/arm64/mm/fault.c
---- linux-4.1.13.orig/arch/arm64/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/arm64/mm/fault.c 2015-11-29 09:23:09.505621090 +0100
-@@ -211,7 +211,7 @@
- * If we're in an interrupt or have no user context, we must not take
- * the fault.
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/avr32/include/asm/uaccess.h linux-4.1.13/arch/avr32/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/avr32/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/avr32/include/asm/uaccess.h 2015-11-29 09:23:09.505621090 +0100
-@@ -97,7 +97,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -116,7 +117,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -136,7 +138,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -158,7 +161,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
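[editor's sketch] The comment change above, repeated for hexagon, m32r, microblaze and mips below, encodes the new contract: uaccess routines may fault, and therefore sleep, only while pagefaults are enabled. The debugging side of that contract is might_fault(), quoted loosely from the upstream series of the period (an approximation, not the exact code):

    void might_fault(void)
    {
        /* kernel-space accesses never page out, so no warning needed */
        if (segment_eq(get_fs(), KERNEL_DS))
            return;

        if (pagefault_disabled())
            return;        /* sleeping is forbidden here, but so is faulting */

        might_sleep();     /* otherwise this must be a may-sleep context */
    }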
-diff -Nur linux-4.1.13.orig/arch/avr32/mm/fault.c linux-4.1.13/arch/avr32/mm/fault.c
---- linux-4.1.13.orig/arch/avr32/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/avr32/mm/fault.c 2015-11-29 09:23:09.505621090 +0100
-@@ -14,11 +14,11 @@
- #include <linux/pagemap.h>
- #include <linux/kdebug.h>
- #include <linux/kprobes.h>
-+#include <linux/uaccess.h>
-
- #include <asm/mmu_context.h>
- #include <asm/sysreg.h>
- #include <asm/tlb.h>
--#include <asm/uaccess.h>
-
- #ifdef CONFIG_KPROBES
- static inline int notify_page_fault(struct pt_regs *regs, int trap)
-@@ -81,7 +81,7 @@
- * If we're in an interrupt or have no user context, we must
- * not take the fault...
- */
-- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
-+ if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
- goto no_context;
-
- local_irq_enable();
-diff -Nur linux-4.1.13.orig/arch/cris/mm/fault.c linux-4.1.13/arch/cris/mm/fault.c
---- linux-4.1.13.orig/arch/cris/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/cris/mm/fault.c 2015-11-29 09:23:09.505621090 +0100
-@@ -8,7 +8,7 @@
- #include <linux/interrupt.h>
- #include <linux/module.h>
- #include <linux/wait.h>
--#include <asm/uaccess.h>
-+#include <linux/uaccess.h>
- #include <arch/system.h>
-
- extern int find_fixup_code(struct pt_regs *);
-@@ -109,11 +109,11 @@
- info.si_code = SEGV_MAPERR;
-
- /*
-- * If we're in an interrupt or "atomic" operation or have no
-+ * If we're in an interrupt, have pagefaults disabled or have no
- * user context, we must not take the fault.
- */
-
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/frv/mm/fault.c linux-4.1.13/arch/frv/mm/fault.c
---- linux-4.1.13.orig/arch/frv/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/frv/mm/fault.c 2015-11-29 09:23:09.505621090 +0100
-@@ -19,9 +19,9 @@
- #include <linux/kernel.h>
- #include <linux/ptrace.h>
- #include <linux/hardirq.h>
-+#include <linux/uaccess.h>
-
- #include <asm/pgtable.h>
--#include <asm/uaccess.h>
- #include <asm/gdb-stub.h>
-
- /*****************************************************************************/
-@@ -78,7 +78,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(__frame))
-diff -Nur linux-4.1.13.orig/arch/frv/mm/highmem.c linux-4.1.13/arch/frv/mm/highmem.c
---- linux-4.1.13.orig/arch/frv/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/frv/mm/highmem.c 2015-11-29 09:23:09.505621090 +0100
-@@ -42,6 +42,7 @@
- unsigned long paddr;
- int type;
-
-+ preempt_disable();
- pagefault_disable();
- type = kmap_atomic_idx_push();
- paddr = page_to_phys(page);
-@@ -85,5 +86,6 @@
- }
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.13.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.13/arch/hexagon/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/hexagon/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/hexagon/include/asm/uaccess.h 2015-11-29 09:23:09.505621090 +0100
-@@ -36,7 +36,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-diff -Nur linux-4.1.13.orig/arch/ia64/mm/fault.c linux-4.1.13/arch/ia64/mm/fault.c
---- linux-4.1.13.orig/arch/ia64/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/ia64/mm/fault.c 2015-11-29 09:23:09.505621090 +0100
-@@ -11,10 +11,10 @@
- #include <linux/kprobes.h>
- #include <linux/kdebug.h>
- #include <linux/prefetch.h>
-+#include <linux/uaccess.h>
-
- #include <asm/pgtable.h>
- #include <asm/processor.h>
--#include <asm/uaccess.h>
-
- extern int die(char *, struct pt_regs *, long);
-
-@@ -96,7 +96,7 @@
- /*
- * If we're in an interrupt or have no user context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- #ifdef CONFIG_VIRTUAL_MEM_MAP
-diff -Nur linux-4.1.13.orig/arch/Kconfig linux-4.1.13/arch/Kconfig
---- linux-4.1.13.orig/arch/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/Kconfig 2015-11-29 09:23:09.477622951 +0100
-@@ -6,6 +6,7 @@
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
-+ depends on !PREEMPT_RT_FULL
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
-diff -Nur linux-4.1.13.orig/arch/m32r/include/asm/uaccess.h linux-4.1.13/arch/m32r/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/m32r/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/m32r/include/asm/uaccess.h 2015-11-29 09:23:09.509620826 +0100
-@@ -91,7 +91,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -155,7 +156,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -175,7 +177,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -194,7 +197,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -274,7 +278,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -568,7 +573,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -588,7 +594,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -606,7 +613,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -626,7 +634,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -677,7 +686,8 @@
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-diff -Nur linux-4.1.13.orig/arch/m32r/mm/fault.c linux-4.1.13/arch/m32r/mm/fault.c
---- linux-4.1.13.orig/arch/m32r/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/m32r/mm/fault.c 2015-11-29 09:23:09.509620826 +0100
-@@ -24,9 +24,9 @@
- #include <linux/vt_kern.h> /* For unblank_screen() */
- #include <linux/highmem.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
- #include <asm/m32r.h>
--#include <asm/uaccess.h>
- #include <asm/hardirq.h>
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
-@@ -111,10 +111,10 @@
- mm = tsk->mm;
-
- /*
-- * If we're in an interrupt or have no user context or are running in an
-- * atomic region then we must not take the fault..
-+ * If we're in an interrupt or have no user context or have pagefaults
-+ * disabled then we must not take the fault.
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (error_code & ACE_USERMODE)
-diff -Nur linux-4.1.13.orig/arch/m68k/mm/fault.c linux-4.1.13/arch/m68k/mm/fault.c
---- linux-4.1.13.orig/arch/m68k/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/m68k/mm/fault.c 2015-11-29 09:23:09.509620826 +0100
-@@ -10,10 +10,10 @@
- #include <linux/ptrace.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
- #include <asm/setup.h>
- #include <asm/traps.h>
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
-
- extern void die_if_kernel(char *, struct pt_regs *, long);
-@@ -81,7 +81,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/metag/mm/fault.c linux-4.1.13/arch/metag/mm/fault.c
---- linux-4.1.13.orig/arch/metag/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/metag/mm/fault.c 2015-11-29 09:23:09.509620826 +0100
-@@ -105,7 +105,7 @@
-
- mm = tsk->mm;
-
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/metag/mm/highmem.c linux-4.1.13/arch/metag/mm/highmem.c
---- linux-4.1.13.orig/arch/metag/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/metag/mm/highmem.c 2015-11-29 09:23:09.509620826 +0100
-@@ -43,7 +43,7 @@
- unsigned long vaddr;
- int type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -82,6 +82,7 @@
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-@@ -95,6 +96,7 @@
- unsigned long vaddr;
- int type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.13.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.13/arch/microblaze/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/microblaze/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/microblaze/include/asm/uaccess.h 2015-11-29 09:23:09.509620826 +0100
-@@ -178,7 +178,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -290,7 +291,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.13.orig/arch/microblaze/mm/fault.c linux-4.1.13/arch/microblaze/mm/fault.c
---- linux-4.1.13.orig/arch/microblaze/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/microblaze/mm/fault.c 2015-11-29 09:23:09.509620826 +0100
-@@ -107,14 +107,14 @@
- if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
- is_write = 0;
-
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- if (kernel_mode(regs))
- goto bad_area_nosemaphore;
-
-- /* in_atomic() in user mode is really bad,
-+ /* faulthandler_disabled() in user mode is really bad,
- as is current->mm == NULL. */
-- pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
-- mm);
-+ pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
-+ mm);
- pr_emerg("r15 = %lx MSR = %lx\n",
- regs->r15, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.13.orig/arch/microblaze/mm/highmem.c linux-4.1.13/arch/microblaze/mm/highmem.c
---- linux-4.1.13.orig/arch/microblaze/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/microblaze/mm/highmem.c 2015-11-29 09:23:09.509620826 +0100
-@@ -37,7 +37,7 @@
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -63,6 +63,7 @@
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -84,5 +85,6 @@
- #endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.13.orig/arch/mips/include/asm/uaccess.h linux-4.1.13/arch/mips/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/mips/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/include/asm/uaccess.h 2015-11-29 09:23:09.513620562 +0100
-@@ -103,7 +103,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -138,7 +139,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -157,7 +159,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -177,7 +180,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -199,7 +203,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -498,7 +503,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -517,7 +523,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -537,7 +544,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -559,7 +567,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -815,7 +824,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -888,7 +898,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -1075,7 +1086,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -1107,7 +1119,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -1329,7 +1342,8 @@
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-@@ -1398,7 +1412,8 @@
- * strnlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-diff -Nur linux-4.1.13.orig/arch/mips/Kconfig linux-4.1.13/arch/mips/Kconfig
---- linux-4.1.13.orig/arch/mips/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/Kconfig 2015-11-29 09:23:09.513620562 +0100
-@@ -2366,7 +2366,7 @@
- #
- config HIGHMEM
- bool "High Memory Support"
-- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
-+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
-
- config CPU_SUPPORTS_HIGHMEM
- bool
-diff -Nur linux-4.1.13.orig/arch/mips/kernel/signal-common.h linux-4.1.13/arch/mips/kernel/signal-common.h
---- linux-4.1.13.orig/arch/mips/kernel/signal-common.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/kernel/signal-common.h 2015-11-29 09:23:09.513620562 +0100
-@@ -28,12 +28,7 @@
- extern int fpcsr_pending(unsigned int __user *fpcsr);
-
- /* Make sure we will not lose FPU ownership */
--#ifdef CONFIG_PREEMPT
--#define lock_fpu_owner() preempt_disable()
--#define unlock_fpu_owner() preempt_enable()
--#else
--#define lock_fpu_owner() pagefault_disable()
--#define unlock_fpu_owner() pagefault_enable()
--#endif
-+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
-+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
-
- #endif /* __SIGNAL_COMMON_H */
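[editor's sketch] The MIPS change above folds the old CONFIG_PREEMPT special case into one definition: now that pagefault_disable() no longer disables preemption, protecting FPU ownership needs both, taken in the order shown and released in reverse. A hypothetical caller in the sigcontext save path, to show the intended usage:

    static int save_fp_word_sketch(u32 __user *dst, u32 val)
    {
        int err;

        lock_fpu_owner();            /* preempt off, then pagefaults off */
        err = __put_user(val, dst);  /* may fail, must not fault in pages */
        unlock_fpu_owner();          /* pagefaults on, then preempt on    */

        return err;
    }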
-diff -Nur linux-4.1.13.orig/arch/mips/mm/fault.c linux-4.1.13/arch/mips/mm/fault.c
---- linux-4.1.13.orig/arch/mips/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/mm/fault.c 2015-11-29 09:23:09.513620562 +0100
-@@ -21,10 +21,10 @@
- #include <linux/module.h>
- #include <linux/kprobes.h>
- #include <linux/perf_event.h>
-+#include <linux/uaccess.h>
-
- #include <asm/branch.h>
- #include <asm/mmu_context.h>
--#include <asm/uaccess.h>
- #include <asm/ptrace.h>
- #include <asm/highmem.h> /* For VMALLOC_END */
- #include <linux/kdebug.h>
-@@ -94,7 +94,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/mips/mm/highmem.c linux-4.1.13/arch/mips/mm/highmem.c
---- linux-4.1.13.orig/arch/mips/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/mm/highmem.c 2015-11-29 09:23:09.513620562 +0100
-@@ -47,7 +47,7 @@
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -72,6 +72,7 @@
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -92,6 +93,7 @@
- #endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-@@ -104,6 +106,7 @@
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.13.orig/arch/mips/mm/init.c linux-4.1.13/arch/mips/mm/init.c
---- linux-4.1.13.orig/arch/mips/mm/init.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mips/mm/init.c 2015-11-29 09:23:09.513620562 +0100
-@@ -90,6 +90,7 @@
-
- BUG_ON(Page_dcache_dirty(page));
-
-+ preempt_disable();
- pagefault_disable();
- idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
- idx += in_interrupt() ? FIX_N_COLOURS : 0;
-@@ -152,6 +153,7 @@
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
- pagefault_enable();
-+ preempt_enable();
- }
-
- void copy_user_highpage(struct page *to, struct page *from,
-diff -Nur linux-4.1.13.orig/arch/mn10300/include/asm/highmem.h linux-4.1.13/arch/mn10300/include/asm/highmem.h
---- linux-4.1.13.orig/arch/mn10300/include/asm/highmem.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mn10300/include/asm/highmem.h 2015-11-29 09:23:09.513620562 +0100
-@@ -75,6 +75,7 @@
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
- if (page < highmem_start_page)
- return page_address(page);
-@@ -98,6 +99,7 @@
-
- if (vaddr < FIXADDR_START) { /* FIXME */
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -122,6 +124,7 @@
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- #endif /* __KERNEL__ */
-
-diff -Nur linux-4.1.13.orig/arch/mn10300/mm/fault.c linux-4.1.13/arch/mn10300/mm/fault.c
---- linux-4.1.13.orig/arch/mn10300/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/mn10300/mm/fault.c 2015-11-29 09:23:09.513620562 +0100
-@@ -23,8 +23,8 @@
- #include <linux/interrupt.h>
- #include <linux/init.h>
- #include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/uaccess.h>
-
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
- #include <asm/hardirq.h>
- #include <asm/cpu-regs.h>
-@@ -168,7 +168,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-diff -Nur linux-4.1.13.orig/arch/nios2/mm/fault.c linux-4.1.13/arch/nios2/mm/fault.c
---- linux-4.1.13.orig/arch/nios2/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/nios2/mm/fault.c 2015-11-29 09:23:09.513620562 +0100
-@@ -77,7 +77,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.13/arch/parisc/include/asm/cacheflush.h
---- linux-4.1.13.orig/arch/parisc/include/asm/cacheflush.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/parisc/include/asm/cacheflush.h 2015-11-29 09:23:09.513620562 +0100
-@@ -142,6 +142,7 @@
-
- static inline void *kmap_atomic(struct page *page)
- {
-+ preempt_disable();
- pagefault_disable();
- return page_address(page);
- }
-@@ -150,6 +151,7 @@
- {
- flush_kernel_dcache_page_addr(addr);
- pagefault_enable();
-+ preempt_enable();
- }
-
- #define kmap_atomic_prot(page, prot) kmap_atomic(page)
-diff -Nur linux-4.1.13.orig/arch/parisc/kernel/traps.c linux-4.1.13/arch/parisc/kernel/traps.c
---- linux-4.1.13.orig/arch/parisc/kernel/traps.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/parisc/kernel/traps.c 2015-11-29 09:23:09.513620562 +0100
-@@ -26,9 +26,9 @@
- #include <linux/console.h>
- #include <linux/bug.h>
- #include <linux/ratelimit.h>
-+#include <linux/uaccess.h>
-
- #include <asm/assembly.h>
--#include <asm/uaccess.h>
- #include <asm/io.h>
- #include <asm/irq.h>
- #include <asm/traps.h>
-@@ -796,7 +796,7 @@
- * unless pagefault_disable() was called before.
- */
-
-- if (fault_space == 0 && !in_atomic())
-+ if (fault_space == 0 && !faulthandler_disabled())
- {
- pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
- parisc_terminate("Kernel Fault", regs, code, fault_address);
-diff -Nur linux-4.1.13.orig/arch/parisc/mm/fault.c linux-4.1.13/arch/parisc/mm/fault.c
---- linux-4.1.13.orig/arch/parisc/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/parisc/mm/fault.c 2015-11-29 09:23:09.513620562 +0100
-@@ -15,8 +15,8 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
--#include <asm/uaccess.h>
- #include <asm/traps.h>
-
- /* Various important other fields */
-@@ -207,7 +207,7 @@
- int fault;
- unsigned int flags;
-
-- if (in_atomic())
-+ if (pagefault_disabled())
- goto no_context;
-
- tsk = current;
-diff -Nur linux-4.1.13.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.13/arch/powerpc/include/asm/kvm_host.h
---- linux-4.1.13.orig/arch/powerpc/include/asm/kvm_host.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/include/asm/kvm_host.h 2015-11-29 09:23:09.517620297 +0100
-@@ -280,7 +280,7 @@
- u8 in_guest;
- struct list_head runnable_threads;
- spinlock_t lock;
-- wait_queue_head_t wq;
-+ struct swait_head wq;
- spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
- u64 stolen_tb;
- u64 preempt_tb;
-@@ -613,7 +613,7 @@
- u8 prodded;
- u32 last_inst;
-
-- wait_queue_head_t *wqp;
-+ struct swait_head *wqp;
- struct kvmppc_vcore *vcore;
- int ret;
- int trap;
-diff -Nur linux-4.1.13.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.13/arch/powerpc/include/asm/thread_info.h
---- linux-4.1.13.orig/arch/powerpc/include/asm/thread_info.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/include/asm/thread_info.h 2015-11-29 09:23:09.517620297 +0100
-@@ -42,6 +42,8 @@
- int cpu; /* cpu we're on */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
-+ <0 => BUG */
- unsigned long local_flags; /* private flags for thread */
-
- /* low level flags - has atomic operations done on it */
-@@ -82,8 +84,7 @@
- #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
- #define TIF_SIGPENDING 1 /* signal pending */
- #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
--#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
-- TIF_NEED_RESCHED */
-+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
- #define TIF_32BIT 4 /* 32 bit binary */
- #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-@@ -101,6 +102,8 @@
- #if defined(CONFIG_PPC64)
- #define TIF_ELF2ABI 18 /* function descriptors must die! */
- #endif
-+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
-+ TIF_NEED_RESCHED */
-
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -119,14 +122,16 @@
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
- #define _TIF_NOHZ (1<<TIF_NOHZ)
-+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
-
- #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_RESTORE_TM)
-+ _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
- /* Bits in local_flags */
- /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
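The thread_info hunk introduces a second, "lazy" reschedule flag and folds both bits into _TIF_NEED_RESCHED_MASK so the assembly exit paths can test them with a single andi. instruction. A small compilable sketch of that combined test, with the TIF_* values copied from the hunk and everything else invented:

	/* Sketch of the combined reschedule test introduced above; the
	 * TIF_* values mirror the powerpc hunk, the rest is illustrative. */
	#include <stdio.h>

	#define TIF_NEED_RESCHED	2
	#define TIF_NEED_RESCHED_LAZY	3

	#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
	#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
	#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

	static int need_resched_any(unsigned long ti_flags)
	{
		/* one test replaces the two separate checks in entry_32.S */
		return (ti_flags & _TIF_NEED_RESCHED_MASK) != 0;
	}

	int main(void)
	{
		printf("%d\n", need_resched_any(_TIF_NEED_RESCHED_LAZY)); /* 1 */
		printf("%d\n", need_resched_any(0));                      /* 0 */
		return 0;
	}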
-diff -Nur linux-4.1.13.orig/arch/powerpc/Kconfig linux-4.1.13/arch/powerpc/Kconfig
---- linux-4.1.13.orig/arch/powerpc/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/Kconfig 2015-11-29 09:23:09.513620562 +0100
-@@ -60,10 +60,11 @@
-
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
-
- config GENERIC_LOCKBREAK
- bool
-@@ -138,6 +139,7 @@
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
-+ select HAVE_PREEMPT_LAZY
- select HAVE_MOD_ARCH_SPECIFIC
- select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
-@@ -312,7 +314,7 @@
-
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT_FULL
-
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.13/arch/powerpc/kernel/asm-offsets.c
---- linux-4.1.13.orig/arch/powerpc/kernel/asm-offsets.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/asm-offsets.c 2015-11-29 09:23:09.517620297 +0100
-@@ -160,6 +160,7 @@
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/entry_32.S linux-4.1.13/arch/powerpc/kernel/entry_32.S
---- linux-4.1.13.orig/arch/powerpc/kernel/entry_32.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/entry_32.S 2015-11-29 09:23:09.517620297 +0100
-@@ -813,7 +813,14 @@
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- andi. r8,r8,_TIF_NEED_RESCHED
-+ bne+ 1f
-+ lwz r0,TI_PREEMPT_LAZY(r9)
-+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
-+ bne restore
-+ lwz r0,TI_FLAGS(r9)
-+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-+1:
- lwz r3,_MSR(r1)
- andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
-@@ -824,11 +831,11 @@
- */
- bl trace_hardirqs_off
- #endif
--1: bl preempt_schedule_irq
-+2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
-- andi. r0,r3,_TIF_NEED_RESCHED
-- bne- 1b
-+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
-+ bne- 2b
- #ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
-@@ -1149,7 +1156,7 @@
- #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
- do_work: /* r10 contains MSR_KERNEL here */
-- andi. r0,r9,_TIF_NEED_RESCHED
-+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
- beq do_user_signal
-
- do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1170,7 +1177,7 @@
- MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
-- andi. r0,r9,_TIF_NEED_RESCHED
-+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
- bne- do_resched
- andi. r0,r9,_TIF_USER_WORK_MASK
- beq restore_user
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/entry_64.S linux-4.1.13/arch/powerpc/kernel/entry_64.S
---- linux-4.1.13.orig/arch/powerpc/kernel/entry_64.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/entry_64.S 2015-11-29 09:23:09.517620297 +0100
-@@ -636,7 +636,7 @@
- #else
- beq restore
- #endif
--1: andi. r0,r4,_TIF_NEED_RESCHED
-+1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
-@@ -698,10 +698,18 @@
-
- #ifdef CONFIG_PREEMPT
- /* Check if we need to preempt */
-+ lwz r8,TI_PREEMPT(r9)
-+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
-+ bne restore
- andi. r0,r4,_TIF_NEED_RESCHED
-+ bne+ check_count
-+
-+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-+ lwz r8,TI_PREEMPT_LAZY(r9)
-+
- /* Check that preempt_count() == 0 and interrupts are enabled */
-- lwz r8,TI_PREEMPT(r9)
-+check_count:
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
-@@ -718,7 +726,7 @@
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
-- andi. r0,r4,_TIF_NEED_RESCHED
-+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
- bne 1b
-
- /*
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/irq.c linux-4.1.13/arch/powerpc/kernel/irq.c
---- linux-4.1.13.orig/arch/powerpc/kernel/irq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/irq.c 2015-11-29 09:23:09.517620297 +0100
-@@ -614,6 +614,7 @@
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curtp, *irqtp;
-@@ -631,6 +632,7 @@
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- }
-+#endif
-
- irq_hw_number_t virq_to_hw(unsigned int virq)
- {
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/misc_32.S linux-4.1.13/arch/powerpc/kernel/misc_32.S
---- linux-4.1.13.orig/arch/powerpc/kernel/misc_32.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/misc_32.S 2015-11-29 09:23:09.517620297 +0100
-@@ -40,6 +40,7 @@
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- stw r0,4(r1)
-@@ -56,6 +57,7 @@
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-+#endif
-
- /*
- * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -Nur linux-4.1.13.orig/arch/powerpc/kernel/misc_64.S linux-4.1.13/arch/powerpc/kernel/misc_64.S
---- linux-4.1.13.orig/arch/powerpc/kernel/misc_64.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kernel/misc_64.S 2015-11-29 09:23:09.517620297 +0100
-@@ -29,6 +29,7 @@
-
- .text
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- std r0,16(r1)
-@@ -39,6 +40,7 @@
- ld r0,16(r1)
- mtlr r0
- blr
-+#endif
-
- _GLOBAL(call_do_irq)
- mflr r0
-diff -Nur linux-4.1.13.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.13/arch/powerpc/kvm/book3s_hv.c
---- linux-4.1.13.orig/arch/powerpc/kvm/book3s_hv.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kvm/book3s_hv.c 2015-11-29 09:23:09.517620297 +0100
-@@ -115,11 +115,11 @@
- static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
- {
- int cpu = vcpu->cpu;
-- wait_queue_head_t *wqp;
-+ struct swait_head *wqp;
-
- wqp = kvm_arch_vcpu_wq(vcpu);
-- if (waitqueue_active(wqp)) {
-- wake_up_interruptible(wqp);
-+ if (swaitqueue_active(wqp)) {
-+ swait_wake_interruptible(wqp);
- ++vcpu->stat.halt_wakeup;
- }
-
-@@ -686,8 +686,8 @@
- tvcpu->arch.prodded = 1;
- smp_mb();
- if (vcpu->arch.ceded) {
-- if (waitqueue_active(&vcpu->wq)) {
-- wake_up_interruptible(&vcpu->wq);
-+ if (swaitqueue_active(&vcpu->wq)) {
-+ swait_wake_interruptible(&vcpu->wq);
- vcpu->stat.halt_wakeup++;
- }
- }
-@@ -1426,7 +1426,7 @@
- INIT_LIST_HEAD(&vcore->runnable_threads);
- spin_lock_init(&vcore->lock);
- spin_lock_init(&vcore->stoltb_lock);
-- init_waitqueue_head(&vcore->wq);
-+ init_swait_head(&vcore->wq);
- vcore->preempt_tb = TB_NIL;
- vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * threads_per_subcore;
-@@ -2073,10 +2073,9 @@
- {
- struct kvm_vcpu *vcpu;
- int do_sleep = 1;
-+ DEFINE_SWAITER(wait);
-
-- DEFINE_WAIT(wait);
--
-- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-+ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-
- /*
- * Check one last time for pending exceptions and ceded state after
-@@ -2090,7 +2089,7 @@
- }
-
- if (!do_sleep) {
-- finish_wait(&vc->wq, &wait);
-+ swait_finish(&vc->wq, &wait);
- return;
- }
-
-@@ -2098,7 +2097,7 @@
- trace_kvmppc_vcore_blocked(vc, 0);
- spin_unlock(&vc->lock);
- schedule();
-- finish_wait(&vc->wq, &wait);
-+ swait_finish(&vc->wq, &wait);
- spin_lock(&vc->lock);
- vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_vcore_blocked(vc, 1);
-@@ -2142,7 +2141,7 @@
- kvmppc_start_thread(vcpu);
- trace_kvm_guest_enter(vcpu);
- } else if (vc->vcore_state == VCORE_SLEEPING) {
-- wake_up(&vc->wq);
-+ swait_wake(&vc->wq);
- }
-
- }
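The KVM hunks replace wait_queue_head_t with the RT tree's struct swait_head, a stripped-down waitqueue whose wake side does a bounded amount of work and can therefore be called from contexts that must not sleep. A rough pthread model of the wait/wake split, assuming wake-all semantics; the struct layout and function names are invented for illustration and are not the swait API:

	/* Pthread sketch in the spirit of swait_head; build with -pthread. */
	#include <pthread.h>
	#include <stdbool.h>

	struct sketch_swait_head {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		bool signalled;
	};

	static void sketch_swait(struct sketch_swait_head *h)
	{
		pthread_mutex_lock(&h->lock);
		while (!h->signalled)
			pthread_cond_wait(&h->cond, &h->lock);
		h->signalled = false;
		pthread_mutex_unlock(&h->lock);
	}

	static void sketch_swait_wake(struct sketch_swait_head *h)
	{
		/* the point of swait: the wake side does bounded work, so it
		 * is usable where a full waitqueue wakeup would be unsafe */
		pthread_mutex_lock(&h->lock);
		h->signalled = true;
		pthread_cond_broadcast(&h->cond);
		pthread_mutex_unlock(&h->lock);
	}

	int main(void)
	{
		struct sketch_swait_head h = {
			PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
		};
		sketch_swait_wake(&h);	/* mark signalled */
		sketch_swait(&h);	/* returns immediately */
		return 0;
	}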
-diff -Nur linux-4.1.13.orig/arch/powerpc/kvm/Kconfig linux-4.1.13/arch/powerpc/kvm/Kconfig
---- linux-4.1.13.orig/arch/powerpc/kvm/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/kvm/Kconfig 2015-11-29 09:23:09.517620297 +0100
-@@ -172,6 +172,7 @@
- config KVM_MPIC
- bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
-+ depends on !PREEMPT_RT_FULL
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
-diff -Nur linux-4.1.13.orig/arch/powerpc/mm/fault.c linux-4.1.13/arch/powerpc/mm/fault.c
---- linux-4.1.13.orig/arch/powerpc/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/mm/fault.c 2015-11-29 09:23:09.517620297 +0100
-@@ -33,13 +33,13 @@
- #include <linux/ratelimit.h>
- #include <linux/context_tracking.h>
- #include <linux/hugetlb.h>
-+#include <linux/uaccess.h>
-
- #include <asm/firmware.h>
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/mmu.h>
- #include <asm/mmu_context.h>
--#include <asm/uaccess.h>
- #include <asm/tlbflush.h>
- #include <asm/siginfo.h>
- #include <asm/debug.h>
-@@ -272,15 +272,16 @@
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
-
-- if (in_atomic() || mm == NULL) {
-+ if (faulthandler_disabled() || mm == NULL) {
- if (!user_mode(regs)) {
- rc = SIGSEGV;
- goto bail;
- }
-- /* in_atomic() in user mode is really bad,
-+ /* faulthandler_disabled() in user mode is really bad,
- as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with "
-- "in_atomic() = %d mm = %p\n", in_atomic(), mm);
-+ "faulthandler_disabled() = %d mm = %p\n",
-+ faulthandler_disabled(), mm);
- printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
- regs->nip, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.13.orig/arch/powerpc/mm/highmem.c linux-4.1.13/arch/powerpc/mm/highmem.c
---- linux-4.1.13.orig/arch/powerpc/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/mm/highmem.c 2015-11-29 09:23:09.517620297 +0100
-@@ -34,7 +34,7 @@
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -59,6 +59,7 @@
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -82,5 +83,6 @@
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.13.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.13/arch/powerpc/platforms/ps3/device-init.c
---- linux-4.1.13.orig/arch/powerpc/platforms/ps3/device-init.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/powerpc/platforms/ps3/device-init.c 2015-11-29 09:23:09.521620031 +0100
-@@ -752,7 +752,7 @@
- }
- pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
-
-- res = wait_event_interruptible(dev->done.wait,
-+ res = swait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
- if (kthread_should_stop())
- res = -EINTR;
-diff -Nur linux-4.1.13.orig/arch/s390/include/asm/kvm_host.h linux-4.1.13/arch/s390/include/asm/kvm_host.h
---- linux-4.1.13.orig/arch/s390/include/asm/kvm_host.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/s390/include/asm/kvm_host.h 2015-11-29 09:23:09.521620031 +0100
-@@ -419,7 +419,7 @@
- struct kvm_s390_local_interrupt {
- spinlock_t lock;
- struct kvm_s390_float_interrupt *float_int;
-- wait_queue_head_t *wq;
-+ struct swait_head *wq;
- atomic_t *cpuflags;
- DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
- struct kvm_s390_irq_payload irq;
-diff -Nur linux-4.1.13.orig/arch/s390/include/asm/uaccess.h linux-4.1.13/arch/s390/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/s390/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/s390/include/asm/uaccess.h 2015-11-29 09:23:09.521620031 +0100
-@@ -98,7 +98,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -118,7 +119,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -264,7 +266,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -290,7 +293,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -348,7 +352,8 @@
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-diff -Nur linux-4.1.13.orig/arch/s390/kvm/interrupt.c linux-4.1.13/arch/s390/kvm/interrupt.c
---- linux-4.1.13.orig/arch/s390/kvm/interrupt.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/s390/kvm/interrupt.c 2015-11-29 09:23:09.521620031 +0100
-@@ -875,13 +875,13 @@
-
- void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
- {
-- if (waitqueue_active(&vcpu->wq)) {
-+ if (swaitqueue_active(&vcpu->wq)) {
- /*
- * The vcpu gave up the cpu voluntarily, mark it as a good
- * yield-candidate.
- */
- vcpu->preempted = true;
-- wake_up_interruptible(&vcpu->wq);
-+ swait_wake_interruptible(&vcpu->wq);
- vcpu->stat.halt_wakeup++;
- }
- }
-@@ -987,7 +987,7 @@
- spin_lock(&li->lock);
- irq.u.pgm.code = code;
- __inject_prog(vcpu, &irq);
-- BUG_ON(waitqueue_active(li->wq));
-+ BUG_ON(swaitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return 0;
- }
-@@ -1006,7 +1006,7 @@
- spin_lock(&li->lock);
- irq.u.pgm = *pgm_info;
- rc = __inject_prog(vcpu, &irq);
-- BUG_ON(waitqueue_active(li->wq));
-+ BUG_ON(swaitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return rc;
- }
-diff -Nur linux-4.1.13.orig/arch/s390/mm/fault.c linux-4.1.13/arch/s390/mm/fault.c
---- linux-4.1.13.orig/arch/s390/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/s390/mm/fault.c 2015-11-29 09:23:09.521620031 +0100
-@@ -399,7 +399,7 @@
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
-- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
- goto out;
-
- address = trans_exc_code & __FAIL_ADDR_MASK;
-diff -Nur linux-4.1.13.orig/arch/score/include/asm/uaccess.h linux-4.1.13/arch/score/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/score/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/score/include/asm/uaccess.h 2015-11-29 09:23:09.521620031 +0100
-@@ -36,7 +36,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -61,7 +62,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -79,7 +81,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -98,7 +101,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -119,7 +123,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.13.orig/arch/score/mm/fault.c linux-4.1.13/arch/score/mm/fault.c
---- linux-4.1.13.orig/arch/score/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/score/mm/fault.c 2015-11-29 09:23:09.521620031 +0100
-@@ -34,6 +34,7 @@
- #include <linux/string.h>
- #include <linux/types.h>
- #include <linux/ptrace.h>
-+#include <linux/uaccess.h>
-
- /*
- * This routine handles page faults. It determines the address,
-@@ -73,7 +74,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (pagefault_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/sh/kernel/irq.c linux-4.1.13/arch/sh/kernel/irq.c
---- linux-4.1.13.orig/arch/sh/kernel/irq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sh/kernel/irq.c 2015-11-29 09:23:09.521620031 +0100
-@@ -147,6 +147,7 @@
- hardirq_ctx[cpu] = NULL;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curctx;
-@@ -174,6 +175,7 @@
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
- }
-+#endif
- #else
- static inline void handle_one_irq(unsigned int irq)
- {
-diff -Nur linux-4.1.13.orig/arch/sh/mm/fault.c linux-4.1.13/arch/sh/mm/fault.c
---- linux-4.1.13.orig/arch/sh/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sh/mm/fault.c 2015-11-29 09:23:09.521620031 +0100
-@@ -17,6 +17,7 @@
- #include <linux/kprobes.h>
- #include <linux/perf_event.h>
- #include <linux/kdebug.h>
-+#include <linux/uaccess.h>
- #include <asm/io_trapped.h>
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
-@@ -438,9 +439,9 @@
-
- /*
- * If we're in an interrupt, have no user context or are running
-- * in an atomic region then we must not take the fault:
-+ * with pagefaults disabled then we must not take the fault:
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
-diff -Nur linux-4.1.13.orig/arch/sparc/Kconfig linux-4.1.13/arch/sparc/Kconfig
---- linux-4.1.13.orig/arch/sparc/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/Kconfig 2015-11-29 09:23:09.521620031 +0100
-@@ -189,12 +189,10 @@
- source kernel/Kconfig.hz
-
- config RWSEM_GENERIC_SPINLOCK
-- bool
-- default y if SPARC32
-+ def_bool PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
-- bool
-- default y if SPARC64
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_HWEIGHT
- bool
-diff -Nur linux-4.1.13.orig/arch/sparc/kernel/irq_64.c linux-4.1.13/arch/sparc/kernel/irq_64.c
---- linux-4.1.13.orig/arch/sparc/kernel/irq_64.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/kernel/irq_64.c 2015-11-29 09:23:09.521620031 +0100
-@@ -849,6 +849,7 @@
- set_irq_regs(old_regs);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -863,6 +864,7 @@
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
-+#endif
-
- #ifdef CONFIG_HOTPLUG_CPU
- void fixup_irqs(void)
-diff -Nur linux-4.1.13.orig/arch/sparc/mm/fault_32.c linux-4.1.13/arch/sparc/mm/fault_32.c
---- linux-4.1.13.orig/arch/sparc/mm/fault_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/mm/fault_32.c 2015-11-29 09:23:09.521620031 +0100
-@@ -21,6 +21,7 @@
- #include <linux/perf_event.h>
- #include <linux/interrupt.h>
- #include <linux/kdebug.h>
-+#include <linux/uaccess.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -29,7 +30,6 @@
- #include <asm/setup.h>
- #include <asm/smp.h>
- #include <asm/traps.h>
--#include <asm/uaccess.h>
-
- #include "mm_32.h"
-
-@@ -196,7 +196,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (pagefault_disabled() || !mm)
- goto no_context;
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.13.orig/arch/sparc/mm/fault_64.c linux-4.1.13/arch/sparc/mm/fault_64.c
---- linux-4.1.13.orig/arch/sparc/mm/fault_64.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/mm/fault_64.c 2015-11-29 09:23:09.521620031 +0100
-@@ -22,12 +22,12 @@
- #include <linux/kdebug.h>
- #include <linux/percpu.h>
- #include <linux/context_tracking.h>
-+#include <linux/uaccess.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/openprom.h>
- #include <asm/oplib.h>
--#include <asm/uaccess.h>
- #include <asm/asi.h>
- #include <asm/lsu.h>
- #include <asm/sections.h>
-@@ -330,7 +330,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto intr_or_no_mm;
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.13.orig/arch/sparc/mm/highmem.c linux-4.1.13/arch/sparc/mm/highmem.c
---- linux-4.1.13.orig/arch/sparc/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/mm/highmem.c 2015-11-29 09:23:09.521620031 +0100
-@@ -53,7 +53,7 @@
- unsigned long vaddr;
- long idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -91,6 +91,7 @@
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -126,5 +127,6 @@
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.13.orig/arch/sparc/mm/init_64.c linux-4.1.13/arch/sparc/mm/init_64.c
---- linux-4.1.13.orig/arch/sparc/mm/init_64.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/sparc/mm/init_64.c 2015-11-29 09:23:09.521620031 +0100
-@@ -2738,7 +2738,7 @@
- struct mm_struct *mm = current->mm;
- struct tsb_config *tp;
-
-- if (in_atomic() || !mm) {
-+ if (faulthandler_disabled() || !mm) {
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(regs->tpc);
-diff -Nur linux-4.1.13.orig/arch/tile/include/asm/uaccess.h linux-4.1.13/arch/tile/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/tile/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/tile/include/asm/uaccess.h 2015-11-29 09:23:09.521620031 +0100
-@@ -78,7 +78,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -192,7 +193,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -274,7 +276,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -330,7 +333,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -366,7 +370,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -437,7 +442,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
-diff -Nur linux-4.1.13.orig/arch/tile/mm/fault.c linux-4.1.13/arch/tile/mm/fault.c
---- linux-4.1.13.orig/arch/tile/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/tile/mm/fault.c 2015-11-29 09:23:09.521620031 +0100
-@@ -354,9 +354,9 @@
-
- /*
- * If we're in an interrupt, have no user context or are running in an
-- * atomic region then we must not take the fault.
-+ * region with pagefaults disabled then we must not take the fault.
- */
-- if (in_atomic() || !mm) {
-+ if (pagefault_disabled() || !mm) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
-diff -Nur linux-4.1.13.orig/arch/tile/mm/highmem.c linux-4.1.13/arch/tile/mm/highmem.c
---- linux-4.1.13.orig/arch/tile/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/tile/mm/highmem.c 2015-11-29 09:23:09.521620031 +0100
-@@ -201,7 +201,7 @@
- int idx, type;
- pte_t *pte;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
-
- /* Avoid icache flushes by disallowing atomic executable mappings. */
-@@ -259,6 +259,7 @@
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-diff -Nur linux-4.1.13.orig/arch/um/kernel/trap.c linux-4.1.13/arch/um/kernel/trap.c
---- linux-4.1.13.orig/arch/um/kernel/trap.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/um/kernel/trap.c 2015-11-29 09:23:09.521620031 +0100
-@@ -35,10 +35,10 @@
- *code_out = SEGV_MAPERR;
-
- /*
-- * If the fault was during atomic operation, don't take the fault, just
-+ * If the fault was with pagefaults disabled, don't take the fault, just
- * fail.
- */
-- if (in_atomic())
-+ if (faulthandler_disabled())
- goto out_nosemaphore;
-
- if (is_user)
-diff -Nur linux-4.1.13.orig/arch/unicore32/mm/fault.c linux-4.1.13/arch/unicore32/mm/fault.c
---- linux-4.1.13.orig/arch/unicore32/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/unicore32/mm/fault.c 2015-11-29 09:23:09.525619763 +0100
-@@ -218,7 +218,7 @@
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
-diff -Nur linux-4.1.13.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.13/arch/x86/crypto/aesni-intel_glue.c
---- linux-4.1.13.orig/arch/x86/crypto/aesni-intel_glue.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/crypto/aesni-intel_glue.c 2015-11-29 09:23:09.525619763 +0100
-@@ -382,14 +382,14 @@
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-- nbytes & AES_BLOCK_MASK);
-+ nbytes & AES_BLOCK_MASK);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -406,14 +406,14 @@
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -430,14 +430,14 @@
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -454,14 +454,14 @@
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
-+ kernel_fpu_begin();
- aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-- kernel_fpu_end();
-
- return err;
- }
-@@ -513,18 +513,20 @@
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-- kernel_fpu_begin();
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-+ kernel_fpu_begin();
- aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
-+ kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- if (walk.nbytes) {
-+ kernel_fpu_begin();
- ctr_crypt_final(ctx, &walk);
-+ kernel_fpu_end();
- err = blkcipher_walk_done(desc, &walk, 0);
- }
-- kernel_fpu_end();
-
- return err;
- }
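The aesni hunks shrink each kernel_fpu_begin()/kernel_fpu_end() section to a single walk step: on RT the FPU section disables preemption, so it must not span the whole request. The restructuring pattern in isolation, with placeholder functions standing in for the kernel FPU calls:

	/* Sketch of the loop restructuring above: the begin/end pair moves
	 * inside the loop so each preempt-off region covers one block only.
	 * begin_atomic/end_atomic/process_block are placeholders. */
	static void begin_atomic(void)   { /* e.g. kernel_fpu_begin() */ }
	static void end_atomic(void)     { /* e.g. kernel_fpu_end() */ }
	static void process_block(int i) { (void)i; }

	static void crypt_all_blocks(int nblocks)
	{
		for (int i = 0; i < nblocks; i++) {
			begin_atomic();   /* was once outside the loop */
			process_block(i); /* bounded work per atomic section */
			end_atomic();     /* preemption point between blocks */
		}
	}

	int main(void) { crypt_all_blocks(4); return 0; }

The cast5 and glue_helper hunks below apply the same transformation, additionally re-evaluating fpu_enabled from false on every iteration instead of carrying it across loop bodies.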
-diff -Nur linux-4.1.13.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.13/arch/x86/crypto/cast5_avx_glue.c
---- linux-4.1.13.orig/arch/x86/crypto/cast5_avx_glue.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/crypto/cast5_avx_glue.c 2015-11-29 09:23:09.525619763 +0100
-@@ -60,7 +60,7 @@
- static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- bool enc)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes;
-@@ -76,7 +76,7 @@
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
-
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -104,10 +104,9 @@
- } while (nbytes >= bsize);
-
- done:
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
--
-- cast5_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -228,7 +227,7 @@
- static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -237,12 +236,11 @@
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes)) {
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __cbc_decrypt(desc, &walk);
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
--
-- cast5_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -312,7 +310,7 @@
- static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -321,13 +319,12 @@
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
-- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+ fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __ctr_crypt(desc, &walk);
-+ cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- cast5_fpu_end(fpu_enabled);
--
- if (walk.nbytes) {
- ctr_crypt_final(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
-diff -Nur linux-4.1.13.orig/arch/x86/crypto/glue_helper.c linux-4.1.13/arch/x86/crypto/glue_helper.c
---- linux-4.1.13.orig/arch/x86/crypto/glue_helper.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/crypto/glue_helper.c 2015-11-29 09:23:09.525619763 +0100
-@@ -39,7 +39,7 @@
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes, i, func_bytes;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- int err;
-
- err = blkcipher_walk_virt(desc, walk);
-@@ -49,7 +49,7 @@
- u8 *wdst = walk->dst.virt.addr;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
-
- for (i = 0; i < gctx->num_funcs; i++) {
- func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -71,10 +71,10 @@
- }
-
- done:
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
- return err;
- }
-
-@@ -194,7 +194,7 @@
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -203,12 +203,12 @@
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
- nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
-@@ -277,7 +277,7 @@
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -286,13 +286,12 @@
-
- while ((nbytes = walk.nbytes) >= bsize) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled, nbytes);
-+ desc, false, nbytes);
- nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
-- glue_fpu_end(fpu_enabled);
--
- if (walk.nbytes) {
- glue_ctr_crypt_final_128bit(
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -347,7 +346,7 @@
- void *tweak_ctx, void *crypt_ctx)
- {
- const unsigned int bsize = 128 / 8;
-- bool fpu_enabled = false;
-+ bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
-@@ -360,21 +359,21 @@
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-- desc, fpu_enabled,
-+ desc, false,
- nbytes < bsize ? bsize : nbytes);
--
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
-+ glue_fpu_end(fpu_enabled);
-
- while (nbytes) {
-+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-+ desc, false, nbytes);
- nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
-
-+ glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- }
--
-- glue_fpu_end(fpu_enabled);
--
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/preempt.h linux-4.1.13/arch/x86/include/asm/preempt.h
---- linux-4.1.13.orig/arch/x86/include/asm/preempt.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/preempt.h 2015-11-29 09:23:09.525619763 +0100
-@@ -82,17 +82,33 @@
- * a decrement which hits zero means we have no preempt_count and should
- * reschedule.
- */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
- }
-
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+ if (____preempt_count_dec_and_test())
-+ return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+ return false;
-+#endif
-+}
-+
- /*
- * Returns true when we need to resched and can (barring IRQ state).
- */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-+ test_thread_flag(TIF_NEED_RESCHED_LAZY));
-+#else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
-
- #ifdef CONFIG_PREEMPT
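The preempt.h hunk teaches __preempt_count_dec_and_test() to also report a pending lazy reschedule when CONFIG_PREEMPT_LAZY is set; should_resched() gets the equivalent treatment. A compilable model of just that decision, with plain variables standing in for the percpu __preempt_count and the TIF_NEED_RESCHED_LAZY thread flag:

	/* Model of the lazy-aware dec-and-test above; variables stand in
	 * for __preempt_count and the per-thread lazy flag. */
	#include <stdbool.h>
	#include <stdio.h>

	static int preempt_count = 2;         /* stands in for __preempt_count */
	static bool need_resched_lazy = true; /* stands in for the TIF flag */

	static bool preempt_count_dec_and_test(void)
	{
		if (--preempt_count == 0) /* the original "decl; sete" test */
			return true;
		return need_resched_lazy; /* the RT addition under PREEMPT_LAZY */
	}

	int main(void)
	{
		printf("reschedule: %d\n", preempt_count_dec_and_test()); /* 1: lazy */
		printf("reschedule: %d\n", preempt_count_dec_and_test()); /* 1: zero */
		return 0;
	}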
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/signal.h linux-4.1.13/arch/x86/include/asm/signal.h
---- linux-4.1.13.orig/arch/x86/include/asm/signal.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/signal.h 2015-11-29 09:23:09.525619763 +0100
-@@ -23,6 +23,19 @@
- unsigned long sig[_NSIG_WORDS];
- } sigset_t;
-
-+/*
-+ * Because some traps use the IST stack, we must keep preemption
-+ * disabled while calling do_trap(), but do_trap() may call
-+ * force_sig_info() which will grab the signal spin_locks for the
-+ * task, which in PREEMPT_RT_FULL are mutexes. By defining
-+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
-+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
-+ * trap.
-+ */
-+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
-+#define ARCH_RT_DELAYS_SIGNAL_SEND
-+#endif
-+
- #ifndef CONFIG_COMPAT
- typedef sigset_t compat_sigset_t;
- #endif
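The comment added above explains ARCH_RT_DELAYS_SIGNAL_SEND: delivering the signal directly from an IST-stack trap would acquire what are sleeping locks on PREEMPT_RT_FULL, so the send is recorded and performed on the way back to user space. A toy of that record-and-deliver-later shape, with all names invented:

	/* Toy of the delayed-signal idea: record now, deliver on exit. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool tif_notify_resume;
	static int pending_sig;

	static void force_sig_delayed(int sig)
	{
		pending_sig = sig;        /* remember what to send */
		tif_notify_resume = true; /* ask the exit path to act */
	}

	static void exit_to_user(void)    /* models the trap-return path */
	{
		if (tif_notify_resume) {
			tif_notify_resume = false;
			printf("delivering signal %d outside the trap\n",
			       pending_sig);
		}
	}

	int main(void)
	{
		force_sig_delayed(11); /* "from do_trap(), on the IST stack" */
		exit_to_user();
		return 0;
	}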
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/stackprotector.h linux-4.1.13/arch/x86/include/asm/stackprotector.h
---- linux-4.1.13.orig/arch/x86/include/asm/stackprotector.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/stackprotector.h 2015-11-29 09:23:09.525619763 +0100
-@@ -57,7 +57,7 @@
- */
- static __always_inline void boot_init_stack_canary(void)
- {
-- u64 canary;
-+ u64 uninitialized_var(canary);
- u64 tsc;
-
- #ifdef CONFIG_X86_64
-@@ -68,8 +68,16 @@
- * of randomness. The TSC only matters for very early init,
- * there it already has some randomness on most systems. Later
- * on during the bootup the random pool has true entropy too.
-+ *
-+ * For preempt-rt we need to weaken the randomness a bit, as
-+ * we can't call into the random generator from atomic context
-+ * due to locking constraints. We just leave canary
-+ * uninitialized and use the TSC based randomness on top of
-+ * it.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- get_random_bytes(&canary, sizeof(canary));
-+#endif
- tsc = __native_read_tsc();
- canary += tsc + (tsc << 32UL);
-
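The stackprotector hunk keeps the canary usable on RT by skipping get_random_bytes(), which cannot be called from this atomic context there, and relying on the TSC mixing alone. A toy of the mixing step, with clock_gettime() standing in for __native_read_tsc():

	/* Toy of the TSC mixing line above; the canary protects nothing. */
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);

		uint64_t tsc = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
		uint64_t canary = 0;         /* left "uninitialized" on RT */
		canary += tsc + (tsc << 32); /* the mixing line from the hunk */

		printf("canary = %#llx\n", (unsigned long long)canary);
		return 0;
	}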
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/thread_info.h linux-4.1.13/arch/x86/include/asm/thread_info.h
---- linux-4.1.13.orig/arch/x86/include/asm/thread_info.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/thread_info.h 2015-11-29 09:23:09.525619763 +0100
-@@ -55,6 +55,8 @@
- __u32 status; /* thread synchronous flags */
- __u32 cpu; /* current CPU */
- int saved_preempt_count;
-+ int preempt_lazy_count; /* 0 => lazy preemptable
-+ <0 => BUG */
- mm_segment_t addr_limit;
- void __user *sysenter_return;
- unsigned int sig_on_uaccess_error:1;
-@@ -95,6 +97,7 @@
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SECCOMP 8 /* secure computing */
-+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
- #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
- #define TIF_UPROBE 12 /* breakpointed or singlestepping */
- #define TIF_NOTSC 16 /* TSC is not accessible in userland */
-@@ -119,6 +122,7 @@
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_NOTSC (1 << TIF_NOTSC)
-@@ -168,6 +172,8 @@
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+
- #define STACK_WARN (THREAD_SIZE/8)
-
- /*
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/uaccess_32.h linux-4.1.13/arch/x86/include/asm/uaccess_32.h
---- linux-4.1.13.orig/arch/x86/include/asm/uaccess_32.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/uaccess_32.h 2015-11-29 09:23:09.525619763 +0100
-@@ -70,7 +70,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -117,7 +118,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/uaccess.h linux-4.1.13/arch/x86/include/asm/uaccess.h
---- linux-4.1.13.orig/arch/x86/include/asm/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/uaccess.h 2015-11-29 09:23:09.525619763 +0100
-@@ -74,7 +74,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -145,7 +146,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -240,7 +242,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -455,7 +458,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -479,7 +483,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.13/arch/x86/include/asm/uv/uv_bau.h
---- linux-4.1.13.orig/arch/x86/include/asm/uv/uv_bau.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/uv/uv_bau.h 2015-11-29 09:23:09.525619763 +0100
-@@ -615,9 +615,9 @@
- cycles_t send_message;
- cycles_t period_end;
- cycles_t period_time;
-- spinlock_t uvhub_lock;
-- spinlock_t queue_lock;
-- spinlock_t disable_lock;
-+ raw_spinlock_t uvhub_lock;
-+ raw_spinlock_t queue_lock;
-+ raw_spinlock_t disable_lock;
- /* tunables */
- int max_concurr;
- int max_concurr_const;
-@@ -776,15 +776,15 @@
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
- */
--static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
-+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
- {
-- spin_lock(lock);
-+ raw_spin_lock(lock);
- if (atomic_read(v) >= u) {
-- spin_unlock(lock);
-+ raw_spin_unlock(lock);
- return 0;
- }
- atomic_inc(v);
-- spin_unlock(lock);
-+ raw_spin_unlock(lock);
- return 1;
- }
-
-diff -Nur linux-4.1.13.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.13/arch/x86/include/asm/uv/uv_hub.h
---- linux-4.1.13.orig/arch/x86/include/asm/uv/uv_hub.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/include/asm/uv/uv_hub.h 2015-11-29 09:23:09.525619763 +0100
-@@ -492,7 +492,7 @@
- unsigned short nr_online_cpus;
- unsigned short pnode;
- short memory_nid;
-- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
-+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
- unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
- };
- extern struct uv_blade_info *uv_blade_info;
-diff -Nur linux-4.1.13.orig/arch/x86/Kconfig linux-4.1.13/arch/x86/Kconfig
---- linux-4.1.13.orig/arch/x86/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/Kconfig 2015-11-29 09:23:09.525619763 +0100
-@@ -22,6 +22,7 @@
- ### Arch settings
- config X86
- def_bool y
-+ select HAVE_PREEMPT_LAZY
- select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-@@ -203,8 +204,11 @@
- def_bool y
- depends on ISA_DMA_API
-
-+config RWSEM_GENERIC_SPINLOCK
-+ def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
-- def_bool y
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_CALIBRATE_DELAY
- def_bool y
-@@ -838,7 +842,7 @@
- config MAXSMP
- bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP && DEBUG_KERNEL
-- select CPUMASK_OFFSTACK
-+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
- ---help---
- Enable maximum number of CPUS and NUMA Nodes for this architecture.
- If unsure, say N.
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.13/arch/x86/kernel/apic/io_apic.c
---- linux-4.1.13.orig/arch/x86/kernel/apic/io_apic.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/apic/io_apic.c 2015-11-29 09:23:09.525619763 +0100
-@@ -1891,7 +1891,8 @@
- static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
- {
- /* If we are moving the irq we need to mask it */
-- if (unlikely(irqd_is_setaffinity_pending(data))) {
-+ if (unlikely(irqd_is_setaffinity_pending(data) &&
-+ !irqd_irq_inprogress(data))) {
- mask_ioapic(cfg);
- return true;
- }
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.13/arch/x86/kernel/apic/x2apic_uv_x.c
---- linux-4.1.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/apic/x2apic_uv_x.c 2015-11-29 09:23:09.525619763 +0100
-@@ -949,7 +949,7 @@
- uv_blade_info[blade].pnode = pnode;
- uv_blade_info[blade].nr_possible_cpus = 0;
- uv_blade_info[blade].nr_online_cpus = 0;
-- spin_lock_init(&uv_blade_info[blade].nmi_lock);
-+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
- min_pnode = min(pnode, min_pnode);
- max_pnode = max(pnode, max_pnode);
- blade++;
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/asm-offsets.c linux-4.1.13/arch/x86/kernel/asm-offsets.c
---- linux-4.1.13.orig/arch/x86/kernel/asm-offsets.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/asm-offsets.c 2015-11-29 09:23:09.525619763 +0100
-@@ -32,6 +32,7 @@
- OFFSET(TI_flags, thread_info, flags);
- OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
-+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
-
- BLANK();
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -71,4 +72,5 @@
-
- BLANK();
- DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
-+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
- }
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.13/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-4.1.13.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/cpu/mcheck/mce.c 2015-11-29 09:23:09.529619496 +0100
-@@ -41,6 +41,8 @@
- #include <linux/debugfs.h>
- #include <linux/irq_work.h>
- #include <linux/export.h>
-+#include <linux/jiffies.h>
-+#include <linux/work-simple.h>
-
- #include <asm/processor.h>
- #include <asm/traps.h>
-@@ -1267,7 +1269,7 @@
- static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
-
- static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
--static DEFINE_PER_CPU(struct timer_list, mce_timer);
-+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
-
- static unsigned long mce_adjust_timer_default(unsigned long interval)
- {
-@@ -1276,32 +1278,18 @@
-
- static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-
--static void __restart_timer(struct timer_list *t, unsigned long interval)
-+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
- {
-- unsigned long when = jiffies + interval;
-- unsigned long flags;
--
-- local_irq_save(flags);
--
-- if (timer_pending(t)) {
-- if (time_before(when, t->expires))
-- mod_timer_pinned(t, when);
-- } else {
-- t->expires = round_jiffies(when);
-- add_timer_on(t, smp_processor_id());
-- }
--
-- local_irq_restore(flags);
-+ if (!interval)
-+ return HRTIMER_NORESTART;
-+ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
-+ return HRTIMER_RESTART;
- }
-
--static void mce_timer_fn(unsigned long data)
-+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-- int cpu = smp_processor_id();
- unsigned long iv;
-
-- WARN_ON(cpu != data);
--
- iv = __this_cpu_read(mce_next_interval);
-
- if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1324,7 +1312,7 @@
-
- done:
- __this_cpu_write(mce_next_interval, iv);
-- __restart_timer(t, iv);
-+ return __restart_timer(timer, iv);
- }
-
- /*
-@@ -1332,7 +1320,7 @@
- */
- void mce_timer_kick(unsigned long interval)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
-
- __restart_timer(t, interval);
-@@ -1347,7 +1335,7 @@
- int cpu;
-
- for_each_online_cpu(cpu)
-- del_timer_sync(&per_cpu(mce_timer, cpu));
-+ hrtimer_cancel(&per_cpu(mce_timer, cpu));
- }
-
- static void mce_do_trigger(struct work_struct *work)
-@@ -1357,6 +1345,56 @@
-
- static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-
-+static void __mce_notify_work(struct swork_event *event)
-+{
-+ /* Not more than two messages every minute */
-+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-+
-+ /* wake processes polling /dev/mcelog */
-+ wake_up_interruptible(&mce_chrdev_wait);
-+
-+ /*
-+ * There is no risk of missing notifications because
-+ * work_pending is always cleared before the function is
-+ * executed.
-+ */
-+ if (mce_helper[0] && !work_pending(&mce_trigger_work))
-+ schedule_work(&mce_trigger_work);
-+
-+ if (__ratelimit(&ratelimit))
-+ pr_info(HW_ERR "Machine check events logged\n");
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static bool notify_work_ready __read_mostly;
-+static struct swork_event notify_work;
-+
-+static int mce_notify_work_init(void)
-+{
-+ int err;
-+
-+ err = swork_get();
-+ if (err)
-+ return err;
-+
-+ INIT_SWORK(&notify_work, __mce_notify_work);
-+ notify_work_ready = true;
-+ return 0;
-+}
-+
-+static void mce_notify_work(void)
-+{
-+ if (notify_work_ready)
-+ swork_queue(&notify_work);
-+}
-+#else
-+static void mce_notify_work(void)
-+{
-+ __mce_notify_work(NULL);
-+}
-+static inline int mce_notify_work_init(void) { return 0; }
-+#endif
-+
- /*
- * Notify the user(s) about new machine check events.
- * Can be called from interrupt context, but not from machine check/NMI
-@@ -1364,19 +1402,8 @@
- */
- int mce_notify_irq(void)
- {
-- /* Not more than two messages every minute */
-- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
--
- if (test_and_clear_bit(0, &mce_need_notify)) {
-- /* wake processes polling /dev/mcelog */
-- wake_up_interruptible(&mce_chrdev_wait);
--
-- if (mce_helper[0])
-- schedule_work(&mce_trigger_work);
--
-- if (__ratelimit(&ratelimit))
-- pr_info(HW_ERR "Machine check events logged\n");
--
-+ mce_notify_work();
- return 1;
- }
- return 0;
-@@ -1649,7 +1676,7 @@
- }
- }
-
--static void mce_start_timer(unsigned int cpu, struct timer_list *t)
-+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
- {
- unsigned long iv = check_interval * HZ;
-
-@@ -1658,16 +1685,17 @@
-
- per_cpu(mce_next_interval, cpu) = iv;
-
-- t->expires = round_jiffies(jiffies + iv);
-- add_timer_on(t, cpu);
-+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+ 0, HRTIMER_MODE_REL_PINNED);
- }
-
- static void __mcheck_cpu_init_timer(void)
- {
-- struct timer_list *t = this_cpu_ptr(&mce_timer);
-+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
-
-- setup_timer(t, mce_timer_fn, cpu);
-+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ t->function = mce_timer_fn;
- mce_start_timer(cpu, t);
- }
-
-@@ -2345,6 +2373,8 @@
- if (!mce_available(raw_cpu_ptr(&cpu_info)))
- return;
-
-+ hrtimer_cancel(this_cpu_ptr(&mce_timer));
-+
- if (!(action & CPU_TASKS_FROZEN))
- cmci_clear();
- for (i = 0; i < mca_cfg.banks; i++) {
-@@ -2371,6 +2401,7 @@
- if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
- }
-+ __mcheck_cpu_init_timer();
- }
-
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2378,7 +2409,6 @@
- mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- {
- unsigned int cpu = (unsigned long)hcpu;
-- struct timer_list *t = &per_cpu(mce_timer, cpu);
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
-@@ -2398,11 +2428,9 @@
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
-- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
-- mce_start_timer(cpu, t);
- break;
- }
-
-@@ -2441,6 +2469,10 @@
- goto err_out;
- }
-
-+ err = mce_notify_work_init();
-+ if (err)
-+ goto err_out;
-+
- if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
- err = -ENOMEM;
- goto err_out;
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.13/arch/x86/kernel/dumpstack_32.c
---- linux-4.1.13.orig/arch/x86/kernel/dumpstack_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/dumpstack_32.c 2015-11-29 09:23:09.529619496 +0100
-@@ -42,7 +42,7 @@
- unsigned long *stack, unsigned long bp,
- const struct stacktrace_ops *ops, void *data)
- {
-- const unsigned cpu = get_cpu();
-+ const unsigned cpu = get_cpu_light();
- int graph = 0;
- u32 *prev_esp;
-
-@@ -86,7 +86,7 @@
- break;
- touch_nmi_watchdog();
- }
-- put_cpu();
-+ put_cpu_light();
- }
- EXPORT_SYMBOL(dump_trace);
-
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.13/arch/x86/kernel/dumpstack_64.c
---- linux-4.1.13.orig/arch/x86/kernel/dumpstack_64.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/dumpstack_64.c 2015-11-29 09:23:09.529619496 +0100
-@@ -152,7 +152,7 @@
- unsigned long *stack, unsigned long bp,
- const struct stacktrace_ops *ops, void *data)
- {
-- const unsigned cpu = get_cpu();
-+ const unsigned cpu = get_cpu_light();
- struct thread_info *tinfo;
- unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
- unsigned long dummy;
-@@ -241,7 +241,7 @@
- * This handles the process stack:
- */
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
-- put_cpu();
-+ put_cpu_light();
- }
- EXPORT_SYMBOL(dump_trace);
-
-@@ -255,7 +255,7 @@
- int cpu;
- int i;
-
-- preempt_disable();
-+ migrate_disable();
- cpu = smp_processor_id();
-
- irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-@@ -291,7 +291,7 @@
- pr_cont(" %016lx", *stack++);
- touch_nmi_watchdog();
- }
-- preempt_enable();
-+ migrate_enable();
-
- pr_cont("\n");
- show_trace_log_lvl(task, regs, sp, bp, log_lvl);
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/entry_32.S linux-4.1.13/arch/x86/kernel/entry_32.S
---- linux-4.1.13.orig/arch/x86/kernel/entry_32.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/entry_32.S 2015-11-29 09:23:09.529619496 +0100
-@@ -359,8 +359,24 @@
- ENTRY(resume_kernel)
- DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
-+ # preempt count == 0 + NEED_RS set?
- cmpl $0,PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
- jnz restore_all
-+#else
-+ jz test_int_off
-+
-+	# at least preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jne restore_all
-+
-+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
-+ jnz restore_all
-+
-+ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
-+ jz restore_all
-+test_int_off:
-+#endif
- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
- call preempt_schedule_irq
-@@ -594,7 +610,7 @@
- ALIGN
- RING0_PTREGS_FRAME # can't unwind into user space anyway
- work_pending:
-- testb $_TIF_NEED_RESCHED, %cl
-+ testl $_TIF_NEED_RESCHED_MASK, %ecx
- jz work_notifysig
- work_resched:
- call schedule
-@@ -607,7 +623,7 @@
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
- jz restore_all
-- testb $_TIF_NEED_RESCHED, %cl
-+ testl $_TIF_NEED_RESCHED_MASK, %ecx
- jnz work_resched
-
- work_notifysig: # deal with pending signals and
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/entry_64.S linux-4.1.13/arch/x86/kernel/entry_64.S
---- linux-4.1.13.orig/arch/x86/kernel/entry_64.S 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/entry_64.S 2015-11-29 09:23:09.529619496 +0100
-@@ -370,8 +370,8 @@
- /* First do a reschedule test. */
- /* edx: work, edi: workmask */
- int_careful:
-- bt $TIF_NEED_RESCHED,%edx
-- jnc int_very_careful
-+ testl $_TIF_NEED_RESCHED_MASK,%edx
-+ jz int_very_careful
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %rdi
-@@ -776,7 +776,23 @@
- bt $9,EFLAGS(%rsp) /* interrupts were off? */
- jnc 1f
- 0: cmpl $0,PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
- jnz 1f
-+#else
-+ jz do_preempt_schedule_irq
-+
-+	# at least preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jnz 1f
-+
-+ GET_THREAD_INFO(%rcx)
-+ cmpl $0, TI_preempt_lazy_count(%rcx)
-+ jnz 1f
-+
-+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
-+ jnc 1f
-+do_preempt_schedule_irq:
-+#endif
- call preempt_schedule_irq
- jmp 0b
- 1:
-@@ -844,8 +860,8 @@
- /* edi: workmask, edx: work */
- retint_careful:
- CFI_RESTORE_STATE
-- bt $TIF_NEED_RESCHED,%edx
-- jnc retint_signal
-+ testl $_TIF_NEED_RESCHED_MASK,%edx
-+ jz retint_signal
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %rdi
-@@ -1118,6 +1134,7 @@
- jmp 2b
- .previous
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /* Call softirq on interrupt stack. Interrupts are off. */
- ENTRY(do_softirq_own_stack)
- CFI_STARTPROC
-@@ -1137,6 +1154,7 @@
- ret
- CFI_ENDPROC
- END(do_softirq_own_stack)
-+#endif
-
- #ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/irq_32.c linux-4.1.13/arch/x86/kernel/irq_32.c
---- linux-4.1.13.orig/arch/x86/kernel/irq_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/irq_32.c 2015-11-29 09:23:09.529619496 +0100
-@@ -135,6 +135,7 @@
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curstk;
-@@ -153,6 +154,7 @@
-
- call_on_stack(__do_softirq, isp);
- }
-+#endif
-
- bool handle_irq(unsigned irq, struct pt_regs *regs)
- {
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/process_32.c linux-4.1.13/arch/x86/kernel/process_32.c
---- linux-4.1.13.orig/arch/x86/kernel/process_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/process_32.c 2015-11-29 09:23:09.529619496 +0100
-@@ -35,6 +35,7 @@
- #include <linux/uaccess.h>
- #include <linux/io.h>
- #include <linux/kdebug.h>
-+#include <linux/highmem.h>
-
- #include <asm/pgtable.h>
- #include <asm/ldt.h>
-@@ -210,6 +211,35 @@
- }
- EXPORT_SYMBOL_GPL(start_thread);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+ pte_t *ptep = kmap_pte - idx;
-+
-+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
-
- /*
- * switch_to(x,y) should switch tasks from x to y.
-@@ -292,6 +322,8 @@
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
-
-+ switch_kmaps(prev_p, next_p);
-+
- /*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/signal.c linux-4.1.13/arch/x86/kernel/signal.c
---- linux-4.1.13.orig/arch/x86/kernel/signal.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/signal.c 2015-11-29 09:23:09.529619496 +0100
-@@ -723,6 +723,14 @@
- {
- user_exit();
-
-+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-+ if (unlikely(current->forced_info.si_signo)) {
-+ struct task_struct *t = current;
-+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
-+ t->forced_info.si_signo = 0;
-+ }
-+#endif
-+
- if (thread_info_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
-diff -Nur linux-4.1.13.orig/arch/x86/kernel/traps.c linux-4.1.13/arch/x86/kernel/traps.c
---- linux-4.1.13.orig/arch/x86/kernel/traps.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kernel/traps.c 2015-11-29 09:23:09.529619496 +0100
-@@ -88,9 +88,21 @@
- local_irq_enable();
- }
-
--static inline void preempt_conditional_sti(struct pt_regs *regs)
-+static inline void conditional_sti_ist(struct pt_regs *regs)
- {
-+#ifdef CONFIG_X86_64
-+ /*
-+ * X86_64 uses a per CPU stack on the IST for certain traps
-+ * like int3. The task can not be preempted when using one
-+ * of these stacks, thus preemption must be disabled, otherwise
-+ * the stack can be corrupted if the task is scheduled out,
-+ * and another task comes in and uses this stack.
-+ *
-+ * On x86_32 the task keeps its own stack and it is OK if the
-+ * task schedules out.
-+ */
- preempt_count_inc();
-+#endif
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
- }
-@@ -101,11 +113,13 @@
- local_irq_disable();
- }
-
--static inline void preempt_conditional_cli(struct pt_regs *regs)
-+static inline void conditional_cli_ist(struct pt_regs *regs)
- {
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_disable();
-+#ifdef CONFIG_X86_64
- preempt_count_dec();
-+#endif
- }
-
- enum ctx_state ist_enter(struct pt_regs *regs)
-@@ -536,9 +550,9 @@
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
-- preempt_conditional_sti(regs);
-+ conditional_sti_ist(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
- exit:
- ist_exit(regs, prev_state);
-@@ -668,12 +682,12 @@
- debug_stack_usage_inc();
-
- /* It's safe to allow irq's after DR6 has been saved */
-- preempt_conditional_sti(regs);
-+ conditional_sti_ist(regs);
-
- if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
- goto exit;
- }
-@@ -693,7 +707,7 @@
- si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(tsk, regs, error_code, si_code);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
-
- exit:
-diff -Nur linux-4.1.13.orig/arch/x86/kvm/lapic.c linux-4.1.13/arch/x86/kvm/lapic.c
---- linux-4.1.13.orig/arch/x86/kvm/lapic.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kvm/lapic.c 2015-11-29 09:23:09.529619496 +0100
-@@ -1104,7 +1104,7 @@
- static void apic_timer_expired(struct kvm_lapic *apic)
- {
- struct kvm_vcpu *vcpu = apic->vcpu;
-- wait_queue_head_t *q = &vcpu->wq;
-+ struct swait_head *q = &vcpu->wq;
- struct kvm_timer *ktimer = &apic->lapic_timer;
-
- if (atomic_read(&apic->lapic_timer.pending))
-@@ -1113,8 +1113,8 @@
- atomic_inc(&apic->lapic_timer.pending);
- kvm_set_pending_timer(vcpu);
-
-- if (waitqueue_active(q))
-- wake_up_interruptible(q);
-+ if (swaitqueue_active(q))
-+ swait_wake_interruptible(q);
-
- if (apic_lvtt_tscdeadline(apic))
- ktimer->expired_tscdeadline = ktimer->tscdeadline;
-@@ -1167,8 +1167,36 @@
- __delay(tsc_deadline - guest_tsc);
- }
-
-+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data);
-+
-+static void __apic_timer_expired(struct hrtimer *data)
-+{
-+ int ret, i = 0;
-+ enum hrtimer_restart r;
-+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-+
-+ r = apic_timer_fn(data);
-+
-+ if (r == HRTIMER_RESTART) {
-+ do {
-+ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ hrtimer_add_expires_ns(&ktimer->timer,
-+ ktimer->period);
-+ i++;
-+ } while (ret == -ETIME && i < 10);
-+
-+ if (ret == -ETIME) {
-+ printk_once(KERN_ERR "%s: failed to reprogram timer\n",
-+ __func__);
-+ WARN_ON_ONCE(1);
-+ }
-+ }
-+}
-+
- static void start_apic_timer(struct kvm_lapic *apic)
- {
-+ int ret;
- ktime_t now;
-
- atomic_set(&apic->lapic_timer.pending, 0);
-@@ -1199,9 +1227,11 @@
- }
- }
-
-- hrtimer_start(&apic->lapic_timer.timer,
-+ ret = hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, apic->lapic_timer.period),
- HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ __apic_timer_expired(&apic->lapic_timer.timer);
-
- apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
- PRIx64 ", "
-@@ -1233,8 +1263,10 @@
- do_div(ns, this_tsc_khz);
- expire = ktime_add_ns(now, ns);
- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
-- hrtimer_start(&apic->lapic_timer.timer,
-+ ret = hrtimer_start(&apic->lapic_timer.timer,
- expire, HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ __apic_timer_expired(&apic->lapic_timer.timer);
- } else
- apic_timer_expired(apic);
-
-@@ -1707,6 +1739,7 @@
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- apic->lapic_timer.timer.function = apic_timer_fn;
-+ apic->lapic_timer.timer.irqsafe = 1;
-
- /*
- * APIC is created enabled. This will prevent kvm_lapic_set_base from
-@@ -1834,7 +1867,8 @@
-
- timer = &vcpu->arch.apic->lapic_timer.timer;
- if (hrtimer_cancel(timer))
-- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
-+ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME)
-+ __apic_timer_expired(timer);
- }
-
- /*
-diff -Nur linux-4.1.13.orig/arch/x86/kvm/x86.c linux-4.1.13/arch/x86/kvm/x86.c
---- linux-4.1.13.orig/arch/x86/kvm/x86.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/kvm/x86.c 2015-11-29 09:23:09.529619496 +0100
-@@ -5813,6 +5813,13 @@
- goto out;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
-+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
-+ return -EOPNOTSUPP;
-+ }
-+#endif
-+
- r = kvm_mmu_module_init();
- if (r)
- goto out_free_percpu;
-diff -Nur linux-4.1.13.orig/arch/x86/lib/usercopy_32.c linux-4.1.13/arch/x86/lib/usercopy_32.c
---- linux-4.1.13.orig/arch/x86/lib/usercopy_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/lib/usercopy_32.c 2015-11-29 09:23:09.533619230 +0100
-@@ -647,7 +647,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -668,7 +669,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-diff -Nur linux-4.1.13.orig/arch/x86/mm/fault.c linux-4.1.13/arch/x86/mm/fault.c
---- linux-4.1.13.orig/arch/x86/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/mm/fault.c 2015-11-29 09:23:09.533619230 +0100
-@@ -13,6 +13,7 @@
- #include <linux/hugetlb.h> /* hstate_index_to_shift */
- #include <linux/prefetch.h> /* prefetchw */
- #include <linux/context_tracking.h> /* exception_enter(), ... */
-+#include <linux/uaccess.h> /* faulthandler_disabled() */
-
- #include <asm/traps.h> /* dotraplinkage, ... */
- #include <asm/pgalloc.h> /* pgd_*(), ... */
-@@ -1126,9 +1127,9 @@
-
- /*
- * If we're in an interrupt, have no user context or are running
-- * in an atomic region then we must not take the fault:
-+ * in a region with pagefaults disabled then we must not take the fault
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
-diff -Nur linux-4.1.13.orig/arch/x86/mm/highmem_32.c linux-4.1.13/arch/x86/mm/highmem_32.c
---- linux-4.1.13.orig/arch/x86/mm/highmem_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/mm/highmem_32.c 2015-11-29 09:23:09.533619230 +0100
-@@ -32,10 +32,11 @@
- */
- void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-+ pte_t pte = mk_pte(page, prot);
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable_nort();
- pagefault_disable();
-
- if (!PageHighMem(page))
-@@ -45,7 +46,10 @@
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-- set_pte(kmap_pte-idx, mk_pte(page, prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte-idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -88,6 +92,9 @@
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
-@@ -100,6 +107,7 @@
- #endif
-
- pagefault_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-diff -Nur linux-4.1.13.orig/arch/x86/mm/iomap_32.c linux-4.1.13/arch/x86/mm/iomap_32.c
---- linux-4.1.13.orig/arch/x86/mm/iomap_32.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/mm/iomap_32.c 2015-11-29 09:23:09.533619230 +0100
-@@ -56,15 +56,22 @@
-
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- {
-+ pte_t pte = pfn_pte(pfn, prot);
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+ WARN_ON(!pte_none(*(kmap_pte - idx)));
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte - idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -112,10 +119,14 @@
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL_GPL(iounmap_atomic);
-diff -Nur linux-4.1.13.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.13/arch/x86/platform/uv/tlb_uv.c
---- linux-4.1.13.orig/arch/x86/platform/uv/tlb_uv.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/platform/uv/tlb_uv.c 2015-11-29 09:23:09.533619230 +0100
-@@ -714,9 +714,9 @@
-
- quiesce_local_uvhub(hmaster);
-
-- spin_lock(&hmaster->queue_lock);
-+ raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
-- spin_unlock(&hmaster->queue_lock);
-+ raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
-@@ -736,9 +736,9 @@
-
- quiesce_local_uvhub(hmaster);
-
-- spin_lock(&hmaster->queue_lock);
-+ raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
-- spin_unlock(&hmaster->queue_lock);
-+ raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
-@@ -759,7 +759,7 @@
- cycles_t tm1;
-
- hmaster = bcp->uvhub_master;
-- spin_lock(&hmaster->disable_lock);
-+ raw_spin_lock(&hmaster->disable_lock);
- if (!bcp->baudisabled) {
- stat->s_bau_disabled++;
- tm1 = get_cycles();
-@@ -772,7 +772,7 @@
- }
- }
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- }
-
- static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -835,7 +835,7 @@
- */
- static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
- {
-- spinlock_t *lock = &hmaster->uvhub_lock;
-+ raw_spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
-@@ -968,7 +968,7 @@
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
-- spin_lock(&hmaster->disable_lock);
-+ raw_spin_lock(&hmaster->disable_lock);
- if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
- stat->s_bau_reenabled++;
- for_each_present_cpu(tcpu) {
-@@ -980,10 +980,10 @@
- tbcp->period_giveups = 0;
- }
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- return 0;
- }
-- spin_unlock(&hmaster->disable_lock);
-+ raw_spin_unlock(&hmaster->disable_lock);
- return -1;
- }
-
-@@ -1901,9 +1901,9 @@
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
-- spin_lock_init(&bcp->queue_lock);
-- spin_lock_init(&bcp->uvhub_lock);
-- spin_lock_init(&bcp->disable_lock);
-+ raw_spin_lock_init(&bcp->queue_lock);
-+ raw_spin_lock_init(&bcp->uvhub_lock);
-+ raw_spin_lock_init(&bcp->disable_lock);
- }
- }
-
-diff -Nur linux-4.1.13.orig/arch/x86/platform/uv/uv_time.c linux-4.1.13/arch/x86/platform/uv/uv_time.c
---- linux-4.1.13.orig/arch/x86/platform/uv/uv_time.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/x86/platform/uv/uv_time.c 2015-11-29 09:23:09.533619230 +0100
-@@ -58,7 +58,7 @@
-
- /* There is one of these allocated per node */
- struct uv_rtc_timer_head {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- /* next cpu waiting for timer, local node relative: */
- int next_cpu;
- /* number of cpus on this node: */
-@@ -178,7 +178,7 @@
- uv_rtc_deallocate_timers();
- return -ENOMEM;
- }
-- spin_lock_init(&head->lock);
-+ raw_spin_lock_init(&head->lock);
- head->ncpus = uv_blade_nr_possible_cpus(bid);
- head->next_cpu = -1;
- blade_info[bid] = head;
-@@ -232,7 +232,7 @@
- unsigned long flags;
- int next_cpu;
-
-- spin_lock_irqsave(&head->lock, flags);
-+ raw_spin_lock_irqsave(&head->lock, flags);
-
- next_cpu = head->next_cpu;
- *t = expires;
-@@ -244,12 +244,12 @@
- if (uv_setup_intr(cpu, expires)) {
- *t = ULLONG_MAX;
- uv_rtc_find_next_timer(head, pnode);
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
- return -ETIME;
- }
- }
-
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
- return 0;
- }
-
-@@ -268,7 +268,7 @@
- unsigned long flags;
- int rc = 0;
-
-- spin_lock_irqsave(&head->lock, flags);
-+ raw_spin_lock_irqsave(&head->lock, flags);
-
- if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
- rc = 1;
-@@ -280,7 +280,7 @@
- uv_rtc_find_next_timer(head, pnode);
- }
-
-- spin_unlock_irqrestore(&head->lock, flags);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
-
- return rc;
- }
-@@ -300,13 +300,18 @@
- static cycle_t uv_read_rtc(struct clocksource *cs)
- {
- unsigned long offset;
-+ cycle_t cycles;
-
-+ preempt_disable();
- if (uv_get_min_hub_revision_id() == 1)
- offset = 0;
- else
- offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
-
-- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
-+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
-+ preempt_enable();
-+
-+ return cycles;
- }
-
- /*
-diff -Nur linux-4.1.13.orig/arch/xtensa/mm/fault.c linux-4.1.13/arch/xtensa/mm/fault.c
---- linux-4.1.13.orig/arch/xtensa/mm/fault.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/xtensa/mm/fault.c 2015-11-29 09:23:09.533619230 +0100
-@@ -15,10 +15,10 @@
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/hardirq.h>
-+#include <linux/uaccess.h>
- #include <asm/mmu_context.h>
- #include <asm/cacheflush.h>
- #include <asm/hardirq.h>
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
-
- DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
-@@ -57,7 +57,7 @@
- /* If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm) {
-+ if (faulthandler_disabled() || !mm) {
- bad_page_fault(regs, address, SIGSEGV);
- return;
- }
-diff -Nur linux-4.1.13.orig/arch/xtensa/mm/highmem.c linux-4.1.13/arch/xtensa/mm/highmem.c
---- linux-4.1.13.orig/arch/xtensa/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/arch/xtensa/mm/highmem.c 2015-11-29 09:23:09.533619230 +0100
-@@ -42,6 +42,7 @@
- enum fixed_addresses idx;
- unsigned long vaddr;
-
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -79,6 +80,7 @@
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-diff -Nur linux-4.1.13.orig/block/blk-core.c linux-4.1.13/block/blk-core.c
---- linux-4.1.13.orig/block/blk-core.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-core.c 2015-11-29 09:23:09.533619230 +0100
-@@ -100,6 +100,9 @@
-
- INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->timeout_list);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
- rq->cpu = -1;
- rq->q = q;
- rq->__sector = (sector_t) -1;
-@@ -194,7 +197,7 @@
- **/
- void blk_start_queue(struct request_queue *q)
- {
-- WARN_ON(!irqs_disabled());
-+ WARN_ON_NONRT(!irqs_disabled());
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
-@@ -661,7 +664,7 @@
- q->bypass_depth = 1;
- __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
-
-- init_waitqueue_head(&q->mq_freeze_wq);
-+ init_swait_head(&q->mq_freeze_wq);
-
- if (blkcg_init_queue(q))
- goto fail_bdi;
-@@ -3077,7 +3080,7 @@
- blk_run_queue_async(q);
- else
- __blk_run_queue(q);
-- spin_unlock(q->queue_lock);
-+ spin_unlock_irq(q->queue_lock);
- }
-
- static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3125,7 +3128,6 @@
- void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- {
- struct request_queue *q;
-- unsigned long flags;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
-@@ -3145,11 +3147,6 @@
- q = NULL;
- depth = 0;
-
-- /*
-- * Save and disable interrupts here, to avoid doing it for every
-- * queue lock we have to take.
-- */
-- local_irq_save(flags);
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
-@@ -3162,7 +3159,7 @@
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
-- spin_lock(q->queue_lock);
-+ spin_lock_irq(q->queue_lock);
- }
-
- /*
-@@ -3189,8 +3186,6 @@
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
--
-- local_irq_restore(flags);
- }
-
- void blk_finish_plug(struct blk_plug *plug)
-diff -Nur linux-4.1.13.orig/block/blk-ioc.c linux-4.1.13/block/blk-ioc.c
---- linux-4.1.13.orig/block/blk-ioc.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-ioc.c 2015-11-29 09:23:09.533619230 +0100
-@@ -7,6 +7,7 @@
- #include <linux/bio.h>
- #include <linux/blkdev.h>
- #include <linux/slab.h>
-+#include <linux/delay.h>
-
- #include "blk.h"
-
-@@ -109,7 +110,7 @@
- spin_unlock(q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
-- cpu_relax();
-+ cpu_chill();
- spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- }
- }
-@@ -187,7 +188,7 @@
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
-- cpu_relax();
-+ cpu_chill();
- goto retry;
- }
- }
-diff -Nur linux-4.1.13.orig/block/blk-iopoll.c linux-4.1.13/block/blk-iopoll.c
---- linux-4.1.13.orig/block/blk-iopoll.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-iopoll.c 2015-11-29 09:23:09.533619230 +0100
-@@ -35,6 +35,7 @@
- list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(blk_iopoll_sched);
-
-@@ -132,6 +133,7 @@
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
-
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- /**
-@@ -201,6 +203,7 @@
- this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- return NOTIFY_OK;
-diff -Nur linux-4.1.13.orig/block/blk-mq.c linux-4.1.13/block/blk-mq.c
---- linux-4.1.13.orig/block/blk-mq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-mq.c 2015-11-29 09:23:09.533619230 +0100
-@@ -88,7 +88,7 @@
- if (!(gfp & __GFP_WAIT))
- return -EBUSY;
-
-- ret = wait_event_interruptible(q->mq_freeze_wq,
-+ ret = swait_event_interruptible(q->mq_freeze_wq,
- !q->mq_freeze_depth || blk_queue_dying(q));
- if (blk_queue_dying(q))
- return -ENODEV;
-@@ -107,7 +107,7 @@
- struct request_queue *q =
- container_of(ref, struct request_queue, mq_usage_counter);
-
-- wake_up_all(&q->mq_freeze_wq);
-+ swait_wake_all(&q->mq_freeze_wq);
- }
-
- void blk_mq_freeze_queue_start(struct request_queue *q)
-@@ -127,7 +127,7 @@
-
- static void blk_mq_freeze_queue_wait(struct request_queue *q)
- {
-- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
-+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
- }
-
- /*
-@@ -151,7 +151,7 @@
- spin_unlock_irq(q->queue_lock);
- if (wake) {
- percpu_ref_reinit(&q->mq_usage_counter);
-- wake_up_all(&q->mq_freeze_wq);
-+ swait_wake_all(&q->mq_freeze_wq);
- }
- }
- EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -170,7 +170,7 @@
- * dying, we need to ensure that processes currently waiting on
- * the queue are notified as well.
- */
-- wake_up_all(&q->mq_freeze_wq);
-+ swait_wake_all(&q->mq_freeze_wq);
- }
-
- bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
-@@ -217,6 +217,9 @@
- rq->resid_len = 0;
- rq->sense = NULL;
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
- INIT_LIST_HEAD(&rq->timeout_list);
- rq->timeout = 0;
-
-@@ -346,6 +349,17 @@
- }
- EXPORT_SYMBOL(blk_mq_end_request);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+void __blk_mq_complete_request_remote_work(struct work_struct *work)
-+{
-+ struct request *rq = container_of(work, struct request, work);
-+
-+ rq->q->softirq_done_fn(rq);
-+}
-+
-+#else
-+
- static void __blk_mq_complete_request_remote(void *data)
- {
- struct request *rq = data;
-@@ -353,6 +367,8 @@
- rq->q->softirq_done_fn(rq);
- }
-
-+#endif
-+
- static void blk_mq_ipi_complete_request(struct request *rq)
- {
- struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -364,19 +380,23 @@
- return;
- }
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
- shared = cpus_share_cache(cpu, ctx->cpu);
-
- if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ schedule_work_on(ctx->cpu, &rq->work);
-+#else
- rq->csd.func = __blk_mq_complete_request_remote;
- rq->csd.info = rq;
- rq->csd.flags = 0;
- smp_call_function_single_async(ctx->cpu, &rq->csd);
-+#endif
- } else {
- rq->q->softirq_done_fn(rq);
- }
-- put_cpu();
-+ put_cpu_light();
- }
-
- void __blk_mq_complete_request(struct request *rq)
-@@ -905,14 +925,14 @@
- return;
-
- if (!async) {
-- int cpu = get_cpu();
-+ int cpu = get_cpu_light();
- if (cpumask_test_cpu(cpu, hctx->cpumask)) {
- __blk_mq_run_hw_queue(hctx);
-- put_cpu();
-+ put_cpu_light();
- return;
- }
-
-- put_cpu();
-+ put_cpu_light();
- }
-
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-@@ -1589,7 +1609,7 @@
- {
- struct blk_mq_hw_ctx *hctx = data;
-
-- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
-+ if (action == CPU_POST_DEAD)
- return blk_mq_hctx_cpu_offline(hctx, cpu);
-
- /*
-diff -Nur linux-4.1.13.orig/block/blk-mq-cpu.c linux-4.1.13/block/blk-mq-cpu.c
---- linux-4.1.13.orig/block/blk-mq-cpu.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-mq-cpu.c 2015-11-29 09:23:09.533619230 +0100
-@@ -16,7 +16,7 @@
- #include "blk-mq.h"
-
- static LIST_HEAD(blk_mq_cpu_notify_list);
--static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
-+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
-
- static int blk_mq_main_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-@@ -25,7 +25,10 @@
- struct blk_mq_cpu_notifier *notify;
- int ret = NOTIFY_OK;
-
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ if (action != CPU_POST_DEAD)
-+ return NOTIFY_OK;
-+
-+ spin_lock(&blk_mq_cpu_notify_lock);
-
- list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
- ret = notify->notify(notify->data, action, cpu);
-@@ -33,7 +36,7 @@
- break;
- }
-
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- return ret;
- }
-
-@@ -41,16 +44,16 @@
- {
- BUG_ON(!notifier->notify);
-
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ spin_lock(&blk_mq_cpu_notify_lock);
- list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- }
-
- void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
- {
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ spin_lock(&blk_mq_cpu_notify_lock);
- list_del(&notifier->list);
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- }
-
- void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-diff -Nur linux-4.1.13.orig/block/blk-mq.h linux-4.1.13/block/blk-mq.h
---- linux-4.1.13.orig/block/blk-mq.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-mq.h 2015-11-29 09:23:09.533619230 +0100
-@@ -76,7 +76,10 @@
- static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- unsigned int cpu)
- {
-- return per_cpu_ptr(q->queue_ctx, cpu);
-+ struct blk_mq_ctx *ctx;
-+
-+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
-+ return ctx;
- }
-
- /*
-@@ -87,12 +90,12 @@
- */
- static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
- {
-- return __blk_mq_get_ctx(q, get_cpu());
-+ return __blk_mq_get_ctx(q, get_cpu_light());
- }
-
- static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
- {
-- put_cpu();
-+ put_cpu_light();
- }
-
- struct blk_mq_alloc_data {
-diff -Nur linux-4.1.13.orig/block/blk-softirq.c linux-4.1.13/block/blk-softirq.c
---- linux-4.1.13.orig/block/blk-softirq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/blk-softirq.c 2015-11-29 09:23:09.533619230 +0100
-@@ -51,6 +51,7 @@
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- /*
-@@ -93,6 +94,7 @@
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- return NOTIFY_OK;
-@@ -150,6 +152,7 @@
- goto do_local;
-
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- /**
-diff -Nur linux-4.1.13.orig/block/bounce.c linux-4.1.13/block/bounce.c
---- linux-4.1.13.orig/block/bounce.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/block/bounce.c 2015-11-29 09:23:09.537618965 +0100
-@@ -54,11 +54,11 @@
- unsigned long flags;
- unsigned char *vto;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- vto = kmap_atomic(to->bv_page);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
-
- #else /* CONFIG_HIGHMEM */
-diff -Nur linux-4.1.13.orig/crypto/algapi.c linux-4.1.13/crypto/algapi.c
---- linux-4.1.13.orig/crypto/algapi.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/crypto/algapi.c 2015-11-29 09:23:09.537618965 +0100
-@@ -695,13 +695,13 @@
-
- int crypto_register_notifier(struct notifier_block *nb)
- {
-- return blocking_notifier_chain_register(&crypto_chain, nb);
-+ return srcu_notifier_chain_register(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_register_notifier);
-
- int crypto_unregister_notifier(struct notifier_block *nb)
- {
-- return blocking_notifier_chain_unregister(&crypto_chain, nb);
-+ return srcu_notifier_chain_unregister(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-
-diff -Nur linux-4.1.13.orig/crypto/api.c linux-4.1.13/crypto/api.c
---- linux-4.1.13.orig/crypto/api.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/crypto/api.c 2015-11-29 09:23:09.537618965 +0100
-@@ -31,7 +31,7 @@
- DECLARE_RWSEM(crypto_alg_sem);
- EXPORT_SYMBOL_GPL(crypto_alg_sem);
-
--BLOCKING_NOTIFIER_HEAD(crypto_chain);
-+SRCU_NOTIFIER_HEAD(crypto_chain);
- EXPORT_SYMBOL_GPL(crypto_chain);
-
- static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
-@@ -236,10 +236,10 @@
- {
- int ok;
-
-- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- if (ok == NOTIFY_DONE) {
- request_module("cryptomgr");
-- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- return ok;
-diff -Nur linux-4.1.13.orig/crypto/internal.h linux-4.1.13/crypto/internal.h
---- linux-4.1.13.orig/crypto/internal.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/crypto/internal.h 2015-11-29 09:23:09.537618965 +0100
-@@ -48,7 +48,7 @@
-
- extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
--extern struct blocking_notifier_head crypto_chain;
-+extern struct srcu_notifier_head crypto_chain;
-
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
-@@ -142,7 +142,7 @@
-
- static inline void crypto_notify(unsigned long val, void *v)
- {
-- blocking_notifier_call_chain(&crypto_chain, val, v);
-+ srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- #endif /* _CRYPTO_INTERNAL_H */
-diff -Nur linux-4.1.13.orig/Documentation/hwlat_detector.txt linux-4.1.13/Documentation/hwlat_detector.txt
---- linux-4.1.13.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/Documentation/hwlat_detector.txt 2015-11-29 09:23:09.477622951 +0100
-@@ -0,0 +1,64 @@
-+Introduction:
-+-------------
-+
-+The module hwlat_detector is a special purpose kernel module that is used to
-+detect large system latencies induced by the behavior of certain underlying
-+hardware or firmware, independent of Linux itself. The code was developed
-+originally to detect SMIs (System Management Interrupts) on x86 systems;
-+however, there is nothing x86-specific about this patchset. It was
-+originally written for use by the "RT" patch, since the Real Time
-+kernel is highly latency-sensitive.
-+
-+SMIs are usually not serviced by the Linux kernel, which typically does not
-+even know that they are occurring. SMIs are instead set up by BIOS code
-+and are serviced by BIOS code, usually for "critical" events such as
-+management of thermal sensors and fans. Sometimes though, SMIs are used for
-+other tasks and those tasks can spend an inordinate amount of time in the
-+handler (sometimes measured in milliseconds). Obviously this is a problem if
-+you are trying to keep event service latencies down in the microsecond range.
-+
-+The hardware latency detector works by hogging all of the cpus for configurable
-+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
-+for some period, then looking for gaps in the TSC data. Any gap indicates a
-+time when the polling was interrupted, and since the machine is stopped and
-+interrupts turned off, the only thing that could do that would be an SMI.
-+
-+Note that the SMI detector should *NEVER* be used in a production environment.
-+It is intended to be run manually to determine if the hardware platform has a
-+problem with long system firmware service routines.
-+
-+Usage:
-+------
-+
-+Loading the module hwlat_detector with the parameter "enabled=1" (or by
-+toggling on the "enable" entry in the "hwlat_detector" debugfs directory) is the only
-+step required to start the hwlat_detector. It is possible to redefine the
-+threshold in microseconds (us) above which latency spikes will be taken
-+into account (parameter "threshold=").
-+
-+Example:
-+
-+ # modprobe hwlat_detector enabled=1 threshold=100
-+
-+After the module is loaded, it creates a directory named "hwlat_detector" under
-+the debugfs mountpoint, referred to in this text as "/debug/hwlat_detector".
-+It is necessary to have debugfs mounted, which might be on /sys/debug on your system.
-+
-+The /debug/hwlat_detector interface contains the following files:
-+
-+count - number of latency spikes observed since last reset
-+enable - a global enable/disable toggle (0/1), resets count
-+max - maximum hardware latency actually observed (usecs)
-+sample - a pipe from which to read current raw sample data
-+ in the format <timestamp> <latency observed usecs>
-+ (can be opened O_NONBLOCK for a single sample)
-+threshold - minimum latency value to be considered (usecs)
-+width - time period to sample with CPUs held (usecs)
-+ must be less than the total window size (enforced)
-+window - total period of sampling, width being inside (usecs)
-+
-+By default we will set width to 500,000 and window to 1,000,000, meaning that
-+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-+observe any latencies that exceed the threshold (initially 100 usecs),
-+then we write to a global sample ring buffer of 8K samples, which is
-+consumed by reading from the "sample" (pipe) debugfs file interface.
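-+
-+For example, assuming debugfs is mounted as described above, a short
-+session might look like this:
-+
-+ # modprobe hwlat_detector enabled=1 threshold=100
-+ # cat /debug/hwlat_detector/count
-+ # cat /debug/hwlat_detector/max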
-diff -Nur linux-4.1.13.orig/Documentation/sysrq.txt linux-4.1.13/Documentation/sysrq.txt
---- linux-4.1.13.orig/Documentation/sysrq.txt 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/Documentation/sysrq.txt 2015-11-29 09:23:09.477622951 +0100
-@@ -59,10 +59,17 @@
- On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
-
--On all - write a character to /proc/sysrq-trigger. e.g.:
--
-+On all - write a character to /proc/sysrq-trigger, e.g.:
- echo t > /proc/sysrq-trigger
-
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+ Send an ICMP echo request with this pattern plus the particular
-+ SysRq command key. Example:
-+ # ping -c1 -s57 -p0102030468
-+ will trigger the SysRq-H (help) command.
-+
-+
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-4.1.13.orig/Documentation/trace/histograms.txt linux-4.1.13/Documentation/trace/histograms.txt
---- linux-4.1.13.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/Documentation/trace/histograms.txt 2015-11-29 09:23:09.477622951 +0100
-@@ -0,0 +1,186 @@
-+ Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation of how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms:
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_INTERRUPT_OFF_LATENCY
-+ CONFIG_PREEMPT_OFF_LATENCY
-+
-+
-+* Effective latencies
-+
-+Effective latencies actually occur during the wakeup of a process. To
-+determine effective latencies, the kernel stores the time stamp when a
-+process is scheduled to be woken up, and determines the duration of the
-+wakeup time shortly before control is passed over to this process. Note
-+that the apparent latency in user space may be somewhat longer, since the
-+process may be interrupted after control is passed over to it but before
-+the execution in user space takes place. Simply measuring the interval
-+between enqueuing and wakeup may also not be appropriate in cases when a
-+process is scheduled as a result of a timer expiration. The timer may have
-+missed its deadline, e.g. due to disabled interrupts, but this latency
-+would not be registered. Therefore, the offsets of missed timers are
-+recorded in a separate histogram. If both wakeup latency and missed timer
-+offsets are configured and enabled, a third histogram may be enabled that
-+records the overall latency as a sum of the timer latency, if any, and the
-+wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_WAKEUP_LATENCY
-+ CONFIG_MISSED_TIMER_OFFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev /sys sysfs defaults 0 0
-+nodev /sys/kernel/debug debugfs defaults 0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
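-+For example, to enable the wakeup histograms and inspect the data
-+collected for CPU0:
-+
-+echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/wakeup
-+cat /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0
-+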
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+ cd $HISTDIR
-+ for i in `find . | grep /reset$`
-+ do
-+ echo 1 >$i
-+ done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first column and the number of samples in the
-+second column. To display only lines with a positive latency count, use, for
-+example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs samples
-+ 0 2984486876
-+ 1 49843506
-+ 2 58219047
-+ 3 5348126
-+ 4 2187960
-+ 5 3388262
-+ 6 959289
-+ 7 208294
-+ 8 40420
-+ 9 4485
-+ 10 14918
-+ 11 18340
-+ 12 25052
-+ 13 19455
-+ 14 5602
-+ 15 969
-+ 16 47
-+ 17 18
-+ 18 14
-+ 19 1
-+ 20 3
-+ 21 2
-+ 22 5
-+ 23 2
-+ 25 1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered if this variable is set to 0.
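-+
-+For example, to restrict recording to a process with PID 1234 (a made-up
-+PID for illustration):
-+
-+echo 1234 >/sys/kernel/debug/tracing/latency_hist/wakeup/pid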
-+
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered from the highest wakeup
-+latency that occurred in a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-+<- <PID> <Priority> <Command> <Timestamp>
-+
-+The value of <Timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <Latency>.
-+
-+When retrospectively searching for the origin of a latency when
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) switched to the
-+late real-time task. In addition to the victim's data, the
-+data of the possible culprit are therefore also displayed after the
-+"<-" symbol.
-+
-+Finally, the timestamp of the time when the latency occurred
-+in <seconds>.<microseconds> after the most recent system boot
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
-diff -Nur linux-4.1.13.orig/drivers/acpi/acpica/acglobal.h linux-4.1.13/drivers/acpi/acpica/acglobal.h
---- linux-4.1.13.orig/drivers/acpi/acpica/acglobal.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/acpi/acpica/acglobal.h 2015-11-29 09:23:09.537618965 +0100
-@@ -112,7 +112,7 @@
- * interrupt level
- */
- ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
--ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
-+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
- ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
-
- /* Mutex for _OSI support */
-diff -Nur linux-4.1.13.orig/drivers/acpi/acpica/hwregs.c linux-4.1.13/drivers/acpi/acpica/hwregs.c
---- linux-4.1.13.orig/drivers/acpi/acpica/hwregs.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/acpi/acpica/hwregs.c 2015-11-29 09:23:09.537618965 +0100
-@@ -269,14 +269,14 @@
- ACPI_BITMASK_ALL_FIXED_STATUS,
- ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
-
-- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
-
- /* Clear the fixed events in PM1 A/B */
-
- status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
- ACPI_BITMASK_ALL_FIXED_STATUS);
-
-- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
-
- if (ACPI_FAILURE(status)) {
- goto exit;
-diff -Nur linux-4.1.13.orig/drivers/acpi/acpica/hwxface.c linux-4.1.13/drivers/acpi/acpica/hwxface.c
---- linux-4.1.13.orig/drivers/acpi/acpica/hwxface.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/acpi/acpica/hwxface.c 2015-11-29 09:23:09.537618965 +0100
-@@ -374,7 +374,7 @@
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
-- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
-
- /*
- * At this point, we know that the parent register is one of the
-@@ -435,7 +435,7 @@
-
- unlock_and_exit:
-
-- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
- return_ACPI_STATUS(status);
- }
-
-diff -Nur linux-4.1.13.orig/drivers/acpi/acpica/utmutex.c linux-4.1.13/drivers/acpi/acpica/utmutex.c
---- linux-4.1.13.orig/drivers/acpi/acpica/utmutex.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/acpi/acpica/utmutex.c 2015-11-29 09:23:09.537618965 +0100
-@@ -88,7 +88,7 @@
- return_ACPI_STATUS (status);
- }
-
-- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
-+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
-@@ -141,7 +141,7 @@
- /* Delete the spinlocks */
-
- acpi_os_delete_lock(acpi_gbl_gpe_lock);
-- acpi_os_delete_lock(acpi_gbl_hardware_lock);
-+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
- acpi_os_delete_lock(acpi_gbl_reference_count_lock);
-
- /* Delete the reader/writer lock */
-diff -Nur linux-4.1.13.orig/drivers/ata/libata-sff.c linux-4.1.13/drivers/ata/libata-sff.c
---- linux-4.1.13.orig/drivers/ata/libata-sff.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ata/libata-sff.c 2015-11-29 09:23:09.537618965 +0100
-@@ -678,9 +678,9 @@
- unsigned long flags;
- unsigned int consumed;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- return consumed;
- }
-@@ -719,7 +719,7 @@
- unsigned long flags;
-
- /* FIXME: use a bounce buffer */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
-@@ -727,7 +727,7 @@
- do_write);
-
- kunmap_atomic(buf);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- } else {
- buf = page_address(page);
- ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
-@@ -864,7 +864,7 @@
- unsigned long flags;
-
- /* FIXME: use bounce buffer */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
-@@ -872,7 +872,7 @@
- count, rw);
-
- kunmap_atomic(buf);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- } else {
- buf = page_address(page);
- consumed = ap->ops->sff_data_xfer(dev, buf + offset,
-diff -Nur linux-4.1.13.orig/drivers/char/random.c linux-4.1.13/drivers/char/random.c
---- linux-4.1.13.orig/drivers/char/random.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/char/random.c 2015-11-29 09:23:09.537618965 +0100
-@@ -776,8 +776,6 @@
- } sample;
- long delta, delta2, delta3;
-
-- preempt_disable();
--
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
-@@ -818,7 +816,6 @@
- */
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
- }
-- preempt_enable();
- }
-
- void add_input_randomness(unsigned int type, unsigned int code,
-@@ -871,28 +868,27 @@
- return *(ptr + f->reg_idx++);
- }
-
--void add_interrupt_randomness(int irq, int irq_flags)
-+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
- {
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
-- __u64 ip;
- unsigned long seed;
- int credit = 0;
-
- if (cycles == 0)
-- cycles = get_reg(fast_pool, regs);
-+ cycles = get_reg(fast_pool, NULL);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
-- ip = regs ? instruction_pointer(regs) : _RET_IP_;
-+ if (!ip)
-+ ip = _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-- get_reg(fast_pool, regs);
-+ get_reg(fast_pool, NULL);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-diff -Nur linux-4.1.13.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.13/drivers/clocksource/tcb_clksrc.c
---- linux-4.1.13.orig/drivers/clocksource/tcb_clksrc.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/clocksource/tcb_clksrc.c 2015-11-29 09:23:09.537618965 +0100
-@@ -23,8 +23,7 @@
- * this 32 bit free-running counter. the second channel is not used.
- *
- * - The third channel may be used to provide a 16-bit clockevent
-- * source, used in either periodic or oneshot mode. This runs
-- * at 32 KiHZ, and can handle delays of up to two seconds.
-+ * source, used in either periodic or oneshot mode.
- *
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -74,6 +73,7 @@
- struct tc_clkevt_device {
- struct clock_event_device clkevt;
- struct clk *clk;
-+ u32 freq;
- void __iomem *regs;
- };
-
-@@ -82,13 +82,6 @@
- return container_of(clkevt, struct tc_clkevt_device, clkevt);
- }
-
--/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-- * because using one of the divided clocks would usually mean the
-- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-- *
-- * A divided clock could be good for high resolution timers, since
-- * 30.5 usec resolution can seem "low".
-- */
- static u32 timer_clock;
-
- static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
-@@ -111,11 +104,12 @@
- case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
-
-- /* slow clock, count up to RC, then irq and restart */
-+ /* count up to RC, then irq and restart */
- __raw_writel(timer_clock
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-+ __raw_writel((tcd->freq + HZ / 2) / HZ,
-+ tcaddr + ATMEL_TC_REG(2, RC));
-
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -128,7 +122,7 @@
- case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
-
-- /* slow clock, count up to RC, then irq and stop */
-+ /* count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-@@ -157,8 +151,12 @@
- .name = "tc_clkevt",
- .features = CLOCK_EVT_FEAT_PERIODIC
- | CLOCK_EVT_FEAT_ONESHOT,
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- /* Should be lower than at91rm9200's system timer */
- .rating = 125,
-+#else
-+ .rating = 200,
-+#endif
- .set_next_event = tc_next_event,
- .set_mode = tc_mode,
- },
-@@ -178,8 +176,9 @@
- return IRQ_NONE;
- }
-
--static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
- {
-+ unsigned divisor = atmel_tc_divisors[divisor_idx];
- int ret;
- struct clk *t2_clk = tc->clk[2];
- int irq = tc->irq[2];
-@@ -193,7 +192,11 @@
- clkevt.regs = tc->regs;
- clkevt.clk = t2_clk;
-
-- timer_clock = clk32k_divisor_idx;
-+ timer_clock = divisor_idx;
-+ if (!divisor)
-+ clkevt.freq = 32768;
-+ else
-+ clkevt.freq = clk_get_rate(t2_clk) / divisor;
-
- clkevt.clkevt.cpumask = cpumask_of(0);
-
-@@ -203,7 +206,7 @@
- return ret;
- }
-
-- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
-+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
-
- return ret;
- }
-@@ -340,7 +343,11 @@
- goto err_disable_t1;
-
- /* channel 2: periodic and oneshot timer support */
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- ret = setup_clkevents(tc, clk32k_divisor_idx);
-+#else
-+ ret = setup_clkevents(tc, best_divisor_idx);
-+#endif
- if (ret)
- goto err_unregister_clksrc;
-
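The clock-event reload value now follows from the selected divisor instead of being hard-coded for the 32 KiHz slow clock. A worked example with hypothetical figures (the 132 MHz peripheral clock is an assumption for illustration, not taken from the patch):

	unsigned divisor = 128;			/* atmel_tc_divisors[best_divisor_idx] */
	u32 freq = 132000000 / divisor;		/* 1031250 Hz */
	u32 rc = (freq + HZ / 2) / HZ;		/* HZ=100 -> 10313, fits the 16-bit RC */

A divisor of 0 in the table selects the slow clock, which is why setup_clkevents() keeps freq = 32768 in that case.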
-diff -Nur linux-4.1.13.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.13/drivers/clocksource/timer-atmel-pit.c
---- linux-4.1.13.orig/drivers/clocksource/timer-atmel-pit.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/clocksource/timer-atmel-pit.c 2015-11-29 09:23:09.537618965 +0100
-@@ -90,6 +90,7 @@
- return elapsed;
- }
-
-+static struct irqaction at91sam926x_pit_irq;
- /*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-@@ -100,6 +101,8 @@
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ /* Set up irq handler */
-+ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
- /* update clocksource counter */
- data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
- pit_write(data->base, AT91_PIT_MR,
-@@ -113,6 +116,7 @@
- /* disable irq, leaving the clocksource active */
- pit_write(data->base, AT91_PIT_MR,
- (data->cycle - 1) | AT91_PIT_PITEN);
-+ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
-diff -Nur linux-4.1.13.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.13/drivers/clocksource/timer-atmel-st.c
---- linux-4.1.13.orig/drivers/clocksource/timer-atmel-st.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/clocksource/timer-atmel-st.c 2015-11-29 09:23:09.537618965 +0100
-@@ -131,6 +131,7 @@
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
-+ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
- case CLOCK_EVT_MODE_RESUME:
- irqmask = 0;
- break;
-diff -Nur linux-4.1.13.orig/drivers/cpufreq/cpufreq.c linux-4.1.13/drivers/cpufreq/cpufreq.c
---- linux-4.1.13.orig/drivers/cpufreq/cpufreq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/cpufreq/cpufreq.c 2015-11-29 09:23:09.545618436 +0100
-@@ -64,12 +64,6 @@
- return cpufreq_driver->target_index || cpufreq_driver->target;
- }
-
--/*
-- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
-- * sections
-- */
--static DECLARE_RWSEM(cpufreq_rwsem);
--
- /* internal prototypes */
- static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event);
-@@ -215,9 +209,6 @@
- if (cpu >= nr_cpu_ids)
- return NULL;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return NULL;
--
- /* get the cpufreq driver */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
-
-@@ -230,9 +221,6 @@
-
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-- if (!policy)
-- up_read(&cpufreq_rwsem);
--
- return policy;
- }
- EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-@@ -240,7 +228,6 @@
- void cpufreq_cpu_put(struct cpufreq_policy *policy)
- {
- kobject_put(&policy->kobj);
-- up_read(&cpufreq_rwsem);
- }
- EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-
-@@ -765,9 +752,6 @@
- struct freq_attr *fattr = to_attr(attr);
- ssize_t ret;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return -EINVAL;
--
- down_read(&policy->rwsem);
-
- if (fattr->show)
-@@ -776,7 +760,6 @@
- ret = -EIO;
-
- up_read(&policy->rwsem);
-- up_read(&cpufreq_rwsem);
-
- return ret;
- }
-@@ -793,9 +776,6 @@
- if (!cpu_online(policy->cpu))
- goto unlock;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- goto unlock;
--
- down_write(&policy->rwsem);
-
- if (fattr->store)
-@@ -804,8 +784,6 @@
- ret = -EIO;
-
- up_write(&policy->rwsem);
--
-- up_read(&cpufreq_rwsem);
- unlock:
- put_online_cpus();
-
-@@ -1117,16 +1095,12 @@
- if (unlikely(policy))
- return 0;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return 0;
--
- /* Check if this cpu was hot-unplugged earlier and has siblings */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_policy(policy) {
- if (cpumask_test_cpu(cpu, policy->related_cpus)) {
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-- up_read(&cpufreq_rwsem);
- return ret;
- }
- }
-@@ -1269,8 +1243,6 @@
-
- kobject_uevent(&policy->kobj, KOBJ_ADD);
-
-- up_read(&cpufreq_rwsem);
--
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-@@ -1304,8 +1276,6 @@
- cpufreq_policy_free(policy);
-
- nomem_out:
-- up_read(&cpufreq_rwsem);
--
- return ret;
- }
-
-@@ -2499,19 +2469,20 @@
-
- pr_debug("unregistering driver %s\n", driver->name);
-
-+ /* Protect against concurrent cpu hotplug */
-+ get_online_cpus();
- subsys_interface_unregister(&cpufreq_interface);
- if (cpufreq_boost_supported())
- cpufreq_sysfs_remove_file(&boost.attr);
-
- unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
-
-- down_write(&cpufreq_rwsem);
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
- cpufreq_driver = NULL;
-
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-- up_write(&cpufreq_rwsem);
-+ put_online_cpus();
-
- return 0;
- }
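With the rwsem gone, driver unregistration is serialized against CPU hotplug instead: while get_online_cpus() is held no CPU can come or go, so the per-CPU teardown triggered by subsys_interface_unregister() cannot race with a concurrent hotplug operation. The generic shape of the pattern, as a minimal sketch:

	get_online_cpus();	/* sleepable hotplug exclusion - RT-friendly */
	/* ... unregister interfaces, clear the driver pointer ... */
	put_online_cpus();	/* re-enable hotplug */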
-diff -Nur linux-4.1.13.orig/drivers/cpufreq/Kconfig.x86 linux-4.1.13/drivers/cpufreq/Kconfig.x86
---- linux-4.1.13.orig/drivers/cpufreq/Kconfig.x86 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/cpufreq/Kconfig.x86 2015-11-29 09:23:09.545618436 +0100
-@@ -123,7 +123,7 @@
-
- config X86_POWERNOW_K8
- tristate "AMD Opteron/Athlon64 PowerNow!"
-- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
-+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
- help
- This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
- Support for K10 and newer processors is now in acpi-cpufreq.
-diff -Nur linux-4.1.13.orig/drivers/gpio/gpio-omap.c linux-4.1.13/drivers/gpio/gpio-omap.c
---- linux-4.1.13.orig/drivers/gpio/gpio-omap.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/gpio/gpio-omap.c 2015-11-29 09:23:09.545618436 +0100
-@@ -57,7 +57,7 @@
- u32 saved_datain;
- u32 level_mask;
- u32 toggle_mask;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct gpio_chip chip;
- struct clk *dbck;
- u32 mod_usage;
-@@ -498,14 +498,14 @@
- (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
- omap_gpio_init_irq(bank, offset);
- if (!omap_gpio_is_input(bank, offset)) {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return -EINVAL;
- }
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
-@@ -626,14 +626,14 @@
- return -EINVAL;
- }
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->context.wake_en |= gpio_bit;
- else
- bank->context.wake_en &= ~gpio_bit;
-
- writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -668,7 +668,7 @@
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- /* Set trigger to none. You need to enable the desired trigger with
- * request_irq() or set_irq_type(). Only do this if the IRQ line has
- * not already been requested.
-@@ -678,7 +678,7 @@
- omap_enable_gpio_module(bank, offset);
- }
- bank->mod_usage |= BIT(offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -688,11 +688,11 @@
- struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->mod_usage &= ~(BIT(offset));
- omap_disable_gpio_module(bank, offset);
- omap_reset_gpio(bank, offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
- * If this is the last gpio to be freed in the bank,
-@@ -794,9 +794,9 @@
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_gpio_init_irq(bank, offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- omap_gpio_unmask_irq(d);
-
- return 0;
-@@ -808,11 +808,11 @@
- unsigned long flags;
- unsigned offset = d->hwirq;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->irq_usage &= ~(BIT(offset));
- omap_disable_gpio_module(bank, offset);
- omap_reset_gpio(bank, offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
- * If this is the last IRQ to be freed in the bank,
-@@ -836,10 +836,10 @@
- unsigned offset = d->hwirq;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- static void omap_gpio_unmask_irq(struct irq_data *d)
-@@ -849,7 +849,7 @@
- u32 trigger = irqd_get_trigger_type(d);
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- if (trigger)
- omap_set_gpio_triggering(bank, offset, trigger);
-
-@@ -861,7 +861,7 @@
- }
-
- omap_set_gpio_irqenable(bank, offset, 1);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- /*---------------------------------------------------------------------*/
-@@ -874,9 +874,9 @@
- OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -889,9 +889,9 @@
- OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- writel_relaxed(bank->context.wake_en, mask_reg);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -937,9 +937,9 @@
-
- bank = container_of(chip, struct gpio_bank, chip);
- reg = bank->base + bank->regs->direction;
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- dir = !!(readl_relaxed(reg) & BIT(offset));
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return dir;
- }
-
-@@ -949,9 +949,9 @@
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_direction(bank, offset, 1);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -973,10 +973,10 @@
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->set_dataout(bank, offset, value);
- omap_set_gpio_direction(bank, offset, 0);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -988,9 +988,9 @@
-
- bank = container_of(chip, struct gpio_bank, chip);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap2_set_gpio_debounce(bank, offset, debounce);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -1001,9 +1001,9 @@
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->set_dataout(bank, offset, value);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- /*---------------------------------------------------------------------*/
-@@ -1199,7 +1199,7 @@
- else
- bank->set_dataout = omap_set_gpio_dataout_mask;
-
-- spin_lock_init(&bank->lock);
-+ raw_spin_lock_init(&bank->lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-@@ -1246,7 +1246,7 @@
- unsigned long flags;
- u32 wake_low, wake_hi;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * Only edges can generate a wakeup event to the PRCM.
-@@ -1299,7 +1299,7 @@
- bank->get_context_loss_count(bank->dev);
-
- omap_gpio_dbck_disable(bank);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -1314,7 +1314,7 @@
- unsigned long flags;
- int c;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * On the first resume during the probe, the context has not
-@@ -1350,14 +1350,14 @@
- if (c != bank->context_loss_count) {
- omap_gpio_restore_context(bank);
- } else {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
- }
- }
-
- if (!bank->workaround_enabled) {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -1412,7 +1412,7 @@
- }
-
- bank->workaround_enabled = false;
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
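Under PREEMPT_RT a plain spinlock_t turns into a sleeping lock, which must not be taken from the hard-irq paths this GPIO driver still runs in; hence the mechanical spinlock_t -> raw_spinlock_t conversion above. The pattern, as a minimal sketch:

	static raw_spinlock_t lock;	/* remains a true spinning lock on RT */
	unsigned long flags;

	raw_spin_lock_init(&lock);
	raw_spin_lock_irqsave(&lock, flags);
	/* short, bounded critical section - must never sleep */
	raw_spin_unlock_irqrestore(&lock, flags);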
-diff -Nur linux-4.1.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c
---- linux-4.1.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-11-29 09:23:09.545618436 +0100
-@@ -32,6 +32,7 @@
- #include "i915_trace.h"
- #include "intel_drv.h"
- #include <linux/dma_remapping.h>
-+#include <linux/uaccess.h>
-
- #define __EXEC_OBJECT_HAS_PIN (1<<31)
- #define __EXEC_OBJECT_HAS_FENCE (1<<30)
-@@ -465,7 +466,7 @@
- }
-
- /* We can't wait for rendering with pagefaults disabled */
-- if (obj->active && in_atomic())
-+ if (obj->active && pagefault_disabled())
- return -EFAULT;
-
- if (use_cpu_reloc(obj))
-@@ -1338,7 +1339,9 @@
- return ret;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_BASE
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
-+#endif
-
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-diff -Nur linux-4.1.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.1.13/drivers/gpu/drm/i915/i915_gem_shrinker.c
---- linux-4.1.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-11-29 09:23:09.545618436 +0100
-@@ -39,7 +39,7 @@
- if (!mutex_is_locked(mutex))
- return false;
-
--#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
-diff -Nur linux-4.1.13.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.13/drivers/gpu/drm/i915/intel_display.c
---- linux-4.1.13.orig/drivers/gpu/drm/i915/intel_display.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/gpu/drm/i915/intel_display.c 2015-11-29 09:23:09.549618170 +0100
-@@ -10088,7 +10088,7 @@
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-- WARN_ON(!in_interrupt());
-+ WARN_ON_NONRT(!in_interrupt());
-
- if (crtc == NULL)
- return;
-diff -Nur linux-4.1.13.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.13/drivers/i2c/busses/i2c-omap.c
---- linux-4.1.13.orig/drivers/i2c/busses/i2c-omap.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/i2c/busses/i2c-omap.c 2015-11-29 09:23:09.549618170 +0100
-@@ -996,15 +996,12 @@
- u16 mask;
- u16 stat;
-
-- spin_lock(&dev->lock);
-- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
-+ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
-- spin_unlock(&dev->lock);
--
- return ret;
- }
-
-diff -Nur linux-4.1.13.orig/drivers/ide/alim15x3.c linux-4.1.13/drivers/ide/alim15x3.c
---- linux-4.1.13.orig/drivers/ide/alim15x3.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/alim15x3.c 2015-11-29 09:23:09.549618170 +0100
-@@ -234,7 +234,7 @@
-
- isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
-
- if (m5229_revision < 0xC2) {
- /*
-@@ -325,7 +325,7 @@
- }
- pci_dev_put(north);
- pci_dev_put(isa_dev);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- return 0;
- }
-
-diff -Nur linux-4.1.13.orig/drivers/ide/hpt366.c linux-4.1.13/drivers/ide/hpt366.c
---- linux-4.1.13.orig/drivers/ide/hpt366.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/hpt366.c 2015-11-29 09:23:09.557617634 +0100
-@@ -1241,7 +1241,7 @@
-
- dma_old = inb(base + 2);
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
-
- dma_new = dma_old;
- pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
-@@ -1252,7 +1252,7 @@
- if (dma_new != dma_old)
- outb(dma_new, base + 2);
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-diff -Nur linux-4.1.13.orig/drivers/ide/ide-io.c linux-4.1.13/drivers/ide/ide-io.c
---- linux-4.1.13.orig/drivers/ide/ide-io.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/ide-io.c 2015-11-29 09:23:09.557617634 +0100
-@@ -659,7 +659,7 @@
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
- /* local CPU only, as if we were handling an interrupt */
-- local_irq_disable();
-+ local_irq_disable_nort();
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
-diff -Nur linux-4.1.13.orig/drivers/ide/ide-iops.c linux-4.1.13/drivers/ide/ide-iops.c
---- linux-4.1.13.orig/drivers/ide/ide-iops.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/ide-iops.c 2015-11-29 09:23:09.557617634 +0100
-@@ -129,12 +129,12 @@
- if ((stat & ATA_BUSY) == 0)
- break;
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- /*
- * Allow status to settle, then read it again.
-diff -Nur linux-4.1.13.orig/drivers/ide/ide-io-std.c linux-4.1.13/drivers/ide/ide-io-std.c
---- linux-4.1.13.orig/drivers/ide/ide-io-std.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/ide-io-std.c 2015-11-29 09:23:09.557617634 +0100
-@@ -175,7 +175,7 @@
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
-@@ -186,7 +186,7 @@
- insl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-@@ -219,7 +219,7 @@
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
-@@ -230,7 +230,7 @@
- outsl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-diff -Nur linux-4.1.13.orig/drivers/ide/ide-probe.c linux-4.1.13/drivers/ide/ide-probe.c
---- linux-4.1.13.orig/drivers/ide/ide-probe.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/ide-probe.c 2015-11-29 09:23:09.557617634 +0100
-@@ -196,10 +196,10 @@
- int bswap = 1;
-
- /* local CPU only; some systems need this */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- /* read 512 bytes of id info */
- hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- drive->dev_flags |= IDE_DFLAG_ID_READ;
- #ifdef DEBUG
-diff -Nur linux-4.1.13.orig/drivers/ide/ide-taskfile.c linux-4.1.13/drivers/ide/ide-taskfile.c
---- linux-4.1.13.orig/drivers/ide/ide-taskfile.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/ide/ide-taskfile.c 2015-11-29 09:23:09.557617634 +0100
-@@ -250,7 +250,7 @@
-
- page_is_high = PageHighMem(page);
- if (page_is_high)
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
-
- buf = kmap_atomic(page) + offset;
-
-@@ -271,7 +271,7 @@
- kunmap_atomic(buf);
-
- if (page_is_high)
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- len -= nr_bytes;
- }
-@@ -414,7 +414,7 @@
- }
-
- if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
-- local_irq_disable();
-+ local_irq_disable_nort();
-
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-
-diff -Nur linux-4.1.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.1.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-4.1.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-11-29 09:23:09.557617634 +0100
-@@ -821,7 +821,7 @@
-
- ipoib_dbg_mcast(priv, "restarting multicast task\n");
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- netif_addr_lock(dev);
- spin_lock(&priv->lock);
-
-@@ -903,7 +903,7 @@
-
- spin_unlock(&priv->lock);
- netif_addr_unlock(dev);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- /*
- * make sure the in-flight joins have finished before we attempt
-diff -Nur linux-4.1.13.orig/drivers/input/gameport/gameport.c linux-4.1.13/drivers/input/gameport/gameport.c
---- linux-4.1.13.orig/drivers/input/gameport/gameport.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/input/gameport/gameport.c 2015-11-29 09:23:09.557617634 +0100
-@@ -124,12 +124,12 @@
- tx = 1 << 30;
-
- for(i = 0; i < 50; i++) {
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- GET_TIME(t1);
- for (t = 0; t < 50; t++) gameport_read(gameport);
- GET_TIME(t2);
- GET_TIME(t3);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- udelay(i * 10);
- if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
- }
-@@ -148,11 +148,11 @@
- tx = 1 << 30;
-
- for(i = 0; i < 50; i++) {
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- rdtscl(t1);
- for (t = 0; t < 50; t++) gameport_read(gameport);
- rdtscl(t2);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- udelay(i * 10);
- if (t2 - t1 < tx) tx = t2 - t1;
- }
-diff -Nur linux-4.1.13.orig/drivers/leds/trigger/Kconfig linux-4.1.13/drivers/leds/trigger/Kconfig
---- linux-4.1.13.orig/drivers/leds/trigger/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/leds/trigger/Kconfig 2015-11-29 09:23:09.557617634 +0100
-@@ -61,7 +61,7 @@
-
- config LEDS_TRIGGER_CPU
- bool "LED CPU Trigger"
-- depends on LEDS_TRIGGERS
-+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
- help
- This allows LEDs to be controlled by active CPUs. This shows
- the active CPUs across an array of LEDs so you can see which
-diff -Nur linux-4.1.13.orig/drivers/md/bcache/Kconfig linux-4.1.13/drivers/md/bcache/Kconfig
---- linux-4.1.13.orig/drivers/md/bcache/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/md/bcache/Kconfig 2015-11-29 09:23:09.557617634 +0100
-@@ -1,6 +1,7 @@
-
- config BCACHE
- tristate "Block device as cache"
-+ depends on !PREEMPT_RT_FULL
- ---help---
- Allows a block device to be used as cache for other devices; uses
- a btree for indexing and the layout is optimized for SSDs.
-diff -Nur linux-4.1.13.orig/drivers/md/dm.c linux-4.1.13/drivers/md/dm.c
---- linux-4.1.13.orig/drivers/md/dm.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/md/dm.c 2015-11-29 09:23:09.557617634 +0100
-@@ -2132,7 +2132,7 @@
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
- }
-
- goto out;
-diff -Nur linux-4.1.13.orig/drivers/md/raid5.c linux-4.1.13/drivers/md/raid5.c
---- linux-4.1.13.orig/drivers/md/raid5.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/md/raid5.c 2015-11-29 09:23:09.561617368 +0100
-@@ -1918,8 +1918,9 @@
- struct raid5_percpu *percpu;
- unsigned long cpu;
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- percpu = per_cpu_ptr(conf->percpu, cpu);
-+ spin_lock(&percpu->lock);
- if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
- ops_run_biofill(sh);
- overlap_clear++;
-@@ -1975,7 +1976,8 @@
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&sh->raid_conf->wait_for_overlap);
- }
-- put_cpu();
-+ spin_unlock(&percpu->lock);
-+ put_cpu_light();
- }
-
- static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6363,6 +6365,7 @@
- __func__, cpu);
- break;
- }
-+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
- }
- put_online_cpus();
-
-diff -Nur linux-4.1.13.orig/drivers/md/raid5.h linux-4.1.13/drivers/md/raid5.h
---- linux-4.1.13.orig/drivers/md/raid5.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/md/raid5.h 2015-11-29 09:23:09.561617368 +0100
-@@ -495,6 +495,7 @@
- int recovery_disabled;
- /* per cpu variables */
- struct raid5_percpu {
-+ spinlock_t lock; /* Protection for -RT */
- struct page *spare_page; /* Used when checking P/Q in raid6 */
- struct flex_array *scribble; /* space for constructing buffer
- * lists and performing address
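The raid5 change pairs two ideas: get_cpu_light() (added elsewhere in this patch) pins the task to a CPU without disabling preemption, and the new per-CPU spinlock then serializes access to that CPU's scratch buffers - something get_cpu()'s preempt-disable used to guarantee implicitly. Sketch of the resulting pattern:

	cpu = get_cpu_light();			/* migrate_disable() on RT */
	percpu = per_cpu_ptr(conf->percpu, cpu);
	spin_lock(&percpu->lock);		/* may sleep on RT - now allowed here */
	/* ... use percpu->spare_page / percpu->scribble ... */
	spin_unlock(&percpu->lock);
	put_cpu_light();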
-diff -Nur linux-4.1.13.orig/drivers/misc/hwlat_detector.c linux-4.1.13/drivers/misc/hwlat_detector.c
---- linux-4.1.13.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/drivers/misc/hwlat_detector.c 2015-11-29 09:23:09.561617368 +0100
-@@ -0,0 +1,1240 @@
-+/*
-+ * hwlat_detector.c - A simple Hardware Latency detector.
-+ *
-+ * Use this module to detect large system latencies induced by the behavior of
-+ * certain underlying system hardware or firmware, independent of Linux itself.
-+ * The code was developed originally to detect the presence of SMIs on Intel
-+ * and AMD systems, although there is no dependency upon x86 herein.
-+ *
-+ * The classical example usage of this module is in detecting the presence of
-+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
-+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
-+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
-+ * LPC (or other device) to generate a special interrupt under certain
-+ * circumstances, for example, upon expiration of a special SMI timer device,
-+ * due to certain external thermal readings, on certain I/O address accesses,
-+ * and other situations. An SMI hits a special CPU pin, triggers a special
-+ * SMI mode (complete with special memory map), and the OS is unaware.
-+ *
-+ * Although certain hardware-inducing latencies are necessary (for example,
-+ * a modern system often requires an SMI handler for correct thermal control
-+ * and remote management) they can wreak havoc upon any OS-level performance
-+ * guarantees toward low-latency, especially when the OS is not even made
-+ * aware of the presence of these interrupts. For this reason, we need a
-+ * somewhat brute force mechanism to detect these interrupts. In this case,
-+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
-+ * sampling the built-in CPU timer, looking for discontiguous readings.
-+ *
-+ * WARNING: This implementation necessarily introduces latencies. Therefore,
-+ * you should NEVER use this module in a production environment
-+ * requiring any kind of low-latency performance guarantee(s).
-+ *
-+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
-+ *
-+ * Includes useful feedback from Clark Williams <clark@redhat.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/ring_buffer.h>
-+#include <linux/time.h>
-+#include <linux/hrtimer.h>
-+#include <linux/kthread.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/uaccess.h>
-+#include <linux/version.h>
-+#include <linux/delay.h>
-+#include <linux/slab.h>
-+#include <linux/trace_clock.h>
-+
-+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
-+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
-+#define U64STR_SIZE 22 /* 20 digits max */
-+
-+#define VERSION "1.0.0"
-+#define BANNER "hwlat_detector: "
-+#define DRVNAME "hwlat_detector"
-+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
-+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
-+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
-+
-+/* Module metadata */
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
-+MODULE_DESCRIPTION("A simple hardware latency detector");
-+MODULE_VERSION(VERSION);
-+
-+/* Module parameters */
-+
-+static int debug;
-+static int enabled;
-+static int threshold;
-+
-+module_param(debug, int, 0); /* enable debug */
-+module_param(enabled, int, 0); /* enable detector */
-+module_param(threshold, int, 0); /* latency threshold */
-+
-+/* Buffering and sampling */
-+
-+static struct ring_buffer *ring_buffer; /* sample buffer */
-+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
-+static unsigned long buf_size = BUF_SIZE_DEFAULT;
-+static struct task_struct *kthread; /* sampling thread */
-+
-+/* DebugFS filesystem entries */
-+
-+static struct dentry *debug_dir; /* debugfs directory */
-+static struct dentry *debug_max; /* maximum TSC delta */
-+static struct dentry *debug_count; /* total detect count */
-+static struct dentry *debug_sample_width; /* sample width us */
-+static struct dentry *debug_sample_window; /* sample window us */
-+static struct dentry *debug_sample; /* raw samples us */
-+static struct dentry *debug_threshold; /* threshold us */
-+static struct dentry *debug_enable; /* enable/disable */
-+
-+/* Individual samples and global state */
-+
-+struct sample; /* latency sample */
-+struct data; /* Global state */
-+
-+/* Sampling functions */
-+static int __buffer_add_sample(struct sample *sample);
-+static struct sample *buffer_get_sample(struct sample *sample);
-+
-+/* Threading and state */
-+static int kthread_fn(void *unused);
-+static int start_kthread(void);
-+static int stop_kthread(void);
-+static void __reset_stats(void);
-+static int init_stats(void);
-+
-+/* Debugfs interface */
-+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos, const u64 *entry);
-+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-+ size_t cnt, loff_t *ppos, u64 *entry);
-+static int debug_sample_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos);
-+static int debug_sample_release(struct inode *inode, struct file *filp);
-+static int debug_enable_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos);
-+static ssize_t debug_enable_fwrite(struct file *file,
-+ const char __user *user_buffer,
-+ size_t user_size, loff_t *offset);
-+
-+/* Initialization functions */
-+static int init_debugfs(void);
-+static void free_debugfs(void);
-+static int detector_init(void);
-+static void detector_exit(void);
-+
-+/* Individual latency samples are stored here when detected and packed into
-+ * the ring_buffer circular buffer, where they are overwritten when
-+ * more than buf_size/sizeof(sample) samples are received. */
-+struct sample {
-+ u64 seqnum; /* unique sequence */
-+ u64 duration; /* ktime delta */
-+ u64 outer_duration; /* ktime delta (outer loop) */
-+ struct timespec timestamp; /* wall time */
-+ unsigned long lost;
-+};
-+
-+/* keep the global state somewhere. */
-+static struct data {
-+
-+ struct mutex lock; /* protect changes */
-+
-+ u64 count; /* total since reset */
-+ u64 max_sample; /* max hardware latency */
-+ u64 threshold; /* sample threshold level */
-+
-+ u64 sample_window; /* total sampling window (on+off) */
-+ u64 sample_width; /* active sampling portion of window */
-+
-+ atomic_t sample_open; /* whether the sample file is open */
-+
-+ wait_queue_head_t wq; /* waitqueue for new sample values */
-+
-+} data;
-+
-+/**
-+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
-+ * @sample: The new latency sample value
-+ *
-+ * This receives a new latency sample and records it in a global ring buffer.
-+ * No additional locking is used in this case.
-+ */
-+static int __buffer_add_sample(struct sample *sample)
-+{
-+ return ring_buffer_write(ring_buffer,
-+ sizeof(struct sample), sample);
-+}
-+
-+/**
-+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
-+ * @sample: Pre-allocated storage for the sample
-+ *
-+ * This retrieves a hardware latency sample from the global circular buffer
-+ */
-+static struct sample *buffer_get_sample(struct sample *sample)
-+{
-+ struct ring_buffer_event *e = NULL;
-+ struct sample *s = NULL;
-+ unsigned int cpu = 0;
-+
-+ if (!sample)
-+ return NULL;
-+
-+ mutex_lock(&ring_buffer_mutex);
-+ for_each_online_cpu(cpu) {
-+ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
-+ if (e)
-+ break;
-+ }
-+
-+ if (e) {
-+ s = ring_buffer_event_data(e);
-+ memcpy(sample, s, sizeof(struct sample));
-+ } else
-+ sample = NULL;
-+ mutex_unlock(&ring_buffer_mutex);
-+
-+ return sample;
-+}
-+
-+#ifndef CONFIG_TRACING
-+#define time_type ktime_t
-+#define time_get() ktime_get()
-+#define time_to_us(x) ktime_to_us(x)
-+#define time_sub(a, b) ktime_sub(a, b)
-+#define init_time(a, b) (a).tv64 = b
-+#define time_u64(a) ((a).tv64)
-+#else
-+#define time_type u64
-+#define time_get() trace_clock_local()
-+#define time_to_us(x) div_u64(x, 1000)
-+#define time_sub(a, b) ((a) - (b))
-+#define init_time(a, b) (a = b)
-+#define time_u64(a) a
-+#endif
-+/**
-+ * get_sample - sample the CPU TSC and look for likely hardware latencies
-+ *
-+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
-+ * hardware-induced latency. Called with interrupts disabled and with
-+ * data.lock held.
-+ */
-+static int get_sample(void)
-+{
-+ time_type start, t1, t2, last_t2;
-+ s64 diff, total = 0;
-+ u64 sample = 0;
-+ u64 outer_sample = 0;
-+ int ret = -1;
-+
-+ init_time(last_t2, 0);
-+ start = time_get(); /* start timestamp */
-+
-+ do {
-+
-+ t1 = time_get(); /* we'll look for a discontinuity */
-+ t2 = time_get();
-+
-+ if (time_u64(last_t2)) {
-+ /* Check the delta from outer loop (t2 to next t1) */
-+ diff = time_to_us(time_sub(t1, last_t2));
-+ /* This shouldn't happen */
-+ if (diff < 0) {
-+ pr_err(BANNER "time running backwards\n");
-+ goto out;
-+ }
-+ if (diff > outer_sample)
-+ outer_sample = diff;
-+ }
-+ last_t2 = t2;
-+
-+ total = time_to_us(time_sub(t2, start)); /* sample width */
-+
-+ /* This checks the inner loop (t1 to t2) */
-+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
-+
-+ /* This shouldn't happen */
-+ if (diff < 0) {
-+ pr_err(BANNER "time running backwards\n");
-+ goto out;
-+ }
-+
-+ if (diff > sample)
-+ sample = diff; /* only want highest value */
-+
-+ } while (total <= data.sample_width);
-+
-+ ret = 0;
-+
-+ /* If we exceed the threshold value, we have found a hardware latency */
-+ if (sample > data.threshold || outer_sample > data.threshold) {
-+ struct sample s;
-+
-+ ret = 1;
-+
-+ data.count++;
-+ s.seqnum = data.count;
-+ s.duration = sample;
-+ s.outer_duration = outer_sample;
-+ s.timestamp = CURRENT_TIME;
-+ __buffer_add_sample(&s);
-+
-+ /* Keep a running maximum ever recorded hardware latency */
-+ if (sample > data.max_sample)
-+ data.max_sample = sample;
-+ }
-+
-+out:
-+ return ret;
-+}
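To make the detection concrete: two back-to-back time_get() calls normally differ by far less than the default 10 us threshold, so a large inner-loop delta is strong evidence that something (an SMI, firmware, etc.) stole the CPU between t1 and t2. An illustrative trace with made-up numbers:

/* threshold = 10 us (DEFAULT_LAT_THRESHOLD):
 *   t1 = 1000.2 us, t2 = 1000.3 us -> diff = 0 us (nothing recorded)
 *   t1 = 2000.1 us, t2 = 2142.8 us -> diff = 142 us -> hardware latency!
 * The second pair exceeds data.threshold, so data.count is incremented,
 * a struct sample is written into the ring buffer, and get_sample()
 * returns 1 so kthread_fn() can wake readers blocked on data.wq. */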
-+
-+/*
-+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
-+ * @unused: A required part of the kthread API.
-+ *
-+ * Used to periodically sample the CPU TSC via a call to get_sample. We
-+ * disable interrupts, which does (intentionally) introduce latency since we
-+ * need to ensure nothing else might be running (and thus pre-empting).
-+ * Obviously this should never be used in production environments.
-+ *
-+ * Currently this runs on whichever CPU it was scheduled on, but most
-+ * real-world hardware latency situations occur across several CPUs,
-+ * and we might later generalize this if we find there are any actual
-+ * systems with alternate SMI delivery or other hardware latencies.
-+ */
-+static int kthread_fn(void *unused)
-+{
-+ int ret;
-+ u64 interval;
-+
-+ while (!kthread_should_stop()) {
-+
-+ mutex_lock(&data.lock);
-+
-+ local_irq_disable();
-+ ret = get_sample();
-+ local_irq_enable();
-+
-+ if (ret > 0)
-+ wake_up(&data.wq); /* wake up reader(s) */
-+
-+ interval = data.sample_window - data.sample_width;
-+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
-+
-+ mutex_unlock(&data.lock);
-+
-+ if (msleep_interruptible(interval))
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * start_kthread - Kick off the hardware latency sampling/detector kthread
-+ *
-+ * This starts a kernel thread that will sit and sample the CPU timestamp
-+ * counter (TSC or similar) and look for potential hardware latencies.
-+ */
-+static int start_kthread(void)
-+{
-+ kthread = kthread_run(kthread_fn, NULL,
-+ DRVNAME);
-+ if (IS_ERR(kthread)) {
-+ pr_err(BANNER "could not start sampling thread\n");
-+ enabled = 0;
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
-+ *
-+ * This kicks the running hardware latency sampling/detector kernel thread and
-+ * tells it to stop sampling now. Use this on unload and at system shutdown.
-+ */
-+static int stop_kthread(void)
-+{
-+ int ret;
-+
-+ ret = kthread_stop(kthread);
-+
-+ return ret;
-+}
-+
-+/**
-+ * __reset_stats - Reset statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We call this
-+ * function in order to reset those when "enable" is toggled on or off, and
-+ * also at initialization. Should be called with data.lock held.
-+ */
-+static void __reset_stats(void)
-+{
-+ data.count = 0;
-+ data.max_sample = 0;
-+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
-+}
-+
-+/**
-+ * init_stats - Setup global state statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We also use
-+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
-+ * induced system latencies. This function initializes these structures and
-+ * allocates the global ring buffer also.
-+ */
-+static int init_stats(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ mutex_init(&data.lock);
-+ init_waitqueue_head(&data.wq);
-+ atomic_set(&data.sample_open, 0);
-+
-+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
-+
-+ if (WARN(!ring_buffer, KERN_ERR BANNER
-+ "failed to allocate ring buffer!\n"))
-+ goto out;
-+
-+ __reset_stats();
-+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
-+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
-+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
-+
-+ ret = 0;
-+
-+out:
-+ return ret;
-+
-+}
-+
-+/*
-+ * simple_data_read - Wrapper read function for global state debugfs entries
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ * @entry: The entry to read from
-+ *
-+ * This function provides a generic read implementation for the global state
-+ * "data" structure debugfs filesystem entries. It would be nice to use
-+ * simple_attr_read directly, but we need to make sure that the data.lock
-+ * is held during the actual read.
-+ */
-+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos, const u64 *entry)
-+{
-+ char buf[U64STR_SIZE];
-+ u64 val = 0;
-+ int len = 0;
-+
-+ memset(buf, 0, sizeof(buf));
-+
-+ if (!entry)
-+ return -EFAULT;
-+
-+ mutex_lock(&data.lock);
-+ val = *entry;
-+ mutex_unlock(&data.lock);
-+
-+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
-+
-+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
-+
-+}
-+
-+/*
-+ * simple_data_write - Wrapper write function for global state debugfs entries
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to write value from
-+ * @cnt: The maximum number of bytes to write
-+ * @ppos: The current "file" position
-+ * @entry: The entry to write to
-+ *
-+ * This function provides a generic write implementation for the global state
-+ * "data" structure debugfs filesystem entries. It would be nice to use
-+ * simple_attr_write directly, but we need to make sure that the data.lock
-+ * is held during the actual write.
-+ */
-+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-+ size_t cnt, loff_t *ppos, u64 *entry)
-+{
-+ char buf[U64STR_SIZE];
-+ int csize = min(cnt, sizeof(buf));
-+ u64 val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
-+ err = kstrtoull(buf, 10, &val);
-+ if (err)
-+ return -EINVAL;
-+
-+ mutex_lock(&data.lock);
-+ *entry = val;
-+ mutex_unlock(&data.lock);
-+
-+ return csize;
-+}
-+
-+/**
-+ * debug_count_fopen - Open function for "count" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "count" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_count_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_count_fread - Read function for "count" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "count" debugfs
-+ * interface to the hardware latency detector. Can be used to read the
-+ * number of latency readings exceeding the configured threshold since
-+ * the detector was last reset (e.g. by writing a zero into "count").
-+ */
-+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
-+}
-+
-+/**
-+ * debug_count_fwrite - Write function for "count" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "count" debugfs
-+ * interface to the hardware latency detector. Can be used to write a
-+ * desired value, especially to zero the total count.
-+ */
-+static ssize_t debug_count_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
-+}
-+
-+/**
-+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "enable" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_enable_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_enable_fread - Read function for "enable" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "enable" debugfs
-+ * interface to the hardware latency detector. Can be used to determine
-+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
-+ */
-+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ char buf[4];
-+
-+ if ((cnt < sizeof(buf)) || (*ppos))
-+ return 0;
-+
-+ buf[0] = enabled ? '1' : '0';
-+ buf[1] = '\n';
-+ buf[2] = '\0';
-+ if (copy_to_user(ubuf, buf, strlen(buf)))
-+ return -EFAULT;
-+ return *ppos = strlen(buf);
-+}
-+
-+/**
-+ * debug_enable_fwrite - Write function for "enable" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "enable" debugfs
-+ * interface to the hardware latency detector. Can be used to enable or
-+ * disable the detector, which will have the side-effect of possibly
-+ * also resetting the global stats and kicking off the measuring
-+ * kthread (on an enable) or the converse (upon a disable).
-+ */
-+static ssize_t debug_enable_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[4];
-+ int csize = min(cnt, sizeof(buf));
-+ long val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[sizeof(buf)-1] = '\0'; /* just in case */
-+ err = kstrtoul(buf, 10, &val);
-+ if (err)
-+ return -EINVAL;
-+
-+ if (val) {
-+ if (enabled)
-+ goto unlock;
-+ enabled = 1;
-+ __reset_stats();
-+ if (start_kthread())
-+ return -EFAULT;
-+ } else {
-+ if (!enabled)
-+ goto unlock;
-+ enabled = 0;
-+ err = stop_kthread();
-+ if (err) {
-+ pr_err(BANNER "cannot stop kthread\n");
-+ return -EFAULT;
-+ }
-+ wake_up(&data.wq); /* reader(s) should return */
-+ }
-+unlock:
-+ return csize;
-+}
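A short usage note, assuming debugfs is mounted at /sys/kernel/debug (the module header above still refers to the older /debug prefix): writing 1 to hwlat_detector/enable resets the statistics and starts the sampling kthread, writing 0 stops the kthread and wakes any reader blocked on the "sample" file so it can return, and the threshold/width/window files can be tuned before enabling.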
-+
-+/**
-+ * debug_max_fopen - Open function for "max" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "max" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_max_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_max_fread - Read function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to determine
-+ * the maximum latency value observed since it was last reset.
-+ */
-+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+/**
-+ * debug_max_fwrite - Write function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to reset the
-+ * maximum or set it to some other desired value - if, then, subsequent
-+ * measurements exceed this value, the maximum will be updated.
-+ */
-+static ssize_t debug_max_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+
-+/**
-+ * debug_sample_fopen - An open function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of this debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function handles opening the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. Can be opened blocking or non-blocking,
-+ * affecting whether or not it behaves as a buffered read pipe.
-+ * Implements simple locking to prevent multiple simultaneous use.
-+ */
-+static int debug_sample_fopen(struct inode *inode, struct file *filp)
-+{
-+ if (!atomic_add_unless(&data.sample_open, 1, 1))
-+ return -EBUSY;
-+ else
-+ return 0;
-+}
-+
-+/**
-+ * debug_sample_fread - A read function for "sample" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that will contain the samples read
-+ * @cnt: The maximum bytes to read from the debugfs "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function handles reading from the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. By default this will block pending a new
-+ * value written into the sample buffer, unless there are already a
-+ * number of value(s) waiting in the buffer, or the sample file was
-+ * previously opened in a non-blocking mode of operation.
-+ */
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ int len = 0;
-+ char buf[64];
-+ struct sample *sample = NULL;
-+
-+ if (!enabled)
-+ return 0;
-+
-+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
-+ if (!sample)
-+ return -ENOMEM;
-+
-+ while (!buffer_get_sample(sample)) {
-+
-+ DEFINE_WAIT(wait);
-+
-+ if (filp->f_flags & O_NONBLOCK) {
-+ len = -EAGAIN;
-+ goto out;
-+ }
-+
-+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
-+ schedule();
-+ finish_wait(&data.wq, &wait);
-+
-+ if (signal_pending(current)) {
-+ len = -EINTR;
-+ goto out;
-+ }
-+
-+ if (!enabled) { /* enable was toggled */
-+ len = 0;
-+ goto out;
-+ }
-+ }
-+
-+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
-+ sample->timestamp.tv_sec,
-+ sample->timestamp.tv_nsec,
-+ sample->duration,
-+ sample->outer_duration);
-+
-+
-+ /* handling partial reads is more trouble than it's worth */
-+ if (len > cnt)
-+ goto out;
-+
-+ if (copy_to_user(ubuf, buf, len))
-+ len = -EFAULT;
-+
-+out:
-+ kfree(sample);
-+ return len;
-+}
-+
-+/**
-+ * debug_sample_release - Release function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function completes the close of the debugfs interface "sample" file.
-+ * Frees the sample_open "lock" so that other users may open the interface.
-+ */
-+static int debug_sample_release(struct inode *inode, struct file *filp)
-+{
-+ atomic_dec(&data.sample_open);
-+
-+ return 0;
-+}
-+
-+/**
-+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_threshold_fread - Read function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * the current threshold level at which a latency will be recorded in the
-+ * global ring buffer, typically on the order of 10us.
-+ */
-+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
-+}
-+
-+/**
-+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * the threshold level at which any subsequently detected latencies will
-+ * be recorded into the global ring buffer.
-+ */
-+static ssize_t debug_threshold_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ int ret;
-+
-+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
-+
-+ if (enabled)
-+ wake_up_process(kthread);
-+
-+ return ret;
-+}
-+
-+/**
-+ * debug_width_fopen - Open function for "width" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "width" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_width_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_width_fread - Read function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly and still have the system respond to a sample reader,
-+ * or, worse, keep the system from appearing to have gone out to lunch.
-+ */
-+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
-+}
-+
-+/**
-+ * debug_width_fwrite - Write function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly and still have the system respond to a sample reader,
-+ * or, worse, keep the system from appearing to have gone out to lunch. It
-+ * is enforced that the width is less than the total window size.
-+ */
-+static ssize_t debug_width_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[U64STR_SIZE];
-+ int csize = min(cnt, sizeof(buf));
-+ u64 val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
-+ err = kstrtoull(buf, 10, &val);
-+ if (0 != err)
-+ return -EINVAL;
-+
-+ mutex_lock(&data.lock);
-+ if (val < data.sample_window)
-+ data.sample_width = val;
-+ else {
-+ mutex_unlock(&data.lock);
-+ return -EINVAL;
-+ }
-+ mutex_unlock(&data.lock);
-+
-+ if (enabled)
-+ wake_up_process(kthread);
-+
-+ return csize;
-+}
-+
-+/**
-+ * debug_window_fopen - Open function for "window" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs.
-+ */
-+static int debug_window_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_window_fread - Read function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to read the total window size.
-+ */
-+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
-+}
-+
-+/**
-+ * debug_window_fwrite - Write function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "window" debufds
-+ * interface to the hardware latency detetector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to write a new total window size. It
-+ * is enforced that any value written must be greater than the sample width
-+ * size, or an error results.
-+ */
-+static ssize_t debug_window_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[U64STR_SIZE];
-+ int csize = min(cnt, sizeof(buf));
-+ u64 val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
-+ err = kstrtoull(buf, 10, &val);
-+ if (0 != err)
-+ return -EINVAL;
-+
-+ mutex_lock(&data.lock);
-+ if (data.sample_width < val)
-+ data.sample_window = val;
-+ else {
-+ mutex_unlock(&data.lock);
-+ return -EINVAL;
-+ }
-+ mutex_unlock(&data.lock);
-+
-+ return csize;
-+}
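
Taken together, the two writers enforce a single invariant from both sides:
"width" only accepts values strictly below the current window, and "window"
only accepts values strictly above the current width. Growing both therefore
means writing the new window first. The sketch below assumes a hypothetical
write_u64() helper in the style of the threshold example above.

	/* hypothetical: writes val as decimal text into the named debugfs file */
	static int write_u64(const char *name, unsigned long long val);

	static int grow_sampling(unsigned long long window_us,
				 unsigned long long width_us)
	{
		if (width_us >= window_us)
			return -1;	/* the kernel would reject it anyway */
		if (write_u64("window", window_us))
			return -1;	/* enlarge the window first... */
		return write_u64("width", width_us);	/* ...then width fits */
	}
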
-+
-+/*
-+ * Function pointers for the "count" debugfs file operations
-+ */
-+static const struct file_operations count_fops = {
-+ .open = debug_count_fopen,
-+ .read = debug_count_fread,
-+ .write = debug_count_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "enable" debugfs file operations
-+ */
-+static const struct file_operations enable_fops = {
-+ .open = debug_enable_fopen,
-+ .read = debug_enable_fread,
-+ .write = debug_enable_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "max" debugfs file operations
-+ */
-+static const struct file_operations max_fops = {
-+ .open = debug_max_fopen,
-+ .read = debug_max_fread,
-+ .write = debug_max_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "sample" debugfs file operations
-+ */
-+static const struct file_operations sample_fops = {
-+ .open = debug_sample_fopen,
-+ .read = debug_sample_fread,
-+ .release = debug_sample_release,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "threshold" debugfs file operations
-+ */
-+static const struct file_operations threshold_fops = {
-+ .open = debug_threshold_fopen,
-+ .read = debug_threshold_fread,
-+ .write = debug_threshold_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "width" debugfs file operations
-+ */
-+static const struct file_operations width_fops = {
-+ .open = debug_width_fopen,
-+ .read = debug_width_fread,
-+ .write = debug_width_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "window" debugfs file operations
-+ */
-+static const struct file_operations window_fops = {
-+ .open = debug_window_fopen,
-+ .read = debug_window_fread,
-+ .write = debug_window_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/**
-+ * init_debugfs - A function to initialize the debugfs interface files
-+ *
-+ * This function creates entries in debugfs for "hwlat_detector", including
-+ * files to read values from the detector, current samples, and the
-+ * maximum sample that has been captured since the hardware latency
-+ * detector was started.
-+ */
-+static int init_debugfs(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ debug_dir = debugfs_create_dir(DRVNAME, NULL);
-+ if (!debug_dir)
-+ goto err_debug_dir;
-+
-+ debug_sample = debugfs_create_file("sample", 0444,
-+ debug_dir, NULL,
-+ &sample_fops);
-+ if (!debug_sample)
-+ goto err_sample;
-+
-+ debug_count = debugfs_create_file("count", 0444,
-+ debug_dir, NULL,
-+ &count_fops);
-+ if (!debug_count)
-+ goto err_count;
-+
-+ debug_max = debugfs_create_file("max", 0444,
-+ debug_dir, NULL,
-+ &max_fops);
-+ if (!debug_max)
-+ goto err_max;
-+
-+ debug_sample_window = debugfs_create_file("window", 0644,
-+ debug_dir, NULL,
-+ &window_fops);
-+ if (!debug_sample_window)
-+ goto err_window;
-+
-+ debug_sample_width = debugfs_create_file("width", 0644,
-+ debug_dir, NULL,
-+ &width_fops);
-+ if (!debug_sample_width)
-+ goto err_width;
-+
-+ debug_threshold = debugfs_create_file("threshold", 0644,
-+ debug_dir, NULL,
-+ &threshold_fops);
-+ if (!debug_threshold)
-+ goto err_threshold;
-+
-+ debug_enable = debugfs_create_file("enable", 0644,
-+ debug_dir, &enabled,
-+ &enable_fops);
-+	if (!debug_enable)
-+		goto err_enable;
-+
-+	ret = 0;
-+	goto out;
-+
-+err_enable:
-+ debugfs_remove(debug_threshold);
-+err_threshold:
-+ debugfs_remove(debug_sample_width);
-+err_width:
-+ debugfs_remove(debug_sample_window);
-+err_window:
-+ debugfs_remove(debug_max);
-+err_max:
-+ debugfs_remove(debug_count);
-+err_count:
-+ debugfs_remove(debug_sample);
-+err_sample:
-+ debugfs_remove(debug_dir);
-+err_debug_dir:
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * free_debugfs - A function to cleanup the debugfs file interface
-+ */
-+static void free_debugfs(void)
-+{
-+ /* could also use a debugfs_remove_recursive */
-+ debugfs_remove(debug_enable);
-+ debugfs_remove(debug_threshold);
-+ debugfs_remove(debug_sample_width);
-+ debugfs_remove(debug_sample_window);
-+ debugfs_remove(debug_max);
-+ debugfs_remove(debug_count);
-+ debugfs_remove(debug_sample);
-+ debugfs_remove(debug_dir);
-+}
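
As the comment notes, the per-file teardown above could be collapsed into a
single recursive removal; a sketch of that variant (the patch itself keeps
the explicit form):

	static void free_debugfs_recursive(void)
	{
		/* removes debug_dir and every entry created beneath it */
		debugfs_remove_recursive(debug_dir);
		debug_dir = NULL;
	}
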
-+
-+/**
-+ * detector_init - Standard module initialization code
-+ */
-+static int detector_init(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ pr_info(BANNER "version %s\n", VERSION);
-+
-+ ret = init_stats();
-+ if (0 != ret)
-+ goto out;
-+
-+ ret = init_debugfs();
-+ if (0 != ret)
-+ goto err_stats;
-+
-+ if (enabled)
-+ ret = start_kthread();
-+
-+ goto out;
-+
-+err_stats:
-+ ring_buffer_free(ring_buffer);
-+out:
-+ return ret;
-+
-+}
-+
-+/**
-+ * detector_exit - Standard module cleanup code
-+ */
-+static void detector_exit(void)
-+{
-+ int err;
-+
-+ if (enabled) {
-+ enabled = 0;
-+ err = stop_kthread();
-+ if (err)
-+ pr_err(BANNER "cannot stop kthread\n");
-+ }
-+
-+ free_debugfs();
-+ ring_buffer_free(ring_buffer); /* free up the ring buffer */
-+
-+}
-+
-+module_init(detector_init);
-+module_exit(detector_exit);
-diff -Nur linux-4.1.13.orig/drivers/misc/Kconfig linux-4.1.13/drivers/misc/Kconfig
---- linux-4.1.13.orig/drivers/misc/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/misc/Kconfig 2015-11-29 09:23:09.561617368 +0100
-@@ -54,6 +54,7 @@
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
-+ default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
-@@ -69,8 +70,7 @@
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
-
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -84,6 +84,15 @@
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y if !PREEMPT_RT_FULL
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
-+
-+
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
-@@ -113,6 +122,35 @@
- for information on the specific driver level and support statement
- for your IBM server.
-
-+config HWLAT_DETECTOR
-+ tristate "Testing module to detect hardware-induced latencies"
-+ depends on DEBUG_FS
-+ depends on RING_BUFFER
-+ default m
-+ ---help---
-+ A simple hardware latency detector. Use this module to detect
-+ large latencies introduced by the behavior of the underlying
-+	  system firmware external to Linux. We do this through periodic
-+ use of stop_machine to grab all available CPUs and measure
-+ for unexplainable gaps in the CPU timestamp counter(s). By
-+ default, the module is not enabled until the "enable" file
-+ within the "hwlat_detector" debugfs directory is toggled.
-+
-+ This module is often used to detect SMI (System Management
-+	  Interrupts) on x86 systems, though it is not x86 specific. To
-+ this end, we default to using a sample window of 1 second,
-+ during which we will sample for 0.5 seconds. If an SMI or
-+ similar event occurs during that time, it is recorded
-+	  into an 8K-sample global ring buffer until retrieved.
-+
-+ WARNING: This software should never be enabled (it can be built
-+ but should not be turned on after it is loaded) in a production
-+ environment where high latencies are a concern since the
-+ sampling mechanism actually introduces latencies for
-+ regular tasks while the CPU(s) are being held.
-+
-+	  If unsure, say N.
-+
- config PHANTOM
- tristate "Sensable PHANToM (PCI)"
- depends on PCI
-diff -Nur linux-4.1.13.orig/drivers/misc/Makefile linux-4.1.13/drivers/misc/Makefile
---- linux-4.1.13.orig/drivers/misc/Makefile 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/misc/Makefile 2015-11-29 09:23:09.561617368 +0100
-@@ -38,6 +38,7 @@
- obj-$(CONFIG_HMC6352) += hmc6352.o
- obj-y += eeprom/
- obj-y += cb710/
-+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
- obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
- obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
- obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-diff -Nur linux-4.1.13.orig/drivers/mmc/host/mmci.c linux-4.1.13/drivers/mmc/host/mmci.c
---- linux-4.1.13.orig/drivers/mmc/host/mmci.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/mmc/host/mmci.c 2015-11-29 09:23:09.561617368 +0100
-@@ -1155,15 +1155,12 @@
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- struct variant_data *variant = host->variant;
- void __iomem *base = host->base;
-- unsigned long flags;
- u32 status;
-
- status = readl(base + MMCISTATUS);
-
- dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
-
-- local_irq_save(flags);
--
- do {
- unsigned int remain, len;
- char *buffer;
-@@ -1203,8 +1200,6 @@
-
- sg_miter_stop(sg_miter);
-
-- local_irq_restore(flags);
--
- /*
- * If we have less than the fifo 'half-full' threshold to transfer,
- * trigger a PIO interrupt as soon as any data is available.
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.13/drivers/net/ethernet/3com/3c59x.c
---- linux-4.1.13.orig/drivers/net/ethernet/3com/3c59x.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/3com/3c59x.c 2015-11-29 09:23:09.561617368 +0100
-@@ -842,9 +842,9 @@
- {
- struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- #endif
-
-@@ -1916,12 +1916,12 @@
- * Block interrupts because vortex_interrupt does a bare spin_lock()
- */
- unsigned long flags;
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- if (vp->full_bus_master_tx)
- boomerang_interrupt(dev->irq, dev);
- else
- vortex_interrupt(dev->irq, dev);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- }
-
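
The local_irq_save_nort()/local_irq_restore_nort() pair used in this and the
following driver hunks is defined elsewhere in the realtime patch; based on
the usual -rt definitions (an assumption, since the definition is not part
of this excerpt) it is roughly:

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* on -rt, only record the flags; interrupts stay enabled */
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	/* on mainline preemption models, the plain primitives */
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif
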
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.1.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
---- linux-4.1.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-11-29 09:23:09.561617368 +0100
-@@ -2213,11 +2213,7 @@
- }
-
- tpd_req = atl1c_cal_tpd_req(skb);
-- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-- if (netif_msg_pktdata(adapter))
-- dev_info(&adapter->pdev->dev, "tx locked\n");
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&adapter->tx_lock, flags);
-
- if (atl1c_tpd_avail(adapter, type) < tpd_req) {
- /* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.1.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
---- linux-4.1.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-11-29 09:23:09.561617368 +0100
-@@ -1880,8 +1880,7 @@
- return NETDEV_TX_OK;
- }
- tpd_req = atl1e_cal_tdp_req(skb);
-- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-- return NETDEV_TX_LOCKED;
-+ spin_lock_irqsave(&adapter->tx_lock, flags);
-
- if (atl1e_tpd_avail(adapter) < tpd_req) {
- /* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.13/drivers/net/ethernet/chelsio/cxgb/sge.c
---- linux-4.1.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-11-29 09:23:09.569616840 +0100
-@@ -1664,8 +1664,7 @@
- struct cmdQ *q = &sge->cmdQ[qid];
- unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
-
-- if (!spin_trylock(&q->lock))
-- return NETDEV_TX_LOCKED;
-+ spin_lock(&q->lock);
-
- reclaim_completed_tx(sge, q);
-
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.13/drivers/net/ethernet/freescale/gianfar.c
---- linux-4.1.13.orig/drivers/net/ethernet/freescale/gianfar.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/freescale/gianfar.c 2015-11-29 09:23:09.569616840 +0100
-@@ -1540,7 +1540,7 @@
-
- if (netif_running(ndev)) {
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- gfar_halt_nodisable(priv);
-@@ -1556,7 +1556,7 @@
- gfar_write(&regs->maccfg1, tempval);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- disable_napi(priv);
-
-@@ -1598,7 +1598,7 @@
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- tempval = gfar_read(&regs->maccfg2);
-@@ -1608,7 +1608,7 @@
- gfar_start(priv);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- netif_device_attach(ndev);
-
-@@ -3418,14 +3418,14 @@
- dev->stats.tx_dropped++;
- atomic64_inc(&priv->extra_stats.tx_underrun);
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- /* Reactivate the Tx Queues */
- gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- netif_dbg(priv, tx_err, dev, "Transmit Error\n");
- }
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.13/drivers/net/ethernet/neterion/s2io.c
---- linux-4.1.13.orig/drivers/net/ethernet/neterion/s2io.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/neterion/s2io.c 2015-11-29 09:23:09.569616840 +0100
-@@ -4084,12 +4084,7 @@
- [skb->priority & (MAX_TX_FIFOS - 1)];
- fifo = &mac_control->fifos[queue];
-
-- if (do_spin_lock)
-- spin_lock_irqsave(&fifo->tx_lock, flags);
-- else {
-- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&fifo->tx_lock, flags);
-
- if (sp->config.multiq) {
- if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.1.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
---- linux-4.1.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-11-29 09:23:09.569616840 +0100
-@@ -2137,10 +2137,8 @@
- struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
- unsigned long flags;
-
-- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-- /* Collision - tell upper layer to requeue */
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
-+
- if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.13/drivers/net/ethernet/realtek/8139too.c
---- linux-4.1.13.orig/drivers/net/ethernet/realtek/8139too.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/realtek/8139too.c 2015-11-29 09:23:09.569616840 +0100
-@@ -2229,7 +2229,7 @@
- struct rtl8139_private *tp = netdev_priv(dev);
- const int irq = tp->pci_dev->irq;
-
-- disable_irq(irq);
-+ disable_irq_nosync(irq);
- rtl8139_interrupt(irq, dev);
- enable_irq(irq);
- }
-diff -Nur linux-4.1.13.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.13/drivers/net/ethernet/tehuti/tehuti.c
---- linux-4.1.13.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/ethernet/tehuti/tehuti.c 2015-11-29 09:23:09.569616840 +0100
-@@ -1629,13 +1629,8 @@
- unsigned long flags;
-
- ENTER;
-- local_irq_save(flags);
-- if (!spin_trylock(&priv->tx_lock)) {
-- local_irq_restore(flags);
-- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-- BDX_DRV_NAME, ndev->name);
-- return NETDEV_TX_LOCKED;
-- }
-+
-+ spin_lock_irqsave(&priv->tx_lock, flags);
-
- /* build tx descriptor */
- BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
-diff -Nur linux-4.1.13.orig/drivers/net/rionet.c linux-4.1.13/drivers/net/rionet.c
---- linux-4.1.13.orig/drivers/net/rionet.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/rionet.c 2015-11-29 09:23:09.581616045 +0100
-@@ -174,11 +174,7 @@
- unsigned long flags;
- int add_num = 1;
-
-- local_irq_save(flags);
-- if (!spin_trylock(&rnet->tx_lock)) {
-- local_irq_restore(flags);
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&rnet->tx_lock, flags);
-
- if (is_multicast_ether_addr(eth->h_dest))
- add_num = nets[rnet->mport->id].nact;
-diff -Nur linux-4.1.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1.13/drivers/net/wireless/orinoco/orinoco_usb.c
---- linux-4.1.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/net/wireless/orinoco/orinoco_usb.c 2015-11-29 09:23:09.581616045 +0100
-@@ -697,7 +697,7 @@
- while (!ctx->done.done && msecs--)
- udelay(1000);
- } else {
-- wait_event_interruptible(ctx->done.wait,
-+ swait_event_interruptible(ctx->done.wait,
- ctx->done.done);
- }
- break;
-diff -Nur linux-4.1.13.orig/drivers/pci/access.c linux-4.1.13/drivers/pci/access.c
---- linux-4.1.13.orig/drivers/pci/access.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/pci/access.c 2015-11-29 09:23:09.581616045 +0100
-@@ -561,7 +561,7 @@
- WARN_ON(!dev->block_cfg_access);
-
- dev->block_cfg_access = 0;
-- wake_up_all(&pci_cfg_wait);
-+ wake_up_all_locked(&pci_cfg_wait);
- raw_spin_unlock_irqrestore(&pci_lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
-diff -Nur linux-4.1.13.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.13/drivers/scsi/fcoe/fcoe.c
---- linux-4.1.13.orig/drivers/scsi/fcoe/fcoe.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/scsi/fcoe/fcoe.c 2015-11-29 09:23:09.581616045 +0100
-@@ -1287,7 +1287,7 @@
- struct sk_buff *skb;
- #ifdef CONFIG_SMP
- struct fcoe_percpu_s *p0;
-- unsigned targ_cpu = get_cpu();
-+ unsigned targ_cpu = get_cpu_light();
- #endif /* CONFIG_SMP */
-
- FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
-@@ -1343,7 +1343,7 @@
- kfree_skb(skb);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- }
-- put_cpu();
-+ put_cpu_light();
- #else
- /*
- * This a non-SMP scenario where the singular Rx thread is
-@@ -1567,11 +1567,11 @@
- static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
- {
- struct fcoe_percpu_s *fps;
-- int rc;
-+ int rc, cpu = get_cpu_light();
-
-- fps = &get_cpu_var(fcoe_percpu);
-+ fps = &per_cpu(fcoe_percpu, cpu);
- rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-- put_cpu_var(fcoe_percpu);
-+ put_cpu_light();
-
- return rc;
- }
-@@ -1767,11 +1767,11 @@
- return 0;
- }
-
-- stats = per_cpu_ptr(lport->stats, get_cpu());
-+ stats = per_cpu_ptr(lport->stats, get_cpu_light());
- stats->InvalidCRCCount++;
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-- put_cpu();
-+ put_cpu_light();
- return -EINVAL;
- }
-
-@@ -1847,13 +1847,13 @@
- goto drop;
-
- if (!fcoe_filter_frames(lport, fp)) {
-- put_cpu();
-+ put_cpu_light();
- fc_exch_recv(lport, fp);
- return;
- }
- drop:
- stats->ErrorFrames++;
-- put_cpu();
-+ put_cpu_light();
- kfree_skb(skb);
- }
-
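
get_cpu_light()/put_cpu_light(), used in this and the next two scsi hunks,
also come from elsewhere in the realtime patch. Roughly (again an assumption
based on the usual -rt helpers), they pin the task to the current CPU via
migration disabling instead of disabling preemption outright:

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* stay on this CPU but remain preemptible */
	# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()	migrate_enable()
	#else
	# define get_cpu_light()	get_cpu()
	# define put_cpu_light()	put_cpu()
	#endif
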
-diff -Nur linux-4.1.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.13/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-4.1.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/scsi/fcoe/fcoe_ctlr.c 2015-11-29 09:23:09.581616045 +0100
-@@ -831,7 +831,7 @@
-
- INIT_LIST_HEAD(&del_list);
-
-- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
-+ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
-
- list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
- deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -867,7 +867,7 @@
- sel_time = fcf->time;
- }
- }
-- put_cpu();
-+ put_cpu_light();
-
- list_for_each_entry_safe(fcf, next, &del_list, list) {
- /* Removes fcf from current list */
-diff -Nur linux-4.1.13.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.13/drivers/scsi/libfc/fc_exch.c
---- linux-4.1.13.orig/drivers/scsi/libfc/fc_exch.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/scsi/libfc/fc_exch.c 2015-11-29 09:23:09.581616045 +0100
-@@ -814,10 +814,10 @@
- }
- memset(ep, 0, sizeof(*ep));
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- pool = per_cpu_ptr(mp->pool, cpu);
- spin_lock_bh(&pool->lock);
-- put_cpu();
-+ put_cpu_light();
-
- /* peek cache of free slot */
- if (pool->left != FC_XID_UNKNOWN) {
-diff -Nur linux-4.1.13.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.13/drivers/scsi/libsas/sas_ata.c
---- linux-4.1.13.orig/drivers/scsi/libsas/sas_ata.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/scsi/libsas/sas_ata.c 2015-11-29 09:23:09.581616045 +0100
-@@ -190,7 +190,7 @@
- /* TODO: audit callers to ensure they are ready for qc_issue to
- * unconditionally re-enable interrupts
- */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- spin_unlock(ap->lock);
-
- /* If the device fell off, no sense in issuing commands */
-@@ -255,7 +255,7 @@
-
- out:
- spin_lock(ap->lock);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- return ret;
- }
-
-diff -Nur linux-4.1.13.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.13/drivers/scsi/qla2xxx/qla_inline.h
---- linux-4.1.13.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/scsi/qla2xxx/qla_inline.h 2015-11-29 09:23:09.581616045 +0100
-@@ -59,12 +59,12 @@
- {
- unsigned long flags;
- struct qla_hw_data *ha = rsp->hw;
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- if (IS_P3P_TYPE(ha))
- qla82xx_poll(0, rsp);
- else
- ha->isp_ops->intr_handler(0, rsp);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
-
- static inline uint8_t *
-diff -Nur linux-4.1.13.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.13/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-4.1.13.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/thermal/x86_pkg_temp_thermal.c 2015-11-29 09:23:09.581616045 +0100
-@@ -29,6 +29,7 @@
- #include <linux/pm.h>
- #include <linux/thermal.h>
- #include <linux/debugfs.h>
-+#include <linux/work-simple.h>
- #include <asm/cpu_device_id.h>
- #include <asm/mce.h>
-
-@@ -352,7 +353,7 @@
- }
- }
-
--static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+static void platform_thermal_notify_work(struct swork_event *event)
- {
- unsigned long flags;
- int cpu = smp_processor_id();
-@@ -369,7 +370,7 @@
- pkg_work_scheduled[phy_id]) {
- disable_pkg_thres_interrupt();
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-- return -EINVAL;
-+ return;
- }
- pkg_work_scheduled[phy_id] = 1;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -378,9 +379,48 @@
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static struct swork_event notify_work;
-+
-+static int thermal_notify_work_init(void)
-+{
-+ int err;
-+
-+ err = swork_get();
-+ if (err)
-+ return err;
-+
-+ INIT_SWORK(&notify_work, platform_thermal_notify_work);
- return 0;
- }
-
-+static void thermal_notify_work_cleanup(void)
-+{
-+ swork_put();
-+}
-+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+{
-+ swork_queue(&notify_work);
-+ return 0;
-+}
-+
-+#else /* !CONFIG_PREEMPT_RT_FULL */
-+
-+static int thermal_notify_work_init(void) { return 0; }
-+
-+static void thermal_notify_work_cleanup(void) { }
-+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+{
-+ platform_thermal_notify_work(NULL);
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PREEMPT_RT_FULL */
-+
- static int find_siblings_cpu(int cpu)
- {
- int i;
-@@ -584,6 +624,9 @@
- if (!x86_match_cpu(pkg_temp_thermal_ids))
- return -ENODEV;
-
-+	if (thermal_notify_work_init())
-+		return -ENODEV;
-+
- spin_lock_init(&pkg_work_lock);
- platform_thermal_package_notify =
- pkg_temp_thermal_platform_thermal_notify;
-@@ -608,7 +651,7 @@
- kfree(pkg_work_scheduled);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
--
-+ thermal_notify_work_cleanup();
- return -ENODEV;
- }
-
-@@ -633,6 +676,7 @@
- mutex_unlock(&phy_dev_list_mutex);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
-+ thermal_notify_work_cleanup();
- for_each_online_cpu(i)
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, i));
-diff -Nur linux-4.1.13.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.13/drivers/tty/serial/8250/8250_core.c
---- linux-4.1.13.orig/drivers/tty/serial/8250/8250_core.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/tty/serial/8250/8250_core.c 2015-11-29 09:23:09.581616045 +0100
-@@ -36,6 +36,7 @@
- #include <linux/nmi.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
-+#include <linux/kdb.h>
- #include <linux/uaccess.h>
- #include <linux/pm_runtime.h>
- #ifdef CONFIG_SPARC
-@@ -80,7 +81,16 @@
- #define DEBUG_INTR(fmt...) do { } while (0)
- #endif
-
--#define PASS_LIMIT 512
-+/*
-+ * On -rt we can have more delays, and legitimately
-+ * so - so don't drop work spuriously and spam the
-+ * syslog:
-+ */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define PASS_LIMIT 1000000
-+#else
-+# define PASS_LIMIT 512
-+#endif
-
- #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-@@ -3372,7 +3382,7 @@
-
- if (port->sysrq)
- locked = 0;
-- else if (oops_in_progress)
-+ else if (oops_in_progress || in_kdb_printk())
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
-diff -Nur linux-4.1.13.orig/drivers/tty/serial/amba-pl011.c linux-4.1.13/drivers/tty/serial/amba-pl011.c
---- linux-4.1.13.orig/drivers/tty/serial/amba-pl011.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/tty/serial/amba-pl011.c 2015-11-29 09:23:09.585615778 +0100
-@@ -2000,13 +2000,19 @@
-
- clk_enable(uap->clk);
-
-- local_irq_save(flags);
-+ /*
-+ * local_irq_save(flags);
-+ *
-+ * This local_irq_save() is nonsense. If we come in via sysrq
-+ * handling then interrupts are already disabled. Aside from
-+ * that the port.sysrq check is racy on SMP regardless.
-+ */
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
-- locked = spin_trylock(&uap->port.lock);
-+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
- else
-- spin_lock(&uap->port.lock);
-+ spin_lock_irqsave(&uap->port.lock, flags);
-
- /*
- * First save the CR then disable the interrupts
-@@ -2028,8 +2034,7 @@
- writew(old_cr, uap->port.membase + UART011_CR);
-
- if (locked)
-- spin_unlock(&uap->port.lock);
-- local_irq_restore(flags);
-+ spin_unlock_irqrestore(&uap->port.lock, flags);
-
- clk_disable(uap->clk);
- }
-diff -Nur linux-4.1.13.orig/drivers/tty/serial/omap-serial.c linux-4.1.13/drivers/tty/serial/omap-serial.c
---- linux-4.1.13.orig/drivers/tty/serial/omap-serial.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/tty/serial/omap-serial.c 2015-11-29 09:23:09.585615778 +0100
-@@ -1282,13 +1282,10 @@
-
- pm_runtime_get_sync(up->dev);
-
-- local_irq_save(flags);
-- if (up->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = spin_trylock(&up->port.lock);
-+ if (up->port.sysrq || oops_in_progress)
-+ locked = spin_trylock_irqsave(&up->port.lock, flags);
- else
-- spin_lock(&up->port.lock);
-+ spin_lock_irqsave(&up->port.lock, flags);
-
- /*
- * First save the IER then disable the interrupts
-@@ -1317,8 +1314,7 @@
- pm_runtime_mark_last_busy(up->dev);
- pm_runtime_put_autosuspend(up->dev);
- if (locked)
-- spin_unlock(&up->port.lock);
-- local_irq_restore(flags);
-+ spin_unlock_irqrestore(&up->port.lock, flags);
- }
-
- static int __init
-diff -Nur linux-4.1.13.orig/drivers/usb/core/hcd.c linux-4.1.13/drivers/usb/core/hcd.c
---- linux-4.1.13.orig/drivers/usb/core/hcd.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/usb/core/hcd.c 2015-11-29 09:23:09.585615778 +0100
-@@ -1684,9 +1684,9 @@
- * and no one may trigger the above deadlock situation when
- * running complete() in tasklet.
- */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- urb->complete(urb);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- usb_anchor_resume_wakeups(anchor);
- atomic_dec(&urb->use_count);
-diff -Nur linux-4.1.13.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.13/drivers/usb/gadget/function/f_fs.c
---- linux-4.1.13.orig/drivers/usb/gadget/function/f_fs.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/usb/gadget/function/f_fs.c 2015-11-29 09:23:09.585615778 +0100
-@@ -1405,7 +1405,7 @@
- pr_info("%s(): freeing\n", __func__);
- ffs_data_clear(ffs);
- BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
-- waitqueue_active(&ffs->ep0req_completion.wait));
-+ swaitqueue_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
- kfree(ffs);
- }
-diff -Nur linux-4.1.13.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.13/drivers/usb/gadget/legacy/inode.c
---- linux-4.1.13.orig/drivers/usb/gadget/legacy/inode.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/drivers/usb/gadget/legacy/inode.c 2015-11-29 09:23:09.585615778 +0100
-@@ -345,7 +345,7 @@
- spin_unlock_irq (&epdata->dev->lock);
-
- if (likely (value == 0)) {
-- value = wait_event_interruptible (done.wait, done.done);
-+ value = swait_event_interruptible (done.wait, done.done);
- if (value != 0) {
- spin_lock_irq (&epdata->dev->lock);
- if (likely (epdata->ep != NULL)) {
-@@ -354,7 +354,7 @@
- usb_ep_dequeue (epdata->ep, epdata->req);
- spin_unlock_irq (&epdata->dev->lock);
-
-- wait_event (done.wait, done.done);
-+ swait_event (done.wait, done.done);
- if (epdata->status == -ECONNRESET)
- epdata->status = -EINTR;
- } else {
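
swait_event(), swait_event_interruptible() and swaitqueue_active() in these
USB gadget hunks belong to the "simple waitqueue" facility the realtime
patch introduces elsewhere; on -rt, completions use such a raw-lock based
waitqueue so they can be woken from atomic context. A sketch of the waiter
pattern, with names assumed to match the rest of the series:

	/* waiter side, mirroring the conversion above */
	static int wait_done(struct completion *done)
	{
		/* may return -ERESTARTSYS; the caller must then dequeue */
		return swait_event_interruptible(done->wait, done->done);
	}
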
-diff -Nur linux-4.1.13.orig/fs/aio.c linux-4.1.13/fs/aio.c
---- linux-4.1.13.orig/fs/aio.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/aio.c 2015-11-29 09:23:09.585615778 +0100
-@@ -40,6 +40,7 @@
- #include <linux/ramfs.h>
- #include <linux/percpu-refcount.h>
- #include <linux/mount.h>
-+#include <linux/work-simple.h>
-
- #include <asm/kmap_types.h>
- #include <asm/uaccess.h>
-@@ -115,7 +116,7 @@
- struct page **ring_pages;
- long nr_pages;
-
-- struct work_struct free_work;
-+ struct swork_event free_work;
-
- /*
- * signals when all in-flight requests are done
-@@ -253,6 +254,7 @@
- .mount = aio_mount,
- .kill_sb = kill_anon_super,
- };
-+ BUG_ON(swork_get());
- aio_mnt = kern_mount(&aio_fs);
- if (IS_ERR(aio_mnt))
- panic("Failed to create aio fs mount.");
-@@ -559,9 +561,9 @@
- return cancel(&kiocb->common);
- }
-
--static void free_ioctx(struct work_struct *work)
-+static void free_ioctx(struct swork_event *sev)
- {
-- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
-+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
-
- pr_debug("freeing %p\n", ctx);
-
-@@ -580,8 +582,8 @@
- if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
- complete(&ctx->rq_wait->comp);
-
-- INIT_WORK(&ctx->free_work, free_ioctx);
-- schedule_work(&ctx->free_work);
-+ INIT_SWORK(&ctx->free_work, free_ioctx);
-+ swork_queue(&ctx->free_work);
- }
-
- /*
-@@ -589,9 +591,9 @@
- * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
- * now it's safe to cancel any that need to be.
- */
--static void free_ioctx_users(struct percpu_ref *ref)
-+static void free_ioctx_users_work(struct swork_event *sev)
- {
-- struct kioctx *ctx = container_of(ref, struct kioctx, users);
-+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
- struct aio_kiocb *req;
-
- spin_lock_irq(&ctx->ctx_lock);
-@@ -610,6 +612,14 @@
- percpu_ref_put(&ctx->reqs);
- }
-
-+static void free_ioctx_users(struct percpu_ref *ref)
-+{
-+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
-+
-+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
-+ swork_queue(&ctx->free_work);
-+}
-+
- static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
- {
- unsigned i, new_nr;
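
The aio conversion relies on the "simple work" facility (linux/work-simple.h)
added elsewhere in the realtime patch. Based on its usage here, the assumed
API surface is roughly:

	/* assumed shape of the work-simple interface used above */
	struct swork_event;

	int swork_get(void);	/* take a ref, starting the worker; 0 on success */
	void swork_put(void);	/* drop the ref again */
	/* INIT_SWORK(event, func): bind func(struct swork_event *) to event */
	bool swork_queue(struct swork_event *event);	/* run on the worker */
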
-diff -Nur linux-4.1.13.orig/fs/autofs4/autofs_i.h linux-4.1.13/fs/autofs4/autofs_i.h
---- linux-4.1.13.orig/fs/autofs4/autofs_i.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/autofs4/autofs_i.h 2015-11-29 09:23:09.585615778 +0100
-@@ -34,6 +34,7 @@
- #include <linux/sched.h>
- #include <linux/mount.h>
- #include <linux/namei.h>
-+#include <linux/delay.h>
- #include <asm/current.h>
- #include <asm/uaccess.h>
-
-diff -Nur linux-4.1.13.orig/fs/autofs4/expire.c linux-4.1.13/fs/autofs4/expire.c
---- linux-4.1.13.orig/fs/autofs4/expire.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/autofs4/expire.c 2015-11-29 09:23:09.585615778 +0100
-@@ -150,7 +150,7 @@
- parent = p->d_parent;
- if (!spin_trylock(&parent->d_lock)) {
- spin_unlock(&p->d_lock);
-- cpu_relax();
-+ cpu_chill();
- goto relock;
- }
- spin_unlock(&p->d_lock);
-diff -Nur linux-4.1.13.orig/fs/buffer.c linux-4.1.13/fs/buffer.c
---- linux-4.1.13.orig/fs/buffer.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/buffer.c 2015-11-29 09:23:09.585615778 +0100
-@@ -301,8 +301,7 @@
- * decide that the page is now completely done.
- */
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
-@@ -315,8 +314,7 @@
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
-
- /*
- * If none of the buffers had errors and they are all
-@@ -328,9 +326,7 @@
- return;
-
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /*
-@@ -358,8 +354,7 @@
- }
-
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
-
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
-@@ -371,15 +366,12 @@
- }
- tmp = tmp->b_this_page;
- }
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
- end_page_writeback(page);
- return;
-
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
- EXPORT_SYMBOL(end_buffer_async_write);
-
-@@ -3325,6 +3317,7 @@
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
- if (ret) {
- INIT_LIST_HEAD(&ret->b_assoc_buffers);
-+ buffer_head_init_locks(ret);
- preempt_disable();
- __this_cpu_inc(bh_accounting.nr);
- recalc_bh_state();
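
bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() replace the
open-coded BH_Uptodate_Lock bit spinlock here and in the ntfs hunk below.
They are defined elsewhere in the realtime patch; the usual -rt shape (an
assumption, since the definition is not in this excerpt) is:

	static inline unsigned long
	bh_uptodate_lock_irqsave(struct buffer_head *bh)
	{
		unsigned long flags;

	#ifndef CONFIG_PREEMPT_RT_BASE
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
	#else
		/* b_uptodate_lock is a spinlock -rt adds to buffer_head */
		spin_lock_irqsave(&bh->b_uptodate_lock, flags);
	#endif
		return flags;
	}

	static inline void
	bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
	{
	#ifndef CONFIG_PREEMPT_RT_BASE
		bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
		local_irq_restore(flags);
	#else
		spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
	#endif
	}
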
-diff -Nur linux-4.1.13.orig/fs/dcache.c linux-4.1.13/fs/dcache.c
---- linux-4.1.13.orig/fs/dcache.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/dcache.c 2015-11-29 09:23:09.585615778 +0100
-@@ -19,6 +19,7 @@
- #include <linux/mm.h>
- #include <linux/fs.h>
- #include <linux/fsnotify.h>
-+#include <linux/delay.h>
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/hash.h>
-@@ -589,7 +590,7 @@
-
- failed:
- spin_unlock(&dentry->d_lock);
-- cpu_relax();
-+ cpu_chill();
- return dentry; /* try again with same dentry */
- }
-
-@@ -2398,7 +2399,7 @@
- if (dentry->d_lockref.count == 1) {
- if (!spin_trylock(&inode->i_lock)) {
- spin_unlock(&dentry->d_lock);
-- cpu_relax();
-+ cpu_chill();
- goto again;
- }
- dentry->d_flags &= ~DCACHE_CANT_MOUNT;
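
cpu_chill() replaces cpu_relax() wherever a trylock loop could otherwise
livelock on -rt: if the lock holder was preempted on this CPU, spinning
never lets it run. The helper is defined elsewhere in the realtime patch;
roughly (an assumption based on the usual -rt definition):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* sleep briefly so a preempted lock holder can make progress */
	extern void cpu_chill(void);	/* roughly a one-tick hrtimer sleep */
	#else
	# define cpu_chill()	cpu_relax()
	#endif
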
-diff -Nur linux-4.1.13.orig/fs/eventpoll.c linux-4.1.13/fs/eventpoll.c
---- linux-4.1.13.orig/fs/eventpoll.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/eventpoll.c 2015-11-29 09:23:09.585615778 +0100
-@@ -505,12 +505,12 @@
- */
- static void ep_poll_safewake(wait_queue_head_t *wq)
- {
-- int this_cpu = get_cpu();
-+ int this_cpu = get_cpu_light();
-
- ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
- ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
-
-- put_cpu();
-+ put_cpu_light();
- }
-
- static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -Nur linux-4.1.13.orig/fs/exec.c linux-4.1.13/fs/exec.c
---- linux-4.1.13.orig/fs/exec.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/exec.c 2015-11-29 09:23:09.593615242 +0100
-@@ -859,12 +859,14 @@
- }
- }
- task_lock(tsk);
-+ preempt_disable_rt();
- active_mm = tsk->active_mm;
- tsk->mm = mm;
- tsk->active_mm = mm;
- activate_mm(active_mm, mm);
- tsk->mm->vmacache_seqnum = 0;
- vmacache_flush(tsk);
-+ preempt_enable_rt();
- task_unlock(tsk);
- if (old_mm) {
- up_read(&old_mm->mmap_sem);
-diff -Nur linux-4.1.13.orig/fs/jbd/checkpoint.c linux-4.1.13/fs/jbd/checkpoint.c
---- linux-4.1.13.orig/fs/jbd/checkpoint.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/jbd/checkpoint.c 2015-11-29 09:23:09.593615242 +0100
-@@ -129,6 +129,8 @@
- if (journal->j_flags & JFS_ABORT)
- return;
- spin_unlock(&journal->j_state_lock);
-+ if (current->plug)
-+ io_schedule();
- mutex_lock(&journal->j_checkpoint_mutex);
-
- /*
-diff -Nur linux-4.1.13.orig/fs/jbd2/checkpoint.c linux-4.1.13/fs/jbd2/checkpoint.c
---- linux-4.1.13.orig/fs/jbd2/checkpoint.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/jbd2/checkpoint.c 2015-11-29 09:23:09.593615242 +0100
-@@ -116,6 +116,8 @@
- nblocks = jbd2_space_needed(journal);
- while (jbd2_log_space_left(journal) < nblocks) {
- write_unlock(&journal->j_state_lock);
-+ if (current->plug)
-+ io_schedule();
- mutex_lock(&journal->j_checkpoint_mutex);
-
- /*
-diff -Nur linux-4.1.13.orig/fs/namespace.c linux-4.1.13/fs/namespace.c
---- linux-4.1.13.orig/fs/namespace.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/namespace.c 2015-11-29 09:23:09.593615242 +0100
-@@ -14,6 +14,7 @@
- #include <linux/mnt_namespace.h>
- #include <linux/user_namespace.h>
- #include <linux/namei.h>
-+#include <linux/delay.h>
- #include <linux/security.h>
- #include <linux/idr.h>
- #include <linux/init.h> /* init_rootfs */
-@@ -353,8 +354,11 @@
- * incremented count after it has set MNT_WRITE_HOLD.
- */
- smp_mb();
-- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
-- cpu_relax();
-+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
-+ preempt_enable();
-+ cpu_chill();
-+ preempt_disable();
-+ }
- /*
- * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
- * be set to match its requirements. So we must not load that until
-diff -Nur linux-4.1.13.orig/fs/ntfs/aops.c linux-4.1.13/fs/ntfs/aops.c
---- linux-4.1.13.orig/fs/ntfs/aops.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/ntfs/aops.c 2015-11-29 09:23:09.593615242 +0100
-@@ -107,8 +107,7 @@
- "0x%llx.", (unsigned long long)bh->b_blocknr);
- }
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
-@@ -123,8 +122,7 @@
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
- /*
- * If none of the buffers had errors then we can set the page uptodate,
- * but we first have to perform the post read mst fixups, if the
-@@ -145,13 +143,13 @@
- recs = PAGE_CACHE_SIZE / rec_size;
- /* Should have been verified before we got here... */
- BUG_ON(!recs);
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- kaddr = kmap_atomic(page);
- for (i = 0; i < recs; i++)
- post_read_mst_fixup((NTFS_RECORD*)(kaddr +
- i * rec_size), rec_size);
- kunmap_atomic(kaddr);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- flush_dcache_page(page);
- if (likely(page_uptodate && !PageError(page)))
- SetPageUptodate(page);
-@@ -159,9 +157,7 @@
- unlock_page(page);
- return;
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /**
-diff -Nur linux-4.1.13.orig/fs/timerfd.c linux-4.1.13/fs/timerfd.c
---- linux-4.1.13.orig/fs/timerfd.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/timerfd.c 2015-11-29 09:23:09.593615242 +0100
-@@ -450,7 +450,10 @@
- break;
- }
- spin_unlock_irq(&ctx->wqh.lock);
-- cpu_relax();
-+ if (isalarm(ctx))
-+ hrtimer_wait_for_timer(&ctx->t.alarm.timer);
-+ else
-+ hrtimer_wait_for_timer(&ctx->t.tmr);
- }
-
- /*
-diff -Nur linux-4.1.13.orig/fs/xfs/xfs_inode.c linux-4.1.13/fs/xfs/xfs_inode.c
---- linux-4.1.13.orig/fs/xfs/xfs_inode.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/xfs/xfs_inode.c 2015-11-29 09:23:09.593615242 +0100
-@@ -164,7 +164,7 @@
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL)
- mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-@@ -212,7 +212,7 @@
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL) {
- if (!mrtryupdate(&ip->i_iolock))
-@@ -281,7 +281,7 @@
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
- ASSERT(lock_flags != 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL)
-@@ -364,30 +364,38 @@
-
- /*
- * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
-- * value. This shouldn't be called for page fault locking, but we also need to
-- * ensure we don't overrun the number of lockdep subclasses for the iolock or
-- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
-+ * value. This can be called for any type of inode lock combination, including
-+ * parent locking. Care must be taken to ensure we don't overrun the subclass
-+ * storage fields in the class mask we build.
- */
- static inline int
- xfs_lock_inumorder(int lock_mode, int subclass)
- {
-+ int class = 0;
-+
-+ ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
-+ XFS_ILOCK_RTSUM)));
-+
- if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-- ASSERT(subclass + XFS_LOCK_INUMORDER <
-- (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
-+ ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
-+ ASSERT(subclass + XFS_IOLOCK_PARENT_VAL <
-+ MAX_LOCKDEP_SUBCLASSES);
-+ class += subclass << XFS_IOLOCK_SHIFT;
-+ if (lock_mode & XFS_IOLOCK_PARENT)
-+ class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
- }
-
- if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
-- ASSERT(subclass + XFS_LOCK_INUMORDER <
-- (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
-- XFS_MMAPLOCK_SHIFT;
-+ ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
-+ class += subclass << XFS_MMAPLOCK_SHIFT;
- }
-
-- if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
-+ if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
-+ ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
-+ class += subclass << XFS_ILOCK_SHIFT;
-+ }
-
-- return lock_mode;
-+ return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
- }
-
- /*
-@@ -399,6 +407,11 @@
- * transaction (such as truncate). This can result in deadlock since the long
- * running trans might need to wait for the inode we just locked in order to
- * push the tail and free space in the log.
-+ *
-+ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
-+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
-+ * lock more than one at a time, lockdep will report false positives saying we
-+ * have violated locking orders.
- */
- void
- xfs_lock_inodes(
-@@ -409,8 +422,29 @@
- int attempts = 0, i, j, try_lock;
- xfs_log_item_t *lp;
-
-- /* currently supports between 2 and 5 inodes */
-+ /*
-+ * Currently supports between 2 and 5 inodes with exclusive locking. We
-+ * support an arbitrary depth of locking here, but absolute limits on
-+ * inodes depend on the type of locking and the limits placed by
-+ * lockdep annotations in xfs_lock_inumorder. These are all checked by
-+ * the asserts.
-+ */
- ASSERT(ips && inodes >= 2 && inodes <= 5);
-+ ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
-+ XFS_ILOCK_EXCL));
-+ ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
-+ XFS_ILOCK_SHARED)));
-+ ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
-+ inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
-+ ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
-+ inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
-+ ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
-+ inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
-+
-+ if (lock_mode & XFS_IOLOCK_EXCL) {
-+ ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
-+ } else if (lock_mode & XFS_MMAPLOCK_EXCL)
-+ ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
-
- try_lock = 0;
- i = 0;
-diff -Nur linux-4.1.13.orig/fs/xfs/xfs_inode.h linux-4.1.13/fs/xfs/xfs_inode.h
---- linux-4.1.13.orig/fs/xfs/xfs_inode.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/fs/xfs/xfs_inode.h 2015-11-29 09:23:09.593615242 +0100
-@@ -284,9 +284,9 @@
- * Flags for lockdep annotations.
- *
- * XFS_LOCK_PARENT - for directory operations that require locking a
-- * parent directory inode and a child entry inode. The parent gets locked
-- * with this flag so it gets a lockdep subclass of 1 and the child entry
-- * lock will have a lockdep subclass of 0.
-+ * parent directory inode and a child entry inode. IOLOCK requires nesting,
-+ * MMAPLOCK does not support this class, ILOCK requires a single subclass
-+ * to differentiate parent from child.
- *
- * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
- * inodes do not participate in the normal lock order, and thus have their
-@@ -295,30 +295,63 @@
- * XFS_LOCK_INUMORDER - for locking several inodes at the some time
- * with xfs_lock_inodes(). This flag is used as the starting subclass
- * and each subsequent lock acquired will increment the subclass by one.
-- * So the first lock acquired will have a lockdep subclass of 4, the
-- * second lock will have a lockdep subclass of 5, and so on. It is
-- * the responsibility of the class builder to shift this to the correct
-- * portion of the lock_mode lockdep mask.
-+ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
-+ * limited to the subclasses we can represent via nesting. We need a nest
-+ * depth of at least 5 inodes for the ILOCK through rename, and we also have to support
-+ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
-+ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
-+ * 8 subclasses supported by lockdep.
-+ *
-+ * This also means we have to number the sub-classes in the lowest bits of
-+ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
-+ * mask and we can't use bit-masking to build the subclasses. What a mess.
-+ *
-+ * Bit layout:
-+ *
-+ * Bit Lock Region
-+ * 16-19 XFS_IOLOCK_SHIFT dependencies
-+ * 20-23 XFS_MMAPLOCK_SHIFT dependencies
-+ * 24-31 XFS_ILOCK_SHIFT dependencies
-+ *
-+ * IOLOCK values
-+ *
-+ * 0-3 subclass value
-+ * 4-7 PARENT subclass values
-+ *
-+ * MMAPLOCK values
-+ *
-+ * 0-3 subclass value
-+ * 4-7 unused
-+ *
-+ * ILOCK values
-+ * 0-4 subclass values
-+ * 5 PARENT subclass (not nestable)
-+ * 6 RTBITMAP subclass (not nestable)
-+ * 7 RTSUM subclass (not nestable)
-+ *
- */
--#define XFS_LOCK_PARENT 1
--#define XFS_LOCK_RTBITMAP 2
--#define XFS_LOCK_RTSUM 3
--#define XFS_LOCK_INUMORDER 4
--
--#define XFS_IOLOCK_SHIFT 16
--#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
-+#define XFS_IOLOCK_SHIFT 16
-+#define XFS_IOLOCK_PARENT_VAL 4
-+#define XFS_IOLOCK_MAX_SUBCLASS (XFS_IOLOCK_PARENT_VAL - 1)
-+#define XFS_IOLOCK_DEP_MASK 0x000f0000
-+#define XFS_IOLOCK_PARENT (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
-
--#define XFS_MMAPLOCK_SHIFT 20
-+#define XFS_MMAPLOCK_SHIFT 20
-+#define XFS_MMAPLOCK_NUMORDER 0
-+#define XFS_MMAPLOCK_MAX_SUBCLASS 3
-+#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
-
--#define XFS_ILOCK_SHIFT 24
--#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_SHIFT 24
-+#define XFS_ILOCK_PARENT_VAL 5
-+#define XFS_ILOCK_MAX_SUBCLASS (XFS_ILOCK_PARENT_VAL - 1)
-+#define XFS_ILOCK_RTBITMAP_VAL 6
-+#define XFS_ILOCK_RTSUM_VAL 7
-+#define XFS_ILOCK_DEP_MASK 0xff000000
-+#define XFS_ILOCK_PARENT (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_RTBITMAP (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_RTSUM (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
-
--#define XFS_IOLOCK_DEP_MASK 0x000f0000
--#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
--#define XFS_ILOCK_DEP_MASK 0xff000000
--#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
-+#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
- XFS_MMAPLOCK_DEP_MASK | \
- XFS_ILOCK_DEP_MASK)
-
-diff -Nur linux-4.1.13.orig/include/acpi/platform/aclinux.h linux-4.1.13/include/acpi/platform/aclinux.h
---- linux-4.1.13.orig/include/acpi/platform/aclinux.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/acpi/platform/aclinux.h 2015-11-29 09:23:09.593615242 +0100
-@@ -123,6 +123,7 @@
-
- #define acpi_cache_t struct kmem_cache
- #define acpi_spinlock spinlock_t *
-+#define acpi_raw_spinlock raw_spinlock_t *
- #define acpi_cpu_flags unsigned long
-
- /* Use native linux version of acpi_os_allocate_zeroed */
-@@ -141,6 +142,20 @@
- #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
- #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
-
-+#define acpi_os_create_raw_lock(__handle) \
-+({ \
-+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
-+ \
-+ if (lock) { \
-+ *(__handle) = lock; \
-+ raw_spin_lock_init(*(__handle)); \
-+ } \
-+ lock ? AE_OK : AE_NO_MEMORY; \
-+ })
-+
-+#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
-+
-+
- /*
- * OSL interfaces used by debugger/disassembler
- */
-diff -Nur linux-4.1.13.orig/include/asm-generic/bug.h linux-4.1.13/include/asm-generic/bug.h
---- linux-4.1.13.orig/include/asm-generic/bug.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/asm-generic/bug.h 2015-11-29 09:23:09.593615242 +0100
-@@ -206,6 +206,20 @@
- # define WARN_ON_SMP(x) ({0;})
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define BUG_ON_RT(c) BUG_ON(c)
-+# define BUG_ON_NONRT(c) do { } while (0)
-+# define WARN_ON_RT(condition) WARN_ON(condition)
-+# define WARN_ON_NONRT(condition) do { } while (0)
-+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
-+#else
-+# define BUG_ON_RT(c) do { } while (0)
-+# define BUG_ON_NONRT(c) BUG_ON(c)
-+# define WARN_ON_RT(condition) do { } while (0)
-+# define WARN_ON_NONRT(condition) WARN_ON(condition)
-+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
-+#endif
-+
- #endif /* __ASSEMBLY__ */
-
- #endif
-diff -Nur linux-4.1.13.orig/include/asm-generic/futex.h linux-4.1.13/include/asm-generic/futex.h
---- linux-4.1.13.orig/include/asm-generic/futex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/asm-generic/futex.h 2015-11-29 09:23:09.593615242 +0100
-@@ -8,8 +8,7 @@
- #ifndef CONFIG_SMP
- /*
- * The following implementation only for uniprocessor machines.
-- * For UP, it's relies on the fact that pagefault_disable() also disables
-- * preemption to ensure mutual exclusion.
-+ * It relies on preempt_disable() ensuring mutual exclusion.
- *
- */
-
-@@ -38,6 +37,7 @@
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
-+ preempt_disable();
- pagefault_disable();
-
- ret = -EFAULT;
-@@ -72,6 +72,7 @@
-
- out_pagefault_enable:
- pagefault_enable();
-+ preempt_enable();
-
- if (ret == 0) {
- switch (cmp) {
-@@ -106,6 +107,7 @@
- {
- u32 val;
-
-+ preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
- return -EFAULT;
-
-@@ -113,6 +115,7 @@
- return -EFAULT;
-
- *uval = val;
-+ preempt_enable();
-
- return 0;
- }
-diff -Nur linux-4.1.13.orig/include/linux/blkdev.h linux-4.1.13/include/linux/blkdev.h
---- linux-4.1.13.orig/include/linux/blkdev.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/blkdev.h 2015-11-29 09:23:09.593615242 +0100
-@@ -101,6 +101,7 @@
- struct list_head queuelist;
- union {
- struct call_single_data csd;
-+ struct work_struct work;
- unsigned long fifo_time;
- };
-
-@@ -482,7 +483,7 @@
- struct throtl_data *td;
- #endif
- struct rcu_head rcu_head;
-- wait_queue_head_t mq_freeze_wq;
-+ struct swait_head mq_freeze_wq;
- struct percpu_ref mq_usage_counter;
- struct list_head all_q_node;
-
-diff -Nur linux-4.1.13.orig/include/linux/blk-mq.h linux-4.1.13/include/linux/blk-mq.h
---- linux-4.1.13.orig/include/linux/blk-mq.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/blk-mq.h 2015-11-29 09:23:09.593615242 +0100
-@@ -202,6 +202,7 @@
-
- struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
- struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-+void __blk_mq_complete_request_remote_work(struct work_struct *work);
-
- int blk_mq_request_started(struct request *rq);
- void blk_mq_start_request(struct request *rq);
-diff -Nur linux-4.1.13.orig/include/linux/bottom_half.h linux-4.1.13/include/linux/bottom_half.h
---- linux-4.1.13.orig/include/linux/bottom_half.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/bottom_half.h 2015-11-29 09:23:09.593615242 +0100
-@@ -4,6 +4,39 @@
- #include <linux/preempt.h>
- #include <linux/preempt_mask.h>
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+extern void __local_bh_disable(void);
-+extern void _local_bh_enable(void);
-+extern void __local_bh_enable(void);
-+
-+static inline void local_bh_disable(void)
-+{
-+ __local_bh_disable();
-+}
-+
-+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_disable();
-+}
-+
-+static inline void local_bh_enable(void)
-+{
-+ __local_bh_enable();
-+}
-+
-+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_enable();
-+}
-+
-+static inline void local_bh_enable_ip(unsigned long ip)
-+{
-+ __local_bh_enable();
-+}
-+
-+#else
-+
- #ifdef CONFIG_TRACE_IRQFLAGS
- extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
- #else
-@@ -31,5 +64,6 @@
- {
- __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
- }
-+#endif
-
- #endif /* _LINUX_BH_H */
-diff -Nur linux-4.1.13.orig/include/linux/buffer_head.h linux-4.1.13/include/linux/buffer_head.h
---- linux-4.1.13.orig/include/linux/buffer_head.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/buffer_head.h 2015-11-29 09:23:09.601614714 +0100
-@@ -75,8 +75,52 @@
- struct address_space *b_assoc_map; /* mapping this buffer is
- associated with */
- atomic_t b_count; /* users using this buffer_head */
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ spinlock_t b_uptodate_lock;
-+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
-+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
-+ spinlock_t b_state_lock;
-+ spinlock_t b_journal_head_lock;
-+#endif
-+#endif
- };
-
-+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-+{
-+ unsigned long flags;
-+
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ local_irq_save(flags);
-+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-+#else
-+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-+#endif
-+ return flags;
-+}
-+
-+static inline void
-+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
-+ local_irq_restore(flags);
-+#else
-+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-+#endif
-+}
-+
-+static inline void buffer_head_init_locks(struct buffer_head *bh)
-+{
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ spin_lock_init(&bh->b_uptodate_lock);
-+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
-+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
-+ spin_lock_init(&bh->b_state_lock);
-+ spin_lock_init(&bh->b_journal_head_lock);
-+#endif
-+#endif
-+}
-+
- /*
- * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
- * and buffer_foo() functions.
-diff -Nur linux-4.1.13.orig/include/linux/cgroup.h linux-4.1.13/include/linux/cgroup.h
---- linux-4.1.13.orig/include/linux/cgroup.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/cgroup.h 2015-11-29 09:23:09.601614714 +0100
-@@ -22,6 +22,7 @@
- #include <linux/seq_file.h>
- #include <linux/kernfs.h>
- #include <linux/wait.h>
-+#include <linux/work-simple.h>
-
- #ifdef CONFIG_CGROUPS
-
-@@ -91,6 +92,7 @@
- /* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
- struct work_struct destroy_work;
-+ struct swork_event destroy_swork;
- };
-
- /* bits in struct cgroup_subsys_state flags field */
-diff -Nur linux-4.1.13.orig/include/linux/completion.h linux-4.1.13/include/linux/completion.h
---- linux-4.1.13.orig/include/linux/completion.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/completion.h 2015-11-29 09:23:09.601614714 +0100
-@@ -7,8 +7,7 @@
- * Atomic wait-for-completion handler data structures.
- * See kernel/sched/completion.c for details.
- */
--
--#include <linux/wait.h>
-+#include <linux/wait-simple.h>
-
- /*
- * struct completion - structure used to maintain state for a "completion"
-@@ -24,11 +23,11 @@
- */
- struct completion {
- unsigned int done;
-- wait_queue_head_t wait;
-+ struct swait_head wait;
- };
-
- #define COMPLETION_INITIALIZER(work) \
-- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
-
- #define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
-@@ -73,7 +72,7 @@
- static inline void init_completion(struct completion *x)
- {
- x->done = 0;
-- init_waitqueue_head(&x->wait);
-+ init_swait_head(&x->wait);
- }
-
- /**
-diff -Nur linux-4.1.13.orig/include/linux/cpu.h linux-4.1.13/include/linux/cpu.h
---- linux-4.1.13.orig/include/linux/cpu.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/cpu.h 2015-11-29 09:23:09.601614714 +0100
-@@ -231,6 +231,8 @@
- extern void put_online_cpus(void);
- extern void cpu_hotplug_disable(void);
- extern void cpu_hotplug_enable(void);
-+extern void pin_current_cpu(void);
-+extern void unpin_current_cpu(void);
- #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
- #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
- #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -249,6 +251,8 @@
- #define put_online_cpus() do { } while (0)
- #define cpu_hotplug_disable() do { } while (0)
- #define cpu_hotplug_enable() do { } while (0)
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
- #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- /* These aren't inline functions due to a GCC bug. */
-diff -Nur linux-4.1.13.orig/include/linux/delay.h linux-4.1.13/include/linux/delay.h
---- linux-4.1.13.orig/include/linux/delay.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/delay.h 2015-11-29 09:23:09.601614714 +0100
-@@ -52,4 +52,10 @@
- msleep(seconds * 1000);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+extern void cpu_chill(void);
-+#else
-+# define cpu_chill() cpu_relax()
-+#endif
-+
- #endif /* defined(_LINUX_DELAY_H) */
-diff -Nur linux-4.1.13.orig/include/linux/ftrace_event.h linux-4.1.13/include/linux/ftrace_event.h
---- linux-4.1.13.orig/include/linux/ftrace_event.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/ftrace_event.h 2015-11-29 09:23:09.601614714 +0100
-@@ -66,6 +66,9 @@
- unsigned char flags;
- unsigned char preempt_count;
- int pid;
-+ unsigned short migrate_disable;
-+ unsigned short padding;
-+ unsigned char preempt_lazy_count;
- };
-
- #define FTRACE_MAX_EVENT \
-diff -Nur linux-4.1.13.orig/include/linux/highmem.h linux-4.1.13/include/linux/highmem.h
---- linux-4.1.13.orig/include/linux/highmem.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/highmem.h 2015-11-29 09:23:09.601614714 +0100
-@@ -7,6 +7,7 @@
- #include <linux/mm.h>
- #include <linux/uaccess.h>
- #include <linux/hardirq.h>
-+#include <linux/sched.h>
-
- #include <asm/cacheflush.h>
-
-@@ -65,6 +66,7 @@
-
- static inline void *kmap_atomic(struct page *page)
- {
-+ preempt_disable_nort();
- pagefault_disable();
- return page_address(page);
- }
-@@ -73,6 +75,7 @@
- static inline void __kunmap_atomic(void *addr)
- {
- pagefault_enable();
-+ preempt_enable_nort();
- }
-
- #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-@@ -85,32 +88,51 @@
-
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- DECLARE_PER_CPU(int, __kmap_atomic_idx);
-+#endif
-
- static inline int kmap_atomic_idx_push(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
--#ifdef CONFIG_DEBUG_HIGHMEM
-+# ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
--#endif
-+# endif
- return idx;
-+#else
-+ current->kmap_idx++;
-+ BUG_ON(current->kmap_idx > KM_TYPE_NR);
-+ return current->kmap_idx - 1;
-+#endif
- }
-
- static inline int kmap_atomic_idx(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- return __this_cpu_read(__kmap_atomic_idx) - 1;
-+#else
-+ return current->kmap_idx - 1;
-+#endif
- }
-
- static inline void kmap_atomic_idx_pop(void)
- {
--#ifdef CONFIG_DEBUG_HIGHMEM
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
- BUG_ON(idx < 0);
--#else
-+# else
- __this_cpu_dec(__kmap_atomic_idx);
-+# endif
-+#else
-+ current->kmap_idx--;
-+# ifdef CONFIG_DEBUG_HIGHMEM
-+ BUG_ON(current->kmap_idx < 0);
-+# endif
- #endif
- }
-
-diff -Nur linux-4.1.13.orig/include/linux/hrtimer.h linux-4.1.13/include/linux/hrtimer.h
---- linux-4.1.13.orig/include/linux/hrtimer.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/hrtimer.h 2015-11-29 09:23:09.601614714 +0100
-@@ -111,6 +111,11 @@
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- unsigned long state;
-+ struct list_head cb_entry;
-+ int irqsafe;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ ktime_t praecox;
-+#endif
- #ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
-@@ -147,6 +152,7 @@
- int index;
- clockid_t clockid;
- struct timerqueue_head active;
-+ struct list_head expired;
- ktime_t resolution;
- ktime_t (*get_time)(void);
- ktime_t softirq_time;
-@@ -194,6 +200,9 @@
- unsigned long nr_hangs;
- ktime_t max_hang_time;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ wait_queue_head_t wait;
-+#endif
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- };
-
-@@ -381,6 +390,13 @@
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
- }
-
-+/* Softirq preemption could deadlock timer removal */
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
-+#else
-+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
-+#endif
-+
- /* Query timers: */
- extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
- extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-diff -Nur linux-4.1.13.orig/include/linux/idr.h linux-4.1.13/include/linux/idr.h
---- linux-4.1.13.orig/include/linux/idr.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/idr.h 2015-11-29 09:23:09.601614714 +0100
-@@ -95,10 +95,14 @@
- * Each idr_preload() should be matched with an invocation of this
- * function. See idr_preload() for details.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void idr_preload_end(void);
-+#else
- static inline void idr_preload_end(void)
- {
- preempt_enable();
- }
-+#endif
-
- /**
- * idr_find - return pointer for given id
-diff -Nur linux-4.1.13.orig/include/linux/init_task.h linux-4.1.13/include/linux/init_task.h
---- linux-4.1.13.orig/include/linux/init_task.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/init_task.h 2015-11-29 09:23:09.601614714 +0100
-@@ -147,9 +147,16 @@
- # define INIT_PERF_EVENTS(tsk)
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define INIT_TIMER_LIST .posix_timer_list = NULL,
-+#else
-+# define INIT_TIMER_LIST
-+#endif
-+
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- # define INIT_VTIME(tsk) \
-- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
-+ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
-+ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \
- .vtime_snap = 0, \
- .vtime_snap_whence = VTIME_SYS,
- #else
-@@ -238,6 +245,7 @@
- .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
-+ INIT_TIMER_LIST \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -Nur linux-4.1.13.orig/include/linux/interrupt.h linux-4.1.13/include/linux/interrupt.h
---- linux-4.1.13.orig/include/linux/interrupt.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/interrupt.h 2015-11-29 09:23:09.601614714 +0100
-@@ -61,6 +61,7 @@
- * interrupt handler after suspending interrupts. For system
- * wakeup devices users need to implement wakeup detection in
- * their interrupt handlers.
-+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
- */
- #define IRQF_SHARED 0x00000080
- #define IRQF_PROBE_SHARED 0x00000100
-@@ -74,6 +75,7 @@
- #define IRQF_NO_THREAD 0x00010000
- #define IRQF_EARLY_RESUME 0x00020000
- #define IRQF_COND_SUSPEND 0x00040000
-+#define IRQF_NO_SOFTIRQ_CALL 0x00080000
-
- #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-
-@@ -102,6 +104,7 @@
- * @flags: flags (see IRQF_* above)
- * @thread_fn: interrupt handler function for threaded interrupts
- * @thread: thread pointer for threaded interrupts
-+ * @secondary: pointer to secondary irqaction (force threading)
- * @thread_flags: flags related to @thread
- * @thread_mask: bitmask for keeping track of @thread activity
- * @dir: pointer to the proc/irq/NN/name entry
-@@ -113,6 +116,7 @@
- struct irqaction *next;
- irq_handler_t thread_fn;
- struct task_struct *thread;
-+ struct irqaction *secondary;
- unsigned int irq;
- unsigned int flags;
- unsigned long thread_flags;
-@@ -184,7 +188,7 @@
- #ifdef CONFIG_LOCKDEP
- # define local_irq_enable_in_hardirq() do { } while (0)
- #else
--# define local_irq_enable_in_hardirq() local_irq_enable()
-+# define local_irq_enable_in_hardirq() local_irq_enable_nort()
- #endif
-
- extern void disable_irq_nosync(unsigned int irq);
-@@ -215,6 +219,7 @@
- unsigned int irq;
- struct kref kref;
- struct work_struct work;
-+ struct list_head list;
- void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
- void (*release)(struct kref *ref);
- };
-@@ -377,9 +382,13 @@
- bool state);
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
-+# ifndef CONFIG_PREEMPT_RT_BASE
- extern bool force_irqthreads;
-+# else
-+# define force_irqthreads (true)
-+# endif
- #else
--#define force_irqthreads (0)
-+#define force_irqthreads (false)
- #endif
-
- #ifndef __ARCH_SET_SOFTIRQ_PENDING
-@@ -435,9 +444,10 @@
- void (*action)(struct softirq_action *);
- };
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- asmlinkage void do_softirq(void);
- asmlinkage void __do_softirq(void);
--
-+static inline void thread_do_softirq(void) { do_softirq(); }
- #ifdef __ARCH_HAS_DO_SOFTIRQ
- void do_softirq_own_stack(void);
- #else
-@@ -446,6 +456,9 @@
- __do_softirq();
- }
- #endif
-+#else
-+extern void thread_do_softirq(void);
-+#endif
-
- extern void open_softirq(int nr, void (*action)(struct softirq_action *));
- extern void softirq_init(void);
-@@ -453,6 +466,7 @@
-
- extern void raise_softirq_irqoff(unsigned int nr);
- extern void raise_softirq(unsigned int nr);
-+extern void softirq_check_pending_idle(void);
-
- DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-
-@@ -474,8 +488,9 @@
- to be executed on some cpu at least once after this.
- * If the tasklet is already scheduled, but its execution is still not
- started, it will be executed only once.
-- * If this tasklet is already running on another CPU (or schedule is called
-- from tasklet itself), it is rescheduled for later.
-+ * If this tasklet is already running on another CPU, it is rescheduled
-+ for later.
-+ * Schedule must not be called from the tasklet itself (a lockup occurs)
- * Tasklet is strictly serialized wrt itself, but not
- wrt another tasklets. If client needs some intertask synchronization,
- he makes it with spinlocks.
-@@ -500,27 +515,36 @@
- enum
- {
- TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
-- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
-+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
-+ TASKLET_STATE_PENDING /* Tasklet is pending */
- };
-
--#ifdef CONFIG_SMP
-+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-+
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- static inline int tasklet_trylock(struct tasklet_struct *t)
- {
- return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
-+static inline int tasklet_tryunlock(struct tasklet_struct *t)
-+{
-+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
-+}
-+
- static inline void tasklet_unlock(struct tasklet_struct *t)
- {
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
--static inline void tasklet_unlock_wait(struct tasklet_struct *t)
--{
-- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
--}
-+extern void tasklet_unlock_wait(struct tasklet_struct *t);
-+
- #else
- #define tasklet_trylock(t) 1
-+#define tasklet_tryunlock(t) 1
- #define tasklet_unlock_wait(t) do { } while (0)
- #define tasklet_unlock(t) do { } while (0)
- #endif
-@@ -569,12 +593,7 @@
- smp_mb();
- }
-
--static inline void tasklet_enable(struct tasklet_struct *t)
--{
-- smp_mb__before_atomic();
-- atomic_dec(&t->count);
--}
--
-+extern void tasklet_enable(struct tasklet_struct *t);
- extern void tasklet_kill(struct tasklet_struct *t);
- extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
- extern void tasklet_init(struct tasklet_struct *t,
-@@ -605,6 +624,12 @@
- tasklet_kill(&ttimer->tasklet);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+extern void softirq_early_init(void);
-+#else
-+static inline void softirq_early_init(void) { }
-+#endif
-+
- /*
- * Autoprobing for irqs:
- *
-diff -Nur linux-4.1.13.orig/include/linux/io-mapping.h linux-4.1.13/include/linux/io-mapping.h
---- linux-4.1.13.orig/include/linux/io-mapping.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/io-mapping.h 2015-11-29 09:23:09.601614714 +0100
-@@ -141,6 +141,7 @@
- io_mapping_map_atomic_wc(struct io_mapping *mapping,
- unsigned long offset)
- {
-+ preempt_disable();
- pagefault_disable();
- return ((char __force __iomem *) mapping) + offset;
- }
-@@ -149,6 +150,7 @@
- io_mapping_unmap_atomic(void __iomem *vaddr)
- {
- pagefault_enable();
-+ preempt_enable();
- }
-
- /* Non-atomic map/unmap */
-diff -Nur linux-4.1.13.orig/include/linux/irqdesc.h linux-4.1.13/include/linux/irqdesc.h
---- linux-4.1.13.orig/include/linux/irqdesc.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/irqdesc.h 2015-11-29 09:23:09.601614714 +0100
-@@ -63,6 +63,7 @@
- unsigned int irqs_unhandled;
- atomic_t threads_handled;
- int threads_handled_last;
-+ u64 random_ip;
- raw_spinlock_t lock;
- struct cpumask *percpu_enabled;
- #ifdef CONFIG_SMP
-diff -Nur linux-4.1.13.orig/include/linux/irqflags.h linux-4.1.13/include/linux/irqflags.h
---- linux-4.1.13.orig/include/linux/irqflags.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/irqflags.h 2015-11-29 09:23:09.601614714 +0100
-@@ -25,8 +25,6 @@
- # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
- # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
- # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
--# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
--# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
- # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
- #else
- # define trace_hardirqs_on() do { } while (0)
-@@ -39,9 +37,15 @@
- # define trace_softirqs_enabled(p) 0
- # define trace_hardirq_enter() do { } while (0)
- # define trace_hardirq_exit() do { } while (0)
-+# define INIT_TRACE_IRQFLAGS
-+#endif
-+
-+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
-+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
-+#else
- # define lockdep_softirq_enter() do { } while (0)
- # define lockdep_softirq_exit() do { } while (0)
--# define INIT_TRACE_IRQFLAGS
- #endif
-
- #if defined(CONFIG_IRQSOFF_TRACER) || \
-@@ -148,4 +152,23 @@
-
- #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
-
-+/*
-+ * local_irq* variants depending on RT/!RT
-+ */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define local_irq_disable_nort() do { } while (0)
-+# define local_irq_enable_nort() do { } while (0)
-+# define local_irq_save_nort(flags) local_save_flags(flags)
-+# define local_irq_restore_nort(flags) (void)(flags)
-+# define local_irq_disable_rt() local_irq_disable()
-+# define local_irq_enable_rt() local_irq_enable()
-+#else
-+# define local_irq_disable_nort() local_irq_disable()
-+# define local_irq_enable_nort() local_irq_enable()
-+# define local_irq_save_nort(flags) local_irq_save(flags)
-+# define local_irq_restore_nort(flags) local_irq_restore(flags)
-+# define local_irq_disable_rt() do { } while (0)
-+# define local_irq_enable_rt() do { } while (0)
-+#endif
-+
- #endif
-diff -Nur linux-4.1.13.orig/include/linux/irq.h linux-4.1.13/include/linux/irq.h
---- linux-4.1.13.orig/include/linux/irq.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/irq.h 2015-11-29 09:23:09.601614714 +0100
-@@ -72,6 +72,7 @@
- * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
- * it from the spurious interrupt detection
- * mechanism and from core side polling.
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
- */
- enum {
- IRQ_TYPE_NONE = 0x00000000,
-@@ -97,13 +98,14 @@
- IRQ_NOTHREAD = (1 << 16),
- IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_IS_POLLED = (1 << 18),
-+ IRQ_NO_SOFTIRQ_CALL = (1 << 19),
- };
-
- #define IRQF_MODIFY_MASK \
- (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-- IRQ_IS_POLLED)
-+ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
-
- #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-
-diff -Nur linux-4.1.13.orig/include/linux/irq_work.h linux-4.1.13/include/linux/irq_work.h
---- linux-4.1.13.orig/include/linux/irq_work.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/irq_work.h 2015-11-29 09:23:09.601614714 +0100
-@@ -16,6 +16,7 @@
- #define IRQ_WORK_BUSY 2UL
- #define IRQ_WORK_FLAGS 3UL
- #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
-
- struct irq_work {
- unsigned long flags;
-@@ -51,4 +52,10 @@
- static inline void irq_work_run(void) { }
- #endif
-
-+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-+void irq_work_tick_soft(void);
-+#else
-+static inline void irq_work_tick_soft(void) { }
-+#endif
-+
- #endif /* _LINUX_IRQ_WORK_H */
-diff -Nur linux-4.1.13.orig/include/linux/jbd_common.h linux-4.1.13/include/linux/jbd_common.h
---- linux-4.1.13.orig/include/linux/jbd_common.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/jbd_common.h 2015-11-29 09:23:09.601614714 +0100
-@@ -15,32 +15,56 @@
-
- static inline void jbd_lock_bh_state(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_State, &bh->b_state);
-+#else
-+ spin_lock(&bh->b_state_lock);
-+#endif
- }
-
- static inline int jbd_trylock_bh_state(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_trylock(BH_State, &bh->b_state);
-+#else
-+ return spin_trylock(&bh->b_state_lock);
-+#endif
- }
-
- static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_is_locked(BH_State, &bh->b_state);
-+#else
-+ return spin_is_locked(&bh->b_state_lock);
-+#endif
- }
-
- static inline void jbd_unlock_bh_state(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_State, &bh->b_state);
-+#else
-+ spin_unlock(&bh->b_state_lock);
-+#endif
- }
-
- static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_JournalHead, &bh->b_state);
-+#else
-+ spin_lock(&bh->b_journal_head_lock);
-+#endif
- }
-
- static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
-+#else
-+ spin_unlock(&bh->b_journal_head_lock);
-+#endif
- }
-
- #endif
-diff -Nur linux-4.1.13.orig/include/linux/kdb.h linux-4.1.13/include/linux/kdb.h
---- linux-4.1.13.orig/include/linux/kdb.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/kdb.h 2015-11-29 09:23:09.601614714 +0100
-@@ -167,6 +167,7 @@
- extern __printf(1, 2) int kdb_printf(const char *, ...);
- typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-
-+#define in_kdb_printk() (kdb_trap_printk)
- extern void kdb_init(int level);
-
- /* Access to kdb specific polling devices */
-@@ -201,6 +202,7 @@
- extern int kdb_unregister(char *);
- #else /* ! CONFIG_KGDB_KDB */
- static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
-+#define in_kdb_printk() (0)
- static inline void kdb_init(int level) {}
- static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-diff -Nur linux-4.1.13.orig/include/linux/kernel.h linux-4.1.13/include/linux/kernel.h
---- linux-4.1.13.orig/include/linux/kernel.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/kernel.h 2015-11-29 09:23:09.601614714 +0100
-@@ -188,6 +188,9 @@
- */
- # define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-+
-+# define might_sleep_no_state_check() \
-+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
- # define sched_annotate_sleep() (current->task_state_change = 0)
- #else
- static inline void ___might_sleep(const char *file, int line,
-@@ -195,6 +198,7 @@
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
- # define might_sleep() do { might_resched(); } while (0)
-+# define might_sleep_no_state_check() do { might_resched(); } while (0)
- # define sched_annotate_sleep() do { } while (0)
- #endif
-
-@@ -244,7 +248,8 @@
-
- #if defined(CONFIG_MMU) && \
- (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
--void might_fault(void);
-+#define might_fault() __might_fault(__FILE__, __LINE__)
-+void __might_fault(const char *file, int line);
- #else
- static inline void might_fault(void) { }
- #endif
-@@ -466,6 +471,7 @@
- SYSTEM_HALT,
- SYSTEM_POWER_OFF,
- SYSTEM_RESTART,
-+ SYSTEM_SUSPEND,
- } system_state;
-
- #define TAINT_PROPRIETARY_MODULE 0
-diff -Nur linux-4.1.13.orig/include/linux/kvm_host.h linux-4.1.13/include/linux/kvm_host.h
---- linux-4.1.13.orig/include/linux/kvm_host.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/kvm_host.h 2015-11-29 09:23:09.601614714 +0100
-@@ -230,7 +230,7 @@
-
- int fpu_active;
- int guest_fpu_loaded, guest_xcr0_loaded;
-- wait_queue_head_t wq;
-+ struct swait_head wq;
- struct pid *pid;
- int sigset_active;
- sigset_t sigset;
-@@ -690,7 +690,7 @@
- }
- #endif
-
--static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
-+static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
- {
- #ifdef __KVM_HAVE_ARCH_WQP
- return vcpu->arch.wqp;
-diff -Nur linux-4.1.13.orig/include/linux/lglock.h linux-4.1.13/include/linux/lglock.h
---- linux-4.1.13.orig/include/linux/lglock.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/lglock.h 2015-11-29 09:23:09.601614714 +0100
-@@ -34,22 +34,39 @@
- #endif
-
- struct lglock {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- arch_spinlock_t __percpu *lock;
-+#else
-+ struct rt_mutex __percpu *lock;
-+#endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lock_key;
- struct lockdep_map lock_dep_map;
- #endif
- };
-
--#define DEFINE_LGLOCK(name) \
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- struct lglock name = { .lock = &name ## _lock }
-
--#define DEFINE_STATIC_LGLOCK(name) \
-+# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- static struct lglock name = { .lock = &name ## _lock }
-+#else
-+
-+# define DEFINE_LGLOCK(name) \
-+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
-+ struct lglock name = { .lock = &name ## _lock }
-+
-+# define DEFINE_STATIC_LGLOCK(name) \
-+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
-+ static struct lglock name = { .lock = &name ## _lock }
-+#endif
-
- void lg_lock_init(struct lglock *lg, char *name);
- void lg_local_lock(struct lglock *lg);
-@@ -59,6 +76,12 @@
- void lg_global_lock(struct lglock *lg);
- void lg_global_unlock(struct lglock *lg);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+#define lg_global_trylock_relax(name) lg_global_lock(name)
-+#else
-+void lg_global_trylock_relax(struct lglock *lg);
-+#endif
-+
- #else
- /* When !CONFIG_SMP, map lglock to spinlock */
- #define lglock spinlock
-diff -Nur linux-4.1.13.orig/include/linux/list_bl.h linux-4.1.13/include/linux/list_bl.h
---- linux-4.1.13.orig/include/linux/list_bl.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/list_bl.h 2015-11-29 09:23:09.601614714 +0100
-@@ -2,6 +2,7 @@
- #define _LINUX_LIST_BL_H
-
- #include <linux/list.h>
-+#include <linux/spinlock.h>
- #include <linux/bit_spinlock.h>
-
- /*
-@@ -32,13 +33,22 @@
-
- struct hlist_bl_head {
- struct hlist_bl_node *first;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ raw_spinlock_t lock;
-+#endif
- };
-
- struct hlist_bl_node {
- struct hlist_bl_node *next, **pprev;
- };
--#define INIT_HLIST_BL_HEAD(ptr) \
-- ((ptr)->first = NULL)
-+
-+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-+{
-+ h->first = NULL;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ raw_spin_lock_init(&h->lock);
-+#endif
-+}
-
- static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
- {
-@@ -117,12 +127,26 @@
-
- static inline void hlist_bl_lock(struct hlist_bl_head *b)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(0, (unsigned long *)b);
-+#else
-+ raw_spin_lock(&b->lock);
-+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-+ __set_bit(0, (unsigned long *)b);
-+#endif
-+#endif
- }
-
- static inline void hlist_bl_unlock(struct hlist_bl_head *b)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- __bit_spin_unlock(0, (unsigned long *)b);
-+#else
-+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-+ __clear_bit(0, (unsigned long *)b);
-+#endif
-+ raw_spin_unlock(&b->lock);
-+#endif
- }
-
- static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -Nur linux-4.1.13.orig/include/linux/locallock.h linux-4.1.13/include/linux/locallock.h
---- linux-4.1.13.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/locallock.h 2015-11-29 09:23:09.605614450 +0100
-@@ -0,0 +1,270 @@
-+#ifndef _LINUX_LOCALLOCK_H
-+#define _LINUX_LOCALLOCK_H
-+
-+#include <linux/percpu.h>
-+#include <linux/spinlock.h>
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+# define LL_WARN(cond) WARN_ON(cond)
-+#else
-+# define LL_WARN(cond) do { } while (0)
-+#endif
-+
-+/*
-+ * per cpu lock based substitute for local_irq_*()
-+ */
-+struct local_irq_lock {
-+ spinlock_t lock;
-+ struct task_struct *owner;
-+ int nestcnt;
-+ unsigned long flags;
-+};
-+
-+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
-+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
-+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
-+
-+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
-+ DECLARE_PER_CPU(struct local_irq_lock, lvar)
-+
-+#define local_irq_lock_init(lvar) \
-+ do { \
-+ int __cpu; \
-+ for_each_possible_cpu(__cpu) \
-+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
-+ } while (0)
-+
-+/*
-+ * spin_lock|trylock|unlock_local flavour that does not migrate disable
-+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
-+ * already takes care of the migrate_disable/enable
-+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
-+ */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define spin_lock_local(lock) rt_spin_lock(lock)
-+# define spin_trylock_local(lock) rt_spin_trylock(lock)
-+# define spin_unlock_local(lock) rt_spin_unlock(lock)
-+#else
-+# define spin_lock_local(lock) spin_lock(lock)
-+# define spin_trylock_local(lock) spin_trylock(lock)
-+# define spin_unlock_local(lock) spin_unlock(lock)
-+#endif
-+
-+static inline void __local_lock(struct local_irq_lock *lv)
-+{
-+ if (lv->owner != current) {
-+ spin_lock_local(&lv->lock);
-+ LL_WARN(lv->owner);
-+ LL_WARN(lv->nestcnt);
-+ lv->owner = current;
-+ }
-+ lv->nestcnt++;
-+}
-+
-+#define local_lock(lvar) \
-+ do { __local_lock(&get_local_var(lvar)); } while (0)
-+
-+static inline int __local_trylock(struct local_irq_lock *lv)
-+{
-+ if (lv->owner != current && spin_trylock_local(&lv->lock)) {
-+ LL_WARN(lv->owner);
-+ LL_WARN(lv->nestcnt);
-+ lv->owner = current;
-+ lv->nestcnt = 1;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+#define local_trylock(lvar) \
-+ ({ \
-+ int __locked; \
-+ __locked = __local_trylock(&get_local_var(lvar)); \
-+ if (!__locked) \
-+ put_local_var(lvar); \
-+ __locked; \
-+ })
-+
-+static inline void __local_unlock(struct local_irq_lock *lv)
-+{
-+ LL_WARN(lv->nestcnt == 0);
-+ LL_WARN(lv->owner != current);
-+ if (--lv->nestcnt)
-+ return;
-+
-+ lv->owner = NULL;
-+ spin_unlock_local(&lv->lock);
-+}
-+
-+#define local_unlock(lvar) \
-+ do { \
-+ __local_unlock(this_cpu_ptr(&lvar)); \
-+ put_local_var(lvar); \
-+ } while (0)
-+
-+static inline void __local_lock_irq(struct local_irq_lock *lv)
-+{
-+ spin_lock_irqsave(&lv->lock, lv->flags);
-+ LL_WARN(lv->owner);
-+ LL_WARN(lv->nestcnt);
-+ lv->owner = current;
-+ lv->nestcnt = 1;
-+}
-+
-+#define local_lock_irq(lvar) \
-+ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
-+
-+#define local_lock_irq_on(lvar, cpu) \
-+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
-+
-+static inline void __local_unlock_irq(struct local_irq_lock *lv)
-+{
-+ LL_WARN(!lv->nestcnt);
-+ LL_WARN(lv->owner != current);
-+ lv->owner = NULL;
-+ lv->nestcnt = 0;
-+ spin_unlock_irq(&lv->lock);
-+}
-+
-+#define local_unlock_irq(lvar) \
-+ do { \
-+ __local_unlock_irq(this_cpu_ptr(&lvar)); \
-+ put_local_var(lvar); \
-+ } while (0)
-+
-+#define local_unlock_irq_on(lvar, cpu) \
-+ do { \
-+ __local_unlock_irq(&per_cpu(lvar, cpu)); \
-+ } while (0)
-+
-+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
-+{
-+ if (lv->owner != current) {
-+ __local_lock_irq(lv);
-+ return 0;
-+ } else {
-+ lv->nestcnt++;
-+ return 1;
-+ }
-+}
-+
-+#define local_lock_irqsave(lvar, _flags) \
-+ do { \
-+ if (__local_lock_irqsave(&get_local_var(lvar))) \
-+ put_local_var(lvar); \
-+ _flags = __this_cpu_read(lvar.flags); \
-+ } while (0)
-+
-+#define local_lock_irqsave_on(lvar, _flags, cpu) \
-+ do { \
-+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
-+ _flags = per_cpu(lvar, cpu).flags; \
-+ } while (0)
-+
-+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
-+ unsigned long flags)
-+{
-+ LL_WARN(!lv->nestcnt);
-+ LL_WARN(lv->owner != current);
-+ if (--lv->nestcnt)
-+ return 0;
-+
-+ lv->owner = NULL;
-+ spin_unlock_irqrestore(&lv->lock, lv->flags);
-+ return 1;
-+}
-+
-+#define local_unlock_irqrestore(lvar, flags) \
-+ do { \
-+ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
-+ put_local_var(lvar); \
-+ } while (0)
-+
-+#define local_unlock_irqrestore_on(lvar, flags, cpu) \
-+ do { \
-+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
-+ } while (0)
-+
-+#define local_spin_trylock_irq(lvar, lock) \
-+ ({ \
-+ int __locked; \
-+ local_lock_irq(lvar); \
-+ __locked = spin_trylock(lock); \
-+ if (!__locked) \
-+ local_unlock_irq(lvar); \
-+ __locked; \
-+ })
-+
-+#define local_spin_lock_irq(lvar, lock) \
-+ do { \
-+ local_lock_irq(lvar); \
-+ spin_lock(lock); \
-+ } while (0)
-+
-+#define local_spin_unlock_irq(lvar, lock) \
-+ do { \
-+ spin_unlock(lock); \
-+ local_unlock_irq(lvar); \
-+ } while (0)
-+
-+#define local_spin_lock_irqsave(lvar, lock, flags) \
-+ do { \
-+ local_lock_irqsave(lvar, flags); \
-+ spin_lock(lock); \
-+ } while (0)
-+
-+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
-+ do { \
-+ spin_unlock(lock); \
-+ local_unlock_irqrestore(lvar, flags); \
-+ } while (0)
-+
-+#define get_locked_var(lvar, var) \
-+ (*({ \
-+ local_lock(lvar); \
-+ this_cpu_ptr(&var); \
-+ }))
-+
-+#define put_locked_var(lvar, var) local_unlock(lvar);
-+
-+#define local_lock_cpu(lvar) \
-+ ({ \
-+ local_lock(lvar); \
-+ smp_processor_id(); \
-+ })
-+
-+#define local_unlock_cpu(lvar) local_unlock(lvar)
-+
-+#else /* PREEMPT_RT_BASE */
-+
-+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
-+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
-+
-+static inline void local_irq_lock_init(int lvar) { }
-+
-+#define local_lock(lvar) preempt_disable()
-+#define local_unlock(lvar) preempt_enable()
-+#define local_lock_irq(lvar) local_irq_disable()
-+#define local_unlock_irq(lvar) local_irq_enable()
-+#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
-+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
-+
-+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
-+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
-+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
-+#define local_spin_lock_irqsave(lvar, lock, flags) \
-+ spin_lock_irqsave(lock, flags)
-+#define local_spin_unlock_irqrestore(lvar, lock, flags) \
-+ spin_unlock_irqrestore(lock, flags)
-+
-+#define get_locked_var(lvar, var) get_cpu_var(var)
-+#define put_locked_var(lvar, var) put_cpu_var(var)
-+
-+#define local_lock_cpu(lvar) get_cpu()
-+#define local_unlock_cpu(lvar) put_cpu()
-+
-+#endif
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/mm_types.h linux-4.1.13/include/linux/mm_types.h
---- linux-4.1.13.orig/include/linux/mm_types.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/mm_types.h 2015-11-29 09:23:09.605614450 +0100
-@@ -11,6 +11,7 @@
- #include <linux/completion.h>
- #include <linux/cpumask.h>
- #include <linux/uprobes.h>
-+#include <linux/rcupdate.h>
- #include <linux/page-flags-layout.h>
- #include <asm/page.h>
- #include <asm/mmu.h>
-@@ -453,6 +454,9 @@
- bool tlb_flush_pending;
- #endif
- struct uprobes_state uprobes_state;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head delayed_drop;
-+#endif
- #ifdef CONFIG_X86_INTEL_MPX
- /* address of the bounds directory */
- void __user *bd_addr;
-diff -Nur linux-4.1.13.orig/include/linux/mutex.h linux-4.1.13/include/linux/mutex.h
---- linux-4.1.13.orig/include/linux/mutex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/mutex.h 2015-11-29 09:23:09.609614185 +0100
-@@ -19,6 +19,17 @@
- #include <asm/processor.h>
- #include <linux/osq_lock.h>
-
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-+ , .dep_map = { .name = #lockname }
-+#else
-+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# include <linux/mutex_rt.h>
-+#else
-+
- /*
- * Simple, straightforward mutexes with strict semantics:
- *
-@@ -99,13 +110,6 @@
- static inline void mutex_destroy(struct mutex *lock) {}
- #endif
-
--#ifdef CONFIG_DEBUG_LOCK_ALLOC
--# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-- , .dep_map = { .name = #lockname }
--#else
--# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
--#endif
--
- #define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -173,6 +177,8 @@
- extern int mutex_trylock(struct mutex *lock);
- extern void mutex_unlock(struct mutex *lock);
-
-+#endif /* !PREEMPT_RT_FULL */
-+
- extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-
- #endif /* __LINUX_MUTEX_H */
-diff -Nur linux-4.1.13.orig/include/linux/mutex_rt.h linux-4.1.13/include/linux/mutex_rt.h
---- linux-4.1.13.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/mutex_rt.h 2015-11-29 09:23:09.609614185 +0100
-@@ -0,0 +1,84 @@
-+#ifndef __LINUX_MUTEX_RT_H
-+#define __LINUX_MUTEX_RT_H
-+
-+#ifndef __LINUX_MUTEX_H
-+#error "Please include mutex.h"
-+#endif
-+
-+#include <linux/rtmutex.h>
-+
-+/* FIXME: Just for __lockfunc */
-+#include <linux/spinlock.h>
-+
-+struct mutex {
-+ struct rt_mutex lock;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
-+
-+#define __MUTEX_INITIALIZER(mutexname) \
-+ { \
-+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
-+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
-+ }
-+
-+#define DEFINE_MUTEX(mutexname) \
-+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-+
-+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
-+extern void __lockfunc _mutex_lock(struct mutex *lock);
-+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
-+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
-+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
-+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
-+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
-+extern int __lockfunc _mutex_trylock(struct mutex *lock);
-+extern void __lockfunc _mutex_unlock(struct mutex *lock);
-+
-+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
-+#define mutex_lock(l) _mutex_lock(l)
-+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
-+#define mutex_lock_killable(l) _mutex_lock_killable(l)
-+#define mutex_trylock(l) _mutex_trylock(l)
-+#define mutex_unlock(l) _mutex_unlock(l)
-+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
-+# define mutex_lock_interruptible_nested(l, s) \
-+ _mutex_lock_interruptible_nested(l, s)
-+# define mutex_lock_killable_nested(l, s) \
-+ _mutex_lock_killable_nested(l, s)
-+
-+# define mutex_lock_nest_lock(lock, nest_lock) \
-+do { \
-+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
-+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
-+} while (0)
-+
-+#else
-+# define mutex_lock_nested(l, s) _mutex_lock(l)
-+# define mutex_lock_interruptible_nested(l, s) \
-+ _mutex_lock_interruptible(l)
-+# define mutex_lock_killable_nested(l, s) \
-+ _mutex_lock_killable(l)
-+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-+#endif
-+
-+# define mutex_init(mutex) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ rt_mutex_init(&(mutex)->lock); \
-+ __mutex_do_init((mutex), #mutex, &__key); \
-+} while (0)
-+
-+# define __mutex_init(mutex, name, key) \
-+do { \
-+ rt_mutex_init(&(mutex)->lock); \
-+ __mutex_do_init((mutex), name, key); \
-+} while (0)
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/netdevice.h linux-4.1.13/include/linux/netdevice.h
---- linux-4.1.13.orig/include/linux/netdevice.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/netdevice.h 2015-11-29 09:23:09.609614185 +0100
-@@ -2469,6 +2469,7 @@
- unsigned int dropped;
- struct sk_buff_head input_pkt_queue;
- struct napi_struct backlog;
-+ struct sk_buff_head tofree_queue;
-
- };
-
-diff -Nur linux-4.1.13.orig/include/linux/netfilter/x_tables.h linux-4.1.13/include/linux/netfilter/x_tables.h
---- linux-4.1.13.orig/include/linux/netfilter/x_tables.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/netfilter/x_tables.h 2015-11-29 09:23:09.609614185 +0100
-@@ -3,6 +3,7 @@
-
-
- #include <linux/netdevice.h>
-+#include <linux/locallock.h>
- #include <uapi/linux/netfilter/x_tables.h>
-
- /**
-@@ -282,6 +283,8 @@
- */
- DECLARE_PER_CPU(seqcount_t, xt_recseq);
-
-+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
-+
- /**
- * xt_write_recseq_begin - start of a write section
- *
-@@ -296,6 +299,9 @@
- {
- unsigned int addend;
-
-+ /* RT protection */
-+ local_lock(xt_write_lock);
-+
- /*
- * Low order bit of sequence is set if we already
- * called xt_write_recseq_begin().
-@@ -326,6 +332,7 @@
- /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
- smp_wmb();
- __this_cpu_add(xt_recseq.sequence, addend);
-+ local_unlock(xt_write_lock);
- }
-
- /*
-diff -Nur linux-4.1.13.orig/include/linux/notifier.h linux-4.1.13/include/linux/notifier.h
---- linux-4.1.13.orig/include/linux/notifier.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/notifier.h 2015-11-29 09:23:09.609614185 +0100
-@@ -6,7 +6,7 @@
- *
- * Alan Cox <Alan.Cox@linux.org>
- */
--
-+
- #ifndef _LINUX_NOTIFIER_H
- #define _LINUX_NOTIFIER_H
- #include <linux/errno.h>
-@@ -42,9 +42,7 @@
- * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
- * As compensation, srcu_notifier_chain_unregister() is rather expensive.
- * SRCU notifier chains should be used when the chain will be called very
-- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
-- * chains are slightly more difficult to use because they require special
-- * runtime initialization.
-+ * often but notifier_blocks will seldom be removed.
- */
-
- typedef int (*notifier_fn_t)(struct notifier_block *nb,
-@@ -88,7 +86,7 @@
- (name)->head = NULL; \
- } while (0)
-
--/* srcu_notifier_heads must be initialized and cleaned up dynamically */
-+/* srcu_notifier_heads must be cleaned up dynamically */
- extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
- #define srcu_cleanup_notifier_head(name) \
- cleanup_srcu_struct(&(name)->srcu);
-@@ -101,7 +99,13 @@
- .head = NULL }
- #define RAW_NOTIFIER_INIT(name) { \
- .head = NULL }
--/* srcu_notifier_heads cannot be initialized statically */
-+
-+#define SRCU_NOTIFIER_INIT(name, pcpu) \
-+ { \
-+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
-+ .head = NULL, \
-+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
-+ }
-
- #define ATOMIC_NOTIFIER_HEAD(name) \
- struct atomic_notifier_head name = \
-@@ -113,6 +117,18 @@
- struct raw_notifier_head name = \
- RAW_NOTIFIER_INIT(name)
-
-+#define _SRCU_NOTIFIER_HEAD(name, mod) \
-+ static DEFINE_PER_CPU(struct srcu_struct_array, \
-+ name##_head_srcu_array); \
-+ mod struct srcu_notifier_head name = \
-+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
-+
-+#define SRCU_NOTIFIER_HEAD(name) \
-+ _SRCU_NOTIFIER_HEAD(name, )
-+
-+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
-+ _SRCU_NOTIFIER_HEAD(name, static)
-+
- #ifdef __KERNEL__
-
- extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
-@@ -182,12 +198,12 @@
-
- /*
- * Declared notifiers so far. I can imagine quite a few more chains
-- * over time (eg laptop power reset chains, reboot chain (to clean
-+ * over time (eg laptop power reset chains, reboot chain (to clean
- * device units up), device [un]mount chain, module load/unload chain,
-- * low memory chain, screenblank chain (for plug in modular screenblankers)
-+ * low memory chain, screenblank chain (for plug in modular screenblankers)
- * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
- */
--
-+
- /* CPU notfiers are defined in include/linux/cpu.h. */
-
- /* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -Nur linux-4.1.13.orig/include/linux/percpu.h linux-4.1.13/include/linux/percpu.h
---- linux-4.1.13.orig/include/linux/percpu.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/percpu.h 2015-11-29 09:23:09.609614185 +0100
-@@ -24,6 +24,35 @@
- PERCPU_MODULE_RESERVE)
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+#define get_local_var(var) (*({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(&var); }))
-+
-+#define put_local_var(var) do { \
-+ (void)&(var); \
-+ migrate_enable(); \
-+} while (0)
-+
-+# define get_local_ptr(var) ({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(var); })
-+
-+# define put_local_ptr(var) do { \
-+ (void)(var); \
-+ migrate_enable(); \
-+} while (0)
-+
-+#else
-+
-+#define get_local_var(var) get_cpu_var(var)
-+#define put_local_var(var) put_cpu_var(var)
-+#define get_local_ptr(var) get_cpu_ptr(var)
-+#define put_local_ptr(var) put_cpu_ptr(var)
-+
-+#endif
-+
- /* minimum unit size, also is the maximum supported allocation size */
- #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-
-diff -Nur linux-4.1.13.orig/include/linux/pid.h linux-4.1.13/include/linux/pid.h
---- linux-4.1.13.orig/include/linux/pid.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/pid.h 2015-11-29 09:23:09.609614185 +0100
-@@ -2,6 +2,7 @@
- #define _LINUX_PID_H
-
- #include <linux/rcupdate.h>
-+#include <linux/atomic.h>
-
- enum pid_type
- {
-diff -Nur linux-4.1.13.orig/include/linux/preempt.h linux-4.1.13/include/linux/preempt.h
---- linux-4.1.13.orig/include/linux/preempt.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/preempt.h 2015-11-29 09:23:09.609614185 +0100
-@@ -34,6 +34,20 @@
- #define preempt_count_inc() preempt_count_add(1)
- #define preempt_count_dec() preempt_count_sub(1)
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-+#else
-+#define add_preempt_lazy_count(val) do { } while (0)
-+#define sub_preempt_lazy_count(val) do { } while (0)
-+#define inc_preempt_lazy_count() do { } while (0)
-+#define dec_preempt_lazy_count() do { } while (0)
-+#define preempt_lazy_count() (0)
-+#endif
-+
- #ifdef CONFIG_PREEMPT_COUNT
-
- #define preempt_disable() \
-@@ -42,13 +56,25 @@
- barrier(); \
- } while (0)
-
-+#define preempt_lazy_disable() \
-+do { \
-+ inc_preempt_lazy_count(); \
-+ barrier(); \
-+} while (0)
-+
- #define sched_preempt_enable_no_resched() \
- do { \
- barrier(); \
- preempt_count_dec(); \
- } while (0)
-
--#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-+# define preempt_check_resched_rt() preempt_check_resched()
-+#else
-+# define preempt_enable_no_resched() preempt_enable()
-+# define preempt_check_resched_rt() barrier();
-+#endif
-
- #ifdef CONFIG_PREEMPT
- #define preempt_enable() \
-@@ -64,6 +90,13 @@
- __preempt_schedule(); \
- } while (0)
-
-+#define preempt_lazy_enable() \
-+do { \
-+ dec_preempt_lazy_count(); \
-+ barrier(); \
-+ preempt_check_resched(); \
-+} while (0)
-+
- #else
- #define preempt_enable() \
- do { \
-@@ -122,6 +155,7 @@
- #define preempt_disable_notrace() barrier()
- #define preempt_enable_no_resched_notrace() barrier()
- #define preempt_enable_notrace() barrier()
-+#define preempt_check_resched_rt() barrier()
-
- #endif /* CONFIG_PREEMPT_COUNT */
-
-@@ -141,10 +175,31 @@
- } while (0)
- #define preempt_fold_need_resched() \
- do { \
-- if (tif_need_resched()) \
-+ if (tif_need_resched_now()) \
- set_preempt_need_resched(); \
- } while (0)
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define preempt_disable_rt() preempt_disable()
-+# define preempt_enable_rt() preempt_enable()
-+# define preempt_disable_nort() barrier()
-+# define preempt_enable_nort() barrier()
-+# ifdef CONFIG_SMP
-+ extern void migrate_disable(void);
-+ extern void migrate_enable(void);
-+# else /* CONFIG_SMP */
-+# define migrate_disable() barrier()
-+# define migrate_enable() barrier()
-+# endif /* CONFIG_SMP */
-+#else
-+# define preempt_disable_rt() barrier()
-+# define preempt_enable_rt() barrier()
-+# define preempt_disable_nort() preempt_disable()
-+# define preempt_enable_nort() preempt_enable()
-+# define migrate_disable() preempt_disable()
-+# define migrate_enable() preempt_enable()
-+#endif
-+
- #ifdef CONFIG_PREEMPT_NOTIFIERS
-
- struct preempt_notifier;
-diff -Nur linux-4.1.13.orig/include/linux/preempt_mask.h linux-4.1.13/include/linux/preempt_mask.h
---- linux-4.1.13.orig/include/linux/preempt_mask.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/preempt_mask.h 2015-11-29 09:23:09.609614185 +0100
-@@ -44,16 +44,26 @@
- #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
- #define NMI_OFFSET (1UL << NMI_SHIFT)
-
--#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-+#else
-+# define SOFTIRQ_DISABLE_OFFSET (0)
-+#endif
-
- #define PREEMPT_ACTIVE_BITS 1
- #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
- #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
- #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
--#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
- #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-+#else
-+# define softirq_count() (0UL)
-+extern int in_serving_softirq(void);
-+#endif
-
- /*
- * Are we doing bottom half or hardware interrupt processing?
-@@ -64,7 +74,6 @@
- #define in_irq() (hardirq_count())
- #define in_softirq() (softirq_count())
- #define in_interrupt() (irq_count())
--#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
- /*
- * Are we in NMI context?
-@@ -83,7 +92,11 @@
- /*
- * The preempt_count offset after spin_lock()
- */
-+#if !defined(CONFIG_PREEMPT_RT_FULL)
- #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
-+#else
-+#define PREEMPT_LOCK_OFFSET 0
-+#endif
-
- /*
- * The preempt_count offset needed for things like:
-diff -Nur linux-4.1.13.orig/include/linux/printk.h linux-4.1.13/include/linux/printk.h
---- linux-4.1.13.orig/include/linux/printk.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/printk.h 2015-11-29 09:23:09.609614185 +0100
-@@ -115,9 +115,11 @@
- #ifdef CONFIG_EARLY_PRINTK
- extern asmlinkage __printf(1, 2)
- void early_printk(const char *fmt, ...);
-+extern void printk_kill(void);
- #else
- static inline __printf(1, 2) __cold
- void early_printk(const char *s, ...) { }
-+static inline void printk_kill(void) { }
- #endif
-
- typedef int(*printk_func_t)(const char *fmt, va_list args);
-diff -Nur linux-4.1.13.orig/include/linux/radix-tree.h linux-4.1.13/include/linux/radix-tree.h
---- linux-4.1.13.orig/include/linux/radix-tree.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/radix-tree.h 2015-11-29 09:23:09.609614185 +0100
-@@ -277,8 +277,13 @@
- unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
- void ***results, unsigned long *indices,
- unsigned long first_index, unsigned int max_items);
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int radix_tree_preload(gfp_t gfp_mask);
- int radix_tree_maybe_preload(gfp_t gfp_mask);
-+#else
-+static inline int radix_tree_preload(gfp_t gm) { return 0; }
-+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
-+#endif
- void radix_tree_init(void);
- void *radix_tree_tag_set(struct radix_tree_root *root,
- unsigned long index, unsigned int tag);
-@@ -303,7 +308,7 @@
-
- static inline void radix_tree_preload_end(void)
- {
-- preempt_enable();
-+ preempt_enable_nort();
- }
-
- /**
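Since radix_tree_preload() becomes a no-op on RT (per-CPU preloading relies on disabled preemption), the usual preload/insert pattern is unchanged for callers. A hedged sketch, not from the patch; my_tree, my_tree_lock and the item are hypothetical:

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static RADIX_TREE(my_tree, GFP_ATOMIC);	/* hypothetical */
	static DEFINE_SPINLOCK(my_tree_lock);

	static int my_tree_add(unsigned long index, void *item)
	{
		int err = radix_tree_preload(GFP_KERNEL);	/* returns 0 on RT */

		if (err)
			return err;
		spin_lock(&my_tree_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_tree_lock);
		radix_tree_preload_end();	/* preempt_enable_nort(), per the hunk above */
		return err;
	}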
-diff -Nur linux-4.1.13.orig/include/linux/random.h linux-4.1.13/include/linux/random.h
---- linux-4.1.13.orig/include/linux/random.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/random.h 2015-11-29 09:23:09.609614185 +0100
-@@ -11,7 +11,7 @@
- extern void add_device_randomness(const void *, unsigned int);
- extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value);
--extern void add_interrupt_randomness(int irq, int irq_flags);
-+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
-
- extern void get_random_bytes(void *buf, int nbytes);
- extern void get_random_bytes_arch(void *buf, int nbytes);
-diff -Nur linux-4.1.13.orig/include/linux/rcupdate.h linux-4.1.13/include/linux/rcupdate.h
---- linux-4.1.13.orig/include/linux/rcupdate.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/rcupdate.h 2015-11-29 09:23:09.609614185 +0100
-@@ -167,6 +167,9 @@
-
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+#define call_rcu_bh call_rcu
-+#else
- /**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
-@@ -190,6 +193,7 @@
- */
- void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-+#endif
-
- /**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -260,6 +264,11 @@
- * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
- */
- #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+#define sched_rcu_preempt_depth() rcu_preempt_depth()
-+#else
-+static inline int sched_rcu_preempt_depth(void) { return 0; }
-+#endif
-
- #else /* #ifdef CONFIG_PREEMPT_RCU */
-
-@@ -283,6 +292,8 @@
- return 0;
- }
-
-+#define sched_rcu_preempt_depth() rcu_preempt_depth()
-+
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
- /* Internal to kernel */
-@@ -463,7 +474,14 @@
- int debug_lockdep_rcu_enabled(void);
-
- int rcu_read_lock_held(void);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline int rcu_read_lock_bh_held(void)
-+{
-+ return rcu_read_lock_held();
-+}
-+#else
- int rcu_read_lock_bh_held(void);
-+#endif
-
- /**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -990,10 +1008,14 @@
- static inline void rcu_read_lock_bh(void)
- {
- local_bh_disable();
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ rcu_read_lock();
-+#else
- __acquire(RCU_BH);
- rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
-+#endif
- }
-
- /*
-@@ -1003,10 +1025,14 @@
- */
- static inline void rcu_read_unlock_bh(void)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ rcu_read_unlock();
-+#else
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
- rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
-+#endif
- local_bh_enable();
- }
-
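With call_rcu_bh aliased to call_rcu and rcu_read_lock_bh() turned into local_bh_disable() plus a regular rcu_read_lock() on RT, BH-flavoured readers keep their shape. An illustrative sketch; struct foo and gp are hypothetical:

	#include <linux/rcupdate.h>

	struct foo { int val; };
	static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

	static int read_foo(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock_bh();	/* on RT this is also a plain RCU section */
		p = rcu_dereference_bh(gp);
		if (p)
			val = p->val;
		rcu_read_unlock_bh();
		return val;
	}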
-diff -Nur linux-4.1.13.orig/include/linux/rcutree.h linux-4.1.13/include/linux/rcutree.h
---- linux-4.1.13.orig/include/linux/rcutree.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/rcutree.h 2015-11-29 09:23:09.609614185 +0100
-@@ -46,7 +46,11 @@
- rcu_note_context_switch();
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define synchronize_rcu_bh synchronize_rcu
-+#else
- void synchronize_rcu_bh(void);
-+#endif
- void synchronize_sched_expedited(void);
- void synchronize_rcu_expedited(void);
-
-@@ -74,7 +78,11 @@
- }
-
- void rcu_barrier(void);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define rcu_barrier_bh rcu_barrier
-+#else
- void rcu_barrier_bh(void);
-+#endif
- void rcu_barrier_sched(void);
- unsigned long get_state_synchronize_rcu(void);
- void cond_synchronize_rcu(unsigned long oldstate);
-@@ -85,12 +93,10 @@
- unsigned long rcu_batches_started_bh(void);
- unsigned long rcu_batches_started_sched(void);
- unsigned long rcu_batches_completed(void);
--unsigned long rcu_batches_completed_bh(void);
- unsigned long rcu_batches_completed_sched(void);
- void show_rcu_gp_kthreads(void);
-
- void rcu_force_quiescent_state(void);
--void rcu_bh_force_quiescent_state(void);
- void rcu_sched_force_quiescent_state(void);
-
- void exit_rcu(void);
-@@ -100,6 +106,14 @@
-
- bool rcu_is_watching(void);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+void rcu_bh_force_quiescent_state(void);
-+unsigned long rcu_batches_completed_bh(void);
-+#else
-+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
-+# define rcu_batches_completed_bh rcu_batches_completed
-+#endif
-+
- void rcu_all_qs(void);
-
- #endif /* __LINUX_RCUTREE_H */
-diff -Nur linux-4.1.13.orig/include/linux/rtmutex.h linux-4.1.13/include/linux/rtmutex.h
---- linux-4.1.13.orig/include/linux/rtmutex.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/rtmutex.h 2015-11-29 09:23:09.609614185 +0100
-@@ -14,10 +14,14 @@
-
- #include <linux/linkage.h>
- #include <linux/rbtree.h>
--#include <linux/spinlock_types.h>
-+#include <linux/spinlock_types_raw.h>
-
- extern int max_lock_depth; /* for sysctl */
-
-+#ifdef CONFIG_DEBUG_MUTEXES
-+#include <linux/debug_locks.h>
-+#endif
-+
- /**
- * The rt_mutex structure
- *
-@@ -31,8 +35,8 @@
- struct rb_root waiters;
- struct rb_node *waiters_leftmost;
- struct task_struct *owner;
--#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
-+#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *name, *file;
- int line;
- void *magic;
-@@ -55,22 +59,33 @@
- # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
- #endif
-
-+# define rt_mutex_init(mutex) \
-+ do { \
-+ raw_spin_lock_init(&(mutex)->wait_lock); \
-+ __rt_mutex_init(mutex, #mutex); \
-+ } while (0)
-+
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- , .name = #mutexname, .file = __FILE__, .line = __LINE__
--# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
- #else
- # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
--# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
- # define rt_mutex_debug_task_free(t) do { } while (0)
- #endif
-
--#define __RT_MUTEX_INITIALIZER(mutexname) \
-- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
-+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT \
- , .owner = NULL \
-- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
-+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-+
-+#define __RT_MUTEX_INITIALIZER(mutexname) \
-+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
-+
-+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
-+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
-+ , .save_state = 1 }
-
- #define DEFINE_RT_MUTEX(mutexname) \
- struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-@@ -91,6 +106,7 @@
-
- extern void rt_mutex_lock(struct rt_mutex *lock);
- extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
-
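rt_mutex_init() now always initializes the raw wait_lock, and rt_mutex_lock_killable() is added to the public API. A minimal sketch of a killable acquisition, not part of the patch; conf_lock and the update are hypothetical:

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(conf_lock);	/* hypothetical */

	static int update_conf(void)
	{
		int ret = rt_mutex_lock_killable(&conf_lock);	/* nonzero if a fatal signal arrived */

		if (ret)
			return ret;
		/* ... update the configuration ... */
		rt_mutex_unlock(&conf_lock);
		return 0;
	}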
-diff -Nur linux-4.1.13.orig/include/linux/rwlock_rt.h linux-4.1.13/include/linux/rwlock_rt.h
---- linux-4.1.13.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/rwlock_rt.h 2015-11-29 09:23:09.609614185 +0100
-@@ -0,0 +1,99 @@
-+#ifndef __LINUX_RWLOCK_RT_H
-+#define __LINUX_RWLOCK_RT_H
-+
-+#ifndef __LINUX_SPINLOCK_H
-+#error Do not include directly. Use spinlock.h
-+#endif
-+
-+#define rwlock_init(rwl) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ rt_mutex_init(&(rwl)->lock); \
-+ __rt_rwlock_init(rwl, #rwl, &__key); \
-+} while (0)
-+
-+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
-+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
-+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
-+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
-+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
-+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
-+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
-+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
-+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
-+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
-+
-+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
-+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
-+
-+#define write_trylock_irqsave(lock, flags) \
-+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
-+
-+#define read_lock_irqsave(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = rt_read_lock_irqsave(lock); \
-+ } while (0)
-+
-+#define write_lock_irqsave(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = rt_write_lock_irqsave(lock); \
-+ } while (0)
-+
-+#define read_lock(lock) rt_read_lock(lock)
-+
-+#define read_lock_bh(lock) \
-+ do { \
-+ local_bh_disable(); \
-+ rt_read_lock(lock); \
-+ } while (0)
-+
-+#define read_lock_irq(lock) read_lock(lock)
-+
-+#define write_lock(lock) rt_write_lock(lock)
-+
-+#define write_lock_bh(lock) \
-+ do { \
-+ local_bh_disable(); \
-+ rt_write_lock(lock); \
-+ } while (0)
-+
-+#define write_lock_irq(lock) write_lock(lock)
-+
-+#define read_unlock(lock) rt_read_unlock(lock)
-+
-+#define read_unlock_bh(lock) \
-+ do { \
-+ rt_read_unlock(lock); \
-+ local_bh_enable(); \
-+ } while (0)
-+
-+#define read_unlock_irq(lock) read_unlock(lock)
-+
-+#define write_unlock(lock) rt_write_unlock(lock)
-+
-+#define write_unlock_bh(lock) \
-+ do { \
-+ rt_write_unlock(lock); \
-+ local_bh_enable(); \
-+ } while (0)
-+
-+#define write_unlock_irq(lock) write_unlock(lock)
-+
-+#define read_unlock_irqrestore(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ (void) flags; \
-+ rt_read_unlock(lock); \
-+ } while (0)
-+
-+#define write_unlock_irqrestore(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ (void) flags; \
-+ rt_write_unlock(lock); \
-+ } while (0)
-+
-+#endif
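The new rwlock_rt.h maps the whole rwlock API onto the rtmutex-based lock; note that the _irqsave variants keep the flags argument only for API compatibility and no longer disable interrupts. A sketch, with tbl_lock and the accesses hypothetical:

	#include <linux/spinlock.h>	/* pulls in rwlock_rt.h on RT */

	static DEFINE_RWLOCK(tbl_lock);	/* hypothetical */

	static void tbl_update(void)
	{
		unsigned long flags;

		write_lock_irqsave(&tbl_lock, flags);	/* no IRQ-off on RT */
		/* ... modify the table ... */
		write_unlock_irqrestore(&tbl_lock, flags);
	}

	static void tbl_lookup(void)
	{
		read_lock(&tbl_lock);	/* rt_read_lock(): single reader, may recurse */
		/* ... read the table ... */
		read_unlock(&tbl_lock);
	}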
-diff -Nur linux-4.1.13.orig/include/linux/rwlock_types.h linux-4.1.13/include/linux/rwlock_types.h
---- linux-4.1.13.orig/include/linux/rwlock_types.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/rwlock_types.h 2015-11-29 09:23:09.609614185 +0100
-@@ -1,6 +1,10 @@
- #ifndef __LINUX_RWLOCK_TYPES_H
- #define __LINUX_RWLOCK_TYPES_H
-
-+#if !defined(__LINUX_SPINLOCK_TYPES_H)
-+# error "Do not include directly, include spinlock_types.h"
-+#endif
-+
- /*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
-@@ -43,6 +47,7 @@
- RW_DEP_MAP_INIT(lockname) }
- #endif
-
--#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
-+#define DEFINE_RWLOCK(name) \
-+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-
- #endif /* __LINUX_RWLOCK_TYPES_H */
-diff -Nur linux-4.1.13.orig/include/linux/rwlock_types_rt.h linux-4.1.13/include/linux/rwlock_types_rt.h
---- linux-4.1.13.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/rwlock_types_rt.h 2015-11-29 09:23:09.609614185 +0100
-@@ -0,0 +1,33 @@
-+#ifndef __LINUX_RWLOCK_TYPES_RT_H
-+#define __LINUX_RWLOCK_TYPES_RT_H
-+
-+#ifndef __LINUX_SPINLOCK_TYPES_H
-+#error "Do not include directly. Include spinlock_types.h instead"
-+#endif
-+
-+/*
-+ * rwlocks - rtmutex which allows single reader recursion
-+ */
-+typedef struct {
-+ struct rt_mutex lock;
-+ int read_depth;
-+ unsigned int break_lock;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+} rwlock_t;
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-+#else
-+# define RW_DEP_MAP_INIT(lockname)
-+#endif
-+
-+#define __RW_LOCK_UNLOCKED(name) \
-+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
-+ RW_DEP_MAP_INIT(name) }
-+
-+#define DEFINE_RWLOCK(name) \
-+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/rwsem.h linux-4.1.13/include/linux/rwsem.h
---- linux-4.1.13.orig/include/linux/rwsem.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/rwsem.h 2015-11-29 09:23:09.609614185 +0100
-@@ -18,6 +18,10 @@
- #include <linux/osq_lock.h>
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+#include <linux/rwsem_rt.h>
-+#else /* PREEMPT_RT_FULL */
-+
- struct rw_semaphore;
-
- #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -177,4 +181,6 @@
- # define up_read_non_owner(sem) up_read(sem)
- #endif
-
-+#endif /* !PREEMPT_RT_FULL */
-+
- #endif /* _LINUX_RWSEM_H */
-diff -Nur linux-4.1.13.orig/include/linux/rwsem_rt.h linux-4.1.13/include/linux/rwsem_rt.h
---- linux-4.1.13.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/rwsem_rt.h 2015-11-29 09:23:09.609614185 +0100
-@@ -0,0 +1,140 @@
-+#ifndef _LINUX_RWSEM_RT_H
-+#define _LINUX_RWSEM_RT_H
-+
-+#ifndef _LINUX_RWSEM_H
-+#error "Include rwsem.h"
-+#endif
-+
-+/*
-+ * RW-semaphores are an rtmutex plus a reader-depth count.
-+ *
-+ * Note that the semantics are different from the usual
-+ * Linux rw-sems: in PREEMPT_RT mode we do not allow
-+ * multiple readers to hold the lock at once; we only allow
-+ * a read-lock owner to read-lock recursively. This is
-+ * better for latency, makes the implementation inherently
-+ * fair and makes it simpler as well.
-+ */
-+
-+#include <linux/rtmutex.h>
-+
-+struct rw_semaphore {
-+ struct rt_mutex lock;
-+ int read_depth;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
-+
-+#define __RWSEM_INITIALIZER(name) \
-+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
-+ RW_DEP_MAP_INIT(name) }
-+
-+#define DECLARE_RWSEM(lockname) \
-+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
-+
-+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
-+ struct lock_class_key *key);
-+
-+#define __rt_init_rwsem(sem, name, key) \
-+ do { \
-+ rt_mutex_init(&(sem)->lock); \
-+ __rt_rwsem_init((sem), (name), (key));\
-+ } while (0)
-+
-+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
-+
-+# define rt_init_rwsem(sem) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ __rt_init_rwsem((sem), #sem, &__key); \
-+} while (0)
-+
-+extern void rt_down_write(struct rw_semaphore *rwsem);
-+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
-+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
-+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
-+ struct lockdep_map *nest);
-+extern void rt_down_read(struct rw_semaphore *rwsem);
-+extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
-+extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-+extern void __rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_write(struct rw_semaphore *rwsem);
-+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
-+
-+#define init_rwsem(sem) rt_init_rwsem(sem)
-+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
-+
-+static inline int rwsem_is_contended(struct rw_semaphore *sem)
-+{
-+ /* rt_mutex_has_waiters() */
-+ return !RB_EMPTY_ROOT(&sem->lock.waiters);
-+}
-+
-+static inline void down_read(struct rw_semaphore *sem)
-+{
-+ rt_down_read(sem);
-+}
-+
-+static inline int down_read_trylock(struct rw_semaphore *sem)
-+{
-+ return rt_down_read_trylock(sem);
-+}
-+
-+static inline void down_write(struct rw_semaphore *sem)
-+{
-+ rt_down_write(sem);
-+}
-+
-+static inline int down_write_trylock(struct rw_semaphore *sem)
-+{
-+ return rt_down_write_trylock(sem);
-+}
-+
-+static inline void __up_read(struct rw_semaphore *sem)
-+{
-+ __rt_up_read(sem);
-+}
-+
-+static inline void up_read(struct rw_semaphore *sem)
-+{
-+ rt_up_read(sem);
-+}
-+
-+static inline void up_write(struct rw_semaphore *sem)
-+{
-+ rt_up_write(sem);
-+}
-+
-+static inline void downgrade_write(struct rw_semaphore *sem)
-+{
-+ rt_downgrade_write(sem);
-+}
-+
-+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-+{
-+ return rt_down_read_nested(sem, subclass);
-+}
-+
-+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-+{
-+ rt_down_write_nested(sem, subclass);
-+}
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+static inline void down_write_nest_lock(struct rw_semaphore *sem,
-+ struct rw_semaphore *nest_lock)
-+{
-+ rt_down_write_nested_lock(sem, &nest_lock->dep_map);
-+}
-+
-+#else
-+
-+static inline void down_write_nest_lock(struct rw_semaphore *sem,
-+ struct rw_semaphore *nest_lock)
-+{
-+ rt_down_write_nested_lock(sem, NULL);
-+}
-+#endif
-+#endif
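Because the RT rw-semaphore allows only one reader, which may recurse, nested down_read() by the owning task works while any second reader serializes. An illustrative sketch assuming a hypothetical map_sem:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(map_sem);	/* hypothetical */

	static void walk_map(void)
	{
		down_read(&map_sem);
		/* recursive read-lock by the owner bumps read_depth;
		   any other task would block here until we are done */
		down_read(&map_sem);
		/* ... walk ... */
		up_read(&map_sem);
		up_read(&map_sem);
	}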
-diff -Nur linux-4.1.13.orig/include/linux/sched.h linux-4.1.13/include/linux/sched.h
---- linux-4.1.13.orig/include/linux/sched.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/sched.h 2015-11-29 09:23:09.617613651 +0100
-@@ -26,6 +26,7 @@
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
- #include <linux/preempt_mask.h>
-+#include <asm/kmap_types.h>
-
- #include <asm/page.h>
- #include <asm/ptrace.h>
-@@ -234,10 +235,7 @@
- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
-
--#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
- #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
--#define task_is_stopped_or_traced(task) \
-- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
- #define task_contributes_to_load(task) \
- ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0)
-@@ -302,6 +300,11 @@
-
- #endif
-
-+#define __set_current_state_no_track(state_value) \
-+ do { current->state = (state_value); } while (0)
-+#define set_current_state_no_track(state_value) \
-+ set_mb(current->state, (state_value))
-+
- /* Task command name length */
- #define TASK_COMM_LEN 16
-
-@@ -900,6 +903,50 @@
- #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
- /*
-+ * Wake-queues are lists of tasks with a pending wakeup, whose
-+ * callers have already marked the task as woken internally,
-+ * and can thus carry on. A common use case is being able to
-+ * do the wakeups once the corresponding user lock has been
-+ * released.
-+ *
-+ * We hold reference to each task in the list across the wakeup,
-+ * thus guaranteeing that the memory is still valid by the time
-+ * the actual wakeups are performed in wake_up_q().
-+ *
-+ * One per task suffices, because there's never a need for a task to be
-+ * in two wake queues simultaneously; it is forbidden to abandon a task
-+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
-+ * already in a wake queue, the wakeup will happen soon and the second
-+ * waker can just skip it.
-+ *
-+ * The WAKE_Q macro declares and initializes the list head.
-+ * wake_up_q() does NOT reinitialize the list; it's expected to be
-+ * called near the end of a function, where the fact that the queue is
-+ * not used again will be easy to see by inspection.
-+ *
-+ * Note that this can cause spurious wakeups. schedule() callers
-+ * must ensure the call is done inside a loop, confirming that the
-+ * wakeup condition has in fact occurred.
-+ */
-+struct wake_q_node {
-+ struct wake_q_node *next;
-+};
-+
-+struct wake_q_head {
-+ struct wake_q_node *first;
-+ struct wake_q_node **lastp;
-+};
-+
-+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-+
-+#define WAKE_Q(name) \
-+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
-+
-+extern void wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task);
-+extern void wake_up_q(struct wake_q_head *head);
-+
-+/*
- * sched-domains (multiprocessor balancing) declarations:
- */
- #ifdef CONFIG_SMP
-@@ -1291,6 +1338,7 @@
-
- struct task_struct {
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
-+ volatile long saved_state; /* saved state for "spinlock sleepers" */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
-@@ -1327,6 +1375,12 @@
- #endif
-
- unsigned int policy;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int migrate_disable;
-+# ifdef CONFIG_SCHED_DEBUG
-+ int migrate_disable_atomic;
-+# endif
-+#endif
- int nr_cpus_allowed;
- cpumask_t cpus_allowed;
-
-@@ -1434,7 +1488,8 @@
- struct cputime prev_cputime;
- #endif
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-- seqlock_t vtime_seqlock;
-+ raw_spinlock_t vtime_lock;
-+ seqcount_t vtime_seq;
- unsigned long long vtime_snap;
- enum {
- VTIME_SLEEPING = 0,
-@@ -1450,6 +1505,9 @@
-
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *posix_timer_list;
-+#endif
-
- /* process credentials */
- const struct cred __rcu *real_cred; /* objective and real subjective task
-@@ -1482,10 +1540,15 @@
- /* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-+ struct sigqueue *sigqueue_cache;
-
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
- struct sigpending pending;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /* TODO: move me into ->restart_block ? */
-+ struct siginfo forced_info;
-+#endif
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
-@@ -1511,6 +1574,8 @@
- /* Protection of the PI data structures: */
- raw_spinlock_t pi_lock;
-
-+ struct wake_q_node wake_q;
-+
- #ifdef CONFIG_RT_MUTEXES
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
-@@ -1705,6 +1770,12 @@
- unsigned long trace;
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ u64 preempt_timestamp_hist;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ long timer_offset;
-+#endif
-+#endif
- #endif /* CONFIG_TRACING */
- #ifdef CONFIG_MEMCG
- struct memcg_oom_info {
-@@ -1721,14 +1792,23 @@
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head put_rcu;
-+ int softirq_nestcnt;
-+ unsigned int softirqs_raised;
-+#endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
-+ int kmap_idx;
-+ pte_t kmap_pte[KM_TYPE_NR];
-+# endif
-+#endif
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
-+ int pagefault_disabled;
- };
-
--/* Future-safe accessor for struct task_struct's cpus_allowed. */
--#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
--
- #define TNF_MIGRATED 0x01
- #define TNF_NO_GROUP 0x02
- #define TNF_SHARED 0x04
-@@ -1917,6 +1997,15 @@
- extern void free_task(struct task_struct *tsk);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __put_task_struct_cb(struct rcu_head *rhp);
-+
-+static inline void put_task_struct(struct task_struct *t)
-+{
-+ if (atomic_dec_and_test(&t->usage))
-+ call_rcu(&t->put_rcu, __put_task_struct_cb);
-+}
-+#else
- extern void __put_task_struct(struct task_struct *t);
-
- static inline void put_task_struct(struct task_struct *t)
-@@ -1924,6 +2013,7 @@
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
-+#endif
-
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- extern void task_cputime(struct task_struct *t,
-@@ -1962,6 +2052,7 @@
- /*
- * Per process flags
- */
-+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
- #define PF_EXITING 0x00000004 /* getting shut down */
- #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
- #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
-@@ -2126,6 +2217,10 @@
-
- extern int set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask);
-+int migrate_me(void);
-+void tell_sched_cpu_down_begin(int cpu);
-+void tell_sched_cpu_down_done(int cpu);
-+
- #else
- static inline void do_set_cpus_allowed(struct task_struct *p,
- const struct cpumask *new_mask)
-@@ -2138,6 +2233,9 @@
- return -EINVAL;
- return 0;
- }
-+static inline int migrate_me(void) { return 0; }
-+static inline void tell_sched_cpu_down_begin(int cpu) { }
-+static inline void tell_sched_cpu_down_done(int cpu) { }
- #endif
-
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -2354,6 +2452,7 @@
-
- extern int wake_up_state(struct task_struct *tsk, unsigned int state);
- extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct * tsk);
- extern void wake_up_new_task(struct task_struct *tsk);
- #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
-@@ -2470,12 +2569,24 @@
-
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
- {
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
-+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-+}
-+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
-+#endif
-+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
-@@ -2787,6 +2898,43 @@
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-+}
-+
-+static inline int need_resched_lazy(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline int need_resched_now(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED);
-+}
-+
-+#else
-+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-+static inline int need_resched_lazy(void) { return 0; }
-+
-+static inline int need_resched_now(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED);
-+}
-+
-+#endif
-+
- static inline int restart_syscall(void)
- {
- set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -2818,6 +2966,51 @@
- return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
- }
-
-+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
-+{
-+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
-+ return true;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
-+ return true;
-+#endif
-+ return false;
-+}
-+
-+static inline bool task_is_stopped_or_traced(struct task_struct *task)
-+{
-+ bool traced_stopped;
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ traced_stopped = __task_is_stopped_or_traced(task);
-+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+#else
-+ traced_stopped = __task_is_stopped_or_traced(task);
-+#endif
-+ return traced_stopped;
-+}
-+
-+static inline bool task_is_traced(struct task_struct *task)
-+{
-+ bool traced = false;
-+
-+ if (task->state & __TASK_TRACED)
-+ return true;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /* in case the task is sleeping on tasklist_lock */
-+ raw_spin_lock_irq(&task->pi_lock);
-+ if (task->state & __TASK_TRACED)
-+ traced = true;
-+ else if (task->saved_state & __TASK_TRACED)
-+ traced = true;
-+ raw_spin_unlock_irq(&task->pi_lock);
-+#endif
-+ return traced;
-+}
-+
- /*
- * cond_resched() and cond_resched_lock(): latency reduction via
- * explicit rescheduling in places that are safe. The return
-@@ -2839,12 +3032,16 @@
- __cond_resched_lock(lock); \
- })
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- extern int __cond_resched_softirq(void);
-
- #define cond_resched_softirq() ({ \
- ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
- })
-+#else
-+# define cond_resched_softirq() cond_resched()
-+#endif
-
- static inline void cond_resched_rcu(void)
- {
-@@ -3011,6 +3208,26 @@
-
- #endif /* CONFIG_SMP */
-
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return p->migrate_disable;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (p->migrate_disable)
-+ return cpumask_of(task_cpu(p));
-+#endif
-+
-+ return &p->cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-
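The wake-queue machinery added to sched.h lets a lock implementation collect wakeups under a raw lock and issue them only after the lock is dropped, exactly as the comment block describes. An illustrative sketch; struct my_lock and pick_next_waiter() are hypothetical:

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	struct my_lock {
		raw_spinlock_t lock;
		/* ... waiter list ... */
	};

	static struct task_struct *pick_next_waiter(struct my_lock *l);	/* hypothetical */

	static void my_lock_release(struct my_lock *l)
	{
		WAKE_Q(wake_q);		/* declares and initializes the list head */
		struct task_struct *t;

		raw_spin_lock(&l->lock);
		while ((t = pick_next_waiter(l)))
			wake_q_add(&wake_q, t);	/* holds a task reference across the wakeup */
		raw_spin_unlock(&l->lock);

		wake_up_q(&wake_q);	/* wakeups happen after the lock is dropped */
	}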
-diff -Nur linux-4.1.13.orig/include/linux/seqlock.h linux-4.1.13/include/linux/seqlock.h
---- linux-4.1.13.orig/include/linux/seqlock.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/seqlock.h 2015-11-29 09:23:09.617613651 +0100
-@@ -219,20 +219,30 @@
- return __read_seqcount_retry(s, start);
- }
-
--
--
--static inline void raw_write_seqcount_begin(seqcount_t *s)
-+static inline void __raw_write_seqcount_begin(seqcount_t *s)
- {
- s->sequence++;
- smp_wmb();
- }
-
--static inline void raw_write_seqcount_end(seqcount_t *s)
-+static inline void raw_write_seqcount_begin(seqcount_t *s)
-+{
-+ preempt_disable_rt();
-+ __raw_write_seqcount_begin(s);
-+}
-+
-+static inline void __raw_write_seqcount_end(seqcount_t *s)
- {
- smp_wmb();
- s->sequence++;
- }
-
-+static inline void raw_write_seqcount_end(seqcount_t *s)
-+{
-+ __raw_write_seqcount_end(s);
-+ preempt_enable_rt();
-+}
-+
- /*
- * raw_write_seqcount_latch - redirect readers to even/odd copy
- * @s: pointer to seqcount_t
-@@ -305,10 +315,32 @@
- /*
- * Read side functions for starting and finalizing a read side section.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- static inline unsigned read_seqbegin(const seqlock_t *sl)
- {
- return read_seqcount_begin(&sl->seqcount);
- }
-+#else
-+/*
-+ * Starvation safe read side for RT
-+ */
-+static inline unsigned read_seqbegin(seqlock_t *sl)
-+{
-+ unsigned ret;
-+
-+repeat:
-+ ret = ACCESS_ONCE(sl->seqcount.sequence);
-+ if (unlikely(ret & 1)) {
-+ /*
-+ * Take the lock and let the writer proceed (i.e. possibly
-+ * boost it); otherwise we could loop here forever.
-+ */
-+ spin_unlock_wait(&sl->lock);
-+ goto repeat;
-+ }
-+ return ret;
-+}
-+#endif
-
- static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
- {
-@@ -323,36 +355,36 @@
- static inline void write_seqlock(seqlock_t *sl)
- {
- spin_lock(&sl->lock);
-- write_seqcount_begin(&sl->seqcount);
-+ __raw_write_seqcount_begin(&sl->seqcount);
- }
-
- static inline void write_sequnlock(seqlock_t *sl)
- {
-- write_seqcount_end(&sl->seqcount);
-+ __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock(&sl->lock);
- }
-
- static inline void write_seqlock_bh(seqlock_t *sl)
- {
- spin_lock_bh(&sl->lock);
-- write_seqcount_begin(&sl->seqcount);
-+ __raw_write_seqcount_begin(&sl->seqcount);
- }
-
- static inline void write_sequnlock_bh(seqlock_t *sl)
- {
-- write_seqcount_end(&sl->seqcount);
-+ __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_bh(&sl->lock);
- }
-
- static inline void write_seqlock_irq(seqlock_t *sl)
- {
- spin_lock_irq(&sl->lock);
-- write_seqcount_begin(&sl->seqcount);
-+ __raw_write_seqcount_begin(&sl->seqcount);
- }
-
- static inline void write_sequnlock_irq(seqlock_t *sl)
- {
-- write_seqcount_end(&sl->seqcount);
-+ __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_irq(&sl->lock);
- }
-
-@@ -361,7 +393,7 @@
- unsigned long flags;
-
- spin_lock_irqsave(&sl->lock, flags);
-- write_seqcount_begin(&sl->seqcount);
-+ __raw_write_seqcount_begin(&sl->seqcount);
- return flags;
- }
-
-@@ -371,7 +403,7 @@
- static inline void
- write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
- {
-- write_seqcount_end(&sl->seqcount);
-+ __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_irqrestore(&sl->lock, flags);
- }
-
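The RT variant of read_seqbegin() above keeps readers from looping forever behind a preempted writer: instead of spinning on the sequence count, it waits on the lock via spin_unlock_wait(), which lets (and possibly boosts) the writer to finish. Reader loops are unchanged; a sketch reading a hypothetical 64-bit counter:

	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(cnt_lock);	/* hypothetical */
	static u64 counter;

	static u64 read_counter(void)
	{
		unsigned int seq;
		u64 v;

		do {
			seq = read_seqbegin(&cnt_lock);	/* on RT: waits out an active writer */
			v = counter;
		} while (read_seqretry(&cnt_lock, seq));
		return v;
	}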
-diff -Nur linux-4.1.13.orig/include/linux/signal.h linux-4.1.13/include/linux/signal.h
---- linux-4.1.13.orig/include/linux/signal.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/signal.h 2015-11-29 09:23:09.617613651 +0100
-@@ -218,6 +218,7 @@
- }
-
- extern void flush_sigqueue(struct sigpending *queue);
-+extern void flush_task_sigqueue(struct task_struct *tsk);
-
- /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
- static inline int valid_signal(unsigned long sig)
-diff -Nur linux-4.1.13.orig/include/linux/skbuff.h linux-4.1.13/include/linux/skbuff.h
---- linux-4.1.13.orig/include/linux/skbuff.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/skbuff.h 2015-11-29 09:23:09.617613651 +0100
-@@ -187,6 +187,7 @@
-
- __u32 qlen;
- spinlock_t lock;
-+ raw_spinlock_t raw_lock;
- };
-
- struct sk_buff;
-@@ -1336,6 +1337,12 @@
- __skb_queue_head_init(list);
- }
-
-+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
-+{
-+ raw_spin_lock_init(&list->raw_lock);
-+ __skb_queue_head_init(list);
-+}
-+
- static inline void skb_queue_head_init_class(struct sk_buff_head *list,
- struct lock_class_key *class)
- {
-diff -Nur linux-4.1.13.orig/include/linux/smp.h linux-4.1.13/include/linux/smp.h
---- linux-4.1.13.orig/include/linux/smp.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/smp.h 2015-11-29 09:23:09.617613651 +0100
-@@ -185,6 +185,9 @@
- #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
- #define put_cpu() preempt_enable()
-
-+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-+#define put_cpu_light() migrate_enable()
-+
- /*
- * Callback to arch code if there's nosmp or maxcpus=0 on the
- * boot command line:
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_api_smp.h linux-4.1.13/include/linux/spinlock_api_smp.h
---- linux-4.1.13.orig/include/linux/spinlock_api_smp.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_api_smp.h 2015-11-29 09:23:09.617613651 +0100
-@@ -189,6 +189,8 @@
- return 0;
- }
-
--#include <linux/rwlock_api_smp.h>
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# include <linux/rwlock_api_smp.h>
-+#endif
-
- #endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -Nur linux-4.1.13.orig/include/linux/spinlock.h linux-4.1.13/include/linux/spinlock.h
---- linux-4.1.13.orig/include/linux/spinlock.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock.h 2015-11-29 09:23:09.617613651 +0100
-@@ -281,7 +281,11 @@
- #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
-
- /* Include rwlock functions */
--#include <linux/rwlock.h>
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# include <linux/rwlock_rt.h>
-+#else
-+# include <linux/rwlock.h>
-+#endif
-
- /*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -292,6 +296,10 @@
- # include <linux/spinlock_api_up.h>
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# include <linux/spinlock_rt.h>
-+#else /* PREEMPT_RT_FULL */
-+
- /*
- * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
- */
-@@ -426,4 +434,6 @@
- #define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-
-+#endif /* !PREEMPT_RT_FULL */
-+
- #endif /* __LINUX_SPINLOCK_H */
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_rt.h linux-4.1.13/include/linux/spinlock_rt.h
---- linux-4.1.13.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_rt.h 2015-11-29 09:23:09.617613651 +0100
-@@ -0,0 +1,174 @@
-+#ifndef __LINUX_SPINLOCK_RT_H
-+#define __LINUX_SPINLOCK_RT_H
-+
-+#ifndef __LINUX_SPINLOCK_H
-+#error Do not include directly. Use spinlock.h
-+#endif
-+
-+#include <linux/bug.h>
-+
-+extern void
-+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
-+
-+#define spin_lock_init(slock) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ rt_mutex_init(&(slock)->lock); \
-+ __rt_spin_lock_init(slock, #slock, &__key); \
-+} while (0)
-+
-+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
-+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
-+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
-+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
-+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
-+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
-+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
-+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
-+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
-+
-+/*
-+ * lockdep-less calls, for derived types like rwlock:
-+ * (for trylock they can use rt_mutex_trylock() directly).
-+ */
-+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
-+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
-+
-+#define spin_lock(lock) \
-+ do { \
-+ migrate_disable(); \
-+ rt_spin_lock(lock); \
-+ } while (0)
-+
-+#define spin_lock_bh(lock) \
-+ do { \
-+ local_bh_disable(); \
-+ migrate_disable(); \
-+ rt_spin_lock(lock); \
-+ } while (0)
-+
-+#define spin_lock_irq(lock) spin_lock(lock)
-+
-+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
-+
-+#define spin_trylock(lock) \
-+({ \
-+ int __locked; \
-+ migrate_disable(); \
-+ __locked = spin_do_trylock(lock); \
-+ if (!__locked) \
-+ migrate_enable(); \
-+ __locked; \
-+})
-+
-+#ifdef CONFIG_LOCKDEP
-+# define spin_lock_nested(lock, subclass) \
-+ do { \
-+ migrate_disable(); \
-+ rt_spin_lock_nested(lock, subclass); \
-+ } while (0)
-+
-+#define spin_lock_bh_nested(lock, subclass) \
-+ do { \
-+ local_bh_disable(); \
-+ migrate_disable(); \
-+ rt_spin_lock_nested(lock, subclass); \
-+ } while (0)
-+
-+# define spin_lock_irqsave_nested(lock, flags, subclass) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = 0; \
-+ migrate_disable(); \
-+ rt_spin_lock_nested(lock, subclass); \
-+ } while (0)
-+#else
-+# define spin_lock_nested(lock, subclass) spin_lock(lock)
-+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
-+
-+# define spin_lock_irqsave_nested(lock, flags, subclass) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = 0; \
-+ spin_lock(lock); \
-+ } while (0)
-+#endif
-+
-+#define spin_lock_irqsave(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = 0; \
-+ spin_lock(lock); \
-+ } while (0)
-+
-+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
-+{
-+ unsigned long flags = 0;
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+ flags = rt_spin_lock_trace_flags(lock);
-+#else
-+ spin_lock(lock); /* lock_local */
-+#endif
-+ return flags;
-+}
-+
-+/* FIXME: we need rt_spin_lock_nest_lock */
-+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-+
-+#define spin_unlock(lock) \
-+ do { \
-+ rt_spin_unlock(lock); \
-+ migrate_enable(); \
-+ } while (0)
-+
-+#define spin_unlock_bh(lock) \
-+ do { \
-+ rt_spin_unlock(lock); \
-+ migrate_enable(); \
-+ local_bh_enable(); \
-+ } while (0)
-+
-+#define spin_unlock_irq(lock) spin_unlock(lock)
-+
-+#define spin_unlock_irqrestore(lock, flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ (void) flags; \
-+ spin_unlock(lock); \
-+ } while (0)
-+
-+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
-+#define spin_trylock_irq(lock) spin_trylock(lock)
-+
-+#define spin_trylock_irqsave(lock, flags) \
-+ rt_spin_trylock_irqsave(lock, &(flags))
-+
-+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
-+
-+#ifdef CONFIG_GENERIC_LOCKBREAK
-+# define spin_is_contended(lock) ((lock)->break_lock)
-+#else
-+# define spin_is_contended(lock) (((void)(lock), 0))
-+#endif
-+
-+static inline int spin_can_lock(spinlock_t *lock)
-+{
-+ return !rt_mutex_is_locked(&lock->lock);
-+}
-+
-+static inline int spin_is_locked(spinlock_t *lock)
-+{
-+ return rt_mutex_is_locked(&lock->lock);
-+}
-+
-+static inline void assert_spin_locked(spinlock_t *lock)
-+{
-+ BUG_ON(!spin_is_locked(lock));
-+}
-+
-+#define atomic_dec_and_lock(atomic, lock) \
-+ atomic_dec_and_spin_lock(atomic, lock)
-+
-+#endif
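On RT, spinlock_t becomes a sleeping rtmutex-based lock: spin_lock_irq() and spin_lock_irqsave() no longer disable interrupts (flags is forced to 0), so code that genuinely needs IRQs off must use raw_spinlock_t. The caller pattern itself is unchanged; dev_lock is hypothetical:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(dev_lock);	/* hypothetical; an rtmutex underneath on RT */

	static void dev_update(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev_lock, flags);	/* RT: migrate_disable() + rt_spin_lock() */
		/* ... critical section, preemptible on RT ... */
		spin_unlock_irqrestore(&dev_lock, flags);
	}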
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_types.h linux-4.1.13/include/linux/spinlock_types.h
---- linux-4.1.13.orig/include/linux/spinlock_types.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_types.h 2015-11-29 09:23:09.617613651 +0100
-@@ -9,80 +9,15 @@
- * Released under the General Public License (GPL).
- */
-
--#if defined(CONFIG_SMP)
--# include <asm/spinlock_types.h>
--#else
--# include <linux/spinlock_types_up.h>
--#endif
--
--#include <linux/lockdep.h>
--
--typedef struct raw_spinlock {
-- arch_spinlock_t raw_lock;
--#ifdef CONFIG_GENERIC_LOCKBREAK
-- unsigned int break_lock;
--#endif
--#ifdef CONFIG_DEBUG_SPINLOCK
-- unsigned int magic, owner_cpu;
-- void *owner;
--#endif
--#ifdef CONFIG_DEBUG_LOCK_ALLOC
-- struct lockdep_map dep_map;
--#endif
--} raw_spinlock_t;
--
--#define SPINLOCK_MAGIC 0xdead4ead
--
--#define SPINLOCK_OWNER_INIT ((void *)-1L)
--
--#ifdef CONFIG_DEBUG_LOCK_ALLOC
--# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
--#else
--# define SPIN_DEP_MAP_INIT(lockname)
--#endif
-+#include <linux/spinlock_types_raw.h>
-
--#ifdef CONFIG_DEBUG_SPINLOCK
--# define SPIN_DEBUG_INIT(lockname) \
-- .magic = SPINLOCK_MAGIC, \
-- .owner_cpu = -1, \
-- .owner = SPINLOCK_OWNER_INIT,
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# include <linux/spinlock_types_nort.h>
-+# include <linux/rwlock_types.h>
- #else
--# define SPIN_DEBUG_INIT(lockname)
-+# include <linux/rtmutex.h>
-+# include <linux/spinlock_types_rt.h>
-+# include <linux/rwlock_types_rt.h>
- #endif
-
--#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
-- { \
-- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
-- SPIN_DEBUG_INIT(lockname) \
-- SPIN_DEP_MAP_INIT(lockname) }
--
--#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
-- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
--
--#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
--
--typedef struct spinlock {
-- union {
-- struct raw_spinlock rlock;
--
--#ifdef CONFIG_DEBUG_LOCK_ALLOC
--# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
-- struct {
-- u8 __padding[LOCK_PADSIZE];
-- struct lockdep_map dep_map;
-- };
--#endif
-- };
--} spinlock_t;
--
--#define __SPIN_LOCK_INITIALIZER(lockname) \
-- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
--
--#define __SPIN_LOCK_UNLOCKED(lockname) \
-- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
--
--#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
--
--#include <linux/rwlock_types.h>
--
- #endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_types_nort.h linux-4.1.13/include/linux/spinlock_types_nort.h
---- linux-4.1.13.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_types_nort.h 2015-11-29 09:23:09.617613651 +0100
-@@ -0,0 +1,33 @@
-+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
-+#define __LINUX_SPINLOCK_TYPES_NORT_H
-+
-+#ifndef __LINUX_SPINLOCK_TYPES_H
-+#error "Do not include directly. Include spinlock_types.h instead"
-+#endif
-+
-+/*
-+ * The non RT version maps spinlocks to raw_spinlocks
-+ */
-+typedef struct spinlock {
-+ union {
-+ struct raw_spinlock rlock;
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
-+ struct {
-+ u8 __padding[LOCK_PADSIZE];
-+ struct lockdep_map dep_map;
-+ };
-+#endif
-+ };
-+} spinlock_t;
-+
-+#define __SPIN_LOCK_INITIALIZER(lockname) \
-+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-+
-+#define __SPIN_LOCK_UNLOCKED(lockname) \
-+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-+
-+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_types_raw.h linux-4.1.13/include/linux/spinlock_types_raw.h
---- linux-4.1.13.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_types_raw.h 2015-11-29 09:23:09.617613651 +0100
-@@ -0,0 +1,56 @@
-+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-+#define __LINUX_SPINLOCK_TYPES_RAW_H
-+
-+#if defined(CONFIG_SMP)
-+# include <asm/spinlock_types.h>
-+#else
-+# include <linux/spinlock_types_up.h>
-+#endif
-+
-+#include <linux/lockdep.h>
-+
-+typedef struct raw_spinlock {
-+ arch_spinlock_t raw_lock;
-+#ifdef CONFIG_GENERIC_LOCKBREAK
-+ unsigned int break_lock;
-+#endif
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+ unsigned int magic, owner_cpu;
-+ void *owner;
-+#endif
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+} raw_spinlock_t;
-+
-+#define SPINLOCK_MAGIC 0xdead4ead
-+
-+#define SPINLOCK_OWNER_INIT ((void *)-1L)
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-+#else
-+# define SPIN_DEP_MAP_INIT(lockname)
-+#endif
-+
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+# define SPIN_DEBUG_INIT(lockname) \
-+ .magic = SPINLOCK_MAGIC, \
-+ .owner_cpu = -1, \
-+ .owner = SPINLOCK_OWNER_INIT,
-+#else
-+# define SPIN_DEBUG_INIT(lockname)
-+#endif
-+
-+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
-+ { \
-+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
-+ SPIN_DEBUG_INIT(lockname) \
-+ SPIN_DEP_MAP_INIT(lockname) }
-+
-+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
-+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-+
-+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/spinlock_types_rt.h linux-4.1.13/include/linux/spinlock_types_rt.h
---- linux-4.1.13.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/spinlock_types_rt.h 2015-11-29 09:23:09.617613651 +0100
-@@ -0,0 +1,51 @@
-+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
-+#define __LINUX_SPINLOCK_TYPES_RT_H
-+
-+#ifndef __LINUX_SPINLOCK_TYPES_H
-+#error "Do not include directly. Include spinlock_types.h instead"
-+#endif
-+
-+#include <linux/cache.h>
-+
-+/*
-+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
-+ */
-+typedef struct spinlock {
-+ struct rt_mutex lock;
-+ unsigned int break_lock;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+} spinlock_t;
-+
-+#ifdef CONFIG_DEBUG_RT_MUTEXES
-+# define __RT_SPIN_INITIALIZER(name) \
-+ { \
-+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ .save_state = 1, \
-+ .file = __FILE__, \
-+ .line = __LINE__ , \
-+ }
-+#else
-+# define __RT_SPIN_INITIALIZER(name) \
-+ { \
-+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ .save_state = 1, \
-+ }
-+#endif
-+
-+/*
-+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
-+*/
-+
-+#define __SPIN_LOCK_UNLOCKED(name) \
-+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
-+ SPIN_DEP_MAP_INIT(name) }
-+
-+#define __DEFINE_SPINLOCK(name) \
-+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
-+
-+#define DEFINE_SPINLOCK(name) \
-+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
-+
-+#endif
-diff -Nur linux-4.1.13.orig/include/linux/srcu.h linux-4.1.13/include/linux/srcu.h
---- linux-4.1.13.orig/include/linux/srcu.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/srcu.h 2015-11-29 09:23:09.617613651 +0100
-@@ -84,10 +84,10 @@
-
- void process_srcu(struct work_struct *work);
-
--#define __SRCU_STRUCT_INIT(name) \
-+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
- { \
- .completed = -300, \
-- .per_cpu_ref = &name##_srcu_array, \
-+ .per_cpu_ref = &pcpu_name, \
- .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
- .running = false, \
- .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
-@@ -104,7 +104,7 @@
- */
- #define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
- #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
- #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-
-diff -Nur linux-4.1.13.orig/include/linux/swap.h linux-4.1.13/include/linux/swap.h
---- linux-4.1.13.orig/include/linux/swap.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/swap.h 2015-11-29 09:23:09.617613651 +0100
-@@ -11,6 +11,7 @@
- #include <linux/fs.h>
- #include <linux/atomic.h>
- #include <linux/page-flags.h>
-+#include <linux/locallock.h>
- #include <asm/page.h>
-
- struct notifier_block;
-@@ -252,7 +253,8 @@
- void *workingset_eviction(struct address_space *mapping, struct page *page);
- bool workingset_refault(void *shadow);
- void workingset_activation(struct page *page);
--extern struct list_lru workingset_shadow_nodes;
-+extern struct list_lru __workingset_shadow_nodes;
-+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
-
- static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
- {
-@@ -296,6 +298,7 @@
-
-
- /* linux/mm/swap.c */
-+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
- extern void lru_cache_add(struct page *);
- extern void lru_cache_add_anon(struct page *page);
- extern void lru_cache_add_file(struct page *page);
-diff -Nur linux-4.1.13.orig/include/linux/thread_info.h linux-4.1.13/include/linux/thread_info.h
---- linux-4.1.13.orig/include/linux/thread_info.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/thread_info.h 2015-11-29 09:23:09.617613651 +0100
-@@ -102,7 +102,17 @@
- #define test_thread_flag(flag) \
- test_ti_thread_flag(current_thread_info(), flag)
-
--#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-+#ifdef CONFIG_PREEMPT_LAZY
-+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
-+ test_thread_flag(TIF_NEED_RESCHED_LAZY))
-+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
-+#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY))
-+
-+#else
-+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
-+#define tif_need_resched_lazy() 0
-+#endif
-
- #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
- /*
-diff -Nur linux-4.1.13.orig/include/linux/timer.h linux-4.1.13/include/linux/timer.h
---- linux-4.1.13.orig/include/linux/timer.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/timer.h 2015-11-29 09:23:09.617613651 +0100
-@@ -241,7 +241,7 @@
-
- extern int try_to_del_timer_sync(struct timer_list *timer);
-
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- extern int del_timer_sync(struct timer_list *timer);
- #else
- # define del_timer_sync(t) del_timer(t)
-diff -Nur linux-4.1.13.orig/include/linux/uaccess.h linux-4.1.13/include/linux/uaccess.h
---- linux-4.1.13.orig/include/linux/uaccess.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/uaccess.h 2015-11-29 09:23:09.617613651 +0100
-@@ -1,21 +1,31 @@
- #ifndef __LINUX_UACCESS_H__
- #define __LINUX_UACCESS_H__
-
--#include <linux/preempt.h>
-+#include <linux/sched.h>
- #include <asm/uaccess.h>
-
-+static __always_inline void pagefault_disabled_inc(void)
-+{
-+ current->pagefault_disabled++;
-+}
-+
-+static __always_inline void pagefault_disabled_dec(void)
-+{
-+ current->pagefault_disabled--;
-+ WARN_ON(current->pagefault_disabled < 0);
-+}
-+
- /*
-- * These routines enable/disable the pagefault handler in that
-- * it will not take any locks and go straight to the fixup table.
-+ * These routines enable/disable the pagefault handler. If disabled, it will
-+ * not take any locks and go straight to the fixup table.
- *
-- * They have great resemblance to the preempt_disable/enable calls
-- * and in fact they are identical; this is because currently there is
-- * no other way to make the pagefault handlers do this. So we do
-- * disable preemption but we don't necessarily care about that.
-+ * User access methods will not sleep when called from a pagefault_disabled()
-+ * environment.
- */
- static inline void pagefault_disable(void)
- {
-- preempt_count_inc();
-+ migrate_disable();
-+ pagefault_disabled_inc();
- /*
- * make sure to have issued the store before a pagefault
- * can hit.
-@@ -25,18 +35,32 @@
-
- static inline void pagefault_enable(void)
- {
--#ifndef CONFIG_PREEMPT
- /*
- * make sure to issue those last loads/stores before enabling
- * the pagefault handler again.
- */
- barrier();
-- preempt_count_dec();
--#else
-- preempt_enable();
--#endif
-+ pagefault_disabled_dec();
-+ migrate_enable();
- }
-
-+/*
-+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
-+ */
-+#define pagefault_disabled() (current->pagefault_disabled != 0)
-+
-+/*
-+ * The pagefault handler is in general disabled by pagefault_disable() or
-+ * when in irq context (via in_atomic()).
-+ *
-+ * This function should only be used by the fault handlers. Other users should
-+ * stick to pagefault_disabled().
-+ * Please NEVER use preempt_disable() to disable the fault handler. With
-+ * !CONFIG_PREEMPT_COUNT it is a NOP, so the handler would not actually be
-+ * disabled; in_atomic() will also report different values based on
-+ * CONFIG_PREEMPT_COUNT.
-+ */
-+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
-+
- #ifndef ARCH_HAS_NOCACHE_UACCESS
-
- static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
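With the per-task pagefault_disabled counter, user access inside such a section fails instead of faulting pages in, and arch fault handlers are expected to test faulthandler_disabled() rather than in_atomic(). A hedged sketch of the caller side; dst/src/len are hypothetical:

	#include <linux/uaccess.h>

	static int peek_user(void *dst, const void __user *src, size_t len)
	{
		unsigned long left;

		pagefault_disable();	/* also a migrate_disable() section here */
		left = __copy_from_user_inatomic(dst, src, len);	/* never sleeps */
		pagefault_enable();
		return left ? -EFAULT : 0;
	}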
-diff -Nur linux-4.1.13.orig/include/linux/uprobes.h linux-4.1.13/include/linux/uprobes.h
---- linux-4.1.13.orig/include/linux/uprobes.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/uprobes.h 2015-11-29 09:23:09.617613651 +0100
-@@ -27,6 +27,7 @@
- #include <linux/errno.h>
- #include <linux/rbtree.h>
- #include <linux/types.h>
-+#include <linux/wait.h>
-
- struct vm_area_struct;
- struct mm_struct;
-diff -Nur linux-4.1.13.orig/include/linux/vmstat.h linux-4.1.13/include/linux/vmstat.h
---- linux-4.1.13.orig/include/linux/vmstat.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/vmstat.h 2015-11-29 09:23:09.617613651 +0100
-@@ -33,7 +33,9 @@
- */
- static inline void __count_vm_event(enum vm_event_item item)
- {
-+ preempt_disable_rt();
- raw_cpu_inc(vm_event_states.event[item]);
-+ preempt_enable_rt();
- }
-
- static inline void count_vm_event(enum vm_event_item item)
-@@ -43,7 +45,9 @@
-
- static inline void __count_vm_events(enum vm_event_item item, long delta)
- {
-+ preempt_disable_rt();
- raw_cpu_add(vm_event_states.event[item], delta);
-+ preempt_enable_rt();
- }
-
- static inline void count_vm_events(enum vm_event_item item, long delta)
-diff -Nur linux-4.1.13.orig/include/linux/wait.h linux-4.1.13/include/linux/wait.h
---- linux-4.1.13.orig/include/linux/wait.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/linux/wait.h 2015-11-29 09:23:09.621613384 +0100
-@@ -8,6 +8,7 @@
- #include <linux/spinlock.h>
- #include <asm/current.h>
- #include <uapi/linux/wait.h>
-+#include <linux/atomic.h>
-
- typedef struct __wait_queue wait_queue_t;
- typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-diff -Nur linux-4.1.13.orig/include/linux/wait-simple.h linux-4.1.13/include/linux/wait-simple.h
---- linux-4.1.13.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/wait-simple.h 2015-11-29 09:23:09.617613651 +0100
-@@ -0,0 +1,207 @@
-+#ifndef _LINUX_WAIT_SIMPLE_H
-+#define _LINUX_WAIT_SIMPLE_H
-+
-+#include <linux/spinlock.h>
-+#include <linux/list.h>
-+
-+#include <asm/current.h>
-+
-+struct swaiter {
-+ struct task_struct *task;
-+ struct list_head node;
-+};
-+
-+#define DEFINE_SWAITER(name) \
-+ struct swaiter name = { \
-+ .task = current, \
-+ .node = LIST_HEAD_INIT((name).node), \
-+ }
-+
-+struct swait_head {
-+ raw_spinlock_t lock;
-+ struct list_head list;
-+};
-+
-+#define SWAIT_HEAD_INITIALIZER(name) { \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-+ .list = LIST_HEAD_INIT((name).list), \
-+ }
-+
-+#define DEFINE_SWAIT_HEAD(name) \
-+ struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
-+
-+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
-+
-+#define init_swait_head(swh) \
-+ do { \
-+ static struct lock_class_key __key; \
-+ \
-+ __init_swait_head((swh), &__key); \
-+ } while (0)
-+
-+/*
-+ * Waiter functions
-+ */
-+extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
-+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
-+extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
-+extern void swait_finish(struct swait_head *head, struct swaiter *w);
-+
-+/* Check whether a head has waiters enqueued */
-+static inline bool swaitqueue_active(struct swait_head *h)
-+{
-+ /* Make sure the condition is visible before checking list_empty() */
-+ smp_mb();
-+ return !list_empty(&h->list);
-+}
-+
-+/*
-+ * Wakeup functions
-+ */
-+extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
-+extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
-+
-+#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
-+#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
-+#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
-+#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
-+
-+/*
-+ * Event API
-+ */
-+#define __swait_event(wq, condition) \
-+do { \
-+ DEFINE_SWAITER(__wait); \
-+ \
-+ for (;;) { \
-+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-+ if (condition) \
-+ break; \
-+ schedule(); \
-+ } \
-+ swait_finish(&wq, &__wait); \
-+} while (0)
-+
-+/**
-+ * swait_event - sleep until a condition gets true
-+ * @wq: the waitqueue to wait on
-+ * @condition: a C expression for the event to wait for
-+ *
-+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
-+ * @condition evaluates to true. The @condition is checked each time
-+ * the waitqueue @wq is woken up.
-+ *
-+ * wake_up() has to be called after changing any variable that could
-+ * change the result of the wait condition.
-+ */
-+#define swait_event(wq, condition) \
-+do { \
-+ if (condition) \
-+ break; \
-+ __swait_event(wq, condition); \
-+} while (0)
-+
-+#define __swait_event_interruptible(wq, condition, ret) \
-+do { \
-+ DEFINE_SWAITER(__wait); \
-+ \
-+ for (;;) { \
-+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
-+ if (condition) \
-+ break; \
-+ if (signal_pending(current)) { \
-+ ret = -ERESTARTSYS; \
-+ break; \
-+ } \
-+ schedule(); \
-+ } \
-+ swait_finish(&wq, &__wait); \
-+} while (0)
-+
-+#define __swait_event_interruptible_timeout(wq, condition, ret) \
-+do { \
-+ DEFINE_SWAITER(__wait); \
-+ \
-+ for (;;) { \
-+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
-+ if (condition) \
-+ break; \
-+ if (signal_pending(current)) { \
-+ ret = -ERESTARTSYS; \
-+ break; \
-+ } \
-+ ret = schedule_timeout(ret); \
-+ if (!ret) \
-+ break; \
-+ } \
-+ swait_finish(&wq, &__wait); \
-+} while (0)
-+
-+/**
-+ * swait_event_interruptible - sleep until a condition gets true
-+ * @wq: the waitqueue to wait on
-+ * @condition: a C expression for the event to wait for
-+ *
-+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
-+ * @condition evaluates to true. The @condition is checked each time
-+ * the waitqueue @wq is woken up.
-+ *
-+ * wake_up() has to be called after changing any variable that could
-+ * change the result of the wait condition.
-+ */
-+#define swait_event_interruptible(wq, condition) \
-+({ \
-+ int __ret = 0; \
-+ if (!(condition)) \
-+ __swait_event_interruptible(wq, condition, __ret); \
-+ __ret; \
-+})
-+
-+#define swait_event_interruptible_timeout(wq, condition, timeout) \
-+({ \
-+ int __ret = timeout; \
-+ if (!(condition)) \
-+ __swait_event_interruptible_timeout(wq, condition, __ret); \
-+ __ret; \
-+})
-+
-+#define __swait_event_timeout(wq, condition, ret) \
-+do { \
-+ DEFINE_SWAITER(__wait); \
-+ \
-+ for (;;) { \
-+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-+ if (condition) \
-+ break; \
-+ ret = schedule_timeout(ret); \
-+ if (!ret) \
-+ break; \
-+ } \
-+ swait_finish(&wq, &__wait); \
-+} while (0)
-+
-+/**
-+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
-+ * @wq: the waitqueue to wait on
-+ * @condition: a C expression for the event to wait for
-+ * @timeout: timeout, in jiffies
-+ *
-+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
-+ * @condition evaluates to true. The @condition is checked each time
-+ * the waitqueue @wq is woken up.
-+ *
-+ * wake_up() has to be called after changing any variable that could
-+ * change the result of the wait condition.
-+ *
-+ * The function returns 0 if the @timeout elapsed, and the remaining
-+ * jiffies if the condition evaluated to true before the timeout elapsed.
-+ */
-+#define swait_event_timeout(wq, condition, timeout) \
-+({ \
-+ long __ret = timeout; \
-+ if (!(condition)) \
-+ __swait_event_timeout(wq, condition, __ret); \
-+ __ret; \
-+})
-+
-+#endif
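The simple-wait API above deliberately mirrors the regular waitqueue API while keeping a raw lock underneath. A hedged usage sketch built only from the definitions in this header; demo_wait, demo_done and the two functions are invented names:

static DEFINE_SWAIT_HEAD(demo_wait);
static int demo_done;

static int demo_waiter(void)
{
	/* sleeps in TASK_INTERRUPTIBLE until demo_done is set */
	return swait_event_interruptible(demo_wait, demo_done);
}

static void demo_complete(void)
{
	demo_done = 1;
	/* wake one TASK_NORMAL waiter, if any is enqueued */
	swait_wake(&demo_wait);
}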
-diff -Nur linux-4.1.13.orig/include/linux/work-simple.h linux-4.1.13/include/linux/work-simple.h
---- linux-4.1.13.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/linux/work-simple.h 2015-11-29 09:23:09.621613384 +0100
-@@ -0,0 +1,24 @@
-+#ifndef _LINUX_SWORK_H
-+#define _LINUX_SWORK_H
-+
-+#include <linux/list.h>
-+
-+struct swork_event {
-+ struct list_head item;
-+ unsigned long flags;
-+ void (*func)(struct swork_event *);
-+};
-+
-+static inline void INIT_SWORK(struct swork_event *event,
-+ void (*func)(struct swork_event *))
-+{
-+ event->flags = 0;
-+ event->func = func;
-+}
-+
-+bool swork_queue(struct swork_event *sev);
-+
-+int swork_get(void);
-+void swork_put(void);
-+
-+#endif /* _LINUX_SWORK_H */
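work-simple.h ships without kernel-doc, so here is a hedged sketch of the intended lifecycle: swork_get() brings up the refcounted worker thread and may sleep, after which swork_queue() can be used from contexts that must not. All names except the swork_* API itself are invented:

static void demo_swork_fn(struct swork_event *sev)
{
	pr_info("simple work ran in thread context\n");
}

static struct swork_event demo_event;

static int demo_setup(void)
{
	int err = swork_get();		/* may sleep: creates the worker */

	if (err)
		return err;
	INIT_SWORK(&demo_event, demo_swork_fn);
	swork_queue(&demo_event);	/* queue and wake the worker */
	return 0;
}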
-diff -Nur linux-4.1.13.orig/include/net/dst.h linux-4.1.13/include/net/dst.h
---- linux-4.1.13.orig/include/net/dst.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/net/dst.h 2015-11-29 09:23:09.629612854 +0100
-@@ -403,7 +403,7 @@
- static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
- struct sk_buff *skb)
- {
-- const struct hh_cache *hh;
-+ struct hh_cache *hh;
-
- if (dst->pending_confirm) {
- unsigned long now = jiffies;
-diff -Nur linux-4.1.13.orig/include/net/neighbour.h linux-4.1.13/include/net/neighbour.h
---- linux-4.1.13.orig/include/net/neighbour.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/net/neighbour.h 2015-11-29 09:23:09.629612854 +0100
-@@ -445,7 +445,7 @@
- }
- #endif
-
--static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
-+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
- {
- unsigned int seq;
- int hh_len;
-@@ -500,7 +500,7 @@
-
- #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
-
--static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
-+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
- const struct net_device *dev)
- {
- unsigned int seq;
-diff -Nur linux-4.1.13.orig/include/net/netns/ipv4.h linux-4.1.13/include/net/netns/ipv4.h
---- linux-4.1.13.orig/include/net/netns/ipv4.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/net/netns/ipv4.h 2015-11-29 09:23:09.629612854 +0100
-@@ -69,6 +69,7 @@
-
- int sysctl_icmp_echo_ignore_all;
- int sysctl_icmp_echo_ignore_broadcasts;
-+ int sysctl_icmp_echo_sysrq;
- int sysctl_icmp_ignore_bogus_error_responses;
- int sysctl_icmp_ratelimit;
- int sysctl_icmp_ratemask;
-diff -Nur linux-4.1.13.orig/include/trace/events/hist.h linux-4.1.13/include/trace/events/hist.h
---- linux-4.1.13.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/trace/events/hist.h 2015-11-29 09:23:09.629612854 +0100
-@@ -0,0 +1,74 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM hist
-+
-+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_HIST_H
-+
-+#include "latency_hist.h"
-+#include <linux/tracepoint.h>
-+
-+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
-+#define trace_preemptirqsoff_hist(a, b)
-+#define trace_preemptirqsoff_hist_rcuidle(a, b)
-+#else
-+TRACE_EVENT(preemptirqsoff_hist,
-+
-+ TP_PROTO(int reason, int starthist),
-+
-+ TP_ARGS(reason, starthist),
-+
-+ TP_STRUCT__entry(
-+ __field(int, reason)
-+ __field(int, starthist)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->reason = reason;
-+ __entry->starthist = starthist;
-+ ),
-+
-+ TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
-+ __entry->starthist ? "start" : "stop")
-+);
-+#endif
-+
-+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+#define trace_hrtimer_interrupt(a, b, c, d)
-+#define trace_hrtimer_interrupt_rcuidle(a, b, c, d)
-+#else
-+TRACE_EVENT(hrtimer_interrupt,
-+
-+ TP_PROTO(int cpu, long long offset, struct task_struct *curr,
-+ struct task_struct *task),
-+
-+ TP_ARGS(cpu, offset, curr, task),
-+
-+ TP_STRUCT__entry(
-+ __field(int, cpu)
-+ __field(long long, offset)
-+ __array(char, ccomm, TASK_COMM_LEN)
-+ __field(int, cprio)
-+ __array(char, tcomm, TASK_COMM_LEN)
-+ __field(int, tprio)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->cpu = cpu;
-+ __entry->offset = offset;
-+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
-+ __entry->cprio = curr->prio;
-+ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
-+ task != NULL ? TASK_COMM_LEN : 7);
-+ __entry->tprio = task != NULL ? task->prio : -1;
-+ ),
-+
-+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
-+ __entry->cpu, __entry->offset, __entry->ccomm,
-+ __entry->cprio, __entry->tcomm, __entry->tprio)
-+);
-+#endif
-+
-+#endif /* _TRACE_HIST_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
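Probes attach to these events through the usual helpers generated by the tracepoint machinery; a hedged sketch, valid only when CONFIG_PREEMPT_OFF_HIST or CONFIG_INTERRUPT_OFF_HIST compiles the event in (demo_* names invented):

static void demo_probe(void *data, int reason, int starthist)
{
	/* the first argument is the cookie passed at registration */
	trace_printk("reason=%s starthist=%d\n",
		     getaction(reason), starthist);
}

static int demo_attach(void)
{
	return register_trace_preemptirqsoff_hist(demo_probe, NULL);
}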
-diff -Nur linux-4.1.13.orig/include/trace/events/latency_hist.h linux-4.1.13/include/trace/events/latency_hist.h
---- linux-4.1.13.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/include/trace/events/latency_hist.h 2015-11-29 09:23:09.629612854 +0100
-@@ -0,0 +1,29 @@
-+#ifndef _LATENCY_HIST_H
-+#define _LATENCY_HIST_H
-+
-+enum hist_action {
-+ IRQS_ON,
-+ PREEMPT_ON,
-+ TRACE_STOP,
-+ IRQS_OFF,
-+ PREEMPT_OFF,
-+ TRACE_START,
-+};
-+
-+static char *actions[] = {
-+ "IRQS_ON",
-+ "PREEMPT_ON",
-+ "TRACE_STOP",
-+ "IRQS_OFF",
-+ "PREEMPT_OFF",
-+ "TRACE_START",
-+};
-+
-+static inline char *getaction(int action)
-+{
-+	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
-+ return actions[action];
-+ return "unknown";
-+}
-+
-+#endif /* _LATENCY_HIST_H */
-diff -Nur linux-4.1.13.orig/include/trace/events/sched.h linux-4.1.13/include/trace/events/sched.h
---- linux-4.1.13.orig/include/trace/events/sched.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/include/trace/events/sched.h 2015-11-29 09:23:09.629612854 +0100
-@@ -55,9 +55,9 @@
- */
- DECLARE_EVENT_CLASS(sched_wakeup_template,
-
-- TP_PROTO(struct task_struct *p, int success),
-+ TP_PROTO(struct task_struct *p),
-
-- TP_ARGS(__perf_task(p), success),
-+ TP_ARGS(__perf_task(p)),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
-@@ -71,25 +71,37 @@
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->prio = p->prio;
-- __entry->success = success;
-+ __entry->success = 1; /* rudiment, kill when possible */
- __entry->target_cpu = task_cpu(p);
- ),
-
-- TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
-+ TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
- __entry->comm, __entry->pid, __entry->prio,
-- __entry->success, __entry->target_cpu)
-+ __entry->target_cpu)
- );
-
-+/*
-+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
-+ * called from the waking context.
-+ */
-+DEFINE_EVENT(sched_wakeup_template, sched_waking,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-+
-+/*
-+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
-+ * It is not always called from the waking context.
-+ */
- DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-- TP_PROTO(struct task_struct *p, int success),
-- TP_ARGS(p, success));
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-
- /*
- * Tracepoint for waking up a new task:
- */
- DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-- TP_PROTO(struct task_struct *p, int success),
-- TP_ARGS(p, success));
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-
- #ifdef CREATE_TRACE_POINTS
- static inline long __trace_sched_switch_state(struct task_struct *p)
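The comments above encode the contract: sched_waking always fires in the waker's context, while sched_wakeup fires once p->state becomes TASK_RUNNING, possibly elsewhere. A simplified, hedged sketch of where the two sit inside a try_to_wake_up()-like path (illustration only, not the scheduler code from this patch):

static int demo_try_to_wake_up(struct task_struct *p)
{
	trace_sched_waking(p);		/* always the waking context */
	/* ... enqueue p, possibly handing it to a remote CPU ... */
	p->state = TASK_RUNNING;	/* in reality done under rq locks */
	trace_sched_wakeup(p);		/* may fire on that remote CPU */
	return 1;
}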
-diff -Nur linux-4.1.13.orig/init/Kconfig linux-4.1.13/init/Kconfig
---- linux-4.1.13.orig/init/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/init/Kconfig 2015-11-29 09:23:09.629612854 +0100
-@@ -637,7 +637,7 @@
-
- config RCU_FAST_NO_HZ
- bool "Accelerate last non-dyntick-idle CPU's grace periods"
-- depends on NO_HZ_COMMON && SMP
-+ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL
- default n
- help
- This option permits CPUs to enter dynticks-idle state even if
-@@ -664,7 +664,7 @@
- config RCU_BOOST
- bool "Enable RCU priority boosting"
- depends on RT_MUTEXES && PREEMPT_RCU
-- default n
-+ default y if PREEMPT_RT_FULL
- help
- This option boosts the priority of preempted RCU readers that
- block the current preemptible RCU grace period for too long.
-@@ -1101,6 +1101,7 @@
- config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on CGROUP_SCHED
-+ depends on !PREEMPT_RT_FULL
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
-@@ -1688,6 +1689,7 @@
-
- config SLAB
- bool "SLAB"
-+ depends on !PREEMPT_RT_FULL
- help
- The regular slab allocator that is established and known to work
- well in all environments. It organizes cache hot objects in
-@@ -1706,6 +1708,7 @@
- config SLOB
- depends on EXPERT
- bool "SLOB (Simple Allocator)"
-+ depends on !PREEMPT_RT_FULL
- help
- SLOB replaces the stock allocator with a drastically simpler
- allocator. SLOB is generally more space efficient but
-@@ -1715,7 +1718,7 @@
-
- config SLUB_CPU_PARTIAL
- default y
-- depends on SLUB && SMP
-+ depends on SLUB && SMP && !PREEMPT_RT_FULL
- bool "SLUB per cpu partial cache"
- help
- Per cpu partial caches accellerate objects allocation and freeing
-diff -Nur linux-4.1.13.orig/init/main.c linux-4.1.13/init/main.c
---- linux-4.1.13.orig/init/main.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/init/main.c 2015-11-29 09:23:09.629612854 +0100
-@@ -525,6 +525,7 @@
- setup_command_line(command_line);
- setup_nr_cpu_ids();
- setup_per_cpu_areas();
-+ softirq_early_init();
- smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
-
- build_all_zonelists(NULL, NULL);
-diff -Nur linux-4.1.13.orig/init/Makefile linux-4.1.13/init/Makefile
---- linux-4.1.13.orig/init/Makefile 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/init/Makefile 2015-11-29 09:23:09.629612854 +0100
-@@ -33,4 +33,4 @@
- include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
-+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -Nur linux-4.1.13.orig/ipc/mqueue.c linux-4.1.13/ipc/mqueue.c
---- linux-4.1.13.orig/ipc/mqueue.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/ipc/mqueue.c 2015-11-29 09:23:09.629612854 +0100
-@@ -47,8 +47,7 @@
- #define RECV 1
-
- #define STATE_NONE 0
--#define STATE_PENDING 1
--#define STATE_READY 2
-+#define STATE_READY 1
-
- struct posix_msg_tree_node {
- struct rb_node rb_node;
-@@ -568,15 +567,12 @@
- wq_add(info, sr, ewp);
-
- for (;;) {
-- set_current_state(TASK_INTERRUPTIBLE);
-+ __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_unlock(&info->lock);
- time = schedule_hrtimeout_range_clock(timeout, 0,
- HRTIMER_MODE_ABS, CLOCK_REALTIME);
-
-- while (ewp->state == STATE_PENDING)
-- cpu_relax();
--
- if (ewp->state == STATE_READY) {
- retval = 0;
- goto out;
-@@ -904,11 +900,15 @@
- * list of waiting receivers. A sender checks that list before adding the new
- * message into the message array. If there is a waiting receiver, then it
- * bypasses the message array and directly hands the message over to the
-- * receiver.
-- * The receiver accepts the message and returns without grabbing the queue
-- * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
-- * are necessary. The same algorithm is used for sysv semaphores, see
-- * ipc/sem.c for more details.
-+ * receiver. The receiver accepts the message and returns without grabbing the
-+ * queue spinlock:
-+ *
-+ * - Set pointer to message.
-+ * - Queue the receiver task for later wakeup (without the info->lock).
-+ * - Update its state to STATE_READY. Now the receiver can continue.
-+ * - Wake up the process after the lock is dropped. Should the process wake up
-+ * before this wakeup (due to a timeout or a signal) it will either see
-+ * STATE_READY and continue or acquire the lock to check the state again.
- *
- * The same algorithm is used for senders.
- */
-@@ -916,21 +916,29 @@
- /* pipelined_send() - send a message directly to the task waiting in
- * sys_mq_timedreceive() (without inserting message into a queue).
- */
--static inline void pipelined_send(struct mqueue_inode_info *info,
-+static inline void pipelined_send(struct wake_q_head *wake_q,
-+ struct mqueue_inode_info *info,
- struct msg_msg *message,
- struct ext_wait_queue *receiver)
- {
- receiver->msg = message;
- list_del(&receiver->list);
-- receiver->state = STATE_PENDING;
-- wake_up_process(receiver->task);
-- smp_wmb();
-+ wake_q_add(wake_q, receiver->task);
-+ /*
-+ * Rely on the implicit cmpxchg barrier from wake_q_add such
-+ * that we can ensure that updating receiver->state is the last
-+ * write operation: As once set, the receiver can continue,
-+	 * and without already holding the reference count taken by
-+	 * wake_q_add at that point, we could later hit a use-after-free
-+	 * condition and a bogus wakeup.
-+ */
- receiver->state = STATE_READY;
- }
-
- /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
- * gets its message and put to the queue (we have one free place for sure). */
--static inline void pipelined_receive(struct mqueue_inode_info *info)
-+static inline void pipelined_receive(struct wake_q_head *wake_q,
-+ struct mqueue_inode_info *info)
- {
- struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
-
-@@ -941,10 +949,9 @@
- }
- if (msg_insert(sender->msg, info))
- return;
-+
- list_del(&sender->list);
-- sender->state = STATE_PENDING;
-- wake_up_process(sender->task);
-- smp_wmb();
-+ wake_q_add(wake_q, sender->task);
- sender->state = STATE_READY;
- }
-
-@@ -962,6 +969,7 @@
- struct timespec ts;
- struct posix_msg_tree_node *new_leaf = NULL;
- int ret = 0;
-+ WAKE_Q(wake_q);
-
- if (u_abs_timeout) {
- int res = prepare_timeout(u_abs_timeout, &expires, &ts);
-@@ -1045,7 +1053,7 @@
- } else {
- receiver = wq_get_first_waiter(info, RECV);
- if (receiver) {
-- pipelined_send(info, msg_ptr, receiver);
-+ pipelined_send(&wake_q, info, msg_ptr, receiver);
- } else {
- /* adds message to the queue */
- ret = msg_insert(msg_ptr, info);
-@@ -1058,6 +1066,7 @@
- }
- out_unlock:
- spin_unlock(&info->lock);
-+ wake_up_q(&wake_q);
- out_free:
- if (ret)
- free_msg(msg_ptr);
-@@ -1144,14 +1153,17 @@
- msg_ptr = wait.msg;
- }
- } else {
-+ WAKE_Q(wake_q);
-+
- msg_ptr = msg_get(info);
-
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- CURRENT_TIME;
-
- /* There is now free space in queue. */
-- pipelined_receive(info);
-+ pipelined_receive(&wake_q, info);
- spin_unlock(&info->lock);
-+ wake_up_q(&wake_q);
- ret = 0;
- }
- if (ret == 0) {
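The pattern the mqueue rework keeps repeating is worth isolating: record wakeups on a wake_q while holding the spinlock, drop the lock, then wake. Because wake_q_add() takes a task reference, the woken task can free its wait entry without a use-after-free, and on RT a boosted waiter never contends on the still-held lock. A hedged distillation with invented demo_* names:

static void demo_unlock_and_wake(spinlock_t *lock, struct task_struct *t)
{
	WAKE_Q(wake_q);			/* on-stack list of pending wakeups */

	spin_lock(lock);
	/* ... state changes the waiter must observe ... */
	wake_q_add(&wake_q, t);		/* grabs a reference on t */
	spin_unlock(lock);
	wake_up_q(&wake_q);		/* the real wake_up_process() calls */
}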
-diff -Nur linux-4.1.13.orig/ipc/msg.c linux-4.1.13/ipc/msg.c
---- linux-4.1.13.orig/ipc/msg.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/ipc/msg.c 2015-11-29 09:23:09.629612854 +0100
-@@ -188,6 +188,12 @@
- struct msg_receiver *msr, *t;
-
- list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-+ /*
-+		 * Make sure that the wakeup doesn't preempt
-+ * this CPU prematurely. (on PREEMPT_RT)
-+ */
-+ preempt_disable_rt();
-+
- msr->r_msg = NULL; /* initialize expunge ordering */
- wake_up_process(msr->r_tsk);
- /*
-@@ -198,6 +204,8 @@
- */
- smp_mb();
- msr->r_msg = ERR_PTR(res);
-+
-+ preempt_enable_rt();
- }
- }
-
-@@ -574,6 +582,11 @@
- if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
- !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
- msr->r_msgtype, msr->r_mode)) {
-+ /*
-+			 * Make sure that the wakeup doesn't preempt
-+ * this CPU prematurely. (on PREEMPT_RT)
-+ */
-+ preempt_disable_rt();
-
- list_del(&msr->r_list);
- if (msr->r_maxsize < msg->m_ts) {
-@@ -595,12 +608,13 @@
- */
- smp_mb();
- msr->r_msg = msg;
-+ preempt_enable_rt();
-
- return 1;
- }
-+ preempt_enable_rt();
- }
- }
--
- return 0;
- }
-
-diff -Nur linux-4.1.13.orig/ipc/sem.c linux-4.1.13/ipc/sem.c
---- linux-4.1.13.orig/ipc/sem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/ipc/sem.c 2015-11-29 09:23:09.629612854 +0100
-@@ -690,6 +690,13 @@
- static void wake_up_sem_queue_prepare(struct list_head *pt,
- struct sem_queue *q, int error)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *p = q->sleeper;
-+ get_task_struct(p);
-+ q->status = error;
-+ wake_up_process(p);
-+ put_task_struct(p);
-+#else
- if (list_empty(pt)) {
- /*
- * Hold preempt off so that we don't get preempted and have the
-@@ -701,6 +708,7 @@
- q->pid = error;
-
- list_add_tail(&q->list, pt);
-+#endif
- }
-
- /**
-@@ -714,6 +722,7 @@
- */
- static void wake_up_sem_queue_do(struct list_head *pt)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- struct sem_queue *q, *t;
- int did_something;
-
-@@ -726,6 +735,7 @@
- }
- if (did_something)
- preempt_enable();
-+#endif
- }
-
- static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-diff -Nur linux-4.1.13.orig/kernel/bpf/hashtab.c linux-4.1.13/kernel/bpf/hashtab.c
---- linux-4.1.13.orig/kernel/bpf/hashtab.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/bpf/hashtab.c 2015-11-29 09:23:09.629612854 +0100
-@@ -17,7 +17,7 @@
- struct bpf_htab {
- struct bpf_map map;
- struct hlist_head *buckets;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- u32 count; /* number of elements in this hashtable */
- u32 n_buckets; /* number of hash buckets */
- u32 elem_size; /* size of each element in bytes */
-@@ -82,7 +82,7 @@
- for (i = 0; i < htab->n_buckets; i++)
- INIT_HLIST_HEAD(&htab->buckets[i]);
-
-- spin_lock_init(&htab->lock);
-+ raw_spin_lock_init(&htab->lock);
- htab->count = 0;
-
- htab->elem_size = sizeof(struct htab_elem) +
-@@ -230,7 +230,7 @@
- l_new->hash = htab_map_hash(l_new->key, key_size);
-
- /* bpf_map_update_elem() can be called in_irq() */
-- spin_lock_irqsave(&htab->lock, flags);
-+ raw_spin_lock_irqsave(&htab->lock, flags);
-
- head = select_bucket(htab, l_new->hash);
-
-@@ -266,11 +266,11 @@
- } else {
- htab->count++;
- }
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
-
- return 0;
- err:
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
- kfree(l_new);
- return ret;
- }
-@@ -291,7 +291,7 @@
-
- hash = htab_map_hash(key, key_size);
-
-- spin_lock_irqsave(&htab->lock, flags);
-+ raw_spin_lock_irqsave(&htab->lock, flags);
-
- head = select_bucket(htab, hash);
-
-@@ -304,7 +304,7 @@
- ret = 0;
- }
-
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
- return ret;
- }
-
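The hashtab conversion is the canonical RT pattern: on PREEMPT_RT a spinlock_t becomes a sleeping rtmutex, which is illegal in the in_irq() paths bpf_map_update_elem() can be reached from, so the lock becomes a raw_spinlock_t that stays a true spinning lock. A hedged sketch of the only calling convention that is then allowed (demo_* name invented):

static void demo_update_from_irq(struct bpf_htab *htab)
{
	unsigned long flags;

	/* legal even in_irq() on PREEMPT_RT; never sleeps */
	raw_spin_lock_irqsave(&htab->lock, flags);
	/* ... short, bounded bucket manipulation only ... */
	raw_spin_unlock_irqrestore(&htab->lock, flags);
}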
-diff -Nur linux-4.1.13.orig/kernel/cgroup.c linux-4.1.13/kernel/cgroup.c
---- linux-4.1.13.orig/kernel/cgroup.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/cgroup.c 2015-11-29 09:23:09.629612854 +0100
-@@ -4422,10 +4422,10 @@
- queue_work(cgroup_destroy_wq, &css->destroy_work);
- }
-
--static void css_release_work_fn(struct work_struct *work)
-+static void css_release_work_fn(struct swork_event *sev)
- {
- struct cgroup_subsys_state *css =
-- container_of(work, struct cgroup_subsys_state, destroy_work);
-+ container_of(sev, struct cgroup_subsys_state, destroy_swork);
- struct cgroup_subsys *ss = css->ss;
- struct cgroup *cgrp = css->cgroup;
-
-@@ -4464,8 +4464,8 @@
- struct cgroup_subsys_state *css =
- container_of(ref, struct cgroup_subsys_state, refcnt);
-
-- INIT_WORK(&css->destroy_work, css_release_work_fn);
-- queue_work(cgroup_destroy_wq, &css->destroy_work);
-+ INIT_SWORK(&css->destroy_swork, css_release_work_fn);
-+ swork_queue(&css->destroy_swork);
- }
-
- static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5069,6 +5069,7 @@
- */
- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
- BUG_ON(!cgroup_destroy_wq);
-+ BUG_ON(swork_get());
-
- /*
- * Used to destroy pidlists and separate to serve as flush domain.
-diff -Nur linux-4.1.13.orig/kernel/cpu.c linux-4.1.13/kernel/cpu.c
---- linux-4.1.13.orig/kernel/cpu.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/cpu.c 2015-11-29 09:23:09.633612589 +0100
-@@ -74,8 +74,8 @@
- #endif
- } cpu_hotplug = {
- .active_writer = NULL,
-- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
- .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = {.name = "cpu_hotplug.lock" },
- #endif
-@@ -88,6 +88,289 @@
- #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
- #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
-
-+/**
-+ * hotplug_pcp - per cpu hotplug descriptor
-+ * @unplug: set when pin_current_cpu() needs to sync tasks
-+ * @sync_tsk: the task that waits for tasks to finish pinned sections
-+ * @refcount: counter of tasks in pinned sections
-+ * @grab_lock: set when the tasks entering pinned sections should wait
-+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
-+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
-+ * @mutex_init: zero if the mutex hasn't been initialized yet.
-+ *
-+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
-+ * is used as a flag and still exists after the @sync_tsk thread has
-+ * exited and @sync_tsk has been set to NULL.
-+ */
-+struct hotplug_pcp {
-+ struct task_struct *unplug;
-+ struct task_struct *sync_tsk;
-+ int refcount;
-+ int grab_lock;
-+ struct completion synced;
-+ struct completion unplug_wait;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * Note, on PREEMPT_RT, the hotplug lock must save the state of
-+ * the task, otherwise the mutex will cause the task to fail
-+ * to sleep when required. (Because it's called from migrate_disable())
-+ *
-+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
-+ * state.
-+ */
-+ spinlock_t lock;
-+#else
-+ struct mutex mutex;
-+#endif
-+ int mutex_init;
-+};
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
-+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
-+#else
-+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
-+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
-+#endif
-+
-+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
-+
-+/**
-+ * pin_current_cpu - Prevent the current cpu from being unplugged
-+ *
-+ * Lightweight version of get_online_cpus() to prevent cpu from being
-+ * unplugged when code runs in a migration disabled region.
-+ *
-+ * Must be called with preemption disabled (preempt_count = 1)!
-+ */
-+void pin_current_cpu(void)
-+{
-+ struct hotplug_pcp *hp;
-+ int force = 0;
-+
-+retry:
-+ hp = this_cpu_ptr(&hotplug_pcp);
-+
-+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
-+ hp->unplug == current) {
-+ hp->refcount++;
-+ return;
-+ }
-+ if (hp->grab_lock) {
-+ preempt_enable();
-+ hotplug_lock(hp);
-+ hotplug_unlock(hp);
-+ } else {
-+ preempt_enable();
-+ /*
-+ * Try to push this task off of this CPU.
-+ */
-+ if (!migrate_me()) {
-+ preempt_disable();
-+ hp = this_cpu_ptr(&hotplug_pcp);
-+ if (!hp->grab_lock) {
-+ /*
-+				 * Just let it continue; it's already pinned
-+ * or about to sleep.
-+ */
-+ force = 1;
-+ goto retry;
-+ }
-+ preempt_enable();
-+ }
-+ }
-+ preempt_disable();
-+ goto retry;
-+}
-+
-+/**
-+ * unpin_current_cpu - Allow unplug of current cpu
-+ *
-+ * Must be called with preemption or interrupts disabled!
-+ */
-+void unpin_current_cpu(void)
-+{
-+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
-+
-+ WARN_ON(hp->refcount <= 0);
-+
-+ /* This is safe. sync_unplug_thread is pinned to this cpu */
-+ if (!--hp->refcount && hp->unplug && hp->unplug != current)
-+ wake_up_process(hp->unplug);
-+}
-+
-+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
-+{
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (hp->refcount) {
-+ schedule_preempt_disabled();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+}
-+
-+static int sync_unplug_thread(void *data)
-+{
-+ struct hotplug_pcp *hp = data;
-+
-+ wait_for_completion(&hp->unplug_wait);
-+ preempt_disable();
-+ hp->unplug = current;
-+ wait_for_pinned_cpus(hp);
-+
-+ /*
-+ * This thread will synchronize the cpu_down() with threads
-+ * that have pinned the CPU. When the pinned CPU count reaches
-+ * zero, we inform the cpu_down code to continue to the next step.
-+ */
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ preempt_enable();
-+ complete(&hp->synced);
-+
-+ /*
-+	 * If everything succeeds, the next step will need tasks to wait until
-+ * the CPU is offline before continuing. To do this, the grab_lock
-+ * is set and tasks going into pin_current_cpu() will block on the
-+ * mutex. But we still need to wait for those that are already in
-+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
-+ * will kick this thread out.
-+ */
-+ while (!hp->grab_lock && !kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+
-+ /* Make sure grab_lock is seen before we see a stale completion */
-+ smp_mb();
-+
-+ /*
-+ * Now just before cpu_down() enters stop machine, we need to make
-+ * sure all tasks that are in pinned CPU sections are out, and new
-+ * tasks will now grab the lock, keeping them from entering pinned
-+ * CPU sections.
-+ */
-+ if (!kthread_should_stop()) {
-+ preempt_disable();
-+ wait_for_pinned_cpus(hp);
-+ preempt_enable();
-+ complete(&hp->synced);
-+ }
-+
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+ set_current_state(TASK_RUNNING);
-+
-+ /*
-+ * Force this thread off this CPU as it's going down and
-+ * we don't want any more work on this CPU.
-+ */
-+ current->flags &= ~PF_NO_SETAFFINITY;
-+ set_cpus_allowed_ptr(current, cpu_present_mask);
-+ migrate_me();
-+ return 0;
-+}
-+
-+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
-+{
-+ wake_up_process(hp->sync_tsk);
-+ wait_for_completion(&hp->synced);
-+}
-+
-+static void __cpu_unplug_wait(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ complete(&hp->unplug_wait);
-+ wait_for_completion(&hp->synced);
-+}
-+
-+/*
-+ * Start the sync_unplug_thread on the target cpu and wait for it to
-+ * complete.
-+ */
-+static int cpu_unplug_begin(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+ int err;
-+
-+ /* Protected by cpu_hotplug.lock */
-+ if (!hp->mutex_init) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ spin_lock_init(&hp->lock);
-+#else
-+ mutex_init(&hp->mutex);
-+#endif
-+ hp->mutex_init = 1;
-+ }
-+
-+ /* Inform the scheduler to migrate tasks off this CPU */
-+ tell_sched_cpu_down_begin(cpu);
-+
-+ init_completion(&hp->synced);
-+ init_completion(&hp->unplug_wait);
-+
-+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
-+ if (IS_ERR(hp->sync_tsk)) {
-+ err = PTR_ERR(hp->sync_tsk);
-+ hp->sync_tsk = NULL;
-+ return err;
-+ }
-+ kthread_bind(hp->sync_tsk, cpu);
-+
-+ /*
-+ * Wait for tasks to get out of the pinned sections,
-+ * it's still OK if new tasks enter. Some CPU notifiers will
-+ * wait for tasks that are going to enter these sections and
-+ * we must not have them block.
-+ */
-+ wake_up_process(hp->sync_tsk);
-+ return 0;
-+}
-+
-+static void cpu_unplug_sync(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ init_completion(&hp->synced);
-+	/* The completion needs to be initialized before setting grab_lock */
-+ smp_wmb();
-+
-+ /* Grab the mutex before setting grab_lock */
-+ hotplug_lock(hp);
-+ hp->grab_lock = 1;
-+
-+ /*
-+ * The CPU notifiers have been completed.
-+ * Wait for tasks to get out of pinned CPU sections and have new
-+ * tasks block until the CPU is completely down.
-+ */
-+ __cpu_unplug_sync(hp);
-+
-+ /* All done with the sync thread */
-+ kthread_stop(hp->sync_tsk);
-+ hp->sync_tsk = NULL;
-+}
-+
-+static void cpu_unplug_done(unsigned int cpu)
-+{
-+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+
-+ hp->unplug = NULL;
-+ /* Let all tasks know cpu unplug is finished before cleaning up */
-+ smp_wmb();
-+
-+ if (hp->sync_tsk)
-+ kthread_stop(hp->sync_tsk);
-+
-+ if (hp->grab_lock) {
-+ hotplug_unlock(hp);
-+ /* protected by cpu_hotplug.lock */
-+ hp->grab_lock = 0;
-+ }
-+ tell_sched_cpu_down_done(cpu);
-+}
-
- void get_online_cpus(void)
- {
-@@ -349,13 +632,15 @@
- /* Requires cpu_add_remove_lock to be held */
- static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
- {
-- int err, nr_calls = 0;
-+ int mycpu, err, nr_calls = 0;
- void *hcpu = (void *)(long)cpu;
- unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
- struct take_cpu_down_param tcd_param = {
- .mod = mod,
- .hcpu = hcpu,
- };
-+ cpumask_var_t cpumask;
-+ cpumask_var_t cpumask_org;
-
- if (num_online_cpus() == 1)
- return -EBUSY;
-@@ -363,7 +648,34 @@
- if (!cpu_online(cpu))
- return -EINVAL;
-
-+ /* Move the downtaker off the unplug cpu */
-+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
-+ return -ENOMEM;
-+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
-+ free_cpumask_var(cpumask);
-+ return -ENOMEM;
-+ }
-+
-+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
-+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
-+ set_cpus_allowed_ptr(current, cpumask);
-+ free_cpumask_var(cpumask);
-+ migrate_disable();
-+ mycpu = smp_processor_id();
-+ if (mycpu == cpu) {
-+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
-+ migrate_enable();
-+ err = -EBUSY;
-+ goto restore_cpus;
-+ }
-+ migrate_enable();
-+
- cpu_hotplug_begin();
-+ err = cpu_unplug_begin(cpu);
-+ if (err) {
-+ printk("cpu_unplug_begin(%d) failed\n", cpu);
-+ goto out_cancel;
-+ }
-
- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
- if (err) {
-@@ -389,8 +701,12 @@
- #endif
- synchronize_rcu();
-
-+ __cpu_unplug_wait(cpu);
- smpboot_park_threads(cpu);
-
-+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
-+ cpu_unplug_sync(cpu);
-+
- /*
- * So now all preempt/rcu users must observe !cpu_active().
- */
-@@ -427,9 +743,14 @@
- check_for_tasks(cpu);
-
- out_release:
-+ cpu_unplug_done(cpu);
-+out_cancel:
- cpu_hotplug_done();
- if (!err)
- cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
-+restore_cpus:
-+ set_cpus_allowed_ptr(current, cpumask_org);
-+ free_cpumask_var(cpumask_org);
- return err;
- }
-
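The kernel-doc above says pin_current_cpu() backs migration-disabled regions; callers normally reach it through migrate_disable() rather than directly. A hedged sketch of the resulting pattern, assuming (as this series does elsewhere) that migrate_disable() pins and migrate_enable() unpins:

static void demo_pinned_section(void)
{
	migrate_disable();	/* implies pin_current_cpu() */
	/*
	 * This CPU cannot be unplugged here; sleeping is fine, the
	 * sync_unplug/N thread waits for the refcount to drop.
	 */
	migrate_enable();	/* unpin; may wake a pending unplug */
}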
-diff -Nur linux-4.1.13.orig/kernel/debug/kdb/kdb_io.c linux-4.1.13/kernel/debug/kdb/kdb_io.c
---- linux-4.1.13.orig/kernel/debug/kdb/kdb_io.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/debug/kdb/kdb_io.c 2015-11-29 09:23:09.633612589 +0100
-@@ -554,7 +554,6 @@
- int linecount;
- int colcount;
- int logging, saved_loglevel = 0;
-- int saved_trap_printk;
- int got_printf_lock = 0;
- int retlen = 0;
- int fnd, len;
-@@ -565,8 +564,6 @@
- unsigned long uninitialized_var(flags);
-
- preempt_disable();
-- saved_trap_printk = kdb_trap_printk;
-- kdb_trap_printk = 0;
-
- /* Serialize kdb_printf if multiple cpus try to write at once.
- * But if any cpu goes recursive in kdb, just print the output,
-@@ -855,7 +852,6 @@
- } else {
- __release(kdb_printf_lock);
- }
-- kdb_trap_printk = saved_trap_printk;
- preempt_enable();
- return retlen;
- }
-@@ -865,9 +861,11 @@
- va_list ap;
- int r;
-
-+ kdb_trap_printk++;
- va_start(ap, fmt);
- r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
- va_end(ap);
-+ kdb_trap_printk--;
-
- return r;
- }
-diff -Nur linux-4.1.13.orig/kernel/events/core.c linux-4.1.13/kernel/events/core.c
---- linux-4.1.13.orig/kernel/events/core.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/events/core.c 2015-11-29 09:23:09.633612589 +0100
-@@ -6925,6 +6925,7 @@
-
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
-+ hwc->hrtimer.irqsafe = 1;
-
- /*
- * Since hrtimers have a fixed rate, we can do a static freq->period
-diff -Nur linux-4.1.13.orig/kernel/exit.c linux-4.1.13/kernel/exit.c
---- linux-4.1.13.orig/kernel/exit.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/exit.c 2015-11-29 09:23:09.633612589 +0100
-@@ -144,7 +144,7 @@
- * Do this under ->siglock, we can race with another thread
- * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
- */
-- flush_sigqueue(&tsk->pending);
-+ flush_task_sigqueue(tsk);
- tsk->sighand = NULL;
- spin_unlock(&sighand->siglock);
-
-diff -Nur linux-4.1.13.orig/kernel/fork.c linux-4.1.13/kernel/fork.c
---- linux-4.1.13.orig/kernel/fork.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/fork.c 2015-11-29 09:23:09.633612589 +0100
-@@ -108,7 +108,7 @@
-
- DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-
--__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
-+DEFINE_RWLOCK(tasklist_lock); /* outer */
-
- #ifdef CONFIG_PROVE_RCU
- int lockdep_tasklist_lock_is_held(void)
-@@ -244,7 +244,9 @@
- if (atomic_dec_and_test(&sig->sigcnt))
- free_signal_struct(sig);
- }
--
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static
-+#endif
- void __put_task_struct(struct task_struct *tsk)
- {
- WARN_ON(!tsk->exit_state);
-@@ -260,7 +262,18 @@
- if (!profile_handoff_task(tsk))
- free_task(tsk);
- }
-+#ifndef CONFIG_PREEMPT_RT_BASE
- EXPORT_SYMBOL_GPL(__put_task_struct);
-+#else
-+void __put_task_struct_cb(struct rcu_head *rhp)
-+{
-+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
-+
-+ __put_task_struct(tsk);
-+
-+}
-+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-+#endif
-
- void __init __weak arch_task_cache_init(void) { }
-
-@@ -680,6 +693,19 @@
- }
- EXPORT_SYMBOL_GPL(__mmdrop);
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+/*
-+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
-+ * want another facility to make this work.
-+ */
-+void __mmdrop_delayed(struct rcu_head *rhp)
-+{
-+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-+
-+ __mmdrop(mm);
-+}
-+#endif
-+
- /*
- * Decrement the use count and release all resources for an mm.
- */
-@@ -1214,6 +1240,9 @@
- */
- static void posix_cpu_timers_init(struct task_struct *tsk)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ tsk->posix_timer_list = NULL;
-+#endif
- tsk->cputime_expires.prof_exp = 0;
- tsk->cputime_expires.virt_exp = 0;
- tsk->cputime_expires.sched_exp = 0;
-@@ -1338,6 +1367,7 @@
- spin_lock_init(&p->alloc_lock);
-
- init_sigpending(&p->pending);
-+ p->sigqueue_cache = NULL;
-
- p->utime = p->stime = p->gtime = 0;
- p->utimescaled = p->stimescaled = 0;
-@@ -1345,7 +1375,8 @@
- p->prev_cputime.utime = p->prev_cputime.stime = 0;
- #endif
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-- seqlock_init(&p->vtime_seqlock);
-+ raw_spin_lock_init(&p->vtime_lock);
-+ seqcount_init(&p->vtime_seq);
- p->vtime_snap = 0;
- p->vtime_snap_whence = VTIME_SLEEPING;
- #endif
-@@ -1396,6 +1427,9 @@
- p->hardirq_context = 0;
- p->softirq_context = 0;
- #endif
-+
-+ p->pagefault_disabled = 0;
-+
- #ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
-diff -Nur linux-4.1.13.orig/kernel/futex.c linux-4.1.13/kernel/futex.c
---- linux-4.1.13.orig/kernel/futex.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/futex.c 2015-11-29 09:23:09.633612589 +0100
-@@ -738,7 +738,9 @@
- * task still owns the PI-state:
- */
- if (head->next != next) {
-+ raw_spin_unlock_irq(&curr->pi_lock);
- spin_unlock(&hb->lock);
-+ raw_spin_lock_irq(&curr->pi_lock);
- continue;
- }
-
-@@ -1090,9 +1092,11 @@
-
- /*
- * The hash bucket lock must be held when this is called.
-- * Afterwards, the futex_q must not be accessed.
-+ * Afterwards, the futex_q must not be accessed. Callers
-+ * must ensure to later call wake_up_q() for the actual
-+ * wakeups to occur.
- */
--static void wake_futex(struct futex_q *q)
-+static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
- {
- struct task_struct *p = q->task;
-
-@@ -1100,14 +1104,10 @@
- return;
-
- /*
-- * We set q->lock_ptr = NULL _before_ we wake up the task. If
-- * a non-futex wake up happens on another CPU then the task
-- * might exit and p would dereference a non-existing task
-- * struct. Prevent this by holding a reference on p across the
-- * wake up.
-+	 * Queue the task for a later wakeup, issued after we've released
-+	 * the hb->lock. wake_q_add() grabs a reference to p.
- */
-- get_task_struct(p);
--
-+ wake_q_add(wake_q, p);
- __unqueue_futex(q);
- /*
- * The waiting task can free the futex_q as soon as
-@@ -1117,16 +1117,15 @@
- */
- smp_wmb();
- q->lock_ptr = NULL;
--
-- wake_up_state(p, TASK_NORMAL);
-- put_task_struct(p);
- }
-
--static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
-+static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
-+ struct futex_hash_bucket *hb)
- {
- struct task_struct *new_owner;
- struct futex_pi_state *pi_state = this->pi_state;
- u32 uninitialized_var(curval), newval;
-+ bool deboost;
- int ret = 0;
-
- if (!pi_state)
-@@ -1178,7 +1177,17 @@
- raw_spin_unlock_irq(&new_owner->pi_lock);
-
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-- rt_mutex_unlock(&pi_state->pi_mutex);
-+
-+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex);
-+
-+ /*
-+ * We deboost after dropping hb->lock. That prevents a double
-+ * wakeup on RT.
-+ */
-+ spin_unlock(&hb->lock);
-+
-+ if (deboost)
-+ rt_mutex_adjust_prio(current);
-
- return 0;
- }
-@@ -1217,6 +1226,7 @@
- struct futex_q *this, *next;
- union futex_key key = FUTEX_KEY_INIT;
- int ret;
-+ WAKE_Q(wake_q);
-
- if (!bitset)
- return -EINVAL;
-@@ -1244,13 +1254,14 @@
- if (!(this->bitset & bitset))
- continue;
-
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++ret >= nr_wake)
- break;
- }
- }
-
- spin_unlock(&hb->lock);
-+ wake_up_q(&wake_q);
- out_put_key:
- put_futex_key(&key);
- out:
-@@ -1269,6 +1280,7 @@
- struct futex_hash_bucket *hb1, *hb2;
- struct futex_q *this, *next;
- int ret, op_ret;
-+ WAKE_Q(wake_q);
-
- retry:
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
-@@ -1320,7 +1332,7 @@
- ret = -EINVAL;
- goto out_unlock;
- }
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++ret >= nr_wake)
- break;
- }
-@@ -1334,7 +1346,7 @@
- ret = -EINVAL;
- goto out_unlock;
- }
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++op_ret >= nr_wake2)
- break;
- }
-@@ -1344,6 +1356,7 @@
-
- out_unlock:
- double_unlock_hb(hb1, hb2);
-+ wake_up_q(&wake_q);
- out_put_keys:
- put_futex_key(&key2);
- out_put_key1:
-@@ -1503,6 +1516,7 @@
- struct futex_pi_state *pi_state = NULL;
- struct futex_hash_bucket *hb1, *hb2;
- struct futex_q *this, *next;
-+ WAKE_Q(wake_q);
-
- if (requeue_pi) {
- /*
-@@ -1679,7 +1693,7 @@
- * woken by futex_unlock_pi().
- */
- if (++task_count <= nr_wake && !requeue_pi) {
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- continue;
- }
-
-@@ -1705,6 +1719,16 @@
- requeue_pi_wake_futex(this, &key2, hb2);
- drop_count++;
- continue;
-+ } else if (ret == -EAGAIN) {
-+ /*
-+ * Waiter was woken by timeout or
-+ * signal and has set pi_blocked_on to
-+ * PI_WAKEUP_INPROGRESS before we
-+ * tried to enqueue it on the rtmutex.
-+ */
-+ this->pi_state = NULL;
-+ free_pi_state(pi_state);
-+ continue;
- } else if (ret) {
- /* -EDEADLK */
- this->pi_state = NULL;
-@@ -1719,6 +1743,7 @@
- out_unlock:
- free_pi_state(pi_state);
- double_unlock_hb(hb1, hb2);
-+ wake_up_q(&wake_q);
- hb_waiters_dec(hb2);
-
- /*
-@@ -2412,13 +2437,26 @@
- */
- match = futex_top_waiter(hb, &key);
- if (match) {
-- ret = wake_futex_pi(uaddr, uval, match);
-+ ret = wake_futex_pi(uaddr, uval, match, hb);
-+
-+ /*
-+ * In case of success wake_futex_pi dropped the hash
-+ * bucket lock.
-+ */
-+ if (!ret)
-+ goto out_putkey;
-+
- /*
- * The atomic access to the futex value generated a
- * pagefault, so retry the user-access and the wakeup:
- */
- if (ret == -EFAULT)
- goto pi_faulted;
-+
-+ /*
-+ * wake_futex_pi has detected invalid state. Tell user
-+ * space.
-+ */
- goto out_unlock;
- }
-
-@@ -2439,6 +2477,7 @@
-
- out_unlock:
- spin_unlock(&hb->lock);
-+out_putkey:
- put_futex_key(&key);
- return ret;
-
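Distilled from the wake_futex_pi() hunk above, the unlock ordering has three steps; deboosting before dropping hb->lock would let the freshly boosted waiter preempt the releaser while it still holds the lock. A hedged restatement as a standalone helper (demo_* name invented, the calls are the ones from the hunk):

static void demo_unlock_pi(struct futex_hash_bucket *hb,
			   struct futex_pi_state *pi_state)
{
	bool deboost;

	/* 1) hand over the rtmutex, note whether a deboost is due */
	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex);
	/* 2) drop the hash bucket lock first ... */
	spin_unlock(&hb->lock);
	/* 3) ... then lower our own priority */
	if (deboost)
		rt_mutex_adjust_prio(current);
}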
-@@ -2549,7 +2588,7 @@
- struct hrtimer_sleeper timeout, *to = NULL;
- struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
-- struct futex_hash_bucket *hb;
-+ struct futex_hash_bucket *hb, *hb2;
- union futex_key key2 = FUTEX_KEY_INIT;
- struct futex_q q = futex_q_init;
- int res, ret;
-@@ -2574,10 +2613,7 @@
- * The waiter is allocated on our stack, manipulated by the requeue
- * code while we sleep on uaddr.
- */
-- debug_rt_mutex_init_waiter(&rt_waiter);
-- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
-- RB_CLEAR_NODE(&rt_waiter.tree_entry);
-- rt_waiter.task = NULL;
-+ rt_mutex_init_waiter(&rt_waiter, false);
-
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
- if (unlikely(ret != 0))
-@@ -2608,20 +2644,55 @@
- /* Queue the futex_q, drop the hb lock, wait for wakeup. */
- futex_wait_queue_me(hb, &q, to);
-
-- spin_lock(&hb->lock);
-- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-- spin_unlock(&hb->lock);
-- if (ret)
-- goto out_put_keys;
-+ /*
-+ * On RT we must avoid races with requeue and trying to block
-+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
-+ * serializing access to pi_blocked_on with pi_lock.
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ if (current->pi_blocked_on) {
-+ /*
-+ * We have been requeued or are in the process of
-+ * being requeued.
-+ */
-+ raw_spin_unlock_irq(&current->pi_lock);
-+ } else {
-+ /*
-+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
-+ * prevents a concurrent requeue from moving us to the
-+ * uaddr2 rtmutex. After that we can safely acquire
-+ * (and possibly block on) hb->lock.
-+ */
-+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ spin_lock(&hb->lock);
-+
-+ /*
-+ * Clean up pi_blocked_on. We might leak it otherwise
-+ * when we succeeded with the hb->lock in the fast
-+ * path.
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ current->pi_blocked_on = NULL;
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-+ spin_unlock(&hb->lock);
-+ if (ret)
-+ goto out_put_keys;
-+ }
-
- /*
-- * In order for us to be here, we know our q.key == key2, and since
-- * we took the hb->lock above, we also know that futex_requeue() has
-- * completed and we no longer have to concern ourselves with a wakeup
-- * race with the atomic proxy lock acquisition by the requeue code. The
-- * futex_requeue dropped our key1 reference and incremented our key2
-- * reference count.
-+ * In order to be here, we have either been requeued, are in
-+ * the process of being requeued, or requeue successfully
-+ * acquired uaddr2 on our behalf. If pi_blocked_on was
-+ * non-null above, we may be racing with a requeue. Do not
-+ * rely on q->lock_ptr to be hb2->lock until after blocking on
-+ * hb->lock or hb2->lock. The futex_requeue dropped our key1
-+ * reference and incremented our key2 reference count.
- */
-+ hb2 = hash_futex(&key2);
-
- /* Check if the requeue code acquired the second futex for us. */
- if (!q.rt_waiter) {
-@@ -2630,9 +2701,10 @@
- * did a lock-steal - fix up the PI-state in that case.
- */
- if (q.pi_state && (q.pi_state->owner != current)) {
-- spin_lock(q.lock_ptr);
-+ spin_lock(&hb2->lock);
-+ BUG_ON(&hb2->lock != q.lock_ptr);
- ret = fixup_pi_state_owner(uaddr2, &q, current);
-- spin_unlock(q.lock_ptr);
-+ spin_unlock(&hb2->lock);
- }
- } else {
- /*
-@@ -2645,7 +2717,8 @@
- ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
- debug_rt_mutex_free_waiter(&rt_waiter);
-
-- spin_lock(q.lock_ptr);
-+ spin_lock(&hb2->lock);
-+ BUG_ON(&hb2->lock != q.lock_ptr);
- /*
- * Fixup the pi_state owner and possibly acquire the lock if we
- * haven't already.
-diff -Nur linux-4.1.13.orig/kernel/irq/handle.c linux-4.1.13/kernel/irq/handle.c
---- linux-4.1.13.orig/kernel/irq/handle.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/irq/handle.c 2015-11-29 09:23:09.633612589 +0100
-@@ -133,6 +133,8 @@
- irqreturn_t
- handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
- {
-+ struct pt_regs *regs = get_irq_regs();
-+ u64 ip = regs ? instruction_pointer(regs) : 0;
- irqreturn_t retval = IRQ_NONE;
- unsigned int flags = 0, irq = desc->irq_data.irq;
-
-@@ -173,7 +175,11 @@
- action = action->next;
- } while (action);
-
-- add_interrupt_randomness(irq, flags);
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ add_interrupt_randomness(irq, flags, ip);
-+#else
-+ desc->random_ip = ip;
-+#endif
-
- if (!noirqdebug)
- note_interrupt(irq, desc, retval);
-diff -Nur linux-4.1.13.orig/kernel/irq/manage.c linux-4.1.13/kernel/irq/manage.c
---- linux-4.1.13.orig/kernel/irq/manage.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/irq/manage.c 2015-11-29 09:23:09.633612589 +0100
-@@ -22,6 +22,7 @@
- #include "internals.h"
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
-+# ifndef CONFIG_PREEMPT_RT_BASE
- __read_mostly bool force_irqthreads;
-
- static int __init setup_forced_irqthreads(char *arg)
-@@ -30,6 +31,7 @@
- return 0;
- }
- early_param("threadirqs", setup_forced_irqthreads);
-+# endif
- #endif
-
- static void __synchronize_hardirq(struct irq_desc *desc)
-@@ -179,6 +181,62 @@
- irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-+static struct task_struct *set_affinity_helper;
-+static LIST_HEAD(affinity_list);
-+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-+
-+static int set_affinity_thread(void *unused)
-+{
-+ while (1) {
-+ struct irq_affinity_notify *notify;
-+ int empty;
-+
-+ set_current_state(TASK_INTERRUPTIBLE);
-+
-+ raw_spin_lock_irq(&affinity_list_lock);
-+ empty = list_empty(&affinity_list);
-+ raw_spin_unlock_irq(&affinity_list_lock);
-+
-+ if (empty)
-+ schedule();
-+ if (kthread_should_stop())
-+ break;
-+ set_current_state(TASK_RUNNING);
-+try_next:
-+ notify = NULL;
-+
-+ raw_spin_lock_irq(&affinity_list_lock);
-+ if (!list_empty(&affinity_list)) {
-+ notify = list_first_entry(&affinity_list,
-+ struct irq_affinity_notify, list);
-+ list_del_init(&notify->list);
-+ }
-+ raw_spin_unlock_irq(&affinity_list_lock);
-+
-+ if (!notify)
-+ continue;
-+ _irq_affinity_notify(notify);
-+ goto try_next;
-+ }
-+ return 0;
-+}
-+
-+static void init_helper_thread(void)
-+{
-+ if (set_affinity_helper)
-+ return;
-+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
-+ "affinity-cb");
-+ WARN_ON(IS_ERR(set_affinity_helper));
-+}
-+#else
-+
-+static inline void init_helper_thread(void) { }
-+
-+#endif
-+
- int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
- {
-@@ -218,7 +276,17 @@
-
- if (desc->affinity_notify) {
- kref_get(&desc->affinity_notify->kref);
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_lock(&affinity_list_lock);
-+ if (list_empty(&desc->affinity_notify->list))
-+ list_add_tail(&affinity_list,
-+ &desc->affinity_notify->list);
-+ raw_spin_unlock(&affinity_list_lock);
-+ wake_up_process(set_affinity_helper);
-+#else
- schedule_work(&desc->affinity_notify->work);
-+#endif
- }
- irqd_set(data, IRQD_AFFINITY_SET);
-
-@@ -256,10 +324,8 @@
- }
- EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-
--static void irq_affinity_notify(struct work_struct *work)
-+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
- {
-- struct irq_affinity_notify *notify =
-- container_of(work, struct irq_affinity_notify, work);
- struct irq_desc *desc = irq_to_desc(notify->irq);
- cpumask_var_t cpumask;
- unsigned long flags;
-@@ -281,6 +347,13 @@
- kref_put(&notify->kref, notify->release);
- }
-
-+static void irq_affinity_notify(struct work_struct *work)
-+{
-+ struct irq_affinity_notify *notify =
-+ container_of(work, struct irq_affinity_notify, work);
-+ _irq_affinity_notify(notify);
-+}
-+
- /**
- * irq_set_affinity_notifier - control notification of IRQ affinity changes
- * @irq: Interrupt for which to enable/disable notification
-@@ -310,6 +383,8 @@
- notify->irq = irq;
- kref_init(&notify->kref);
- INIT_WORK(&notify->work, irq_affinity_notify);
-+ INIT_LIST_HEAD(&notify->list);
-+ init_helper_thread();
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
-@@ -697,6 +772,12 @@
- return IRQ_NONE;
- }
-
-+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
-+{
-+ WARN(1, "Secondary action handler called for irq %d\n", irq);
-+ return IRQ_NONE;
-+}
-+
- static int irq_wait_for_interrupt(struct irqaction *action)
- {
- set_current_state(TASK_INTERRUPTIBLE);
-@@ -723,7 +804,8 @@
- static void irq_finalize_oneshot(struct irq_desc *desc,
- struct irqaction *action)
- {
-- if (!(desc->istate & IRQS_ONESHOT))
-+ if (!(desc->istate & IRQS_ONESHOT) ||
-+ action->handler == irq_forced_secondary_handler)
- return;
- again:
- chip_bus_lock(desc);
-@@ -825,7 +907,15 @@
- local_bh_disable();
- ret = action->thread_fn(action->irq, action->dev_id);
- irq_finalize_oneshot(desc, action);
-- local_bh_enable();
-+ /*
-+ * Interrupts which have real time requirements can be set up
-+ * to avoid softirq processing in the thread handler. This is
-+ * safe as these interrupts do not raise soft interrupts.
-+ */
-+ if (irq_settings_no_softirq_call(desc))
-+ _local_bh_enable();
-+ else
-+ local_bh_enable();
- return ret;
- }
-
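IRQF_NO_SOFTIRQ_CALL, checked above via irq_settings_no_softirq_call(), is specific to this series. A hedged sketch of how a latency-critical driver would use it; the flag is only safe as long as the handler raises no soft interrupts itself (demo_* names invented):

static irqreturn_t demo_thread_fn(int irq, void *dev)
{
	/* must not raise softirqs: they would not be processed here */
	return IRQ_HANDLED;
}

static int demo_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, NULL, demo_thread_fn,
				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				    "demo-rt", dev);
}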
-@@ -877,6 +967,18 @@
- irq_finalize_oneshot(desc, action);
- }
-
-+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
-+{
-+ struct irqaction *secondary = action->secondary;
-+
-+ if (WARN_ON_ONCE(!secondary))
-+ return;
-+
-+ raw_spin_lock_irq(&desc->lock);
-+ __irq_wake_thread(desc, secondary);
-+ raw_spin_unlock_irq(&desc->lock);
-+}
-+
- /*
- * Interrupt handler thread
- */
-@@ -907,7 +1009,15 @@
- action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
-+ if (action_ret == IRQ_WAKE_THREAD)
-+ irq_wake_secondary(desc, action);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_disable();
-+ add_interrupt_randomness(action->irq, 0,
-+ desc->random_ip ^ (unsigned long) action);
-+ migrate_enable();
-+#endif
- wake_threads_waitq(desc);
- }
-
-@@ -951,20 +1061,36 @@
- }
- EXPORT_SYMBOL_GPL(irq_wake_thread);
-
--static void irq_setup_forced_threading(struct irqaction *new)
-+static int irq_setup_forced_threading(struct irqaction *new)
- {
- if (!force_irqthreads)
-- return;
-+ return 0;
- if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-- return;
-+ return 0;
-
- new->flags |= IRQF_ONESHOT;
-
-- if (!new->thread_fn) {
-- set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-- new->thread_fn = new->handler;
-- new->handler = irq_default_primary_handler;
-- }
-+ /*
-+ * Handle the case where we have a real primary handler and a
-+	 * thread handler. We force-thread both of them by creating a
-+ * secondary action.
-+ */
-+ if (new->handler != irq_default_primary_handler && new->thread_fn) {
-+ /* Allocate the secondary action */
-+ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
-+ if (!new->secondary)
-+ return -ENOMEM;
-+ new->secondary->handler = irq_forced_secondary_handler;
-+ new->secondary->thread_fn = new->thread_fn;
-+ new->secondary->dev_id = new->dev_id;
-+ new->secondary->irq = new->irq;
-+ new->secondary->name = new->name;
-+ }
-+ /* Deal with the primary handler */
-+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-+ new->thread_fn = new->handler;
-+ new->handler = irq_default_primary_handler;
-+ return 0;
- }
-
- static int irq_request_resources(struct irq_desc *desc)
-@@ -984,6 +1110,48 @@
- c->irq_release_resources(d);
- }
-
-+static int
-+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
-+{
-+ struct task_struct *t;
-+ struct sched_param param = {
-+ .sched_priority = MAX_USER_RT_PRIO/2,
-+ };
-+
-+ if (!secondary) {
-+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-+ new->name);
-+ } else {
-+ t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
-+ new->name);
-+ param.sched_priority += 1;
-+ }
-+
-+ if (IS_ERR(t))
-+ return PTR_ERR(t);
-+
-+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-+
-+ /*
-+ * We keep the reference to the task struct even if
-+ * the thread dies to avoid that the interrupt code
-+ * references an already freed task_struct.
-+ */
-+ get_task_struct(t);
-+ new->thread = t;
-+ /*
-+ * Tell the thread to set its affinity. This is
-+ * important for shared interrupt handlers as we do
-+ * not invoke setup_affinity() for the secondary
-+ * handlers as everything is already set up. Even for
-+ * interrupts marked with IRQF_NO_BALANCE this is
-+ * correct as we want the thread to move to the cpu(s)
-+ * on which the requesting code placed the interrupt.
-+ */
-+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
-+ return 0;
-+}
-+
- /*
- * Internal function to register an irqaction - typically used to
- * allocate special interrupts that are part of the architecture.
-@@ -1004,6 +1172,8 @@
- if (!try_module_get(desc->owner))
- return -ENODEV;
-
-+ new->irq = irq;
-+
- /*
- * Check whether the interrupt nests into another interrupt
- * thread.
-@@ -1021,8 +1191,11 @@
- */
- new->handler = irq_nested_primary_handler;
- } else {
-- if (irq_settings_can_thread(desc))
-- irq_setup_forced_threading(new);
-+ if (irq_settings_can_thread(desc)) {
-+ ret = irq_setup_forced_threading(new);
-+ if (ret)
-+ goto out_mput;
-+ }
- }
-
- /*
-@@ -1031,37 +1204,14 @@
- * thread.
- */
- if (new->thread_fn && !nested) {
-- struct task_struct *t;
-- static const struct sched_param param = {
-- .sched_priority = MAX_USER_RT_PRIO/2,
-- };
--
-- t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-- new->name);
-- if (IS_ERR(t)) {
-- ret = PTR_ERR(t);
-+ ret = setup_irq_thread(new, irq, false);
-+ if (ret)
- goto out_mput;
-+ if (new->secondary) {
-+ ret = setup_irq_thread(new->secondary, irq, true);
-+ if (ret)
-+ goto out_thread;
- }
--
-- sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
--
-- /*
-- * We keep the reference to the task struct even if
-- * the thread dies to avoid that the interrupt code
-- * references an already freed task_struct.
-- */
-- get_task_struct(t);
-- new->thread = t;
-- /*
-- * Tell the thread to set its affinity. This is
-- * important for shared interrupt handlers as we do
-- * not invoke setup_affinity() for the secondary
-- * handlers as everything is already set up. Even for
-- * interrupts marked with IRQF_NO_BALANCE this is
-- * correct as we want the thread to move to the cpu(s)
-- * on which the requesting code placed the interrupt.
-- */
-- set_bit(IRQTF_AFFINITY, &new->thread_flags);
- }
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-@@ -1221,6 +1371,9 @@
- irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
- }
-
-+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
-+ irq_settings_set_no_softirq_call(desc);
-+
- /* Set default affinity mask once everything is setup */
- setup_affinity(irq, desc, mask);
-
-@@ -1234,7 +1387,6 @@
- irq, nmsk, omsk);
- }
-
-- new->irq = irq;
- *old_ptr = new;
-
- irq_pm_install_action(desc, new);
-@@ -1260,6 +1412,8 @@
- */
- if (new->thread)
- wake_up_process(new->thread);
-+ if (new->secondary)
-+ wake_up_process(new->secondary->thread);
-
- register_irq_proc(irq, desc);
- new->dir = NULL;
-@@ -1290,6 +1444,13 @@
- kthread_stop(t);
- put_task_struct(t);
- }
-+ if (new->secondary && new->secondary->thread) {
-+ struct task_struct *t = new->secondary->thread;
-+
-+ new->secondary->thread = NULL;
-+ kthread_stop(t);
-+ put_task_struct(t);
-+ }
- out_mput:
- module_put(desc->owner);
- return ret;
-@@ -1397,9 +1558,14 @@
- if (action->thread) {
- kthread_stop(action->thread);
- put_task_struct(action->thread);
-+ if (action->secondary && action->secondary->thread) {
-+ kthread_stop(action->secondary->thread);
-+ put_task_struct(action->secondary->thread);
-+ }
- }
-
- module_put(desc->owner);
-+ kfree(action->secondary);
- return action;
- }
-
-@@ -1543,8 +1709,10 @@
- retval = __setup_irq(irq, desc, action);
- chip_bus_sync_unlock(desc);
-
-- if (retval)
-+ if (retval) {
-+ kfree(action->secondary);
- kfree(action);
-+ }
-
- #ifdef CONFIG_DEBUG_SHIRQ_FIXME
- if (!retval && (irqflags & IRQF_SHARED)) {
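The kernel/irq/manage.c hunks above are the core of forced interrupt threading: when a driver supplies both a real primary handler and a thread function, irq_setup_forced_threading() now allocates a synthetic "secondary" irqaction, and both run as SCHED_FIFO kthreads (irq/N-<name> and irq/N-s-<name>, the secondary one priority step higher). Drivers keep using the stock API. A minimal sketch, assuming a hypothetical device; the IRQ number, names and handler bodies are illustrative, not from the patch:

    #include <linux/interrupt.h>

    static irqreturn_t demo_primary(int irq, void *dev_id)
    {
        /* Quick hardware ack; on a forced-threading (or RT) kernel
         * this too runs in the irq/N-demo kthread. Returning
         * IRQ_WAKE_THREAD wakes the secondary thread. */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread(int irq, void *dev_id)
    {
        /* May sleep; runs in irq/N-s-demo when threading is forced. */
        return IRQ_HANDLED;
    }

    static int demo_attach(unsigned int irq, void *dev)
    {
        return request_threaded_irq(irq, demo_primary, demo_thread,
                                    IRQF_ONESHOT, "demo", dev);
    }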
-diff -Nur linux-4.1.13.orig/kernel/irq/settings.h linux-4.1.13/kernel/irq/settings.h
---- linux-4.1.13.orig/kernel/irq/settings.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/irq/settings.h 2015-11-29 09:23:09.637612322 +0100
-@@ -15,6 +15,7 @@
- _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
- _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
- _IRQ_IS_POLLED = IRQ_IS_POLLED,
-+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
- _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
- };
-
-@@ -28,6 +29,7 @@
- #define IRQ_NESTED_THREAD GOT_YOU_MORON
- #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
- #define IRQ_IS_POLLED GOT_YOU_MORON
-+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
- #undef IRQF_MODIFY_MASK
- #define IRQF_MODIFY_MASK GOT_YOU_MORON
-
-@@ -38,6 +40,16 @@
- desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
- }
-
-+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
-+{
-+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
-+}
-+
-+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
-+{
-+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
-+}
-+
- static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
- {
- return desc->status_use_accessors & _IRQ_PER_CPU;
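kernel/irq/settings.h follows the existing status_use_accessors pattern to add the _IRQ_NO_SOFTIRQ_CALL bit plus its test/set helpers, which the threaded-handler path above consults before choosing between _local_bh_enable() and local_bh_enable(). A driver opts in through the matching request flag; a hedged sketch (IRQF_NO_SOFTIRQ_CALL only exists with this patch applied, and the handler and names are hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t rt_critical_thread(int irq, void *dev_id)
    {
        /* Latency-critical work; must not raise softirqs itself. */
        return IRQ_HANDLED;
    }

    static int rt_critical_attach(unsigned int irq, void *dev)
    {
        /* IRQF_NO_SOFTIRQ_CALL makes the thread handler return via
         * _local_bh_enable(), so pending softirqs are not processed
         * in this thread's context. */
        return request_threaded_irq(irq, NULL, rt_critical_thread,
                                    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
                                    "rt-critical", dev);
    }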
-diff -Nur linux-4.1.13.orig/kernel/irq/spurious.c linux-4.1.13/kernel/irq/spurious.c
---- linux-4.1.13.orig/kernel/irq/spurious.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/irq/spurious.c 2015-11-29 09:23:09.637612322 +0100
-@@ -444,6 +444,10 @@
-
- static int __init irqfixup_setup(char *str)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
-+ return 1;
-+#endif
- irqfixup = 1;
- printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
- printk(KERN_WARNING "This may impact system performance.\n");
-@@ -456,6 +460,10 @@
-
- static int __init irqpoll_setup(char *str)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
-+ return 1;
-+#endif
- irqfixup = 2;
- printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
- "enabled\n");
-diff -Nur linux-4.1.13.orig/kernel/irq_work.c linux-4.1.13/kernel/irq_work.c
---- linux-4.1.13.orig/kernel/irq_work.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/irq_work.c 2015-11-29 09:23:09.637612322 +0100
-@@ -17,6 +17,7 @@
- #include <linux/cpu.h>
- #include <linux/notifier.h>
- #include <linux/smp.h>
-+#include <linux/interrupt.h>
- #include <asm/processor.h>
-
-
-@@ -65,6 +66,8 @@
- */
- bool irq_work_queue_on(struct irq_work *work, int cpu)
- {
-+ struct llist_head *list;
-+
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
-@@ -75,7 +78,12 @@
- if (!irq_work_claim(work))
- return false;
-
-- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-+ list = &per_cpu(lazy_list, cpu);
-+ else
-+ list = &per_cpu(raised_list, cpu);
-+
-+ if (llist_add(&work->llnode, list))
- arch_send_call_function_single_ipi(cpu);
-
- return true;
-@@ -86,6 +94,9 @@
- /* Enqueue the irq work @work on the current CPU */
- bool irq_work_queue(struct irq_work *work)
- {
-+ struct llist_head *list;
-+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
-+
- /* Only queue if not already pending */
- if (!irq_work_claim(work))
- return false;
-@@ -93,13 +104,15 @@
- /* Queue the entry and raise the IPI if needed. */
- preempt_disable();
-
-- /* If the work is "lazy", handle it from next tick if any */
-- if (work->flags & IRQ_WORK_LAZY) {
-- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-- tick_nohz_tick_stopped())
-- arch_irq_work_raise();
-- } else {
-- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-+ lazy_work = work->flags & IRQ_WORK_LAZY;
-+
-+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
-+ list = this_cpu_ptr(&lazy_list);
-+ else
-+ list = this_cpu_ptr(&raised_list);
-+
-+ if (llist_add(&work->llnode, list)) {
-+ if (!lazy_work || tick_nohz_tick_stopped())
- arch_irq_work_raise();
- }
-
-@@ -116,9 +129,8 @@
- raised = this_cpu_ptr(&raised_list);
- lazy = this_cpu_ptr(&lazy_list);
-
-- if (llist_empty(raised) || arch_irq_work_has_interrupt())
-- if (llist_empty(lazy))
-- return false;
-+ if (llist_empty(raised) && llist_empty(lazy))
-+ return false;
-
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -132,7 +144,7 @@
- struct irq_work *work;
- struct llist_node *llnode;
-
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
-
- if (llist_empty(list))
- return;
-@@ -169,7 +181,16 @@
- void irq_work_run(void)
- {
- irq_work_run_list(this_cpu_ptr(&raised_list));
-- irq_work_run_list(this_cpu_ptr(&lazy_list));
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
-+ /*
-+ * NOTE: we raise softirq via IPI for safety,
-+ * and execute in irq_work_tick() to move the
-+ * overhead from hard to soft irq context.
-+ */
-+ if (!llist_empty(this_cpu_ptr(&lazy_list)))
-+ raise_softirq(TIMER_SOFTIRQ);
-+ } else
-+ irq_work_run_list(this_cpu_ptr(&lazy_list));
- }
- EXPORT_SYMBOL_GPL(irq_work_run);
-
-@@ -179,8 +200,17 @@
-
- if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
- irq_work_run_list(raised);
-+
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
-+ irq_work_run_list(this_cpu_ptr(&lazy_list));
-+}
-+
-+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-+void irq_work_tick_soft(void)
-+{
- irq_work_run_list(this_cpu_ptr(&lazy_list));
- }
-+#endif
-
- /*
- * Synchronize against the irq_work @entry, ensures the entry is not
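The kernel/irq_work.c rework splits queued work by context: on PREEMPT_RT_FULL everything without IRQ_WORK_HARD_IRQ goes to the per-CPU lazy_list and is drained from the timer softirq via the new irq_work_tick_soft(), keeping hard-interrupt time bounded. Users of the API are untouched; a short sketch (the callback and its trigger are illustrative):

    #include <linux/irq_work.h>

    static void demo_work_fn(struct irq_work *work)
    {
        /* On an RT kernel this normally runs from the timer softirq
         * (lazy_list); set IRQ_WORK_HARD_IRQ in work->flags to keep
         * it in hard-irq context. */
    }

    static struct irq_work demo_work = { .func = demo_work_fn };

    static void demo_poke(void)
    {
        /* Safe from hard-irq/NMI context; the claim logic queues the
         * item at most once until the callback has run. */
        irq_work_queue(&demo_work);
    }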
-diff -Nur linux-4.1.13.orig/kernel/Kconfig.locks linux-4.1.13/kernel/Kconfig.locks
---- linux-4.1.13.orig/kernel/Kconfig.locks 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/Kconfig.locks 2015-11-29 09:23:09.629612854 +0100
-@@ -225,11 +225,11 @@
-
- config MUTEX_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config RWSEM_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config LOCK_SPIN_ON_OWNER
- def_bool y
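Kconfig.locks switches MUTEX_SPIN_ON_OWNER and RWSEM_SPIN_ON_OWNER off for PREEMPT_RT_FULL: optimistic spinning only pays off while the lock owner is guaranteed to stay running, and with sleeping locks and full preemption that guarantee is gone. Stripped to its core, the test the heuristic rests on looks roughly like this (illustrative only; the real code in kernel/locking/mutex.c carries more checks):

    #include <linux/sched.h>

    /* Worth spinning only while the owner still executes on a CPU;
     * on RT the owner can be preempted at any point, so the option
     * is compiled out rather than left unreliable. */
    static bool demo_owner_running(struct task_struct *owner)
    {
        return READ_ONCE(owner->on_cpu);
    }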
-diff -Nur linux-4.1.13.orig/kernel/Kconfig.preempt linux-4.1.13/kernel/Kconfig.preempt
---- linux-4.1.13.orig/kernel/Kconfig.preempt 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/Kconfig.preempt 2015-11-29 09:23:09.629612854 +0100
-@@ -1,3 +1,16 @@
-+config PREEMPT
-+ bool
-+ select PREEMPT_COUNT
-+
-+config PREEMPT_RT_BASE
-+ bool
-+ select PREEMPT
-+
-+config HAVE_PREEMPT_LAZY
-+ bool
-+
-+config PREEMPT_LAZY
-+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
-
- choice
- prompt "Preemption Model"
-@@ -33,9 +46,9 @@
-
- Select this if you are building a kernel for a desktop system.
-
--config PREEMPT
-+config PREEMPT__LL
- bool "Preemptible Kernel (Low-Latency Desktop)"
-- select PREEMPT_COUNT
-+ select PREEMPT
- select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
- help
- This option reduces the latency of the kernel by making
-@@ -52,6 +65,22 @@
- embedded system with latency requirements in the milliseconds
- range.
-
-+config PREEMPT_RTB
-+ bool "Preemptible Kernel (Basic RT)"
-+ select PREEMPT_RT_BASE
-+ help
-+	  This option is basically the same as the Low-Latency Desktop
-+	  option, but enables changes which are preliminary for the fully
-+	  preemptible RT kernel.
-+
-+config PREEMPT_RT_FULL
-+ bool "Fully Preemptible Kernel (RT)"
-+ depends on IRQ_FORCED_THREADING
-+ select PREEMPT_RT_BASE
-+ select PREEMPT_RCU
-+ help
-+	  Select this to enable the complete set of real-time
-+	  substitutions: sleeping spinlocks and rwlocks, forced
-+	  interrupt threading and fully preemptible kernel code paths.
-+
- endchoice
-
- config PREEMPT_COUNT
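Kconfig.preempt demotes PREEMPT to a hidden symbol selected by the visible models and introduces PREEMPT_RT_BASE, PREEMPT_RTB and PREEMPT_RT_FULL (plus the PREEMPT_LAZY plumbing). The rest of the patch keys off those symbols, either with preprocessor guards or with IS_ENABLED() so RT-only branches fold away at compile time; a tiny illustration (the function is hypothetical):

    #include <linux/kconfig.h>

    static bool demo_is_rt_kernel(void)
    {
        /* IS_ENABLED() is usable in ordinary C expressions, so the
         * branch vanishes entirely on !PREEMPT_RT_FULL builds. */
        return IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
    }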
-diff -Nur linux-4.1.13.orig/kernel/ksysfs.c linux-4.1.13/kernel/ksysfs.c
---- linux-4.1.13.orig/kernel/ksysfs.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/ksysfs.c 2015-11-29 09:23:09.637612322 +0100
-@@ -136,6 +136,15 @@
-
- #endif /* CONFIG_KEXEC */
-
-+#if defined(CONFIG_PREEMPT_RT_FULL)
-+static ssize_t realtime_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%d\n", 1);
-+}
-+KERNEL_ATTR_RO(realtime);
-+#endif
-+
- /* whether file capabilities are enabled */
- static ssize_t fscaps_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-@@ -203,6 +212,9 @@
- &vmcoreinfo_attr.attr,
- #endif
- &rcu_expedited_attr.attr,
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ &realtime_attr.attr,
-+#endif
- NULL
- };
-
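kernel/ksysfs.c exports a read-only /sys/kernel/realtime attribute that reads "1" on PREEMPT_RT_FULL kernels and is simply absent otherwise, giving user space a cheap probe. A stand-alone checker (the attribute path comes from the hunk; everything else is illustrative):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/realtime", "r");
        int rt = 0;

        /* The file only exists on CONFIG_PREEMPT_RT_FULL=y kernels
         * carrying this patch. */
        if (f) {
            if (fscanf(f, "%d", &rt) != 1)
                rt = 0;
            fclose(f);
        }
        printf("PREEMPT_RT_FULL kernel: %s\n", rt == 1 ? "yes" : "no");
        return 0;
    }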
-diff -Nur linux-4.1.13.orig/kernel/locking/lglock.c linux-4.1.13/kernel/locking/lglock.c
---- linux-4.1.13.orig/kernel/locking/lglock.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/lglock.c 2015-11-29 09:23:09.637612322 +0100
-@@ -4,6 +4,15 @@
- #include <linux/cpu.h>
- #include <linux/string.h>
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define lg_lock_ptr arch_spinlock_t
-+# define lg_do_lock(l) arch_spin_lock(l)
-+# define lg_do_unlock(l) arch_spin_unlock(l)
-+#else
-+# define lg_lock_ptr struct rt_mutex
-+# define lg_do_lock(l) __rt_spin_lock(l)
-+# define lg_do_unlock(l) __rt_spin_unlock(l)
-+#endif
- /*
- * Note there is no uninit, so lglocks cannot be defined in
- * modules (but it's fine to use them from there)
-@@ -12,51 +21,60 @@
-
- void lg_lock_init(struct lglock *lg, char *name)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int i;
-+
-+ for_each_possible_cpu(i) {
-+ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
-+
-+ rt_mutex_init(lock);
-+ }
-+#endif
- LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
- }
- EXPORT_SYMBOL(lg_lock_init);
-
- void lg_local_lock(struct lglock *lg)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
-- preempt_disable();
-+ migrate_disable();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- lock = this_cpu_ptr(lg->lock);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock);
-
- void lg_local_unlock(struct lglock *lg)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- lock = this_cpu_ptr(lg->lock);
-- arch_spin_unlock(lock);
-- preempt_enable();
-+ lg_do_unlock(lock);
-+ migrate_enable();
- }
- EXPORT_SYMBOL(lg_local_unlock);
-
- void lg_local_lock_cpu(struct lglock *lg, int cpu)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
-- preempt_disable();
-+ preempt_disable_nort();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- lock = per_cpu_ptr(lg->lock, cpu);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock_cpu);
-
- void lg_local_unlock_cpu(struct lglock *lg, int cpu)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- lock = per_cpu_ptr(lg->lock, cpu);
-- arch_spin_unlock(lock);
-- preempt_enable();
-+ lg_do_unlock(lock);
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_local_unlock_cpu);
-
-@@ -64,12 +82,12 @@
- {
- int i;
-
-- preempt_disable();
-+ preempt_disable_nort();
- lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- for_each_possible_cpu(i) {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
- lock = per_cpu_ptr(lg->lock, i);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- }
- EXPORT_SYMBOL(lg_global_lock);
-@@ -80,10 +98,35 @@
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- for_each_possible_cpu(i) {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
- lock = per_cpu_ptr(lg->lock, i);
-- arch_spin_unlock(lock);
-+ lg_do_unlock(lock);
- }
-- preempt_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_global_unlock);
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * HACK: If you use this, you get to keep the pieces.
-+ * Used in queue_stop_cpus_work() when stop machinery
-+ * is called from an inactive CPU, so we can't schedule.
-+ */
-+# define lg_do_trylock_relax(l) \
-+ do { \
-+ while (!__rt_spin_trylock(l)) \
-+ cpu_relax(); \
-+ } while (0)
-+
-+void lg_global_trylock_relax(struct lglock *lg)
-+{
-+ int i;
-+
-+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
-+ for_each_possible_cpu(i) {
-+ lg_lock_ptr *lock;
-+ lock = per_cpu_ptr(lg->lock, i);
-+ lg_do_trylock_relax(lock);
-+ }
-+}
-+#endif
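kernel/locking/lglock.c keeps the lglock API intact but, on PREEMPT_RT_FULL, backs each per-CPU slot with an rt_mutex and swaps preempt_disable() for migrate_disable() so the critical sections stay preemptible. A minimal consumer, assuming the stock 4.1 lglock interface (DEFINE_STATIC_LGLOCK, lg_lock_init and friends; the names are made up):

    #include <linux/lglock.h>

    static DEFINE_STATIC_LGLOCK(demo_lg);

    static int __init demo_init(void)
    {
        /* On RT this also initializes the per-CPU rt_mutexes. */
        lg_lock_init(&demo_lg, "demo_lg");
        return 0;
    }

    static void demo_local(void)
    {
        lg_local_lock(&demo_lg);    /* cheap, this CPU's slot only */
        /* ... touch this CPU's data ... */
        lg_local_unlock(&demo_lg);
    }

    static void demo_global(void)
    {
        lg_global_lock(&demo_lg);   /* takes every CPU's slot */
        /* ... touch all per-CPU data ... */
        lg_global_unlock(&demo_lg);
    }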
-diff -Nur linux-4.1.13.orig/kernel/locking/lockdep.c linux-4.1.13/kernel/locking/lockdep.c
---- linux-4.1.13.orig/kernel/locking/lockdep.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/lockdep.c 2015-11-29 09:23:09.637612322 +0100
-@@ -3563,6 +3563,7 @@
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * We dont accurately track softirq state in e.g.
- * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3577,6 +3578,7 @@
- DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
- }
- }
-+#endif
-
- if (!debug_locks)
- print_irqtrace_events(current);
-diff -Nur linux-4.1.13.orig/kernel/locking/locktorture.c linux-4.1.13/kernel/locking/locktorture.c
---- linux-4.1.13.orig/kernel/locking/locktorture.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/locktorture.c 2015-11-29 09:23:09.637612322 +0100
-@@ -24,7 +24,6 @@
- #include <linux/module.h>
- #include <linux/kthread.h>
- #include <linux/spinlock.h>
--#include <linux/rwlock.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/smp.h>
-diff -Nur linux-4.1.13.orig/kernel/locking/Makefile linux-4.1.13/kernel/locking/Makefile
---- linux-4.1.13.orig/kernel/locking/Makefile 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/Makefile 2015-11-29 09:23:09.637612322 +0100
-@@ -1,5 +1,5 @@
-
--obj-y += mutex.o semaphore.o rwsem.o
-+obj-y += semaphore.o
-
- ifdef CONFIG_FUNCTION_TRACER
- CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
-@@ -8,7 +8,11 @@
- CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
- endif
-
-+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
-+obj-y += mutex.o
- obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-+obj-y += rwsem.o
-+endif
- obj-$(CONFIG_LOCKDEP) += lockdep.o
- ifeq ($(CONFIG_PROC_FS),y)
- obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -22,8 +26,11 @@
- obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
- obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
- obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
-+endif
- obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
-+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
- obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
- obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
-diff -Nur linux-4.1.13.orig/kernel/locking/rt.c linux-4.1.13/kernel/locking/rt.c
---- linux-4.1.13.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/kernel/locking/rt.c 2015-11-29 09:23:09.637612322 +0100
-@@ -0,0 +1,461 @@
-+/*
-+ * kernel/rt.c
-+ *
-+ * Real-Time Preemption Support
-+ *
-+ * started by Ingo Molnar:
-+ *
-+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
-+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
-+ *
-+ * historic credit for proving that Linux spinlocks can be implemented via
-+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
-+ * and others) who prototyped it on 2.4 and did lots of comparative
-+ * research and analysis; TimeSys, for proving that you can implement a
-+ * fully preemptible kernel via the use of IRQ threading and mutexes;
-+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
-+ * right one; and to MontaVista, who ported pmutexes to 2.6.
-+ *
-+ * This code is a from-scratch implementation and is not based on pmutexes,
-+ * but the idea of converting spinlocks to mutexes is used here too.
-+ *
-+ * lock debugging, locking tree, deadlock detection:
-+ *
-+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
-+ * Released under the General Public License (GPL).
-+ *
-+ * Includes portions of the generic R/W semaphore implementation from:
-+ *
-+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
-+ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
-+ * - Derived also from comments by Linus
-+ *
-+ * Pending ownership of locks and ownership stealing:
-+ *
-+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
-+ *
-+ * (also by Steven Rostedt)
-+ * - Converted single pi_lock to individual task locks.
-+ *
-+ * By Esben Nielsen:
-+ * Doing priority inheritance with help of the scheduler.
-+ *
-+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
-+ * - major rework based on Esben Nielsens initial patch
-+ * - replaced thread_info references by task_struct refs
-+ * - removed task->pending_owner dependency
-+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
-+ * in the scheduler return path as discussed with Steven Rostedt
-+ *
-+ * Copyright (C) 2006, Kihon Technologies Inc.
-+ * Steven Rostedt <rostedt@goodmis.org>
-+ * - debugged and patched Thomas Gleixner's rework.
-+ * - added back the cmpxchg to the rework.
-+ * - turned atomic require back on for SMP.
-+ */
-+
-+#include <linux/spinlock.h>
-+#include <linux/rtmutex.h>
-+#include <linux/sched.h>
-+#include <linux/delay.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/syscalls.h>
-+#include <linux/interrupt.h>
-+#include <linux/plist.h>
-+#include <linux/fs.h>
-+#include <linux/futex.h>
-+#include <linux/hrtimer.h>
-+
-+#include "rtmutex_common.h"
-+
-+/*
-+ * struct mutex functions
-+ */
-+void __mutex_do_init(struct mutex *mutex, const char *name,
-+ struct lock_class_key *key)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ /*
-+ * Make sure we are not reinitializing a held lock:
-+ */
-+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
-+ lockdep_init_map(&mutex->dep_map, name, key, 0);
-+#endif
-+ mutex->lock.save_state = 0;
-+}
-+EXPORT_SYMBOL(__mutex_do_init);
-+
-+void __lockfunc _mutex_lock(struct mutex *lock)
-+{
-+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ rt_mutex_lock(&lock->lock);
-+}
-+EXPORT_SYMBOL(_mutex_lock);
-+
-+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
-+{
-+ int ret;
-+
-+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ ret = rt_mutex_lock_interruptible(&lock->lock);
-+ if (ret)
-+ mutex_release(&lock->dep_map, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(_mutex_lock_interruptible);
-+
-+int __lockfunc _mutex_lock_killable(struct mutex *lock)
-+{
-+ int ret;
-+
-+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ ret = rt_mutex_lock_killable(&lock->lock);
-+ if (ret)
-+ mutex_release(&lock->dep_map, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(_mutex_lock_killable);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
-+{
-+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
-+ rt_mutex_lock(&lock->lock);
-+}
-+EXPORT_SYMBOL(_mutex_lock_nested);
-+
-+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
-+{
-+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
-+ rt_mutex_lock(&lock->lock);
-+}
-+EXPORT_SYMBOL(_mutex_lock_nest_lock);
-+
-+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
-+{
-+ int ret;
-+
-+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
-+ ret = rt_mutex_lock_interruptible(&lock->lock);
-+ if (ret)
-+ mutex_release(&lock->dep_map, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
-+
-+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
-+{
-+ int ret;
-+
-+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+ ret = rt_mutex_lock_killable(&lock->lock);
-+ if (ret)
-+ mutex_release(&lock->dep_map, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(_mutex_lock_killable_nested);
-+#endif
-+
-+int __lockfunc _mutex_trylock(struct mutex *lock)
-+{
-+ int ret = rt_mutex_trylock(&lock->lock);
-+
-+ if (ret)
-+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(_mutex_trylock);
-+
-+void __lockfunc _mutex_unlock(struct mutex *lock)
-+{
-+ mutex_release(&lock->dep_map, 1, _RET_IP_);
-+ rt_mutex_unlock(&lock->lock);
-+}
-+EXPORT_SYMBOL(_mutex_unlock);
-+
-+/*
-+ * rwlock_t functions
-+ */
-+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
-+{
-+ int ret;
-+
-+ migrate_disable();
-+ ret = rt_mutex_trylock(&rwlock->lock);
-+ if (ret)
-+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_write_trylock);
-+
-+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
-+{
-+ int ret;
-+
-+ *flags = 0;
-+ ret = rt_write_trylock(rwlock);
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_write_trylock_irqsave);
-+
-+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
-+{
-+ struct rt_mutex *lock = &rwlock->lock;
-+ int ret = 1;
-+
-+ /*
-+ * recursive read locks succeed when current owns the lock,
-+ * but not when read_depth == 0 which means that the lock is
-+ * write locked.
-+ */
-+ if (rt_mutex_owner(lock) != current) {
-+ migrate_disable();
-+ ret = rt_mutex_trylock(lock);
-+ if (ret)
-+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
-+
-+ } else if (!rwlock->read_depth) {
-+ ret = 0;
-+ }
-+
-+ if (ret)
-+ rwlock->read_depth++;
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_read_trylock);
-+
-+void __lockfunc rt_write_lock(rwlock_t *rwlock)
-+{
-+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-+ migrate_disable();
-+ __rt_spin_lock(&rwlock->lock);
-+}
-+EXPORT_SYMBOL(rt_write_lock);
-+
-+void __lockfunc rt_read_lock(rwlock_t *rwlock)
-+{
-+ struct rt_mutex *lock = &rwlock->lock;
-+
-+
-+ /*
-+ * recursive read locks succeed when current owns the lock
-+ */
-+ if (rt_mutex_owner(lock) != current) {
-+ migrate_disable();
-+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-+ __rt_spin_lock(lock);
-+ }
-+ rwlock->read_depth++;
-+}
-+
-+EXPORT_SYMBOL(rt_read_lock);
-+
-+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
-+{
-+ /* NOTE: we always pass in '1' for nested, for simplicity */
-+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-+ __rt_spin_unlock(&rwlock->lock);
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(rt_write_unlock);
-+
-+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
-+{
-+ /* Release the lock only when read_depth is down to 0 */
-+ if (--rwlock->read_depth == 0) {
-+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
-+ __rt_spin_unlock(&rwlock->lock);
-+ migrate_enable();
-+ }
-+}
-+EXPORT_SYMBOL(rt_read_unlock);
-+
-+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
-+{
-+ rt_write_lock(rwlock);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(rt_write_lock_irqsave);
-+
-+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
-+{
-+ rt_read_lock(rwlock);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(rt_read_lock_irqsave);
-+
-+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ /*
-+ * Make sure we are not reinitializing a held lock:
-+ */
-+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
-+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
-+#endif
-+ rwlock->lock.save_state = 1;
-+ rwlock->read_depth = 0;
-+}
-+EXPORT_SYMBOL(__rt_rwlock_init);
-+
-+/*
-+ * rw_semaphores
-+ */
-+
-+void rt_up_write(struct rw_semaphore *rwsem)
-+{
-+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+ rt_mutex_unlock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_up_write);
-+
-+void __rt_up_read(struct rw_semaphore *rwsem)
-+{
-+ if (--rwsem->read_depth == 0)
-+ rt_mutex_unlock(&rwsem->lock);
-+}
-+
-+void rt_up_read(struct rw_semaphore *rwsem)
-+{
-+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-+ __rt_up_read(rwsem);
-+}
-+EXPORT_SYMBOL(rt_up_read);
-+
-+/*
-+ * downgrade a write lock into a read lock
-+ * - just wake up any readers at the front of the queue
-+ */
-+void rt_downgrade_write(struct rw_semaphore *rwsem)
-+{
-+ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
-+ rwsem->read_depth = 1;
-+}
-+EXPORT_SYMBOL(rt_downgrade_write);
-+
-+int rt_down_write_trylock(struct rw_semaphore *rwsem)
-+{
-+ int ret = rt_mutex_trylock(&rwsem->lock);
-+
-+ if (ret)
-+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_down_write_trylock);
-+
-+void rt_down_write(struct rw_semaphore *rwsem)
-+{
-+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
-+ rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write);
-+
-+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
-+{
-+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+ rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write_nested);
-+
-+void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
-+ struct lockdep_map *nest)
-+{
-+ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
-+ rt_mutex_lock(&rwsem->lock);
-+}
-+EXPORT_SYMBOL(rt_down_write_nested_lock);
-+
-+int rt_down_read_trylock(struct rw_semaphore *rwsem)
-+{
-+ struct rt_mutex *lock = &rwsem->lock;
-+ int ret = 1;
-+
-+ /*
-+ * recursive read locks succeed when current owns the rwsem,
-+ * but not when read_depth == 0 which means that the rwsem is
-+ * write locked.
-+ */
-+ if (rt_mutex_owner(lock) != current)
-+ ret = rt_mutex_trylock(&rwsem->lock);
-+ else if (!rwsem->read_depth)
-+ ret = 0;
-+
-+ if (ret) {
-+ rwsem->read_depth++;
-+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_down_read_trylock);
-+
-+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
-+{
-+ struct rt_mutex *lock = &rwsem->lock;
-+
-+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+
-+ if (rt_mutex_owner(lock) != current)
-+ rt_mutex_lock(&rwsem->lock);
-+ rwsem->read_depth++;
-+}
-+
-+void rt_down_read(struct rw_semaphore *rwsem)
-+{
-+ __rt_down_read(rwsem, 0);
-+}
-+EXPORT_SYMBOL(rt_down_read);
-+
-+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
-+{
-+ __rt_down_read(rwsem, subclass);
-+}
-+EXPORT_SYMBOL(rt_down_read_nested);
-+
-+void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
-+ struct lock_class_key *key)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ /*
-+ * Make sure we are not reinitializing a held lock:
-+ */
-+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
-+ lockdep_init_map(&rwsem->dep_map, name, key, 0);
-+#endif
-+ rwsem->read_depth = 0;
-+ rwsem->lock.save_state = 0;
-+}
-+EXPORT_SYMBOL(__rt_rwsem_init);
-+
-+/**
-+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
-+ * @cnt: the atomic which we are to dec
-+ * @lock: the mutex to return holding if we dec to 0
-+ *
-+ * return true and hold lock if we dec to 0, return false otherwise
-+ */
-+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
-+{
-+ /* dec if we can't possibly hit 0 */
-+ if (atomic_add_unless(cnt, -1, 1))
-+ return 0;
-+ /* we might hit 0, so take the lock */
-+ mutex_lock(lock);
-+ if (!atomic_dec_and_test(cnt)) {
-+ /* when we actually did the dec, we didn't hit 0 */
-+ mutex_unlock(lock);
-+ return 0;
-+ }
-+ /* we hit 0, and we hold the lock */
-+ return 1;
-+}
-+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
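kernel/locking/rt.c provides the PREEMPT_RT_FULL substitutes for mutexes, rwlocks and rwsems on top of rt_mutex, adding recursive reader support through a per-lock read_depth, and ends with atomic_dec_and_mutex_lock(). That helper's typical use is refcounted teardown; a hedged sketch (struct demo_obj and demo_destroy() are hypothetical):

    #include <linux/mutex.h>
    #include <linux/atomic.h>

    struct demo_obj;
    static void demo_destroy(struct demo_obj *obj);

    static atomic_t demo_refs = ATOMIC_INIT(1);
    static DEFINE_MUTEX(demo_mutex);

    static void demo_put(struct demo_obj *obj)
    {
        /* Returns 1 with demo_mutex held only when the count really
         * dropped to zero, so teardown runs exactly once, serialized. */
        if (atomic_dec_and_mutex_lock(&demo_refs, &demo_mutex)) {
            demo_destroy(obj);
            mutex_unlock(&demo_mutex);
        }
    }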
-diff -Nur linux-4.1.13.orig/kernel/locking/rtmutex.c linux-4.1.13/kernel/locking/rtmutex.c
---- linux-4.1.13.orig/kernel/locking/rtmutex.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/rtmutex.c 2015-11-29 09:23:09.637612322 +0100
-@@ -7,6 +7,11 @@
- * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
- * Copyright (C) 2006 Esben Nielsen
-+ * Adaptive Spinlocks:
-+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
-+ * and Peter Morreale,
-+ * Adaptive Spinlocks simplification:
-+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
- *
- * See Documentation/locking/rt-mutex-design.txt for details.
- */
-@@ -16,6 +21,7 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
- #include <linux/timer.h>
-+#include <linux/ww_mutex.h>
-
- #include "rtmutex_common.h"
-
-@@ -69,6 +75,12 @@
- clear_rt_mutex_waiters(lock);
- }
-
-+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-+{
-+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
-+ waiter != PI_REQUEUE_INPROGRESS;
-+}
-+
- /*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
-@@ -300,7 +312,7 @@
- * of task. We do not use the spin_xx_mutex() variants here as we are
- * outside of the debug path.)
- */
--static void rt_mutex_adjust_prio(struct task_struct *task)
-+void rt_mutex_adjust_prio(struct task_struct *task)
- {
- unsigned long flags;
-
-@@ -335,6 +347,14 @@
- return debug_rt_mutex_detect_deadlock(waiter, chwalk);
- }
-
-+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
-+{
-+ if (waiter->savestate)
-+ wake_up_lock_sleeper(waiter->task);
-+ else
-+ wake_up_process(waiter->task);
-+}
-+
- /*
- * Max number of times we'll walk the boosting chain:
- */
-@@ -342,7 +362,8 @@
-
- static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
- {
-- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
-+ return rt_mutex_real_waiter(p->pi_blocked_on) ?
-+ p->pi_blocked_on->lock : NULL;
- }
-
- /*
-@@ -479,7 +500,7 @@
- * reached or the state of the chain has changed while we
- * dropped the locks.
- */
-- if (!waiter)
-+ if (!rt_mutex_real_waiter(waiter))
- goto out_unlock_pi;
-
- /*
-@@ -641,13 +662,16 @@
- * follow here. This is the end of the chain we are walking.
- */
- if (!rt_mutex_owner(lock)) {
-+ struct rt_mutex_waiter *lock_top_waiter;
-+
- /*
- * If the requeue [7] above changed the top waiter,
- * then we need to wake the new top waiter up to try
- * to get the lock.
- */
-- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-- wake_up_process(rt_mutex_top_waiter(lock)->task);
-+ lock_top_waiter = rt_mutex_top_waiter(lock);
-+ if (prerequeue_top_waiter != lock_top_waiter)
-+ rt_mutex_wake_waiter(lock_top_waiter);
- raw_spin_unlock(&lock->wait_lock);
- return 0;
- }
-@@ -740,6 +764,25 @@
- return ret;
- }
-
-+
-+#define STEAL_NORMAL 0
-+#define STEAL_LATERAL 1
-+
-+/*
-+ * Note that RT tasks are excluded from lateral-steals to prevent the
-+ * introduction of an unbounded latency
-+ */
-+static inline int lock_is_stealable(struct task_struct *task,
-+ struct task_struct *pendowner, int mode)
-+{
-+ if (mode == STEAL_NORMAL || rt_task(task)) {
-+ if (task->prio >= pendowner->prio)
-+ return 0;
-+ } else if (task->prio > pendowner->prio)
-+ return 0;
-+ return 1;
-+}
-+
- /*
- * Try to take an rt-mutex
- *
-@@ -750,8 +793,9 @@
- * @waiter: The waiter that is queued to the lock's wait list if the
- * callsite called task_blocked_on_lock(), otherwise NULL
- */
--static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-- struct rt_mutex_waiter *waiter)
-+static int __try_to_take_rt_mutex(struct rt_mutex *lock,
-+ struct task_struct *task,
-+ struct rt_mutex_waiter *waiter, int mode)
- {
- unsigned long flags;
-
-@@ -790,8 +834,10 @@
- * If waiter is not the highest priority waiter of
- * @lock, give up.
- */
-- if (waiter != rt_mutex_top_waiter(lock))
-+ if (waiter != rt_mutex_top_waiter(lock)) {
-+ /* XXX lock_is_stealable() ? */
- return 0;
-+ }
-
- /*
- * We can acquire the lock. Remove the waiter from the
-@@ -809,14 +855,10 @@
- * not need to be dequeued.
- */
- if (rt_mutex_has_waiters(lock)) {
-- /*
-- * If @task->prio is greater than or equal to
-- * the top waiter priority (kernel view),
-- * @task lost.
-- */
-- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
-- return 0;
-+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
-+ if (task != pown && !lock_is_stealable(task, pown, mode))
-+ return 0;
- /*
- * The current top waiter stays enqueued. We
- * don't have to change anything in the lock
-@@ -865,6 +907,347 @@
- return 1;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * preemptible spin_lock functions:
-+ */
-+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
-+ void (*slowfn)(struct rt_mutex *lock))
-+{
-+ might_sleep_no_state_check();
-+
-+ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
-+ rt_mutex_deadlock_account_lock(lock, current);
-+ else
-+ slowfn(lock);
-+}
-+
-+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-+ void (*slowfn)(struct rt_mutex *lock))
-+{
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
-+ rt_mutex_deadlock_account_unlock(current);
-+ else
-+ slowfn(lock);
-+}
-+#ifdef CONFIG_SMP
-+/*
-+ * Note that owner is a speculative pointer and dereferencing relies
-+ * on rcu_read_lock() and the check against the lock owner.
-+ */
-+static int adaptive_wait(struct rt_mutex *lock,
-+ struct task_struct *owner)
-+{
-+ int res = 0;
-+
-+ rcu_read_lock();
-+ for (;;) {
-+ if (owner != rt_mutex_owner(lock))
-+ break;
-+ /*
-+ * Ensure that owner->on_cpu is dereferenced _after_
-+ * checking the above to be valid.
-+ */
-+ barrier();
-+ if (!owner->on_cpu) {
-+ res = 1;
-+ break;
-+ }
-+ cpu_relax();
-+ }
-+ rcu_read_unlock();
-+ return res;
-+}
-+#else
-+static int adaptive_wait(struct rt_mutex *lock,
-+ struct task_struct *orig_owner)
-+{
-+ return 1;
-+}
-+#endif
-+
-+# define pi_lock(lock) raw_spin_lock_irq(lock)
-+# define pi_unlock(lock) raw_spin_unlock_irq(lock)
-+
-+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
-+ struct rt_mutex_waiter *waiter,
-+ struct task_struct *task,
-+ enum rtmutex_chainwalk chwalk);
-+/*
-+ * Slow path lock function spin_lock style: this variant is very
-+ * careful not to miss any non-lock wakeups.
-+ *
-+ * We store the current state under p->pi_lock in p->saved_state and
-+ * the try_to_wake_up() code handles this accordingly.
-+ */
-+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
-+{
-+ struct task_struct *lock_owner, *self = current;
-+ struct rt_mutex_waiter waiter, *top_waiter;
-+ int ret;
-+
-+ rt_mutex_init_waiter(&waiter, true);
-+
-+ raw_spin_lock(&lock->wait_lock);
-+
-+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
-+ raw_spin_unlock(&lock->wait_lock);
-+ return;
-+ }
-+
-+ BUG_ON(rt_mutex_owner(lock) == self);
-+
-+ /*
-+ * We save whatever state the task is in and we'll restore it
-+ * after acquiring the lock taking real wakeups into account
-+ * as well. We are serialized via pi_lock against wakeups. See
-+ * try_to_wake_up().
-+ */
-+ pi_lock(&self->pi_lock);
-+ self->saved_state = self->state;
-+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-+ pi_unlock(&self->pi_lock);
-+
-+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
-+ BUG_ON(ret);
-+
-+ for (;;) {
-+ /* Try to acquire the lock again. */
-+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
-+ break;
-+
-+ top_waiter = rt_mutex_top_waiter(lock);
-+ lock_owner = rt_mutex_owner(lock);
-+
-+ raw_spin_unlock(&lock->wait_lock);
-+
-+ debug_rt_mutex_print_deadlock(&waiter);
-+
-+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
-+ schedule_rt_mutex(lock);
-+
-+ raw_spin_lock(&lock->wait_lock);
-+
-+ pi_lock(&self->pi_lock);
-+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-+ pi_unlock(&self->pi_lock);
-+ }
-+
-+ /*
-+ * Restore the task state to current->saved_state. We set it
-+ * to the original state above and the try_to_wake_up() code
-+ * has possibly updated it when a real (non-rtmutex) wakeup
-+ * happened while we were blocked. Clear saved_state so
-+ * try_to_wake_up() does not get confused.
-+ */
-+ pi_lock(&self->pi_lock);
-+ __set_current_state_no_track(self->saved_state);
-+ self->saved_state = TASK_RUNNING;
-+ pi_unlock(&self->pi_lock);
-+
-+ /*
-+ * try_to_take_rt_mutex() sets the waiter bit
-+ * unconditionally. We might have to fix that up:
-+ */
-+ fixup_rt_mutex_waiters(lock);
-+
-+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
-+ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
-+
-+ raw_spin_unlock(&lock->wait_lock);
-+
-+ debug_rt_mutex_free_waiter(&waiter);
-+}
-+
-+static void wakeup_next_waiter(struct rt_mutex *lock);
-+/*
-+ * Slow path to release a rt_mutex spin_lock style
-+ */
-+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
-+{
-+ raw_spin_lock(&lock->wait_lock);
-+
-+ debug_rt_mutex_unlock(lock);
-+
-+ rt_mutex_deadlock_account_unlock(current);
-+
-+ if (!rt_mutex_has_waiters(lock)) {
-+ lock->owner = NULL;
-+ raw_spin_unlock(&lock->wait_lock);
-+ return;
-+ }
-+
-+ wakeup_next_waiter(lock);
-+
-+ raw_spin_unlock(&lock->wait_lock);
-+
-+	/* Undo pi boosting when necessary */
-+ rt_mutex_adjust_prio(current);
-+}
-+
-+void __lockfunc rt_spin_lock(spinlock_t *lock)
-+{
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+}
-+EXPORT_SYMBOL(rt_spin_lock);
-+
-+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
-+{
-+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-+}
-+EXPORT_SYMBOL(__rt_spin_lock);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
-+{
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+}
-+EXPORT_SYMBOL(rt_spin_lock_nested);
-+#endif
-+
-+void __lockfunc rt_spin_unlock(spinlock_t *lock)
-+{
-+ /* NOTE: we always pass in '1' for nested, for simplicity */
-+ spin_release(&lock->dep_map, 1, _RET_IP_);
-+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-+}
-+EXPORT_SYMBOL(rt_spin_unlock);
-+
-+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
-+{
-+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
-+}
-+EXPORT_SYMBOL(__rt_spin_unlock);
-+
-+/*
-+ * Wait for the lock to get unlocked: instead of polling for an unlock
-+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
-+ * schedule if there's contention:
-+ */
-+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
-+{
-+ spin_lock(lock);
-+ spin_unlock(lock);
-+}
-+EXPORT_SYMBOL(rt_spin_unlock_wait);
-+
-+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
-+{
-+ return rt_mutex_trylock(lock);
-+}
-+
-+int __lockfunc rt_spin_trylock(spinlock_t *lock)
-+{
-+ int ret = rt_mutex_trylock(&lock->lock);
-+
-+ if (ret)
-+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_spin_trylock);
-+
-+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
-+{
-+ int ret;
-+
-+ local_bh_disable();
-+ ret = rt_mutex_trylock(&lock->lock);
-+ if (ret) {
-+ migrate_disable();
-+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ } else
-+ local_bh_enable();
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_spin_trylock_bh);
-+
-+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-+{
-+ int ret;
-+
-+ *flags = 0;
-+ ret = rt_mutex_trylock(&lock->lock);
-+ if (ret) {
-+ migrate_disable();
-+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
-+
-+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
-+{
-+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
-+ if (atomic_add_unless(atomic, -1, 1))
-+ return 0;
-+ migrate_disable();
-+ rt_spin_lock(lock);
-+ if (atomic_dec_and_test(atomic))
-+ return 1;
-+ rt_spin_unlock(lock);
-+ migrate_enable();
-+ return 0;
-+}
-+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
-+
-+ void
-+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ /*
-+ * Make sure we are not reinitializing a held lock:
-+ */
-+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-+ lockdep_init_map(&lock->dep_map, name, key, 0);
-+#endif
-+}
-+EXPORT_SYMBOL(__rt_spin_lock_init);
-+
-+#endif /* PREEMPT_RT_FULL */
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ static inline int __sched
-+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-+{
-+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
-+ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
-+
-+ if (!hold_ctx)
-+ return 0;
-+
-+ if (unlikely(ctx == hold_ctx))
-+ return -EALREADY;
-+
-+ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
-+ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
-+#ifdef CONFIG_DEBUG_MUTEXES
-+ DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-+ ctx->contending_lock = ww;
-+#endif
-+ return -EDEADLK;
-+ }
-+
-+ return 0;
-+}
-+#else
-+ static inline int __sched
-+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-+{
-+ BUG();
-+ return 0;
-+}
-+
-+#endif
-+
-+static inline int
-+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-+ struct rt_mutex_waiter *waiter)
-+{
-+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
-+}
-+
- /*
- * Task blocks on lock.
- *
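The hunk above is the heart of the sleeping-spinlock conversion: rt_spin_lock_slowlock() parks the caller on the rt_mutex while preserving its original task state in saved_state, and adaptive_wait() busy-waits only while the owner is actually on a CPU. The visible consequence is that a spinlock_t section on RT is preemptible and may block, yet remains mutually exclusive; per-CPU assumptions that relied on preempt_disable() need migrate_disable() instead. Illustration (data and names are made up):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    static void demo_add(struct list_head *entry)
    {
        /* On PREEMPT_RT_FULL this compiles to rt_spin_lock(): the
         * section can be preempted and can sleep, but no second task
         * enters it concurrently. */
        spin_lock(&demo_lock);
        list_add_tail(entry, &demo_list);
        spin_unlock(&demo_lock);
    }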
-@@ -896,6 +1279,23 @@
- return -EDEADLK;
-
- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+
-+ /*
-+ * In the case of futex requeue PI, this will be a proxy
-+ * lock. The task will wake unaware that it is enqueued on
-+ * this lock. Avoid blocking on two locks and corrupting
-+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
-+ * flag. futex_wait_requeue_pi() sets this when it wakes up
-+ * before requeue (due to a signal or timeout). Do not enqueue
-+ * the task if PI_WAKEUP_INPROGRESS is set.
-+ */
-+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ return -EAGAIN;
-+ }
-+
-+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-+
- __rt_mutex_adjust_prio(task);
- waiter->task = task;
- waiter->lock = lock;
-@@ -919,7 +1319,7 @@
- rt_mutex_enqueue_pi(owner, waiter);
-
- __rt_mutex_adjust_prio(owner);
-- if (owner->pi_blocked_on)
-+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
- chain_walk = 1;
- } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
- chain_walk = 1;
-@@ -957,8 +1357,9 @@
- /*
- * Wake up the next waiter on the lock.
- *
-- * Remove the top waiter from the current tasks pi waiter list and
-- * wake it up.
-+ * Remove the top waiter from the current tasks pi waiter list,
-+ * wake it up and return whether the current task needs to undo
-+ * a potential priority boosting.
- *
- * Called with lock->wait_lock held.
- */
-@@ -996,7 +1397,7 @@
- * long as we hold lock->wait_lock. The waiter task needs to
- * acquire it in order to dequeue the waiter.
- */
-- wake_up_process(waiter->task);
-+ rt_mutex_wake_waiter(waiter);
- }
-
- /*
-@@ -1010,7 +1411,7 @@
- {
- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- struct task_struct *owner = rt_mutex_owner(lock);
-- struct rt_mutex *next_lock;
-+ struct rt_mutex *next_lock = NULL;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&current->pi_lock, flags);
-@@ -1035,7 +1436,8 @@
- __rt_mutex_adjust_prio(owner);
-
- /* Store the lock on which owner is blocked or NULL */
-- next_lock = task_blocked_on_lock(owner);
-+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
-+ next_lock = task_blocked_on_lock(owner);
-
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-
-@@ -1071,17 +1473,17 @@
- raw_spin_lock_irqsave(&task->pi_lock, flags);
-
- waiter = task->pi_blocked_on;
-- if (!waiter || (waiter->prio == task->prio &&
-+ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
- !dl_prio(task->prio))) {
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- return;
- }
- next_lock = waiter->lock;
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
- /* gets dropped in rt_mutex_adjust_prio_chain()! */
- get_task_struct(task);
-
-+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
- next_lock, NULL, task);
- }
-@@ -1099,7 +1501,8 @@
- static int __sched
- __rt_mutex_slowlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- struct rt_mutex_waiter *waiter)
-+ struct rt_mutex_waiter *waiter,
-+ struct ww_acquire_ctx *ww_ctx)
- {
- int ret = 0;
-
-@@ -1122,6 +1525,12 @@
- break;
- }
-
-+ if (ww_ctx && ww_ctx->acquired > 0) {
-+ ret = __mutex_lock_check_stamp(lock, ww_ctx);
-+ if (ret)
-+ break;
-+ }
-+
- raw_spin_unlock(&lock->wait_lock);
-
- debug_rt_mutex_print_deadlock(waiter);
-@@ -1156,25 +1565,102 @@
- }
- }
-
-+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-+ struct ww_acquire_ctx *ww_ctx)
-+{
-+#ifdef CONFIG_DEBUG_MUTEXES
-+ /*
-+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
-+ * but released with a normal mutex_unlock in this call.
-+ *
-+ * This should never happen, always use ww_mutex_unlock.
-+ */
-+ DEBUG_LOCKS_WARN_ON(ww->ctx);
-+
-+ /*
-+ * Not quite done after calling ww_acquire_done() ?
-+ */
-+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-+
-+ if (ww_ctx->contending_lock) {
-+ /*
-+ * After -EDEADLK you tried to
-+ * acquire a different ww_mutex? Bad!
-+ */
-+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-+
-+ /*
-+ * You called ww_mutex_lock after receiving -EDEADLK,
-+ * but 'forgot' to unlock everything else first?
-+ */
-+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
-+ ww_ctx->contending_lock = NULL;
-+ }
-+
-+ /*
-+ * Naughty, using a different class will lead to undefined behavior!
-+ */
-+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-+#endif
-+ ww_ctx->acquired++;
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void ww_mutex_account_lock(struct rt_mutex *lock,
-+ struct ww_acquire_ctx *ww_ctx)
-+{
-+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
-+ struct rt_mutex_waiter *waiter, *n;
-+
-+ /*
-+ * This branch gets optimized out for the common case,
-+ * and is only important for ww_mutex_lock.
-+ */
-+ ww_mutex_lock_acquired(ww, ww_ctx);
-+ ww->ctx = ww_ctx;
-+
-+ /*
-+ * Give any possible sleeping processes the chance to wake up,
-+ * so they can recheck if they have to back off.
-+ */
-+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
-+ tree_entry) {
-+ /* XXX debug rt mutex waiter wakeup */
-+
-+ BUG_ON(waiter->lock != lock);
-+ rt_mutex_wake_waiter(waiter);
-+ }
-+}
-+
-+#else
-+
-+static void ww_mutex_account_lock(struct rt_mutex *lock,
-+ struct ww_acquire_ctx *ww_ctx)
-+{
-+ BUG();
-+}
-+#endif
-+
- /*
- * Slow path lock function:
- */
- static int __sched
- rt_mutex_slowlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk)
-+ enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx)
- {
- struct rt_mutex_waiter waiter;
- int ret = 0;
-
-- debug_rt_mutex_init_waiter(&waiter);
-- RB_CLEAR_NODE(&waiter.pi_tree_entry);
-- RB_CLEAR_NODE(&waiter.tree_entry);
-+ rt_mutex_init_waiter(&waiter, false);
-
- raw_spin_lock(&lock->wait_lock);
-
- /* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL)) {
-+ if (ww_ctx)
-+ ww_mutex_account_lock(lock, ww_ctx);
- raw_spin_unlock(&lock->wait_lock);
- return 0;
- }
-@@ -1192,13 +1678,23 @@
-
- if (likely(!ret))
- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
-+ ww_ctx);
-+ else if (ww_ctx) {
-+ /* ww_mutex received EDEADLK, let it become EALREADY */
-+ ret = __mutex_lock_check_stamp(lock, ww_ctx);
-+ BUG_ON(!ret);
-+ }
-
- if (unlikely(ret)) {
- __set_current_state(TASK_RUNNING);
- if (rt_mutex_has_waiters(lock))
- remove_waiter(lock, &waiter);
-- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+		/* ww_mutex wants to report EDEADLK/EALREADY, let it through */
-+ if (!ww_ctx)
-+ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+ } else if (ww_ctx) {
-+ ww_mutex_account_lock(lock, ww_ctx);
- }
-
- /*
-@@ -1255,7 +1751,7 @@
- /*
- * Slow path to release a rt-mutex:
- */
--static void __sched
-+static bool __sched
- rt_mutex_slowunlock(struct rt_mutex *lock)
- {
- raw_spin_lock(&lock->wait_lock);
-@@ -1298,7 +1794,7 @@
- while (!rt_mutex_has_waiters(lock)) {
- /* Drops lock->wait_lock ! */
- if (unlock_rt_mutex_safe(lock) == true)
-- return;
-+ return false;
- /* Relock the rtmutex and try again */
- raw_spin_lock(&lock->wait_lock);
- }
-@@ -1311,8 +1807,7 @@
-
- raw_spin_unlock(&lock->wait_lock);
-
-- /* Undo pi boosting if necessary: */
-- rt_mutex_adjust_prio(current);
-+ return true;
- }
-
- /*
-@@ -1323,31 +1818,36 @@
- */
- static inline int
- rt_mutex_fastlock(struct rt_mutex *lock, int state,
-+ struct ww_acquire_ctx *ww_ctx,
- int (*slowfn)(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk))
-+ enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx))
- {
- if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
- rt_mutex_deadlock_account_lock(lock, current);
- return 0;
- } else
-- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
-+ ww_ctx);
- }
-
- static inline int
- rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx,
- int (*slowfn)(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk))
-+ enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx))
- {
- if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
- likely(rt_mutex_cmpxchg(lock, NULL, current))) {
- rt_mutex_deadlock_account_lock(lock, current);
- return 0;
- } else
-- return slowfn(lock, state, timeout, chwalk);
-+ return slowfn(lock, state, timeout, chwalk, ww_ctx);
- }
-
- static inline int
-@@ -1363,12 +1863,14 @@
-
- static inline void
- rt_mutex_fastunlock(struct rt_mutex *lock,
-- void (*slowfn)(struct rt_mutex *lock))
-+ bool (*slowfn)(struct rt_mutex *lock))
- {
-- if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
- rt_mutex_deadlock_account_unlock(current);
-- else
-- slowfn(lock);
-+ } else if (slowfn(lock)) {
-+ /* Undo pi boosting if necessary: */
-+ rt_mutex_adjust_prio(current);
-+ }
- }
-
- /**
-@@ -1380,7 +1882,7 @@
- {
- might_sleep();
-
-- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
-+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock);
-
-@@ -1397,7 +1899,7 @@
- {
- might_sleep();
-
-- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-
-@@ -1410,11 +1912,30 @@
- might_sleep();
-
- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-- RT_MUTEX_FULL_CHAINWALK,
-+ RT_MUTEX_FULL_CHAINWALK, NULL,
- rt_mutex_slowlock);
- }
-
- /**
-+ * rt_mutex_lock_killable - lock a rt_mutex killable
-+ *
-+ * @lock: the rt_mutex to be locked
-+ * @detect_deadlock: deadlock detection on/off
-+ *
-+ * Returns:
-+ * 0 on success
-+ * -EINTR when interrupted by a signal
-+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
-+ */
-+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-+{
-+ might_sleep();
-+
-+ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
-+}
-+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-+
-+/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- * the timeout structure is provided
- * by the caller
-@@ -1434,6 +1955,7 @@
-
- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- RT_MUTEX_MIN_CHAINWALK,
-+ NULL,
- rt_mutex_slowlock);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -1463,6 +1985,22 @@
- EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-
- /**
-+ * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
-+ * @lock: the rt_mutex to be unlocked
-+ *
-+ * Returns: true/false indicating whether priority adjustment is
-+ * required or not.
-+ */
-+bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
-+{
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
-+ rt_mutex_deadlock_account_unlock(current);
-+ return false;
-+ }
-+ return rt_mutex_slowunlock(lock);
-+}
-+
-+/**
- * rt_mutex_destroy - mark a mutex unusable
- * @lock: the mutex to be destroyed
- *
-@@ -1492,13 +2030,12 @@
- void __rt_mutex_init(struct rt_mutex *lock, const char *name)
- {
- lock->owner = NULL;
-- raw_spin_lock_init(&lock->wait_lock);
- lock->waiters = RB_ROOT;
- lock->waiters_leftmost = NULL;
-
- debug_rt_mutex_init(lock, name);
- }
--EXPORT_SYMBOL_GPL(__rt_mutex_init);
-+EXPORT_SYMBOL(__rt_mutex_init);
-
- /**
- * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1513,7 +2050,7 @@
- void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
- {
-- __rt_mutex_init(lock, NULL);
-+ rt_mutex_init(lock);
- debug_rt_mutex_proxy_lock(lock, proxy_owner);
- rt_mutex_set_owner(lock, proxy_owner);
- rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1561,6 +2098,35 @@
- return 1;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * In PREEMPT_RT there's an added race.
-+ * If the task that we are about to requeue times out,
-+ * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
-+ * to skip this task. But right after the task sets
-+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
-+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
-+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
-+ * lock that it blocks on. We *must not* place this task
-+ * on this proxy lock in that case.
-+ *
-+ * To prevent this race, we first take the task's pi_lock
-+ * and check if it has updated its pi_blocked_on. If it has,
-+ * we assume that it woke up and we return -EAGAIN.
-+ * Otherwise, we set the task's pi_blocked_on to
-+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
-+ * it will know that we are in the process of requeuing it.
-+ */
-+ raw_spin_lock_irq(&task->pi_lock);
-+ if (task->pi_blocked_on) {
-+ raw_spin_unlock_irq(&task->pi_lock);
-+ raw_spin_unlock(&lock->wait_lock);
-+ return -EAGAIN;
-+ }
-+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-+ raw_spin_unlock_irq(&task->pi_lock);
-+#endif
-+
- /* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task,
- RT_MUTEX_FULL_CHAINWALK);
-@@ -1575,7 +2141,7 @@
- ret = 0;
- }
-
-- if (unlikely(ret))
-+ if (ret && rt_mutex_has_waiters(lock))
- remove_waiter(lock, waiter);
-
- raw_spin_unlock(&lock->wait_lock);
-@@ -1631,7 +2197,7 @@
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
-
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-@@ -1646,3 +2212,89 @@
-
- return ret;
- }
-+
-+static inline int
-+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-+{
-+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-+ unsigned tmp;
-+
-+ if (ctx->deadlock_inject_countdown-- == 0) {
-+ tmp = ctx->deadlock_inject_interval;
-+ if (tmp > UINT_MAX/4)
-+ tmp = UINT_MAX;
-+ else
-+ tmp = tmp*2 + tmp + tmp/2;
-+
-+ ctx->deadlock_inject_interval = tmp;
-+ ctx->deadlock_inject_countdown = tmp;
-+ ctx->contending_lock = lock;
-+
-+ ww_mutex_unlock(lock);
-+
-+ return -EDEADLK;
-+ }
-+#endif
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+int __sched
-+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-+{
-+ int ret;
-+
-+ might_sleep();
-+
-+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
-+ if (ret)
-+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+ else if (!ret && ww_ctx->acquired > 1)
-+ return ww_mutex_deadlock_injection(lock, ww_ctx);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
-+
-+int __sched
-+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-+{
-+ int ret;
-+
-+ might_sleep();
-+
-+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
-+ if (ret)
-+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+ else if (!ret && ww_ctx->acquired > 1)
-+ return ww_mutex_deadlock_injection(lock, ww_ctx);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
-+
-+void __sched ww_mutex_unlock(struct ww_mutex *lock)
-+{
-+ int nest = !!lock->ctx;
-+
-+ /*
-+ * The unlocking fastpath is the 0->1 transition from 'locked'
-+ * into 'unlocked' state:
-+ */
-+ if (nest) {
-+#ifdef CONFIG_DEBUG_MUTEXES
-+ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-+#endif
-+ if (lock->ctx->acquired > 0)
-+ lock->ctx->acquired--;
-+ lock->ctx = NULL;
-+ }
-+
-+ mutex_release(&lock->base.dep_map, nest, _RET_IP_);
-+ rt_mutex_unlock(&lock->base.lock);
-+}
-+EXPORT_SYMBOL(ww_mutex_unlock);
-+#endif
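
/*
 * A minimal illustrative sketch (not taken from the patch): the hunks
 * above keep threading a ww_acquire_ctx into the slow path because an
 * rt_mutex is taken on the fast path with a single compare-and-swap of
 * the owner field, and only contention reaches the (now ww_ctx-aware)
 * slow path. Userspace analogue, assuming GCC/Clang __atomic builtins;
 * all names here are hypothetical:
 */
struct fast_lock {
	void *owner;				/* NULL when the lock is free */
};

static int fast_lock_acquire(struct fast_lock *l, void *self,
			     int (*slowpath)(struct fast_lock *l))
{
	void *expected = NULL;

	/* uncontended case: one CAS, no wait_lock, no sleeping */
	if (__atomic_compare_exchange_n(&l->owner, &expected, self, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;

	/* contended: enqueue as a waiter, boost the owner, block */
	return slowpath(l);
}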
-diff -Nur linux-4.1.13.orig/kernel/locking/rtmutex_common.h linux-4.1.13/kernel/locking/rtmutex_common.h
---- linux-4.1.13.orig/kernel/locking/rtmutex_common.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/rtmutex_common.h 2015-11-29 09:23:09.637612322 +0100
-@@ -49,6 +49,7 @@
- struct rb_node pi_tree_entry;
- struct task_struct *task;
- struct rt_mutex *lock;
-+ bool savestate;
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- unsigned long ip;
- struct pid *deadlock_task_pid;
-@@ -119,6 +120,9 @@
- /*
- * PI-futex support (proxy locking functions, etc.):
- */
-+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
-+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
-+
- extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
- extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
-@@ -132,10 +136,24 @@
- struct rt_mutex_waiter *waiter);
- extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
-
-+extern bool rt_mutex_futex_unlock(struct rt_mutex *lock);
-+
-+extern void rt_mutex_adjust_prio(struct task_struct *task);
-+
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # include "rtmutex-debug.h"
- #else
- # include "rtmutex.h"
- #endif
-
-+static inline void
-+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
-+{
-+ debug_rt_mutex_init_waiter(waiter);
-+ waiter->task = NULL;
-+ waiter->savestate = savestate;
-+ RB_CLEAR_NODE(&waiter->pi_tree_entry);
-+ RB_CLEAR_NODE(&waiter->tree_entry);
-+}
-+
- #endif
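
/*
 * A minimal illustrative sketch (not taken from the patch): because
 * pi_blocked_on otherwise holds the address of a real rt_mutex_waiter,
 * the small integers 1 and 2 cast to the pointer type above can never
 * collide with a valid waiter and so work as transient state markers.
 * Hypothetical reader-side check:
 */
static inline bool rt_mutex_waiter_is_transient(struct rt_mutex_waiter *w)
{
	/* values 1 and 2 are flags, anything else is a real waiter */
	return w == PI_WAKEUP_INPROGRESS || w == PI_REQUEUE_INPROGRESS;
}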
-diff -Nur linux-4.1.13.orig/kernel/locking/spinlock.c linux-4.1.13/kernel/locking/spinlock.c
---- linux-4.1.13.orig/kernel/locking/spinlock.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/spinlock.c 2015-11-29 09:23:09.637612322 +0100
-@@ -124,8 +124,11 @@
- * __[spin|read|write]_lock_bh()
- */
- BUILD_LOCK_OPS(spin, raw_spinlock);
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
- BUILD_LOCK_OPS(read, rwlock);
- BUILD_LOCK_OPS(write, rwlock);
-+#endif
-
- #endif
-
-@@ -209,6 +212,8 @@
- EXPORT_SYMBOL(_raw_spin_unlock_bh);
- #endif
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- #ifndef CONFIG_INLINE_READ_TRYLOCK
- int __lockfunc _raw_read_trylock(rwlock_t *lock)
- {
-@@ -353,6 +358,8 @@
- EXPORT_SYMBOL(_raw_write_unlock_bh);
- #endif
-
-+#endif /* !PREEMPT_RT_FULL */
-+
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
-
- void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -Nur linux-4.1.13.orig/kernel/locking/spinlock_debug.c linux-4.1.13/kernel/locking/spinlock_debug.c
---- linux-4.1.13.orig/kernel/locking/spinlock_debug.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/locking/spinlock_debug.c 2015-11-29 09:23:09.637612322 +0100
-@@ -31,6 +31,7 @@
-
- EXPORT_SYMBOL(__raw_spin_lock_init);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key)
- {
-@@ -48,6 +49,7 @@
- }
-
- EXPORT_SYMBOL(__rwlock_init);
-+#endif
-
- static void spin_dump(raw_spinlock_t *lock, const char *msg)
- {
-@@ -159,6 +161,7 @@
- arch_spin_unlock(&lock->raw_lock);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- static void rwlock_bug(rwlock_t *lock, const char *msg)
- {
- if (!debug_locks_off())
-@@ -300,3 +303,5 @@
- debug_write_unlock(lock);
- arch_write_unlock(&lock->raw_lock);
- }
-+
-+#endif
-diff -Nur linux-4.1.13.orig/kernel/panic.c linux-4.1.13/kernel/panic.c
---- linux-4.1.13.orig/kernel/panic.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/panic.c 2015-11-29 09:23:09.637612322 +0100
-@@ -387,9 +387,11 @@
-
- static int init_oops_id(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!oops_id)
- get_random_bytes(&oops_id, sizeof(oops_id));
- else
-+#endif
- oops_id++;
-
- return 0;
-diff -Nur linux-4.1.13.orig/kernel/power/hibernate.c linux-4.1.13/kernel/power/hibernate.c
---- linux-4.1.13.orig/kernel/power/hibernate.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/power/hibernate.c 2015-11-29 09:23:09.637612322 +0100
-@@ -285,6 +285,8 @@
-
- local_irq_disable();
-
-+ system_state = SYSTEM_SUSPEND;
-+
- error = syscore_suspend();
- if (error) {
- printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -314,6 +316,7 @@
- syscore_resume();
-
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
-
- Enable_cpus:
-@@ -437,6 +440,7 @@
- goto Enable_cpus;
-
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
-
- error = syscore_suspend();
- if (error)
-@@ -470,6 +474,7 @@
- syscore_resume();
-
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
-
- Enable_cpus:
-@@ -555,6 +560,7 @@
- goto Platform_finish;
-
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
- syscore_suspend();
- if (pm_wakeup_pending()) {
- error = -EAGAIN;
-@@ -567,6 +573,7 @@
-
- Power_up:
- syscore_resume();
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
- enable_nonboot_cpus();
-
-diff -Nur linux-4.1.13.orig/kernel/power/suspend.c linux-4.1.13/kernel/power/suspend.c
---- linux-4.1.13.orig/kernel/power/suspend.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/power/suspend.c 2015-11-29 09:23:09.637612322 +0100
-@@ -356,6 +356,8 @@
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
-
-+ system_state = SYSTEM_SUSPEND;
-+
- error = syscore_suspend();
- if (!error) {
- *wakeup = pm_wakeup_pending();
-@@ -370,6 +372,8 @@
- syscore_resume();
- }
-
-+ system_state = SYSTEM_RUNNING;
-+
- arch_suspend_enable_irqs();
- BUG_ON(irqs_disabled());
-
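
/*
 * A minimal illustrative sketch (not taken from the patch): every
 * suspend/hibernate path above uses the same bracketing, so RT-aware
 * low-level code running with interrupts hard-disabled can test
 * system_state == SYSTEM_SUSPEND and avoid taking sleeping locks.
 * Shape of the pattern, with error handling elided:
 */
static int suspend_core_sketch(void)
{
	int error;

	local_irq_disable();
	system_state = SYSTEM_SUSPEND;	/* visible to low-level code */

	error = syscore_suspend();
	if (!error) {
		/* ... enter the low-power state, later wake up ... */
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;	/* restore before IRQs come back */
	local_irq_enable();
	return error;
}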
-diff -Nur linux-4.1.13.orig/kernel/printk/printk.c linux-4.1.13/kernel/printk/printk.c
---- linux-4.1.13.orig/kernel/printk/printk.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/printk/printk.c 2015-11-29 09:23:09.641612055 +0100
-@@ -1163,6 +1163,7 @@
- {
- char *text;
- int len = 0;
-+ int attempts = 0;
-
- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
- if (!text)
-@@ -1174,7 +1175,14 @@
- u64 seq;
- u32 idx;
- enum log_flags prev;
--
-+ int num_msg;
-+try_again:
-+ attempts++;
-+ if (attempts > 10) {
-+ len = -EBUSY;
-+ goto out;
-+ }
-+ num_msg = 0;
- if (clear_seq < log_first_seq) {
- /* messages are gone, move to first available one */
- clear_seq = log_first_seq;
-@@ -1195,6 +1203,14 @@
- prev = msg->flags;
- idx = log_next(idx);
- seq++;
-+ num_msg++;
-+ if (num_msg > 5) {
-+ num_msg = 0;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (clear_seq < log_first_seq)
-+ goto try_again;
-+ }
- }
-
- /* move first record forward until length fits into the buffer */
-@@ -1208,6 +1224,14 @@
- prev = msg->flags;
- idx = log_next(idx);
- seq++;
-+ num_msg++;
-+ if (num_msg > 5) {
-+ num_msg = 0;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (clear_seq < log_first_seq)
-+ goto try_again;
-+ }
- }
-
- /* last message fitting into this dump */
-@@ -1248,6 +1272,7 @@
- clear_seq = log_next_seq;
- clear_idx = log_next_idx;
- }
-+out:
- raw_spin_unlock_irq(&logbuf_lock);
-
- kfree(text);
-@@ -1401,6 +1426,7 @@
- if (!console_drivers)
- return;
-
-+ migrate_disable();
- for_each_console(con) {
- if (exclusive_console && con != exclusive_console)
- continue;
-@@ -1413,6 +1439,7 @@
- continue;
- con->write(con, text, len);
- }
-+ migrate_enable();
- }
-
- /*
-@@ -1473,6 +1500,15 @@
- static int console_trylock_for_printk(void)
- {
- unsigned int cpu = smp_processor_id();
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) &&
-+ !irqs_disabled();
-+#else
-+ int lock = 1;
-+#endif
-+
-+ if (!lock)
-+ return 0;
-
- if (!console_trylock())
- return 0;
-@@ -1607,6 +1643,62 @@
- return textlen;
- }
-
-+#ifdef CONFIG_EARLY_PRINTK
-+struct console *early_console;
-+
-+static void early_vprintk(const char *fmt, va_list ap)
-+{
-+ if (early_console) {
-+ char buf[512];
-+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-+
-+ early_console->write(early_console, buf, n);
-+ }
-+}
-+
-+asmlinkage void early_printk(const char *fmt, ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ early_vprintk(fmt, ap);
-+ va_end(ap);
-+}
-+
-+/*
-+ * This is independent of any log levels - a global
-+ * kill switch that turns off all of printk.
-+ *
-+ * Used by the NMI watchdog if early-printk is enabled.
-+ */
-+static bool __read_mostly printk_killswitch;
-+
-+static int __init force_early_printk_setup(char *str)
-+{
-+ printk_killswitch = true;
-+ return 0;
-+}
-+early_param("force_early_printk", force_early_printk_setup);
-+
-+void printk_kill(void)
-+{
-+ printk_killswitch = true;
-+}
-+
-+static int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ if (!printk_killswitch)
-+ return 0;
-+ early_vprintk(fmt, ap);
-+ return 1;
-+}
-+#else
-+static inline int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ return 0;
-+}
-+#endif
-+
- asmlinkage int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, va_list args)
-@@ -1623,6 +1715,13 @@
- /* cpu currently holding logbuf_lock in this function */
- static unsigned int logbuf_cpu = UINT_MAX;
-
-+ /*
-+ * Fall back to early_printk if a debugging subsystem has
-+ * killed printk output
-+ */
-+ if (unlikely(forced_early_printk(fmt, args)))
-+ return 1;
-+
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
- in_sched = true;
-@@ -1764,8 +1863,7 @@
- * console_sem which would prevent anyone from printing to
- * console
- */
-- preempt_disable();
--
-+ migrate_disable();
- /*
- * Try to acquire and then immediately release the console
- * semaphore. The release will print out buffers and wake up
-@@ -1773,7 +1871,7 @@
- */
- if (console_trylock_for_printk())
- console_unlock();
-- preempt_enable();
-+ migrate_enable();
- lockdep_on();
- }
-
-@@ -1902,26 +2000,6 @@
-
- #endif /* CONFIG_PRINTK */
-
--#ifdef CONFIG_EARLY_PRINTK
--struct console *early_console;
--
--asmlinkage __visible void early_printk(const char *fmt, ...)
--{
-- va_list ap;
-- char buf[512];
-- int n;
--
-- if (!early_console)
-- return;
--
-- va_start(ap, fmt);
-- n = vscnprintf(buf, sizeof(buf), fmt, ap);
-- va_end(ap);
--
-- early_console->write(early_console, buf, n);
--}
--#endif
--
- static int __add_preferred_console(char *name, int idx, char *options,
- char *brl_options)
- {
-@@ -2143,11 +2221,16 @@
- goto out;
-
- len = cont_print_text(text, size);
-+#ifndef CONFIG_PREEMPT_RT_FULL
- raw_spin_unlock(&logbuf_lock);
- stop_critical_timings();
- call_console_drivers(cont.level, text, len);
- start_critical_timings();
- local_irq_restore(flags);
-+#else
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(cont.level, text, len);
-+#endif
- return;
- out:
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2235,12 +2318,17 @@
- console_idx = log_next(console_idx);
- console_seq++;
- console_prev = msg->flags;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(level, text, len);
-+#else
- raw_spin_unlock(&logbuf_lock);
-
- stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, text, len);
- start_critical_timings();
- local_irq_restore(flags);
-+#endif
- }
- console_locked = 0;
-
-diff -Nur linux-4.1.13.orig/kernel/ptrace.c linux-4.1.13/kernel/ptrace.c
---- linux-4.1.13.orig/kernel/ptrace.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/ptrace.c 2015-11-29 09:23:09.641612055 +0100
-@@ -129,7 +129,12 @@
-
- spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-- task->state = __TASK_TRACED;
-+ raw_spin_lock_irq(&task->pi_lock);
-+ if (task->state & __TASK_TRACED)
-+ task->state = __TASK_TRACED;
-+ else
-+ task->saved_state = __TASK_TRACED;
-+ raw_spin_unlock_irq(&task->pi_lock);
- ret = true;
- }
- spin_unlock_irq(&task->sighand->siglock);
-diff -Nur linux-4.1.13.orig/kernel/rcu/tree.c linux-4.1.13/kernel/rcu/tree.c
---- linux-4.1.13.orig/kernel/rcu/tree.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/rcu/tree.c 2015-11-29 09:23:09.641612055 +0100
-@@ -56,6 +56,11 @@
- #include <linux/random.h>
- #include <linux/ftrace_event.h>
- #include <linux/suspend.h>
-+#include <linux/delay.h>
-+#include <linux/gfp.h>
-+#include <linux/oom.h>
-+#include <linux/smpboot.h>
-+#include "../time/tick-internal.h"
-
- #include "tree.h"
- #include "rcu.h"
-@@ -220,6 +225,19 @@
- }
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void rcu_preempt_qs(void);
-+
-+void rcu_bh_qs(void)
-+{
-+ unsigned long flags;
-+
-+ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
-+ local_irq_save(flags);
-+ rcu_preempt_qs();
-+ local_irq_restore(flags);
-+}
-+#else
- void rcu_bh_qs(void)
- {
- if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
-@@ -229,6 +247,7 @@
- __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
- }
- }
-+#endif
-
- static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-
-@@ -404,6 +423,7 @@
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-@@ -431,6 +451,13 @@
- }
- EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-
-+#else
-+void rcu_force_quiescent_state(void)
-+{
-+}
-+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-+#endif
-+
- /*
- * Force a quiescent state for RCU-sched.
- */
-@@ -1545,7 +1572,7 @@
- !ACCESS_ONCE(rsp->gp_flags) ||
- !rsp->gp_kthread)
- return;
-- wake_up(&rsp->gp_wq);
-+ swait_wake(&rsp->gp_wq);
- }
-
- /*
-@@ -1986,7 +2013,7 @@
- ACCESS_ONCE(rsp->gpnum),
- TPS("reqwait"));
- rsp->gp_state = RCU_GP_WAIT_GPS;
-- wait_event_interruptible(rsp->gp_wq,
-+ swait_event_interruptible(rsp->gp_wq,
- ACCESS_ONCE(rsp->gp_flags) &
- RCU_GP_FLAG_INIT);
- /* Locking provides needed memory barrier. */
-@@ -2015,7 +2042,7 @@
- ACCESS_ONCE(rsp->gpnum),
- TPS("fqswait"));
- rsp->gp_state = RCU_GP_WAIT_FQS;
-- ret = wait_event_interruptible_timeout(rsp->gp_wq,
-+ ret = swait_event_interruptible_timeout(rsp->gp_wq,
- ((gf = ACCESS_ONCE(rsp->gp_flags)) &
- RCU_GP_FLAG_FQS) ||
- (!ACCESS_ONCE(rnp->qsmask) &&
-@@ -2860,18 +2887,17 @@
- /*
- * Do RCU core processing for the current CPU.
- */
--static void rcu_process_callbacks(struct softirq_action *unused)
-+static void rcu_process_callbacks(void)
- {
- struct rcu_state *rsp;
-
- if (cpu_is_offline(smp_processor_id()))
- return;
-- trace_rcu_utilization(TPS("Start RCU core"));
- for_each_rcu_flavor(rsp)
- __rcu_process_callbacks(rsp);
-- trace_rcu_utilization(TPS("End RCU core"));
- }
-
-+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
- /*
- * Schedule RCU callback invocation. If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
-@@ -2883,18 +2909,105 @@
- {
- if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
- return;
-- if (likely(!rsp->boost)) {
-- rcu_do_batch(rsp, rdp);
-+ rcu_do_batch(rsp, rdp);
-+}
-+
-+static void rcu_wake_cond(struct task_struct *t, int status)
-+{
-+ /*
-+ * If the thread is yielding, only wake it when this
-+ * is invoked from idle
-+ */
-+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
-+ wake_up_process(t);
-+}
-+
-+/*
-+ * Wake up this CPU's rcuc kthread to do RCU core processing.
-+ */
-+static void invoke_rcu_core(void)
-+{
-+ unsigned long flags;
-+ struct task_struct *t;
-+
-+ if (!cpu_online(smp_processor_id()))
- return;
-+ local_irq_save(flags);
-+ __this_cpu_write(rcu_cpu_has_work, 1);
-+ t = __this_cpu_read(rcu_cpu_kthread_task);
-+ if (t != NULL && current != t)
-+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
-+ local_irq_restore(flags);
-+}
-+
-+static void rcu_cpu_kthread_park(unsigned int cpu)
-+{
-+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-+}
-+
-+static int rcu_cpu_kthread_should_run(unsigned int cpu)
-+{
-+ return __this_cpu_read(rcu_cpu_has_work);
-+}
-+
-+/*
-+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
-+ * RCU softirq used in flavors and configurations of RCU that do not
-+ * support RCU priority boosting.
-+ */
-+static void rcu_cpu_kthread(unsigned int cpu)
-+{
-+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
-+ int spincnt;
-+
-+ for (spincnt = 0; spincnt < 10; spincnt++) {
-+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
-+ local_bh_disable();
-+ *statusp = RCU_KTHREAD_RUNNING;
-+ this_cpu_inc(rcu_cpu_kthread_loops);
-+ local_irq_disable();
-+ work = *workp;
-+ *workp = 0;
-+ local_irq_enable();
-+ if (work)
-+ rcu_process_callbacks();
-+ local_bh_enable();
-+ if (*workp == 0) {
-+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
-+ *statusp = RCU_KTHREAD_WAITING;
-+ return;
-+ }
- }
-- invoke_rcu_callbacks_kthread();
-+ *statusp = RCU_KTHREAD_YIELDING;
-+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
-+ schedule_timeout_interruptible(2);
-+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
-+ *statusp = RCU_KTHREAD_WAITING;
- }
-
--static void invoke_rcu_core(void)
-+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-+ .store = &rcu_cpu_kthread_task,
-+ .thread_should_run = rcu_cpu_kthread_should_run,
-+ .thread_fn = rcu_cpu_kthread,
-+ .thread_comm = "rcuc/%u",
-+ .setup = rcu_cpu_kthread_setup,
-+ .park = rcu_cpu_kthread_park,
-+};
-+
-+/*
-+ * Spawn per-CPU RCU core processing kthreads.
-+ */
-+static int __init rcu_spawn_core_kthreads(void)
- {
-- if (cpu_online(smp_processor_id()))
-- raise_softirq(RCU_SOFTIRQ);
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu)
-+ per_cpu(rcu_cpu_has_work, cpu) = 0;
-+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-+ return 0;
- }
-+early_initcall(rcu_spawn_core_kthreads);
-
- /*
- * Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -3040,6 +3153,7 @@
- }
- EXPORT_SYMBOL_GPL(call_rcu_sched);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Queue an RCU callback for invocation after a quicker grace period.
- */
-@@ -3048,6 +3162,7 @@
- __call_rcu(head, func, &rcu_bh_state, -1, 0);
- }
- EXPORT_SYMBOL_GPL(call_rcu_bh);
-+#endif
-
- /*
- * Queue an RCU callback for lazy invocation after a grace period.
-@@ -3139,6 +3254,7 @@
- }
- EXPORT_SYMBOL_GPL(synchronize_sched);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
-@@ -3165,6 +3281,7 @@
- wait_rcu_gp(call_rcu_bh);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-+#endif
-
- /**
- * get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3677,6 +3794,7 @@
- mutex_unlock(&rsp->barrier_mutex);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-@@ -3685,6 +3803,7 @@
- _rcu_barrier(&rcu_bh_state);
- }
- EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-+#endif
-
- /**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4021,7 +4140,7 @@
- }
- }
-
-- init_waitqueue_head(&rsp->gp_wq);
-+ init_swait_head(&rsp->gp_wq);
- rnp = rsp->level[rcu_num_lvls - 1];
- for_each_possible_cpu(i) {
- while (i > rnp->grphi)
-@@ -4120,7 +4239,6 @@
- rcu_init_one(&rcu_bh_state, &rcu_bh_data);
- rcu_init_one(&rcu_sched_state, &rcu_sched_data);
- __rcu_init_preempt();
-- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-
- /*
- * We don't need protection against CPU-hotplug here because
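
/*
 * A minimal illustrative sketch (not taken from the patch): the rcuc
 * conversion above hangs the per-CPU work off the smpboot infrastructure
 * instead of a softirq. The same skeleton works for any softirq that
 * should become a thread on RT; the my_* names are hypothetical:
 */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_task);
static DEFINE_PER_CPU(int, my_work_pending);

static int my_should_run(unsigned int cpu)
{
	return __this_cpu_read(my_work_pending);
}

static void my_thread_fn(unsigned int cpu)
{
	__this_cpu_write(my_work_pending, 0);
	/* ... do what the softirq handler used to do ... */
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_task,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "my_kthread/%u",
};

static int __init my_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&my_threads));
	return 0;
}
early_initcall(my_threads_init);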
-diff -Nur linux-4.1.13.orig/kernel/rcu/tree.h linux-4.1.13/kernel/rcu/tree.h
---- linux-4.1.13.orig/kernel/rcu/tree.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/rcu/tree.h 2015-11-29 09:23:09.641612055 +0100
-@@ -27,6 +27,7 @@
- #include <linux/threads.h>
- #include <linux/cpumask.h>
- #include <linux/seqlock.h>
-+#include <linux/wait-simple.h>
-
- /*
- * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
-@@ -210,7 +211,7 @@
- /* This can happen due to race conditions. */
- #endif /* #ifdef CONFIG_RCU_BOOST */
- #ifdef CONFIG_RCU_NOCB_CPU
-- wait_queue_head_t nocb_gp_wq[2];
-+ struct swait_head nocb_gp_wq[2];
- /* Place for rcu_nocb_kthread() to wait GP. */
- #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- int need_future_gp[2];
-@@ -349,7 +350,7 @@
- atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
- struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
- struct rcu_head **nocb_follower_tail;
-- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
-+ struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */
- struct task_struct *nocb_kthread;
- int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
-
-@@ -438,7 +439,7 @@
- unsigned long gpnum; /* Current gp number. */
- unsigned long completed; /* # of last completed gp. */
- struct task_struct *gp_kthread; /* Task for grace periods. */
-- wait_queue_head_t gp_wq; /* Where GP task waits. */
-+ struct swait_head gp_wq; /* Where GP task waits. */
- short gp_flags; /* Commands for GP task. */
- short gp_state; /* GP kthread sleep state. */
-
-@@ -529,12 +530,10 @@
- DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
-
--#ifdef CONFIG_RCU_BOOST
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DECLARE_PER_CPU(char, rcu_cpu_has_work);
--#endif /* #ifdef CONFIG_RCU_BOOST */
-
- #ifndef RCU_TREE_NONCORE
-
-@@ -553,10 +552,9 @@
- static void __init __rcu_init_preempt(void);
- static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
- static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
--static void invoke_rcu_callbacks_kthread(void);
- static bool rcu_is_callbacks_kthread(void);
-+static void rcu_cpu_kthread_setup(unsigned int cpu);
- #ifdef CONFIG_RCU_BOOST
--static void rcu_preempt_do_callbacks(void);
- static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp);
- #endif /* #ifdef CONFIG_RCU_BOOST */
-diff -Nur linux-4.1.13.orig/kernel/rcu/tree_plugin.h linux-4.1.13/kernel/rcu/tree_plugin.h
---- linux-4.1.13.orig/kernel/rcu/tree_plugin.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/rcu/tree_plugin.h 2015-11-29 09:23:09.641612055 +0100
-@@ -24,27 +24,20 @@
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
- */
-
--#include <linux/delay.h>
--#include <linux/gfp.h>
--#include <linux/oom.h>
--#include <linux/smpboot.h>
--#include "../time/tick-internal.h"
--
- #ifdef CONFIG_RCU_BOOST
-
- #include "../locking/rtmutex_common.h"
-
-+#endif /* #ifdef CONFIG_RCU_BOOST */
-+
- /*
- * Control variables for per-CPU and per-rcu_node kthreads. These
- * handle all flavors of RCU.
- */
--static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
- DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
--#endif /* #ifdef CONFIG_RCU_BOOST */
--
- #ifdef CONFIG_RCU_NOCB_CPU
- static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
- static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -291,7 +284,7 @@
- }
-
- /* Hardware IRQ handlers cannot block, complain if they get here. */
-- if (in_irq() || in_serving_softirq()) {
-+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
- lockdep_rcu_suspicious(__FILE__, __LINE__,
- "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
- pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
-@@ -496,15 +489,6 @@
- t->rcu_read_unlock_special.b.need_qs = true;
- }
-
--#ifdef CONFIG_RCU_BOOST
--
--static void rcu_preempt_do_callbacks(void)
--{
-- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
--}
--
--#endif /* #ifdef CONFIG_RCU_BOOST */
--
- /*
- * Queue a preemptible-RCU callback for invocation after a grace period.
- */
-@@ -939,6 +923,19 @@
-
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-+/*
-+ * If boosting, set rcuc kthreads to realtime priority.
-+ */
-+static void rcu_cpu_kthread_setup(unsigned int cpu)
-+{
-+#ifdef CONFIG_RCU_BOOST
-+ struct sched_param sp;
-+
-+ sp.sched_priority = kthread_prio;
-+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-+#endif /* #ifdef CONFIG_RCU_BOOST */
-+}
-+
- #ifdef CONFIG_RCU_BOOST
-
- #include "../locking/rtmutex_common.h"
-@@ -970,16 +967,6 @@
-
- #endif /* #else #ifdef CONFIG_RCU_TRACE */
-
--static void rcu_wake_cond(struct task_struct *t, int status)
--{
-- /*
-- * If the thread is yielding, only wake it when this
-- * is invoked from idle
-- */
-- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
-- wake_up_process(t);
--}
--
- /*
- * Carry out RCU priority boosting on the task indicated by ->exp_tasks
- * or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1125,23 +1112,6 @@
- }
-
- /*
-- * Wake up the per-CPU kthread to invoke RCU callbacks.
-- */
--static void invoke_rcu_callbacks_kthread(void)
--{
-- unsigned long flags;
--
-- local_irq_save(flags);
-- __this_cpu_write(rcu_cpu_has_work, 1);
-- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-- current != __this_cpu_read(rcu_cpu_kthread_task)) {
-- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
-- __this_cpu_read(rcu_cpu_kthread_status));
-- }
-- local_irq_restore(flags);
--}
--
--/*
- * Is the current CPU running the RCU-callbacks kthread?
- * Caller must have preemption disabled.
- */
-@@ -1196,67 +1166,6 @@
- return 0;
- }
-
--static void rcu_kthread_do_work(void)
--{
-- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
-- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
-- rcu_preempt_do_callbacks();
--}
--
--static void rcu_cpu_kthread_setup(unsigned int cpu)
--{
-- struct sched_param sp;
--
-- sp.sched_priority = kthread_prio;
-- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
--}
--
--static void rcu_cpu_kthread_park(unsigned int cpu)
--{
-- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
--}
--
--static int rcu_cpu_kthread_should_run(unsigned int cpu)
--{
-- return __this_cpu_read(rcu_cpu_has_work);
--}
--
--/*
-- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
-- * RCU softirq used in flavors and configurations of RCU that do not
-- * support RCU priority boosting.
-- */
--static void rcu_cpu_kthread(unsigned int cpu)
--{
-- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
-- int spincnt;
--
-- for (spincnt = 0; spincnt < 10; spincnt++) {
-- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
-- local_bh_disable();
-- *statusp = RCU_KTHREAD_RUNNING;
-- this_cpu_inc(rcu_cpu_kthread_loops);
-- local_irq_disable();
-- work = *workp;
-- *workp = 0;
-- local_irq_enable();
-- if (work)
-- rcu_kthread_do_work();
-- local_bh_enable();
-- if (*workp == 0) {
-- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
-- *statusp = RCU_KTHREAD_WAITING;
-- return;
-- }
-- }
-- *statusp = RCU_KTHREAD_YIELDING;
-- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
-- schedule_timeout_interruptible(2);
-- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
-- *statusp = RCU_KTHREAD_WAITING;
--}
--
- /*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1286,26 +1195,12 @@
- free_cpumask_var(cm);
- }
-
--static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-- .store = &rcu_cpu_kthread_task,
-- .thread_should_run = rcu_cpu_kthread_should_run,
-- .thread_fn = rcu_cpu_kthread,
-- .thread_comm = "rcuc/%u",
-- .setup = rcu_cpu_kthread_setup,
-- .park = rcu_cpu_kthread_park,
--};
--
- /*
- * Spawn boost kthreads -- called as soon as the scheduler is running.
- */
- static void __init rcu_spawn_boost_kthreads(void)
- {
- struct rcu_node *rnp;
-- int cpu;
--
-- for_each_possible_cpu(cpu)
-- per_cpu(rcu_cpu_has_work, cpu) = 0;
-- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- rcu_for_each_leaf_node(rcu_state_p, rnp)
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
- }
-@@ -1328,11 +1223,6 @@
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
--static void invoke_rcu_callbacks_kthread(void)
--{
-- WARN_ON_ONCE(1);
--}
--
- static bool rcu_is_callbacks_kthread(void)
- {
- return false;
-@@ -1356,7 +1246,7 @@
-
- #endif /* #else #ifdef CONFIG_RCU_BOOST */
-
--#if !defined(CONFIG_RCU_FAST_NO_HZ)
-+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
-
- /*
- * Check to see if any future RCU-related work will need to be done
-@@ -1374,7 +1264,9 @@
- return rcu_cpu_has_callbacks(NULL);
- }
- #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
-+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
-
-+#if !defined(CONFIG_RCU_FAST_NO_HZ)
- /*
- * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
- * after it.
-@@ -1472,6 +1364,8 @@
- return cbs_ready;
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- /*
- * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
- * to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1512,7 +1406,7 @@
- return 0;
- }
- #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
--
-+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
- /*
- * Prepare a CPU for idle from an RCU perspective. The first major task
- * is to sense whether nohz mode has been enabled or disabled via sysfs.
-@@ -1859,7 +1753,7 @@
- */
- static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
- {
-- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
-+ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
- }
-
- /*
-@@ -1877,8 +1771,8 @@
-
- static void rcu_init_one_nocb(struct rcu_node *rnp)
- {
-- init_waitqueue_head(&rnp->nocb_gp_wq[0]);
-- init_waitqueue_head(&rnp->nocb_gp_wq[1]);
-+ init_swait_head(&rnp->nocb_gp_wq[0]);
-+ init_swait_head(&rnp->nocb_gp_wq[1]);
- }
-
- #ifndef CONFIG_RCU_NOCB_CPU_ALL
-@@ -1903,7 +1797,7 @@
- if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
- /* Prior smp_mb__after_atomic() orders against prior enqueue. */
- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
-- wake_up(&rdp_leader->nocb_wq);
-+ swait_wake(&rdp_leader->nocb_wq);
- }
- }
-
-@@ -2116,7 +2010,7 @@
- */
- trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
- for (;;) {
-- wait_event_interruptible(
-+ swait_event_interruptible(
- rnp->nocb_gp_wq[c & 0x1],
- (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
- if (likely(d))
-@@ -2144,7 +2038,7 @@
- /* Wait for callbacks to appear. */
- if (!rcu_nocb_poll) {
- trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
-- wait_event_interruptible(my_rdp->nocb_wq,
-+ swait_event_interruptible(my_rdp->nocb_wq,
- !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
- /* Memory barrier handled by smp_mb() calls below and repoll. */
- } else if (firsttime) {
-@@ -2219,7 +2113,7 @@
- * List was empty, wake up the follower.
- * Memory barriers supplied by atomic_long_add().
- */
-- wake_up(&rdp->nocb_wq);
-+ swait_wake(&rdp->nocb_wq);
- }
- }
-
-@@ -2240,7 +2134,7 @@
- if (!rcu_nocb_poll) {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
- "FollowerSleep");
-- wait_event_interruptible(rdp->nocb_wq,
-+ swait_event_interruptible(rdp->nocb_wq,
- ACCESS_ONCE(rdp->nocb_follower_head));
- } else if (firsttime) {
- /* Don't drown trace log with "Poll"! */
-@@ -2399,7 +2293,7 @@
- static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
- {
- rdp->nocb_tail = &rdp->nocb_head;
-- init_waitqueue_head(&rdp->nocb_wq);
-+ init_swait_head(&rdp->nocb_wq);
- rdp->nocb_follower_tail = &rdp->nocb_follower_head;
- }
-
-diff -Nur linux-4.1.13.orig/kernel/rcu/update.c linux-4.1.13/kernel/rcu/update.c
---- linux-4.1.13.orig/kernel/rcu/update.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/rcu/update.c 2015-11-29 09:23:09.641612055 +0100
-@@ -227,6 +227,7 @@
- }
- EXPORT_SYMBOL_GPL(rcu_read_lock_held);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
- *
-@@ -253,6 +254,7 @@
- return in_softirq() || irqs_disabled();
- }
- EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
-+#endif
-
- #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
-diff -Nur linux-4.1.13.orig/kernel/relay.c linux-4.1.13/kernel/relay.c
---- linux-4.1.13.orig/kernel/relay.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/relay.c 2015-11-29 09:23:09.641612055 +0100
-@@ -339,6 +339,10 @@
- {
- struct rchan_buf *buf = (struct rchan_buf *)data;
- wake_up_interruptible(&buf->read_wait);
-+ /*
-+ * Stupid polling for now:
-+ */
-+ mod_timer(&buf->timer, jiffies + 1);
- }
-
- /**
-@@ -356,6 +360,7 @@
- init_waitqueue_head(&buf->read_wait);
- kref_init(&buf->kref);
- setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-+ mod_timer(&buf->timer, jiffies + 1);
- } else
- del_timer_sync(&buf->timer);
-
-@@ -739,15 +744,6 @@
- else
- buf->early_bytes += buf->chan->subbuf_size -
- buf->padding[old_subbuf];
-- smp_mb();
-- if (waitqueue_active(&buf->read_wait))
-- /*
-- * Calling wake_up_interruptible() from here
-- * will deadlock if we happen to be logging
-- * from the scheduler (trying to re-grab
-- * rq->lock), so defer it.
-- */
-- mod_timer(&buf->timer, jiffies + 1);
- }
-
- old = buf->data;
-diff -Nur linux-4.1.13.orig/kernel/sched/completion.c linux-4.1.13/kernel/sched/completion.c
---- linux-4.1.13.orig/kernel/sched/completion.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/completion.c 2015-11-29 09:23:09.641612055 +0100
-@@ -30,10 +30,10 @@
- {
- unsigned long flags;
-
-- spin_lock_irqsave(&x->wait.lock, flags);
-+ raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
-- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
-- spin_unlock_irqrestore(&x->wait.lock, flags);
-+ __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
-+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete);
-
-@@ -50,10 +50,10 @@
- {
- unsigned long flags;
-
-- spin_lock_irqsave(&x->wait.lock, flags);
-+ raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done += UINT_MAX/2;
-- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
-- spin_unlock_irqrestore(&x->wait.lock, flags);
-+ __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
-+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete_all);
-
-@@ -62,20 +62,20 @@
- long (*action)(long), long timeout, int state)
- {
- if (!x->done) {
-- DECLARE_WAITQUEUE(wait, current);
-+ DEFINE_SWAITER(wait);
-
-- __add_wait_queue_tail_exclusive(&x->wait, &wait);
-+ swait_prepare_locked(&x->wait, &wait);
- do {
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- break;
- }
- __set_current_state(state);
-- spin_unlock_irq(&x->wait.lock);
-+ raw_spin_unlock_irq(&x->wait.lock);
- timeout = action(timeout);
-- spin_lock_irq(&x->wait.lock);
-+ raw_spin_lock_irq(&x->wait.lock);
- } while (!x->done && timeout);
-- __remove_wait_queue(&x->wait, &wait);
-+ swait_finish_locked(&x->wait, &wait);
- if (!x->done)
- return timeout;
- }
-@@ -89,9 +89,9 @@
- {
- might_sleep();
-
-- spin_lock_irq(&x->wait.lock);
-+ raw_spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, action, timeout, state);
-- spin_unlock_irq(&x->wait.lock);
-+ raw_spin_unlock_irq(&x->wait.lock);
- return timeout;
- }
-
-@@ -277,12 +277,12 @@
- if (!READ_ONCE(x->done))
- return 0;
-
-- spin_lock_irqsave(&x->wait.lock, flags);
-+ raw_spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- else
- x->done--;
-- spin_unlock_irqrestore(&x->wait.lock, flags);
-+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(try_wait_for_completion);
-@@ -311,7 +311,7 @@
- * after it's acquired the lock.
- */
- smp_rmb();
-- spin_unlock_wait(&x->wait.lock);
-+ raw_spin_unlock_wait(&x->wait.lock);
- return true;
- }
- EXPORT_SYMBOL(completion_done);
-diff -Nur linux-4.1.13.orig/kernel/sched/core.c linux-4.1.13/kernel/sched/core.c
---- linux-4.1.13.orig/kernel/sched/core.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/core.c 2015-11-29 09:23:09.645611789 +0100
-@@ -282,7 +282,11 @@
- * Number of tasks to iterate in a single balance run.
- * Limited because this is done with IRQs disabled.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- const_debug unsigned int sysctl_sched_nr_migrate = 32;
-+#else
-+const_debug unsigned int sysctl_sched_nr_migrate = 8;
-+#endif
-
- /*
- * period over which we average the RT time consumption, measured
-@@ -461,6 +465,7 @@
-
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rq->hrtick_timer.function = hrtick;
-+ rq->hrtick_timer.irqsafe = 1;
- }
- #else /* CONFIG_SCHED_HRTICK */
- static inline void hrtick_clear(struct rq *rq)
-@@ -541,6 +546,52 @@
- #endif
- #endif
-
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+ struct wake_q_node *node = &task->wake_q;
-+
-+ /*
-+ * Atomically grab the task: if ->wake_q is non-NULL already, it means
-+ * it's already queued (either by us or someone else) and will get the
-+ * wakeup due to that.
-+ *
-+ * This cmpxchg() implies a full barrier, which pairs with the write
-+ * barrier implied by the wakeup in wake_up_q().
-+ */
-+ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
-+ return;
-+
-+ get_task_struct(task);
-+
-+ /*
-+ * The head is context local, there can be no concurrency.
-+ */
-+ *head->lastp = node;
-+ head->lastp = &node->next;
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+ struct wake_q_node *node = head->first;
-+
-+ while (node != WAKE_Q_TAIL) {
-+ struct task_struct *task;
-+
-+ task = container_of(node, struct task_struct, wake_q);
-+ BUG_ON(!task);
-+ /* task can safely be re-inserted now */
-+ node = node->next;
-+ task->wake_q.next = NULL;
-+
-+ /*
-+ * wake_up_process() implies a wmb() to pair with the queueing
-+ * in wake_q_add() so as not to miss wakeups.
-+ */
-+ wake_up_process(task);
-+ put_task_struct(task);
-+ }
-+}
-+
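
/*
 * A minimal illustrative sketch (not taken from the patch): the point of
 * wake_q_add() above is that the task's embedded node is claimed with a
 * single cmpxchg, so two concurrent wakers can never double-queue it.
 * Userspace analogue of the claim step, assuming GCC/Clang __atomic
 * builtins; the wq_* names are hypothetical:
 */
#include <stddef.h>

struct wq_node {
	struct wq_node *next;	/* NULL while not queued */
};

#define WQ_TAIL ((struct wq_node *)0x1)

static int wq_try_claim(struct wq_node *node)
{
	struct wq_node *expected = NULL;

	/* the loser sees a non-NULL next and backs off: already queued */
	return __atomic_compare_exchange_n(&node->next, &expected, WQ_TAIL,
					   0, __ATOMIC_ACQ_REL,
					   __ATOMIC_RELAXED);
}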
- /*
- * resched_curr - mark rq's current task 'to be rescheduled now'.
- *
-@@ -572,6 +623,38 @@
- trace_sched_wake_idle_without_ipi(cpu);
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+void resched_curr_lazy(struct rq *rq)
-+{
-+ struct task_struct *curr = rq->curr;
-+ int cpu;
-+
-+ if (!sched_feat(PREEMPT_LAZY)) {
-+ resched_curr(rq);
-+ return;
-+ }
-+
-+ lockdep_assert_held(&rq->lock);
-+
-+ if (test_tsk_need_resched(curr))
-+ return;
-+
-+ if (test_tsk_need_resched_lazy(curr))
-+ return;
-+
-+ set_tsk_need_resched_lazy(curr);
-+
-+ cpu = cpu_of(rq);
-+ if (cpu == smp_processor_id())
-+ return;
-+
-+ /* NEED_RESCHED_LAZY must be visible before we test polling */
-+ smp_mb();
-+ if (!tsk_is_polling(curr))
-+ smp_send_reschedule(cpu);
-+}
-+#endif
-+
- void resched_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
-@@ -595,12 +678,14 @@
- */
- int get_nohz_timer_target(int pinned)
- {
-- int cpu = smp_processor_id();
-+ int cpu;
- int i;
- struct sched_domain *sd;
-
-+ preempt_disable_rt();
-+ cpu = smp_processor_id();
- if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
-- return cpu;
-+ goto preempt_en_rt;
-
- rcu_read_lock();
- for_each_domain(cpu, sd) {
-@@ -613,6 +698,8 @@
- }
- unlock:
- rcu_read_unlock();
-+preempt_en_rt:
-+ preempt_enable_rt();
- return cpu;
- }
- /*
-@@ -1164,6 +1251,18 @@
-
- static int migration_cpu_stop(void *data);
-
-+static bool check_task_state(struct task_struct *p, long match_state)
-+{
-+ bool match = false;
-+
-+ raw_spin_lock_irq(&p->pi_lock);
-+ if (p->state == match_state || p->saved_state == match_state)
-+ match = true;
-+ raw_spin_unlock_irq(&p->pi_lock);
-+
-+ return match;
-+}
-+
- /*
- * wait_task_inactive - wait for a thread to unschedule.
- *
-@@ -1208,7 +1307,7 @@
- * is actually now running somewhere else!
- */
- while (task_running(rq, p)) {
-- if (match_state && unlikely(p->state != match_state))
-+ if (match_state && !check_task_state(p, match_state))
- return 0;
- cpu_relax();
- }
-@@ -1223,7 +1322,8 @@
- running = task_running(rq, p);
- queued = task_on_rq_queued(p);
- ncsw = 0;
-- if (!match_state || p->state == match_state)
-+ if (!match_state || p->state == match_state ||
-+ p->saved_state == match_state)
- ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, p, &flags);
-
-@@ -1449,10 +1549,6 @@
- {
- activate_task(rq, p, en_flags);
- p->on_rq = TASK_ON_RQ_QUEUED;
--
-- /* if a worker is waking up, notify workqueue */
-- if (p->flags & PF_WQ_WORKER)
-- wq_worker_waking_up(p, cpu_of(rq));
- }
-
- /*
-@@ -1462,9 +1558,9 @@
- ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- check_preempt_curr(rq, p, wake_flags);
-- trace_sched_wakeup(p, true);
--
- p->state = TASK_RUNNING;
-+ trace_sched_wakeup(p);
-+
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
- p->sched_class->task_woken(rq, p);
-@@ -1666,8 +1762,29 @@
- */
- smp_mb__before_spinlock();
- raw_spin_lock_irqsave(&p->pi_lock, flags);
-- if (!(p->state & state))
-+ if (!(p->state & state)) {
-+ /*
-+ * The task might be running due to a spinlock sleeper
-+ * wakeup. Check the saved state and set it to running
-+ * if the wakeup condition is true.
-+ */
-+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
-+ if (p->saved_state & state) {
-+ p->saved_state = TASK_RUNNING;
-+ success = 1;
-+ }
-+ }
- goto out;
-+ }
-+
-+ /*
-+ * If this is a regular wakeup, then we can unconditionally
-+ * clear the saved state of a "lock sleeper".
-+ */
-+ if (!(wake_flags & WF_LOCK_SLEEPER))
-+ p->saved_state = TASK_RUNNING;
-+
-+ trace_sched_waking(p);
-
- success = 1; /* we're going to change ->state */
- cpu = task_cpu(p);
-@@ -1710,42 +1827,6 @@
- }
-
- /**
-- * try_to_wake_up_local - try to wake up a local task with rq lock held
-- * @p: the thread to be awakened
-- *
-- * Put @p on the run-queue if it's not already there. The caller must
-- * ensure that this_rq() is locked, @p is bound to this_rq() and not
-- * the current task.
-- */
--static void try_to_wake_up_local(struct task_struct *p)
--{
-- struct rq *rq = task_rq(p);
--
-- if (WARN_ON_ONCE(rq != this_rq()) ||
-- WARN_ON_ONCE(p == current))
-- return;
--
-- lockdep_assert_held(&rq->lock);
--
-- if (!raw_spin_trylock(&p->pi_lock)) {
-- raw_spin_unlock(&rq->lock);
-- raw_spin_lock(&p->pi_lock);
-- raw_spin_lock(&rq->lock);
-- }
--
-- if (!(p->state & TASK_NORMAL))
-- goto out;
--
-- if (!task_on_rq_queued(p))
-- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
--
-- ttwu_do_wakeup(rq, p, 0);
-- ttwu_stat(p, smp_processor_id(), 0);
--out:
-- raw_spin_unlock(&p->pi_lock);
--}
--
--/**
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- *
-@@ -1759,11 +1840,23 @@
- */
- int wake_up_process(struct task_struct *p)
- {
-- WARN_ON(task_is_stopped_or_traced(p));
-+ WARN_ON(__task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, 0);
- }
- EXPORT_SYMBOL(wake_up_process);
-
-+/**
-+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
-+ * @p: The process to be woken up.
-+ *
-+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
-+ * the nature of the wakeup.
-+ */
-+int wake_up_lock_sleeper(struct task_struct *p)
-+{
-+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
-+}
-+
- int wake_up_state(struct task_struct *p, unsigned int state)
- {
- return try_to_wake_up(p, state, 0);
-@@ -1959,6 +2052,9 @@
- p->on_cpu = 0;
- #endif
- init_task_preempt_count(p);
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
-+ task_thread_info(p)->preempt_lazy_count = 0;
-+#endif
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
- RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2094,7 +2190,7 @@
- rq = __task_rq_lock(p);
- activate_task(rq, p, 0);
- p->on_rq = TASK_ON_RQ_QUEUED;
-- trace_sched_wakeup_new(p, true);
-+ trace_sched_wakeup_new(p);
- check_preempt_curr(rq, p, WF_FORK);
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
-@@ -2231,8 +2327,12 @@
- finish_arch_post_lock_switch();
-
- fire_sched_in_preempt_notifiers(current);
-+ /*
-+ * We use mmdrop_delayed() here so we don't have to do the
-+ * full __mmdrop() when we are the last user.
-+ */
- if (mm)
-- mmdrop(mm);
-+ mmdrop_delayed(mm);
- if (unlikely(prev_state == TASK_DEAD)) {
- if (prev->sched_class->task_dead)
- prev->sched_class->task_dead(prev);
-@@ -2657,6 +2757,133 @@
- schedstat_inc(this_rq(), sched_count);
- }
-
-+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
-+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
-+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
-+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
-+
-+static inline void update_migrate_disable(struct task_struct *p)
-+{
-+ const struct cpumask *mask;
-+
-+ if (likely(!p->migrate_disable))
-+ return;
-+
-+ /* Did we already update affinity? */
-+ if (unlikely(migrate_disabled_updated(p)))
-+ return;
-+
-+ /*
-+ * Since this is always current we can get away with only locking
-+ * rq->lock, the ->cpus_allowed value can normally only be changed
-+ * while holding both p->pi_lock and rq->lock, but seeing that this
-+ * is current, we cannot actually be waking up, so all code that
-+ * relies on serialization against p->pi_lock is out of scope.
-+ *
-+ * Having rq->lock serializes us against things like
-+ * set_cpus_allowed_ptr() that can still happen concurrently.
-+ */
-+ mask = tsk_cpus_allowed(p);
-+
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, mask);
-+ /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */
-+ p->nr_cpus_allowed = 1;
-+
-+ /* Let migrate_enable know to fix things back up */
-+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
-+}
-+
-+void migrate_disable(void)
-+{
-+ struct task_struct *p = current;
-+
-+ if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+ p->migrate_disable_atomic++;
-+#endif
-+ return;
-+ }
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ if (unlikely(p->migrate_disable_atomic)) {
-+ tracing_off();
-+ WARN_ON_ONCE(1);
-+ }
-+#endif
-+
-+ if (p->migrate_disable) {
-+ p->migrate_disable++;
-+ return;
-+ }
-+
-+ preempt_disable();
-+ preempt_lazy_disable();
-+ pin_current_cpu();
-+ p->migrate_disable = 1;
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+ struct task_struct *p = current;
-+ const struct cpumask *mask;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ if (in_atomic()) {
-+#ifdef CONFIG_SCHED_DEBUG
-+ p->migrate_disable_atomic--;
-+#endif
-+ return;
-+ }
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ if (unlikely(p->migrate_disable_atomic)) {
-+ tracing_off();
-+ WARN_ON_ONCE(1);
-+ }
-+#endif
-+ WARN_ON_ONCE(p->migrate_disable <= 0);
-+
-+ if (migrate_disable_count(p) > 1) {
-+ p->migrate_disable--;
-+ return;
-+ }
-+
-+ preempt_disable();
-+ if (unlikely(migrate_disabled_updated(p))) {
-+ /*
-+ * Undo whatever update_migrate_disable() did, also see there
-+ * about locking.
-+ */
-+ rq = this_rq();
-+ raw_spin_lock_irqsave(&rq->lock, flags);
-+
-+ /*
-+ * Clearing migrate_disable causes tsk_cpus_allowed to
-+ * show the task's original cpu affinity.
-+ */
-+ p->migrate_disable = 0;
-+ mask = tsk_cpus_allowed(p);
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, mask);
-+ p->nr_cpus_allowed = cpumask_weight(mask);
-+ raw_spin_unlock_irqrestore(&rq->lock, flags);
-+ } else
-+ p->migrate_disable = 0;
-+
-+ unpin_current_cpu();
-+ preempt_enable();
-+ preempt_lazy_enable();
-+}
-+EXPORT_SYMBOL(migrate_enable);
-+#else
-+static inline void update_migrate_disable(struct task_struct *p) { }
-+#define migrate_disabled_updated(p) 0
-+#endif
-+
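
/*
 * A minimal illustrative sketch (not taken from the patch):
 * migrate_disable() above is a per-task recursion counter where only the
 * 0 -> 1 transition pins the CPU and only the matching 1 -> 0 transition
 * in migrate_enable() restores the real affinity. Stripped-down shape of
 * the disable side (the my_ prefix marks it as hypothetical):
 */
static void my_migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migrate_disable) {
		p->migrate_disable++;	/* nested call: just count */
		return;
	}

	preempt_disable();		/* make the transition atomic */
	pin_current_cpu();		/* hold off CPU unplug */
	p->migrate_disable = 1;
	preempt_enable();		/* preemptible again, but pinned */
}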
- /*
- * Pick up the highest-prio task:
- */
-@@ -2763,6 +2990,8 @@
- smp_mb__before_spinlock();
- raw_spin_lock_irq(&rq->lock);
-
-+ update_migrate_disable(prev);
-+
- rq->clock_skip_update <<= 1; /* promote REQ to ACT */
-
- switch_count = &prev->nivcsw;
-@@ -2772,19 +3001,6 @@
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
--
-- /*
-- * If a worker went to sleep, notify and ask workqueue
-- * whether it wants to wake up a task to maintain
-- * concurrency.
-- */
-- if (prev->flags & PF_WQ_WORKER) {
-- struct task_struct *to_wakeup;
--
-- to_wakeup = wq_worker_sleeping(prev, cpu);
-- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup);
-- }
- }
- switch_count = &prev->nvcsw;
- }
-@@ -2794,6 +3010,7 @@
-
- next = pick_next_task(rq, prev);
- clear_tsk_need_resched(prev);
-+ clear_tsk_need_resched_lazy(prev);
- clear_preempt_need_resched();
- rq->clock_skip_update = 0;
-
-@@ -2814,8 +3031,19 @@
-
- static inline void sched_submit_work(struct task_struct *tsk)
- {
-- if (!tsk->state || tsk_is_pi_blocked(tsk))
-+ if (!tsk->state)
-+ return;
-+ /*
-+ * If a worker went to sleep, notify and ask workqueue whether
-+ * it wants to wake up a task to maintain concurrency.
-+ */
-+ if (tsk->flags & PF_WQ_WORKER)
-+ wq_worker_sleeping(tsk);
-+
-+
-+ if (tsk_is_pi_blocked(tsk))
- return;
-+
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
-@@ -2824,6 +3052,12 @@
- blk_schedule_flush_plug(tsk);
- }
-
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+ if (tsk->flags & PF_WQ_WORKER)
-+ wq_worker_running(tsk);
-+}
-+
- asmlinkage __visible void __sched schedule(void)
- {
- struct task_struct *tsk = current;
-@@ -2832,6 +3066,7 @@
- do {
- __schedule();
- } while (need_resched());
-+ sched_update_worker(tsk);
- }
- EXPORT_SYMBOL(schedule);
-
-@@ -2923,6 +3158,14 @@
- if (likely(!preemptible()))
- return;
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+ /*
-+ * Check for lazy preemption
-+ */
-+ if (current_thread_info()->preempt_lazy_count &&
-+ !test_thread_flag(TIF_NEED_RESCHED))
-+ return;
-+#endif
- do {
- __preempt_count_add(PREEMPT_ACTIVE);
- /*
-@@ -2931,7 +3174,16 @@
- * an infinite recursion.
- */
- prev_ctx = exception_enter();
-+ /*
-+ * The add/subtract must not be traced by the function
-+ * tracer. But we still want to account for the
-+ * preempt off latency tracer. Since the _notrace versions
-+ * of add/subtract skip the accounting for latency tracer
-+ * we must force it manually.
-+ */
-+ start_critical_timings();
- __schedule();
-+ stop_critical_timings();
- exception_exit(prev_ctx);
-
- __preempt_count_sub(PREEMPT_ACTIVE);
-@@ -4268,6 +4520,7 @@
- }
- EXPORT_SYMBOL(__cond_resched_lock);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int __sched __cond_resched_softirq(void)
- {
- BUG_ON(!in_softirq());
-@@ -4281,6 +4534,7 @@
- return 0;
- }
- EXPORT_SYMBOL(__cond_resched_softirq);
-+#endif
-
- /**
- * yield - yield the current processor to other threads.
-@@ -4635,7 +4889,9 @@
-
- /* Set the preempt count _outside_ the spinlocks! */
- init_idle_preempt_count(idle, cpu);
--
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
-+ task_thread_info(idle)->preempt_lazy_count = 0;
-+#endif
- /*
- * The idle tasks have their own, simple scheduling class:
- */
-@@ -4755,11 +5011,91 @@
-
- void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- {
-- if (p->sched_class->set_cpus_allowed)
-- p->sched_class->set_cpus_allowed(p, new_mask);
-+ if (!migrate_disabled_updated(p)) {
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, new_mask);
-+ p->nr_cpus_allowed = cpumask_weight(new_mask);
-+ }
-
- cpumask_copy(&p->cpus_allowed, new_mask);
-- p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
-+static DEFINE_MUTEX(sched_down_mutex);
-+static cpumask_t sched_down_cpumask;
-+
-+void tell_sched_cpu_down_begin(int cpu)
-+{
-+ mutex_lock(&sched_down_mutex);
-+ cpumask_set_cpu(cpu, &sched_down_cpumask);
-+ mutex_unlock(&sched_down_mutex);
-+}
-+
-+void tell_sched_cpu_down_done(int cpu)
-+{
-+ mutex_lock(&sched_down_mutex);
-+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
-+ mutex_unlock(&sched_down_mutex);
-+}
-+
-+/**
-+ * migrate_me - try to move the current task off this cpu
-+ *
-+ * Used by the pin_current_cpu() code to try to get tasks
-+ * to move off the current CPU as it is going down.
-+ * It will only move the task if the task isn't pinned to
-+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
-+ * and the task has to be in a RUNNING state. Otherwise the
-+ * movement of the task will wake it up (change its state
-+ * to running) when the task did not expect it.
-+ *
-+ * Returns 1 if it succeeded in moving the current task,
-+ * 0 otherwise.
-+ */
-+int migrate_me(void)
-+{
-+ struct task_struct *p = current;
-+ struct migration_arg arg;
-+ struct cpumask *cpumask;
-+ struct cpumask *mask;
-+ unsigned long flags;
-+ unsigned int dest_cpu;
-+ struct rq *rq;
-+
-+ /*
-+	 * We cannot migrate tasks bound to a CPU or tasks that are
-+	 * not running. The movement of the task will wake it up.
-+ */
-+ if (p->flags & PF_NO_SETAFFINITY || p->state)
-+ return 0;
-+
-+ mutex_lock(&sched_down_mutex);
-+ rq = task_rq_lock(p, &flags);
-+
-+ cpumask = this_cpu_ptr(&sched_cpumasks);
-+ mask = &p->cpus_allowed;
-+
-+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
-+
-+ if (!cpumask_weight(cpumask)) {
-+ /* It's only on this CPU? */
-+ task_rq_unlock(rq, p, &flags);
-+ mutex_unlock(&sched_down_mutex);
-+ return 0;
-+ }
-+
-+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
-+
-+ arg.task = p;
-+ arg.dest_cpu = dest_cpu;
-+
-+ task_rq_unlock(rq, p, &flags);
-+
-+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+ tlb_migrate_finish(p->mm);
-+ mutex_unlock(&sched_down_mutex);
-+
-+ return 1;
- }
-
- /*
-@@ -4805,7 +5141,7 @@
- do_set_cpus_allowed(p, new_mask);
-
- /* Can the task run on the task's current CPU? If so, we're done */
-- if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- goto out;
-
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4945,6 +5281,8 @@
-
- #ifdef CONFIG_HOTPLUG_CPU
-
-+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
-+
- /*
- * Ensures that the idle task is using init_mm right before its cpu goes
- * offline.
-@@ -4959,7 +5297,11 @@
- switch_mm(mm, &init_mm, current);
- finish_arch_post_lock_switch();
- }
-- mmdrop(mm);
-+ /*
-+	 * Defer the cleanup to a live cpu. On RT we can neither
-+ * call mmdrop() nor mmdrop_delayed() from here.
-+ */
-+ per_cpu(idle_last_mm, smp_processor_id()) = mm;
- }
-
- /*
-@@ -5302,6 +5644,10 @@
-
- case CPU_DEAD:
- calc_load_migrate(rq);
-+ if (per_cpu(idle_last_mm, cpu)) {
-+ mmdrop(per_cpu(idle_last_mm, cpu));
-+ per_cpu(idle_last_mm, cpu) = NULL;
-+ }
- break;
- #endif
- }
-@@ -7281,7 +7627,8 @@
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- static inline int preempt_count_equals(int preempt_offset)
- {
-- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
-+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
-+ sched_rcu_preempt_depth();
-
- return (nested == preempt_offset);
- }
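
The kernel/sched/core.c hunks above introduce lazy-preemption bookkeeping, move the workqueue sleep/wake notifications into sched_submit_work()/sched_update_worker(), defer the idle task's mmdrop() to a live CPU, and add migrate_me() so a task can evacuate a CPU that is being taken down. A minimal usage sketch for the hotplug side; the caller shown here is illustrative, not part of the hunk:

	/* Illustrative caller, e.g. from a pin_current_cpu()-style path:
	 * mark the CPU as draining, ask the scheduler to move us away,
	 * then clear the mark once the CPU is down. */
	static void drain_this_cpu(int cpu)
	{
		tell_sched_cpu_down_begin(cpu);

		/* Best effort: returns 1 on success, 0 if the task is
		 * pinned (PF_NO_SETAFFINITY, migrate_disable) or not
		 * running. */
		if (!migrate_me())
			pr_debug("current task stays on dying cpu %d\n", cpu);

		/* ... actual CPU teardown happens here ... */

		tell_sched_cpu_down_done(cpu);
	}
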
-diff -Nur linux-4.1.13.orig/kernel/sched/cputime.c linux-4.1.13/kernel/sched/cputime.c
---- linux-4.1.13.orig/kernel/sched/cputime.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/cputime.c 2015-11-29 09:23:09.645611789 +0100
-@@ -675,37 +675,45 @@
-
- void vtime_account_system(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- __vtime_account_system(tsk);
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
-
- void vtime_gen_account_irq_exit(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- __vtime_account_system(tsk);
- if (context_tracking_in_user())
- tsk->vtime_snap_whence = VTIME_USER;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
-
- void vtime_account_user(struct task_struct *tsk)
- {
- cputime_t delta_cpu;
-
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- delta_cpu = get_vtime_delta(tsk);
- tsk->vtime_snap_whence = VTIME_SYS;
- account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
-
- void vtime_user_enter(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- __vtime_account_system(tsk);
- tsk->vtime_snap_whence = VTIME_USER;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
-
- void vtime_guest_enter(struct task_struct *tsk)
-@@ -717,19 +725,23 @@
- * synchronization against the reader (task_gtime())
- * that can thus safely catch up with a tickless delta.
- */
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- __vtime_account_system(tsk);
- current->flags |= PF_VCPU;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_enter);
-
- void vtime_guest_exit(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
- __vtime_account_system(tsk);
- current->flags &= ~PF_VCPU;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_exit);
-
-@@ -742,24 +754,30 @@
-
- void arch_vtime_task_switch(struct task_struct *prev)
- {
-- write_seqlock(&prev->vtime_seqlock);
-+ raw_spin_lock(&prev->vtime_lock);
-+ write_seqcount_begin(&prev->vtime_seq);
- prev->vtime_snap_whence = VTIME_SLEEPING;
-- write_sequnlock(&prev->vtime_seqlock);
-+ write_seqcount_end(&prev->vtime_seq);
-+ raw_spin_unlock(&prev->vtime_lock);
-
-- write_seqlock(&current->vtime_seqlock);
-+ raw_spin_lock(&current->vtime_lock);
-+ write_seqcount_begin(&current->vtime_seq);
- current->vtime_snap_whence = VTIME_SYS;
- current->vtime_snap = sched_clock_cpu(smp_processor_id());
-- write_sequnlock(&current->vtime_seqlock);
-+ write_seqcount_end(&current->vtime_seq);
-+ raw_spin_unlock(&current->vtime_lock);
- }
-
- void vtime_init_idle(struct task_struct *t, int cpu)
- {
- unsigned long flags;
-
-- write_seqlock_irqsave(&t->vtime_seqlock, flags);
-+ raw_spin_lock_irqsave(&t->vtime_lock, flags);
-+ write_seqcount_begin(&t->vtime_seq);
- t->vtime_snap_whence = VTIME_SYS;
- t->vtime_snap = sched_clock_cpu(cpu);
-- write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
-+ write_seqcount_end(&t->vtime_seq);
-+ raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
- }
-
- cputime_t task_gtime(struct task_struct *t)
-@@ -768,13 +786,13 @@
- cputime_t gtime;
-
- do {
-- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seq);
-
- gtime = t->gtime;
- if (t->flags & PF_VCPU)
- gtime += vtime_delta(t);
-
-- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seq, seq));
-
- return gtime;
- }
-@@ -797,7 +815,7 @@
- *udelta = 0;
- *sdelta = 0;
-
-- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seq);
-
- if (u_dst)
- *u_dst = *u_src;
-@@ -821,7 +839,7 @@
- if (t->vtime_snap_whence == VTIME_SYS)
- *sdelta = delta;
- }
-- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seq, seq));
- }
-
-
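
The vtime accounting above trades its seqlock_t for a raw_spinlock_t plus a bare seqcount_t: on RT the spinlock embedded in a seqlock_t becomes a sleeping lock, which these writers, running from irq and context-switch paths, must not take. The resulting pattern, condensed from the hunk:

	/* Writer: serialize with the raw lock (still a spinning lock on
	 * RT), publish the update through the seqcount. */
	raw_spin_lock(&tsk->vtime_lock);
	write_seqcount_begin(&tsk->vtime_seq);
	/* ... update tsk->vtime_snap / tsk->vtime_snap_whence ... */
	write_seqcount_end(&tsk->vtime_seq);
	raw_spin_unlock(&tsk->vtime_lock);

	/* Reader: lockless retry loop, same shape as with a seqlock. */
	do {
		seq = read_seqcount_begin(&t->vtime_seq);
		/* ... sample t->gtime etc. ... */
	} while (read_seqcount_retry(&t->vtime_seq, seq));
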
-diff -Nur linux-4.1.13.orig/kernel/sched/deadline.c linux-4.1.13/kernel/sched/deadline.c
---- linux-4.1.13.orig/kernel/sched/deadline.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/deadline.c 2015-11-29 09:23:09.645611789 +0100
-@@ -637,6 +637,7 @@
-
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = dl_task_timer;
-+ timer->irqsafe = 1;
- }
-
- static
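
The irqsafe field set here exists only with this patch set applied: on RT, hrtimer callbacks are otherwise deferred to a softirq thread, and marking a timer irqsafe keeps it expiring from hard interrupt context, which the deadline throttling timer needs. The idiom, as used in the hunk:

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
	timer->irqsafe = 1;	/* RT: expire in hard irq, do not defer */
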
-diff -Nur linux-4.1.13.orig/kernel/sched/debug.c linux-4.1.13/kernel/sched/debug.c
---- linux-4.1.13.orig/kernel/sched/debug.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/debug.c 2015-11-29 09:23:09.645611789 +0100
-@@ -260,6 +260,9 @@
- P(rt_throttled);
- PN(rt_time);
- PN(rt_runtime);
-+#ifdef CONFIG_SMP
-+ P(rt_nr_migratory);
-+#endif
-
- #undef PN
- #undef P
-@@ -648,6 +651,10 @@
- #endif
- P(policy);
- P(prio);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ P(migrate_disable);
-+#endif
-+ P(nr_cpus_allowed);
- #undef PN
- #undef __PN
- #undef P
-diff -Nur linux-4.1.13.orig/kernel/sched/fair.c linux-4.1.13/kernel/sched/fair.c
---- linux-4.1.13.orig/kernel/sched/fair.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/fair.c 2015-11-29 09:23:09.645611789 +0100
-@@ -3201,7 +3201,7 @@
- ideal_runtime = sched_slice(cfs_rq, curr);
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime) {
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- /*
- * The current task ran long enough, ensure it doesn't get
- * re-elected due to buddy favours.
-@@ -3225,7 +3225,7 @@
- return;
-
- if (delta > ideal_runtime)
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static void
-@@ -3366,7 +3366,7 @@
- * validating it and just reschedule.
- */
- if (queued) {
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- return;
- }
- /*
-@@ -3557,7 +3557,7 @@
- * hierarchy can be throttled
- */
- if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static __always_inline
-@@ -4180,7 +4180,7 @@
-
- if (delta < 0) {
- if (rq->curr == p)
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- return;
- }
- hrtick_start(rq, delta);
-@@ -5076,7 +5076,7 @@
- return;
-
- preempt:
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
-@@ -7869,7 +7869,7 @@
- * 'current' within the tree based on its new key value.
- */
- swap(curr->vruntime, se->vruntime);
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- }
-
- se->vruntime -= cfs_rq->min_vruntime;
-@@ -7894,7 +7894,7 @@
- */
- if (rq->curr == p) {
- if (p->prio > oldprio)
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- } else
- check_preempt_curr(rq, p, 0);
- }
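
Every CFS-internal reschedule request above is downgraded from resched_curr() to resched_curr_lazy(). Its CONFIG_PREEMPT_LAZY body is not part of this file; a hypothetical sketch of the intended semantics, with the flag and feature names taken from the rest of the patch:

	/* Hypothetical sketch, not the patch's implementation: a lazy
	 * request sets TIF_NEED_RESCHED_LAZY, which kernel code may
	 * defer; it is only folded into a real reschedule at return to
	 * user space or at an explicit preemption point. */
	void resched_curr_lazy(struct rq *rq)
	{
		if (!sched_feat(PREEMPT_LAZY)) {	/* see features.h below */
			resched_curr(rq);
			return;
		}
		set_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY);
	}
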
-diff -Nur linux-4.1.13.orig/kernel/sched/features.h linux-4.1.13/kernel/sched/features.h
---- linux-4.1.13.orig/kernel/sched/features.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/features.h 2015-11-29 09:23:09.645611789 +0100
-@@ -50,11 +50,19 @@
- */
- SCHED_FEAT(NONTASK_CAPACITY, true)
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+SCHED_FEAT(TTWU_QUEUE, false)
-+# ifdef CONFIG_PREEMPT_LAZY
-+SCHED_FEAT(PREEMPT_LAZY, true)
-+# endif
-+#else
-+
- /*
- * Queue remote wakeups on the target CPU and process them
- * using the scheduler IPI. Reduces rq->lock contention/bounces.
- */
- SCHED_FEAT(TTWU_QUEUE, true)
-+#endif
-
- #ifdef HAVE_RT_PUSH_IPI
- /*
-diff -Nur linux-4.1.13.orig/kernel/sched/Makefile linux-4.1.13/kernel/sched/Makefile
---- linux-4.1.13.orig/kernel/sched/Makefile 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/Makefile 2015-11-29 09:23:09.641612055 +0100
-@@ -13,7 +13,7 @@
-
- obj-y += core.o proc.o clock.o cputime.o
- obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
--obj-y += wait.o completion.o idle.o
-+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
- obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -Nur linux-4.1.13.orig/kernel/sched/rt.c linux-4.1.13/kernel/sched/rt.c
---- linux-4.1.13.orig/kernel/sched/rt.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/rt.c 2015-11-29 09:23:09.645611789 +0100
-@@ -44,6 +44,7 @@
-
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ rt_b->rt_period_timer.irqsafe = 1;
- rt_b->rt_period_timer.function = sched_rt_period_timer;
- }
-
-@@ -89,6 +90,7 @@
- rt_rq->push_cpu = nr_cpu_ids;
- raw_spin_lock_init(&rt_rq->push_lock);
- init_irq_work(&rt_rq->push_work, push_irq_work_func);
-+ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
- #endif
- #endif /* CONFIG_SMP */
- 	/* We start in dequeued state, because no RT tasks are queued */
-diff -Nur linux-4.1.13.orig/kernel/sched/sched.h linux-4.1.13/kernel/sched/sched.h
---- linux-4.1.13.orig/kernel/sched/sched.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/sched/sched.h 2015-11-29 09:23:09.645611789 +0100
-@@ -1093,6 +1093,7 @@
- #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
- #define WF_FORK 0x02 /* child wakeup after fork */
- #define WF_MIGRATED 0x4 /* internal use, task got migrated */
-+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
-
- /*
- * To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1290,6 +1291,15 @@
- extern void resched_curr(struct rq *rq);
- extern void resched_cpu(int cpu);
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+extern void resched_curr_lazy(struct rq *rq);
-+#else
-+static inline void resched_curr_lazy(struct rq *rq)
-+{
-+ resched_curr(rq);
-+}
-+#endif
-+
- extern struct rt_bandwidth def_rt_bandwidth;
- extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-
-diff -Nur linux-4.1.13.orig/kernel/sched/wait-simple.c linux-4.1.13/kernel/sched/wait-simple.c
---- linux-4.1.13.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/kernel/sched/wait-simple.c 2015-11-29 09:23:09.645611789 +0100
-@@ -0,0 +1,115 @@
-+/*
-+ * Simple waitqueues without fancy flags and callbacks
-+ *
-+ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
-+ *
-+ * Based on kernel/wait.c
-+ *
-+ * For licencing details see kernel-base/COPYING
-+ */
-+#include <linux/init.h>
-+#include <linux/export.h>
-+#include <linux/sched.h>
-+#include <linux/wait-simple.h>
-+
-+/* Adds w to head->list. Must be called with head->lock locked. */
-+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
-+{
-+ list_add(&w->node, &head->list);
-+ /* We can't let the condition leak before the setting of head */
-+ smp_mb();
-+}
-+
-+/* Removes w from head->list. Must be called with head->lock locked. */
-+static inline void __swait_dequeue(struct swaiter *w)
-+{
-+ list_del_init(&w->node);
-+}
-+
-+void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
-+{
-+ raw_spin_lock_init(&head->lock);
-+ lockdep_set_class(&head->lock, key);
-+ INIT_LIST_HEAD(&head->list);
-+}
-+EXPORT_SYMBOL(__init_swait_head);
-+
-+void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
-+{
-+ w->task = current;
-+ if (list_empty(&w->node))
-+ __swait_enqueue(head, w);
-+}
-+
-+void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
-+{
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&head->lock, flags);
-+ swait_prepare_locked(head, w);
-+ __set_current_state(state);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
-+}
-+EXPORT_SYMBOL(swait_prepare);
-+
-+void swait_finish_locked(struct swait_head *head, struct swaiter *w)
-+{
-+ __set_current_state(TASK_RUNNING);
-+ if (w->task)
-+ __swait_dequeue(w);
-+}
-+
-+void swait_finish(struct swait_head *head, struct swaiter *w)
-+{
-+ unsigned long flags;
-+
-+ __set_current_state(TASK_RUNNING);
-+ if (w->task) {
-+ raw_spin_lock_irqsave(&head->lock, flags);
-+ __swait_dequeue(w);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
-+ }
-+}
-+EXPORT_SYMBOL(swait_finish);
-+
-+unsigned int
-+__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
-+{
-+ struct swaiter *curr, *next;
-+ int woken = 0;
-+
-+ list_for_each_entry_safe(curr, next, &head->list, node) {
-+ if (wake_up_state(curr->task, state)) {
-+ __swait_dequeue(curr);
-+ /*
-+ * The waiting task can free the waiter as
-+ * soon as curr->task = NULL is written,
-+ * without taking any locks. A memory barrier
-+ * is required here to prevent the following
-+ * store to curr->task from getting ahead of
-+ * the dequeue operation.
-+ */
-+ smp_wmb();
-+ curr->task = NULL;
-+ if (++woken == num)
-+ break;
-+ }
-+ }
-+ return woken;
-+}
-+
-+unsigned int
-+__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
-+{
-+ unsigned long flags;
-+ int woken;
-+
-+ if (!swaitqueue_active(head))
-+ return 0;
-+
-+ raw_spin_lock_irqsave(&head->lock, flags);
-+ woken = __swait_wake_locked(head, state, num);
-+ raw_spin_unlock_irqrestore(&head->lock, flags);
-+ return woken;
-+}
-+EXPORT_SYMBOL(__swait_wake);
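
wait-simple.c is a new file: a stripped-down waitqueue whose head lock is a raw_spinlock_t, usable where the sleeping lock of a regular waitqueue would be illegal on RT. A usage sketch, assuming the matching declarations in linux/wait-simple.h; the header may provide a dedicated initializer for the waiter's list node, INIT_LIST_HEAD is used here as a stand-in:

	static struct swait_head my_wq;	/* init_swait_head(&my_wq) at setup */
	static bool my_cond;

	static void my_waiter(void)
	{
		struct swaiter w;

		INIT_LIST_HEAD(&w.node);
		for (;;) {
			swait_prepare(&my_wq, &w, TASK_UNINTERRUPTIBLE);
			if (my_cond)
				break;
			schedule();
		}
		swait_finish(&my_wq, &w);
	}

	static void my_waker(void)
	{
		my_cond = true;
		__swait_wake(&my_wq, TASK_NORMAL, 1);	/* wake at most one */
	}
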
-diff -Nur linux-4.1.13.orig/kernel/sched/work-simple.c linux-4.1.13/kernel/sched/work-simple.c
---- linux-4.1.13.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/kernel/sched/work-simple.c 2015-11-29 09:23:09.645611789 +0100
-@@ -0,0 +1,173 @@
-+/*
-+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
-+ *
-+ * Provides a framework for enqueuing callbacks from irq context
-+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
-+ */
-+
-+#include <linux/wait-simple.h>
-+#include <linux/work-simple.h>
-+#include <linux/kthread.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#include <linux/export.h>
-+
-+#define SWORK_EVENT_PENDING (1 << 0)
-+
-+static DEFINE_MUTEX(worker_mutex);
-+static struct sworker *glob_worker;
-+
-+struct sworker {
-+ struct list_head events;
-+ struct swait_head wq;
-+
-+ raw_spinlock_t lock;
-+
-+ struct task_struct *task;
-+ int refs;
-+};
-+
-+static bool swork_readable(struct sworker *worker)
-+{
-+ bool r;
-+
-+ if (kthread_should_stop())
-+ return true;
-+
-+ raw_spin_lock_irq(&worker->lock);
-+ r = !list_empty(&worker->events);
-+ raw_spin_unlock_irq(&worker->lock);
-+
-+ return r;
-+}
-+
-+static int swork_kthread(void *arg)
-+{
-+ struct sworker *worker = arg;
-+
-+ for (;;) {
-+ swait_event_interruptible(worker->wq,
-+ swork_readable(worker));
-+ if (kthread_should_stop())
-+ break;
-+
-+ raw_spin_lock_irq(&worker->lock);
-+ while (!list_empty(&worker->events)) {
-+ struct swork_event *sev;
-+
-+ sev = list_first_entry(&worker->events,
-+ struct swork_event, item);
-+ list_del(&sev->item);
-+ raw_spin_unlock_irq(&worker->lock);
-+
-+ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
-+ &sev->flags));
-+ sev->func(sev);
-+ raw_spin_lock_irq(&worker->lock);
-+ }
-+ raw_spin_unlock_irq(&worker->lock);
-+ }
-+ return 0;
-+}
-+
-+static struct sworker *swork_create(void)
-+{
-+ struct sworker *worker;
-+
-+ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
-+ if (!worker)
-+ return ERR_PTR(-ENOMEM);
-+
-+ INIT_LIST_HEAD(&worker->events);
-+ raw_spin_lock_init(&worker->lock);
-+ init_swait_head(&worker->wq);
-+
-+ worker->task = kthread_run(swork_kthread, worker, "kswork");
-+ if (IS_ERR(worker->task)) {
-+ kfree(worker);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ return worker;
-+}
-+
-+static void swork_destroy(struct sworker *worker)
-+{
-+ kthread_stop(worker->task);
-+
-+ WARN_ON(!list_empty(&worker->events));
-+ kfree(worker);
-+}
-+
-+/**
-+ * swork_queue - queue swork
-+ *
-+ * Returns %false if @sev was already on a queue, %true otherwise.
-+ *
-+ * The work is queued and processed on a random CPU
-+ */
-+bool swork_queue(struct swork_event *sev)
-+{
-+ unsigned long flags;
-+
-+ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
-+ return false;
-+
-+ raw_spin_lock_irqsave(&glob_worker->lock, flags);
-+ list_add_tail(&sev->item, &glob_worker->events);
-+ raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
-+
-+ swait_wake(&glob_worker->wq);
-+ return true;
-+}
-+EXPORT_SYMBOL_GPL(swork_queue);
-+
-+/**
-+ * swork_get - get an instance of the sworker
-+ *
-+ * Returns a negative error code if the initialization of the worker
-+ * failed, %0 otherwise.
-+ *
-+ */
-+int swork_get(void)
-+{
-+ struct sworker *worker;
-+
-+ mutex_lock(&worker_mutex);
-+ if (!glob_worker) {
-+ worker = swork_create();
-+ if (IS_ERR(worker)) {
-+ mutex_unlock(&worker_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ glob_worker = worker;
-+ }
-+
-+ glob_worker->refs++;
-+ mutex_unlock(&worker_mutex);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(swork_get);
-+
-+/**
-+ * swork_put - puts an instance of the sworker
-+ *
-+ * Will destroy the sworker thread. This function must not be called until all
-+ * queued events have been completed.
-+ */
-+void swork_put(void)
-+{
-+ mutex_lock(&worker_mutex);
-+
-+ glob_worker->refs--;
-+ if (glob_worker->refs > 0)
-+ goto out;
-+
-+ swork_destroy(glob_worker);
-+ glob_worker = NULL;
-+out:
-+ mutex_unlock(&worker_mutex);
-+}
-+EXPORT_SYMBOL_GPL(swork_put);
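
work-simple.c adds a single global "kswork" kthread to which events can be queued from hard interrupt context on PREEMPT_RT_FULL; the callbacks then run in sleepable kthread context. A usage sketch; INIT_SWORK is assumed to be the initializer provided by linux/work-simple.h:

	static void my_ev_fn(struct swork_event *sev)
	{
		/* runs in the kswork kthread, may sleep */
	}

	static struct swork_event my_ev;

	static int my_setup(void)
	{
		int ret = swork_get();	/* creates kswork on first use */

		if (ret)
			return ret;
		INIT_SWORK(&my_ev, my_ev_fn);	/* assumed initializer */
		return 0;
	}

	static irqreturn_t my_irq(int irq, void *dev)
	{
		swork_queue(&my_ev);	/* returns false if already pending */
		return IRQ_HANDLED;
	}
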
-diff -Nur linux-4.1.13.orig/kernel/signal.c linux-4.1.13/kernel/signal.c
---- linux-4.1.13.orig/kernel/signal.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/signal.c 2015-11-29 09:23:09.649611524 +0100
-@@ -14,6 +14,7 @@
- #include <linux/export.h>
- #include <linux/init.h>
- #include <linux/sched.h>
-+#include <linux/sched/rt.h>
- #include <linux/fs.h>
- #include <linux/tty.h>
- #include <linux/binfmts.h>
-@@ -352,13 +353,45 @@
- return false;
- }
-
-+#ifdef __HAVE_ARCH_CMPXCHG
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ struct sigqueue *q = t->sigqueue_cache;
-+
-+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
-+ return NULL;
-+ return q;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
-+ return 0;
-+ return 1;
-+}
-+
-+#else
-+
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ return NULL;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
- /*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- * appropriate lock must be held to stop the target task from exiting
- */
- static struct sigqueue *
--__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
-+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit, int fromslab)
- {
- struct sigqueue *q = NULL;
- struct user_struct *user;
-@@ -375,7 +408,10 @@
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
-- q = kmem_cache_alloc(sigqueue_cachep, flags);
-+ if (!fromslab)
-+ q = get_task_cache(t);
-+ if (!q)
-+ q = kmem_cache_alloc(sigqueue_cachep, flags);
- } else {
- print_dropped_signal(sig);
- }
-@@ -392,6 +428,13 @@
- return q;
- }
-
-+static struct sigqueue *
-+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit)
-+{
-+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-+}
-+
- static void __sigqueue_free(struct sigqueue *q)
- {
- if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +444,21 @@
- kmem_cache_free(sigqueue_cachep, q);
- }
-
-+static void sigqueue_free_current(struct sigqueue *q)
-+{
-+ struct user_struct *up;
-+
-+ if (q->flags & SIGQUEUE_PREALLOC)
-+ return;
-+
-+ up = q->user;
-+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
-+ atomic_dec(&up->sigpending);
-+ free_uid(up);
-+ } else
-+ __sigqueue_free(q);
-+}
-+
- void flush_sigqueue(struct sigpending *queue)
- {
- struct sigqueue *q;
-@@ -414,6 +472,21 @@
- }
-
- /*
-+ * Called from __exit_signal. Flush tsk->pending and
-+ * tsk->sigqueue_cache
-+ */
-+void flush_task_sigqueue(struct task_struct *tsk)
-+{
-+ struct sigqueue *q;
-+
-+ flush_sigqueue(&tsk->pending);
-+
-+ q = get_task_cache(tsk);
-+ if (q)
-+ kmem_cache_free(sigqueue_cachep, q);
-+}
-+
-+/*
- * Flush all pending signals for a task.
- */
- void __flush_signals(struct task_struct *t)
-@@ -565,7 +638,7 @@
- still_pending:
- list_del_init(&first->list);
- copy_siginfo(info, &first->info);
-- __sigqueue_free(first);
-+ sigqueue_free_current(first);
- } else {
- /*
- * Ok, it wasn't in the queue. This must be
-@@ -611,6 +684,8 @@
- {
- int signr;
-
-+ WARN_ON_ONCE(tsk != current);
-+
- /* We only dequeue private signals from ourselves, we don't let
- * signalfd steal them
- */
-@@ -1207,8 +1282,8 @@
- * We don't want to have recursive SIGSEGV's etc, for example,
- * that is why we also clear SIGNAL_UNKILLABLE.
- */
--int
--force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-+static int
-+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- {
- unsigned long int flags;
- int ret, blocked, ignored;
-@@ -1233,6 +1308,39 @@
- return ret;
- }
-
-+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-+{
-+/*
-+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
-+ * since it cannot enable preemption, and the signal code's spin_locks
-+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
-+ * send the signal on exit of the trap.
-+ */
-+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-+ if (in_atomic()) {
-+ if (WARN_ON_ONCE(t != current))
-+ return 0;
-+ if (WARN_ON_ONCE(t->forced_info.si_signo))
-+ return 0;
-+
-+ if (is_si_special(info)) {
-+ WARN_ON_ONCE(info != SEND_SIG_PRIV);
-+ t->forced_info.si_signo = sig;
-+ t->forced_info.si_errno = 0;
-+ t->forced_info.si_code = SI_KERNEL;
-+ t->forced_info.si_pid = 0;
-+ t->forced_info.si_uid = 0;
-+ } else {
-+ t->forced_info = *info;
-+ }
-+
-+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-+ return 0;
-+ }
-+#endif
-+ return do_force_sig_info(sig, info, t);
-+}
-+
- /*
- * Nuke all other threads in the group.
- */
-@@ -1267,12 +1375,12 @@
- * Disable interrupts early to avoid deadlocks.
- * See rcu_read_unlock() comment header for details.
- */
-- local_irq_save(*flags);
-+ local_irq_save_nort(*flags);
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- break;
- }
- /*
-@@ -1293,7 +1401,7 @@
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- }
-
- return sighand;
-@@ -1536,7 +1644,8 @@
- */
- struct sigqueue *sigqueue_alloc(void)
- {
-- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
-+ /* Preallocated sigqueue objects always from the slabcache ! */
-+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
-
- if (q)
- q->flags |= SIGQUEUE_PREALLOC;
-@@ -1897,15 +2006,7 @@
- if (gstop_done && ptrace_reparented(current))
- do_notify_parent_cldstop(current, false, why);
-
-- /*
-- * Don't want to allow preemption here, because
-- * sys_ptrace() needs this task to be inactive.
-- *
-- * XXX: implement read_unlock_no_resched().
-- */
-- preempt_disable();
- read_unlock(&tasklist_lock);
-- preempt_enable_no_resched();
- freezable_schedule();
- } else {
- /*
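
The signal.c changes add a one-element, lock-free sigqueue cache per task, so RT tasks can recycle a sigqueue without going through the slab allocator on a hot path, and let architectures defer trap-time signals via TIF_NOTIFY_RESUME. The cache is the classic cmpxchg single-slot pattern; reduced to its essence:

	/* Reduced from get_task_cache()/put_task_cache() above: cmpxchg
	 * lets concurrent get and put race safely without a lock. */
	static void *slot_get(void **slot)
	{
		void *p = *slot;

		if (cmpxchg(slot, p, NULL) != p)
			return NULL;		/* lost the race */
		return p;
	}

	static int slot_put(void **slot, void *p)
	{
		return cmpxchg(slot, NULL, p) == NULL ? 0 : 1;	/* 1: slot taken */
	}
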
-diff -Nur linux-4.1.13.orig/kernel/softirq.c linux-4.1.13/kernel/softirq.c
---- linux-4.1.13.orig/kernel/softirq.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/softirq.c 2015-11-29 09:23:09.649611524 +0100
-@@ -21,10 +21,12 @@
- #include <linux/freezer.h>
- #include <linux/kthread.h>
- #include <linux/rcupdate.h>
-+#include <linux/delay.h>
- #include <linux/ftrace.h>
- #include <linux/smp.h>
- #include <linux/smpboot.h>
- #include <linux/tick.h>
-+#include <linux/locallock.h>
- #include <linux/irq.h>
-
- #define CREATE_TRACE_POINTS
-@@ -62,6 +64,98 @@
- "TASKLET", "SCHED", "HRTIMER", "RCU"
- };
-
-+#ifdef CONFIG_NO_HZ_COMMON
-+# ifdef CONFIG_PREEMPT_RT_FULL
-+
-+struct softirq_runner {
-+ struct task_struct *runner[NR_SOFTIRQS];
-+};
-+
-+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
-+
-+static inline void softirq_set_runner(unsigned int sirq)
-+{
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+
-+ sr->runner[sirq] = current;
-+}
-+
-+static inline void softirq_clr_runner(unsigned int sirq)
-+{
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+
-+ sr->runner[sirq] = NULL;
-+}
-+
-+/*
-+ * On preempt-rt a softirq running context might be blocked on a
-+ * lock. There might be no other runnable task on this CPU because the
-+ * lock owner runs on some other CPU. So we have to go into idle with
-+ * the pending bit set. Therefore we need to check this, otherwise we
-+ * warn about false positives, which confuses users and defeats the
-+ * whole purpose of this test.
-+ *
-+ * This code is called with interrupts disabled.
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+ u32 warnpending;
-+ int i;
-+
-+ if (rate_limit >= 10)
-+ return;
-+
-+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
-+ for (i = 0; i < NR_SOFTIRQS; i++) {
-+ struct task_struct *tsk = sr->runner[i];
-+
-+ /*
-+ * The wakeup code in rtmutex.c wakes up the task
-+ * _before_ it sets pi_blocked_on to NULL under
-+ * tsk->pi_lock. So we need to check for both: state
-+ * and pi_blocked_on.
-+ */
-+ if (tsk) {
-+ raw_spin_lock(&tsk->pi_lock);
-+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
-+ /* Clear all bits pending in that task */
-+ warnpending &= ~(tsk->softirqs_raised);
-+ warnpending &= ~(1 << i);
-+ }
-+ raw_spin_unlock(&tsk->pi_lock);
-+ }
-+ }
-+
-+ if (warnpending) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ warnpending);
-+ rate_limit++;
-+ }
-+}
-+# else
-+/*
-+ * On !PREEMPT_RT we just printk rate limited:
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+
-+ if (rate_limit < 10 &&
-+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ local_softirq_pending());
-+ rate_limit++;
-+ }
-+}
-+# endif
-+
-+#else /* !CONFIG_NO_HZ_COMMON */
-+static inline void softirq_set_runner(unsigned int sirq) { }
-+static inline void softirq_clr_runner(unsigned int sirq) { }
-+#endif
-+
- /*
- * we cannot loop indefinitely here to avoid userspace starvation,
- * but we also don't want to introduce a worst case 1/HZ latency
-@@ -77,6 +171,68 @@
- wake_up_process(tsk);
- }
-
-+static void handle_softirq(unsigned int vec_nr)
-+{
-+ struct softirq_action *h = softirq_vec + vec_nr;
-+ int prev_count;
-+
-+ prev_count = preempt_count();
-+
-+ kstat_incr_softirqs_this_cpu(vec_nr);
-+
-+ trace_softirq_entry(vec_nr);
-+ h->action(h);
-+ trace_softirq_exit(vec_nr);
-+ if (unlikely(prev_count != preempt_count())) {
-+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-+ vec_nr, softirq_to_name[vec_nr], h->action,
-+ prev_count, preempt_count());
-+ preempt_count_set(prev_count);
-+ }
-+}
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static inline int ksoftirqd_softirq_pending(void)
-+{
-+ return local_softirq_pending();
-+}
-+
-+static void handle_pending_softirqs(u32 pending)
-+{
-+ struct softirq_action *h = softirq_vec;
-+ int softirq_bit;
-+
-+ local_irq_enable();
-+
-+ h = softirq_vec;
-+
-+ while ((softirq_bit = ffs(pending))) {
-+ unsigned int vec_nr;
-+
-+ h += softirq_bit - 1;
-+ vec_nr = h - softirq_vec;
-+ handle_softirq(vec_nr);
-+
-+ h++;
-+ pending >>= softirq_bit;
-+ }
-+
-+ rcu_bh_qs();
-+ local_irq_disable();
-+}
-+
-+static void run_ksoftirqd(unsigned int cpu)
-+{
-+ local_irq_disable();
-+ if (ksoftirqd_softirq_pending()) {
-+ __do_softirq();
-+ local_irq_enable();
-+ cond_resched_rcu_qs();
-+ return;
-+ }
-+ local_irq_enable();
-+}
-+
- /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -232,10 +388,8 @@
- unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
- unsigned long old_flags = current->flags;
- int max_restart = MAX_SOFTIRQ_RESTART;
-- struct softirq_action *h;
- bool in_hardirq;
- __u32 pending;
-- int softirq_bit;
-
- /*
- 	 * Mask out PF_MEMALLOC as current task context is borrowed for the
-@@ -254,36 +408,7 @@
- /* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
-
-- local_irq_enable();
--
-- h = softirq_vec;
--
-- while ((softirq_bit = ffs(pending))) {
-- unsigned int vec_nr;
-- int prev_count;
--
-- h += softirq_bit - 1;
--
-- vec_nr = h - softirq_vec;
-- prev_count = preempt_count();
--
-- kstat_incr_softirqs_this_cpu(vec_nr);
--
-- trace_softirq_entry(vec_nr);
-- h->action(h);
-- trace_softirq_exit(vec_nr);
-- if (unlikely(prev_count != preempt_count())) {
-- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-- vec_nr, softirq_to_name[vec_nr], h->action,
-- prev_count, preempt_count());
-- preempt_count_set(prev_count);
-- }
-- h++;
-- pending >>= softirq_bit;
-- }
--
-- rcu_bh_qs();
-- local_irq_disable();
-+ handle_pending_softirqs(pending);
-
- pending = local_softirq_pending();
- if (pending) {
-@@ -320,6 +445,255 @@
- }
-
- /*
-+ * This function must run with irqs disabled!
-+ */
-+void raise_softirq_irqoff(unsigned int nr)
-+{
-+ __raise_softirq_irqoff(nr);
-+
-+ /*
-+ * If we're in an interrupt or softirq, we're done
-+ * (this also catches softirq-disabled code). We will
-+ * actually run the softirq once we return from
-+ * the irq or softirq.
-+ *
-+ * Otherwise we wake up ksoftirqd to make sure we
-+ * schedule the softirq soon.
-+ */
-+ if (!in_interrupt())
-+ wakeup_softirqd();
-+}
-+
-+void __raise_softirq_irqoff(unsigned int nr)
-+{
-+ trace_softirq_raise(nr);
-+ or_softirq_pending(1UL << nr);
-+}
-+
-+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-+static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-+static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
-+
-+#else /* !PREEMPT_RT_FULL */
-+
-+/*
-+ * On RT we serialize softirq execution with a cpu local lock per softirq
-+ */
-+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-+
-+void __init softirq_early_init(void)
-+{
-+ int i;
-+
-+ for (i = 0; i < NR_SOFTIRQS; i++)
-+ local_irq_lock_init(local_softirq_locks[i]);
-+}
-+
-+static void lock_softirq(int which)
-+{
-+ local_lock(local_softirq_locks[which]);
-+}
-+
-+static void unlock_softirq(int which)
-+{
-+ local_unlock(local_softirq_locks[which]);
-+}
-+
-+static void do_single_softirq(int which)
-+{
-+ unsigned long old_flags = current->flags;
-+
-+ current->flags &= ~PF_MEMALLOC;
-+ vtime_account_irq_enter(current);
-+ current->flags |= PF_IN_SOFTIRQ;
-+ lockdep_softirq_enter();
-+ local_irq_enable();
-+ handle_softirq(which);
-+ local_irq_disable();
-+ lockdep_softirq_exit();
-+ current->flags &= ~PF_IN_SOFTIRQ;
-+ vtime_account_irq_enter(current);
-+ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
-+}
-+
-+/*
-+ * Called with interrupts disabled. Process softirqs which were raised
-+ * in current context (or on behalf of ksoftirqd).
-+ */
-+static void do_current_softirqs(void)
-+{
-+ while (current->softirqs_raised) {
-+ int i = __ffs(current->softirqs_raised);
-+ unsigned int pending, mask = (1U << i);
-+
-+ current->softirqs_raised &= ~mask;
-+ local_irq_enable();
-+
-+ /*
-+ * If the lock is contended, we boost the owner to
-+ * process the softirq or leave the critical section
-+ * now.
-+ */
-+ lock_softirq(i);
-+ local_irq_disable();
-+ softirq_set_runner(i);
-+ /*
-+ * Check with the local_softirq_pending() bits,
-+ * whether we need to process this still or if someone
-+ * else took care of it.
-+ */
-+ pending = local_softirq_pending();
-+ if (pending & mask) {
-+ set_softirq_pending(pending & ~mask);
-+ do_single_softirq(i);
-+ }
-+ softirq_clr_runner(i);
-+ unlock_softirq(i);
-+ WARN_ON(current->softirq_nestcnt != 1);
-+ }
-+}
-+
-+void __local_bh_disable(void)
-+{
-+ if (++current->softirq_nestcnt == 1)
-+ migrate_disable();
-+}
-+EXPORT_SYMBOL(__local_bh_disable);
-+
-+void __local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
-+
-+ local_irq_disable();
-+ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
-+ do_current_softirqs();
-+ local_irq_enable();
-+
-+ if (--current->softirq_nestcnt == 0)
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(__local_bh_enable);
-+
-+void _local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
-+ if (--current->softirq_nestcnt == 0)
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(_local_bh_enable);
-+
-+int in_serving_softirq(void)
-+{
-+ return current->flags & PF_IN_SOFTIRQ;
-+}
-+EXPORT_SYMBOL(in_serving_softirq);
-+
-+/* Called with preemption disabled */
-+static void run_ksoftirqd(unsigned int cpu)
-+{
-+ local_irq_disable();
-+ current->softirq_nestcnt++;
-+
-+ do_current_softirqs();
-+ current->softirq_nestcnt--;
-+ rcu_note_context_switch();
-+ local_irq_enable();
-+}
-+
-+/*
-+ * Called from netif_rx_ni(). Preemption enabled, but migration
-+ * disabled. So the cpu can't go away under us.
-+ */
-+void thread_do_softirq(void)
-+{
-+ if (!in_serving_softirq() && current->softirqs_raised) {
-+ current->softirq_nestcnt++;
-+ do_current_softirqs();
-+ current->softirq_nestcnt--;
-+ }
-+}
-+
-+static void do_raise_softirq_irqoff(unsigned int nr)
-+{
-+ trace_softirq_raise(nr);
-+ or_softirq_pending(1UL << nr);
-+
-+ /*
-+ * If we are not in a hard interrupt and inside a bh disabled
-+ * region, we simply raise the flag on current. local_bh_enable()
-+ * will make sure that the softirq is executed. Otherwise we
-+ * delegate it to ksoftirqd.
-+ */
-+ if (!in_irq() && current->softirq_nestcnt)
-+ current->softirqs_raised |= (1U << nr);
-+ else if (__this_cpu_read(ksoftirqd))
-+ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
-+}
-+
-+void __raise_softirq_irqoff(unsigned int nr)
-+{
-+ do_raise_softirq_irqoff(nr);
-+ if (!in_irq() && !current->softirq_nestcnt)
-+ wakeup_softirqd();
-+}
-+
-+/*
-+ * This function must run with irqs disabled!
-+ */
-+void raise_softirq_irqoff(unsigned int nr)
-+{
-+ do_raise_softirq_irqoff(nr);
-+
-+ /*
-+	 * If we're in a hard interrupt we let the irq return code deal
-+ * with the wakeup of ksoftirqd.
-+ */
-+ if (in_irq())
-+ return;
-+ /*
-+ * If we are in thread context but outside of a bh disabled
-+ * region, we need to wake ksoftirqd as well.
-+ *
-+ * CHECKME: Some of the places which do that could be wrapped
-+ * into local_bh_disable/enable pairs. Though it's unclear
-+ * whether this is worth the effort. To find those places just
-+ * raise a WARN() if the condition is met.
-+ */
-+ if (!current->softirq_nestcnt)
-+ wakeup_softirqd();
-+}
-+
-+static inline int ksoftirqd_softirq_pending(void)
-+{
-+ return current->softirqs_raised;
-+}
-+
-+static inline void local_bh_disable_nort(void) { }
-+static inline void _local_bh_enable_nort(void) { }
-+
-+static inline void ksoftirqd_set_sched_params(unsigned int cpu)
-+{
-+ struct sched_param param = { .sched_priority = 1 };
-+
-+ sched_setscheduler(current, SCHED_FIFO, &param);
-+ /* Take over all pending softirqs when starting */
-+ local_irq_disable();
-+ current->softirqs_raised = local_softirq_pending();
-+ local_irq_enable();
-+}
-+
-+static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-+{
-+ struct sched_param param = { .sched_priority = 0 };
-+
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
-+}
-+
-+#endif /* PREEMPT_RT_FULL */
-+/*
- * Enter an interrupt context.
- */
- void irq_enter(void)
-@@ -330,9 +704,9 @@
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
-- local_bh_disable();
-+ local_bh_disable_nort();
- tick_irq_enter();
-- _local_bh_enable();
-+ _local_bh_enable_nort();
- }
-
- __irq_enter();
-@@ -340,6 +714,7 @@
-
- static inline void invoke_softirq(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads) {
- #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
- /*
-@@ -359,6 +734,15 @@
- } else {
- wakeup_softirqd();
- }
-+#else /* PREEMPT_RT_FULL */
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ if (__this_cpu_read(ksoftirqd) &&
-+ __this_cpu_read(ksoftirqd)->softirqs_raised)
-+ wakeup_softirqd();
-+ local_irq_restore(flags);
-+#endif
- }
-
- static inline void tick_irq_exit(void)
-@@ -395,26 +779,6 @@
- trace_hardirq_exit(); /* must be last! */
- }
-
--/*
-- * This function must run with irqs disabled!
-- */
--inline void raise_softirq_irqoff(unsigned int nr)
--{
-- __raise_softirq_irqoff(nr);
--
-- /*
-- * If we're in an interrupt or softirq, we're done
-- * (this also catches softirq-disabled code). We will
-- * actually run the softirq once we return from
-- * the irq or softirq.
-- *
-- * Otherwise we wake up ksoftirqd to make sure we
-- * schedule the softirq soon.
-- */
-- if (!in_interrupt())
-- wakeup_softirqd();
--}
--
- void raise_softirq(unsigned int nr)
- {
- unsigned long flags;
-@@ -424,12 +788,6 @@
- local_irq_restore(flags);
- }
-
--void __raise_softirq_irqoff(unsigned int nr)
--{
-- trace_softirq_raise(nr);
-- or_softirq_pending(1UL << nr);
--}
--
- void open_softirq(int nr, void (*action)(struct softirq_action *))
- {
- softirq_vec[nr].action = action;
-@@ -446,15 +804,45 @@
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
-
-+static void inline
-+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
-+{
-+ if (tasklet_trylock(t)) {
-+again:
-+ /* We may have been preempted before tasklet_trylock
-+ * and __tasklet_action may have already run.
-+		 * So double check the sched bit while the tasklet
-+ * is locked before adding it to the list.
-+ */
-+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
-+ t->next = NULL;
-+ *head->tail = t;
-+ head->tail = &(t->next);
-+ raise_softirq_irqoff(nr);
-+ tasklet_unlock(t);
-+ } else {
-+			/* This is subtle. If we hit the corner case above,
-+			 * it is possible that we get preempted right here,
-+ * and another task has successfully called
-+ * tasklet_schedule(), then this function, and
-+ * failed on the trylock. Thus we must be sure
-+ * before releasing the tasklet lock, that the
-+ * SCHED_BIT is clear. Otherwise the tasklet
-+ * may get its SCHED_BIT set, but not added to the
-+ * list
-+ */
-+ if (!tasklet_tryunlock(t))
-+ goto again;
-+ }
-+ }
-+}
-+
- void __tasklet_schedule(struct tasklet_struct *t)
- {
- unsigned long flags;
-
- local_irq_save(flags);
-- t->next = NULL;
-- *__this_cpu_read(tasklet_vec.tail) = t;
-- __this_cpu_write(tasklet_vec.tail, &(t->next));
-- raise_softirq_irqoff(TASKLET_SOFTIRQ);
-+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_schedule);
-@@ -464,10 +852,7 @@
- unsigned long flags;
-
- local_irq_save(flags);
-- t->next = NULL;
-- *__this_cpu_read(tasklet_hi_vec.tail) = t;
-- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-- raise_softirq_irqoff(HI_SOFTIRQ);
-+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
-@@ -476,82 +861,122 @@
- {
- BUG_ON(!irqs_disabled());
-
-- t->next = __this_cpu_read(tasklet_hi_vec.head);
-- __this_cpu_write(tasklet_hi_vec.head, t);
-- __raise_softirq_irqoff(HI_SOFTIRQ);
-+ __tasklet_hi_schedule(t);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
--static void tasklet_action(struct softirq_action *a)
-+void tasklet_enable(struct tasklet_struct *t)
- {
-- struct tasklet_struct *list;
-+ if (!atomic_dec_and_test(&t->count))
-+ return;
-+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
-+ tasklet_schedule(t);
-+}
-+EXPORT_SYMBOL(tasklet_enable);
-
-- local_irq_disable();
-- list = __this_cpu_read(tasklet_vec.head);
-- __this_cpu_write(tasklet_vec.head, NULL);
-- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
-- local_irq_enable();
-+static void __tasklet_action(struct softirq_action *a,
-+ struct tasklet_struct *list)
-+{
-+ int loops = 1000000;
-
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
-- if (tasklet_trylock(t)) {
-- if (!atomic_read(&t->count)) {
-- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-- &t->state))
-- BUG();
-- t->func(t->data);
-- tasklet_unlock(t);
-- continue;
-- }
-- tasklet_unlock(t);
-+ /*
-+		 * Should always succeed - after a tasklet got on the
-+ * list (after getting the SCHED bit set from 0 to 1),
-+ * nothing but the tasklet softirq it got queued to can
-+ * lock it:
-+ */
-+ if (!tasklet_trylock(t)) {
-+ WARN_ON(1);
-+ continue;
- }
-
-- local_irq_disable();
- t->next = NULL;
-- *__this_cpu_read(tasklet_vec.tail) = t;
-- __this_cpu_write(tasklet_vec.tail, &(t->next));
-- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
-- local_irq_enable();
-+
-+ /*
-+ * If we cannot handle the tasklet because it's disabled,
-+ * mark it as pending. tasklet_enable() will later
-+ * re-schedule the tasklet.
-+ */
-+ if (unlikely(atomic_read(&t->count))) {
-+out_disabled:
-+ /* implicit unlock: */
-+ wmb();
-+ t->state = TASKLET_STATEF_PENDING;
-+ continue;
-+ }
-+
-+ /*
-+ * After this point on the tasklet might be rescheduled
-+ * on another CPU, but it can only be added to another
-+ * CPU's tasklet list if we unlock the tasklet (which we
-+		 * don't do yet).
-+ */
-+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ WARN_ON(1);
-+
-+again:
-+ t->func(t->data);
-+
-+ /*
-+ * Try to unlock the tasklet. We must use cmpxchg, because
-+ * another CPU might have scheduled or disabled the tasklet.
-+ * We only allow the STATE_RUN -> 0 transition here.
-+ */
-+ while (!tasklet_tryunlock(t)) {
-+ /*
-+ * If it got disabled meanwhile, bail out:
-+ */
-+ if (atomic_read(&t->count))
-+ goto out_disabled;
-+ /*
-+ * If it got scheduled meanwhile, re-execute
-+ * the tasklet function:
-+ */
-+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ goto again;
-+ if (!--loops) {
-+ printk("hm, tasklet state: %08lx\n", t->state);
-+ WARN_ON(1);
-+ tasklet_unlock(t);
-+ break;
-+ }
-+ }
- }
- }
-
-+static void tasklet_action(struct softirq_action *a)
-+{
-+ struct tasklet_struct *list;
-+
-+ local_irq_disable();
-+
-+ list = __this_cpu_read(tasklet_vec.head);
-+ __this_cpu_write(tasklet_vec.head, NULL);
-+ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
-+
-+ local_irq_enable();
-+
-+ __tasklet_action(a, list);
-+}
-+
- static void tasklet_hi_action(struct softirq_action *a)
- {
- struct tasklet_struct *list;
-
- local_irq_disable();
-+
- list = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, NULL);
- __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
-- local_irq_enable();
-
-- while (list) {
-- struct tasklet_struct *t = list;
--
-- list = list->next;
--
-- if (tasklet_trylock(t)) {
-- if (!atomic_read(&t->count)) {
-- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-- &t->state))
-- BUG();
-- t->func(t->data);
-- tasklet_unlock(t);
-- continue;
-- }
-- tasklet_unlock(t);
-- }
-+ local_irq_enable();
-
-- local_irq_disable();
-- t->next = NULL;
-- *__this_cpu_read(tasklet_hi_vec.tail) = t;
-- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-- __raise_softirq_irqoff(HI_SOFTIRQ);
-- local_irq_enable();
-- }
-+ __tasklet_action(a, list);
- }
-
- void tasklet_init(struct tasklet_struct *t,
-@@ -572,7 +997,7 @@
-
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do {
-- yield();
-+ msleep(1);
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
-@@ -646,25 +1071,26 @@
- open_softirq(HI_SOFTIRQ, tasklet_hi_action);
- }
-
--static int ksoftirqd_should_run(unsigned int cpu)
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-+void tasklet_unlock_wait(struct tasklet_struct *t)
- {
-- return local_softirq_pending();
--}
--
--static void run_ksoftirqd(unsigned int cpu)
--{
-- local_irq_disable();
-- if (local_softirq_pending()) {
-+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
- /*
-- * We can safely run softirq on inline stack, as we are not deep
-- * in the task stack here.
-+ * Hack for now to avoid this busy-loop:
- */
-- __do_softirq();
-- local_irq_enable();
-- cond_resched_rcu_qs();
-- return;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ msleep(1);
-+#else
-+ barrier();
-+#endif
- }
-- local_irq_enable();
-+}
-+EXPORT_SYMBOL(tasklet_unlock_wait);
-+#endif
-+
-+static int ksoftirqd_should_run(unsigned int cpu)
-+{
-+ return ksoftirqd_softirq_pending();
- }
-
- #ifdef CONFIG_HOTPLUG_CPU
-@@ -746,6 +1172,8 @@
-
- static struct smp_hotplug_thread softirq_threads = {
- .store = &ksoftirqd,
-+ .setup = ksoftirqd_set_sched_params,
-+ .cleanup = ksoftirqd_clr_sched_params,
- .thread_should_run = ksoftirqd_should_run,
- .thread_fn = run_ksoftirqd,
- .thread_comm = "ksoftirqd/%u",
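
On PREEMPT_RT_FULL each softirq vector above is serialized by its own per-CPU local lock, a PI-aware sleeping lock, so softirq processing can be preempted and a contended vector boosts its current owner instead of deadlocking. The core of do_current_softirqs(), reduced:

	/* Take the per-vector lock, re-check the pending bit under it
	 * (another context may have processed it meanwhile), run exactly
	 * one vector, release. */
	lock_softirq(i);		/* local_lock(local_softirq_locks[i]) */
	local_irq_disable();
	softirq_set_runner(i);
	pending = local_softirq_pending();
	if (pending & (1U << i)) {
		set_softirq_pending(pending & ~(1U << i));
		do_single_softirq(i);
	}
	softirq_clr_runner(i);
	unlock_softirq(i);
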
-diff -Nur linux-4.1.13.orig/kernel/stop_machine.c linux-4.1.13/kernel/stop_machine.c
---- linux-4.1.13.orig/kernel/stop_machine.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/stop_machine.c 2015-11-29 09:23:09.649611524 +0100
-@@ -30,12 +30,12 @@
- atomic_t nr_todo; /* nr left to execute */
- bool executed; /* actually executed? */
- int ret; /* collected return value */
-- struct completion completion; /* fired if nr_todo reaches 0 */
-+ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
- };
-
- /* the actual stopper, one per every possible cpu, enabled on online cpus */
- struct cpu_stopper {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- bool enabled; /* is this stopper enabled? */
- struct list_head works; /* list of pending works */
- };
-@@ -56,7 +56,7 @@
- {
- memset(done, 0, sizeof(*done));
- atomic_set(&done->nr_todo, nr_todo);
-- init_completion(&done->completion);
-+ done->waiter = current;
- }
-
- /* signal completion unless @done is NULL */
-@@ -65,8 +65,10 @@
- if (done) {
- if (executed)
- done->executed = true;
-- if (atomic_dec_and_test(&done->nr_todo))
-- complete(&done->completion);
-+ if (atomic_dec_and_test(&done->nr_todo)) {
-+ wake_up_process(done->waiter);
-+ done->waiter = NULL;
-+ }
- }
- }
-
-@@ -78,7 +80,7 @@
-
- unsigned long flags;
-
-- spin_lock_irqsave(&stopper->lock, flags);
-+ raw_spin_lock_irqsave(&stopper->lock, flags);
-
- if (stopper->enabled) {
- list_add_tail(&work->list, &stopper->works);
-@@ -86,7 +88,23 @@
- } else
- cpu_stop_signal_done(work->done, false);
-
-- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
-+}
-+
-+static void wait_for_stop_done(struct cpu_stop_done *done)
-+{
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (atomic_read(&done->nr_todo)) {
-+ schedule();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+ /*
-+ * We need to wait until cpu_stop_signal_done() has cleared
-+ * done->waiter.
-+ */
-+ while (done->waiter)
-+ cpu_relax();
-+ set_current_state(TASK_RUNNING);
- }
-
- /**
-@@ -120,7 +138,7 @@
-
- cpu_stop_init_done(&done, 1);
- cpu_stop_queue_work(cpu, &work);
-- wait_for_completion(&done.completion);
-+ wait_for_stop_done(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -248,7 +266,7 @@
- struct irq_cpu_stop_queue_work_info call_args;
- struct multi_stop_data msdata;
-
-- preempt_disable();
-+ preempt_disable_nort();
- msdata = (struct multi_stop_data){
- .fn = fn,
- .data = arg,
-@@ -281,7 +299,7 @@
- * This relies on the stopper workqueues to be FIFO.
- */
- if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
-- preempt_enable();
-+ preempt_enable_nort();
- return -ENOENT;
- }
-
-@@ -295,9 +313,9 @@
- &irq_cpu_stop_queue_work,
- &call_args, 1);
- lg_local_unlock(&stop_cpus_lock);
-- preempt_enable();
-+ preempt_enable_nort();
-
-- wait_for_completion(&done.completion);
-+ wait_for_stop_done(&done);
-
- return done.executed ? done.ret : -ENOENT;
- }
-@@ -329,7 +347,7 @@
-
- static void queue_stop_cpus_work(const struct cpumask *cpumask,
- cpu_stop_fn_t fn, void *arg,
-- struct cpu_stop_done *done)
-+ struct cpu_stop_done *done, bool inactive)
- {
- struct cpu_stop_work *work;
- unsigned int cpu;
-@@ -343,11 +361,13 @@
- }
-
- /*
-- * Disable preemption while queueing to avoid getting
-- * preempted by a stopper which might wait for other stoppers
-- * to enter @fn which can lead to deadlock.
-+ * Make sure that all work is queued on all cpus before
-+ * any of the cpus can execute it.
- */
-- lg_global_lock(&stop_cpus_lock);
-+ if (!inactive)
-+ lg_global_lock(&stop_cpus_lock);
-+ else
-+ lg_global_trylock_relax(&stop_cpus_lock);
- for_each_cpu(cpu, cpumask)
- cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
- lg_global_unlock(&stop_cpus_lock);
-@@ -359,8 +379,8 @@
- struct cpu_stop_done done;
-
- cpu_stop_init_done(&done, cpumask_weight(cpumask));
-- queue_stop_cpus_work(cpumask, fn, arg, &done);
-- wait_for_completion(&done.completion);
-+ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
-+ wait_for_stop_done(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -439,9 +459,9 @@
- unsigned long flags;
- int run;
-
-- spin_lock_irqsave(&stopper->lock, flags);
-+ raw_spin_lock_irqsave(&stopper->lock, flags);
- run = !list_empty(&stopper->works);
-- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
- return run;
- }
-
-@@ -453,13 +473,13 @@
-
- repeat:
- work = NULL;
-- spin_lock_irq(&stopper->lock);
-+ raw_spin_lock_irq(&stopper->lock);
- if (!list_empty(&stopper->works)) {
- work = list_first_entry(&stopper->works,
- struct cpu_stop_work, list);
- list_del_init(&work->list);
- }
-- spin_unlock_irq(&stopper->lock);
-+ raw_spin_unlock_irq(&stopper->lock);
-
- if (work) {
- cpu_stop_fn_t fn = work->fn;
-@@ -467,6 +487,16 @@
- struct cpu_stop_done *done = work->done;
- char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
-
-+ /*
-+		 * Wait until the stopper has finished scheduling on all
-+		 * cpus.
-+ */
-+ lg_global_lock(&stop_cpus_lock);
-+ /*
-+ * Let other cpu threads continue as well
-+ */
-+ lg_global_unlock(&stop_cpus_lock);
-+
- /* cpu stop callbacks are not allowed to sleep */
- preempt_disable();
-
-@@ -481,7 +511,13 @@
- kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
- ksym_buf), arg);
-
-+ /*
-+ * Make sure that the wakeup and setting done->waiter
-+ * to NULL is atomic.
-+ */
-+ local_irq_disable();
- cpu_stop_signal_done(done, true);
-+ local_irq_enable();
- goto repeat;
- }
- }
-@@ -500,20 +536,20 @@
- unsigned long flags;
-
- /* drain remaining works */
-- spin_lock_irqsave(&stopper->lock, flags);
-+ raw_spin_lock_irqsave(&stopper->lock, flags);
- list_for_each_entry(work, &stopper->works, list)
- cpu_stop_signal_done(work->done, false);
- stopper->enabled = false;
-- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
- }
-
- static void cpu_stop_unpark(unsigned int cpu)
- {
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
-- spin_lock_irq(&stopper->lock);
-+ raw_spin_lock_irq(&stopper->lock);
- stopper->enabled = true;
-- spin_unlock_irq(&stopper->lock);
-+ raw_spin_unlock_irq(&stopper->lock);
- }
-
- static struct smp_hotplug_thread cpu_stop_threads = {
-@@ -535,10 +571,12 @@
- for_each_possible_cpu(cpu) {
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
-- spin_lock_init(&stopper->lock);
-+ raw_spin_lock_init(&stopper->lock);
- INIT_LIST_HEAD(&stopper->works);
- }
-
-+ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
-+
- BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
- stop_machine_initialized = true;
- return 0;
-@@ -634,11 +672,11 @@
- set_state(&msdata, MULTI_STOP_PREPARE);
- cpu_stop_init_done(&done, num_active_cpus());
- queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
-- &done);
-+ &done, true);
- ret = multi_cpu_stop(&msdata);
-
- /* Busy wait for completion. */
-- while (!completion_done(&done.completion))
-+ while (atomic_read(&done.nr_todo))
- cpu_relax();
-
- mutex_unlock(&stop_cpus_mutex);
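
stop_machine.c converts the stopper lock to a raw_spinlock_t and replaces struct completion with a hand-rolled waiter, presumably because cpu_stop_signal_done() can run with the raw lock held and interrupts off, where a completion's waitqueue lock, a sleeping lock on RT, must not be taken. The handshake, reduced from the hunk:

	/* Waker (cpu_stop_signal_done): wake first, then clear
	 * done->waiter; the stopper thread wraps this in
	 * local_irq_disable()/local_irq_enable() so both steps appear
	 * atomic to the waiter. */
	if (atomic_dec_and_test(&done->nr_todo)) {
		wake_up_process(done->waiter);
		done->waiter = NULL;
	}

	/* Waiter (wait_for_stop_done): sleep until nr_todo hits zero,
	 * then spin until the waker has also dropped done->waiter. */
	while (atomic_read(&done->nr_todo))
		schedule();		/* under TASK_UNINTERRUPTIBLE */
	while (done->waiter)
		cpu_relax();
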
-diff -Nur linux-4.1.13.orig/kernel/time/hrtimer.c linux-4.1.13/kernel/time/hrtimer.c
---- linux-4.1.13.orig/kernel/time/hrtimer.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/hrtimer.c 2015-11-29 09:23:09.649611524 +0100
-@@ -48,11 +48,13 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
- #include <linux/timer.h>
-+#include <linux/kthread.h>
- #include <linux/freezer.h>
-
- #include <asm/uaccess.h>
-
- #include <trace/events/timer.h>
-+#include <trace/events/hist.h>
-
- #include "tick-internal.h"
-
-@@ -576,8 +578,7 @@
- * When the callback is running, we do not reprogram the clock event
- * device. The timer callback is either running on a different CPU or
- * the callback is executed in the hrtimer_interrupt context. The
-- * reprogramming is handled either by the softirq, which called the
-- * callback or at the end of the hrtimer_interrupt.
-+ * reprogramming is handled at the end of the hrtimer_interrupt.
- */
- if (hrtimer_callback_running(timer))
- return 0;
-@@ -621,6 +622,9 @@
- return res;
- }
-
-+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
-+static int hrtimer_rt_defer(struct hrtimer *timer);
-+
- /*
- * Initialize the high resolution related parts of cpu_base
- */
-@@ -630,6 +634,21 @@
- base->hres_active = 0;
- }
-
-+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-+ struct hrtimer_clock_base *base,
-+ int wakeup)
-+{
-+ if (!hrtimer_reprogram(timer, base))
-+ return 0;
-+ if (!wakeup)
-+ return -ETIME;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ if (!hrtimer_rt_defer(timer))
-+ return -ETIME;
-+#endif
-+ return 1;
-+}
-+
- static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
- {
- ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-@@ -695,6 +714,44 @@
-
- static DECLARE_WORK(hrtimer_work, clock_was_set_work);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT cannot call schedule_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *clock_set_delay_thread;
-+static bool do_clock_set_delay;
-+
-+static int run_clock_set_delay(void *ignore)
-+{
-+ while (!kthread_should_stop()) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (do_clock_set_delay) {
-+ do_clock_set_delay = false;
-+ schedule_work(&hrtimer_work);
-+ }
-+ schedule();
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+}
-+
-+void clock_was_set_delayed(void)
-+{
-+ do_clock_set_delay = true;
-+ /* Make visible before waking up process */
-+ smp_wmb();
-+ wake_up_process(clock_set_delay_thread);
-+}
-+
-+static __init int create_clock_set_delay_thread(void)
-+{
-+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
-+ BUG_ON(!clock_set_delay_thread);
-+ return 0;
-+}
-+early_initcall(create_clock_set_delay_thread);
-+#else /* PREEMPT_RT_FULL */
- /*
- * Called from timekeeping and resume code to reprogram the hrtimer
- * interrupt device on all cpus.
-@@ -703,6 +760,7 @@
- {
- schedule_work(&hrtimer_work);
- }
-+#endif
-
- #else
-
-@@ -711,6 +769,13 @@
- static inline int hrtimer_switch_to_hres(void) { return 0; }
- static inline void
- hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-+ struct hrtimer_clock_base *base,
-+ int wakeup)
-+{
-+ return 0;
-+}
-+
- static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
- {
-@@ -718,7 +783,6 @@
- }
- static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
- static inline void retrigger_next_event(void *arg) { }
--
- #endif /* CONFIG_HIGH_RES_TIMERS */
-
- /*
-@@ -836,6 +900,32 @@
- }
- EXPORT_SYMBOL_GPL(hrtimer_forward);
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
-+
-+/**
-+ * hrtimer_wait_for_timer - Wait for a running timer
-+ *
-+ * @timer: timer to wait for
-+ *
-+ * The function waits in case the timers callback function is
-+ * currently executed on the waitqueue of the timer base. The
-+ * waitqueue is woken up after the timer callback function has
-+ * finished execution.
-+ */
-+void hrtimer_wait_for_timer(const struct hrtimer *timer)
-+{
-+ struct hrtimer_clock_base *base = timer->base;
-+
-+ if (base && base->cpu_base && !timer->irqsafe)
-+ wait_event(base->cpu_base->wait,
-+ !(timer->state & HRTIMER_STATE_CALLBACK));
-+}
-+
-+#else
-+# define wake_up_timer_waiters(b) do { } while (0)
-+#endif
-+
- /*
- * enqueue_hrtimer - internal function to (re)start a timer
- *
-@@ -879,6 +969,11 @@
- if (!(timer->state & HRTIMER_STATE_ENQUEUED))
- goto out;
-
-+ if (unlikely(!list_empty(&timer->cb_entry))) {
-+ list_del_init(&timer->cb_entry);
-+ goto out;
-+ }
-+
- next_timer = timerqueue_getnext(&base->active);
- timerqueue_del(&base->active, &timer->node);
- if (&timer->node == next_timer) {
-@@ -966,7 +1061,16 @@
- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
- timer_stats_hrtimer_set_start_info(timer);
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ {
-+ ktime_t now = new_base->get_time();
-
-+ if (ktime_to_ns(tim) < ktime_to_ns(now))
-+ timer->praecox = now;
-+ else
-+ timer->praecox = ktime_set(0, 0);
-+ }
-+#endif
- leftmost = enqueue_hrtimer(timer, new_base);
-
- if (!leftmost) {
-@@ -980,15 +1084,26 @@
- * on dynticks target.
- */
- wake_up_nohz_cpu(new_base->cpu_base->cpu);
-- } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
-- hrtimer_reprogram(timer, new_base)) {
-+ } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases)) {
-+
-+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
-+ if (ret < 0) {
-+ /*
-+			 * because our current timer has already elapsed),
-+ * because out current timer is already elapsed),
-+ * remove it again and report a failure. This avoids
-+ * stale base->first entries.
-+ */
-+ debug_deactivate(timer);
-+ __remove_hrtimer(timer, new_base,
-+ timer->state & HRTIMER_STATE_CALLBACK, 0);
-+ } else if (ret > 0) {
- /*
- * Only allow reprogramming if the new base is on this CPU.
- * (it might still be on another CPU if the timer was pending)
- *
- * XXX send_remote_softirq() ?
- */
-- if (wakeup) {
- /*
- * We need to drop cpu_base->lock to avoid a
- * lock ordering issue vs. rq->lock.
-@@ -996,9 +1111,7 @@
- raw_spin_unlock(&new_base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- local_irq_restore(flags);
-- return ret;
-- } else {
-- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ return 0;
- }
- }
-
-@@ -1089,7 +1202,7 @@
-
- if (ret >= 0)
- return ret;
-- cpu_relax();
-+ hrtimer_wait_for_timer(timer);
- }
- }
- EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1153,6 +1266,7 @@
-
- base = hrtimer_clockid_to_base(clock_id);
- timer->base = &cpu_base->clock_base[base];
-+ INIT_LIST_HEAD(&timer->cb_entry);
- timerqueue_init(&timer->node);
-
- #ifdef CONFIG_TIMER_STATS
-@@ -1236,6 +1350,126 @@
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- }
-
-+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
-+ struct hrtimer_clock_base *base)
-+{
-+ /*
-+ * Note, we clear the callback flag before we requeue the
-+ * timer otherwise we trigger the callback_running() check
-+ * in hrtimer_reprogram().
-+ */
-+ timer->state &= ~HRTIMER_STATE_CALLBACK;
-+
-+ if (restart != HRTIMER_NORESTART) {
-+ BUG_ON(hrtimer_active(timer));
-+ /*
-+ * Enqueue the timer, if it's the leftmost timer then
-+ * we need to reprogram it.
-+ */
-+ if (!enqueue_hrtimer(timer, base))
-+ return;
-+
-+#ifndef CONFIG_HIGH_RES_TIMERS
-+ }
-+#else
-+ if (base->cpu_base->hres_active &&
-+ hrtimer_reprogram(timer, base))
-+ goto requeue;
-+
-+ } else if (hrtimer_active(timer)) {
-+ /*
-+ * If the timer was rearmed on another CPU, reprogram
-+ * the event device.
-+ */
-+ if (&timer->node == base->active.next &&
-+ base->cpu_base->hres_active &&
-+ hrtimer_reprogram(timer, base))
-+ goto requeue;
-+ }
-+ return;
-+
-+requeue:
-+ /*
-+ * Timer is expired. Thus move it from tree to pending list
-+	 * The timer has expired, so move it from the tree to the
-+	 * pending list again.
-+ __remove_hrtimer(timer, base, timer->state, 0);
-+ list_add_tail(&timer->cb_entry, &base->expired);
-+#endif
-+}
-+
-+/*
-+ * The changes in mainline which removed the callback modes from
-+ * hrtimer are not yet working with -rt. The non wakeup_process()
-+ * based callbacks which involve sleeping locks need to be treated
-+ * separately.
-+ */
-+static void hrtimer_rt_run_pending(void)
-+{
-+ enum hrtimer_restart (*fn)(struct hrtimer *);
-+ struct hrtimer_cpu_base *cpu_base;
-+ struct hrtimer_clock_base *base;
-+ struct hrtimer *timer;
-+ int index, restart;
-+
-+ local_irq_disable();
-+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
-+
-+ raw_spin_lock(&cpu_base->lock);
-+
-+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-+ base = &cpu_base->clock_base[index];
-+
-+ while (!list_empty(&base->expired)) {
-+ timer = list_first_entry(&base->expired,
-+ struct hrtimer, cb_entry);
-+
-+ /*
-+ * Same as the above __run_hrtimer function
-+			 * except that we run with interrupts enabled.
-+ */
-+ debug_hrtimer_deactivate(timer);
-+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
-+ timer_stats_account_hrtimer(timer);
-+ fn = timer->function;
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+ restart = fn(timer);
-+ raw_spin_lock_irq(&cpu_base->lock);
-+
-+ hrtimer_rt_reprogram(restart, timer, base);
-+ }
-+ }
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+
-+ wake_up_timer_waiters(cpu_base);
-+}
-+
-+static int hrtimer_rt_defer(struct hrtimer *timer)
-+{
-+ if (timer->irqsafe)
-+ return 0;
-+
-+ __remove_hrtimer(timer, timer->base, timer->state, 0);
-+ list_add_tail(&timer->cb_entry, &timer->base->expired);
-+ return 1;
-+}
-+
-+#else
-+
-+static inline void hrtimer_rt_run_pending(void)
-+{
-+ hrtimer_peek_ahead_timers();
-+}
-+
-+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
-+
-+#endif
-+
- #ifdef CONFIG_HIGH_RES_TIMERS
-
- /*
-@@ -1246,7 +1480,7 @@
- {
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires_next, now, entry_time, delta;
-- int i, retries = 0;
-+ int i, retries = 0, raise = 0;
-
- BUG_ON(!cpu_base->hres_active);
- cpu_base->nr_events++;
-@@ -1281,6 +1515,15 @@
-
- timer = container_of(node, struct hrtimer, node);
-
-+ trace_hrtimer_interrupt(raw_smp_processor_id(),
-+ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
-+ timer->praecox : hrtimer_get_expires(timer),
-+ basenow)),
-+ current,
-+ timer->function == hrtimer_wakeup ?
-+ container_of(timer, struct hrtimer_sleeper,
-+ timer)->task : NULL);
-+
- /*
- * The immediate goal for using the softexpires is
- * minimizing wakeups, not running timers at the
-@@ -1296,7 +1539,10 @@
- if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
- break;
-
-- __run_hrtimer(timer, &basenow);
-+ if (!hrtimer_rt_defer(timer))
-+ __run_hrtimer(timer, &basenow);
-+ else
-+ raise = 1;
- }
- }
- /* Reevaluate the clock bases for the next expiry */
-@@ -1313,7 +1559,7 @@
- if (expires_next.tv64 == KTIME_MAX ||
- !tick_program_event(expires_next, 0)) {
- cpu_base->hang_detected = 0;
-- return;
-+ goto out;
- }
-
- /*
-@@ -1357,6 +1603,9 @@
- tick_program_event(expires_next, 1);
- printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
- ktime_to_ns(delta));
-+out:
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- /*
-@@ -1392,18 +1641,18 @@
- __hrtimer_peek_ahead_timers();
- local_irq_restore(flags);
- }
--
--static void run_hrtimer_softirq(struct softirq_action *h)
--{
-- hrtimer_peek_ahead_timers();
--}
--
- #else /* CONFIG_HIGH_RES_TIMERS */
-
- static inline void __hrtimer_peek_ahead_timers(void) { }
-
- #endif /* !CONFIG_HIGH_RES_TIMERS */
-
-+
-+static void run_hrtimer_softirq(struct softirq_action *h)
-+{
-+ hrtimer_rt_run_pending();
-+}
-+
- /*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
-@@ -1436,7 +1685,7 @@
- struct timerqueue_node *node;
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- struct hrtimer_clock_base *base;
-- int index, gettime = 1;
-+ int index, gettime = 1, raise = 0;
-
- if (hrtimer_hres_active())
- return;
-@@ -1461,10 +1710,16 @@
- hrtimer_get_expires_tv64(timer))
- break;
-
-- __run_hrtimer(timer, &base->softirq_time);
-+ if (!hrtimer_rt_defer(timer))
-+ __run_hrtimer(timer, &base->softirq_time);
-+ else
-+ raise = 1;
- }
- raw_spin_unlock(&cpu_base->lock);
- }
-+
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- /*
-@@ -1486,16 +1741,18 @@
- void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
- {
- sl->timer.function = hrtimer_wakeup;
-+ sl->timer.irqsafe = 1;
- sl->task = task;
- }
- EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-
--static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
-+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
-+ unsigned long state)
- {
- hrtimer_init_sleeper(t, current);
-
- do {
-- set_current_state(TASK_INTERRUPTIBLE);
-+ set_current_state(state);
- hrtimer_start_expires(&t->timer, mode);
- if (!hrtimer_active(&t->timer))
- t->task = NULL;
-@@ -1539,7 +1796,8 @@
- HRTIMER_MODE_ABS);
- hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
-
-- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
-+ /* cpu_chill() does not care about restart state. */
-+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
- goto out;
-
- rmtp = restart->nanosleep.rmtp;
-@@ -1556,8 +1814,10 @@
- return ret;
- }
-
--long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-- const enum hrtimer_mode mode, const clockid_t clockid)
-+static long
-+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-+ const enum hrtimer_mode mode, const clockid_t clockid,
-+ unsigned long state)
- {
- struct restart_block *restart;
- struct hrtimer_sleeper t;
-@@ -1570,7 +1830,7 @@
-
- hrtimer_init_on_stack(&t.timer, clockid, mode);
- hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
-- if (do_nanosleep(&t, mode))
-+ if (do_nanosleep(&t, mode, state))
- goto out;
-
- /* Absolute timers do not update the rmtp value and restart: */
-@@ -1597,6 +1857,12 @@
- return ret;
- }
-
-+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-+ const enum hrtimer_mode mode, const clockid_t clockid)
-+{
-+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
-+}
-+
- SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
- struct timespec __user *, rmtp)
- {
-@@ -1611,6 +1877,26 @@
- return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
-+ */
-+void cpu_chill(void)
-+{
-+ struct timespec tu = {
-+ .tv_nsec = NSEC_PER_MSEC,
-+ };
-+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
-+
-+ current->flags |= PF_NOFREEZE;
-+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
-+ TASK_UNINTERRUPTIBLE);
-+ if (!freeze_flag)
-+ current->flags &= ~PF_NOFREEZE;
-+}
-+EXPORT_SYMBOL(cpu_chill);
-+#endif
-+
- /*
- * Functions related to boot-time initialization:
- */
-@@ -1622,10 +1908,14 @@
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- cpu_base->clock_base[i].cpu_base = cpu_base;
- timerqueue_init_head(&cpu_base->clock_base[i].active);
-+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
- }
-
- cpu_base->cpu = cpu;
- hrtimer_init_hres(cpu_base);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ init_waitqueue_head(&cpu_base->wait);
-+#endif
- }
-
- #ifdef CONFIG_HOTPLUG_CPU
-@@ -1731,9 +2021,7 @@
- hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- register_cpu_notifier(&hrtimers_nb);
--#ifdef CONFIG_HIGH_RES_TIMERS
- open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
--#endif
- }
-
- /**
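The hrtimer.c hunk is the core of the RT timer rework: timers not marked irqsafe are no longer run from the hard interrupt but moved onto a per-clock-base expired list by hrtimer_rt_defer() and executed later from HRTIMER_SOFTIRQ with interrupts enabled, where sleeping locks are legal. A compilable userspace sketch of that defer-or-run-now split (field names mirror the patch; the list handling is simplified and the softirq is just a function call):

#include <stdbool.h>
#include <stdio.h>

/* Simplified timer: irqsafe handlers run in "hard" context,
 * the rest are deferred to a list drained in "soft" context. */
struct timer {
	bool irqsafe;
	void (*fn)(struct timer *);
	struct timer *next;          /* stand-in for cb_entry */
};

static struct timer *expired;        /* deferred-callback list */

static bool rt_defer(struct timer *t)
{
	if (t->irqsafe)
		return false;            /* run directly, as __run_hrtimer would */
	t->next = expired;           /* defer to softirq context */
	expired = t;
	return true;
}

static void hard_irq_expire(struct timer *t)
{
	if (!rt_defer(t))
		t->fn(t);                /* safe with interrupts off */
	/* else: the real code raises HRTIMER_SOFTIRQ here */
}

static void run_pending(void)        /* softirq side, interrupts enabled */
{
	while (expired) {
		struct timer *t = expired;
		expired = t->next;
		t->fn(t);                /* may take sleeping locks on RT */
	}
}

static void hello(struct timer *t) { puts(t->irqsafe ? "hard" : "soft"); }

int main(void)
{
	struct timer a = { .irqsafe = true,  .fn = hello };
	struct timer b = { .irqsafe = false, .fn = hello };
	hard_irq_expire(&a);   /* prints "hard" immediately */
	hard_irq_expire(&b);   /* deferred */
	run_pending();         /* prints "soft" */
	return 0;
}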
-diff -Nur linux-4.1.13.orig/kernel/time/itimer.c linux-4.1.13/kernel/time/itimer.c
---- linux-4.1.13.orig/kernel/time/itimer.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/itimer.c 2015-11-29 09:23:09.649611524 +0100
-@@ -213,6 +213,7 @@
- /* We are sharing ->siglock with it_real_fn() */
- if (hrtimer_try_to_cancel(timer) < 0) {
- spin_unlock_irq(&tsk->sighand->siglock);
-+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
- goto again;
- }
- expires = timeval_to_ktime(value->it_value);
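The one-line itimer.c change replaces the old spin-and-retry with hrtimer_wait_for_timer(), which sleeps on the base's waitqueue until the running callback completes. A userspace sketch of the same sleep-then-retry cancel, with pthread primitives standing in for the kernel waitqueue (all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cb_done = PTHREAD_COND_INITIALIZER;
static bool callback_running = true;  /* pretend a callback is in flight */

static void *timer_callback(void *arg)
{
	(void)arg;
	sleep(1);                         /* pretend the callback runs a while */
	pthread_mutex_lock(&lock);
	callback_running = false;
	pthread_cond_broadcast(&cb_done); /* wake_up(&base->wait) equivalent */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void cancel_sync(void)
{
	pthread_mutex_lock(&lock);
	while (callback_running)          /* hrtimer_try_to_cancel() < 0 */
		pthread_cond_wait(&cb_done, &lock);
	/* now safe to tear the timer down */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, timer_callback, NULL);
	cancel_sync();
	puts("timer cancelled without busy-waiting");
	pthread_join(t, NULL);
	return 0;
}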
-diff -Nur linux-4.1.13.orig/kernel/time/jiffies.c linux-4.1.13/kernel/time/jiffies.c
---- linux-4.1.13.orig/kernel/time/jiffies.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/jiffies.c 2015-11-29 09:23:09.649611524 +0100
-@@ -74,7 +74,8 @@
- .max_cycles = 10,
- };
-
--__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
-+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
-+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
-
- #if (BITS_PER_LONG < 64)
- u64 get_jiffies_64(void)
-@@ -83,9 +84,9 @@
- u64 ret;
-
- do {
-- seq = read_seqbegin(&jiffies_lock);
-+ seq = read_seqcount_begin(&jiffies_seq);
- ret = jiffies_64;
-- } while (read_seqretry(&jiffies_lock, seq));
-+ } while (read_seqcount_retry(&jiffies_seq, seq));
- return ret;
- }
- EXPORT_SYMBOL(get_jiffies_64);
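jiffies_lock is split into a raw spinlock for writers plus a bare seqcount for readers, so get_jiffies_64() keeps its lockless retry loop without ever touching a sleeping lock. A reader-side sketch in plain C11 (memory ordering simplified to seq_cst; the kernel seqcount is more careful, and a real seqlock also needs fences around the payload access):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint jiffies_seq;
static uint64_t jiffies64;    /* too wide for one atomic load on 32-bit */

static uint64_t get_jiffies_64_sketch(void)
{
	unsigned s;
	uint64_t ret;
	do {
		while ((s = atomic_load(&jiffies_seq)) & 1)
			;             /* odd: writer in progress, wait for even */
		ret = jiffies64;
	} while (atomic_load(&jiffies_seq) != s);  /* retry if it moved */
	return ret;
}

int main(void)
{
	/* single-threaded writer bump; the locked writer side is
	 * sketched after the timekeeping.c hunk further down */
	atomic_fetch_add(&jiffies_seq, 1);
	jiffies64 = 42;
	atomic_fetch_add(&jiffies_seq, 1);
	printf("%llu\n", (unsigned long long)get_jiffies_64_sketch());
	return 0;
}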
-diff -Nur linux-4.1.13.orig/kernel/time/ntp.c linux-4.1.13/kernel/time/ntp.c
---- linux-4.1.13.orig/kernel/time/ntp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/ntp.c 2015-11-29 09:23:09.649611524 +0100
-@@ -10,6 +10,7 @@
- #include <linux/workqueue.h>
- #include <linux/hrtimer.h>
- #include <linux/jiffies.h>
-+#include <linux/kthread.h>
- #include <linux/math64.h>
- #include <linux/timex.h>
- #include <linux/time.h>
-@@ -529,10 +530,52 @@
- &sync_cmos_work, timespec_to_jiffies(&next));
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT cannot call schedule_delayed_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *cmos_delay_thread;
-+static bool do_cmos_delay;
-+
-+static int run_cmos_delay(void *ignore)
-+{
-+ while (!kthread_should_stop()) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (do_cmos_delay) {
-+ do_cmos_delay = false;
-+ queue_delayed_work(system_power_efficient_wq,
-+ &sync_cmos_work, 0);
-+ }
-+ schedule();
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+}
-+
-+void ntp_notify_cmos_timer(void)
-+{
-+ do_cmos_delay = true;
-+ /* Make visible before waking up process */
-+ smp_wmb();
-+ wake_up_process(cmos_delay_thread);
-+}
-+
-+static __init int create_cmos_delay_thread(void)
-+{
-+ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
-+ BUG_ON(!cmos_delay_thread);
-+ return 0;
-+}
-+early_initcall(create_cmos_delay_thread);
-+
-+#else
-+
- void ntp_notify_cmos_timer(void)
- {
- queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
- }
-+#endif /* CONFIG_PREEMPT_RT_FULL */
-
- #else
- void ntp_notify_cmos_timer(void) { }
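ntp.c uses the same pattern as clock_was_set_delayed() earlier in this patch: on PREEMPT_RT_FULL the interrupt context may only set a flag and wake a dedicated kthread, which then calls the workqueue API from process context. A userspace sketch of that flag-and-kick handoff (pthread condition variables stand in for wake_up_process(); the kernel version avoids the lock entirely with a store, smp_wmb(), and a wakeup):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kicked = PTHREAD_COND_INITIALIZER;
static bool do_delay, stop;

static void *helper(void *arg)    /* the kcmosdelayd equivalent */
{
	(void)arg;
	pthread_mutex_lock(&m);
	for (;;) {
		while (!do_delay && !stop)
			pthread_cond_wait(&kicked, &m);
		if (do_delay) {
			do_delay = false;
			pthread_mutex_unlock(&m);
			/* the call RT forbids from interrupt context */
			puts("queueing deferred work");
			pthread_mutex_lock(&m);
			continue;
		}
		break;                /* stop requested and nothing pending */
	}
	pthread_mutex_unlock(&m);
	return NULL;
}

static void notify(void)          /* ntp_notify_cmos_timer() equivalent */
{
	pthread_mutex_lock(&m);
	do_delay = true;
	pthread_cond_signal(&kicked);
	pthread_mutex_unlock(&m);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, helper, NULL);
	notify();
	pthread_mutex_lock(&m);
	stop = true;
	pthread_cond_signal(&kicked);
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	return 0;
}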
-diff -Nur linux-4.1.13.orig/kernel/time/posix-cpu-timers.c linux-4.1.13/kernel/time/posix-cpu-timers.c
---- linux-4.1.13.orig/kernel/time/posix-cpu-timers.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/posix-cpu-timers.c 2015-11-29 09:23:09.649611524 +0100
-@@ -3,6 +3,7 @@
- */
-
- #include <linux/sched.h>
-+#include <linux/sched/rt.h>
- #include <linux/posix-timers.h>
- #include <linux/errno.h>
- #include <linux/math64.h>
-@@ -626,7 +627,7 @@
- /*
- * Disarm any old timer after extracting its expiry time.
- */
-- WARN_ON_ONCE(!irqs_disabled());
-+ WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- ret = 0;
- old_incr = timer->it.cpu.incr;
-@@ -1047,7 +1048,7 @@
- /*
- * Now re-arm for the new expiry time.
- */
-- WARN_ON_ONCE(!irqs_disabled());
-+ WARN_ON_ONCE_NONRT(!irqs_disabled());
- arm_timer(timer);
- unlock_task_sighand(p, &flags);
-
-@@ -1113,10 +1114,11 @@
- sig = tsk->signal;
- if (sig->cputimer.running) {
- struct task_cputime group_sample;
-+ unsigned long flags;
-
-- raw_spin_lock(&sig->cputimer.lock);
-+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
- group_sample = sig->cputimer.cputime;
-- raw_spin_unlock(&sig->cputimer.lock);
-+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
-
- if (task_cputime_expired(&group_sample, &sig->cputime_expires))
- return 1;
-@@ -1130,13 +1132,13 @@
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
--void run_posix_cpu_timers(struct task_struct *tsk)
-+static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
-
-- WARN_ON_ONCE(!irqs_disabled());
-+ WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- /*
- * The fast path checks that there are no expired thread or thread
-@@ -1194,6 +1196,190 @@
- }
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+#include <linux/kthread.h>
-+#include <linux/cpu.h>
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-+
-+static int posix_cpu_timers_thread(void *data)
-+{
-+ int cpu = (long)data;
-+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
-+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
-+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
-+
-+		/* it's possible the list is empty; just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
-+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
-+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
-+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
-+ }
-+ return 0;
-+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+}
-+
-+static inline int __fastpath_timer_check(struct task_struct *tsk)
-+{
-+ /* tsk == current, ensure it is safe to use ->signal/sighand */
-+ if (unlikely(tsk->exit_state))
-+ return 0;
-+
-+ if (!task_cputime_zero(&tsk->cputime_expires))
-+ return 1;
-+
-+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ unsigned long cpu = smp_processor_id();
-+ struct task_struct *tasklist;
-+
-+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
-+ return;
-+ /* get per-cpu references */
-+ tasklist = per_cpu(posix_timer_tasklist, cpu);
-+
-+ /* check to see if we're already queued */
-+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-+ get_task_struct(tsk);
-+ if (tasklist) {
-+ tsk->posix_timer_list = tasklist;
-+ } else {
-+ /*
-+ * The list is terminated by a self-pointing
-+ * task_struct
-+ */
-+ tsk->posix_timer_list = tsk;
-+ }
-+ per_cpu(posix_timer_tasklist, cpu) = tsk;
-+
-+ wake_up_process(per_cpu(posix_timer_task, cpu));
-+ }
-+}
-+
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
-+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
-+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, &param);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+		/* Strictly unnecessary, as the first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task, cpu),
-+ cpumask_any(cpu_online_mask));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+#endif
-+ }
-+ return NOTIFY_OK;
-+}
-+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
-+};
-+
-+static int __init posix_cpu_thread_init(void)
-+{
-+ void *hcpu = (void *)(long)smp_processor_id();
-+ /* Start one for boot CPU. */
-+ unsigned long cpu;
-+
-+ /* init the per-cpu posix_timer_tasklets */
-+ for_each_possible_cpu(cpu)
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
-+ return 0;
-+}
-+early_initcall(posix_cpu_thread_init);
-+#else /* CONFIG_PREEMPT_RT_BASE */
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ __run_posix_cpu_timers(tsk);
-+}
-+#endif /* CONFIG_PREEMPT_RT_BASE */
-+
- /*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
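On RT, run_posix_cpu_timers() no longer expires timers in interrupt context; the task queues itself on a per-CPU list consumed by a high-priority posixcputmr/N kthread. The list is intrusive and self-terminated: posix_timer_list is NULL while unqueued and the final element points at itself, so NULL stays free to mean "not queued". A compilable sketch of exactly that list discipline (struct and field names follow the patch; the per-CPU and locking details are omitted):

#include <stdio.h>

struct task {
	const char *name;
	struct task *posix_timer_list;
};

static struct task *tasklist;     /* stands in for the per-cpu head */

static void enqueue(struct task *t)
{
	if (t->posix_timer_list)
		return;               /* already queued */
	t->posix_timer_list = tasklist ? tasklist : t;  /* self-terminate */
	tasklist = t;
}

static void drain(void)           /* the helper thread's walk */
{
	struct task *t = tasklist;
	tasklist = NULL;              /* detach the whole list in one go */
	while (t) {
		struct task *next = t->posix_timer_list;
		printf("running timers for %s\n", t->name);
		t->posix_timer_list = NULL;
		t = (next == t) ? NULL : next;   /* self-pointer == end */
	}
}

int main(void)
{
	struct task a = { "a" }, b = { "b" };
	enqueue(&a);
	enqueue(&b);
	drain();
	return 0;
}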
-diff -Nur linux-4.1.13.orig/kernel/time/posix-timers.c linux-4.1.13/kernel/time/posix-timers.c
---- linux-4.1.13.orig/kernel/time/posix-timers.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/posix-timers.c 2015-11-29 09:23:09.649611524 +0100
-@@ -499,6 +499,7 @@
- static struct pid *good_sigevent(sigevent_t * event)
- {
- struct task_struct *rtn = current->group_leader;
-+ int sig = event->sigev_signo;
-
- if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
- (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -507,7 +508,8 @@
- return NULL;
-
- if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
-+ sig_kernel_coredump(sig)))
- return NULL;
-
- return task_pid(rtn);
-@@ -819,6 +821,20 @@
- return overrun;
- }
-
-+/*
-+ * Protected by RCU!
-+ */
-+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (kc->timer_set == common_timer_set)
-+ hrtimer_wait_for_timer(&timr->it.real.timer);
-+ else
-+ /* FIXME: Whacky hack for posix-cpu-timers */
-+ schedule_timeout(1);
-+#endif
-+}
-+
- /* Set a POSIX.1b interval timer. */
- /* timr->it_lock is taken. */
- static int
-@@ -896,6 +912,7 @@
- if (!timr)
- return -EINVAL;
-
-+ rcu_read_lock();
- kc = clockid_to_kclock(timr->it_clock);
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
-@@ -904,9 +921,12 @@
-
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
-+ timer_wait_for_callback(kc, timr);
- rtn = NULL; // We already got the old time...
-+ rcu_read_unlock();
- goto retry;
- }
-+ rcu_read_unlock();
-
- if (old_setting && !error &&
- copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -944,10 +964,15 @@
- if (!timer)
- return -EINVAL;
-
-+ rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
-+ rcu_read_unlock();
-
- spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
-@@ -973,8 +998,18 @@
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
-
-+ /* On RT we can race with a deletion */
-+ if (!timer->it_signal) {
-+ unlock_timer(timer, flags);
-+ return;
-+ }
-+
- if (timer_delete_hook(timer) == TIMER_RETRY) {
-+ rcu_read_lock();
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
- list_del(&timer->list);
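The posix-timers.c changes all follow one rule: when a set or delete races with a running callback (TIMER_RETRY), drop the timer lock before waiting for the callback, then retake it and retry, because the callback itself may need that lock. A compilable sketch of the retry_delete shape (the kernel version additionally holds the RCU read lock across the wait; all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t it_lock = PTHREAD_MUTEX_INITIALIZER;
static bool callback_running = true;   /* pretend a callback is in flight */

static int delete_hook(void)
{
	return callback_running ? -1 /* TIMER_RETRY */ : 0;
}

static void wait_for_callback(void)
{
	/* stand-in for timer_wait_for_callback(); here it just completes */
	callback_running = false;
}

static void timer_delete_sync(void)
{
retry_delete:
	pthread_mutex_lock(&it_lock);
	if (delete_hook() < 0) {
		pthread_mutex_unlock(&it_lock);  /* never wait with it_lock held */
		wait_for_callback();
		goto retry_delete;
	}
	/* ... list_del() and teardown, still under it_lock ... */
	pthread_mutex_unlock(&it_lock);
	puts("timer deleted");
}

int main(void)
{
	timer_delete_sync();
	return 0;
}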
-diff -Nur linux-4.1.13.orig/kernel/time/tick-common.c linux-4.1.13/kernel/time/tick-common.c
---- linux-4.1.13.orig/kernel/time/tick-common.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/tick-common.c 2015-11-29 09:23:09.649611524 +0100
-@@ -78,13 +78,15 @@
- static void tick_periodic(int cpu)
- {
- if (tick_do_timer_cpu == cpu) {
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
-
- /* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
-
- do_timer(1);
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
-
-@@ -146,9 +148,9 @@
- ktime_t next;
-
- do {
-- seq = read_seqbegin(&jiffies_lock);
-+ seq = read_seqcount_begin(&jiffies_seq);
- next = tick_next_period;
-- } while (read_seqretry(&jiffies_lock, seq));
-+ } while (read_seqcount_retry(&jiffies_seq, seq));
-
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
-
-diff -Nur linux-4.1.13.orig/kernel/time/tick-sched.c linux-4.1.13/kernel/time/tick-sched.c
---- linux-4.1.13.orig/kernel/time/tick-sched.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/tick-sched.c 2015-11-29 09:23:09.649611524 +0100
-@@ -62,7 +62,8 @@
- return;
-
-	/* Reevaluate with jiffies_lock held */
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
-
- delta = ktime_sub(now, last_jiffies_update);
- if (delta.tv64 >= tick_period.tv64) {
-@@ -85,10 +86,12 @@
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
- } else {
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- return;
- }
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
-
-@@ -99,12 +102,14 @@
- {
- ktime_t period;
-
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
- /* Did we start the jiffies update yet ? */
- if (last_jiffies_update.tv64 == 0)
- last_jiffies_update = tick_next_period;
- period = last_jiffies_update;
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- return period;
- }
-
-@@ -176,6 +181,11 @@
- return false;
- }
-
-+ if (!arch_irq_work_has_interrupt()) {
-+ trace_tick_stop(0, "missing irq work interrupt\n");
-+ return false;
-+ }
-+
- /* sched_clock_tick() needs us? */
- #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
- /*
-@@ -222,6 +232,7 @@
-
- static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
- .func = nohz_full_kick_work_func,
-+ .flags = IRQ_WORK_HARD_IRQ,
- };
-
- /*
-@@ -578,10 +589,10 @@
-
- /* Read jiffies and the time when jiffies were updated last */
- do {
-- seq = read_seqbegin(&jiffies_lock);
-+ seq = read_seqcount_begin(&jiffies_seq);
- last_update = last_jiffies_update;
- last_jiffies = jiffies;
-- } while (read_seqretry(&jiffies_lock, seq));
-+ } while (read_seqcount_retry(&jiffies_seq, seq));
-
- if (rcu_needs_cpu(&rcu_delta_jiffies) ||
- arch_needs_cpu() || irq_work_needs_cpu()) {
-@@ -759,14 +770,7 @@
- return false;
-
- if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-- static int ratelimit;
--
-- if (ratelimit < 10 &&
-- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-- pr_warn("NOHZ: local_softirq_pending %02x\n",
-- (unsigned int) local_softirq_pending());
-- ratelimit++;
-- }
-+ softirq_check_pending_idle();
- return false;
- }
-
-@@ -1154,6 +1158,7 @@
- * Emulate tick processing via per-CPU hrtimers:
- */
- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-+ ts->sched_timer.irqsafe = 1;
- ts->sched_timer.function = tick_sched_timer;
-
- /* Get the next period (per cpu) */
-diff -Nur linux-4.1.13.orig/kernel/time/timekeeping.c linux-4.1.13/kernel/time/timekeeping.c
---- linux-4.1.13.orig/kernel/time/timekeeping.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/timekeeping.c 2015-11-29 09:23:09.649611524 +0100
-@@ -2065,8 +2065,10 @@
- */
- void xtime_update(unsigned long ticks)
- {
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
- do_timer(ticks);
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
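xtime_update() shows the writer side of the jiffies_lock split: the raw spinlock only serializes writers against each other, while readers observe the seqcount bump alone (the reader loop is sketched after the jiffies.c hunk above). A minimal userspace pairing of the two (pthread spinlock standing in for the raw spinlock; seq_cst atomics simplify the real seqcount semantics):

#include <pthread.h>
#include <stdatomic.h>

static pthread_spinlock_t jiffies_lock;
static atomic_uint jiffies_seq;
static unsigned long long jiffies64;

static void xtime_update_sketch(unsigned long ticks)
{
	pthread_spin_lock(&jiffies_lock);    /* raw_spin_lock()          */
	atomic_fetch_add(&jiffies_seq, 1);   /* write_seqcount_begin()   */
	jiffies64 += ticks;                  /* do_timer(ticks)          */
	atomic_fetch_add(&jiffies_seq, 1);   /* write_seqcount_end()     */
	pthread_spin_unlock(&jiffies_lock);  /* raw_spin_unlock()        */
}

int main(void)
{
	pthread_spin_init(&jiffies_lock, PTHREAD_PROCESS_PRIVATE);
	xtime_update_sketch(1);
	return (int)(jiffies64 != 1);
}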
-diff -Nur linux-4.1.13.orig/kernel/time/timekeeping.h linux-4.1.13/kernel/time/timekeeping.h
---- linux-4.1.13.orig/kernel/time/timekeeping.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/timekeeping.h 2015-11-29 09:23:09.653611260 +0100
-@@ -22,7 +22,8 @@
- extern void do_timer(unsigned long ticks);
- extern void update_wall_time(void);
-
--extern seqlock_t jiffies_lock;
-+extern raw_spinlock_t jiffies_lock;
-+extern seqcount_t jiffies_seq;
-
- #define CS_NAME_LEN 32
-
-diff -Nur linux-4.1.13.orig/kernel/time/timer.c linux-4.1.13/kernel/time/timer.c
---- linux-4.1.13.orig/kernel/time/timer.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/time/timer.c 2015-11-29 09:23:09.653611260 +0100
-@@ -78,6 +78,9 @@
- struct tvec_base {
- spinlock_t lock;
- struct timer_list *running_timer;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ wait_queue_head_t wait_for_running_timer;
-+#endif
- unsigned long timer_jiffies;
- unsigned long next_timer;
- unsigned long active_timers;
-@@ -768,6 +771,36 @@
- }
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
-+ struct tvec_base *old,
-+ struct tvec_base *new)
-+{
-+ /* See the comment in lock_timer_base() */
-+ timer_set_base(timer, NULL);
-+ spin_unlock(&old->lock);
-+ spin_lock(&new->lock);
-+ timer_set_base(timer, new);
-+ return new;
-+}
-+#else
-+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
-+ struct tvec_base *old,
-+ struct tvec_base *new)
-+{
-+ /*
-+ * We cannot do the above because we might be preempted and
-+ * then the preempter would see NULL and loop forever.
-+ */
-+ if (spin_trylock(&new->lock)) {
-+ timer_set_base(timer, new);
-+ spin_unlock(&old->lock);
-+ return new;
-+ }
-+ return old;
-+}
-+#endif
-+
- static inline int
- __mod_timer(struct timer_list *timer, unsigned long expires,
- bool pending_only, int pinned)
-@@ -798,14 +831,8 @@
- * handler yet has not finished. This also guarantees that
- * the timer is serialized wrt itself.
- */
-- if (likely(base->running_timer != timer)) {
-- /* See the comment in lock_timer_base() */
-- timer_set_base(timer, NULL);
-- spin_unlock(&base->lock);
-- base = new_base;
-- spin_lock(&base->lock);
-- timer_set_base(timer, base);
-- }
-+ if (likely(base->running_timer != timer))
-+ base = switch_timer_base(timer, base, new_base);
- }
-
- timer->expires = expires;
-@@ -979,6 +1006,29 @@
- }
- EXPORT_SYMBOL_GPL(add_timer_on);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Wait for a running timer
-+ */
-+static void wait_for_running_timer(struct timer_list *timer)
-+{
-+ struct tvec_base *base = timer->base;
-+
-+ if (base->running_timer == timer)
-+ wait_event(base->wait_for_running_timer,
-+ base->running_timer != timer);
-+}
-+
-+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
-+#else
-+static inline void wait_for_running_timer(struct timer_list *timer)
-+{
-+ cpu_relax();
-+}
-+
-+# define wakeup_timer_waiters(b) do { } while (0)
-+#endif
-+
- /**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
-@@ -1036,7 +1086,7 @@
- }
- EXPORT_SYMBOL(try_to_del_timer_sync);
-
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
-
- /**
-@@ -1098,7 +1148,7 @@
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
-- cpu_relax();
-+ wait_for_running_timer(timer);
- }
- }
- EXPORT_SYMBOL(del_timer_sync);
-@@ -1219,15 +1269,17 @@
- if (irqsafe) {
- spin_unlock(&base->lock);
- call_timer_fn(timer, fn, data);
-+ base->running_timer = NULL;
- spin_lock(&base->lock);
- } else {
- spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn, data);
-+ base->running_timer = NULL;
- spin_lock_irq(&base->lock);
- }
- }
- }
-- base->running_timer = NULL;
-+ wakeup_timer_waiters(base);
- spin_unlock_irq(&base->lock);
- }
-
-@@ -1367,6 +1419,14 @@
- if (cpu_is_offline(smp_processor_id()))
- return expires;
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * On PREEMPT_RT we cannot sleep here. As a result we can't take
-+ * the base lock to check when the next timer is pending and so
-+ * we assume the next jiffy.
-+ */
-+ return now + 1;
-+#endif
- spin_lock(&base->lock);
- if (base->active_timers) {
- if (time_before_eq(base->next_timer, base->timer_jiffies))
-@@ -1392,13 +1452,13 @@
-
- /* Note: this timer irq context must be accounted for as well. */
- account_process_tick(p, user_tick);
-+ scheduler_tick();
- run_local_timers();
- rcu_check_callbacks(user_tick);
--#ifdef CONFIG_IRQ_WORK
-+#if defined(CONFIG_IRQ_WORK)
- if (in_irq())
- irq_work_tick();
- #endif
-- scheduler_tick();
- run_posix_cpu_timers(p);
- }
-
-@@ -1411,6 +1471,8 @@
-
- hrtimer_run_pending();
-
-+ irq_work_tick_soft();
-+
- if (time_after_eq(jiffies, base->timer_jiffies))
- __run_timers(base);
- }
-@@ -1566,7 +1628,7 @@
-
- BUG_ON(cpu_online(cpu));
- old_base = per_cpu(tvec_bases, cpu);
-- new_base = get_cpu_var(tvec_bases);
-+ new_base = get_local_var(tvec_bases);
- /*
- * The caller is globally serialized and nobody else
- * takes two locks at once, deadlock is not possible.
-@@ -1590,7 +1652,7 @@
-
- spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
-- put_cpu_var(tvec_bases);
-+ put_local_var(tvec_bases);
- }
-
- static int timer_cpu_notify(struct notifier_block *self,
-@@ -1625,6 +1687,9 @@
- base->cpu = cpu;
- per_cpu(tvec_bases, cpu) = base;
- spin_lock_init(&base->lock);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ init_waitqueue_head(&base->wait_for_running_timer);
-+#endif
-
- for (j = 0; j < TVN_SIZE; j++) {
- INIT_LIST_HEAD(base->tv5.vec + j);
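The subtle part of the timer.c rework is switch_timer_base(): mainline briefly publishes a NULL timer base while migrating, but an RT task preempted at that point would leave other CPUs spinning on NULL forever, so the RT variant trylocks the new base and simply stays on the old one under contention. A sketch of the trylock fallback (pthread mutexes stand in for the base locks; all names assumed):

#include <pthread.h>
#include <stdio.h>

struct base {
	pthread_mutex_t lock;
	const char *name;
};

/* Trylock-based migration from the hunk above: on contention, keep
 * the timer on its old base rather than publishing a NULL base. */
static struct base *switch_base(struct base *old, struct base *new)
{
	if (pthread_mutex_trylock(&new->lock) == 0) {
		/* timer_set_base(timer, new) would go here */
		pthread_mutex_unlock(&old->lock);
		return new;
	}
	return old;   /* contended: stay put, caller still holds old->lock */
}

int main(void)
{
	struct base a = { PTHREAD_MUTEX_INITIALIZER, "old" };
	struct base b = { PTHREAD_MUTEX_INITIALIZER, "new" };

	pthread_mutex_lock(&a.lock);          /* caller holds the old base */
	struct base *cur = switch_base(&a, &b);
	printf("timer now on %s base\n", cur->name);
	pthread_mutex_unlock(&cur->lock);
	return 0;
}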
-diff -Nur linux-4.1.13.orig/kernel/trace/Kconfig linux-4.1.13/kernel/trace/Kconfig
---- linux-4.1.13.orig/kernel/trace/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/Kconfig 2015-11-29 09:23:09.653611260 +0100
-@@ -187,6 +187,24 @@
- enabled. This option and the preempt-off timing option can be
- used together or separately.)
-
-+config INTERRUPT_OFF_HIST
-+ bool "Interrupts-off Latency Histogram"
-+ depends on IRQSOFF_TRACER
-+ help
-+ This option generates continuously updated histograms (one per cpu)
-+ of the duration of time periods with interrupts disabled. The
-+ histograms are disabled by default. To enable them, write a non-zero
-+ number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-+
-+ If PREEMPT_OFF_HIST is also selected, additional histograms (one
-+ per cpu) are generated that accumulate the duration of time periods
-+ when both interrupts and preemption are disabled. The histogram data
-+ will be located in the debug file system at
-+
-+ /sys/kernel/debug/tracing/latency_hist/irqsoff
-+
- config PREEMPT_TRACER
- bool "Preemption-off Latency Tracer"
- default n
-@@ -211,6 +229,24 @@
- enabled. This option and the irqs-off timing option can be
- used together or separately.)
-
-+config PREEMPT_OFF_HIST
-+ bool "Preemption-off Latency Histogram"
-+ depends on PREEMPT_TRACER
-+ help
-+ This option generates continuously updated histograms (one per cpu)
-+ of the duration of time periods with preemption disabled. The
-+ histograms are disabled by default. To enable them, write a non-zero
-+ number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-+
-+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
-+ per cpu) are generated that accumulate the duration of time periods
-+ when both interrupts and preemption are disabled. The histogram data
-+ will be located in the debug file system at
-+
-+ /sys/kernel/debug/tracing/latency_hist/preemptoff
-+
- config SCHED_TRACER
- bool "Scheduling Latency Tracer"
- select GENERIC_TRACER
-@@ -221,6 +257,74 @@
- This tracer tracks the latency of the highest priority task
- to be scheduled in, starting from the point it has woken up.
-
-+config WAKEUP_LATENCY_HIST
-+ bool "Scheduling Latency Histogram"
-+ depends on SCHED_TRACER
-+ help
-+ This option generates continuously updated histograms (one per cpu)
-+ of the scheduling latency of the highest priority task.
-+ The histograms are disabled by default. To enable them, write a
-+ non-zero number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
-+
-+ Two different algorithms are used, one to determine the latency of
-+ processes that exclusively use the highest priority of the system and
-+ another one to determine the latency of processes that share the
-+ highest system priority with other processes. The former is used to
-+ improve hardware and system software, the latter to optimize the
-+ priority design of a given system. The histogram data will be
-+ located in the debug file system at
-+
-+ /sys/kernel/debug/tracing/latency_hist/wakeup
-+
-+ and
-+
-+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
-+
-+ If both Scheduling Latency Histogram and Missed Timer Offsets
-+ Histogram are selected, additional histogram data will be collected
-+ that contain, in addition to the wakeup latency, the timer latency, in
-+ case the wakeup was triggered by an expired timer. These histograms
-+ are available in the
-+
-+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-+
-+ directory. They reflect the apparent interrupt and scheduling latency
-+	  and are best suited to determining the worst-case latency of a given
-+ system. To enable these histograms, write a non-zero number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-+
-+config MISSED_TIMER_OFFSETS_HIST
-+ depends on HIGH_RES_TIMERS
-+ select GENERIC_TRACER
-+ bool "Missed Timer Offsets Histogram"
-+ help
-+ Generate a histogram of missed timer offsets in microseconds. The
-+ histograms are disabled by default. To enable them, write a non-zero
-+ number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
-+
-+ The histogram data will be located in the debug file system at
-+
-+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
-+
-+ If both Scheduling Latency Histogram and Missed Timer Offsets
-+ Histogram are selected, additional histogram data will be collected
-+ that contain, in addition to the wakeup latency, the timer latency, in
-+ case the wakeup was triggered by an expired timer. These histograms
-+ are available in the
-+
-+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-+
-+ directory. They reflect the apparent interrupt and scheduling latency
-+	  and are best suited to determining the worst-case latency of a given
-+ system. To enable these histograms, write a non-zero number to
-+
-+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-+
- config ENABLE_DEFAULT_TRACERS
- bool "Trace process context switches and events"
- depends on !GENERIC_TRACER
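The Kconfig entries above gate the latency_hist.c file that follows, which accumulates per-CPU fixed-width histograms: in-range samples bump a bucket, out-of-range samples are counted separately, and min/max/average are tracked alongside. A userspace sketch of that accounting with the bucket count shrunk (field names follow struct hist_data; the per-CPU instances, hist_mode gate, and maxlatproc capture are omitted):

#include <limits.h>
#include <stdio.h>

#define MAX_ENTRY_NUM 64   /* 10240 in the patch; shrunk for the sketch */

struct hist_data {
	long offset, min_lat, max_lat;
	unsigned long long below_hist_bound_samples;
	unsigned long long above_hist_bound_samples;
	long long accumulate_lat;
	unsigned long long total_samples;
	unsigned long long hist_array[MAX_ENTRY_NUM];
};

static void account(struct hist_data *h, long latency)
{
	latency += h->offset;             /* bipolar scales shift by offset */
	if (latency < 0)
		h->below_hist_bound_samples++;
	else if (latency >= MAX_ENTRY_NUM)
		h->above_hist_bound_samples++;
	else
		h->hist_array[latency]++;
	if (latency > h->max_lat || h->min_lat == LONG_MAX)
		h->max_lat = latency;
	if (latency < h->min_lat)
		h->min_lat = latency;
	h->total_samples++;
	h->accumulate_lat += latency;
}

int main(void)
{
	struct hist_data h = { .min_lat = LONG_MAX, .max_lat = LONG_MIN };
	long samples[] = { 3, 7, 3, 70, -1 };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(*samples); i++)
		account(&h, samples[i]);
	printf("total=%llu min=%ld max=%ld avg=%lld\n",
	       h.total_samples, h.min_lat, h.max_lat,
	       h.accumulate_lat / (long long)h.total_samples);
	return 0;
}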
-diff -Nur linux-4.1.13.orig/kernel/trace/latency_hist.c linux-4.1.13/kernel/trace/latency_hist.c
---- linux-4.1.13.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.13/kernel/trace/latency_hist.c 2015-11-29 09:23:09.653611260 +0100
-@@ -0,0 +1,1178 @@
-+/*
-+ * kernel/trace/latency_hist.c
-+ *
-+ * Add support for histograms of preemption-off latency and
-+ * interrupt-off latency and wakeup latency, it depends on
-+ * Real-Time Preemption Support.
-+ *
-+ * Copyright (C) 2005 MontaVista Software, Inc.
-+ * Yi Yang <yyang@ch.mvista.com>
-+ *
-+ * Converted to work with the new latency tracer.
-+ * Copyright (C) 2008 Red Hat, Inc.
-+ * Steven Rostedt <srostedt@redhat.com>
-+ *
-+ */
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/percpu.h>
-+#include <linux/kallsyms.h>
-+#include <linux/uaccess.h>
-+#include <linux/sched.h>
-+#include <linux/sched/rt.h>
-+#include <linux/slab.h>
-+#include <linux/atomic.h>
-+#include <asm/div64.h>
-+
-+#include "trace.h"
-+#include <trace/events/sched.h>
-+
-+#define NSECS_PER_USECS 1000L
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/hist.h>
-+
-+enum {
-+ IRQSOFF_LATENCY = 0,
-+ PREEMPTOFF_LATENCY,
-+ PREEMPTIRQSOFF_LATENCY,
-+ WAKEUP_LATENCY,
-+ WAKEUP_LATENCY_SHAREDPRIO,
-+ MISSED_TIMER_OFFSETS,
-+ TIMERANDWAKEUP_LATENCY,
-+ MAX_LATENCY_TYPE,
-+};
-+
-+#define MAX_ENTRY_NUM 10240
-+
-+struct hist_data {
-+ atomic_t hist_mode; /* 0 log, 1 don't log */
-+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
-+ long min_lat;
-+ long max_lat;
-+ unsigned long long below_hist_bound_samples;
-+ unsigned long long above_hist_bound_samples;
-+ long long accumulate_lat;
-+ unsigned long long total_samples;
-+ unsigned long long hist_array[MAX_ENTRY_NUM];
-+};
-+
-+struct enable_data {
-+ int latency_type;
-+ int enabled;
-+};
-+
-+static char *latency_hist_dir_root = "latency_hist";
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
-+static char *irqsoff_hist_dir = "irqsoff";
-+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
-+static DEFINE_PER_CPU(int, hist_irqsoff_counting);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
-+static char *preemptoff_hist_dir = "preemptoff";
-+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
-+static DEFINE_PER_CPU(int, hist_preemptoff_counting);
-+#endif
-+
-+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
-+static char *preemptirqsoff_hist_dir = "preemptirqsoff";
-+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
-+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
-+#endif
-+
-+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
-+static struct enable_data preemptirqsoff_enabled_data = {
-+ .latency_type = PREEMPTIRQSOFF_LATENCY,
-+ .enabled = 0,
-+};
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+struct maxlatproc_data {
-+ char comm[FIELD_SIZEOF(struct task_struct, comm)];
-+ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
-+ int pid;
-+ int current_pid;
-+ int prio;
-+ int current_prio;
-+ long latency;
-+ long timeroffset;
-+ cycle_t timestamp;
-+};
-+#endif
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
-+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
-+static char *wakeup_latency_hist_dir = "wakeup";
-+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
-+static notrace void probe_wakeup_latency_hist_start(void *v,
-+ struct task_struct *p);
-+static notrace void probe_wakeup_latency_hist_stop(void *v,
-+ struct task_struct *prev, struct task_struct *next);
-+static notrace void probe_sched_migrate_task(void *,
-+ struct task_struct *task, int cpu);
-+static struct enable_data wakeup_latency_enabled_data = {
-+ .latency_type = WAKEUP_LATENCY,
-+ .enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
-+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
-+static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
-+static DEFINE_PER_CPU(int, wakeup_sharedprio);
-+static unsigned long wakeup_pid;
-+#endif
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
-+static char *missed_timer_offsets_dir = "missed_timer_offsets";
-+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
-+ long long offset, struct task_struct *curr, struct task_struct *task);
-+static struct enable_data missed_timer_offsets_enabled_data = {
-+ .latency_type = MISSED_TIMER_OFFSETS,
-+ .enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
-+static unsigned long missed_timer_offsets_pid;
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
-+static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
-+static struct enable_data timerandwakeup_enabled_data = {
-+ .latency_type = TIMERANDWAKEUP_LATENCY,
-+ .enabled = 0,
-+};
-+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
-+#endif
-+
-+void notrace latency_hist(int latency_type, int cpu, long latency,
-+ long timeroffset, cycle_t stop,
-+ struct task_struct *p)
-+{
-+ struct hist_data *my_hist;
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ struct maxlatproc_data *mp = NULL;
-+#endif
-+
-+ if (!cpu_possible(cpu) || latency_type < 0 ||
-+ latency_type >= MAX_LATENCY_TYPE)
-+ return;
-+
-+ switch (latency_type) {
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ case IRQSOFF_LATENCY:
-+ my_hist = &per_cpu(irqsoff_hist, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ case PREEMPTOFF_LATENCY:
-+ my_hist = &per_cpu(preemptoff_hist, cpu);
-+ break;
-+#endif
-+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-+ case PREEMPTIRQSOFF_LATENCY:
-+ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ case WAKEUP_LATENCY:
-+ my_hist = &per_cpu(wakeup_latency_hist, cpu);
-+ mp = &per_cpu(wakeup_maxlatproc, cpu);
-+ break;
-+ case WAKEUP_LATENCY_SHAREDPRIO:
-+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
-+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ case MISSED_TIMER_OFFSETS:
-+ my_hist = &per_cpu(missed_timer_offsets, cpu);
-+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
-+ break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ case TIMERANDWAKEUP_LATENCY:
-+ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
-+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
-+ break;
-+#endif
-+
-+ default:
-+ return;
-+ }
-+
-+ latency += my_hist->offset;
-+
-+ if (atomic_read(&my_hist->hist_mode) == 0)
-+ return;
-+
-+ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
-+ if (latency < 0)
-+ my_hist->below_hist_bound_samples++;
-+ else
-+ my_hist->above_hist_bound_samples++;
-+ } else
-+ my_hist->hist_array[latency]++;
-+
-+ if (unlikely(latency > my_hist->max_lat ||
-+ my_hist->min_lat == LONG_MAX)) {
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ if (latency_type == WAKEUP_LATENCY ||
-+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
-+ latency_type == MISSED_TIMER_OFFSETS ||
-+ latency_type == TIMERANDWAKEUP_LATENCY) {
-+ strncpy(mp->comm, p->comm, sizeof(mp->comm));
-+ strncpy(mp->current_comm, current->comm,
-+ sizeof(mp->current_comm));
-+ mp->pid = task_pid_nr(p);
-+ mp->current_pid = task_pid_nr(current);
-+ mp->prio = p->prio;
-+ mp->current_prio = current->prio;
-+ mp->latency = latency;
-+ mp->timeroffset = timeroffset;
-+ mp->timestamp = stop;
-+ }
-+#endif
-+ my_hist->max_lat = latency;
-+ }
-+ if (unlikely(latency < my_hist->min_lat))
-+ my_hist->min_lat = latency;
-+ my_hist->total_samples++;
-+ my_hist->accumulate_lat += latency;
-+}
-+
-+static void *l_start(struct seq_file *m, loff_t *pos)
-+{
-+ loff_t *index_ptr = NULL;
-+ loff_t index = *pos;
-+ struct hist_data *my_hist = m->private;
-+
-+ if (index == 0) {
-+ char minstr[32], avgstr[32], maxstr[32];
-+
-+ atomic_dec(&my_hist->hist_mode);
-+
-+ if (likely(my_hist->total_samples)) {
-+ long avg = (long) div64_s64(my_hist->accumulate_lat,
-+ my_hist->total_samples);
-+ snprintf(minstr, sizeof(minstr), "%ld",
-+ my_hist->min_lat - my_hist->offset);
-+ snprintf(avgstr, sizeof(avgstr), "%ld",
-+ avg - my_hist->offset);
-+ snprintf(maxstr, sizeof(maxstr), "%ld",
-+ my_hist->max_lat - my_hist->offset);
-+ } else {
-+ strcpy(minstr, "<undef>");
-+ strcpy(avgstr, minstr);
-+ strcpy(maxstr, minstr);
-+ }
-+
-+ seq_printf(m, "#Minimum latency: %s microseconds\n"
-+ "#Average latency: %s microseconds\n"
-+ "#Maximum latency: %s microseconds\n"
-+ "#Total samples: %llu\n"
-+ "#There are %llu samples lower than %ld"
-+ " microseconds.\n"
-+ "#There are %llu samples greater or equal"
-+ " than %ld microseconds.\n"
-+ "#usecs\t%16s\n",
-+ minstr, avgstr, maxstr,
-+ my_hist->total_samples,
-+ my_hist->below_hist_bound_samples,
-+ -my_hist->offset,
-+ my_hist->above_hist_bound_samples,
-+ MAX_ENTRY_NUM - my_hist->offset,
-+ "samples");
-+ }
-+ if (index < MAX_ENTRY_NUM) {
-+ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
-+ if (index_ptr)
-+ *index_ptr = index;
-+ }
-+
-+ return index_ptr;
-+}
-+
-+static void *l_next(struct seq_file *m, void *p, loff_t *pos)
-+{
-+ loff_t *index_ptr = p;
-+ struct hist_data *my_hist = m->private;
-+
-+ if (++*pos >= MAX_ENTRY_NUM) {
-+ atomic_inc(&my_hist->hist_mode);
-+ return NULL;
-+ }
-+ *index_ptr = *pos;
-+ return index_ptr;
-+}
-+
-+static void l_stop(struct seq_file *m, void *p)
-+{
-+ kfree(p);
-+}
-+
-+static int l_show(struct seq_file *m, void *p)
-+{
-+ int index = *(loff_t *) p;
-+ struct hist_data *my_hist = m->private;
-+
-+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
-+ my_hist->hist_array[index]);
-+ return 0;
-+}
-+
-+static const struct seq_operations latency_hist_seq_op = {
-+ .start = l_start,
-+ .next = l_next,
-+ .stop = l_stop,
-+ .show = l_show
-+};
-+
-+static int latency_hist_open(struct inode *inode, struct file *file)
-+{
-+ int ret;
-+
-+ ret = seq_open(file, &latency_hist_seq_op);
-+ if (!ret) {
-+ struct seq_file *seq = file->private_data;
-+ seq->private = inode->i_private;
-+ }
-+ return ret;
-+}
-+
-+static const struct file_operations latency_hist_fops = {
-+ .open = latency_hist_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static void clear_maxlatprocdata(struct maxlatproc_data *mp)
-+{
-+ mp->comm[0] = mp->current_comm[0] = '\0';
-+ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
-+ mp->latency = mp->timeroffset = -1;
-+ mp->timestamp = 0;
-+}
-+#endif
-+
-+static void hist_reset(struct hist_data *hist)
-+{
-+ atomic_dec(&hist->hist_mode);
-+
-+ memset(hist->hist_array, 0, sizeof(hist->hist_array));
-+ hist->below_hist_bound_samples = 0ULL;
-+ hist->above_hist_bound_samples = 0ULL;
-+ hist->min_lat = LONG_MAX;
-+ hist->max_lat = LONG_MIN;
-+ hist->total_samples = 0ULL;
-+ hist->accumulate_lat = 0LL;
-+
-+ atomic_inc(&hist->hist_mode);
-+}
-+
-+static ssize_t
-+latency_hist_reset(struct file *file, const char __user *a,
-+ size_t size, loff_t *off)
-+{
-+ int cpu;
-+ struct hist_data *hist = NULL;
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ struct maxlatproc_data *mp = NULL;
-+#endif
-+ off_t latency_type = (off_t) file->private_data;
-+
-+ for_each_online_cpu(cpu) {
-+
-+ switch (latency_type) {
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ case PREEMPTOFF_LATENCY:
-+ hist = &per_cpu(preemptoff_hist, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ case IRQSOFF_LATENCY:
-+ hist = &per_cpu(irqsoff_hist, cpu);
-+ break;
-+#endif
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+ case PREEMPTIRQSOFF_LATENCY:
-+ hist = &per_cpu(preemptirqsoff_hist, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ case WAKEUP_LATENCY:
-+ hist = &per_cpu(wakeup_latency_hist, cpu);
-+ mp = &per_cpu(wakeup_maxlatproc, cpu);
-+ break;
-+ case WAKEUP_LATENCY_SHAREDPRIO:
-+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
-+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
-+ break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ case MISSED_TIMER_OFFSETS:
-+ hist = &per_cpu(missed_timer_offsets, cpu);
-+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
-+ break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ case TIMERANDWAKEUP_LATENCY:
-+ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
-+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
-+ break;
-+#endif
-+ }
-+
-+ hist_reset(hist);
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ if (latency_type == WAKEUP_LATENCY ||
-+ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
-+ latency_type == MISSED_TIMER_OFFSETS ||
-+ latency_type == TIMERANDWAKEUP_LATENCY)
-+ clear_maxlatprocdata(mp);
-+#endif
-+ }
-+
-+ return size;
-+}
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static ssize_t
-+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+ char buf[64];
-+ int r;
-+ unsigned long *this_pid = file->private_data;
-+
-+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
-+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+}
-+
-+static ssize_t do_pid(struct file *file, const char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ char buf[64];
-+ unsigned long pid;
-+ unsigned long *this_pid = file->private_data;
-+
-+ if (cnt >= sizeof(buf))
-+ return -EINVAL;
-+
-+ if (copy_from_user(&buf, ubuf, cnt))
-+ return -EFAULT;
-+
-+ buf[cnt] = '\0';
-+
-+ if (kstrtoul(buf, 10, &pid))
-+ return -EINVAL;
-+
-+ *this_pid = pid;
-+
-+ return cnt;
-+}
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static ssize_t
-+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+ int r;
-+ struct maxlatproc_data *mp = file->private_data;
-+ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
-+ unsigned long long t;
-+ unsigned long usecs, secs;
-+ char *buf;
-+
-+ if (mp->pid == -1 || mp->current_pid == -1) {
-+ buf = "(none)\n";
-+ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
-+ strlen(buf));
-+ }
-+
-+ buf = kmalloc(strmaxlen, GFP_KERNEL);
-+ if (buf == NULL)
-+ return -ENOMEM;
-+
-+ t = ns2usecs(mp->timestamp);
-+ usecs = do_div(t, USEC_PER_SEC);
-+ secs = (unsigned long) t;
-+ r = snprintf(buf, strmaxlen,
-+ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
-+ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
-+ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
-+ secs, usecs);
-+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+ kfree(buf);
-+ return r;
-+}
-+#endif
-+
-+static ssize_t
-+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+ char buf[64];
-+ struct enable_data *ed = file->private_data;
-+ int r;
-+
-+ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
-+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-+}
-+
-+static ssize_t
-+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
-+{
-+ char buf[64];
-+	unsigned long enable;
-+ struct enable_data *ed = file->private_data;
-+
-+ if (cnt >= sizeof(buf))
-+ return -EINVAL;
-+
-+ if (copy_from_user(&buf, ubuf, cnt))
-+ return -EFAULT;
-+
-+ buf[cnt] = 0;
-+
-+ if (kstrtoul(buf, 10, &enable))
-+ return -EINVAL;
-+
-+ if ((enable && ed->enabled) || (!enable && !ed->enabled))
-+ return cnt;
-+
-+ if (enable) {
-+ int ret;
-+
-+ switch (ed->latency_type) {
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+ case PREEMPTIRQSOFF_LATENCY:
-+ ret = register_trace_preemptirqsoff_hist(
-+ probe_preemptirqsoff_hist, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_preemptirqsoff_hist "
-+ "to trace_preemptirqsoff_hist\n");
-+ return ret;
-+ }
-+ break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ case WAKEUP_LATENCY:
-+ ret = register_trace_sched_wakeup(
-+ probe_wakeup_latency_hist_start, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_wakeup_latency_hist_start "
-+ "to trace_sched_wakeup\n");
-+ return ret;
-+ }
-+ ret = register_trace_sched_wakeup_new(
-+ probe_wakeup_latency_hist_start, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_wakeup_latency_hist_start "
-+ "to trace_sched_wakeup_new\n");
-+ unregister_trace_sched_wakeup(
-+ probe_wakeup_latency_hist_start, NULL);
-+ return ret;
-+ }
-+ ret = register_trace_sched_switch(
-+ probe_wakeup_latency_hist_stop, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_wakeup_latency_hist_stop "
-+ "to trace_sched_switch\n");
-+ unregister_trace_sched_wakeup(
-+ probe_wakeup_latency_hist_start, NULL);
-+ unregister_trace_sched_wakeup_new(
-+ probe_wakeup_latency_hist_start, NULL);
-+ return ret;
-+ }
-+ ret = register_trace_sched_migrate_task(
-+ probe_sched_migrate_task, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_sched_migrate_task "
-+ "to trace_sched_migrate_task\n");
-+ unregister_trace_sched_wakeup(
-+ probe_wakeup_latency_hist_start, NULL);
-+ unregister_trace_sched_wakeup_new(
-+ probe_wakeup_latency_hist_start, NULL);
-+ unregister_trace_sched_switch(
-+ probe_wakeup_latency_hist_stop, NULL);
-+ return ret;
-+ }
-+ break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ case MISSED_TIMER_OFFSETS:
-+ ret = register_trace_hrtimer_interrupt(
-+ probe_hrtimer_interrupt, NULL);
-+ if (ret) {
-+ pr_info("wakeup trace: Couldn't assign "
-+ "probe_hrtimer_interrupt "
-+ "to trace_hrtimer_interrupt\n");
-+ return ret;
-+ }
-+ break;
-+#endif
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ case TIMERANDWAKEUP_LATENCY:
-+ if (!wakeup_latency_enabled_data.enabled ||
-+ !missed_timer_offsets_enabled_data.enabled)
-+ return -EINVAL;
-+ break;
-+#endif
-+ default:
-+ break;
-+ }
-+ } else {
-+ switch (ed->latency_type) {
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+ case PREEMPTIRQSOFF_LATENCY:
-+ {
-+ int cpu;
-+
-+ unregister_trace_preemptirqsoff_hist(
-+ probe_preemptirqsoff_hist, NULL);
-+ for_each_online_cpu(cpu) {
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ per_cpu(hist_irqsoff_counting,
-+ cpu) = 0;
-+#endif
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ per_cpu(hist_preemptoff_counting,
-+ cpu) = 0;
-+#endif
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+ per_cpu(hist_preemptirqsoff_counting,
-+ cpu) = 0;
-+#endif
-+ }
-+ }
-+ break;
-+#endif
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ case WAKEUP_LATENCY:
-+ {
-+ int cpu;
-+
-+ unregister_trace_sched_wakeup(
-+ probe_wakeup_latency_hist_start, NULL);
-+ unregister_trace_sched_wakeup_new(
-+ probe_wakeup_latency_hist_start, NULL);
-+ unregister_trace_sched_switch(
-+ probe_wakeup_latency_hist_stop, NULL);
-+ unregister_trace_sched_migrate_task(
-+ probe_sched_migrate_task, NULL);
-+
-+ for_each_online_cpu(cpu) {
-+ per_cpu(wakeup_task, cpu) = NULL;
-+ per_cpu(wakeup_sharedprio, cpu) = 0;
-+ }
-+ }
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ timerandwakeup_enabled_data.enabled = 0;
-+#endif
-+ break;
-+#endif
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ case MISSED_TIMER_OFFSETS:
-+ unregister_trace_hrtimer_interrupt(
-+ probe_hrtimer_interrupt, NULL);
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ timerandwakeup_enabled_data.enabled = 0;
-+#endif
-+ break;
-+#endif
-+ default:
-+ break;
-+ }
-+ }
-+ ed->enabled = enable;
-+ return cnt;
-+}
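
do_enable() encodes an ordering constraint worth spelling out: writing 1
to "timerandwakeup" returns -EINVAL unless both source histograms are
already on, and disabling either source clears
timerandwakeup_enabled_data.enabled again behind the user's back. A
hypothetical userspace helper that honours that order (the path assumes
debugfs mounted at /sys/kernel/debug and the "latency_hist" root created
by latency_hist_init() further down):

	#include <stdio.h>

	#define ENABLE_DIR "/sys/kernel/debug/tracing/latency_hist/enable/"

	/* hypothetical helper; file names match the debugfs entries above */
	static int hist_enable(const char *name, int on)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), ENABLE_DIR "%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", on);
		return fclose(f);
	}

	int main(void)
	{
		/* sources first, combined histogram last */
		if (hist_enable("wakeup", 1) ||
		    hist_enable("missed_timer_offsets", 1) ||
		    hist_enable("timerandwakeup", 1)) {
			perror("hist_enable");
			return 1;
		}
		return 0;
	}
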
-+
-+static const struct file_operations latency_hist_reset_fops = {
-+ .open = tracing_open_generic,
-+ .write = latency_hist_reset,
-+};
-+
-+static const struct file_operations enable_fops = {
-+ .open = tracing_open_generic,
-+ .read = show_enable,
-+ .write = do_enable,
-+};
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+static const struct file_operations pid_fops = {
-+ .open = tracing_open_generic,
-+ .read = show_pid,
-+ .write = do_pid,
-+};
-+
-+static const struct file_operations maxlatproc_fops = {
-+ .open = tracing_open_generic,
-+ .read = show_maxlatproc,
-+};
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(void *v, int reason,
-+ int starthist)
-+{
-+ int cpu = raw_smp_processor_id();
-+ int time_set = 0;
-+
-+ if (starthist) {
-+ cycle_t uninitialized_var(start);
-+
-+ if (!preempt_count() && !irqs_disabled())
-+ return;
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ if ((reason == IRQS_OFF || reason == TRACE_START) &&
-+ !per_cpu(hist_irqsoff_counting, cpu)) {
-+ per_cpu(hist_irqsoff_counting, cpu) = 1;
-+ start = ftrace_now(cpu);
-+ time_set++;
-+ per_cpu(hist_irqsoff_start, cpu) = start;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
-+ !per_cpu(hist_preemptoff_counting, cpu)) {
-+ per_cpu(hist_preemptoff_counting, cpu) = 1;
-+ if (!(time_set++))
-+ start = ftrace_now(cpu);
-+ per_cpu(hist_preemptoff_start, cpu) = start;
-+ }
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+ if (per_cpu(hist_irqsoff_counting, cpu) &&
-+ per_cpu(hist_preemptoff_counting, cpu) &&
-+ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
-+ if (!time_set)
-+ start = ftrace_now(cpu);
-+ per_cpu(hist_preemptirqsoff_start, cpu) = start;
-+ }
-+#endif
-+ } else {
-+ cycle_t uninitialized_var(stop);
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
-+ per_cpu(hist_irqsoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
-+
-+ stop = ftrace_now(cpu);
-+ time_set++;
-+ if (start) {
-+ long latency = ((long) (stop - start)) /
-+ NSECS_PER_USECS;
-+
-+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
-+ stop, NULL);
-+ }
-+ per_cpu(hist_irqsoff_counting, cpu) = 0;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
-+ per_cpu(hist_preemptoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
-+
-+ if (!(time_set++))
-+ stop = ftrace_now(cpu);
-+ if (start) {
-+ long latency = ((long) (stop - start)) /
-+ NSECS_PER_USECS;
-+
-+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
-+ 0, stop, NULL);
-+ }
-+ per_cpu(hist_preemptoff_counting, cpu) = 0;
-+ }
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
-+ !per_cpu(hist_preemptoff_counting, cpu)) &&
-+ per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
-+
-+ if (!time_set)
-+ stop = ftrace_now(cpu);
-+ if (start) {
-+ long latency = ((long) (stop - start)) /
-+ NSECS_PER_USECS;
-+
-+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
-+ latency, 0, stop, NULL);
-+ }
-+ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
-+ }
-+#endif
-+ }
-+}
-+#endif
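
The time_set counter in both halves above is a micro-optimisation: one
event can open or close several sections at once. For example, an
IRQS_OFF that arrives while preemption is already being counted starts
the irqsoff and the combined preemptirqsoff histograms together, and
time_set ensures ftrace_now() is read only once, with the same stamp
seeding every *_start (or *_stop) use for that event.
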
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+static DEFINE_RAW_SPINLOCK(wakeup_lock);
-+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
-+ int cpu)
-+{
-+ int old_cpu = task_cpu(task);
-+
-+ if (cpu != old_cpu) {
-+ unsigned long flags;
-+ struct task_struct *cpu_wakeup_task;
-+
-+ raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
-+ if (task == cpu_wakeup_task) {
-+ put_task_struct(cpu_wakeup_task);
-+ per_cpu(wakeup_task, old_cpu) = NULL;
-+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
-+ get_task_struct(cpu_wakeup_task);
-+ }
-+
-+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+ }
-+}
-+
-+static notrace void probe_wakeup_latency_hist_start(void *v,
-+ struct task_struct *p)
-+{
-+ unsigned long flags;
-+ struct task_struct *curr = current;
-+ int cpu = task_cpu(p);
-+ struct task_struct *cpu_wakeup_task;
-+
-+ raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-+
-+ if (wakeup_pid) {
-+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
-+ p->prio == curr->prio)
-+ per_cpu(wakeup_sharedprio, cpu) = 1;
-+ if (likely(wakeup_pid != task_pid_nr(p)))
-+ goto out;
-+ } else {
-+ if (likely(!rt_task(p)) ||
-+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
-+ p->prio > curr->prio)
-+ goto out;
-+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
-+ p->prio == curr->prio)
-+ per_cpu(wakeup_sharedprio, cpu) = 1;
-+ }
-+
-+ if (cpu_wakeup_task)
-+ put_task_struct(cpu_wakeup_task);
-+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
-+ get_task_struct(cpu_wakeup_task);
-+ cpu_wakeup_task->preempt_timestamp_hist =
-+ ftrace_now(raw_smp_processor_id());
-+out:
-+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+}
-+
-+static notrace void probe_wakeup_latency_hist_stop(void *v,
-+ struct task_struct *prev, struct task_struct *next)
-+{
-+ unsigned long flags;
-+ int cpu = task_cpu(next);
-+ long latency;
-+ cycle_t stop;
-+ struct task_struct *cpu_wakeup_task;
-+
-+ raw_spin_lock_irqsave(&wakeup_lock, flags);
-+
-+ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-+
-+ if (cpu_wakeup_task == NULL)
-+ goto out;
-+
-+ /* Already running? */
-+ if (unlikely(current == cpu_wakeup_task))
-+ goto out_reset;
-+
-+ if (next != cpu_wakeup_task) {
-+ if (next->prio < cpu_wakeup_task->prio)
-+ goto out_reset;
-+
-+ if (next->prio == cpu_wakeup_task->prio)
-+ per_cpu(wakeup_sharedprio, cpu) = 1;
-+
-+ goto out;
-+ }
-+
-+ if (current->prio == cpu_wakeup_task->prio)
-+ per_cpu(wakeup_sharedprio, cpu) = 1;
-+
-+ /*
-+ * The task we are waiting for is about to be switched to.
-+ * Calculate latency and store it in histogram.
-+ */
-+ stop = ftrace_now(raw_smp_processor_id());
-+
-+ latency = ((long) (stop - next->preempt_timestamp_hist)) /
-+ NSECS_PER_USECS;
-+
-+ if (per_cpu(wakeup_sharedprio, cpu)) {
-+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
-+ next);
-+ per_cpu(wakeup_sharedprio, cpu) = 0;
-+ } else {
-+ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ if (timerandwakeup_enabled_data.enabled) {
-+ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
-+ next->timer_offset + latency, next->timer_offset,
-+ stop, next);
-+ }
-+#endif
-+ }
-+
-+out_reset:
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ next->timer_offset = 0;
-+#endif
-+ put_task_struct(cpu_wakeup_task);
-+ per_cpu(wakeup_task, cpu) = NULL;
-+out:
-+ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-+}
-+#endif
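
Concretely: when an RT thread is woken, the start probe stamps
preempt_timestamp_hist = t0; when sched_switch finally runs it at t1,
the stop probe files (t1 - t0) / NSECS_PER_USECS microseconds into the
per-CPU wakeup histogram, or into the sharedprio variant when a task of
equal priority was involved, since that part of the delay is a
scheduling-policy effect rather than a kernel latency.
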
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+static notrace void probe_hrtimer_interrupt(void *v, int cpu,
-+ long long latency_ns, struct task_struct *curr,
-+ struct task_struct *task)
-+{
-+ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
-+ (task->prio < curr->prio ||
-+ (task->prio == curr->prio &&
-+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
-+ long latency;
-+ cycle_t now;
-+
-+ if (missed_timer_offsets_pid) {
-+ if (likely(missed_timer_offsets_pid !=
-+ task_pid_nr(task)))
-+ return;
-+ }
-+
-+ now = ftrace_now(cpu);
-+ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
-+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
-+ task);
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ task->timer_offset = latency;
-+#endif
-+ }
-+}
-+#endif
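
Sign convention for the hook above: as wired up elsewhere in this patch,
the hrtimer_interrupt tracepoint reports latency_ns <= 0 for a timer
that expired late. A callback that is, say, 250 us overdue arrives as
latency_ns == -250000, so div_s64(-latency_ns, NSECS_PER_USECS) files it
in the 250 us bucket, and the same value is parked in task->timer_offset
so that the following wakeup can be charged to the combined
timerandwakeup histogram.
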
-+
-+static __init int latency_hist_init(void)
-+{
-+ struct dentry *latency_hist_root = NULL;
-+ struct dentry *dentry;
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ struct dentry *dentry_sharedprio;
-+#endif
-+ struct dentry *entry;
-+ struct dentry *enable_root;
-+ int i = 0;
-+ struct hist_data *my_hist;
-+ char name[64];
-+ char *cpufmt = "CPU%d";
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ char *cpufmt_maxlatproc = "max_latency-CPU%d";
-+ struct maxlatproc_data *mp = NULL;
-+#endif
-+
-+ dentry = tracing_init_dentry();
-+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
-+ enable_root = debugfs_create_dir("enable", latency_hist_root);
-+
-+#ifdef CONFIG_INTERRUPT_OFF_HIST
-+ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
-+ my_hist = &per_cpu(irqsoff_hist, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+ }
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_OFF_HIST
-+ dentry = debugfs_create_dir(preemptoff_hist_dir,
-+ latency_hist_root);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
-+ my_hist = &per_cpu(preemptoff_hist, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+ }
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
-+ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
-+ latency_hist_root);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
-+ my_hist = &per_cpu(preemptirqsoff_hist, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+ }
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
-+#endif
-+
-+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+ entry = debugfs_create_file("preemptirqsoff", 0644,
-+ enable_root, (void *)&preemptirqsoff_enabled_data,
-+ &enable_fops);
-+#endif
-+
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
-+ latency_hist_root);
-+ dentry_sharedprio = debugfs_create_dir(
-+ wakeup_latency_hist_dir_sharedprio, dentry);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(wakeup_latency_hist, i),
-+ &latency_hist_fops);
-+ my_hist = &per_cpu(wakeup_latency_hist, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+
-+ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
-+ &per_cpu(wakeup_latency_hist_sharedprio, i),
-+ &latency_hist_fops);
-+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+
-+ sprintf(name, cpufmt_maxlatproc, i);
-+
-+ mp = &per_cpu(wakeup_maxlatproc, i);
-+ entry = debugfs_create_file(name, 0444, dentry, mp,
-+ &maxlatproc_fops);
-+ clear_maxlatprocdata(mp);
-+
-+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
-+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
-+ &maxlatproc_fops);
-+ clear_maxlatprocdata(mp);
-+ }
-+ entry = debugfs_create_file("pid", 0644, dentry,
-+ (void *)&wakeup_pid, &pid_fops);
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
-+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
-+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
-+ entry = debugfs_create_file("wakeup", 0644,
-+ enable_root, (void *)&wakeup_latency_enabled_data,
-+ &enable_fops);
-+#endif
-+
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ dentry = debugfs_create_dir(missed_timer_offsets_dir,
-+ latency_hist_root);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
-+ my_hist = &per_cpu(missed_timer_offsets, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+
-+ sprintf(name, cpufmt_maxlatproc, i);
-+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
-+ entry = debugfs_create_file(name, 0444, dentry, mp,
-+ &maxlatproc_fops);
-+ clear_maxlatprocdata(mp);
-+ }
-+ entry = debugfs_create_file("pid", 0644, dentry,
-+ (void *)&missed_timer_offsets_pid, &pid_fops);
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
-+ entry = debugfs_create_file("missed_timer_offsets", 0644,
-+ enable_root, (void *)&missed_timer_offsets_enabled_data,
-+ &enable_fops);
-+#endif
-+
-+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-+ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
-+ latency_hist_root);
-+ for_each_possible_cpu(i) {
-+ sprintf(name, cpufmt, i);
-+ entry = debugfs_create_file(name, 0444, dentry,
-+ &per_cpu(timerandwakeup_latency_hist, i),
-+ &latency_hist_fops);
-+ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
-+ atomic_set(&my_hist->hist_mode, 1);
-+ my_hist->min_lat = LONG_MAX;
-+
-+ sprintf(name, cpufmt_maxlatproc, i);
-+ mp = &per_cpu(timerandwakeup_maxlatproc, i);
-+ entry = debugfs_create_file(name, 0444, dentry, mp,
-+ &maxlatproc_fops);
-+ clear_maxlatprocdata(mp);
-+ }
-+ entry = debugfs_create_file("reset", 0644, dentry,
-+ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
-+ entry = debugfs_create_file("timerandwakeup", 0644,
-+ enable_root, (void *)&timerandwakeup_enabled_data,
-+ &enable_fops);
-+#endif
-+ return 0;
-+}
-+
-+device_initcall(latency_hist_init);
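
Taken together, latency_hist_init() publishes one directory per
histogram under the tracing debugfs root. Below, a hypothetical reader
that pins the wakeup histogram to one task, dumps CPU0 and clears the
histogram again; the per-line output format is an assumption here, since
latency_hist_seq_op's show routine sits outside this excerpt:

	#include <stdio.h>
	#include <stdlib.h>

	#define WAKEUP_DIR "/sys/kernel/debug/tracing/latency_hist/wakeup/"

	static void put_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			exit(1);
		}
		fputs(val, f);
		fclose(f);
	}

	int main(int argc, char **argv)
	{
		char line[256];
		FILE *f;

		/* only account wakeups of one pid; 0 means everyone */
		put_str(WAKEUP_DIR "pid", argc > 1 ? argv[1] : "0");

		/* assumed format: one "<bucket-usecs> <count>" pair per line */
		f = fopen(WAKEUP_DIR "CPU0", "r");
		if (!f) {
			perror(WAKEUP_DIR "CPU0");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);

		/* any write to "reset" zeroes this histogram on all CPUs */
		put_str(WAKEUP_DIR "reset", "1");
		return 0;
	}
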
-diff -Nur linux-4.1.13.orig/kernel/trace/Makefile linux-4.1.13/kernel/trace/Makefile
---- linux-4.1.13.orig/kernel/trace/Makefile 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/Makefile 2015-11-29 09:23:09.653611260 +0100
-@@ -36,6 +36,10 @@
- obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
-+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
-+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
- obj-$(CONFIG_NOP_TRACER) += trace_nop.o
- obj-$(CONFIG_STACK_TRACER) += trace_stack.o
- obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff -Nur linux-4.1.13.orig/kernel/trace/trace.c linux-4.1.13/kernel/trace/trace.c
---- linux-4.1.13.orig/kernel/trace/trace.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace.c 2015-11-29 09:23:09.653611260 +0100
-@@ -1630,6 +1630,7 @@
- struct task_struct *tsk = current;
-
- entry->preempt_count = pc & 0xff;
-+ entry->preempt_lazy_count = preempt_lazy_count();
- entry->pid = (tsk) ? tsk->pid : 0;
- entry->flags =
- #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1639,8 +1640,11 @@
- #endif
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
-- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
-+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
-+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
- (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
-+
-+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
- }
- EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-
-@@ -2558,14 +2562,17 @@
-
- static void print_lat_help_header(struct seq_file *m)
- {
-- seq_puts(m, "# _------=> CPU# \n"
-- "# / _-----=> irqs-off \n"
-- "# | / _----=> need-resched \n"
-- "# || / _---=> hardirq/softirq \n"
-- "# ||| / _--=> preempt-depth \n"
-- "# |||| / delay \n"
-- "# cmd pid ||||| time | caller \n"
-- "# \\ / ||||| \\ | / \n");
-+ seq_puts(m, "# _--------=> CPU# \n"
-+ "# / _-------=> irqs-off \n"
-+ "# | / _------=> need-resched \n"
-+ "# || / _-----=> need-resched_lazy \n"
-+ "# ||| / _----=> hardirq/softirq \n"
-+ "# |||| / _---=> preempt-depth \n"
-+ "# ||||| / _--=> preempt-lazy-depth\n"
-+ "# |||||| / _-=> migrate-disable \n"
-+ "# ||||||| / delay \n"
-+ "# cmd pid |||||||| time | caller \n"
-+ "# \\ / |||||||| \\ | / \n");
- }
-
- static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2591,11 +2598,14 @@
- print_event_info(buf, m);
- seq_puts(m, "# _-----=> irqs-off\n"
- "# / _----=> need-resched\n"
-- "# | / _---=> hardirq/softirq\n"
-- "# || / _--=> preempt-depth\n"
-- "# ||| / delay\n"
-- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
-- "# | | | |||| | |\n");
-+ "# |/ _-----=> need-resched_lazy\n"
-+ "# || / _---=> hardirq/softirq\n"
-+ "# ||| / _--=> preempt-depth\n"
-+ "# |||| /_--=> preempt-lazy-depth\n"
-+ "# ||||| _-=> migrate-disable \n"
-+ "# ||||| / delay\n"
-+ "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n"
-+ "# | | | |||||| | |\n");
- }
-
- void
-diff -Nur linux-4.1.13.orig/kernel/trace/trace_events.c linux-4.1.13/kernel/trace/trace_events.c
---- linux-4.1.13.orig/kernel/trace/trace_events.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace_events.c 2015-11-29 09:23:09.653611260 +0100
-@@ -162,6 +162,8 @@
- __common_field(unsigned char, flags);
- __common_field(unsigned char, preempt_count);
- __common_field(int, pid);
-+ __common_field(unsigned short, migrate_disable);
-+ __common_field(unsigned short, padding);
-
- return ret;
- }
-diff -Nur linux-4.1.13.orig/kernel/trace/trace.h linux-4.1.13/kernel/trace/trace.h
---- linux-4.1.13.orig/kernel/trace/trace.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace.h 2015-11-29 09:23:09.653611260 +0100
-@@ -120,6 +120,7 @@
- * NEED_RESCHED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
-+ * NEED_RESCHED_LAZY - lazy reschedule is requested
- */
- enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -128,6 +129,7 @@
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40,
- };
-
- #define TRACE_BUF_SIZE 1024
-diff -Nur linux-4.1.13.orig/kernel/trace/trace_irqsoff.c linux-4.1.13/kernel/trace/trace_irqsoff.c
---- linux-4.1.13.orig/kernel/trace/trace_irqsoff.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace_irqsoff.c 2015-11-29 09:23:09.653611260 +0100
-@@ -13,6 +13,7 @@
- #include <linux/uaccess.h>
- #include <linux/module.h>
- #include <linux/ftrace.h>
-+#include <trace/events/hist.h>
-
- #include "trace.h"
-
-@@ -433,11 +434,13 @@
- {
- if (preempt_trace() || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(TRACE_START, 1);
- }
- EXPORT_SYMBOL_GPL(start_critical_timings);
-
- void stop_critical_timings(void)
- {
-+ trace_preemptirqsoff_hist(TRACE_STOP, 0);
- if (preempt_trace() || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -447,6 +450,7 @@
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(a0, a1);
- }
-@@ -455,6 +459,7 @@
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
- }
-
- #else /* !CONFIG_PROVE_LOCKING */
-@@ -480,6 +485,7 @@
- */
- void trace_hardirqs_on(void)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -489,11 +495,13 @@
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off);
-
- __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, caller_addr);
- }
-@@ -503,6 +511,7 @@
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, caller_addr);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-@@ -512,12 +521,14 @@
- #ifdef CONFIG_PREEMPT_TRACER
- void trace_preempt_on(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
- if (preempt_trace() && !irq_trace())
- stop_critical_timing(a0, a1);
- }
-
- void trace_preempt_off(unsigned long a0, unsigned long a1)
- {
-+	trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
- if (preempt_trace() && !irq_trace())
- start_critical_timing(a0, a1);
- }
-diff -Nur linux-4.1.13.orig/kernel/trace/trace_output.c linux-4.1.13/kernel/trace/trace_output.c
---- linux-4.1.13.orig/kernel/trace/trace_output.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace_output.c 2015-11-29 09:23:09.653611260 +0100
-@@ -430,6 +430,7 @@
- {
- char hardsoft_irq;
- char need_resched;
-+ char need_resched_lazy;
- char irqs_off;
- int hardirq;
- int softirq;
-@@ -457,6 +458,8 @@
- need_resched = '.';
- break;
- }
-+ need_resched_lazy =
-+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
-
- hardsoft_irq =
- (hardirq && softirq) ? 'H' :
-@@ -464,14 +467,25 @@
- softirq ? 's' :
- '.';
-
-- trace_seq_printf(s, "%c%c%c",
-- irqs_off, need_resched, hardsoft_irq);
-+ trace_seq_printf(s, "%c%c%c%c",
-+ irqs_off, need_resched, need_resched_lazy,
-+ hardsoft_irq);
-
- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
- else
- trace_seq_putc(s, '.');
-
-+ if (entry->preempt_lazy_count)
-+ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
-+ else
-+ trace_seq_putc(s, '.');
-+
-+ if (entry->migrate_disable)
-+ trace_seq_printf(s, "%x", entry->migrate_disable);
-+ else
-+ trace_seq_putc(s, '.');
-+
- return !trace_seq_has_overflowed(s);
- }
-
-diff -Nur linux-4.1.13.orig/kernel/trace/trace_sched_switch.c linux-4.1.13/kernel/trace/trace_sched_switch.c
---- linux-4.1.13.orig/kernel/trace/trace_sched_switch.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace_sched_switch.c 2015-11-29 09:23:09.653611260 +0100
-@@ -26,7 +26,7 @@
- }
-
- static void
--probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
-+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
- {
- if (unlikely(!sched_ref))
- return;
-diff -Nur linux-4.1.13.orig/kernel/trace/trace_sched_wakeup.c linux-4.1.13/kernel/trace/trace_sched_wakeup.c
---- linux-4.1.13.orig/kernel/trace/trace_sched_wakeup.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/trace/trace_sched_wakeup.c 2015-11-29 09:23:09.657610995 +0100
-@@ -514,7 +514,7 @@
- }
-
- static void
--probe_wakeup(void *ignore, struct task_struct *p, int success)
-+probe_wakeup(void *ignore, struct task_struct *p)
- {
- struct trace_array_cpu *data;
- int cpu = smp_processor_id();
-diff -Nur linux-4.1.13.orig/kernel/user.c linux-4.1.13/kernel/user.c
---- linux-4.1.13.orig/kernel/user.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/user.c 2015-11-29 09:23:09.657610995 +0100
-@@ -161,11 +161,11 @@
- if (!up)
- return;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
- free_user(up, flags);
- else
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
-
- struct user_struct *alloc_uid(kuid_t uid)
-diff -Nur linux-4.1.13.orig/kernel/watchdog.c linux-4.1.13/kernel/watchdog.c
---- linux-4.1.13.orig/kernel/watchdog.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/watchdog.c 2015-11-29 09:23:09.657610995 +0100
-@@ -262,6 +262,8 @@
-
- #ifdef CONFIG_HARDLOCKUP_DETECTOR
-
-+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-+
- static struct perf_event_attr wd_hw_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -295,13 +297,21 @@
- /* only print hardlockups once */
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
-+ /*
-+ * If early-printk is enabled then make sure we do not
-+ * lock up in printk() and kill console logging:
-+ */
-+ printk_kill();
-
-- if (hardlockup_panic)
-+ if (hardlockup_panic) {
- panic("Watchdog detected hard LOCKUP on cpu %d",
- this_cpu);
-- else
-+ } else {
-+ raw_spin_lock(&watchdog_output_lock);
- WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
- this_cpu);
-+ raw_spin_unlock(&watchdog_output_lock);
-+ }
-
- __this_cpu_write(hard_watchdog_warn, true);
- return;
-@@ -444,6 +454,7 @@
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-+ hrtimer->irqsafe = 1;
-
- /* Enable the perf event */
- watchdog_nmi_enable(cpu);
-diff -Nur linux-4.1.13.orig/kernel/workqueue.c linux-4.1.13/kernel/workqueue.c
---- linux-4.1.13.orig/kernel/workqueue.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/workqueue.c 2015-11-29 09:23:09.657610995 +0100
-@@ -48,6 +48,8 @@
- #include <linux/nodemask.h>
- #include <linux/moduleparam.h>
- #include <linux/uaccess.h>
-+#include <linux/locallock.h>
-+#include <linux/delay.h>
-
- #include "workqueue_internal.h"
-
-@@ -121,15 +123,20 @@
- * cpu or grabbing pool->lock is enough for read access. If
- * POOL_DISASSOCIATED is set, it's identical to L.
- *
-+ * On RT we need the extra protection via rt_lock_idle_list() for
-+ * the list manipulations against read access from
-+ * wq_worker_sleeping(). All other places are nicely serialized via
-+ * pool->lock.
-+ *
- * A: pool->attach_mutex protected.
- *
- * PL: wq_pool_mutex protected.
- *
-- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
-+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
- *
- * WQ: wq->mutex protected.
- *
-- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
-+ * WR: wq->mutex protected for writes. RCU protected for reads.
- *
- * MD: wq_mayday_lock protected.
- */
-@@ -178,7 +185,7 @@
- atomic_t nr_running ____cacheline_aligned_in_smp;
-
- /*
-- * Destruction of pool is sched-RCU protected to allow dereferences
-+ * Destruction of pool is RCU protected to allow dereferences
- * from get_work_pool().
- */
- struct rcu_head rcu;
-@@ -207,7 +214,7 @@
- /*
- * Release of unbound pwq is punted to system_wq. See put_pwq()
- * and pwq_unbound_release_workfn() for details. pool_workqueue
-- * itself is also sched-RCU protected so that the first pwq can be
-+ * itself is also RCU protected so that the first pwq can be
- * determined without grabbing wq->mutex.
- */
- struct work_struct unbound_release_work;
-@@ -329,6 +336,8 @@
- struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
-
-+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
-+
- static int worker_thread(void *__worker);
- static void copy_workqueue_attrs(struct workqueue_attrs *to,
- const struct workqueue_attrs *from);
-@@ -338,14 +347,14 @@
- #include <trace/events/workqueue.h>
-
- #define assert_rcu_or_pool_mutex() \
-- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-+ rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&wq_pool_mutex), \
-- "sched RCU or wq_pool_mutex should be held")
-+ "RCU or wq_pool_mutex should be held")
-
- #define assert_rcu_or_wq_mutex(wq) \
-- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-+ rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&wq->mutex), \
-- "sched RCU or wq->mutex should be held")
-+ "RCU or wq->mutex should be held")
-
- #define for_each_cpu_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -357,7 +366,7 @@
- * @pool: iteration cursor
- * @pi: integer used for iteration
- *
-- * This must be called either with wq_pool_mutex held or sched RCU read
-+ * This must be called either with wq_pool_mutex held or RCU read
- * locked. If the pool needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pool stays online.
- *
-@@ -389,7 +398,7 @@
- * @pwq: iteration cursor
- * @wq: the target workqueue
- *
-- * This must be called either with wq->mutex held or sched RCU read locked.
-+ * This must be called either with wq->mutex held or RCU read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- *
-@@ -401,6 +410,31 @@
- if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
- else
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static inline void rt_lock_idle_list(struct worker_pool *pool)
-+{
-+ preempt_disable();
-+}
-+static inline void rt_unlock_idle_list(struct worker_pool *pool)
-+{
-+ preempt_enable();
-+}
-+static inline void sched_lock_idle_list(struct worker_pool *pool) { }
-+static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
-+#else
-+static inline void rt_lock_idle_list(struct worker_pool *pool) { }
-+static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
-+static inline void sched_lock_idle_list(struct worker_pool *pool)
-+{
-+ spin_lock_irq(&pool->lock);
-+}
-+static inline void sched_unlock_idle_list(struct worker_pool *pool)
-+{
-+ spin_unlock_irq(&pool->lock);
-+}
-+#endif
-+
-+
- #ifdef CONFIG_DEBUG_OBJECTS_WORK
-
- static struct debug_obj_descr work_debug_descr;
-@@ -551,7 +585,7 @@
- * @wq: the target workqueue
- * @node: the node ID
- *
-- * This must be called either with pwq_lock held or sched RCU read locked.
-+ * This must be called either with pwq_lock held or RCU read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- *
-@@ -655,8 +689,8 @@
- * @work: the work item of interest
- *
- * Pools are created and destroyed under wq_pool_mutex, and allows read
-- * access under sched-RCU read lock. As such, this function should be
-- * called under wq_pool_mutex or with preemption disabled.
-+ * access under RCU read lock. As such, this function should be
-+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
- *
- * All fields of the returned pool are accessible as long as the above
- * mentioned locking is in effect. If the returned pool needs to be used
-@@ -793,51 +827,44 @@
- */
- static void wake_up_worker(struct worker_pool *pool)
- {
-- struct worker *worker = first_idle_worker(pool);
-+ struct worker *worker;
-+
-+ rt_lock_idle_list(pool);
-+
-+ worker = first_idle_worker(pool);
-
- if (likely(worker))
- wake_up_process(worker->task);
-+
-+ rt_unlock_idle_list(pool);
- }
-
- /**
-- * wq_worker_waking_up - a worker is waking up
-- * @task: task waking up
-- * @cpu: CPU @task is waking up to
-+ * wq_worker_running - a worker is running again
-+ * @task: task returning from sleep
- *
-- * This function is called during try_to_wake_up() when a worker is
-- * being awoken.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
-+ * This function is called when a worker returns from schedule()
- */
--void wq_worker_waking_up(struct task_struct *task, int cpu)
-+void wq_worker_running(struct task_struct *task)
- {
- struct worker *worker = kthread_data(task);
-
-- if (!(worker->flags & WORKER_NOT_RUNNING)) {
-- WARN_ON_ONCE(worker->pool->cpu != cpu);
-+ if (!worker->sleeping)
-+ return;
-+ if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&worker->pool->nr_running);
-- }
-+ worker->sleeping = 0;
- }
-
- /**
- * wq_worker_sleeping - a worker is going to sleep
- * @task: task going to sleep
-- * @cpu: CPU in question, must be the current CPU number
-- *
-- * This function is called during schedule() when a busy worker is
-- * going to sleep. Worker on the same cpu can be woken up by
-- * returning pointer to its task.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
-- *
-- * Return:
-- * Worker task on @cpu to wake up, %NULL if none.
-+ * This function is called from schedule() when a busy worker is
-+ * going to sleep.
- */
--struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
-+void wq_worker_sleeping(struct task_struct *task)
- {
-- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-+ struct worker *worker = kthread_data(task);
- struct worker_pool *pool;
-
- /*
-@@ -846,29 +873,26 @@
- * checking NOT_RUNNING.
- */
- if (worker->flags & WORKER_NOT_RUNNING)
-- return NULL;
-+ return;
-
- pool = worker->pool;
-
-- /* this can only happen on the local cpu */
-- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
-- return NULL;
-+ if (WARN_ON_ONCE(worker->sleeping))
-+ return;
-+
-+ worker->sleeping = 1;
-
- /*
- * The counterpart of the following dec_and_test, implied mb,
- * worklist not empty test sequence is in insert_work().
- * Please read comment there.
-- *
-- * NOT_RUNNING is clear. This means that we're bound to and
-- * running on the local cpu w/ rq lock held and preemption
-- * disabled, which in turn means that none else could be
-- * manipulating idle_list, so dereferencing idle_list without pool
-- * lock is safe.
- */
- if (atomic_dec_and_test(&pool->nr_running) &&
-- !list_empty(&pool->worklist))
-- to_wakeup = first_idle_worker(pool);
-- return to_wakeup ? to_wakeup->task : NULL;
-+ !list_empty(&pool->worklist)) {
-+ sched_lock_idle_list(pool);
-+ wake_up_worker(pool);
-+ sched_unlock_idle_list(pool);
-+ }
- }
-
- /**
-@@ -1062,12 +1086,12 @@
- {
- if (pwq) {
- /*
-- * As both pwqs and pools are sched-RCU protected, the
-+ * As both pwqs and pools are RCU protected, the
- * following lock operations are safe.
- */
-- spin_lock_irq(&pwq->pool->lock);
-+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
- put_pwq(pwq);
-- spin_unlock_irq(&pwq->pool->lock);
-+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
- }
- }
-
-@@ -1169,7 +1193,7 @@
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
-
-- local_irq_save(*flags);
-+ local_lock_irqsave(pendingb_lock, *flags);
-
- /* try to steal the timer if it exists */
- if (is_dwork) {
-@@ -1188,6 +1212,7 @@
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
- return 0;
-
-+ rcu_read_lock();
- /*
- * The queueing is in progress, or it is already queued. Try to
- * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1226,14 +1251,16 @@
- set_work_pool_and_keep_pending(work, pool->id);
-
- spin_unlock(&pool->lock);
-+ rcu_read_unlock();
- return 1;
- }
- spin_unlock(&pool->lock);
- fail:
-- local_irq_restore(*flags);
-+ rcu_read_unlock();
-+ local_unlock_irqrestore(pendingb_lock, *flags);
- if (work_is_canceling(work))
- return -ENOENT;
-- cpu_relax();
-+ cpu_chill();
- return -EAGAIN;
- }
-
-@@ -1302,7 +1329,7 @@
- * queued or lose PENDING. Grabbing PENDING and queueing should
- * happen with IRQ disabled.
- */
-- WARN_ON_ONCE(!irqs_disabled());
-+ WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- debug_work_activate(work);
-
-@@ -1310,6 +1337,8 @@
- if (unlikely(wq->flags & __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq)))
- return;
-+
-+ rcu_read_lock();
- retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
-@@ -1366,10 +1395,8 @@
- /* pwq determined, queue */
- trace_workqueue_queue_work(req_cpu, pwq, work);
-
-- if (WARN_ON(!list_empty(&work->entry))) {
-- spin_unlock(&pwq->pool->lock);
-- return;
-- }
-+ if (WARN_ON(!list_empty(&work->entry)))
-+ goto out;
-
- pwq->nr_in_flight[pwq->work_color]++;
- work_flags = work_color_to_flags(pwq->work_color);
-@@ -1385,7 +1412,9 @@
-
- insert_work(pwq, work, worklist, work_flags);
-
-+out:
- spin_unlock(&pwq->pool->lock);
-+ rcu_read_unlock();
- }
-
- /**
-@@ -1405,14 +1434,14 @@
- bool ret = false;
- unsigned long flags;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pendingb_lock,flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = true;
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_work_on);
-@@ -1479,14 +1508,14 @@
- unsigned long flags;
-
- /* read the comment in __queue_work() */
-- local_irq_save(flags);
-+ local_lock_irqsave(pendingb_lock, flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- ret = true;
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1521,7 +1550,7 @@
-
- if (likely(ret >= 0)) {
- __queue_delayed_work(cpu, wq, dwork, delay);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- }
-
- /* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1554,7 +1583,9 @@
- worker->last_active = jiffies;
-
- /* idle_list is LIFO */
-+ rt_lock_idle_list(pool);
- list_add(&worker->entry, &pool->idle_list);
-+ rt_unlock_idle_list(pool);
-
- if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
- mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1587,7 +1618,9 @@
- return;
- worker_clr_flags(worker, WORKER_IDLE);
- pool->nr_idle--;
-+ rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
-+ rt_unlock_idle_list(pool);
- }
-
- static struct worker *alloc_worker(int node)
-@@ -1755,7 +1788,9 @@
- pool->nr_workers--;
- pool->nr_idle--;
-
-+ rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
-+ rt_unlock_idle_list(pool);
- worker->flags |= WORKER_DIE;
- wake_up_process(worker->task);
- }
-@@ -2672,14 +2707,14 @@
-
- might_sleep();
-
-- local_irq_disable();
-+ rcu_read_lock();
- pool = get_work_pool(work);
- if (!pool) {
-- local_irq_enable();
-+ rcu_read_unlock();
- return false;
- }
-
-- spin_lock(&pool->lock);
-+ spin_lock_irq(&pool->lock);
- /* see the comment in try_to_grab_pending() with the same code */
- pwq = get_work_pwq(work);
- if (pwq) {
-@@ -2706,10 +2741,11 @@
- else
- lock_map_acquire_read(&pwq->wq->lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
--
-+ rcu_read_unlock();
- return true;
- already_gone:
- spin_unlock_irq(&pool->lock);
-+ rcu_read_unlock();
- return false;
- }
-
-@@ -2796,7 +2832,7 @@
-
- /* tell other tasks trying to grab @work to back off */
- mark_work_canceling(work);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
-
- flush_work(work);
- clear_work_data(work);
-@@ -2851,10 +2887,10 @@
- */
- bool flush_delayed_work(struct delayed_work *dwork)
- {
-- local_irq_disable();
-+ local_lock_irq(pendingb_lock);
- if (del_timer_sync(&dwork->timer))
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
-- local_irq_enable();
-+ local_unlock_irq(pendingb_lock);
- return flush_work(&dwork->work);
- }
- EXPORT_SYMBOL(flush_delayed_work);
-@@ -2889,7 +2925,7 @@
-
- set_work_pool_and_clear_pending(&dwork->work,
- get_work_pool_id(&dwork->work));
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(cancel_delayed_work);
-@@ -3147,7 +3183,7 @@
- * put_unbound_pool - put a worker_pool
- * @pool: worker_pool to put
- *
-- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
-+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
- * safe manner. get_unbound_pool() calls this function on its failure path
- * and this function should be able to release pools which went through,
- * successfully or not, init_worker_pool().
-@@ -3201,8 +3237,8 @@
- del_timer_sync(&pool->idle_timer);
- del_timer_sync(&pool->mayday_timer);
-
-- /* sched-RCU protected to allow dereferences from get_work_pool() */
-- call_rcu_sched(&pool->rcu, rcu_free_pool);
-+ /* RCU protected to allow dereferences from get_work_pool() */
-+ call_rcu(&pool->rcu, rcu_free_pool);
- }
-
- /**
-@@ -3307,14 +3343,14 @@
- put_unbound_pool(pool);
- mutex_unlock(&wq_pool_mutex);
-
-- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
-+ call_rcu(&pwq->rcu, rcu_free_pwq);
-
- /*
- * If we're the last pwq going away, @wq is already dead and no one
- * is gonna access it anymore. Schedule RCU free.
- */
- if (is_last)
-- call_rcu_sched(&wq->rcu, rcu_free_wq);
-+ call_rcu(&wq->rcu, rcu_free_wq);
- }
-
- /**
-@@ -3920,7 +3956,7 @@
- * The base ref is never dropped on per-cpu pwqs. Directly
- * schedule RCU free.
- */
-- call_rcu_sched(&wq->rcu, rcu_free_wq);
-+ call_rcu(&wq->rcu, rcu_free_wq);
- } else {
- /*
- * We're the sole accessor of @wq at this point. Directly
-@@ -4013,7 +4049,8 @@
- struct pool_workqueue *pwq;
- bool ret;
-
-- rcu_read_lock_sched();
-+ rcu_read_lock();
-+ preempt_disable();
-
- if (cpu == WORK_CPU_UNBOUND)
- cpu = smp_processor_id();
-@@ -4024,7 +4061,8 @@
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
-
- ret = !list_empty(&pwq->delayed_works);
-- rcu_read_unlock_sched();
-+ preempt_enable();
-+ rcu_read_unlock();
-
- return ret;
- }
-@@ -4050,15 +4088,15 @@
- if (work_pending(work))
- ret |= WORK_BUSY_PENDING;
-
-- local_irq_save(flags);
-+ rcu_read_lock();
- pool = get_work_pool(work);
- if (pool) {
-- spin_lock(&pool->lock);
-+ spin_lock_irqsave(&pool->lock, flags);
- if (find_worker_executing_work(pool, work))
- ret |= WORK_BUSY_RUNNING;
-- spin_unlock(&pool->lock);
-+ spin_unlock_irqrestore(&pool->lock, flags);
- }
-- local_irq_restore(flags);
-+ rcu_read_unlock();
-
- return ret;
- }
-@@ -4247,7 +4285,7 @@
- unsigned long flags;
- int pi;
-
-- rcu_read_lock_sched();
-+ rcu_read_lock();
-
- pr_info("Showing busy workqueues and worker pools:\n");
-
-@@ -4298,7 +4336,7 @@
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- }
-
- /*
-@@ -4648,16 +4686,16 @@
- * nr_active is monotonically decreasing. It's safe
- * to peek without lock.
- */
-- rcu_read_lock_sched();
-+ rcu_read_lock();
- for_each_pwq(pwq, wq) {
- WARN_ON_ONCE(pwq->nr_active < 0);
- if (pwq->nr_active) {
- busy = true;
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- goto out_unlock;
- }
- }
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- }
- out_unlock:
- mutex_unlock(&wq_pool_mutex);
-@@ -4771,7 +4809,8 @@
- const char *delim = "";
- int node, written = 0;
-
-- rcu_read_lock_sched();
-+ get_online_cpus();
-+ rcu_read_lock();
- for_each_node(node) {
- written += scnprintf(buf + written, PAGE_SIZE - written,
- "%s%d:%d", delim, node,
-@@ -4779,7 +4818,8 @@
- delim = " ";
- }
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
-+ put_online_cpus();
-
- return written;
- }
-diff -Nur linux-4.1.13.orig/kernel/workqueue_internal.h linux-4.1.13/kernel/workqueue_internal.h
---- linux-4.1.13.orig/kernel/workqueue_internal.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/kernel/workqueue_internal.h 2015-11-29 09:23:09.657610995 +0100
-@@ -43,6 +43,7 @@
- unsigned long last_active; /* L: last active timestamp */
- unsigned int flags; /* X: flags */
- int id; /* I: worker id */
-+ int sleeping; /* None */
-
- /*
- * Opaque string set with work_set_desc(). Printed out with task
-@@ -68,7 +69,7 @@
- * Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched/core.c and workqueue.c.
- */
--void wq_worker_waking_up(struct task_struct *task, int cpu);
--struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
-+void wq_worker_running(struct task_struct *task);
-+void wq_worker_sleeping(struct task_struct *task);
-
- #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
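
The renamed hooks only make sense next to their call sites, which this
excerpt does not show: elsewhere in the series, kernel/sched/core.c is
patched to invoke the pair from the schedule() path instead of from
under the runqueue lock. A sketch of the expected shape (illustrative,
not the series' verbatim hunk):

	/* illustrative callers, kernel/sched/core.c */
	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (!tsk->state)
			return;
		/*
		 * A worker going to sleep; this runs preemptible, so
		 * wq_worker_sleeping() may take pool->lock itself on RT.
		 */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);
	}

	static inline void sched_update_worker(struct task_struct *tsk)
	{
		/* the worker is back on a CPU; fix up nr_running */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
	}
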
-diff -Nur linux-4.1.13.orig/lib/debugobjects.c linux-4.1.13/lib/debugobjects.c
---- linux-4.1.13.orig/lib/debugobjects.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/debugobjects.c 2015-11-29 09:23:09.657610995 +0100
-@@ -309,7 +309,10 @@
- struct debug_obj *obj;
- unsigned long flags;
-
-- fill_pool();
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (preempt_count() == 0 && !irqs_disabled())
-+#endif
-+ fill_pool();
-
- db = get_bucket((unsigned long) addr);
-
-diff -Nur linux-4.1.13.orig/lib/dump_stack.c linux-4.1.13/lib/dump_stack.c
---- linux-4.1.13.orig/lib/dump_stack.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/dump_stack.c 2015-11-29 09:23:09.657610995 +0100
-@@ -33,7 +33,7 @@
- * Permit this cpu to perform nested stack dumps while serialising
- * against other CPUs
- */
-- preempt_disable();
-+ migrate_disable();
-
- retry:
- cpu = smp_processor_id();
-@@ -52,7 +52,7 @@
- if (!was_locked)
- atomic_set(&dump_lock, -1);
-
-- preempt_enable();
-+ migrate_enable();
- }
- #else
- asmlinkage __visible void dump_stack(void)
-diff -Nur linux-4.1.13.orig/lib/idr.c linux-4.1.13/lib/idr.c
---- linux-4.1.13.orig/lib/idr.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/idr.c 2015-11-29 09:23:09.657610995 +0100
-@@ -30,6 +30,7 @@
- #include <linux/idr.h>
- #include <linux/spinlock.h>
- #include <linux/percpu.h>
-+#include <linux/locallock.h>
-
- #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
- #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-@@ -366,6 +367,35 @@
- idr_mark_full(pa, id);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
-+
-+static inline void idr_preload_lock(void)
-+{
-+ local_lock(idr_lock);
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+ local_unlock(idr_lock);
-+}
-+
-+void idr_preload_end(void)
-+{
-+ idr_preload_unlock();
-+}
-+EXPORT_SYMBOL(idr_preload_end);
-+#else
-+static inline void idr_preload_lock(void)
-+{
-+ preempt_disable();
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+ preempt_enable();
-+}
-+#endif
-
- /**
- * idr_preload - preload for idr_alloc()
-@@ -401,7 +431,7 @@
- WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfp_mask & __GFP_WAIT);
-
-- preempt_disable();
-+ idr_preload_lock();
-
- /*
- * idr_alloc() is likely to succeed w/o full idr_layer buffer and
-@@ -413,9 +443,9 @@
- while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- struct idr_layer *new;
-
-- preempt_enable();
-+ idr_preload_unlock();
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-- preempt_disable();
-+ idr_preload_lock();
- if (!new)
- break;
-
-diff -Nur linux-4.1.13.orig/lib/Kconfig linux-4.1.13/lib/Kconfig
---- linux-4.1.13.orig/lib/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/Kconfig 2015-11-29 09:23:09.657610995 +0100
-@@ -391,6 +391,7 @@
-
- config CPUMASK_OFFSTACK
- bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
-+ depends on !PREEMPT_RT_FULL
- help
- Use dynamic allocation for cpumask_var_t, instead of putting
- them on the stack. This is a bit more expensive, but avoids
-diff -Nur linux-4.1.13.orig/lib/locking-selftest.c linux-4.1.13/lib/locking-selftest.c
---- linux-4.1.13.orig/lib/locking-selftest.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/locking-selftest.c 2015-11-29 09:23:09.657610995 +0100
-@@ -590,6 +590,8 @@
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-
-@@ -605,9 +607,12 @@
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Enabling hardirqs with a softirq-safe lock held:
- */
-@@ -640,6 +645,8 @@
- #undef E1
- #undef E2
-
-+#endif
-+
- /*
- * Enabling irqs with an irq-safe lock held:
- */
-@@ -663,6 +670,8 @@
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-
-@@ -678,6 +687,8 @@
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
-
-@@ -709,6 +720,8 @@
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-
-@@ -724,6 +737,8 @@
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
- #undef E3
-@@ -757,6 +772,8 @@
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-
-@@ -772,10 +789,14 @@
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
- #undef E3
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- /*
- * read-lock / write-lock irq inversion.
- *
-@@ -838,6 +859,10 @@
- #undef E2
- #undef E3
-
-+#endif
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+
- /*
- * read-lock / write-lock recursion that is actually safe.
- */
-@@ -876,6 +901,8 @@
- #undef E2
- #undef E3
-
-+#endif
-+
- /*
- * read-lock / write-lock recursion that is unsafe.
- */
-@@ -1858,6 +1885,7 @@
-
- printk(" --------------------------------------------------------------------------\n");
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * irq-context testcases:
- */
-@@ -1870,6 +1898,28 @@
-
- DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
- // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
-+#else
-+ /* On -rt, we only do hardirq context test for raw spinlock */
-+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
-+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
-+
-+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
-+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
-+
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
-+
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
-+#endif
-
- ww_tests();
-
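
The selftest changes above all follow one pattern: on PREEMPT_RT_FULL, spinlock_t and rwlock_t are backed by rtmutexes and may sleep, so every irq-context permutation except the raw-spinlock cases is compiled out and replaced by the explicit DO_TESTCASE_1B list. A minimal sketch of the one lock class that keeps hardirq semantics on RT (illustrative only, not part of the patch):

/* raw_spinlock_t is the only spinning lock left on PREEMPT_RT_FULL;
 * it still disables interrupts, so hardirq lockdep tests stay valid. */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_hardirq_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* critical section runs with hardirqs off, even on RT */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}
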
-diff -Nur linux-4.1.13.orig/lib/percpu_ida.c linux-4.1.13/lib/percpu_ida.c
---- linux-4.1.13.orig/lib/percpu_ida.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/percpu_ida.c 2015-11-29 09:23:09.657610995 +0100
-@@ -26,6 +26,9 @@
- #include <linux/string.h>
- #include <linux/spinlock.h>
- #include <linux/percpu_ida.h>
-+#include <linux/locallock.h>
-+
-+static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
-
- struct percpu_ida_cpu {
- /*
-@@ -148,13 +151,13 @@
- unsigned long flags;
- int tag;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(irq_off_lock, flags);
- return tag;
- }
-
-@@ -173,6 +176,7 @@
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
-+
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
-@@ -184,7 +188,7 @@
- }
-
- spin_unlock(&pool->lock);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(irq_off_lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-@@ -196,7 +200,7 @@
-
- schedule();
-
-- local_irq_save(flags);
-+ local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
- }
- if (state != TASK_RUNNING)
-@@ -221,7 +225,7 @@
-
- BUG_ON(tag >= pool->nr_tags);
-
-- local_irq_save(flags);
-+ local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- spin_lock(&tags->lock);
-@@ -253,7 +257,7 @@
- spin_unlock(&pool->lock);
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(irq_off_lock, flags);
- }
- EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-@@ -345,7 +349,7 @@
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(irq_off_lock, flags);
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
-@@ -367,7 +371,7 @@
- }
- spin_unlock(&pool->lock);
- out:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(irq_off_lock, flags);
- return err;
- }
- EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
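
The percpu_ida hunk is the canonical locallock substitution used throughout this patch: local_irq_save() becomes local_lock_irqsave() on a DEFINE_LOCAL_IRQ_LOCK instance, which still compiles to a plain irq-save on !RT but takes a per-CPU sleeping lock on RT, keeping the section preemptible. The pattern in isolation (demo_lock and demo_count are illustrative names):

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
static DEFINE_PER_CPU(int, demo_count);

static void demo_bump(void)
{
	unsigned long flags;

	/* irq-save on !RT, per-CPU rtmutex on RT */
	local_lock_irqsave(demo_lock, flags);
	__this_cpu_inc(demo_count);
	local_unlock_irqrestore(demo_lock, flags);
}
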
-diff -Nur linux-4.1.13.orig/lib/radix-tree.c linux-4.1.13/lib/radix-tree.c
---- linux-4.1.13.orig/lib/radix-tree.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/radix-tree.c 2015-11-29 09:23:09.657610995 +0100
-@@ -195,12 +195,13 @@
- * succeed in getting a node here (and never reach
- * kmem_cache_alloc)
- */
-- rtp = this_cpu_ptr(&radix_tree_preloads);
-+ rtp = &get_cpu_var(radix_tree_preloads);
- if (rtp->nr) {
- ret = rtp->nodes[rtp->nr - 1];
- rtp->nodes[rtp->nr - 1] = NULL;
- rtp->nr--;
- }
-+ put_cpu_var(radix_tree_preloads);
- /*
- * Update the allocation stack trace as this is more useful
- * for debugging.
-@@ -240,6 +241,7 @@
- call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Load up this CPU's radix_tree_node buffer with sufficient objects to
- * ensure that the addition of a single element in the tree cannot fail. On
-@@ -305,6 +307,7 @@
- return 0;
- }
- EXPORT_SYMBOL(radix_tree_maybe_preload);
-+#endif
-
- /*
- * Return the maximum key which can be store into a
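
The radix-tree fix is narrower: this_cpu_ptr() by itself does not pin the task to a CPU, so the preload consumption is bracketed with get_cpu_var()/put_cpu_var(), which disable and re-enable preemption around the per-CPU access. The same idiom in isolation (demo_counter is an illustrative name):

static DEFINE_PER_CPU(int, demo_counter);

static void demo(void)
{
	int *p = &get_cpu_var(demo_counter);	/* disables preemption */

	(*p)++;					/* per-CPU RMW is now safe */
	put_cpu_var(demo_counter);		/* re-enables preemption */
}
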
-diff -Nur linux-4.1.13.orig/lib/scatterlist.c linux-4.1.13/lib/scatterlist.c
---- linux-4.1.13.orig/lib/scatterlist.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/scatterlist.c 2015-11-29 09:23:09.657610995 +0100
-@@ -592,7 +592,7 @@
- flush_kernel_dcache_page(miter->page);
-
- if (miter->__flags & SG_MITER_ATOMIC) {
-- WARN_ON_ONCE(preemptible());
-+ WARN_ON_ONCE(!pagefault_disabled());
- kunmap_atomic(miter->addr);
- } else
- kunmap(miter->page);
-@@ -637,7 +637,7 @@
- if (!sg_miter_skip(&miter, skip))
- return false;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
-
- while (sg_miter_next(&miter) && offset < buflen) {
- unsigned int len;
-@@ -654,7 +654,7 @@
-
- sg_miter_stop(&miter);
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- return offset;
- }
-
-diff -Nur linux-4.1.13.orig/lib/smp_processor_id.c linux-4.1.13/lib/smp_processor_id.c
---- linux-4.1.13.orig/lib/smp_processor_id.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/smp_processor_id.c 2015-11-29 09:23:09.657610995 +0100
-@@ -39,8 +39,9 @@
- if (!printk_ratelimit())
- goto out_enable;
-
-- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
-- what1, what2, preempt_count() - 1, current->comm, current->pid);
-+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
-+ what1, what2, preempt_count() - 1, __migrate_disabled(current),
-+ current->comm, current->pid);
-
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
-diff -Nur linux-4.1.13.orig/lib/strnlen_user.c linux-4.1.13/lib/strnlen_user.c
---- linux-4.1.13.orig/lib/strnlen_user.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/lib/strnlen_user.c 2015-11-29 09:23:09.657610995 +0100
-@@ -85,7 +85,8 @@
- * @str: The string to measure.
- * @count: Maximum count (including NUL character)
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-@@ -121,7 +122,8 @@
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-diff -Nur linux-4.1.13.orig/mm/compaction.c linux-4.1.13/mm/compaction.c
---- linux-4.1.13.orig/mm/compaction.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/compaction.c 2015-11-29 09:23:09.661610728 +0100
-@@ -1406,10 +1406,12 @@
- cc->migrate_pfn & ~((1UL << cc->order) - 1);
-
- if (last_migrated_pfn < current_block_start) {
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
-+ local_lock_irq(swapvec_lock);
- lru_add_drain_cpu(cpu);
-+ local_unlock_irq(swapvec_lock);
- drain_local_pages(zone);
-- put_cpu();
-+ put_cpu_light();
- /* No more flushing until we migrate again */
- last_migrated_pfn = 0;
- }
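
get_cpu_light()/put_cpu_light() show up wherever code only needs to stay on one CPU but must remain preemptible on RT; on !RT they degrade to get_cpu()/put_cpu(). Any per-CPU state touched inside such a section then needs its own lock, which is why the compaction hunk also wraps lru_add_drain_cpu() in swapvec_lock. A sketch of the idiom (demo() is illustrative):

static void demo(void)
{
	int cpu = get_cpu_light();	/* pins to this CPU, no preempt_disable() on RT */

	pr_debug("draining on cpu %d\n", cpu);
	/* per-CPU work that may take sleeping locks on RT goes here */
	put_cpu_light();
}
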
-diff -Nur linux-4.1.13.orig/mm/filemap.c linux-4.1.13/mm/filemap.c
---- linux-4.1.13.orig/mm/filemap.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/filemap.c 2015-11-29 09:23:09.661610728 +0100
-@@ -167,7 +167,9 @@
- if (!workingset_node_pages(node) &&
- list_empty(&node->private_list)) {
- node->private_data = mapping;
-- list_lru_add(&workingset_shadow_nodes, &node->private_list);
-+ local_lock(workingset_shadow_lock);
-+ list_lru_add(&__workingset_shadow_nodes, &node->private_list);
-+ local_unlock(workingset_shadow_lock);
- }
- }
-
-@@ -533,9 +535,12 @@
- * node->private_list is protected by
- * mapping->tree_lock.
- */
-- if (!list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes,
-+ if (!list_empty(&node->private_list)) {
-+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
-+ local_unlock(workingset_shadow_lock);
-+ }
- }
- return 0;
- }
-diff -Nur linux-4.1.13.orig/mm/highmem.c linux-4.1.13/mm/highmem.c
---- linux-4.1.13.orig/mm/highmem.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/highmem.c 2015-11-29 09:23:09.661610728 +0100
-@@ -29,10 +29,11 @@
- #include <linux/kgdb.h>
- #include <asm/tlbflush.h>
-
--
-+#ifndef CONFIG_PREEMPT_RT_FULL
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
- DEFINE_PER_CPU(int, __kmap_atomic_idx);
- #endif
-+#endif
-
- /*
- * Virtual_count is not a pure "count".
-@@ -107,8 +108,9 @@
- unsigned long totalhigh_pages __read_mostly;
- EXPORT_SYMBOL(totalhigh_pages);
-
--
-+#ifndef CONFIG_PREEMPT_RT_FULL
- EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
-+#endif
-
- unsigned int nr_free_highpages (void)
- {
-diff -Nur linux-4.1.13.orig/mm/Kconfig linux-4.1.13/mm/Kconfig
---- linux-4.1.13.orig/mm/Kconfig 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/Kconfig 2015-11-29 09:23:09.661610728 +0100
-@@ -409,7 +409,7 @@
-
- config TRANSPARENT_HUGEPAGE
- bool "Transparent Hugepage Support"
-- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
-+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
- select COMPACTION
- help
- Transparent Hugepages allows the kernel to use huge pages and
-diff -Nur linux-4.1.13.orig/mm/memcontrol.c linux-4.1.13/mm/memcontrol.c
---- linux-4.1.13.orig/mm/memcontrol.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/memcontrol.c 2015-11-29 09:23:09.661610728 +0100
-@@ -66,6 +66,8 @@
- #include <net/sock.h>
- #include <net/ip.h>
- #include <net/tcp_memcontrol.h>
-+#include <linux/locallock.h>
-+
- #include "slab.h"
-
- #include <asm/uaccess.h>
-@@ -85,6 +87,7 @@
- #define do_swap_account 0
- #endif
-
-+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
- static const char * const mem_cgroup_stat_names[] = {
- "cache",
- "rss",
-@@ -2124,14 +2127,17 @@
- */
- static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- {
-- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
-+ struct memcg_stock_pcp *stock;
-+ int cpu = get_cpu_light();
-+
-+ stock = &per_cpu(memcg_stock, cpu);
-
- if (stock->cached != memcg) { /* reset if necessary */
- drain_stock(stock);
- stock->cached = memcg;
- }
- stock->nr_pages += nr_pages;
-- put_cpu_var(memcg_stock);
-+ put_cpu_light();
- }
-
- /*
-@@ -2147,7 +2153,7 @@
- return;
- /* Notify other cpus that system-wide "drain" is running */
- get_online_cpus();
-- curcpu = get_cpu();
-+ curcpu = get_cpu_light();
- for_each_online_cpu(cpu) {
- struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
- struct mem_cgroup *memcg;
-@@ -2164,7 +2170,7 @@
- schedule_work_on(cpu, &stock->work);
- }
- }
-- put_cpu();
-+ put_cpu_light();
- put_online_cpus();
- mutex_unlock(&percpu_charge_mutex);
- }
-@@ -4802,12 +4808,12 @@
-
- ret = 0;
-
-- local_irq_disable();
-+ local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(to, page, nr_pages);
- memcg_check_events(to, page);
- mem_cgroup_charge_statistics(from, page, -nr_pages);
- memcg_check_events(from, page);
-- local_irq_enable();
-+ local_unlock_irq(event_lock);
- out_unlock:
- unlock_page(page);
- out:
-@@ -5544,10 +5550,10 @@
- VM_BUG_ON_PAGE(!PageTransHuge(page), page);
- }
-
-- local_irq_disable();
-+ local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(memcg, page, nr_pages);
- memcg_check_events(memcg, page);
-- local_irq_enable();
-+ local_unlock_irq(event_lock);
-
- if (do_swap_account && PageSwapCache(page)) {
- swp_entry_t entry = { .val = page_private(page) };
-@@ -5603,14 +5609,14 @@
- memcg_oom_recover(memcg);
- }
-
-- local_irq_save(flags);
-+ local_lock_irqsave(event_lock, flags);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
- __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
- memcg_check_events(memcg, dummy_page);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(event_lock, flags);
-
- if (!mem_cgroup_is_root(memcg))
- css_put_many(&memcg->css, nr_pages);
-@@ -5814,6 +5820,7 @@
- {
- struct mem_cgroup *memcg;
- unsigned short oldid;
-+ unsigned long flags;
-
- VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5836,9 +5843,11 @@
- if (!mem_cgroup_is_root(memcg))
- page_counter_uncharge(&memcg->memory, 1);
-
-+ local_lock_irqsave(event_lock, flags);
- /* Caller disabled preemption with mapping->tree_lock */
- mem_cgroup_charge_statistics(memcg, page, -1);
- memcg_check_events(memcg, page);
-+ local_unlock_irqrestore(event_lock, flags);
- }
-
- /**
-diff -Nur linux-4.1.13.orig/mm/memory.c linux-4.1.13/mm/memory.c
---- linux-4.1.13.orig/mm/memory.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/memory.c 2015-11-29 09:23:09.661610728 +0100
-@@ -3743,7 +3743,7 @@
- }
-
- #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
--void might_fault(void)
-+void __might_fault(const char *file, int line)
- {
- /*
- * Some code (nfs/sunrpc) uses socket ops on kernel memory while
-@@ -3753,21 +3753,15 @@
- */
- if (segment_eq(get_fs(), KERNEL_DS))
- return;
--
-- /*
-- * it would be nicer only to annotate paths which are not under
-- * pagefault_disable, however that requires a larger audit and
-- * providing helpers like get_user_atomic.
-- */
-- if (in_atomic())
-+ if (pagefault_disabled())
- return;
--
-- __might_sleep(__FILE__, __LINE__, 0);
--
-+ __might_sleep(file, line, 0);
-+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
-+#endif
- }
--EXPORT_SYMBOL(might_fault);
-+EXPORT_SYMBOL(__might_fault);
- #endif
-
- #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
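
__might_fault() now keys its warning off pagefault_disabled() instead of in_atomic(), so the state it checks is the explicit pagefault-disable bracket rather than the preempt count. A hedged sketch of the caller side this pairs with (demo_copy is an illustrative name):

static unsigned long demo_copy(void *dst, const void __user *src, size_t len)
{
	unsigned long left;

	pagefault_disable();	/* faults fail fast instead of sleeping */
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();
	return left;		/* non-zero: caller retries with a sleeping copy */
}
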
-diff -Nur linux-4.1.13.orig/mm/mmu_context.c linux-4.1.13/mm/mmu_context.c
---- linux-4.1.13.orig/mm/mmu_context.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/mmu_context.c 2015-11-29 09:23:09.661610728 +0100
-@@ -23,6 +23,7 @@
- struct task_struct *tsk = current;
-
- task_lock(tsk);
-+ preempt_disable_rt();
- active_mm = tsk->active_mm;
- if (active_mm != mm) {
- atomic_inc(&mm->mm_count);
-@@ -30,6 +31,7 @@
- }
- tsk->mm = mm;
- switch_mm(active_mm, mm, tsk);
-+ preempt_enable_rt();
- task_unlock(tsk);
- #ifdef finish_arch_post_lock_switch
- finish_arch_post_lock_switch();
-diff -Nur linux-4.1.13.orig/mm/page_alloc.c linux-4.1.13/mm/page_alloc.c
---- linux-4.1.13.orig/mm/page_alloc.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/page_alloc.c 2015-11-29 09:23:09.661610728 +0100
-@@ -60,6 +60,7 @@
- #include <linux/page_ext.h>
- #include <linux/hugetlb.h>
- #include <linux/sched/rt.h>
-+#include <linux/locallock.h>
- #include <linux/page_owner.h>
-
- #include <asm/sections.h>
-@@ -233,6 +234,18 @@
- EXPORT_SYMBOL(nr_online_nodes);
- #endif
-
-+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define cpu_lock_irqsave(cpu, flags) \
-+ local_lock_irqsave_on(pa_lock, flags, cpu)
-+# define cpu_unlock_irqrestore(cpu, flags) \
-+ local_unlock_irqrestore_on(pa_lock, flags, cpu)
-+#else
-+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
-+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
-+#endif
-+
- int page_group_by_mobility_disabled __read_mostly;
-
- void set_pageblock_migratetype(struct page *page, int migratetype)
-@@ -681,7 +694,7 @@
- }
-
- /*
-- * Frees a number of pages from the PCP lists
-+ * Frees a number of pages which have been collected from the pcp lists.
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- *
-@@ -692,18 +705,51 @@
- * pinned" detection logic.
- */
- static void free_pcppages_bulk(struct zone *zone, int count,
-- struct per_cpu_pages *pcp)
-+ struct list_head *list)
- {
-- int migratetype = 0;
-- int batch_free = 0;
- int to_free = count;
- unsigned long nr_scanned;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&zone->lock, flags);
-
-- spin_lock(&zone->lock);
- nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
- if (nr_scanned)
- __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-
-+ while (!list_empty(list)) {
-+ struct page *page = list_first_entry(list, struct page, lru);
-+ int mt; /* migratetype of the to-be-freed page */
-+
-+ /* must delete as __free_one_page list manipulates */
-+ list_del(&page->lru);
-+
-+ mt = get_freepage_migratetype(page);
-+ if (unlikely(has_isolate_pageblock(zone)))
-+ mt = get_pageblock_migratetype(page);
-+
-+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-+ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
-+ trace_mm_page_pcpu_drain(page, 0, mt);
-+ to_free--;
-+ }
-+ WARN_ON(to_free != 0);
-+ spin_unlock_irqrestore(&zone->lock, flags);
-+}
-+
-+/*
-+ * Moves a number of pages from the PCP lists to a free list, which
-+ * is then freed outside of the locked region.
-+ *
-+ * Assumes all pages on list are in same zone, and of same order.
-+ * count is the number of pages to free.
-+ */
-+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
-+ struct list_head *dst)
-+{
-+ int migratetype = 0;
-+ int batch_free = 0;
-+
- while (to_free) {
- struct page *page;
- struct list_head *list;
-@@ -719,7 +765,7 @@
- batch_free++;
- if (++migratetype == MIGRATE_PCPTYPES)
- migratetype = 0;
-- list = &pcp->lists[migratetype];
-+ list = &src->lists[migratetype];
- } while (list_empty(list));
-
- /* This is the only non-empty list. Free them all. */
-@@ -727,21 +773,11 @@
- batch_free = to_free;
-
- do {
-- int mt; /* migratetype of the to-be-freed page */
--
-- page = list_entry(list->prev, struct page, lru);
-- /* must delete as __free_one_page list manipulates */
-+ page = list_last_entry(list, struct page, lru);
- list_del(&page->lru);
-- mt = get_freepage_migratetype(page);
-- if (unlikely(has_isolate_pageblock(zone)))
-- mt = get_pageblock_migratetype(page);
--
-- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
-- trace_mm_page_pcpu_drain(page, 0, mt);
-+ list_add(&page->lru, dst);
- } while (--to_free && --batch_free && !list_empty(list));
- }
-- spin_unlock(&zone->lock);
- }
-
- static void free_one_page(struct zone *zone,
-@@ -750,7 +786,9 @@
- int migratetype)
- {
- unsigned long nr_scanned;
-- spin_lock(&zone->lock);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&zone->lock, flags);
- nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
- if (nr_scanned)
- __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -760,7 +798,7 @@
- migratetype = get_pfnblock_migratetype(page, pfn);
- }
- __free_one_page(page, pfn, zone, order, migratetype);
-- spin_unlock(&zone->lock);
-+ spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -825,11 +863,11 @@
- return;
-
- migratetype = get_pfnblock_migratetype(page, pfn);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- __count_vm_events(PGFREE, 1 << order);
- set_freepage_migratetype(page, migratetype);
- free_one_page(page_zone(page), page, pfn, order, migratetype);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- }
-
- void __init __free_pages_bootmem(struct page *page, unsigned int order)
-@@ -1371,16 +1409,18 @@
- void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
- {
- unsigned long flags;
-+ LIST_HEAD(dst);
- int to_drain, batch;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- batch = READ_ONCE(pcp->batch);
- to_drain = min(pcp->count, batch);
- if (to_drain > 0) {
-- free_pcppages_bulk(zone, to_drain, pcp);
-+ isolate_pcp_pages(to_drain, pcp, &dst);
- pcp->count -= to_drain;
- }
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, to_drain, &dst);
- }
- #endif
-
-@@ -1396,16 +1436,21 @@
- unsigned long flags;
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-+ LIST_HEAD(dst);
-+ int count;
-
-- local_irq_save(flags);
-+ cpu_lock_irqsave(cpu, flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
-
- pcp = &pset->pcp;
-- if (pcp->count) {
-- free_pcppages_bulk(zone, pcp->count, pcp);
-+ count = pcp->count;
-+ if (count) {
-+ isolate_pcp_pages(count, pcp, &dst);
- pcp->count = 0;
- }
-- local_irq_restore(flags);
-+ cpu_unlock_irqrestore(cpu, flags);
-+ if (count)
-+ free_pcppages_bulk(zone, count, &dst);
- }
-
- /*
-@@ -1491,8 +1536,17 @@
- else
- cpumask_clear_cpu(cpu, &cpus_with_pcps);
- }
-+#ifndef CONFIG_PREEMPT_RT_BASE
- on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
- zone, 1);
-+#else
-+ for_each_cpu(cpu, &cpus_with_pcps) {
-+ if (zone)
-+ drain_pages_zone(cpu, zone);
-+ else
-+ drain_pages(cpu);
-+ }
-+#endif
- }
-
- #ifdef CONFIG_HIBERNATION
-@@ -1548,7 +1602,7 @@
-
- migratetype = get_pfnblock_migratetype(page, pfn);
- set_freepage_migratetype(page, migratetype);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- __count_vm_event(PGFREE);
-
- /*
-@@ -1574,12 +1628,17 @@
- pcp->count++;
- if (pcp->count >= pcp->high) {
- unsigned long batch = READ_ONCE(pcp->batch);
-- free_pcppages_bulk(zone, batch, pcp);
-+ LIST_HEAD(dst);
-+
-+ isolate_pcp_pages(batch, pcp, &dst);
- pcp->count -= batch;
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, batch, &dst);
-+ return;
- }
-
- out:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- }
-
- /*
-@@ -1710,7 +1769,7 @@
- struct per_cpu_pages *pcp;
- struct list_head *list;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
-@@ -1742,13 +1801,15 @@
- */
- WARN_ON_ONCE(order > 1);
- }
-- spin_lock_irqsave(&zone->lock, flags);
-+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- page = __rmqueue(zone, order, migratetype);
-- spin_unlock(&zone->lock);
-- if (!page)
-+ if (!page) {
-+ spin_unlock(&zone->lock);
- goto failed;
-+ }
- __mod_zone_freepage_state(zone, -(1 << order),
- get_freepage_migratetype(page));
-+ spin_unlock(&zone->lock);
- }
-
- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -1758,13 +1819,13 @@
-
- __count_zone_vm_events(PGALLOC, zone, 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
-
- VM_BUG_ON_PAGE(bad_range(zone, page), page);
- return page;
-
- failed:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- return NULL;
- }
-
-@@ -5653,6 +5714,7 @@
- void __init page_alloc_init(void)
- {
- hotcpu_notifier(page_alloc_cpu_notify, 0);
-+ local_irq_lock_init(pa_lock);
- }
-
- /*
-@@ -6547,7 +6609,7 @@
- struct per_cpu_pageset *pset;
-
- /* avoid races with drain_pages() */
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (zone->pageset != &boot_pageset) {
- for_each_online_cpu(cpu) {
- pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6556,7 +6618,7 @@
- free_percpu(zone->pageset);
- zone->pageset = &boot_pageset;
- }
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- }
-
- #ifdef CONFIG_MEMORY_HOTREMOVE
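
The recurring shape of the page_alloc rework: pages are detached from the per-CPU lists under pa_lock with interrupts (or the local lock) held, while the buddy-merge work moves out of that section into free_pcppages_bulk(), which now takes zone->lock irqsave itself. Reduced to a skeleton (demo_drain is illustrative; pa_lock, isolate_pcp_pages() and free_pcppages_bulk() are the names from the hunk above):

static void demo_drain(struct zone *zone, struct per_cpu_pages *pcp, int to_drain)
{
	unsigned long flags;
	LIST_HEAD(dst);

	local_lock_irqsave(pa_lock, flags);
	isolate_pcp_pages(to_drain, pcp, &dst);	/* pure list moves */
	pcp->count -= to_drain;
	local_unlock_irqrestore(pa_lock, flags);
	free_pcppages_bulk(zone, to_drain, &dst); /* takes zone->lock itself */
}
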
-diff -Nur linux-4.1.13.orig/mm/slab.h linux-4.1.13/mm/slab.h
---- linux-4.1.13.orig/mm/slab.h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/slab.h 2015-11-29 09:23:09.661610728 +0100
-@@ -330,7 +330,11 @@
- * The slab lists for all objects.
- */
- struct kmem_cache_node {
-+#ifdef CONFIG_SLUB
-+ raw_spinlock_t list_lock;
-+#else
- spinlock_t list_lock;
-+#endif
-
- #ifdef CONFIG_SLAB
- struct list_head slabs_partial; /* partial list first, better asm code */
-diff -Nur linux-4.1.13.orig/mm/slub.c linux-4.1.13/mm/slub.c
---- linux-4.1.13.orig/mm/slub.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/slub.c 2015-11-29 09:23:09.665610461 +0100
-@@ -1069,7 +1069,7 @@
- {
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-- spin_lock_irqsave(&n->list_lock, *flags);
-+ raw_spin_lock_irqsave(&n->list_lock, *flags);
- slab_lock(page);
-
- if (!check_slab(s, page))
-@@ -1116,7 +1116,7 @@
-
- fail:
- slab_unlock(page);
-- spin_unlock_irqrestore(&n->list_lock, *flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, *flags);
- slab_fix(s, "Object at 0x%p not freed", object);
- return NULL;
- }
-@@ -1242,6 +1242,12 @@
-
- #endif /* CONFIG_SLUB_DEBUG */
-
-+struct slub_free_list {
-+ raw_spinlock_t lock;
-+ struct list_head list;
-+};
-+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
-+
- /*
- * Hooks for other subsystems that check memory allocations. In a typical
- * production configuration these hooks all should produce no code at all.
-@@ -1306,6 +1312,17 @@
- kasan_slab_free(s, x);
- }
-
-+static void setup_object(struct kmem_cache *s, struct page *page,
-+ void *object)
-+{
-+ setup_object_debug(s, page, object);
-+ if (unlikely(s->ctor)) {
-+ kasan_unpoison_object_data(s, object);
-+ s->ctor(object);
-+ kasan_poison_object_data(s, object);
-+ }
-+}
-+
- /*
- * Slab allocation and freeing
- */
-@@ -1336,10 +1353,17 @@
- struct page *page;
- struct kmem_cache_order_objects oo = s->oo;
- gfp_t alloc_gfp;
-+ void *start, *p;
-+ int idx, order;
-+ bool enableirqs;
-
- flags &= gfp_allowed_mask;
-
-- if (flags & __GFP_WAIT)
-+ enableirqs = (flags & __GFP_WAIT) != 0;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ enableirqs |= system_state == SYSTEM_RUNNING;
-+#endif
-+ if (enableirqs)
- local_irq_enable();
-
- flags |= s->allocflags;
-@@ -1359,13 +1383,13 @@
- * Try a lower order alloc if possible
- */
- page = alloc_slab_page(s, alloc_gfp, node, oo);
--
-- if (page)
-- stat(s, ORDER_FALLBACK);
-+ if (unlikely(!page))
-+ goto out;
-+ stat(s, ORDER_FALLBACK);
- }
-
-- if (kmemcheck_enabled && page
-- && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
-+ if (kmemcheck_enabled &&
-+ !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
- int pages = 1 << oo_order(oo);
-
- kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-@@ -1380,51 +1404,9 @@
- kmemcheck_mark_unallocated_pages(page, pages);
- }
-
-- if (flags & __GFP_WAIT)
-- local_irq_disable();
-- if (!page)
-- return NULL;
--
- page->objects = oo_objects(oo);
-- mod_zone_page_state(page_zone(page),
-- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-- 1 << oo_order(oo));
--
-- return page;
--}
--
--static void setup_object(struct kmem_cache *s, struct page *page,
-- void *object)
--{
-- setup_object_debug(s, page, object);
-- if (unlikely(s->ctor)) {
-- kasan_unpoison_object_data(s, object);
-- s->ctor(object);
-- kasan_poison_object_data(s, object);
-- }
--}
--
--static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
--{
-- struct page *page;
-- void *start;
-- void *p;
-- int order;
-- int idx;
--
-- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-- pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-- BUG();
-- }
--
-- page = allocate_slab(s,
-- flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-- if (!page)
-- goto out;
-
- order = compound_order(page);
-- inc_slabs_node(s, page_to_nid(page), page->objects);
- page->slab_cache = s;
- __SetPageSlab(page);
- if (page_is_pfmemalloc(page))
-@@ -1448,10 +1430,34 @@
- page->freelist = start;
- page->inuse = page->objects;
- page->frozen = 1;
-+
- out:
-+ if (enableirqs)
-+ local_irq_disable();
-+ if (!page)
-+ return NULL;
-+
-+ mod_zone_page_state(page_zone(page),
-+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-+ 1 << oo_order(oo));
-+
-+ inc_slabs_node(s, page_to_nid(page), page->objects);
-+
- return page;
- }
-
-+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-+{
-+ if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-+ pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-+ BUG();
-+ }
-+
-+ return allocate_slab(s,
-+ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-+}
-+
- static void __free_slab(struct kmem_cache *s, struct page *page)
- {
- int order = compound_order(page);
-@@ -1483,6 +1489,16 @@
- memcg_uncharge_slab(s, order);
- }
-
-+static void free_delayed(struct list_head *h)
-+{
-+ while (!list_empty(h)) {
-+ struct page *page = list_first_entry(h, struct page, lru);
-+
-+ list_del(&page->lru);
-+ __free_slab(page->slab_cache, page);
-+ }
-+}
-+
- #define need_reserve_slab_rcu \
- (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-
-@@ -1517,6 +1533,12 @@
- }
-
- call_rcu(head, rcu_free_slab);
-+ } else if (irqs_disabled()) {
-+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
-+
-+ raw_spin_lock(&f->lock);
-+ list_add(&page->lru, &f->list);
-+ raw_spin_unlock(&f->lock);
- } else
- __free_slab(s, page);
- }
-@@ -1630,7 +1652,7 @@
- if (!n || !n->nr_partial)
- return NULL;
-
-- spin_lock(&n->list_lock);
-+ raw_spin_lock(&n->list_lock);
- list_for_each_entry_safe(page, page2, &n->partial, lru) {
- void *t;
-
-@@ -1655,7 +1677,7 @@
- break;
-
- }
-- spin_unlock(&n->list_lock);
-+ raw_spin_unlock(&n->list_lock);
- return object;
- }
-
-@@ -1901,7 +1923,7 @@
- * that acquire_slab() will see a slab page that
- * is frozen
- */
-- spin_lock(&n->list_lock);
-+ raw_spin_lock(&n->list_lock);
- }
- } else {
- m = M_FULL;
-@@ -1912,7 +1934,7 @@
- * slabs from diagnostic functions will not see
- * any frozen slabs.
- */
-- spin_lock(&n->list_lock);
-+ raw_spin_lock(&n->list_lock);
- }
- }
-
-@@ -1947,7 +1969,7 @@
- goto redo;
-
- if (lock)
-- spin_unlock(&n->list_lock);
-+ raw_spin_unlock(&n->list_lock);
-
- if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
-@@ -1979,10 +2001,10 @@
- n2 = get_node(s, page_to_nid(page));
- if (n != n2) {
- if (n)
-- spin_unlock(&n->list_lock);
-+ raw_spin_unlock(&n->list_lock);
-
- n = n2;
-- spin_lock(&n->list_lock);
-+ raw_spin_lock(&n->list_lock);
- }
-
- do {
-@@ -2011,7 +2033,7 @@
- }
-
- if (n)
-- spin_unlock(&n->list_lock);
-+ raw_spin_unlock(&n->list_lock);
-
- while (discard_page) {
- page = discard_page;
-@@ -2050,14 +2072,21 @@
- pobjects = oldpage->pobjects;
- pages = oldpage->pages;
- if (drain && pobjects > s->cpu_partial) {
-+ struct slub_free_list *f;
- unsigned long flags;
-+ LIST_HEAD(tofree);
- /*
- * partial array is full. Move the existing
- * set to the per node partial list.
- */
- local_irq_save(flags);
- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-+ f = this_cpu_ptr(&slub_free_list);
-+ raw_spin_lock(&f->lock);
-+ list_splice_init(&f->list, &tofree);
-+ raw_spin_unlock(&f->lock);
- local_irq_restore(flags);
-+ free_delayed(&tofree);
- oldpage = NULL;
- pobjects = 0;
- pages = 0;
-@@ -2129,7 +2158,22 @@
-
- static void flush_all(struct kmem_cache *s)
- {
-+ LIST_HEAD(tofree);
-+ int cpu;
-+
- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
-+ for_each_online_cpu(cpu) {
-+ struct slub_free_list *f;
-+
-+ if (!has_cpu_slab(cpu, s))
-+ continue;
-+
-+ f = &per_cpu(slub_free_list, cpu);
-+ raw_spin_lock_irq(&f->lock);
-+ list_splice_init(&f->list, &tofree);
-+ raw_spin_unlock_irq(&f->lock);
-+ free_delayed(&tofree);
-+ }
- }
-
- /*
-@@ -2165,10 +2209,10 @@
- unsigned long x = 0;
- struct page *page;
-
-- spin_lock_irqsave(&n->list_lock, flags);
-+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += get_count(page);
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
- }
- #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2305,9 +2349,11 @@
- static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
- {
-+ struct slub_free_list *f;
- void *freelist;
- struct page *page;
- unsigned long flags;
-+ LIST_HEAD(tofree);
-
- local_irq_save(flags);
- #ifdef CONFIG_PREEMPT
-@@ -2375,7 +2421,13 @@
- VM_BUG_ON(!c->page->frozen);
- c->freelist = get_freepointer(s, freelist);
- c->tid = next_tid(c->tid);
-+out:
-+ f = this_cpu_ptr(&slub_free_list);
-+ raw_spin_lock(&f->lock);
-+ list_splice_init(&f->list, &tofree);
-+ raw_spin_unlock(&f->lock);
- local_irq_restore(flags);
-+ free_delayed(&tofree);
- return freelist;
-
- new_slab:
-@@ -2392,8 +2444,7 @@
-
- if (unlikely(!freelist)) {
- slab_out_of_memory(s, gfpflags, node);
-- local_irq_restore(flags);
-- return NULL;
-+ goto out;
- }
-
- page = c->page;
-@@ -2408,8 +2459,7 @@
- deactivate_slab(s, page, get_freepointer(s, freelist));
- c->page = NULL;
- c->freelist = NULL;
-- local_irq_restore(flags);
-- return freelist;
-+ goto out;
- }
-
- /*
-@@ -2593,7 +2643,7 @@
-
- do {
- if (unlikely(n)) {
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- n = NULL;
- }
- prior = page->freelist;
-@@ -2625,7 +2675,7 @@
- * Otherwise the list_lock will synchronize with
- * other processors updating the list of slabs.
- */
-- spin_lock_irqsave(&n->list_lock, flags);
-+ raw_spin_lock_irqsave(&n->list_lock, flags);
-
- }
- }
-@@ -2667,7 +2717,7 @@
- add_partial(n, page, DEACTIVATE_TO_TAIL);
- stat(s, FREE_ADD_PARTIAL);
- }
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return;
-
- slab_empty:
-@@ -2682,7 +2732,7 @@
- remove_full(s, n, page);
- }
-
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- stat(s, FREE_SLAB);
- discard_slab(s, page);
- }
-@@ -2881,7 +2931,7 @@
- init_kmem_cache_node(struct kmem_cache_node *n)
- {
- n->nr_partial = 0;
-- spin_lock_init(&n->list_lock);
-+ raw_spin_lock_init(&n->list_lock);
- INIT_LIST_HEAD(&n->partial);
- #ifdef CONFIG_SLUB_DEBUG
- atomic_long_set(&n->nr_slabs, 0);
-@@ -3463,7 +3513,7 @@
- for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
- INIT_LIST_HEAD(promote + i);
-
-- spin_lock_irqsave(&n->list_lock, flags);
-+ raw_spin_lock_irqsave(&n->list_lock, flags);
-
- /*
- * Build lists of slabs to discard or promote.
-@@ -3494,7 +3544,7 @@
- for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
- list_splice(promote + i, &n->partial);
-
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
-
- /* Release empty slabs */
- list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3670,6 +3720,12 @@
- {
- static __initdata struct kmem_cache boot_kmem_cache,
- boot_kmem_cache_node;
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
-+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
-+ }
-
- if (debug_guardpage_minorder())
- slub_max_order = 0;
-@@ -3912,7 +3968,7 @@
- struct page *page;
- unsigned long flags;
-
-- spin_lock_irqsave(&n->list_lock, flags);
-+ raw_spin_lock_irqsave(&n->list_lock, flags);
-
- list_for_each_entry(page, &n->partial, lru) {
- validate_slab_slab(s, page, map);
-@@ -3934,7 +3990,7 @@
- s->name, count, atomic_long_read(&n->nr_slabs));
-
- out:
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return count;
- }
-
-@@ -4122,12 +4178,12 @@
- if (!atomic_long_read(&n->nr_slabs))
- continue;
-
-- spin_lock_irqsave(&n->list_lock, flags);
-+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- process_slab(&t, s, page, alloc, map);
- list_for_each_entry(page, &n->full, lru)
- process_slab(&t, s, page, alloc, map);
-- spin_unlock_irqrestore(&n->list_lock, flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
- }
-
- for (i = 0; i < t.count; i++) {
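
SLUB gets the same two-step treatment: n->list_lock becomes a raw spinlock so it stays usable with interrupts off, and any __free_slab() that would otherwise run in such a context is queued on the per-CPU slub_free_list and drained later by free_delayed() from a preemptible context. The decision point, as the hunk adds it to free_slab():

	if (irqs_disabled()) {
		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

		raw_spin_lock(&f->lock);
		list_add(&page->lru, &f->list);	/* drained by free_delayed() */
		raw_spin_unlock(&f->lock);
	} else
		__free_slab(s, page);
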
-diff -Nur linux-4.1.13.orig/mm/swap.c linux-4.1.13/mm/swap.c
---- linux-4.1.13.orig/mm/swap.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/swap.c 2015-11-29 09:23:09.665610461 +0100
-@@ -32,6 +32,7 @@
- #include <linux/gfp.h>
- #include <linux/uio.h>
- #include <linux/hugetlb.h>
-+#include <linux/locallock.h>
-
- #include "internal.h"
-
-@@ -45,6 +46,9 @@
- static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-
-+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
-+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-+
- /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
-@@ -481,11 +485,11 @@
- unsigned long flags;
-
- page_cache_get(page);
-- local_irq_save(flags);
-+ local_lock_irqsave(rotate_lock, flags);
- pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
- pagevec_move_tail(pvec);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(rotate_lock, flags);
- }
- }
-
-@@ -536,12 +540,13 @@
- void activate_page(struct page *page)
- {
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
-+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
-+ activate_page_pvecs);
-
- page_cache_get(page);
- if (!pagevec_add(pvec, page))
- pagevec_lru_move_fn(pvec, __activate_page, NULL);
-- put_cpu_var(activate_page_pvecs);
-+ put_locked_var(swapvec_lock, activate_page_pvecs);
- }
- }
-
-@@ -567,7 +572,7 @@
-
- static void __lru_cache_activate_page(struct page *page)
- {
-- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
-+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
- int i;
-
- /*
-@@ -589,7 +594,7 @@
- }
- }
-
-- put_cpu_var(lru_add_pvec);
-+ put_locked_var(swapvec_lock, lru_add_pvec);
- }
-
- /*
-@@ -628,13 +633,13 @@
-
- static void __lru_cache_add(struct page *page)
- {
-- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
-+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
-
- page_cache_get(page);
- if (!pagevec_space(pvec))
- __pagevec_lru_add(pvec);
- pagevec_add(pvec, page);
-- put_cpu_var(lru_add_pvec);
-+ put_locked_var(swapvec_lock, lru_add_pvec);
- }
-
- /**
-@@ -814,9 +819,9 @@
- unsigned long flags;
-
- /* No harm done if a racing interrupt already did this */
-- local_irq_save(flags);
-+ local_lock_irqsave(rotate_lock, flags);
- pagevec_move_tail(pvec);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(rotate_lock, flags);
- }
-
- pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -844,18 +849,19 @@
- return;
-
- if (likely(get_page_unless_zero(page))) {
-- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
-+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
-+ lru_deactivate_file_pvecs);
-
- if (!pagevec_add(pvec, page))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-- put_cpu_var(lru_deactivate_file_pvecs);
-+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
- }
- }
-
- void lru_add_drain(void)
- {
-- lru_add_drain_cpu(get_cpu());
-- put_cpu();
-+ lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
-+ local_unlock_cpu(swapvec_lock);
- }
-
- static void lru_add_drain_per_cpu(struct work_struct *dummy)
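
For the pagevec caches, get_cpu_var() is replaced by get_locked_var(), which bundles the per-CPU access with swapvec_lock so several pagevecs can share one local lock. The pattern in isolation (demo_lock and demo_pvec are illustrative names):

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
static DEFINE_PER_CPU(struct pagevec, demo_pvec);

static void demo_add(struct page *page)
{
	struct pagevec *pvec = &get_locked_var(demo_lock, demo_pvec);

	if (!pagevec_add(pvec, page))	/* flush when the pagevec fills up */
		__pagevec_lru_add(pvec);
	put_locked_var(demo_lock, demo_pvec);
}
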
-diff -Nur linux-4.1.13.orig/mm/truncate.c linux-4.1.13/mm/truncate.c
---- linux-4.1.13.orig/mm/truncate.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/truncate.c 2015-11-29 09:23:09.665610461 +0100
-@@ -56,8 +56,11 @@
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
-- !list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes, &node->private_list);
-+ !list_empty(&node->private_list)) {
-+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes, &node->private_list);
-+ local_unlock(workingset_shadow_lock);
-+ }
- __radix_tree_delete_node(&mapping->page_tree, node);
- unlock:
- spin_unlock_irq(&mapping->tree_lock);
-diff -Nur linux-4.1.13.orig/mm/vmalloc.c linux-4.1.13/mm/vmalloc.c
---- linux-4.1.13.orig/mm/vmalloc.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/vmalloc.c 2015-11-29 09:23:09.665610461 +0100
-@@ -819,7 +819,7 @@
- struct vmap_block *vb;
- struct vmap_area *va;
- unsigned long vb_idx;
-- int node, err;
-+ int node, err, cpu;
- void *vaddr;
-
- node = numa_node_id();
-@@ -862,11 +862,12 @@
- BUG_ON(err);
- radix_tree_preload_end();
-
-- vbq = &get_cpu_var(vmap_block_queue);
-+ cpu = get_cpu_light();
-+ vbq = this_cpu_ptr(&vmap_block_queue);
- spin_lock(&vbq->lock);
- list_add_tail_rcu(&vb->free_list, &vbq->free);
- spin_unlock(&vbq->lock);
-- put_cpu_var(vmap_block_queue);
-+ put_cpu_light();
-
- return vaddr;
- }
-@@ -935,6 +936,7 @@
- struct vmap_block *vb;
- void *vaddr = NULL;
- unsigned int order;
-+ int cpu;
-
- BUG_ON(size & ~PAGE_MASK);
- BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -949,7 +951,8 @@
- order = get_order(size);
-
- rcu_read_lock();
-- vbq = &get_cpu_var(vmap_block_queue);
-+ cpu = get_cpu_light();
-+ vbq = this_cpu_ptr(&vmap_block_queue);
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- unsigned long pages_off;
-
-@@ -972,7 +975,7 @@
- break;
- }
-
-- put_cpu_var(vmap_block_queue);
-+ put_cpu_light();
- rcu_read_unlock();
-
- /* Allocate new block if nothing was found */
-diff -Nur linux-4.1.13.orig/mm/vmstat.c linux-4.1.13/mm/vmstat.c
---- linux-4.1.13.orig/mm/vmstat.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/vmstat.c 2015-11-29 09:23:09.665610461 +0100
-@@ -226,6 +226,7 @@
- long x;
- long t;
-
-+ preempt_disable_rt();
- x = delta + __this_cpu_read(*p);
-
- t = __this_cpu_read(pcp->stat_threshold);
-@@ -235,6 +236,7 @@
- x = 0;
- }
- __this_cpu_write(*p, x);
-+ preempt_enable_rt();
- }
- EXPORT_SYMBOL(__mod_zone_page_state);
-
-@@ -267,6 +269,7 @@
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
-
-+ preempt_disable_rt();
- v = __this_cpu_inc_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v > t)) {
-@@ -275,6 +278,7 @@
- zone_page_state_add(v + overstep, zone, item);
- __this_cpu_write(*p, -overstep);
- }
-+ preempt_enable_rt();
- }
-
- void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -289,6 +293,7 @@
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
-
-+ preempt_disable_rt();
- v = __this_cpu_dec_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v < - t)) {
-@@ -297,6 +302,7 @@
- zone_page_state_add(v - overstep, zone, item);
- __this_cpu_write(*p, overstep);
- }
-+ preempt_enable_rt();
- }
-
- void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
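
The vmstat counters rely on their callers disabling preemption around the __this_cpu read-modify-write; preempt_disable_rt()/preempt_enable_rt() restore that guarantee on RT only and compile away everywhere else. The bracketing in isolation (demo_stat is an illustrative name):

static DEFINE_PER_CPU(long, demo_stat);

static void demo_add(long delta)
{
	preempt_disable_rt();			/* no-op unless PREEMPT_RT_BASE */
	__this_cpu_add(demo_stat, delta);	/* RMW needs a stable CPU */
	preempt_enable_rt();
}
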
-diff -Nur linux-4.1.13.orig/mm/workingset.c linux-4.1.13/mm/workingset.c
---- linux-4.1.13.orig/mm/workingset.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/mm/workingset.c 2015-11-29 09:23:09.665610461 +0100
-@@ -264,7 +264,8 @@
- * point where they would still be useful.
- */
-
--struct list_lru workingset_shadow_nodes;
-+struct list_lru __workingset_shadow_nodes;
-+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
-
- static unsigned long count_shadow_nodes(struct shrinker *shrinker,
- struct shrink_control *sc)
-@@ -274,9 +275,9 @@
- unsigned long pages;
-
- /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
-- local_irq_disable();
-- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
-- local_irq_enable();
-+ local_lock_irq(workingset_shadow_lock);
-+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
-+ local_unlock_irq(workingset_shadow_lock);
-
- pages = node_present_pages(sc->nid);
- /*
-@@ -363,9 +364,9 @@
- spin_unlock(&mapping->tree_lock);
- ret = LRU_REMOVED_RETRY;
- out:
-- local_irq_enable();
-+ local_unlock_irq(workingset_shadow_lock);
- cond_resched();
-- local_irq_disable();
-+ local_lock_irq(workingset_shadow_lock);
- spin_lock(lru_lock);
- return ret;
- }
-@@ -376,10 +377,10 @@
- unsigned long ret;
-
- /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
-- local_irq_disable();
-- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
-+ local_lock_irq(workingset_shadow_lock);
-+ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
- shadow_lru_isolate, NULL);
-- local_irq_enable();
-+ local_unlock_irq(workingset_shadow_lock);
- return ret;
- }
-
-@@ -400,7 +401,7 @@
- {
- int ret;
-
-- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
-+ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
- if (ret)
- goto err;
- ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -408,7 +409,7 @@
- goto err_list_lru;
- return 0;
- err_list_lru:
-- list_lru_destroy(&workingset_shadow_nodes);
-+ list_lru_destroy(&__workingset_shadow_nodes);
- err:
- return ret;
- }
-diff -Nur linux-4.1.13.orig/net/core/dev.c linux-4.1.13/net/core/dev.c
---- linux-4.1.13.orig/net/core/dev.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/core/dev.c 2015-11-29 09:23:09.665610461 +0100
-@@ -184,6 +184,7 @@
- static DEFINE_HASHTABLE(napi_hash, 8);
-
- static seqcount_t devnet_rename_seq;
-+static DEFINE_MUTEX(devnet_rename_mutex);
-
- static inline void dev_base_seq_inc(struct net *net)
- {
-@@ -205,14 +206,14 @@
- static inline void rps_lock(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
-- spin_lock(&sd->input_pkt_queue.lock);
-+ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
- #endif
- }
-
- static inline void rps_unlock(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
-- spin_unlock(&sd->input_pkt_queue.lock);
-+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
- #endif
- }
-
-@@ -852,7 +853,8 @@
- strcpy(name, dev->name);
- rcu_read_unlock();
- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
-- cond_resched();
-+ mutex_lock(&devnet_rename_mutex);
-+ mutex_unlock(&devnet_rename_mutex);
- goto retry;
- }
-
-@@ -1121,20 +1123,17 @@
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
-- write_seqcount_begin(&devnet_rename_seq);
-+ mutex_lock(&devnet_rename_mutex);
-+ __raw_write_seqcount_begin(&devnet_rename_seq);
-
-- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-- write_seqcount_end(&devnet_rename_seq);
-- return 0;
-- }
-+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
-+ goto outunlock;
-
- memcpy(oldname, dev->name, IFNAMSIZ);
-
- err = dev_get_valid_name(net, dev, newname);
-- if (err < 0) {
-- write_seqcount_end(&devnet_rename_seq);
-- return err;
-- }
-+ if (err < 0)
-+ goto outunlock;
-
- if (oldname[0] && !strchr(oldname, '%'))
- netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1147,11 +1146,12 @@
- if (ret) {
- memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
-- write_seqcount_end(&devnet_rename_seq);
-- return ret;
-+ err = ret;
-+ goto outunlock;
- }
-
-- write_seqcount_end(&devnet_rename_seq);
-+ __raw_write_seqcount_end(&devnet_rename_seq);
-+ mutex_unlock(&devnet_rename_mutex);
-
- netdev_adjacent_rename_links(dev, oldname);
-
-@@ -1172,7 +1172,8 @@
- /* err >= 0 after dev_alloc_name() or stores the first errno */
- if (err >= 0) {
- err = ret;
-- write_seqcount_begin(&devnet_rename_seq);
-+ mutex_lock(&devnet_rename_mutex);
-+ __raw_write_seqcount_begin(&devnet_rename_seq);
- memcpy(dev->name, oldname, IFNAMSIZ);
- memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
-@@ -1185,6 +1186,11 @@
- }
-
- return err;
-+
-+outunlock:
-+ __raw_write_seqcount_end(&devnet_rename_seq);
-+ mutex_unlock(&devnet_rename_mutex);
-+ return err;
- }
-
- /**
-@@ -2214,6 +2220,7 @@
- sd->output_queue_tailp = &q->next_sched;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- void __netif_schedule(struct Qdisc *q)
-@@ -2295,6 +2302,7 @@
- __this_cpu_write(softnet_data.completion_queue, skb);
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(__dev_kfree_skb_irq);
-
-@@ -3365,6 +3373,7 @@
- rps_unlock(sd);
-
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
-
- atomic_long_inc(&skb->dev->rx_dropped);
- kfree_skb(skb);
-@@ -3383,7 +3392,7 @@
- struct rps_dev_flow voidflow, *rflow = &voidflow;
- int cpu;
-
-- preempt_disable();
-+ migrate_disable();
- rcu_read_lock();
-
- cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3393,13 +3402,13 @@
- ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-
- rcu_read_unlock();
-- preempt_enable();
-+ migrate_enable();
- } else
- #endif
- {
- unsigned int qtail;
-- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
-- put_cpu();
-+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
-+ put_cpu_light();
- }
- return ret;
- }
-@@ -3433,16 +3442,44 @@
-
- trace_netif_rx_ni_entry(skb);
-
-- preempt_disable();
-+ local_bh_disable();
- err = netif_rx_internal(skb);
-- if (local_softirq_pending())
-- do_softirq();
-- preempt_enable();
-+ local_bh_enable();
-
- return err;
- }
- EXPORT_SYMBOL(netif_rx_ni);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT runs ksoftirqd as a real-time thread and the root_lock is a
-+ * "sleeping spinlock". If the trylock fails then we can go into an
-+ * infinite loop when ksoftirqd has preempted the task which actually
-+ * holds the lock, because we requeue q and raise the NET_TX softirq,
-+ * causing ksoftirqd to loop forever.
-+ *
-+ * It's safe to use spin_lock on RT here as softirqs run in thread
-+ * context and cannot deadlock against the thread which is holding
-+ * root_lock.
-+ *
-+ * On !RT the trylock might fail, but there we bail out of the
-+ * softirq loop after 10 attempts, which we can't do on RT. And the
-+ * task holding root_lock cannot be preempted, so the only downside of
-+ * that trylock is that we need 10 loops to decide that we should have
-+ * given up in the first one :)
-+ */
-+static inline int take_root_lock(spinlock_t *lock)
-+{
-+ spin_lock(lock);
-+ return 1;
-+}
-+#else
-+static inline int take_root_lock(spinlock_t *lock)
-+{
-+ return spin_trylock(lock);
-+}
-+#endif
-+
- static void net_tx_action(struct softirq_action *h)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3484,7 +3521,7 @@
- head = head->next_sched;
-
- root_lock = qdisc_lock(q);
-- if (spin_trylock(root_lock)) {
-+ if (take_root_lock(root_lock)) {
- smp_mb__before_atomic();
- clear_bit(__QDISC_STATE_SCHED,
- &q->state);
-@@ -3881,7 +3918,7 @@
- skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev == dev) {
- __skb_unlink(skb, &sd->input_pkt_queue);
-- kfree_skb(skb);
-+ __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
-@@ -3890,10 +3927,13 @@
- skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev == dev) {
- __skb_unlink(skb, &sd->process_queue);
-- kfree_skb(skb);
-+ __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
-+
-+ if (!skb_queue_empty(&sd->tofree_queue))
-+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
- }
-
- static int napi_gro_complete(struct sk_buff *skb)
-@@ -4344,6 +4384,7 @@
- sd->rps_ipi_list = NULL;
-
- local_irq_enable();
-+ preempt_check_resched_rt();
-
- /* Send pending IPI's to kick RPS processing on remote cpus. */
- while (remsd) {
-@@ -4357,6 +4398,7 @@
- } else
- #endif
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4438,6 +4480,7 @@
- local_irq_save(flags);
- ____napi_schedule(this_cpu_ptr(&softnet_data), n);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(__napi_schedule);
-
-@@ -6926,7 +6969,7 @@
- void synchronize_net(void)
- {
- might_sleep();
-- if (rtnl_is_locked())
-+ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
- synchronize_rcu_expedited();
- else
- synchronize_rcu();
-@@ -7167,16 +7210,20 @@
-
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
-
- /* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->process_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
-- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
-+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
-+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
-+ kfree_skb(skb);
-+ }
-
- return NOTIFY_OK;
- }
-@@ -7478,8 +7525,9 @@
- for_each_possible_cpu(i) {
- struct softnet_data *sd = &per_cpu(softnet_data, i);
-
-- skb_queue_head_init(&sd->input_pkt_queue);
-- skb_queue_head_init(&sd->process_queue);
-+ skb_queue_head_init_raw(&sd->input_pkt_queue);
-+ skb_queue_head_init_raw(&sd->process_queue);
-+ skb_queue_head_init_raw(&sd->tofree_queue);
- INIT_LIST_HEAD(&sd->poll_list);
- sd->output_queue_tailp = &sd->output_queue;
- #ifdef CONFIG_RPS
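
The devnet_rename_seq conversion shows how a seqcount write side is made RT-safe: devnet_rename_mutex serializes writers and gives a retrying reader something it can block on, while the __raw_ seqcount helpers still publish the generation without the lockdep/preemption assumptions of write_seqcount_begin(). The writer-side skeleton as the hunk arranges it:

	mutex_lock(&devnet_rename_mutex);
	__raw_write_seqcount_begin(&devnet_rename_seq);
	/* ... perform the rename ... */
	__raw_write_seqcount_end(&devnet_rename_seq);
	mutex_unlock(&devnet_rename_mutex);

A reader that observes a retry now takes and drops the mutex instead of calling cond_resched(), so on RT it sleeps until the writer is finished rather than spinning against it.
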
-diff -Nur linux-4.1.13.orig/net/core/skbuff.c linux-4.1.13/net/core/skbuff.c
---- linux-4.1.13.orig/net/core/skbuff.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/core/skbuff.c 2015-11-29 09:23:09.669610195 +0100
-@@ -63,6 +63,7 @@
- #include <linux/errqueue.h>
- #include <linux/prefetch.h>
- #include <linux/if_vlan.h>
-+#include <linux/locallock.h>
-
- #include <net/protocol.h>
- #include <net/dst.h>
-@@ -356,6 +357,7 @@
- };
- static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
- static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
-+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
-
- static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
- gfp_t gfp_mask)
-@@ -433,9 +435,9 @@
- unsigned long flags;
- void *data;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(netdev_alloc_lock, flags);
- data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(netdev_alloc_lock, flags);
- return data;
- }
-
-diff -Nur linux-4.1.13.orig/net/core/sock.c linux-4.1.13/net/core/sock.c
---- linux-4.1.13.orig/net/core/sock.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/core/sock.c 2015-11-29 09:23:09.669610195 +0100
-@@ -2370,12 +2370,11 @@
- if (sk->sk_lock.owned)
- __lock_sock(sk);
- sk->sk_lock.owned = 1;
-- spin_unlock(&sk->sk_lock.slock);
-+ spin_unlock_bh(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
-- local_bh_enable();
- }
- EXPORT_SYMBOL(lock_sock_nested);
-
-diff -Nur linux-4.1.13.orig/net/ipv4/icmp.c linux-4.1.13/net/ipv4/icmp.c
---- linux-4.1.13.orig/net/ipv4/icmp.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/ipv4/icmp.c 2015-11-29 09:23:09.669610195 +0100
-@@ -69,6 +69,7 @@
- #include <linux/jiffies.h>
- #include <linux/kernel.h>
- #include <linux/fcntl.h>
-+#include <linux/sysrq.h>
- #include <linux/socket.h>
- #include <linux/in.h>
- #include <linux/inet.h>
-@@ -867,6 +868,30 @@
- }
-
- /*
-+ * 32bit and 64bit have different timestamp lengths, so we check for
-+ * the cookie at offset 20 and verify it is repeated at offset 50
-+ */
-+#define CO_POS0 20
-+#define CO_POS1 50
-+#define CO_SIZE sizeof(int)
-+#define ICMP_SYSRQ_SIZE 57
-+
-+/*
-+ * We got an ICMP_SYSRQ_SIZE-sized ping request. Check for the cookie
-+ * pattern and, if it matches, send the next byte as a trigger to sysrq.
-+ */
-+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
-+{
-+ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
-+ char *p = skb->data;
-+
-+ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
-+ !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
-+ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
-+ handle_sysrq(p[CO_POS0 + CO_SIZE]);
-+}
-+
-+/*
- * Handle ICMP_ECHO ("ping") requests.
- *
- * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -893,6 +918,11 @@
- icmp_param.data_len = skb->len;
- icmp_param.head_len = sizeof(struct icmphdr);
- icmp_reply(&icmp_param, skb);
-+
-+ if (skb->len == ICMP_SYSRQ_SIZE &&
-+ net->ipv4.sysctl_icmp_echo_sysrq) {
-+ icmp_check_sysrq(net, skb);
-+ }
- }
- /* should there be an ICMP stat for ignored echos? */
- return true;
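
To exercise the sysrq-over-ICMP path above, a sender has to build a 57-byte echo payload carrying the configured cookie twice, followed by the sysrq command byte. A hedged sketch of the layout (sysctl_cookie stands in for whatever value was written to icmp_echo_sysrq):

	unsigned char buf[ICMP_SYSRQ_SIZE];	/* 57 bytes */
	int cookie = htonl(sysctl_cookie);

	memset(buf, 0, sizeof(buf));
	memcpy(buf + CO_POS0, &cookie, CO_SIZE);	/* cookie at offset 20 */
	memcpy(buf + CO_POS1, &cookie, CO_SIZE);	/* repeated at offset 50 */
	buf[CO_POS0 + CO_SIZE] = 't';	/* sysrq command, e.g. 't' for tasks */
	buf[CO_POS1 + CO_SIZE] = 't';	/* must match at both positions */
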
-diff -Nur linux-4.1.13.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.13/net/ipv4/sysctl_net_ipv4.c
---- linux-4.1.13.orig/net/ipv4/sysctl_net_ipv4.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/ipv4/sysctl_net_ipv4.c 2015-11-29 09:23:09.669610195 +0100
-@@ -779,6 +779,13 @@
- .proc_handler = proc_dointvec
- },
- {
-+ .procname = "icmp_echo_sysrq",
-+ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec
-+ },
-+ {
- .procname = "icmp_ignore_bogus_error_responses",
- .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
- .maxlen = sizeof(int),
-diff -Nur linux-4.1.13.orig/net/mac80211/rx.c linux-4.1.13/net/mac80211/rx.c
---- linux-4.1.13.orig/net/mac80211/rx.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/mac80211/rx.c 2015-11-29 09:23:09.669610195 +0100
-@@ -3554,7 +3554,7 @@
- struct ieee80211_supported_band *sband;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
-- WARN_ON_ONCE(softirq_count() == 0);
-+ WARN_ON_ONCE_NONRT(softirq_count() == 0);
-
- if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
- goto drop;
-diff -Nur linux-4.1.13.orig/net/netfilter/core.c linux-4.1.13/net/netfilter/core.c
---- linux-4.1.13.orig/net/netfilter/core.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/netfilter/core.c 2015-11-29 09:23:09.669610195 +0100
-@@ -22,11 +22,17 @@
- #include <linux/proc_fs.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
-+#include <linux/locallock.h>
- #include <net/net_namespace.h>
- #include <net/sock.h>
-
- #include "nf_internals.h"
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
-+EXPORT_PER_CPU_SYMBOL(xt_write_lock);
-+#endif
-+
- static DEFINE_MUTEX(afinfo_mutex);
-
- const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-diff -Nur linux-4.1.13.orig/net/packet/af_packet.c linux-4.1.13/net/packet/af_packet.c
---- linux-4.1.13.orig/net/packet/af_packet.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/packet/af_packet.c 2015-11-29 09:23:09.669610195 +0100
-@@ -63,6 +63,7 @@
- #include <linux/if_packet.h>
- #include <linux/wireless.h>
- #include <linux/kernel.h>
-+#include <linux/delay.h>
- #include <linux/kmod.h>
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
-@@ -698,7 +699,7 @@
- if (BLOCK_NUM_PKTS(pbd)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
-- cpu_relax();
-+ cpu_chill();
- }
- }
-
-@@ -960,7 +961,7 @@
- if (!(status & TP_STATUS_BLK_TMO)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
-- cpu_relax();
-+ cpu_chill();
- }
- }
- prb_close_block(pkc, pbd, po, status);
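
cpu_relax() busy-spins, which is fine when another CPU clears the flag, but on RT the clearing context may be a preempted thread on this very CPU, and an RT-priority spinner would never let it run. cpu_chill() sleeps briefly instead, which is why linux/delay.h is added above. Conceptual sketch only; early RT trees literally defined it as msleep(1), while the version added elsewhere in realtime.patch is hrtimer-based:

    #include <linux/delay.h>

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define cpu_chill()    msleep(1)       /* sleep so the flag owner can run */
    #else
    # define cpu_chill()    cpu_relax()
    #endif
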
-diff -Nur linux-4.1.13.orig/net/rds/ib_rdma.c linux-4.1.13/net/rds/ib_rdma.c
---- linux-4.1.13.orig/net/rds/ib_rdma.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/rds/ib_rdma.c 2015-11-29 09:23:09.669610195 +0100
-@@ -34,6 +34,7 @@
- #include <linux/slab.h>
- #include <linux/rculist.h>
- #include <linux/llist.h>
-+#include <linux/delay.h>
-
- #include "rds.h"
- #include "ib.h"
-@@ -286,7 +287,7 @@
- for_each_online_cpu(cpu) {
- flag = &per_cpu(clean_list_grace, cpu);
- while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
-- cpu_relax();
-+ cpu_chill();
- }
- }
-
-diff -Nur linux-4.1.13.orig/net/sched/sch_generic.c linux-4.1.13/net/sched/sch_generic.c
---- linux-4.1.13.orig/net/sched/sch_generic.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/sched/sch_generic.c 2015-11-29 09:23:09.669610195 +0100
-@@ -894,7 +894,7 @@
- /* Wait for outstanding qdisc_run calls. */
- list_for_each_entry(dev, head, close_list)
- while (some_qdisc_is_busy(dev))
-- yield();
-+ msleep(1);
- }
-
- void dev_deactivate(struct net_device *dev)
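
Same livelock theme as the cpu_chill() changes: yield() from a high-priority RT task never yields to a lower-priority task, so a qdisc kept busy by a preempted softirq thread would spin this loop forever, while msleep(1) really blocks and lets the owner finish.
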
-diff -Nur linux-4.1.13.orig/net/sunrpc/svc_xprt.c linux-4.1.13/net/sunrpc/svc_xprt.c
---- linux-4.1.13.orig/net/sunrpc/svc_xprt.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/net/sunrpc/svc_xprt.c 2015-11-29 09:23:09.669610195 +0100
-@@ -341,7 +341,7 @@
- goto out;
- }
-
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
- pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-
- atomic_long_inc(&pool->sp_stats.packets);
-@@ -377,7 +377,7 @@
-
- atomic_long_inc(&pool->sp_stats.threads_woken);
- wake_up_process(rqstp->rq_task);
-- put_cpu();
-+ put_cpu_light();
- goto out;
- }
- rcu_read_unlock();
-@@ -398,7 +398,7 @@
- goto redo_search;
- }
- rqstp = NULL;
-- put_cpu();
-+ put_cpu_light();
- out:
- trace_svc_xprt_do_enqueue(xprt, rqstp);
- }
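
get_cpu() disables preemption, but the enqueue path below takes pool->sp_lock, a sleeping lock on RT, so it must stay preemptible. get_cpu_light()/put_cpu_light() only disable migration, which keeps the CPU number stable without forbidding sleep. Assumed definitions, added to include/linux/smp.h earlier in realtime.patch:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()  migrate_enable()
    #else
    # define get_cpu_light()  get_cpu()
    # define put_cpu_light()  put_cpu()
    #endif
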
-diff -Nur linux-4.1.13.orig/scripts/mkcompile_h linux-4.1.13/scripts/mkcompile_h
---- linux-4.1.13.orig/scripts/mkcompile_h 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/scripts/mkcompile_h 2015-11-29 09:23:09.669610195 +0100
-@@ -4,7 +4,8 @@
- ARCH=$2
- SMP=$3
- PREEMPT=$4
--CC=$5
-+RT=$5
-+CC=$6
-
- vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-
-@@ -57,6 +58,7 @@
- CONFIG_FLAGS=""
- if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
- if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
-+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
- UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
-
- # Truncate to maximum length
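
The extra flag ends up in UTS_VERSION, so an RT kernel identifies itself in uname -v output, e.g. "#1 SMP PREEMPT RT ...". The kbuild hunk that passes the new fifth argument (presumably derived from CONFIG_PREEMPT_RT_FULL) sits elsewhere in realtime.patch.
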
-diff -Nur linux-4.1.13.orig/sound/core/pcm_native.c linux-4.1.13/sound/core/pcm_native.c
---- linux-4.1.13.orig/sound/core/pcm_native.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/sound/core/pcm_native.c 2015-11-29 09:23:09.669610195 +0100
-@@ -123,7 +123,7 @@
- void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
- {
- if (!substream->pcm->nonatomic)
-- local_irq_disable();
-+ local_irq_disable_nort();
- snd_pcm_stream_lock(substream);
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
-@@ -138,7 +138,7 @@
- {
- snd_pcm_stream_unlock(substream);
- if (!substream->pcm->nonatomic)
-- local_irq_enable();
-+ local_irq_enable_nort();
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
-
-@@ -146,7 +146,7 @@
- {
- unsigned long flags = 0;
- if (!substream->pcm->nonatomic)
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- snd_pcm_stream_lock(substream);
- return flags;
- }
-@@ -164,7 +164,7 @@
- {
- snd_pcm_stream_unlock(substream);
- if (!substream->pcm->nonatomic)
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-
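
snd_pcm_stream_lock() takes a spinlock, which RT converts into a sleeping lock, so interrupts must not be hard-disabled around it; nonatomic PCM devices need no IRQ protection here anyway. The _nort helpers touch the IRQ state only on !RT kernels. Assumed definitions, from the irqflags hunks earlier in realtime.patch:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_disable_nort()        do { } while (0)
    # define local_irq_enable_nort()         do { } while (0)
    # define local_irq_save_nort(flags)      local_save_flags(flags)
    # define local_irq_restore_nort(flags)   (void)(flags)
    #else
    # define local_irq_disable_nort()        local_irq_disable()
    # define local_irq_enable_nort()         local_irq_enable()
    # define local_irq_save_nort(flags)      local_irq_save(flags)
    # define local_irq_restore_nort(flags)   local_irq_restore(flags)
    #endif
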
-diff -Nur linux-4.1.13.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.13/sound/soc/intel/atom/sst/sst.c
---- linux-4.1.13.orig/sound/soc/intel/atom/sst/sst.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/sound/soc/intel/atom/sst/sst.c 2015-11-29 09:23:09.673609931 +0100
-@@ -368,8 +368,8 @@
- * initialize by FW or driver when firmware is loaded
- */
- spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
-- sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
-- sst_shim_write64(shim, SST_CSR, shim_regs->csr),
-+ sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
-+ sst_shim_write64(shim, SST_CSR, shim_regs->csr);
- spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
- }
-
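
An unrelated cleanup riding along: the two register writes were joined by commas, forming one comma-expression statement. The behaviour happened to be identical here, but the comma operator is a trap next to control flow, which is why the semicolons matter (illustrative only; "resuming" is a made-up condition):

    if (resuming)
            sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
            sst_shim_write64(shim, SST_CSR, shim_regs->csr);
    /* comma: BOTH writes are conditional */

    if (resuming)
            sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
            sst_shim_write64(shim, SST_CSR, shim_regs->csr);
    /* semicolons: the second write ALWAYS runs, despite the indentation */
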
-diff -Nur linux-4.1.13.orig/virt/kvm/async_pf.c linux-4.1.13/virt/kvm/async_pf.c
---- linux-4.1.13.orig/virt/kvm/async_pf.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/virt/kvm/async_pf.c 2015-11-29 09:23:09.673609931 +0100
-@@ -94,8 +94,8 @@
-
- trace_kvm_async_pf_completed(addr, gva);
-
-- if (waitqueue_active(&vcpu->wq))
-- wake_up_interruptible(&vcpu->wq);
-+ if (swaitqueue_active(&vcpu->wq))
-+ swait_wake_interruptible(&vcpu->wq);
-
- mmput(mm);
- kvm_put_kvm(vcpu->kvm);
-diff -Nur linux-4.1.13.orig/virt/kvm/kvm_main.c linux-4.1.13/virt/kvm/kvm_main.c
---- linux-4.1.13.orig/virt/kvm/kvm_main.c 2015-11-09 23:34:10.000000000 +0100
-+++ linux-4.1.13/virt/kvm/kvm_main.c 2015-11-29 09:23:09.701608072 +0100
-@@ -218,7 +218,7 @@
- vcpu->kvm = kvm;
- vcpu->vcpu_id = id;
- vcpu->pid = NULL;
-- init_waitqueue_head(&vcpu->wq);
-+ init_swait_head(&vcpu->wq);
- kvm_async_pf_vcpu_init(vcpu);
-
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-@@ -1779,7 +1779,7 @@
- void kvm_vcpu_block(struct kvm_vcpu *vcpu)
- {
- ktime_t start, cur;
-- DEFINE_WAIT(wait);
-+ DEFINE_SWAITER(wait);
- bool waited = false;
-
- start = cur = ktime_get();
-@@ -1800,7 +1800,7 @@
- }
-
- for (;;) {
-- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-+ swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
- if (kvm_vcpu_check_block(vcpu) < 0)
- break;
-@@ -1809,7 +1809,7 @@
- schedule();
- }
-
-- finish_wait(&vcpu->wq, &wait);
-+ swait_finish(&vcpu->wq, &wait);
- cur = ktime_get();
-
- out:
-@@ -1825,11 +1825,11 @@
- {
- int me;
- int cpu = vcpu->cpu;
-- wait_queue_head_t *wqp;
-+ struct swait_head *wqp;
-
- wqp = kvm_arch_vcpu_wq(vcpu);
-- if (waitqueue_active(wqp)) {
-- wake_up_interruptible(wqp);
-+ if (swaitqueue_active(wqp)) {
-+ swait_wake_interruptible(wqp);
- ++vcpu->stat.halt_wakeup;
- }
-
-@@ -1930,7 +1930,7 @@
- continue;
- if (vcpu == me)
- continue;
-- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
-+ if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
- continue;
- if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
- continue;
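
These KVM hunks switch the vCPU waitqueue to the RT "simple waitqueue", whose wake path does bounded work under a raw lock and is therefore safe from the hard-IRQ contexts that remain on RT. A usage sketch built from the API names visible above (swait_head, init_swait_head, DEFINE_SWAITER, swait_prepare, swait_finish, swaitqueue_active, swait_wake_interruptible); the header name wait-simple.h is assumed:

    #include <linux/wait-simple.h>
    #include <linux/sched.h>

    static struct swait_head ready_q;  /* init_swait_head(&ready_q) at setup */
    static bool ready;

    static void waiter(void)
    {
            DEFINE_SWAITER(wait);

            for (;;) {
                    swait_prepare(&ready_q, &wait, TASK_INTERRUPTIBLE);
                    if (READ_ONCE(ready))
                            break;
                    schedule();
            }
            swait_finish(&ready_q, &wait);
    }

    static void waker(void)            /* may run from IRQ context on RT */
    {
            WRITE_ONCE(ready, true);
            if (swaitqueue_active(&ready_q))
                    swait_wake_interruptible(&ready_q);
    }
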
diff --git a/target/linux/patches/4.1.13/regmap-default-on.patch b/target/linux/patches/4.1.13/regmap-default-on.patch
deleted file mode 100644
index 8d72224bf..000000000
--- a/target/linux/patches/4.1.13/regmap-default-on.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-diff -Nur linux-4.1.6.orig/drivers/base/regmap/Kconfig linux-4.1.6/drivers/base/regmap/Kconfig
---- linux-4.1.6.orig/drivers/base/regmap/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/base/regmap/Kconfig 2015-08-29 22:18:50.329683337 +0200
-@@ -3,7 +3,7 @@
- # subsystems should select the appropriate symbols.
-
- config REGMAP
-- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
-+ default y
- select LZO_COMPRESS
- select LZO_DECOMPRESS
- select IRQ_DOMAIN if REGMAP_IRQ
-@@ -29,3 +29,4 @@
-
- config REGMAP_IRQ
- bool
-+ default y
diff --git a/target/linux/patches/4.1.13/remove-warn.patch b/target/linux/patches/4.1.13/remove-warn.patch
deleted file mode 100644
index 1f89c710d..000000000
--- a/target/linux/patches/4.1.13/remove-warn.patch
+++ /dev/null
@@ -1,11 +0,0 @@
-diff -Nur linux-4.1.10.orig/drivers/media/v4l2-core/videobuf2-core.c linux-4.1.10/drivers/media/v4l2-core/videobuf2-core.c
---- linux-4.1.10.orig/drivers/media/v4l2-core/videobuf2-core.c 2015-10-03 13:49:38.000000000 +0200
-+++ linux-4.1.10/drivers/media/v4l2-core/videobuf2-core.c 2015-10-18 18:18:47.000000000 +0200
-@@ -1245,7 +1245,6 @@
- return;
-
- __check_once = true;
-- __WARN();
-
- pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
- if (vb->vb2_queue->allow_zero_bytesused)
diff --git a/target/linux/patches/4.1.13/startup.patch b/target/linux/patches/4.1.13/startup.patch
deleted file mode 100644
index d396b75e4..000000000
--- a/target/linux/patches/4.1.13/startup.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-diff -Nur linux-3.13.3.orig/init/main.c linux-3.13.3/init/main.c
---- linux-3.13.3.orig/init/main.c 2014-02-13 23:00:14.000000000 +0100
-+++ linux-3.13.3/init/main.c 2014-02-17 11:35:14.000000000 +0100
-@@ -916,6 +917,8 @@
- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
- pr_err("Warning: unable to open an initial console.\n");
-
-+ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n");
-+
- (void) sys_dup(0);
- (void) sys_dup(0);
- /*
-diff -Nur linux-3.13.6.orig/init/initramfs.c linux-3.13.6/init/initramfs.c
---- linux-3.13.6.orig/init/initramfs.c 2014-03-07 07:07:02.000000000 +0100
-+++ linux-3.13.6/init/initramfs.c 2014-03-15 12:11:31.882731916 +0100
-@@ -622,6 +622,9 @@
- */
- load_default_modules();
- }
-+#ifdef CONFIG_DEVTMPFS_MOUNT
-+ devtmpfs_mount("dev");
-+#endif
- return 0;
- }
- rootfs_initcall(populate_rootfs);
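
A stock kernel only mounts devtmpfs automatically on the real root filesystem, a path an initramfs-only system like OpenADK's never takes; this hunk mounts it on /dev inside the freshly populated initramfs so device nodes exist before init starts. devtmpfs_mount() took the target directory name as its argument in kernels of this era.
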
-diff -Nur linux-3.13.6.orig/init/main.c linux-3.13.6/init/main.c
---- linux-3.13.6.orig/init/main.c 2014-03-07 07:07:02.000000000 +0100
-+++ linux-3.13.6/init/main.c 2014-03-15 12:13:16.459024452 +0100
-@@ -924,7 +924,7 @@
- */
-
- if (!ramdisk_execute_command)
-- ramdisk_execute_command = "/init";
-+ ramdisk_execute_command = "/sbin/init";
-
- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
- ramdisk_execute_command = NULL;