Merge tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel

Pull llvm patches from Behan Webster:
 "These are some initial updates to support compiling the kernel with
  clang.

  These patches have been through the proper reviews to the best of my
  ability, and have been soaking in linux-next for a few weeks.  These
  patches by themselves still do not completely allow clang to be used
  with the kernel code, but lay the foundation for other patches which
  are still under review.

  Several other LLVMLinux patches have already been added via maintainer
  trees"

* tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel:
  x86: LLVMLinux: Fix "incomplete type const struct x86cpu_device_id"
  x86 kbuild: LLVMLinux: More cc-options added for clang
  x86, acpi: LLVMLinux: Remove nested functions from Thinkpad ACPI
  LLVMLinux: Add support for clang to compiler.h and new compiler-clang.h
  LLVMLinux: Remove warning about returning an uninitialized variable
  kbuild: LLVMLinux: Fix LINUX_COMPILER definition script for compilation with clang
  Documentation: LLVMLinux: Update Documentation/dontdiff
  kbuild: LLVMLinux: Adapt warnings for compilation with clang
  kbuild: LLVMLinux: Add Kbuild support for building kernel with Clang
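
For context, the compiler-clang.h change listed above follows the usual kernel
pattern of dispatching on compiler-predefined macros from a common header; a
minimal C sketch of that pattern (illustrative only, not the exact contents of
linux/compiler.h):

	/* Illustrative sketch -- not the exact kernel header text. */
	#ifdef __GNUC__                 /* defined by both gcc and clang */
	# include <linux/compiler-gcc.h>
	#endif
	#ifdef __clang__                /* clang-only overrides come last */
	# include <linux/compiler-clang.h>
	#endif

With the Kbuild pieces above in place, a clang build is then requested by
overriding the compiler on the command line, e.g. "make CC=clang".
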
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 7dbf96b..676fdf5 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -83,8 +83,10 @@
 Description:
 		The /sys/devices/.../wakeup_count attribute contains the number
 		of signaled wakeup events associated with the device.  This
-		attribute is read-only.  If the device is not enabled to wake up
+		attribute is read-only.  If the device is not capable to wake up
 		the system from sleep states, this attribute is not present.
+		If the device is not enabled to wake up the system from sleep
+		states, this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_active_count
 Date:		September 2010
@@ -93,8 +95,10 @@
 		The /sys/devices/.../wakeup_active_count attribute contains the
 		number of times the processing of wakeup events associated with
 		the device was completed (at the kernel level).  This attribute
-		is read-only.  If the device is not enabled to wake up the
-		system from sleep states, this attribute is not present.
+		is read-only.  If the device is not capable to wake up the
+		system from sleep states, this attribute is not present.  If
+		the device is not enabled to wake up the system from sleep
+		states, this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_abort_count
 Date:		February 2012
@@ -104,8 +108,9 @@
 		number of times the processing of a wakeup event associated with
 		the device might have aborted system transition into a sleep
 		state in progress.  This attribute is read-only.  If the device
-		is not enabled to wake up the system from sleep states, this
-		attribute is not present.
+		is not capable to wake up the system from sleep states, this
+		attribute is not present.  If the device is not enabled to wake
+		up the system from sleep states, this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_expire_count
 Date:		February 2012
@@ -114,8 +119,10 @@
 		The /sys/devices/.../wakeup_expire_count attribute contains the
 		number of times a wakeup event associated with the device has
 		been reported with a timeout that expired.  This attribute is
-		read-only.  If the device is not enabled to wake up the system
-		from sleep states, this attribute is not present.
+		read-only.  If the device is not capable to wake up the system
+		from sleep states, this attribute is not present.  If the
+		device is not enabled to wake up the system from sleep states,
+		this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_active
 Date:		September 2010
@@ -124,8 +131,10 @@
 		The /sys/devices/.../wakeup_active attribute contains either 1,
 		or 0, depending on whether or not a wakeup event associated with
 		the device is being processed (1).  This attribute is read-only.
-		If the device is not enabled to wake up the system from sleep
-		states, this attribute is not present.
+		If the device is not capable to wake up the system from sleep
+		states, this attribute is not present.  If the device is not
+		enabled to wake up the system from sleep states, this attribute
+		is empty.
 
 What:		/sys/devices/.../power/wakeup_total_time_ms
 Date:		September 2010
@@ -134,8 +143,9 @@
 		The /sys/devices/.../wakeup_total_time_ms attribute contains
 		the total time of processing wakeup events associated with the
 		device, in milliseconds.  This attribute is read-only.  If the
-		device is not enabled to wake up the system from sleep states,
-		this attribute is not present.
+		device is not capable to wake up the system from sleep states,
+		this attribute is not present.  If the device is not enabled to
+		wake up the system from sleep states, this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_max_time_ms
 Date:		September 2010
@@ -144,8 +154,10 @@
 		The /sys/devices/.../wakeup_max_time_ms attribute contains
 		the maximum time of processing a single wakeup event associated
 		with the device, in milliseconds.  This attribute is read-only.
-		If the device is not enabled to wake up the system from sleep
-		states, this attribute is not present.
+		If the device is not capable to wake up the system from sleep
+		states, this attribute is not present.  If the device is not
+		enabled to wake up the system from sleep states, this attribute
+		is empty.
 
 What:		/sys/devices/.../power/wakeup_last_time_ms
 Date:		September 2010
@@ -156,7 +168,8 @@
 		signaling the last wakeup event associated with the device, in
 		milliseconds.  This attribute is read-only.  If the device is
-		not enabled to wake up the system from sleep states, this
-		attribute is not present.
+		not capable to wake up the system from sleep states, this
+		attribute is not present.  If the device is not enabled to wake
+		up the system from sleep states, this attribute is empty.
 
 What:		/sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date:		February 2012
@@ -165,9 +178,10 @@
 		The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
 		contains the total time the device has been preventing
 		opportunistic transitions to sleep states from occurring.
-		This attribute is read-only.  If the device is not enabled to
+		This attribute is read-only.  If the device is not capable to
 		wake up the system from sleep states, this attribute is not
-		present.
+		present.  If the device is not enabled to wake up the system
+		from sleep states, this attribute is empty.
 
 What:		/sys/devices/.../power/autosuspend_delay_ms
 Date:		September 2010
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
new file mode 100644
index 0000000..191d7bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -0,0 +1,76 @@
+* Freescale enhanced Direct Memory Access (eDMA) Controller
+
+  The eDMA channels have multiplex capability via programmable memory-mapped
+registers. Channels are split into two groups, called DMAMUX0 and DMAMUX1;
+a specific DMA request source can only be multiplexed by a channel of one
+particular group, DMAMUX0 or DMAMUX1, but not both.
+
+* eDMA Controller
+Required properties:
+- compatible :
+	- "fsl,vf610-edma" for eDMA as used on the Vybrid vf610 SoC and similar
+- reg : Specifies the base physical address(es) and size of the eDMA registers.
+	The 1st region is eDMA control register's address and size.
+	The 2nd and the 3rd regions are programmable channel multiplexing
+	control register's address and size.
+- interrupts : A list of interrupt-specifiers, one for each entry in
+	interrupt-names.
+- interrupt-names : Should contain:
+	"edma-tx" - the transmission interrupt
+	"edma-err" - the error interrupt
+- #dma-cells : Must be <2>.
+	The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
+	A specific request source can only be multiplexed by channels of one
+	particular group (DMAMUX).
+	The 2nd cell specifies the request source (slot) ID.
+	See the SoC's reference manual for all the supported request sources.
+- dma-channels : Number of channels supported by the controller
+- clock-names : A list of channel group clock names. Should contain:
+	"dmamux0" - clock name of mux0 group
+	"dmamux1" - clock name of mux1 group
+- clocks : A list of phandle and clock-specifier pairs, one for each entry in
+	clock-names.
+
+Optional properties:
+- big-endian: If present, registers and hardware scatter/gather descriptors
+	of the eDMA are implemented in big endian mode; otherwise they are in
+	little endian mode.
+
+
+Examples:
+
+edma0: dma-controller@40018000 {
+	#dma-cells = <2>;
+	compatible = "fsl,vf610-edma";
+	reg = <0x40018000 0x2000>,
+		<0x40024000 0x1000>,
+		<0x40025000 0x1000>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
+		<0 9 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-names = "edma-tx", "edma-err";
+	dma-channels = <32>;
+	clock-names = "dmamux0", "dmamux1";
+	clocks = <&clks VF610_CLK_DMAMUX0>,
+		<&clks VF610_CLK_DMAMUX1>;
+};
+
+
+* DMA clients
+DMA client drivers that use the DMA function must use the format described
+in the dma.txt file, using a two-cell specifier for each channel: the 1st
+cell specifies the channel group (DMAMUX) in which this request can be
+multiplexed, and the 2nd cell specifies the request source.
+
+Examples:
+
+sai2: sai@40031000 {
+	compatible = "fsl,vf610-sai";
+	reg = <0x40031000 0x1000>;
+	interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+	clock-names = "sai";
+	clocks = <&clks VF610_CLK_SAI2>;
+	dma-names = "tx", "rx";
+	dmas = <&edma0 0 21>,
+		<&edma0 0 20>;
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
new file mode 100644
index 0000000..d75a9d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -0,0 +1,41 @@
+QCOM BAM DMA controller
+
+Required properties:
+- compatible: must contain "qcom,bam-v1.4.0" for MSM8974
+- reg: Address range for DMA registers
+- interrupts: Should contain the one interrupt shared by all channels
+- #dma-cells: must be <1>, the cell in the dmas property of the client device
+  represents the channel number
+- clocks: required clock
+- clock-names: must contain "bam_clk" entry
+- qcom,ee : indicates the active Execution Environment identifier (0-7) used in
+  the secure world.
+
+Example:
+
+	uart-bam: dma@f9984000 {
+		compatible = "qcom,bam-v1.4.0";
+		reg = <0xf9984000 0x15000>;
+		interrupts = <0 94 0>;
+		clocks = <&gcc GCC_BAM_DMA_AHB_CLK>;
+		clock-names = "bam_clk";
+		#dma-cells = <1>;
+		qcom,ee = <0>;
+	};
+
+DMA clients must use the format described in the dma.txt file, using a two cell
+specifier for each channel.
+
+Example:
+	serial@f991e000 {
+		compatible = "qcom,msm-uart";
+		reg = <0xf991e000 0x1000>,
+			<0xf9944000 0x19000>;
+		interrupts = <0 108 0>;
+		clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+			<&gcc GCC_BLSP1_AHB_CLK>;
+		clock-names = "core", "iface";
+
+		dmas = <&uart-bam 0>, <&uart-bam 1>;
+		dma-names = "rx", "tx";
+	};
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
new file mode 100644
index 0000000..ecbc96a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
@@ -0,0 +1,43 @@
+* CSR SiRFSoC DMA controller
+
+See dma.txt first
+
+Required properties:
+- compatible: Should be "sirf,prima2-dmac" or "sirf,marco-dmac"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: must be <1>. This is the number of integer cells needed to
+    specify a channel in the dmas property of a client device.
+- clocks: the clock required by the DMA controller
+
+Example:
+
+Controller:
+dmac0: dma-controller@b00b0000 {
+	compatible = "sirf,prima2-dmac";
+	reg = <0xb00b0000 0x10000>;
+	interrupts = <12>;
+	clocks = <&clks 24>;
+	#dma-cells = <1>;
+};
+
+
+Client:
+Fill in the specific DMA request line in dmas. In the example below, the spi0
+read channel uses request line 9 of the 2nd DMA controller and its write
+channel uses line 4; the spi1 read channel uses request line 12 of the 1st
+DMA controller and its write channel uses line 13:
+
+spi0: spi@b00d0000 {
+	compatible = "sirf,prima2-spi";
+	dmas = <&dmac1 9>,
+		<&dmac1 4>;
+	dma-names = "rx", "tx";
+};
+
+spi1: spi@b0170000 {
+	compatible = "sirf,prima2-spi";
+	dmas = <&dmac0 12>,
+		<&dmac0 13>;
+	dma-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt
index df1b308..f77148f 100644
--- a/Documentation/devicetree/bindings/leds/leds-gpio.txt
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.txt
@@ -21,6 +21,8 @@
   on).  The "keep" setting will keep the LED at whatever its current
   state is, without producing a glitch.  The default is off if this
   property is not present.
+- retain-state-suspended: (optional) The suspend state can be retained, for
+  example for a charger LED GPIO.
 
 Examples:
 
@@ -50,3 +52,13 @@
 		default-state = "on";
 	};
 };
+
+leds {
+	compatible = "gpio-leds";
+
+	charger-led {
+		gpios = <&gpio1 2 0>;
+		linux,default-trigger = "max8903-charger-charging";
+		retain-state-suspended;
+	};
+};
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index abd9e3c..1413f39 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -10,9 +10,44 @@
 - fsl,mc13xxx-uses-touch : Indicate the touchscreen controller is being used
 
 Sub-nodes:
+- leds : Contains the LED nodes and initial register values in the property
+  "led-control". The number of registers depends on the IC used: 6 for
+  MC13783, 4 for MC13892, 1 for MC34708. See the datasheet for the bit
+  definitions of these registers.
+  - #address-cells: Must be 1.
+  - #size-cells: Must be 0.
+  Each LED node should contain "reg", which is used as the LED ID (described
+  below). The optional properties "label" and "linux,default-trigger" are
+  described in Documentation/devicetree/bindings/leds/common.txt.
 - regulators : Contain the regulator nodes. The regulators are bound using
   their names as listed below with their registers and bits for enabling.
 
+MC13783 LED IDs:
+    0  : Main display
+    1  : AUX display
+    2  : Keypad
+    3  : Red 1
+    4  : Green 1
+    5  : Blue 1
+    6  : Red 2
+    7  : Green 2
+    8  : Blue 2
+    9  : Red 3
+    10 : Green 3
+    11 : Blue 3
+
+MC13892 LED IDs:
+    0  : Main display
+    1  : AUX display
+    2  : Keypad
+    3  : Red
+    4  : Green
+    5  : Blue
+
+MC34708 LED IDs:
+    0  : Charger Red
+    1  : Charger Green
+
 MC13783 regulators:
     sw1a      : regulator SW1A      (register 24, bit 0)
     sw1b      : regulator SW1B      (register 25, bit 0)
@@ -89,6 +124,18 @@
 		interrupt-parent = <&gpio0>;
 		interrupts = <8>;
 
+		leds {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			led-control = <0x000 0x000 0x0e0 0x000>;
+
+			sysled {
+				reg = <3>;
+				label = "system:red:live";
+				linux,default-trigger = "heartbeat";
+			};
+		};
+
 		regulators {
 			sw1_reg: mc13892__sw1 {
 				regulator-min-microvolt = <600000>;
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index b93e9a9..3aa4a8f 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -20,15 +20,6 @@
                     have.
 - interrupt-parent: The phandle for the interrupt controller that
                     services interrupts for this device.
-- fsl,mode:         The operating mode for the SSI interface.
-                    "i2s-slave" - I2S mode, SSI is clock slave
-                    "i2s-master" - I2S mode, SSI is clock master
-                    "lj-slave" - left-justified mode, SSI is clock slave
-                    "lj-master" - l.j. mode, SSI is clock master
-                    "rj-slave" - right-justified mode, SSI is clock slave
-                    "rj-master" - r.j., SSI is clock master
-                    "ac97-slave" - AC97 mode, SSI is clock slave
-                    "ac97-master" - AC97 mode, SSI is clock master
 - fsl,playback-dma: Phandle to a node for the DMA channel to use for
                     playback of audio.  This is typically dictated by SOC
                     design.  See the notes below.
@@ -47,6 +38,9 @@
                     be connected together, and SRFS and STFS be connected
                     together.  This would still allow different sample sizes,
                     but not different sample rates.
+ - clocks:          "ipg" - Required clock for the SSI unit
+                    "baud" - Required clock for SSI master mode. Otherwise this
+                      clock is not used.
 
 Required are also ac97 link bindings if ac97 is used. See
 Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
@@ -64,6 +58,15 @@
 		    Documentation/devicetree/bindings/dma/dma.txt.
 - dma-names:	    Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
 		    is not defined.
+- fsl,mode:         The operating mode for the SSI interface.
+                    "i2s-slave" - I2S mode, SSI is clock slave
+                    "i2s-master" - I2S mode, SSI is clock master
+                    "lj-slave" - left-justified mode, SSI is clock slave
+                    "lj-master" - l.j. mode, SSI is clock master
+                    "rj-slave" - right-justified mode, SSI is clock slave
+                    "rj-master" - r.j., SSI is clock master
+                    "ac97-slave" - AC97 mode, SSI is clock slave
+                    "ac97-master" - AC97 mode, SSI is clock master
 
 Child 'codec' node required properties:
 - compatible:       Compatible list, contains the name of the codec
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
index 8f081c9..130cd17 100644
--- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
+++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
@@ -8,7 +8,13 @@
 - interrupts: pair specifying rx and tx irq
 - clocks: phandle to the spi clock
 - cs-gpios: see spi-bus.txt
-- efm32,location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
+
+Recommended properties :
+- efm32,location: Value to write to the ROUTE register's LOCATION bitfield to
+                  configure the pinmux for the device, see datasheet for values.
+                  If the "efm32,location" property is not provided, whatever is
+                  already configured in the hardware is kept, i.e. either the
+                  reset default 0 or whatever the bootloader set up.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt b/Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt
new file mode 100644
index 0000000..321be66
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt
@@ -0,0 +1,16 @@
+gpio-backlight bindings
+
+Required properties:
+  - compatible: "gpio-backlight"
+  - gpios: describes the gpio that is used for enabling/disabling the backlight.
+    Refer to bindings/gpio/gpio.txt for more details.
+
+Optional properties:
+  - default-on: enable the backlight at boot.
+
+Example:
+	backlight {
+		compatible = "gpio-backlight";
+		gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+		default-on;
+	};
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index efca5c1..eba7901 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -202,7 +202,7 @@
 				unsigned long *);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	int (*launder_page)(struct page *);
-	int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
+	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
 	int (*error_remove_page)(struct address_space *, struct page *);
 	int (*swap_activate)(struct file *);
 	int (*swap_deactivate)(struct file *);
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 94eb862..617f6d7 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -596,7 +596,7 @@
 	/* migrate the contents of a page to the specified target */
 	int (*migratepage) (struct page *, struct page *);
 	int (*launder_page) (struct page *);
-	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+	int (*is_partially_uptodate) (struct page *, unsigned long,
 					unsigned long);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page) (struct mapping *mapping, struct page *page);
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b6c67d5..03e50b4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2563,6 +2563,13 @@
 
 	pcmv=		[HW,PCMCIA] BadgePAD 4
 
+	pd_ignore_unused
+			[PM]
+			Keep all power-domains already enabled by bootloader on,
+			even if no driver has claimed them. This is useful
+			for debug and development, but should not be
+			needed on a platform with proper driver support.
+
 	pd.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index beaa87a..6dc67b1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6543,7 +6543,7 @@
 
 OSD LIBRARY and FILESYSTEM
 M:	Boaz Harrosh <bharrosh@panasas.com>
-M:	Benny Halevy <bhalevy@tonian.com>
+M:	Benny Halevy <bhalevy@primarydata.com>
 L:	osd-dev@open-osd.org
 W:	http://open-osd.org
 T:	git git://git.open-osd.org/open-osd.git
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f6c6b34..b7ff9a3 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -22,6 +22,7 @@
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5db05f6a..ab438cb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -24,6 +24,7 @@
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
+	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 55d3f79..9d72674 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -271,6 +271,7 @@
 				reg = <0xb00b0000 0x10000>;
 				interrupts = <12>;
 				clocks = <&clks 24>;
+				#dma-cells = <1>;
 			};
 
 			dmac1: dma-controller@b0160000 {
@@ -279,6 +280,7 @@
 				reg = <0xb0160000 0x10000>;
 				interrupts = <13>;
 				clocks = <&clks 25>;
+				#dma-cells = <1>;
 			};
 
 			vip@b00C0000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 2014552..1e82571 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -287,6 +287,7 @@
 				reg = <0xb00b0000 0x10000>;
 				interrupts = <12>;
 				clocks = <&clks 24>;
+				#dma-cells = <1>;
 			};
 
 			dmac1: dma-controller@b0160000 {
@@ -295,6 +296,7 @@
 				reg = <0xb0160000 0x10000>;
 				interrupts = <13>;
 				clocks = <&clks 25>;
+				#dma-cells = <1>;
 			};
 
 			vip@b00C0000 {
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 380ac4f..b974184 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/domain.h>
 #include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
 
 #define IOMEM(x)	(x)
 
@@ -174,6 +175,47 @@
 	restore_irqs_notrace \oldcpsr
 	.endm
 
+/*
+ * Get current thread_info.
+ */
+	.macro	get_thread_info, rd
+ ARM(	mov	\rd, sp, lsr #13	)
+ THUMB(	mov	\rd, sp			)
+ THUMB(	lsr	\rd, \rd, #13		)
+	mov	\rd, \rd, lsl #13
+	.endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+	.macro	inc_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	add	\tmp, \tmp, #1			@ increment it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	sub	\tmp, \tmp, #1			@ decrement it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	get_thread_info \ti
+	dec_preempt_count \ti, \tmp
+	.endm
+#else
+	.macro	inc_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	.endm
+#endif
+
 #define USER(x...)				\
 9999:	x;					\
 	.pushsection __ex_table,"a";		\
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 42f0889..c651e3b 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -221,4 +221,23 @@
 #define	cpu_is_xscale()	1
 #endif
 
+/*
+ * Marvell's PJ4 core is based on the ARMv7 version. It has some modifications
+ * to its coprocessor settings. For this reason, we need a way to distinguish
+ * it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4()	0
+#else
+static inline int cpu_is_pj4(void)
+{
+	unsigned int id;
+
+	id = read_cpuid_id();
+	if ((id & 0xfffffff0) == 0x562f5840)
+		return 1;
+
+	return 0;
+}
+#endif
 #endif
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 73ddd72..4651f69 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,7 +7,7 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H
 
-#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
 #include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>
@@ -103,8 +103,7 @@
 	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	/* ARM tasks don't change audit architectures on the fly. */
 	return AUDIT_ARCH_ARM;
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 90c50d4..5d1286d 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -39,7 +39,7 @@
 	if (!csize)
 		return 0;
 
-	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
 	if (!vaddr)
 		return -ENOMEM;
 
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 39f89fb..1420725 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -236,11 +236,6 @@
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	@
 	@ 32-bit wide "mov pc, reg"
 	@
@@ -306,12 +301,6 @@
 	.endm
 #endif	/* ifdef CONFIG_CPU_V7M / else */
 
-	.macro	get_thread_info, rd
-	mov	\rd, sp
-	lsr	\rd, \rd, #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	@
 	@ 32-bit wide "mov pc, reg"
 	@
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
index c311ed9..0bf5d64 100644
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/kernel/kprobes-common.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <asm/opcodes.h>
 
 #include "kprobes.h"
 
@@ -153,7 +154,8 @@
 
 	if (handler) {
 		/* We can emulate the instruction in (possibly) modified form */
-		asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+		asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+						   (rn << 16) | reglist);
 		asi->insn_handler = handler;
 		return INSN_GOOD;
 	}
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 87839de..9db4b65 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <asm/system_info.h>
+#include <asm/opcodes.h>
 
 #include "kprobes-test.h"
 
@@ -159,9 +160,9 @@
 	TEST_SUPPORTED("cmp	sp, #0x1000");
 
 	/* Data-processing with PC as shift*/
-	TEST_UNSUPPORTED(".word 0xe15c0f1e	@ cmp	r12, r14, asl pc")
-	TEST_UNSUPPORTED(".word 0xe1a0cf1e	@ mov	r12, r14, asl pc")
-	TEST_UNSUPPORTED(".word 0xe08caf1e	@ add	r10, r12, r14, asl pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) "	@ cmp	r12, r14, asl pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) "	@ mov	r12, r14, asl pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) "	@ add	r10, r12, r14, asl pc")
 
 	/* Data-processing with PC as shift*/
 	TEST_UNSUPPORTED("movs	pc, r1")
@@ -203,7 +204,7 @@
 	TEST("mrs	r0, cpsr")
 	TEST("mrspl	r7, cpsr")
 	TEST("mrs	r14, cpsr")
-	TEST_UNSUPPORTED(".word 0xe10ff000	@ mrs r15, cpsr")
+	TEST_UNSUPPORTED(__inst_arm(0xe10ff000) "	@ mrs r15, cpsr")
 	TEST_UNSUPPORTED("mrs	r0, spsr")
 	TEST_UNSUPPORTED("mrs	lr, spsr")
 
@@ -219,8 +220,8 @@
 	TEST_R("clzeq	r7, r",14,0x1,"")
 	TEST_R("clz	lr, r",7, 0xffffffff,"")
 	TEST(  "clz	r4, sp")
-	TEST_UNSUPPORTED(".word 0x016fff10	@ clz pc, r0")
-	TEST_UNSUPPORTED(".word 0x016f0f1f	@ clz r0, pc")
+	TEST_UNSUPPORTED(__inst_arm(0x016fff10) "	@ clz pc, r0")
+	TEST_UNSUPPORTED(__inst_arm(0x016f0f1f) "	@ clz r0, pc")
 
 #if __LINUX_ARM_ARCH__ >= 6
 	TEST_UNSUPPORTED("bxj	r0")
@@ -229,7 +230,7 @@
 	TEST_BF_R("blx	r",0,2f,"")
 	TEST_BB_R("blx	r",7,2f,"")
 	TEST_BF_R("blxeq	r",14,2f,"")
-	TEST_UNSUPPORTED(".word 0x0120003f	@ blx pc")
+	TEST_UNSUPPORTED(__inst_arm(0x0120003f) "	@ blx pc")
 
 	TEST_RR(   "qadd	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(   "qaddvs	lr, r",9, VAL2,", r",8, VAL1,"")
@@ -243,190 +244,190 @@
 	TEST_RR(   "qdsub	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(   "qdsubvs	lr, r",9, VAL2,", r",8, VAL1,"")
 	TEST_R(    "qdsub	lr, r",9, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe101f050	@ qadd pc, r0, r1")
-	TEST_UNSUPPORTED(".word 0xe121f050	@ qsub pc, r0, r1")
-	TEST_UNSUPPORTED(".word 0xe141f050	@ qdadd pc, r0, r1")
-	TEST_UNSUPPORTED(".word 0xe161f050	@ qdsub pc, r0, r1")
-	TEST_UNSUPPORTED(".word 0xe16f2050	@ qdsub r2, r0, pc")
-	TEST_UNSUPPORTED(".word 0xe161205f	@ qdsub r2, pc, r1")
+	TEST_UNSUPPORTED(__inst_arm(0xe101f050) "	@ qadd pc, r0, r1")
+	TEST_UNSUPPORTED(__inst_arm(0xe121f050) "	@ qsub pc, r0, r1")
+	TEST_UNSUPPORTED(__inst_arm(0xe141f050) "	@ qdadd pc, r0, r1")
+	TEST_UNSUPPORTED(__inst_arm(0xe161f050) "	@ qdsub pc, r0, r1")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f2050) "	@ qdsub r2, r0, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe161205f) "	@ qdsub r2, pc, r1")
 
 	TEST_UNSUPPORTED("bkpt	0xffff")
 	TEST_UNSUPPORTED("bkpt	0x0000")
 
-	TEST_UNSUPPORTED(".word 0xe1600070 @ smc #0")
+	TEST_UNSUPPORTED(__inst_arm(0xe1600070) " @ smc #0")
 
 	TEST_GROUP("Halfword multiply and multiply-accumulate")
 
 	TEST_RRR(    "smlabb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlabbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlabb	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe10f3281 @ smlabb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe10f3281) " @ smlabb pc, r1, r2, r3")
 	TEST_RRR(    "smlatb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlatbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlatb	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe10f32a1 @ smlatb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe10f32a1) " @ smlatb pc, r1, r2, r3")
 	TEST_RRR(    "smlabt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlabtge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlabt	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe10f32c1 @ smlabt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe10f32c1) " @ smlabt pc, r1, r2, r3")
 	TEST_RRR(    "smlatt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlattge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlatt	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe10f32e1 @ smlatt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe10f32e1) " @ smlatt pc, r1, r2, r3")
 
 	TEST_RRR(    "smlawb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlawbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlawb	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe12f3281 @ smlawb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe12f3281) " @ smlawb pc, r1, r2, r3")
 	TEST_RRR(    "smlawt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "smlawtge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "smlawt	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe12f32c1 @ smlawt pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe12032cf @ smlawt r0, pc, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe1203fc1 @ smlawt r0, r1, pc, r3")
-	TEST_UNSUPPORTED(".word 0xe120f2c1 @ smlawt r0, r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe12f32c1) " @ smlawt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe12032cf) " @ smlawt r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe1203fc1) " @ smlawt r0, r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe120f2c1) " @ smlawt r0, r1, r2, pc")
 
 	TEST_RR(    "smulwb	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smulwbge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smulwb	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe12f02a1 @ smulwb pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe12f02a1) " @ smulwb pc, r1, r2")
 	TEST_RR(    "smulwt	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smulwtge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smulwt	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe12f02e1 @ smulwt pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe12f02e1) " @ smulwt pc, r1, r2")
 
 	TEST_RRRR(  "smlalbb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlalbble	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlalbb	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe14f1382 @ smlalbb pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe141f382 @ smlalbb r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe14f1382) " @ smlalbb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe141f382) " @ smlalbb r1, pc, r2, r3")
 	TEST_RRRR(  "smlaltb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlaltble	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlaltb	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe14f13a2 @ smlaltb pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe141f3a2 @ smlaltb r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe14f13a2) " @ smlaltb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe141f3a2) " @ smlaltb r1, pc, r2, r3")
 	TEST_RRRR(  "smlalbt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlalbtle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlalbt	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe14f13c2 @ smlalbt pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe141f3c2 @ smlalbt r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe14f13c2) " @ smlalbt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe141f3c2) " @ smlalbt r1, pc, r2, r3")
 	TEST_RRRR(  "smlaltt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlalttle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlaltt	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe14f13e2 @ smlalbb pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe140f3e2 @ smlalbb r0, pc, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe14013ef @ smlalbb r0, r1, pc, r3")
-	TEST_UNSUPPORTED(".word 0xe1401fe2 @ smlalbb r0, r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe14f13e2) " @ smlalbb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe140f3e2) " @ smlalbb r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe14013ef) " @ smlalbb r0, r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe1401fe2) " @ smlalbb r0, r1, r2, pc")
 
 	TEST_RR(    "smulbb	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smulbbge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smulbb	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe16f0281 @ smulbb pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f0281) " @ smulbb pc, r1, r2")
 	TEST_RR(    "smultb	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smultbge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smultb	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe16f02a1 @ smultb pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f02a1) " @ smultb pc, r1, r2")
 	TEST_RR(    "smulbt	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smulbtge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smulbt	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe16f02c1 @ smultb pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f02c1) " @ smultb pc, r1, r2")
 	TEST_RR(    "smultt	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "smulttge	r7, r",8, VAL3,", r",9, VAL1,"")
 	TEST_R(     "smultt	lr, r",1, VAL2,", r13")
-	TEST_UNSUPPORTED(".word 0xe16f02e1 @ smultt pc, r1, r2")
-	TEST_UNSUPPORTED(".word 0xe16002ef @ smultt r0, pc, r2")
-	TEST_UNSUPPORTED(".word 0xe1600fe1 @ smultt r0, r1, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
 
 	TEST_GROUP("Multiply and multiply-accumulate")
 
 	TEST_RR(    "mul	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "mulls	r7, r",8, VAL2,", r",9, VAL2,"")
 	TEST_R(     "mul	lr, r",4, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe00f0291 @ mul pc, r1, r2")
-	TEST_UNSUPPORTED(".word 0xe000029f @ mul r0, pc, r2")
-	TEST_UNSUPPORTED(".word 0xe0000f91 @ mul r0, r1, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe00f0291) " @ mul pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc")
 	TEST_RR(    "muls	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "mullss	r7, r",8, VAL2,", r",9, VAL2,"")
 	TEST_R(     "muls	lr, r",4, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe01f0291 @ muls pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2")
 
 	TEST_RRR(    "mla	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "mlahi	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "mla	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe02f3291 @ mla pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3")
 	TEST_RRR(    "mlas	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(    "mlahis	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(     "mlas	lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe03f3291 @ mlas pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3")
 
 #if __LINUX_ARM_ARCH__ >= 6
 	TEST_RR(  "umaal	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(  "umaalls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
 	TEST_R(   "umaal	lr, r12, r",11,VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe041f392 @ umaal pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
-	TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+	TEST_UNSUPPORTED(__inst_arm(0xe041f392) " @ umaal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe04f0392) " @ umaal r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0500090) " @ undef")
+	TEST_UNSUPPORTED(__inst_arm(0xe05fff9f) " @ undef")
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
 	TEST_RRR(  "mls		r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(  "mlshi	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
 	TEST_RR(   "mls		lr, r",1, VAL2,", r",2, VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe06f3291 @ mls pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe060329f @ mls r0, pc, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0603f91 @ mls r0, r1, pc, r3")
-	TEST_UNSUPPORTED(".word 0xe060f291 @ mls r0, r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe06f3291) " @ mls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe060329f) " @ mls r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0603f91) " @ mls r0, r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe060f291) " @ mls r0, r1, r2, pc")
 #endif
 
-	TEST_UNSUPPORTED(".word 0xe0700090 @ undef")
-	TEST_UNSUPPORTED(".word 0xe07fff9f @ undef")
+	TEST_UNSUPPORTED(__inst_arm(0xe0700090) " @ undef")
+	TEST_UNSUPPORTED(__inst_arm(0xe07fff9f) " @ undef")
 
 	TEST_RR(  "umull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(  "umullls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
 	TEST_R(   "umull	lr, r12, r",11,VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe081f392 @ umull pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe08f1392 @ umull r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3")
 	TEST_RR(  "umulls	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(  "umulllss	r7, r8, r",9, VAL2,", r",10, VAL1,"")
 	TEST_R(   "umulls	lr, r12, r",11,VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe091f392 @ umulls pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe09f1392 @ umulls r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3")
 
 	TEST_RRRR(  "umlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "umlalle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "umlal	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe0af1392 @ umlal pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0a1f392 @ umlal r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3")
 	TEST_RRRR(  "umlals	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "umlalles	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "umlals	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe0bf1392 @ umlals pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0b1f392 @ umlals r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3")
 
 	TEST_RR(  "smull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(  "smullls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
 	TEST_R(   "smull	lr, r12, r",11,VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe0c1f392 @ smull pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0cf1392 @ smull r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3")
 	TEST_RR(  "smulls	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(  "smulllss	r7, r8, r",9, VAL2,", r",10, VAL1,"")
 	TEST_R(   "smulls	lr, r12, r",11,VAL3,", r13")
-	TEST_UNSUPPORTED(".word 0xe0d1f392 @ smulls pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0df1392 @ smulls r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3")
 
 	TEST_RRRR(  "smlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlalle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlal	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe0ef1392 @ smlal pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0e1f392 @ smlal r1, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3")
 	TEST_RRRR(  "smlals	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
 	TEST_RRRR(  "smlalles	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
 	TEST_RRR(   "smlals	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-	TEST_UNSUPPORTED(".word 0xe0ff1392 @ smlals pc, r1, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0f0f392 @ smlals r0, pc, r2, r3")
-	TEST_UNSUPPORTED(".word 0xe0f0139f @ smlals r0, r1, pc, r3")
-	TEST_UNSUPPORTED(".word 0xe0f01f92 @ smlals r0, r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0f0139f) " @ smlals r0, r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe0f01f92) " @ smlals r0, r1, r2, pc")
 
 	TEST_GROUP("Synchronization primitives")
 
@@ -435,28 +436,28 @@
 	TEST_R( "swpvs	r0, r",1,VAL1,", [sp]")
 	TEST_RP("swp	sp, r",14,VAL2,", [r",12,13*4,"]")
 #else
-	TEST_UNSUPPORTED(".word 0xe108e097 @ swp	lr, r7, [r8]")
-	TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs	r0, r1, [sp]")
-	TEST_UNSUPPORTED(".word 0xe10cd09e @ swp	sp, r14 [r12]")
+	TEST_UNSUPPORTED(__inst_arm(0xe108e097) " @ swp	lr, r7, [r8]")
+	TEST_UNSUPPORTED(__inst_arm(0x610d0091) " @ swpvs	r0, r1, [sp]")
+	TEST_UNSUPPORTED(__inst_arm(0xe10cd09e) " @ swp	sp, r14 [r12]")
 #endif
-	TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
-	TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
-	TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
+	TEST_UNSUPPORTED(__inst_arm(0xe102f091) " @ swp pc, r1, [r2]")
+	TEST_UNSUPPORTED(__inst_arm(0xe102009f) " @ swp r0, pc, [r2]")
+	TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]")
 #if __LINUX_ARM_ARCH__ < 6
 	TEST_RP("swpb	lr, r",7,VAL2,", [r",8,0,"]")
 	TEST_R( "swpvsb	r0, r",1,VAL1,", [sp]")
 #else
-	TEST_UNSUPPORTED(".word 0xe148e097 @ swpb	lr, r7, [r8]")
-	TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb	r0, r1, [sp]")
+	TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb	lr, r7, [r8]")
+	TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb	r0, r1, [sp]")
 #endif
-	TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
+	TEST_UNSUPPORTED(__inst_arm(0xe142f091) " @ swpb pc, r1, [r2]")
 
-	TEST_UNSUPPORTED(".word	0xe1100090") /* Unallocated space */
-	TEST_UNSUPPORTED(".word	0xe1200090") /* Unallocated space */
-	TEST_UNSUPPORTED(".word	0xe1300090") /* Unallocated space */
-	TEST_UNSUPPORTED(".word	0xe1500090") /* Unallocated space */
-	TEST_UNSUPPORTED(".word	0xe1600090") /* Unallocated space */
-	TEST_UNSUPPORTED(".word	0xe1700090") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1100090)) /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1200090)) /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1300090)) /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1500090)) /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1600090)) /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe1700090)) /* Unallocated space */
 #if __LINUX_ARM_ARCH__ >= 6
 	TEST_UNSUPPORTED("ldrex	r2, [sp]")
 #endif
@@ -476,9 +477,9 @@
 	TEST_RPR(  "strneh	r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
 	TEST_RPR(  "strh	r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
 	TEST_RPR(  "strh	r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
-	TEST_UNSUPPORTED(".word 0xe1afc0ba	@ strh r12, [pc, r10]!")
-	TEST_UNSUPPORTED(".word 0xe089f0bb	@ strh pc, [r9], r11")
-	TEST_UNSUPPORTED(".word 0xe089a0bf	@ strh r10, [r9], pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) "	@ strh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe089f0bb) "	@ strh pc, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) "	@ strh r10, [r9], pc")
 
 	TEST_PR(   "ldrh	r0, [r",0,  48,", -r",2, 24,"]")
 	TEST_PR(   "ldrcsh	r14, [r",13,0, ", r",12, 48,"]")
@@ -486,9 +487,9 @@
 	TEST_PR(   "ldrcch	r12, [r",11,48,", -r",10,24,"]!")
 	TEST_PR(   "ldrh	r2, [r",3,  24,"], r",4, 48,"")
 	TEST_PR(   "ldrh	r10, [r",9, 48,"], -r",11,24,"")
-	TEST_UNSUPPORTED(".word 0xe1bfc0ba	@ ldrh r12, [pc, r10]!")
-	TEST_UNSUPPORTED(".word 0xe099f0bb	@ ldrh pc, [r9], r11")
-	TEST_UNSUPPORTED(".word 0xe099a0bf	@ ldrh r10, [r9], pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) "	@ ldrh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe099f0bb) "	@ ldrh pc, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) "	@ ldrh r10, [r9], pc")
 
 	TEST_RP(   "strh	r",0, VAL1,", [r",1, 24,", #-2]")
 	TEST_RP(   "strmih	r",14,VAL2,", [r",13,0, ", #2]")
@@ -496,8 +497,8 @@
 	TEST_RP(   "strplh	r",12,VAL2,", [r",11,24,", #-4]!")
 	TEST_RP(   "strh	r",2, VAL1,", [r",3, 24,"], #48")
 	TEST_RP(   "strh	r",10,VAL2,", [r",9, 64,"], #-48")
-	TEST_UNSUPPORTED(".word 0xe1efc3b0	@ strh r12, [pc, #48]!")
-	TEST_UNSUPPORTED(".word 0xe0c9f3b0	@ strh pc, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe1efc3b0) "	@ strh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) "	@ strh pc, [r9], #48")
 
 	TEST_P(	   "ldrh	r0, [r",0,  24,", #-2]")
 	TEST_P(	   "ldrvsh	r14, [r",13,0, ", #2]")
@@ -506,8 +507,8 @@
 	TEST_P(	   "ldrh	r2, [r",3,  24,"], #48")
 	TEST_P(	   "ldrh	r10, [r",9, 64,"], #-48")
 	TEST(      "ldrh	r0, [pc, #0]")
-	TEST_UNSUPPORTED(".word 0xe1ffc3b0	@ ldrh r12, [pc, #48]!")
-	TEST_UNSUPPORTED(".word 0xe0d9f3b0	@ ldrh pc, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe1ffc3b0) "	@ ldrh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) "	@ ldrh pc, [r9], #48")
 
 	TEST_PR(   "ldrsb	r0, [r",0,  48,", -r",2, 24,"]")
 	TEST_PR(   "ldrhisb	r14, [r",13,0,", r",12,  48,"]")
@@ -515,8 +516,8 @@
 	TEST_PR(   "ldrlssb	r12, [r",11,48,", -r",10,24,"]!")
 	TEST_PR(   "ldrsb	r2, [r",3,  24,"], r",4, 48,"")
 	TEST_PR(   "ldrsb	r10, [r",9, 48,"], -r",11,24,"")
-	TEST_UNSUPPORTED(".word 0xe1bfc0da	@ ldrsb r12, [pc, r10]!")
-	TEST_UNSUPPORTED(".word 0xe099f0db	@ ldrsb pc, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) "	@ ldrsb r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe099f0db) "	@ ldrsb pc, [r9], r11")
 
 	TEST_P(	   "ldrsb	r0, [r",0,  24,", #-1]")
 	TEST_P(	   "ldrgesb	r14, [r",13,0, ", #1]")
@@ -525,8 +526,8 @@
 	TEST_P(	   "ldrsb	r2, [r",3,  24,"], #48")
 	TEST_P(	   "ldrsb	r10, [r",9, 64,"], #-48")
 	TEST(      "ldrsb	r0, [pc, #0]")
-	TEST_UNSUPPORTED(".word 0xe1ffc3d0	@ ldrsb r12, [pc, #48]!")
-	TEST_UNSUPPORTED(".word 0xe0d9f3d0	@ ldrsb pc, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe1ffc3d0) "	@ ldrsb r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) "	@ ldrsb pc, [r9], #48")
 
 	TEST_PR(   "ldrsh	r0, [r",0,  48,", -r",2, 24,"]")
 	TEST_PR(   "ldrgtsh	r14, [r",13,0, ", r",12, 48,"]")
@@ -534,8 +535,8 @@
 	TEST_PR(   "ldrlesh	r12, [r",11,48,", -r",10,24,"]!")
 	TEST_PR(   "ldrsh	r2, [r",3,  24,"], r",4, 48,"")
 	TEST_PR(   "ldrsh	r10, [r",9, 48,"], -r",11,24,"")
-	TEST_UNSUPPORTED(".word 0xe1bfc0fa	@ ldrsh r12, [pc, r10]!")
-	TEST_UNSUPPORTED(".word 0xe099f0fb	@ ldrsh pc, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) "	@ ldrsh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) "	@ ldrsh pc, [r9], r11")
 
 	TEST_P(	   "ldrsh	r0, [r",0,  24,", #-1]")
 	TEST_P(	   "ldreqsh	r14, [r",13,0 ,", #1]")
@@ -544,8 +545,8 @@
 	TEST_P(	   "ldrsh	r2, [r",3,  24,"], #48")
 	TEST_P(	   "ldrsh	r10, [r",9, 64,"], #-48")
 	TEST(      "ldrsh	r0, [pc, #0]")
-	TEST_UNSUPPORTED(".word 0xe1ffc3f0	@ ldrsh r12, [pc, #48]!")
-	TEST_UNSUPPORTED(".word 0xe0d9f3f0	@ ldrsh pc, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe1ffc3f0) "	@ ldrsh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe0d9f3f0) "	@ ldrsh pc, [r9], #48")
 
 #if __LINUX_ARM_ARCH__ >= 7
 	TEST_UNSUPPORTED("strht	r1, [r2], r3")
@@ -564,7 +565,7 @@
 	TEST_RPR(  "strcsd	r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
 	TEST_RPR(  "strd	r",2, VAL1,", [r",5, 24,"], r",4,48,"")
 	TEST_RPR(  "strd	r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
-	TEST_UNSUPPORTED(".word 0xe1afc0fa	@ strd r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) "	@ strd r12, [pc, r10]!")
 
 	TEST_PR(   "ldrd	r0, [r",0, 48,", -r",2,24,"]")
 	TEST_PR(   "ldrmid	r8, [r",13,0, ", r",12,48,"]")
@@ -572,10 +573,10 @@
 	TEST_PR(   "ldrpld	r6, [r",11,48,", -r",10,24,"]!")
 	TEST_PR(   "ldrd	r2, [r",5, 24,"], r",4,48,"")
 	TEST_PR(   "ldrd	r10, [r",9,48,"], -r",7,24,"")
-	TEST_UNSUPPORTED(".word 0xe1afc0da	@ ldrd r12, [pc, r10]!")
-	TEST_UNSUPPORTED(".word 0xe089f0db	@ ldrd pc, [r9], r11")
-	TEST_UNSUPPORTED(".word 0xe089e0db	@ ldrd lr, [r9], r11")
-	TEST_UNSUPPORTED(".word 0xe089c0df	@ ldrd r12, [r9], pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) "	@ ldrd r12, [pc, r10]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe089f0db) "	@ ldrd pc, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe089e0db) "	@ ldrd lr, [r9], r11")
+	TEST_UNSUPPORTED(__inst_arm(0xe089c0df) "	@ ldrd r12, [r9], pc")
 
 	TEST_RP(   "strd	r",0, VAL1,", [r",1, 24,", #-8]")
 	TEST_RP(   "strvsd	r",8, VAL2,", [r",13,0, ", #8]")
@@ -583,7 +584,7 @@
 	TEST_RP(   "strvcd	r",12,VAL2,", [r",11,24,", #-16]!")
 	TEST_RP(   "strd	r",2, VAL1,", [r",4, 24,"], #48")
 	TEST_RP(   "strd	r",10,VAL2,", [r",9, 64,"], #-48")
-	TEST_UNSUPPORTED(".word 0xe1efc3f0	@ strd r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) "	@ strd r12, [pc, #48]!")
 
 	TEST_P(	   "ldrd	r0, [r",0, 24,", #-8]")
 	TEST_P(	   "ldrhid	r8, [r",13,0, ", #8]")
@@ -591,9 +592,9 @@
 	TEST_P(	   "ldrlsd	r6, [r",11,24,", #-16]!")
 	TEST_P(	   "ldrd	r2, [r",5, 24,"], #48")
 	TEST_P(	   "ldrd	r10, [r",9,6,"], #-48")
-	TEST_UNSUPPORTED(".word 0xe1efc3d0	@ ldrd r12, [pc, #48]!")
-	TEST_UNSUPPORTED(".word 0xe0c9f3d0	@ ldrd pc, [r9], #48")
-	TEST_UNSUPPORTED(".word 0xe0c9e3d0	@ ldrd lr, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) "	@ ldrd r12, [pc, #48]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) "	@ ldrd pc, [r9], #48")
+	TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) "	@ ldrd lr, [r9], #48")
 
 	TEST_GROUP("Miscellaneous")
 
@@ -601,11 +602,11 @@
 	TEST("movw	r0, #0")
 	TEST("movw	r0, #0xffff")
 	TEST("movw	lr, #0xffff")
-	TEST_UNSUPPORTED(".word 0xe300f000	@ movw pc, #0")
+	TEST_UNSUPPORTED(__inst_arm(0xe300f000) "	@ movw pc, #0")
 	TEST_R("movt	r",0, VAL1,", #0")
 	TEST_R("movt	r",0, VAL2,", #0xffff")
 	TEST_R("movt	r",14,VAL1,", #0xffff")
-	TEST_UNSUPPORTED(".word 0xe340f000	@ movt pc, #0")
+	TEST_UNSUPPORTED(__inst_arm(0xe340f000) "	@ movt pc, #0")
 #endif
 
 	TEST_UNSUPPORTED("msr	cpsr, 0x13")
@@ -673,20 +674,20 @@
 #ifdef CONFIG_THUMB2_KERNEL
 	TEST_ARM_TO_THUMB_INTERWORK_P("ldr	pc, [r",0,0,", #15*4]")
 #endif
-	TEST_UNSUPPORTED(".word 0xe5af6008	@ str r6, [pc, #8]!")
-	TEST_UNSUPPORTED(".word 0xe7af6008	@ str r6, [pc, r8]!")
-	TEST_UNSUPPORTED(".word 0xe5bf6008	@ ldr r6, [pc, #8]!")
-	TEST_UNSUPPORTED(".word 0xe7bf6008	@ ldr r6, [pc, r8]!")
-	TEST_UNSUPPORTED(".word 0xe788600f	@ str r6, [r8, pc]")
-	TEST_UNSUPPORTED(".word 0xe798600f	@ ldr r6, [r8, pc]")
+	TEST_UNSUPPORTED(__inst_arm(0xe5af6008) "	@ str r6, [pc, #8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe7af6008) "	@ str r6, [pc, r8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe5bf6008) "	@ ldr r6, [pc, #8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe7bf6008) "	@ ldr r6, [pc, r8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe788600f) "	@ str r6, [r8, pc]")
+	TEST_UNSUPPORTED(__inst_arm(0xe798600f) "	@ ldr r6, [r8, pc]")
 
 	LOAD_STORE("b")
-	TEST_UNSUPPORTED(".word 0xe5f7f008	@ ldrb pc, [r7, #8]!")
-	TEST_UNSUPPORTED(".word 0xe7f7f008	@ ldrb pc, [r7, r8]!")
-	TEST_UNSUPPORTED(".word 0xe5ef6008	@ strb r6, [pc, #8]!")
-	TEST_UNSUPPORTED(".word 0xe7ef6008	@ strb r6, [pc, r3]!")
-	TEST_UNSUPPORTED(".word 0xe5ff6008	@ ldrb r6, [pc, #8]!")
-	TEST_UNSUPPORTED(".word 0xe7ff6008	@ ldrb r6, [pc, r3]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe5f7f008) "	@ ldrb pc, [r7, #8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe7f7f008) "	@ ldrb pc, [r7, r8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe5ef6008) "	@ strb r6, [pc, #8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe7ef6008) "	@ strb r6, [pc, r3]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe5ff6008) "	@ ldrb r6, [pc, #8]!")
+	TEST_UNSUPPORTED(__inst_arm(0xe7ff6008) "	@ ldrb r6, [pc, r3]!")
 
 	TEST_UNSUPPORTED("ldrt	r0, [r1], #4")
 	TEST_UNSUPPORTED("ldrt	r1, [r2], r3")
@@ -700,153 +701,153 @@
 #if __LINUX_ARM_ARCH__ >= 7
 	TEST_GROUP("Parallel addition and subtraction, signed")
 
-	TEST_UNSUPPORTED(".word 0xe6000010") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe60fffff") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe6000010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe60fffff) "") /* Unallocated space */
 
 	TEST_RR(    "sadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cff1a	@ sadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61cff1a) "	@ sadd16	pc, r12, r10")
 	TEST_RR(    "sasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cff3a	@ sasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61cff3a) "	@ sasx	pc, r12, r10")
 	TEST_RR(    "ssax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "ssax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cff5a	@ ssax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61cff5a) "	@ ssax	pc, r12, r10")
 	TEST_RR(    "ssub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "ssub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cff7a	@ ssub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61cff7a) "	@ ssub16	pc, r12, r10")
 	TEST_RR(    "sadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cff9a	@ sadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe61000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe61fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe61000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe61fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe61cff9a) "	@ sadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe61fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe61000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe61fffdf) "") /* Unallocated space */
 	TEST_RR(    "ssub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "ssub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe61cfffa	@ ssub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe61cfffa) "	@ ssub8	pc, r12, r10")
 
 	TEST_RR(    "qadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cff1a	@ qadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62cff1a) "	@ qadd16	pc, r12, r10")
 	TEST_RR(    "qasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cff3a	@ qasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62cff3a) "	@ qasx	pc, r12, r10")
 	TEST_RR(    "qsax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qsax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cff5a	@ qsax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62cff5a) "	@ qsax	pc, r12, r10")
 	TEST_RR(    "qsub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qsub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cff7a	@ qsub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62cff7a) "	@ qsub16	pc, r12, r10")
 	TEST_RR(    "qadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cff9a	@ qadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe62000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe62fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe62000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe62fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe62cff9a) "	@ qadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe62fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe62000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe62fffdf) "") /* Unallocated space */
 	TEST_RR(    "qsub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "qsub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe62cfffa	@ qsub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe62cfffa) "	@ qsub8	pc, r12, r10")
 
 	TEST_RR(    "shadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cff1a	@ shadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63cff1a) "	@ shadd16	pc, r12, r10")
 	TEST_RR(    "shasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cff3a	@ shasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63cff3a) "	@ shasx	pc, r12, r10")
 	TEST_RR(    "shsax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shsax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cff5a	@ shsax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63cff5a) "	@ shsax	pc, r12, r10")
 	TEST_RR(    "shsub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shsub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cff7a	@ shsub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63cff7a) "	@ shsub16	pc, r12, r10")
 	TEST_RR(    "shadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cff9a	@ shadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe63000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe63fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe63000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe63fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe63cff9a) "	@ shadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe63fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe63000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe63fffdf) "") /* Unallocated space */
 	TEST_RR(    "shsub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "shsub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe63cfffa	@ shsub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe63cfffa) "	@ shsub8	pc, r12, r10")
 
 	TEST_GROUP("Parallel addition and subtraction, unsigned")
 
-	TEST_UNSUPPORTED(".word 0xe6400010") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe64fffff") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe6400010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe64fffff) "") /* Unallocated space */
 
 	TEST_RR(    "uadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cff1a	@ uadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65cff1a) "	@ uadd16	pc, r12, r10")
 	TEST_RR(    "uasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cff3a	@ uasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65cff3a) "	@ uasx	pc, r12, r10")
 	TEST_RR(    "usax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "usax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cff5a	@ usax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65cff5a) "	@ usax	pc, r12, r10")
 	TEST_RR(    "usub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "usub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cff7a	@ usub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65cff7a) "	@ usub16	pc, r12, r10")
 	TEST_RR(    "uadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cff9a	@ uadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe65000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe65fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe65000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe65fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe65cff9a) "	@ uadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe65fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe65000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe65fffdf) "") /* Unallocated space */
 	TEST_RR(    "usub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "usub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe65cfffa	@ usub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe65cfffa) "	@ usub8	pc, r12, r10")
 
 	TEST_RR(    "uqadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cff1a	@ uqadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66cff1a) "	@ uqadd16	pc, r12, r10")
 	TEST_RR(    "uqasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cff3a	@ uqasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66cff3a) "	@ uqasx	pc, r12, r10")
 	TEST_RR(    "uqsax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqsax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cff5a	@ uqsax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66cff5a) "	@ uqsax	pc, r12, r10")
 	TEST_RR(    "uqsub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqsub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cff7a	@ uqsub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66cff7a) "	@ uqsub16	pc, r12, r10")
 	TEST_RR(    "uqadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cff9a	@ uqadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe66000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe66fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe66000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe66fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe66cff9a) "	@ uqadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe66fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe66000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe66fffdf) "") /* Unallocated space */
 	TEST_RR(    "uqsub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uqsub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe66cfffa	@ uqsub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe66cfffa) "	@ uqsub8	pc, r12, r10")
 
 	TEST_RR(    "uhadd16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhadd16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cff1a	@ uhadd16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cff1a) "	@ uhadd16	pc, r12, r10")
 	TEST_RR(    "uhasx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhasx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cff3a	@ uhasx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cff3a) "	@ uhasx	pc, r12, r10")
 	TEST_RR(    "uhsax	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhsax	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cff5a	@ uhsax	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cff5a) "	@ uhsax	pc, r12, r10")
 	TEST_RR(    "uhsub16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhsub16	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cff7a	@ uhsub16	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cff7a) "	@ uhsub16	pc, r12, r10")
 	TEST_RR(    "uhadd8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhadd8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cff9a	@ uhadd8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe67000b0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe67fffbf") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe67000d0") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe67fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe67cff9a) "	@ uhadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67000b0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe67fffbf) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe67000d0) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe67fffdf) "") /* Unallocated space */
 	TEST_RR(    "uhsub8	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uhsub8	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe67cfffa	@ uhsub8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe67feffa	@ uhsub8	r14, pc, r10")
-	TEST_UNSUPPORTED(".word 0xe67cefff	@ uhsub8	r14, r12, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cfffa) "	@ uhsub8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67feffa) "	@ uhsub8	r14, pc, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe67cefff) "	@ uhsub8	r14, r12, pc")
 #endif /* __LINUX_ARM_ARCH__ >= 7 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -854,99 +855,99 @@
 
 	TEST_RR(    "pkhbt	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "pkhbt	r14,r",12, HH1,", r",10,HH2,", lsl #2")
-	TEST_UNSUPPORTED(".word 0xe68cf11a	@ pkhbt	pc, r12, r10, lsl #2")
+	TEST_UNSUPPORTED(__inst_arm(0xe68cf11a) "	@ pkhbt	pc, r12, r10, lsl #2")
 	TEST_RR(    "pkhtb	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "pkhtb	r14,r",12, HH1,", r",10,HH2,", asr #2")
-	TEST_UNSUPPORTED(".word 0xe68cf15a	@ pkhtb	pc, r12, r10, asr #2")
-	TEST_UNSUPPORTED(".word 0xe68fe15a	@ pkhtb	r14, pc, r10, asr #2")
-	TEST_UNSUPPORTED(".word 0xe68ce15f	@ pkhtb	r14, r12, pc, asr #2")
-	TEST_UNSUPPORTED(".word 0xe6900010") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe69fffdf") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe68cf15a) "	@ pkhtb	pc, r12, r10, asr #2")
+	TEST_UNSUPPORTED(__inst_arm(0xe68fe15a) "	@ pkhtb	r14, pc, r10, asr #2")
+	TEST_UNSUPPORTED(__inst_arm(0xe68ce15f) "	@ pkhtb	r14, r12, pc, asr #2")
+	TEST_UNSUPPORTED(__inst_arm(0xe6900010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe69fffdf) "") /* Unallocated space */
 
 	TEST_R(     "ssat	r0, #24, r",0,   VAL1,"")
 	TEST_R(     "ssat	r14, #24, r",12, VAL2,"")
 	TEST_R(     "ssat	r0, #24, r",0,   VAL1,", lsl #8")
 	TEST_R(     "ssat	r14, #24, r",12, VAL2,", asr #8")
-	TEST_UNSUPPORTED(".word 0xe6b7f01c	@ ssat	pc, #24, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6b7f01c) "	@ ssat	pc, #24, r12")
 
 	TEST_R(     "usat	r0, #24, r",0,   VAL1,"")
 	TEST_R(     "usat	r14, #24, r",12, VAL2,"")
 	TEST_R(     "usat	r0, #24, r",0,   VAL1,", lsl #8")
 	TEST_R(     "usat	r14, #24, r",12, VAL2,", asr #8")
-	TEST_UNSUPPORTED(".word 0xe6f7f01c	@ usat	pc, #24, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6f7f01c) "	@ usat	pc, #24, r12")
 
 	TEST_RR(    "sxtab16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "sxtb16	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe68cf47a	@ sxtab16	pc,r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe68cf47a) "	@ sxtab16	pc,r12, r10, ror #8")
 
 	TEST_RR(    "sel	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR(    "sel	r14, r",12,VAL1,", r",10, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe68cffba	@ sel	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe68fefba	@ sel	r14, pc, r10")
-	TEST_UNSUPPORTED(".word 0xe68cefbf	@ sel	r14, r12, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe68cffba) "	@ sel	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe68fefba) "	@ sel	r14, pc, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe68cefbf) "	@ sel	r14, r12, pc")
 
 	TEST_R(     "ssat16	r0, #12, r",0,   HH1,"")
 	TEST_R(     "ssat16	r14, #12, r",12, HH2,"")
-	TEST_UNSUPPORTED(".word 0xe6abff3c	@ ssat16	pc, #12, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6abff3c) "	@ ssat16	pc, #12, r12")
 
 	TEST_RR(    "sxtab	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "sxtb	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe6acf47a	@ sxtab	pc,r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6acf47a) "	@ sxtab	pc,r12, r10, ror #8")
 
 	TEST_R(     "rev	r0, r",0,   VAL1,"")
 	TEST_R(     "rev	r14, r",12, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe6bfff3c	@ rev	pc, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6bfff3c) "	@ rev	pc, r12")
 
 	TEST_RR(    "sxtah	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "sxth	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe6bcf47a	@ sxtah	pc,r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6bcf47a) "	@ sxtah	pc,r12, r10, ror #8")
 
 	TEST_R(     "rev16	r0, r",0,   VAL1,"")
 	TEST_R(     "rev16	r14, r",12, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe6bfffbc	@ rev16	pc, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6bfffbc) "	@ rev16	pc, r12")
 
 	TEST_RR(    "uxtab16	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "uxtb16	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe6ccf47a	@ uxtab16	pc,r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ccf47a) "	@ uxtab16	pc,r12, r10, ror #8")
 
 	TEST_R(     "usat16	r0, #12, r",0,   HH1,"")
 	TEST_R(     "usat16	r14, #12, r",12, HH2,"")
-	TEST_UNSUPPORTED(".word 0xe6ecff3c	@ usat16	pc, #12, r12")
-	TEST_UNSUPPORTED(".word 0xe6ecef3f	@ usat16	r14, #12, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ecff3c) "	@ usat16	pc, #12, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ecef3f) "	@ usat16	r14, #12, pc")
 
 	TEST_RR(    "uxtab	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "uxtb	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe6ecf47a	@ uxtab	pc,r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ecf47a) "	@ uxtab	pc,r12, r10, ror #8")
 
 #if __LINUX_ARM_ARCH__ >= 7
 	TEST_R(     "rbit	r0, r",0,   VAL1,"")
 	TEST_R(     "rbit	r14, r",12, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe6ffff3c	@ rbit	pc, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) "	@ rbit	pc, r12")
 #endif
 
 	TEST_RR(    "uxtah	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "uxth	r8, r",7,  HH1,"")
-	TEST_UNSUPPORTED(".word 0xe6fff077	@ uxth	pc, r7")
-	TEST_UNSUPPORTED(".word 0xe6ff807f	@ uxth	r8, pc")
-	TEST_UNSUPPORTED(".word 0xe6fcf47a	@ uxtah	pc, r12, r10, ror #8")
-	TEST_UNSUPPORTED(".word 0xe6fce47f	@ uxtah	r14, r12, pc, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6fff077) "	@ uxth	pc, r7")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ff807f) "	@ uxth	r8, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe6fcf47a) "	@ uxtah	pc, r12, r10, ror #8")
+	TEST_UNSUPPORTED(__inst_arm(0xe6fce47f) "	@ uxtah	r14, r12, pc, ror #8")
 
 	TEST_R(     "revsh	r0, r",0,   VAL1,"")
 	TEST_R(     "revsh	r14, r",12, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe6ffff3c	@ revsh	pc, r12")
-	TEST_UNSUPPORTED(".word 0xe6ffef3f	@ revsh	r14, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) "	@ revsh	pc, r12")
+	TEST_UNSUPPORTED(__inst_arm(0xe6ffef3f) "	@ revsh	r14, pc")
 
-	TEST_UNSUPPORTED(".word 0xe6900070") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe69fff7f") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe6900070) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe69fff7f) "") /* Unallocated space */
 
-	TEST_UNSUPPORTED(".word 0xe6d00070") /* Unallocated space */
-	TEST_UNSUPPORTED(".word 0xe6dfff7f") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe6d00070) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_arm(0xe6dfff7f) "") /* Unallocated space */
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -954,79 +955,79 @@
 
 	TEST_RRR(   "smlad	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
 	TEST_RRR(   "smlad	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe70f8a1c	@ smlad	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe70f8a1c) "	@ smlad	pc, r12, r10, r8")
 	TEST_RRR(   "smladx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
 	TEST_RRR(   "smladx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe70f8a3c	@ smladx	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe70f8a3c) "	@ smladx	pc, r12, r10, r8")
 
 	TEST_RR(   "smuad	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(   "smuad	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe70ffa1c	@ smuad	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe70ffa1c) "	@ smuad	pc, r12, r10")
 	TEST_RR(   "smuadx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(   "smuadx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe70ffa3c	@ smuadx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe70ffa3c) "	@ smuadx	pc, r12, r10")
 
 	TEST_RRR(   "smlsd	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
 	TEST_RRR(   "smlsd	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe70f8a5c	@ smlsd	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe70f8a5c) "	@ smlsd	pc, r12, r10, r8")
 	TEST_RRR(   "smlsdx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
 	TEST_RRR(   "smlsdx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe70f8a7c	@ smlsdx	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe70f8a7c) "	@ smlsdx	pc, r12, r10, r8")
 
 	TEST_RR(   "smusd	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(   "smusd	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe70ffa5c	@ smusd	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe70ffa5c) "	@ smusd	pc, r12, r10")
 	TEST_RR(   "smusdx	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(   "smusdx	r14, r",12,HH2,", r",10,HH1,"")
-	TEST_UNSUPPORTED(".word 0xe70ffa7c	@ smusdx	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe70ffa7c) "	@ smusdx	pc, r12, r10")
 
 	TEST_RRRR( "smlald	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
 	TEST_RRRR( "smlald	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
-	TEST_UNSUPPORTED(".word 0xe74af819	@ smlald	pc, r10, r9, r8")
-	TEST_UNSUPPORTED(".word 0xe74fb819	@ smlald	r11, pc, r9, r8")
-	TEST_UNSUPPORTED(".word 0xe74ab81f	@ smlald	r11, r10, pc, r8")
-	TEST_UNSUPPORTED(".word 0xe74abf19	@ smlald	r11, r10, r9, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe74af819) "	@ smlald	pc, r10, r9, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe74fb819) "	@ smlald	r11, pc, r9, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe74ab81f) "	@ smlald	r11, r10, pc, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe74abf19) "	@ smlald	r11, r10, r9, pc")
 
 	TEST_RRRR( "smlaldx	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
 	TEST_RRRR( "smlaldx	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
-	TEST_UNSUPPORTED(".word 0xe74af839	@ smlaldx	pc, r10, r9, r8")
-	TEST_UNSUPPORTED(".word 0xe74fb839	@ smlaldx	r11, pc, r9, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe74af839) "	@ smlaldx	pc, r10, r9, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe74fb839) "	@ smlaldx	r11, pc, r9, r8")
 
 	TEST_RRR(  "smmla	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
 	TEST_RRR(  "smmla	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe75f8a1c	@ smmla	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75f8a1c) "	@ smmla	pc, r12, r10, r8")
 	TEST_RRR(  "smmlar	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
 	TEST_RRR(  "smmlar	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe75f8a3c	@ smmlar	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75f8a3c) "	@ smmlar	pc, r12, r10, r8")
 
 	TEST_RR(   "smmul	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR(   "smmul	r14, r",12,VAL2,", r",10,VAL1,"")
-	TEST_UNSUPPORTED(".word 0xe75ffa1c	@ smmul	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) "	@ smmul	pc, r12, r10")
 	TEST_RR(   "smmulr	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR(   "smmulr	r14, r",12,VAL2,", r",10,VAL1,"")
-	TEST_UNSUPPORTED(".word 0xe75ffa3c	@ smmulr	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe75ffa3c) "	@ smmulr	pc, r12, r10")
 
 	TEST_RRR(  "smmls	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
 	TEST_RRR(  "smmls	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe75f8adc	@ smmls	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75f8adc) "	@ smmls	pc, r12, r10, r8")
 	TEST_RRR(  "smmlsr	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
 	TEST_RRR(  "smmlsr	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-	TEST_UNSUPPORTED(".word 0xe75f8afc	@ smmlsr	pc, r12, r10, r8")
-	TEST_UNSUPPORTED(".word 0xe75e8aff	@ smmlsr	r14, pc, r10, r8")
-	TEST_UNSUPPORTED(".word 0xe75e8ffc	@ smmlsr	r14, r12, pc, r8")
-	TEST_UNSUPPORTED(".word 0xe75efafc	@ smmlsr	r14, r12, r10, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe75f8afc) "	@ smmlsr	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75e8aff) "	@ smmlsr	r14, pc, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75e8ffc) "	@ smmlsr	r14, r12, pc, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe75efafc) "	@ smmlsr	r14, r12, r10, pc")
 
 	TEST_RR(   "usad8	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR(   "usad8	r14, r",12,VAL2,", r",10,VAL1,"")
-	TEST_UNSUPPORTED(".word 0xe75ffa1c	@ usad8	pc, r12, r10")
-	TEST_UNSUPPORTED(".word 0xe75efa1f	@ usad8	r14, pc, r10")
-	TEST_UNSUPPORTED(".word 0xe75eff1c	@ usad8	r14, r12, pc")
+	TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) "	@ usad8	pc, r12, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe75efa1f) "	@ usad8	r14, pc, r10")
+	TEST_UNSUPPORTED(__inst_arm(0xe75eff1c) "	@ usad8	r14, r12, pc")
 
 	TEST_RRR(  "usada8	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL3,"")
 	TEST_RRR(  "usada8	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
-	TEST_UNSUPPORTED(".word 0xe78f8a1c	@ usada8	pc, r12, r10, r8")
-	TEST_UNSUPPORTED(".word 0xe78e8a1f	@ usada8	r14, pc, r10, r8")
-	TEST_UNSUPPORTED(".word 0xe78e8f1c	@ usada8	r14, r12, pc, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe78f8a1c) "	@ usada8	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe78e8a1f) "	@ usada8	r14, pc, r10, r8")
+	TEST_UNSUPPORTED(__inst_arm(0xe78e8f1c) "	@ usada8	r14, r12, pc, r8")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 7
@@ -1035,26 +1036,26 @@
 	TEST_R(     "sbfx	r0, r",0  , VAL1,", #0, #31")
 	TEST_R(     "sbfxeq	r14, r",12, VAL2,", #8, #16")
 	TEST_R(     "sbfx	r4, r",10,  VAL1,", #16, #15")
-	TEST_UNSUPPORTED(".word 0xe7aff45c	@ sbfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_arm(0xe7aff45c) "	@ sbfx	pc, r12, #8, #16")
 
 	TEST_R(     "ubfx	r0, r",0  , VAL1,", #0, #31")
 	TEST_R(     "ubfxcs	r14, r",12, VAL2,", #8, #16")
 	TEST_R(     "ubfx	r4, r",10,  VAL1,", #16, #15")
-	TEST_UNSUPPORTED(".word 0xe7eff45c	@ ubfx	pc, r12, #8, #16")
-	TEST_UNSUPPORTED(".word 0xe7efc45f	@ ubfx	r12, pc, #8, #16")
+	TEST_UNSUPPORTED(__inst_arm(0xe7eff45c) "	@ ubfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_arm(0xe7efc45f) "	@ ubfx	r12, pc, #8, #16")
 
 	TEST_R(     "bfc	r",0, VAL1,", #4, #20")
 	TEST_R(     "bfcvs	r",14,VAL2,", #4, #20")
 	TEST_R(     "bfc	r",7, VAL1,", #0, #31")
 	TEST_R(     "bfc	r",8, VAL2,", #0, #31")
-	TEST_UNSUPPORTED(".word 0xe7def01f	@ bfc	pc, #0, #31");
+	TEST_UNSUPPORTED(__inst_arm(0xe7def01f) "	@ bfc	pc, #0, #31");
 
 	TEST_RR(    "bfi	r",0, VAL1,", r",0  , VAL2,", #0, #31")
 	TEST_RR(    "bfipl	r",12,VAL1,", r",14 , VAL2,", #4, #20")
-	TEST_UNSUPPORTED(".word 0xe7d7f21e	@ bfi	pc, r14, #4, #20")
+	TEST_UNSUPPORTED(__inst_arm(0xe7d7f21e) "	@ bfi	pc, r14, #4, #20")
 
-	TEST_UNSUPPORTED(".word 0x07f000f0")  /* Permanently UNDEFINED */
-	TEST_UNSUPPORTED(".word 0x07ffffff")  /* Permanently UNDEFINED */
+	TEST_UNSUPPORTED(__inst_arm(0x07f000f0) "")  /* Permanently UNDEFINED */
+	TEST_UNSUPPORTED(__inst_arm(0x07ffffff) "")  /* Permanently UNDEFINED */
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 	TEST_GROUP("Branch, branch with link, and block data transfer")
@@ -1181,43 +1182,43 @@
 										\
 	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15, #4]")			\
 	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15, #-4]")			\
-	TEST_UNSUPPORTED(".word 0x"cc"daf0001	@ stc"two"	0, cr0, [r15, #4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"d2f0001	@ stc"two"	0, cr0, [r15, #-4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"caf0001	@ stc"two"	0, cr0, [r15], #4")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c2f0001	@ stc"two"	0, cr0, [r15], #-4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) "	@ stc"two"	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) "	@ stc"two"	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) "	@ stc"two"	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) "	@ stc"two"	0, cr0, [r15], #-4")	\
 	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15], {1}")			\
 	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15, #4]")			\
 	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15, #-4]")			\
-	TEST_UNSUPPORTED(".word 0x"cc"def0001	@ stc"two"l	0, cr0, [r15, #4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"d6f0001	@ stc"two"l	0, cr0, [r15, #-4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"cef0001	@ stc"two"l	0, cr0, [r15], #4")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c6f0001	@ stc"two"l	0, cr0, [r15], #-4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) "	@ stc"two"l	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) "	@ stc"two"l	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) "	@ stc"two"l	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) "	@ stc"two"l	0, cr0, [r15], #-4")	\
 	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15], {1}")			\
 	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15, #4]")			\
 	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15, #-4]")			\
-	TEST_UNSUPPORTED(".word 0x"cc"dbf0001	@ ldc"two"	0, cr0, [r15, #4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"d3f0001	@ ldc"two"	0, cr0, [r15, #-4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"cbf0001	@ ldc"two"	0, cr0, [r15], #4")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c3f0001	@ ldc"two"	0, cr0, [r15], #-4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) "	@ ldc"two"	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) "	@ ldc"two"	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) "	@ ldc"two"	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) "	@ ldc"two"	0, cr0, [r15], #-4")	\
 	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15], {1}")			\
 	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15, #4]")			\
 	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15, #-4]")			\
-	TEST_UNSUPPORTED(".word 0x"cc"dff0001	@ ldc"two"l	0, cr0, [r15, #4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"d7f0001	@ ldc"two"l	0, cr0, [r15, #-4]!")	\
-	TEST_UNSUPPORTED(".word 0x"cc"cff0001	@ ldc"two"l	0, cr0, [r15], #4")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c7f0001	@ ldc"two"l	0, cr0, [r15], #-4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) "	@ ldc"two"l	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) "	@ ldc"two"l	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) "	@ ldc"two"l	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) "	@ ldc"two"l	0, cr0, [r15], #-4")	\
 	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15], {1}")
 
 #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc)					\
 										\
 	TEST_COPROCESSOR( "mcrr"two"	0, 15, r0, r14, cr0")			\
 	TEST_COPROCESSOR( "mcrr"two"	15, 0, r14, r0, cr15")			\
-	TEST_UNSUPPORTED(".word 0x"cc"c4f00f0	@ mcrr"two"	0, 15, r0, r15, cr0")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c40ff0f	@ mcrr"two"	15, 0, r15, r0, cr15")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) "	@ mcrr"two"	0, 15, r0, r15, cr0")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) "	@ mcrr"two"	15, 0, r15, r0, cr15")	\
 	TEST_COPROCESSOR( "mrrc"two"	0, 15, r0, r14, cr0")			\
 	TEST_COPROCESSOR( "mrrc"two"	15, 0, r14, r0, cr15")			\
-	TEST_UNSUPPORTED(".word 0x"cc"c5f00f0	@ mrrc"two"	0, 15, r0, r15, cr0")	\
-	TEST_UNSUPPORTED(".word 0x"cc"c50ff0f	@ mrrc"two"	15, 0, r15, r0, cr15")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) "	@ mrrc"two"	0, 15, r0, r15, cr0")	\
+	TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) "	@ mrrc"two"	15, 0, r15, r0, cr15")	\
 	TEST_COPROCESSOR( "cdp"two"	15, 15, cr15, cr15, cr15, 7")		\
 	TEST_COPROCESSOR( "cdp"two"	0, 0, cr0, cr0, cr0, 0")		\
 	TEST_COPROCESSOR( "mcr"two"	15, 7, r15, cr15, cr15, 7")		\
@@ -1225,8 +1226,8 @@
 	TEST_COPROCESSOR( "mrc"two"	15, 7, r15, cr15, cr15, 7")		\
 	TEST_COPROCESSOR( "mrc"two"	0, 0, r0, cr0, cr0, 0")
 
-	COPROCESSOR_INSTRUCTIONS_ST_LD("","e")
-	COPROCESSOR_INSTRUCTIONS_MC_MR("","e")
+	COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+	COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
 	TEST_UNSUPPORTED("svc	0")
 	TEST_UNSUPPORTED("svc	0xffffff")
 
@@ -1252,14 +1253,14 @@
 	TEST_UNSUPPORTED("rfedb	sp!")
 	TEST_UNSUPPORTED("rfeia	sp!")
 	TEST_UNSUPPORTED("rfeib	sp!")
-	TEST_UNSUPPORTED(".word 0xf81d0a00	@ rfeda	pc")
-	TEST_UNSUPPORTED(".word 0xf91d0a00	@ rfedb	pc")
-	TEST_UNSUPPORTED(".word 0xf89d0a00	@ rfeia	pc")
-	TEST_UNSUPPORTED(".word 0xf99d0a00	@ rfeib	pc")
-	TEST_UNSUPPORTED(".word 0xf83d0a00	@ rfeda	pc!")
-	TEST_UNSUPPORTED(".word 0xf93d0a00	@ rfedb	pc!")
-	TEST_UNSUPPORTED(".word 0xf8bd0a00	@ rfeia	pc!")
-	TEST_UNSUPPORTED(".word 0xf9bd0a00	@ rfeib	pc!")
+	TEST_UNSUPPORTED(__inst_arm(0xf81d0a00) "	@ rfeda	pc")
+	TEST_UNSUPPORTED(__inst_arm(0xf91d0a00) "	@ rfedb	pc")
+	TEST_UNSUPPORTED(__inst_arm(0xf89d0a00) "	@ rfeia	pc")
+	TEST_UNSUPPORTED(__inst_arm(0xf99d0a00) "	@ rfeib	pc")
+	TEST_UNSUPPORTED(__inst_arm(0xf83d0a00) "	@ rfeda	pc!")
+	TEST_UNSUPPORTED(__inst_arm(0xf93d0a00) "	@ rfedb	pc!")
+	TEST_UNSUPPORTED(__inst_arm(0xf8bd0a00) "	@ rfeia	pc!")
+	TEST_UNSUPPORTED(__inst_arm(0xf9bd0a00) "	@ rfeib	pc!")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -1286,9 +1287,9 @@
 	TEST(	"blx	__dummy_thumb_subroutine_odd")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-	COPROCESSOR_INSTRUCTIONS_ST_LD("2","f")
+	COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
 #if __LINUX_ARM_ARCH__ >= 6
-	COPROCESSOR_INSTRUCTIONS_MC_MR("2","f")
+	COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
 #endif
 
 	TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions")
@@ -1318,9 +1319,9 @@
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
-	TEST_SUPPORTED(  ".word 0xf590f000	@ pldw [r0, #0]")
-	TEST_SUPPORTED(  ".word 0xf797f000	@ pldw	[r7, r0]")
-	TEST_SUPPORTED(  ".word 0xf798f18c	@ pldw	[r8, r12, lsl #3]");
+	TEST_SUPPORTED(  __inst_arm(0xf590f000) "	@ pldw [r0, #0]")
+	TEST_SUPPORTED(  __inst_arm(0xf797f000) "	@ pldw	[r7, r0]")
+	TEST_SUPPORTED(  __inst_arm(0xf798f18c) "	@ pldw	[r8, r12, lsl #3]");
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5d8b857..844dd10 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <asm/opcodes.h>
 
 #include "kprobes-test.h"
 
@@ -119,7 +120,7 @@
 	TEST_R(   "add	sp"        ", r",8,-8,  "")
 	TEST_R(   "add	r",14,VAL1,", pc")
 	TEST_BF_R("add	pc"        ", r",0,2f-1f-8,"")
-	TEST_UNSUPPORTED(".short 0x44ff	@ add pc, pc")
+	TEST_UNSUPPORTED(__inst_thumb16(0x44ff) "	@ add pc, pc")
 
 	TEST_RR(  "cmp	r",3,VAL1,", r",8,VAL2,"")
 	TEST_RR(  "cmp	r",8,VAL2,", r",0,VAL1,"")
@@ -150,7 +151,7 @@
 
 	TEST_BF_R("blx	r",0, 2f+1,"")
 	TEST_BB_R("blx	r",14,2f+1,"")
-	TEST_UNSUPPORTED(".short 0x47f8	@ blx pc")
+	TEST_UNSUPPORTED(__inst_thumb16(0x47f8) "	@ blx pc")
 
 	TEST_GROUP("Load from Literal Pool")
 
@@ -237,8 +238,8 @@
 	TEST_R("rev	r7, r",0, VAL2,"")
 	TEST_R("rev16	r0, r",7, VAL1,"")
 	TEST_R("rev16	r7, r",0, VAL2,"")
-	TEST_UNSUPPORTED(".short 0xba80")
-	TEST_UNSUPPORTED(".short 0xbabf")
+	TEST_UNSUPPORTED(__inst_thumb16(0xba80) "")
+	TEST_UNSUPPORTED(__inst_thumb16(0xbabf) "")
 	TEST_R("revsh	r0, r",7, VAL1,"")
 	TEST_R("revsh	r7, r",0, VAL2,"")
 
@@ -272,8 +273,8 @@
 	TEST("nop")
 	TEST("wfi")
 	TEST_SUPPORTED("wfe")
-	TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */
-	TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */
+	TEST_UNSUPPORTED(__inst_thumb16(0xbf50) "") /* Unassigned hints */
+	TEST_UNSUPPORTED(__inst_thumb16(0xbff0) "") /* Unassigned hints */
 
 #define TEST_IT(code, code2)			\
 	TESTCASE_START(code)			\
@@ -310,8 +311,8 @@
 	TEST_BF("bgt	2f")
 	TEST_BB("blt	2b")
 )
-	TEST_UNSUPPORTED(".short 0xde00")
-	TEST_UNSUPPORTED(".short 0xdeff")
+	TEST_UNSUPPORTED(__inst_thumb16(0xde00) "")
+	TEST_UNSUPPORTED(__inst_thumb16(0xdeff) "")
 	TEST_UNSUPPORTED("svc	#0x00")
 	TEST_UNSUPPORTED("svc	#0xff")
 
@@ -380,13 +381,13 @@
 	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia	r",0,14*4,", {r12,pc}")
 	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia	r",13,2*4,", {r0-r12,pc}")
 
-	TEST_UNSUPPORTED(".short 0xe88f,0x0101	@ stmia	pc, {r0,r8}")
-	TEST_UNSUPPORTED(".short 0xe92f,0x5f00	@ stmdb	pc!, {r8-r12,r14}")
-	TEST_UNSUPPORTED(".short 0xe8bd,0xc000	@ ldmia	r13!, {r14,pc}")
-	TEST_UNSUPPORTED(".short 0xe93e,0xc000	@ ldmdb	r14!, {r14,pc}")
-	TEST_UNSUPPORTED(".short 0xe8a7,0x3f00	@ stmia	r7!, {r8-r12,sp}")
-	TEST_UNSUPPORTED(".short 0xe8a7,0x9f00	@ stmia	r7!, {r8-r12,pc}")
-	TEST_UNSUPPORTED(".short 0xe93e,0x2010	@ ldmdb	r14!, {r4,sp}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe88f0101) "	@ stmia	pc, {r0,r8}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe92f5f00) "	@ stmdb	pc!, {r8-r12,r14}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8bdc000) "	@ ldmia	r13!, {r14,pc}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe93ec000) "	@ ldmdb	r14!, {r14,pc}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8a73f00) "	@ stmia	r7!, {r8-r12,sp}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8a79f00) "	@ stmia	r7!, {r8-r12,pc}")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe93e2010) "	@ ldmdb	r14!, {r4,sp}")
 
 	TEST_GROUP("Load/store double or exclusive, table branch")
 
@@ -402,12 +403,12 @@
 		"3:	.word	"__stringify(VAL1)"	\n\t"
 		"	.word	"__stringify(VAL2))
 
-	TEST_UNSUPPORTED(".short 0xe9ff,0xec04	@ ldrd	r14, r12, [pc, #16]!")
-	TEST_UNSUPPORTED(".short 0xe8ff,0xec04	@ ldrd	r14, r12, [pc], #16")
-	TEST_UNSUPPORTED(".short 0xe9d4,0xd800	@ ldrd	sp, r8, [r4]")
-	TEST_UNSUPPORTED(".short 0xe9d4,0xf800	@ ldrd	pc, r8, [r4]")
-	TEST_UNSUPPORTED(".short 0xe9d4,0x7d00	@ ldrd	r7, sp, [r4]")
-	TEST_UNSUPPORTED(".short 0xe9d4,0x7f00	@ ldrd	r7, pc, [r4]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9ffec04) "	@ ldrd	r14, r12, [pc, #16]!")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8ffec04) "	@ ldrd	r14, r12, [pc], #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9d4d800) "	@ ldrd	sp, r8, [r4]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9d4f800) "	@ ldrd	pc, r8, [r4]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9d47d00) "	@ ldrd	r7, sp, [r4]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9d47f00) "	@ ldrd	r7, pc, [r4]")
 
 	TEST_RRP("strd	r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
 	TEST_RR( "strd	r",12,VAL2,", r",14,VAL1,", [sp, #16]")
@@ -415,8 +416,8 @@
 	TEST_RR( "strd	r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
 	TEST_RRP("strd	r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
 	TEST_RR( "strd	r",7, VAL2,", r",8, VAL1,", [sp], #-16")
-	TEST_UNSUPPORTED(".short 0xe9ef,0xec04	@ strd	r14, r12, [pc, #16]!")
-	TEST_UNSUPPORTED(".short 0xe8ef,0xec04	@ strd	r14, r12, [pc], #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) "	@ strd	r14, r12, [pc, #16]!")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) "	@ strd	r14, r12, [pc], #16")
 
 	TEST_RX("tbb	[pc, r",0, (9f-(1f+4)),"]",
 		"9:			\n\t"
@@ -460,9 +461,9 @@
 		"3:	mvn	r0, r0	\n\t"
 		"2:	nop		\n\t")
 
-	TEST_UNSUPPORTED(".short 0xe8d1,0xf01f	@ tbh [r1, pc]")
-	TEST_UNSUPPORTED(".short 0xe8d1,0xf01d	@ tbh [r1, sp]")
-	TEST_UNSUPPORTED(".short 0xe8dd,0xf012	@ tbh [sp, r2]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01f) "	@ tbh [r1, pc]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01d) "	@ tbh [r1, sp]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xe8ddf012) "	@ tbh [sp, r2]")
 
 	TEST_UNSUPPORTED("strexb	r0, r1, [r2]")
 	TEST_UNSUPPORTED("strexh	r0, r1, [r2]")
@@ -540,40 +541,40 @@
 	TEST_RR("pkhtb	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR("pkhtb	r14,r",12, HH1,", r",10,HH2,", asr #2")
 
-	TEST_UNSUPPORTED(".short 0xea17,0x0f0d	@ tst.w r7, sp")
-	TEST_UNSUPPORTED(".short 0xea17,0x0f0f	@ tst.w r7, pc")
-	TEST_UNSUPPORTED(".short 0xea1d,0x0f07	@ tst.w sp, r7")
-	TEST_UNSUPPORTED(".short 0xea1f,0x0f07	@ tst.w pc, r7")
-	TEST_UNSUPPORTED(".short 0xf01d,0x1f08	@ tst sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf01f,0x1f08	@ tst pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea170f0d) "	@ tst.w r7, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea170f0f) "	@ tst.w r7, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea1d0f07) "	@ tst.w sp, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea1f0f07) "	@ tst.w pc, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf01d1f08) "	@ tst sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf01f1f08) "	@ tst pc, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xea97,0x0f0d	@ teq.w r7, sp")
-	TEST_UNSUPPORTED(".short 0xea97,0x0f0f	@ teq.w r7, pc")
-	TEST_UNSUPPORTED(".short 0xea9d,0x0f07	@ teq.w sp, r7")
-	TEST_UNSUPPORTED(".short 0xea9f,0x0f07	@ teq.w pc, r7")
-	TEST_UNSUPPORTED(".short 0xf09d,0x1f08	@ tst sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf09f,0x1f08	@ tst pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea970f0d) "	@ teq.w r7, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea970f0f) "	@ teq.w r7, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea9d0f07) "	@ teq.w sp, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea9f0f07) "	@ teq.w pc, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf09d1f08) "	@ tst sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf09f1f08) "	@ tst pc, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xeb17,0x0f0d	@ cmn.w r7, sp")
-	TEST_UNSUPPORTED(".short 0xeb17,0x0f0f	@ cmn.w r7, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0d) "	@ cmn.w r7, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0f) "	@ cmn.w r7, pc")
 	TEST_P("cmn.w	sp, r",7,0,"")
-	TEST_UNSUPPORTED(".short 0xeb1f,0x0f07	@ cmn.w pc, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb1f0f07) "	@ cmn.w pc, r7")
 	TEST(  "cmn	sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf11f,0x1f08	@ cmn pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf11f1f08) "	@ cmn pc, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xebb7,0x0f0d	@ cmp.w r7, sp")
-	TEST_UNSUPPORTED(".short 0xebb7,0x0f0f	@ cmp.w r7, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0d) "	@ cmp.w r7, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0f) "	@ cmp.w r7, pc")
 	TEST_P("cmp.w	sp, r",7,0,"")
-	TEST_UNSUPPORTED(".short 0xebbf,0x0f07	@ cmp.w pc, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebbf0f07) "	@ cmp.w pc, r7")
 	TEST(  "cmp	sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf1bf,0x1f08	@ cmp pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1bf1f08) "	@ cmp pc, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xea5f,0x070d	@ movs.w r7, sp")
-	TEST_UNSUPPORTED(".short 0xea5f,0x070f	@ movs.w r7, pc")
-	TEST_UNSUPPORTED(".short 0xea5f,0x0d07	@ movs.w sp, r7")
-	TEST_UNSUPPORTED(".short 0xea4f,0x0f07	@ mov.w  pc, r7")
-	TEST_UNSUPPORTED(".short 0xf04f,0x1d08	@ mov sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf04f,0x1f08	@ mov pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea5f070d) "	@ movs.w r7, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea5f070f) "	@ movs.w r7, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea5f0d07) "	@ movs.w sp, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea4f0f07) "	@ mov.w  pc, r7")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf04f1d08) "	@ mov sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf04f1f08) "	@ mov pc, #0x00080008")
 
 	TEST_R("add.w	r0, sp, r",1, 4,"")
 	TEST_R("adds	r0, sp, r",1, 4,", asl #3")
@@ -581,15 +582,15 @@
 	TEST_R("add	r0, sp, r",1, 16,", ror #1")
 	TEST_R("add.w	sp, sp, r",1, 4,"")
 	TEST_R("add	sp, sp, r",1, 4,", asl #3")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x1d01	@ add sp, sp, r1, asl #4")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x0d71	@ add sp, sp, r1, ror #1")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d1d01) "	@ add sp, sp, r1, asl #4")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d71) "	@ add sp, sp, r1, ror #1")
 	TEST(  "add.w	r0, sp, #24")
 	TEST(  "add.w	sp, sp, #24")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x0f01	@ add pc, sp, r1")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x000f	@ add r0, sp, pc")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x000d	@ add r0, sp, sp")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f	@ add sp, sp, pc")
-	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d	@ add sp, sp, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0f01) "	@ add pc, sp, r1")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000f) "	@ add r0, sp, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000d) "	@ add r0, sp, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0f) "	@ add sp, sp, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0d) "	@ add sp, sp, sp")
 
 	TEST_R("sub.w	r0, sp, r",1, 4,"")
 	TEST_R("subs	r0, sp, r",1, 4,", asl #3")
@@ -597,54 +598,54 @@
 	TEST_R("sub	r0, sp, r",1, 16,", ror #1")
 	TEST_R("sub.w	sp, sp, r",1, 4,"")
 	TEST_R("sub	sp, sp, r",1, 4,", asl #3")
-	TEST_UNSUPPORTED(".short 0xebad,0x1d01	@ sub sp, sp, r1, asl #4")
-	TEST_UNSUPPORTED(".short 0xebad,0x0d71	@ sub sp, sp, r1, ror #1")
-	TEST_UNSUPPORTED(".short 0xebad,0x0f01	@ sub pc, sp, r1")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebad1d01) "	@ sub sp, sp, r1, asl #4")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebad0d71) "	@ sub sp, sp, r1, ror #1")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebad0f01) "	@ sub pc, sp, r1")
 	TEST(  "sub.w	r0, sp, #24")
 	TEST(  "sub.w	sp, sp, #24")
 
-	TEST_UNSUPPORTED(".short 0xea02,0x010f	@ and r1, r2, pc")
-	TEST_UNSUPPORTED(".short 0xea0f,0x0103	@ and r1, pc, r3")
-	TEST_UNSUPPORTED(".short 0xea02,0x0f03	@ and pc, r2, r3")
-	TEST_UNSUPPORTED(".short 0xea02,0x010d	@ and r1, r2, sp")
-	TEST_UNSUPPORTED(".short 0xea0d,0x0103	@ and r1, sp, r3")
-	TEST_UNSUPPORTED(".short 0xea02,0x0d03	@ and sp, r2, r3")
-	TEST_UNSUPPORTED(".short 0xf00d,0x1108	@ and r1, sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf00f,0x1108	@ and r1, pc, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf002,0x1d08	@ and sp, r8, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf002,0x1f08	@ and pc, r8, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea02010f) "	@ and r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea0f0103) "	@ and r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea020f03) "	@ and pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea02010d) "	@ and r1, r2, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea0d0103) "	@ and r1, sp, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xea020d03) "	@ and sp, r2, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf00d1108) "	@ and r1, sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf00f1108) "	@ and r1, pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf0021d08) "	@ and sp, r8, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf0021f08) "	@ and pc, r8, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xeb02,0x010f	@ add r1, r2, pc")
-	TEST_UNSUPPORTED(".short 0xeb0f,0x0103	@ add r1, pc, r3")
-	TEST_UNSUPPORTED(".short 0xeb02,0x0f03	@ add pc, r2, r3")
-	TEST_UNSUPPORTED(".short 0xeb02,0x010d	@ add r1, r2, sp")
-	TEST_SUPPORTED(  ".short 0xeb0d,0x0103	@ add r1, sp, r3")
-	TEST_UNSUPPORTED(".short 0xeb02,0x0d03	@ add sp, r2, r3")
-	TEST_SUPPORTED(  ".short 0xf10d,0x1108	@ add r1, sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf10d,0x1f08	@ add pc, sp, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf10f,0x1108	@ add r1, pc, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf102,0x1d08	@ add sp, r8, #0x00080008")
-	TEST_UNSUPPORTED(".short 0xf102,0x1f08	@ add pc, r8, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb02010f) "	@ add r1, r2, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb0f0103) "	@ add r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb020f03) "	@ add pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb02010d) "	@ add r1, r2, sp")
+	TEST_SUPPORTED(  __inst_thumb32(0xeb0d0103) "	@ add r1, sp, r3")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb020d03) "	@ add sp, r2, r3")
+	TEST_SUPPORTED(  __inst_thumb32(0xf10d1108) "	@ add r1, sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf10d1f08) "	@ add pc, sp, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf10f1108) "	@ add r1, pc, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1021d08) "	@ add sp, r8, #0x00080008")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1021f08) "	@ add pc, r8, #0x00080008")
 
-	TEST_UNSUPPORTED(".short 0xeaa0,0x0000")
-	TEST_UNSUPPORTED(".short 0xeaf0,0x0000")
-	TEST_UNSUPPORTED(".short 0xeb20,0x0000")
-	TEST_UNSUPPORTED(".short 0xeb80,0x0000")
-	TEST_UNSUPPORTED(".short 0xebe0,0x0000")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeaa00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeaf00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb200000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeb800000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xebe00000) "")
 
-	TEST_UNSUPPORTED(".short 0xf0a0,0x0000")
-	TEST_UNSUPPORTED(".short 0xf0c0,0x0000")
-	TEST_UNSUPPORTED(".short 0xf0f0,0x0000")
-	TEST_UNSUPPORTED(".short 0xf120,0x0000")
-	TEST_UNSUPPORTED(".short 0xf180,0x0000")
-	TEST_UNSUPPORTED(".short 0xf1e0,0x0000")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf0a00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf0c00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf0f00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1200000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1800000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf1e00000) "")
 
 	TEST_GROUP("Coprocessor instructions")
 
-	TEST_UNSUPPORTED(".short 0xec00,0x0000")
-	TEST_UNSUPPORTED(".short 0xeff0,0x0000")
-	TEST_UNSUPPORTED(".short 0xfc00,0x0000")
-	TEST_UNSUPPORTED(".short 0xfff0,0x0000")
+	TEST_UNSUPPORTED(__inst_thumb32(0xec000000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xeff00000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfff00000) "")
 
 	TEST_GROUP("Data-processing (plain binary immediate)")
 
@@ -652,92 +653,92 @@
 	TEST(  "addw	r14, sp, #0xf5a")
 	TEST(  "addw	sp, sp, #0x20")
 	TEST(  "addw	r7,  pc, #0x888")
-	TEST_UNSUPPORTED(".short 0xf20f,0x1f20	@ addw pc, pc, #0x120")
-	TEST_UNSUPPORTED(".short 0xf20d,0x1f20	@ addw pc, sp, #0x120")
-	TEST_UNSUPPORTED(".short 0xf20f,0x1d20	@ addw sp, pc, #0x120")
-	TEST_UNSUPPORTED(".short 0xf200,0x1d20	@ addw sp, r0, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf20f1f20) "	@ addw pc, pc, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf20d1f20) "	@ addw pc, sp, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf20f1d20) "	@ addw sp, pc, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2001d20) "	@ addw sp, r0, #0x120")
 
 	TEST_R("subw	r0,  r",1, VAL1,", #0x123")
 	TEST(  "subw	r14, sp, #0xf5a")
 	TEST(  "subw	sp, sp, #0x20")
 	TEST(  "subw	r7,  pc, #0x888")
-	TEST_UNSUPPORTED(".short 0xf2af,0x1f20	@ subw pc, pc, #0x120")
-	TEST_UNSUPPORTED(".short 0xf2ad,0x1f20	@ subw pc, sp, #0x120")
-	TEST_UNSUPPORTED(".short 0xf2af,0x1d20	@ subw sp, pc, #0x120")
-	TEST_UNSUPPORTED(".short 0xf2a0,0x1d20	@ subw sp, r0, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2af1f20) "	@ subw pc, pc, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2ad1f20) "	@ subw pc, sp, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2af1d20) "	@ subw sp, pc, #0x120")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2a01d20) "	@ subw sp, r0, #0x120")
 
 	TEST("movw	r0, #0")
 	TEST("movw	r0, #0xffff")
 	TEST("movw	lr, #0xffff")
-	TEST_UNSUPPORTED(".short 0xf240,0x0d00	@ movw sp, #0")
-	TEST_UNSUPPORTED(".short 0xf240,0x0f00	@ movw pc, #0")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2400d00) "	@ movw sp, #0")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2400f00) "	@ movw pc, #0")
 
 	TEST_R("movt	r",0, VAL1,", #0")
 	TEST_R("movt	r",0, VAL2,", #0xffff")
 	TEST_R("movt	r",14,VAL1,", #0xffff")
-	TEST_UNSUPPORTED(".short 0xf2c0,0x0d00	@ movt sp, #0")
-	TEST_UNSUPPORTED(".short 0xf2c0,0x0f00	@ movt pc, #0")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2c00d00) "	@ movt sp, #0")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf2c00f00) "	@ movt pc, #0")
 
 	TEST_R(     "ssat	r0, #24, r",0,   VAL1,"")
 	TEST_R(     "ssat	r14, #24, r",12, VAL2,"")
 	TEST_R(     "ssat	r0, #24, r",0,   VAL1,", lsl #8")
 	TEST_R(     "ssat	r14, #24, r",12, VAL2,", asr #8")
-	TEST_UNSUPPORTED(".short 0xf30c,0x0d17	@ ssat	sp, #24, r12")
-	TEST_UNSUPPORTED(".short 0xf30c,0x0f17	@ ssat	pc, #24, r12")
-	TEST_UNSUPPORTED(".short 0xf30d,0x0c17	@ ssat	r12, #24, sp")
-	TEST_UNSUPPORTED(".short 0xf30f,0x0c17	@ ssat	r12, #24, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf30c0d17) "	@ ssat	sp, #24, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf30c0f17) "	@ ssat	pc, #24, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf30d0c17) "	@ ssat	r12, #24, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf30f0c17) "	@ ssat	r12, #24, pc")
 
 	TEST_R(     "usat	r0, #24, r",0,   VAL1,"")
 	TEST_R(     "usat	r14, #24, r",12, VAL2,"")
 	TEST_R(     "usat	r0, #24, r",0,   VAL1,", lsl #8")
 	TEST_R(     "usat	r14, #24, r",12, VAL2,", asr #8")
-	TEST_UNSUPPORTED(".short 0xf38c,0x0d17	@ usat	sp, #24, r12")
-	TEST_UNSUPPORTED(".short 0xf38c,0x0f17	@ usat	pc, #24, r12")
-	TEST_UNSUPPORTED(".short 0xf38d,0x0c17	@ usat	r12, #24, sp")
-	TEST_UNSUPPORTED(".short 0xf38f,0x0c17	@ usat	r12, #24, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf38c0d17) "	@ usat	sp, #24, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf38c0f17) "	@ usat	pc, #24, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf38d0c17) "	@ usat	r12, #24, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf38f0c17) "	@ usat	r12, #24, pc")
 
 	TEST_R(     "ssat16	r0, #12, r",0,   HH1,"")
 	TEST_R(     "ssat16	r14, #12, r",12, HH2,"")
-	TEST_UNSUPPORTED(".short 0xf32c,0x0d0b	@ ssat16	sp, #12, r12")
-	TEST_UNSUPPORTED(".short 0xf32c,0x0f0b	@ ssat16	pc, #12, r12")
-	TEST_UNSUPPORTED(".short 0xf32d,0x0c0b	@ ssat16	r12, #12, sp")
-	TEST_UNSUPPORTED(".short 0xf32f,0x0c0b	@ ssat16	r12, #12, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf32c0d0b) "	@ ssat16	sp, #12, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf32c0f0b) "	@ ssat16	pc, #12, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf32d0c0b) "	@ ssat16	r12, #12, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf32f0c0b) "	@ ssat16	r12, #12, pc")
 
 	TEST_R(     "usat16	r0, #12, r",0,   HH1,"")
 	TEST_R(     "usat16	r14, #12, r",12, HH2,"")
-	TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b	@ usat16	sp, #12, r12")
-	TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b	@ usat16	pc, #12, r12")
-	TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b	@ usat16	r12, #12, sp")
-	TEST_UNSUPPORTED(".short 0xf3af,0x0c0b	@ usat16	r12, #12, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0d0b) "	@ usat16	sp, #12, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0f0b) "	@ usat16	pc, #12, r12")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3ad0c0b) "	@ usat16	r12, #12, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3af0c0b) "	@ usat16	r12, #12, pc")
 
 	TEST_R(     "sbfx	r0, r",0  , VAL1,", #0, #31")
 	TEST_R(     "sbfx	r14, r",12, VAL2,", #8, #16")
 	TEST_R(     "sbfx	r4, r",10,  VAL1,", #16, #15")
-	TEST_UNSUPPORTED(".short 0xf34c,0x2d0f	@ sbfx	sp, r12, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf34c,0x2f0f	@ sbfx	pc, r12, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf34d,0x2c0f	@ sbfx	r12, sp, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf34f,0x2c0f	@ sbfx	r12, pc, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf34c2d0f) "	@ sbfx	sp, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf34c2f0f) "	@ sbfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf34d2c0f) "	@ sbfx	r12, sp, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf34f2c0f) "	@ sbfx	r12, pc, #8, #16")
 
 	TEST_R(     "ubfx	r0, r",0  , VAL1,", #0, #31")
 	TEST_R(     "ubfx	r14, r",12, VAL2,", #8, #16")
 	TEST_R(     "ubfx	r4, r",10,  VAL1,", #16, #15")
-	TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f	@ ubfx	sp, r12, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f	@ ubfx	pc, r12, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f	@ ubfx	r12, sp, #8, #16")
-	TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f	@ ubfx	r12, pc, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2d0f) "	@ ubfx	sp, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2f0f) "	@ ubfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3cd2c0f) "	@ ubfx	r12, sp, #8, #16")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3cf2c0f) "	@ ubfx	r12, pc, #8, #16")
 
 	TEST_R(     "bfc	r",0, VAL1,", #4, #20")
 	TEST_R(     "bfc	r",14,VAL2,", #4, #20")
 	TEST_R(     "bfc	r",7, VAL1,", #0, #31")
 	TEST_R(     "bfc	r",8, VAL2,", #0, #31")
-	TEST_UNSUPPORTED(".short 0xf36f,0x0d1e	@ bfc	sp, #0, #31")
-	TEST_UNSUPPORTED(".short 0xf36f,0x0f1e	@ bfc	pc, #0, #31")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf36f0d1e) "	@ bfc	sp, #0, #31")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf36f0f1e) "	@ bfc	pc, #0, #31")
 
 	TEST_RR(    "bfi	r",0, VAL1,", r",0  , VAL2,", #0, #31")
 	TEST_RR(    "bfi	r",12,VAL1,", r",14 , VAL2,", #4, #20")
-	TEST_UNSUPPORTED(".short 0xf36e,0x1d17	@ bfi	sp, r14, #4, #20")
-	TEST_UNSUPPORTED(".short 0xf36e,0x1f17	@ bfi	pc, r14, #4, #20")
-	TEST_UNSUPPORTED(".short 0xf36d,0x1e17	@ bfi	r14, sp, #4, #20")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf36e1d17) "	@ bfi	sp, r14, #4, #20")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf36e1f17) "	@ bfi	pc, r14, #4, #20")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf36d1e17) "	@ bfi	r14, sp, #4, #20")
 
 	TEST_GROUP("Branches and miscellaneous control")
 
@@ -775,14 +776,14 @@
 
 	TEST("mrs	r0, cpsr")
 	TEST("mrs	r14, cpsr")
-	TEST_UNSUPPORTED(".short 0xf3ef,0x8d00	@ mrs	sp, spsr")
-	TEST_UNSUPPORTED(".short 0xf3ef,0x8f00	@ mrs	pc, spsr")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) "	@ mrs	sp, spsr")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) "	@ mrs	pc, spsr")
 	TEST_UNSUPPORTED("mrs	r0, spsr")
 	TEST_UNSUPPORTED("mrs	lr, spsr")
 
-	TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf7f08000) " @ smc #0")
 
-	TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefeined")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf7f0a000) " @ undefeined")
 
 	TEST_BF(  "b.w	2f")
 	TEST_BB(  "b.w	2b")
@@ -829,15 +830,15 @@
 	SINGLE_STORE("")
 
 	TEST("str	sp, [sp]")
-	TEST_UNSUPPORTED(".short 0xf8cf,0xe000	@ str	r14, [pc]")
-	TEST_UNSUPPORTED(".short 0xf8ce,0xf000	@ str	pc, [r14]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) "	@ str	r14, [pc]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) "	@ str	pc, [r14]")
 
 	TEST_GROUP("Advanced SIMD element or structure load/store instructions")
 
-	TEST_UNSUPPORTED(".short 0xf900,0x0000")
-	TEST_UNSUPPORTED(".short 0xf92f,0xffff")
-	TEST_UNSUPPORTED(".short 0xf980,0x0000")
-	TEST_UNSUPPORTED(".short 0xf9ef,0xffff")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf9000000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf92fffff) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf9800000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf9efffff) "")
 
 	TEST_GROUP("Load single data item and memory hints")
 
@@ -881,20 +882,20 @@
 	TEST_SUPPORTED("ldr	sp, 99f")
 	TEST_SUPPORTED("ldr	pc, 99f")
 
-	TEST_UNSUPPORTED(".short 0xf854,0x700d	@ ldr	r7, [r4, sp]")
-	TEST_UNSUPPORTED(".short 0xf854,0x700f	@ ldr	r7, [r4, pc]")
-	TEST_UNSUPPORTED(".short 0xf814,0x700d	@ ldrb	r7, [r4, sp]")
-	TEST_UNSUPPORTED(".short 0xf814,0x700f	@ ldrb	r7, [r4, pc]")
-	TEST_UNSUPPORTED(".short 0xf89f,0xd004	@ ldrb	sp, 99f")
-	TEST_UNSUPPORTED(".short 0xf814,0xd008	@ ldrb	sp, [r4, r8]")
-	TEST_UNSUPPORTED(".short 0xf894,0xd000	@ ldrb	sp, [r4]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf854700d) "	@ ldr	r7, [r4, sp]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf854700f) "	@ ldr	r7, [r4, pc]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf814700d) "	@ ldrb	r7, [r4, sp]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf814700f) "	@ ldrb	r7, [r4, pc]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf89fd004) "	@ ldrb	sp, 99f")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf814d008) "	@ ldrb	sp, [r4, r8]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf894d000) "	@ ldrb	sp, [r4]")
 
-	TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf8600000) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf9ffffff) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf9500000) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf95fffff) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf8000800) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xf97ffaff) "") /* Unallocated space */
 
 	TEST(   "pli	[pc, #4]")
 	TEST(   "pli	[pc, #-4]")
@@ -902,22 +903,22 @@
 	TEST(   "pld	[pc, #-4]")
 
 	TEST_P( "pld	[r",0,-1024,", #1024]")
-	TEST(   ".short 0xf8b0,0xf400	@ pldw	[r0, #1024]")
+	TEST(   __inst_thumb32(0xf8b0f400) "	@ pldw	[r0, #1024]")
 	TEST_P( "pli	[r",4, 0b,", #1024]")
 	TEST_P( "pld	[r",7, 120,", #-120]")
-	TEST(   ".short 0xf837,0xfc78	@ pldw	[r7, #-120]")
+	TEST(   __inst_thumb32(0xf837fc78) "	@ pldw	[r7, #-120]")
 	TEST_P( "pli	[r",11,120,", #-120]")
 	TEST(   "pld	[sp, #0]")
 
 	TEST_PR("pld	[r",7, 24, ", r",0, 16,"]")
 	TEST_PR("pld	[r",8, 24, ", r",12,16,", lsl #3]")
-	TEST_SUPPORTED(".short 0xf837,0xf000	@ pldw	[r7, r0]")
-	TEST_SUPPORTED(".short 0xf838,0xf03c	@ pldw	[r8, r12, lsl #3]");
+	TEST_SUPPORTED(__inst_thumb32(0xf837f000) "	@ pldw	[r7, r0]")
+	TEST_SUPPORTED(__inst_thumb32(0xf838f03c) "	@ pldw	[r8, r12, lsl #3]");
 	TEST_RR("pli	[r",12,0b,", r",0, 16,"]")
 	TEST_RR("pli	[r",0, 0b,", r",12,16,", lsl #3]")
 	TEST_R( "pld	[sp, r",1, 16,"]")
-	TEST_UNSUPPORTED(".short 0xf817,0xf00d  @pld	[r7, sp]")
-	TEST_UNSUPPORTED(".short 0xf817,0xf00f  @pld	[r7, pc]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf817f00d) "  @pld	[r7, sp]")
+	TEST_UNSUPPORTED(__inst_thumb32(0xf817f00f) "  @pld	[r7, pc]")
 
 	TEST_GROUP("Data-processing (register)")
 
@@ -934,21 +935,21 @@
 	SHIFTS32("ror")
 	SHIFTS32("rors")
 
-	TEST_UNSUPPORTED(".short 0xfa01,0xff02	@ lsl	pc, r1, r2")
-	TEST_UNSUPPORTED(".short 0xfa01,0xfd02	@ lsl	sp, r1, r2")
-	TEST_UNSUPPORTED(".short 0xfa0f,0xf002	@ lsl	r0, pc, r2")
-	TEST_UNSUPPORTED(".short 0xfa0d,0xf002	@ lsl	r0, sp, r2")
-	TEST_UNSUPPORTED(".short 0xfa01,0xf00f	@ lsl	r0, r1, pc")
-	TEST_UNSUPPORTED(".short 0xfa01,0xf00d	@ lsl	r0, r1, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa01ff02) "	@ lsl	pc, r1, r2")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa01fd02) "	@ lsl	sp, r1, r2")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff002) "	@ lsl	r0, pc, r2")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0df002) "	@ lsl	r0, sp, r2")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00f) "	@ lsl	r0, r1, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00d) "	@ lsl	r0, r1, sp")
 
 	TEST_RR(    "sxtah	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "sxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "sxth	r8, r",7,  HH1,"")
 
-	TEST_UNSUPPORTED(".short 0xfa0f,0xff87	@ sxth	pc, r7");
-	TEST_UNSUPPORTED(".short 0xfa0f,0xfd87	@ sxth	sp, r7");
-	TEST_UNSUPPORTED(".short 0xfa0f,0xf88f	@ sxth	r8, pc");
-	TEST_UNSUPPORTED(".short 0xfa0f,0xf88d	@ sxth	r8, sp");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0fff87) "	@ sxth	pc, r7");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0ffd87) "	@ sxth	sp, r7");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88f) "	@ sxth	r8, pc");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88d) "	@ sxth	r8, sp");
 
 	TEST_RR(    "uxtah	r0, r",0,  HH1,", r",1, HH2,"")
 	TEST_RR(    "uxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
@@ -970,8 +971,8 @@
 	TEST_RR(    "uxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
 	TEST_R(     "uxtb	r8, r",7,  HH1,"")
 
-	TEST_UNSUPPORTED(".short 0xfa60,0x00f0")
-	TEST_UNSUPPORTED(".short 0xfa7f,0xffff")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa6000f0) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa7fffff) "")
 
 #define PARALLEL_ADD_SUB(op)					\
 	TEST_RR(  op"add16	r0, r",0,  HH1,", r",1, HH2,"")	\
@@ -1019,10 +1020,10 @@
 	TEST_R("revsh.w	r0, r",0,   VAL1,"")
 	TEST_R("revsh	r14, r",12, VAL2,"")
 
-	TEST_UNSUPPORTED(".short 0xfa9c,0xff8c	@ rev	pc, r12");
-	TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c	@ rev	sp, r12");
-	TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f	@ rev	r14, pc");
-	TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d	@ rev	r14, sp");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa9cff8c) "	@ rev	pc, r12");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa9cfd8c) "	@ rev	sp, r12");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa9ffe8f) "	@ rev	r14, pc");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa9dfe8d) "	@ rev	r14, sp");
 
 	TEST_RR("sel	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR("sel	r14, r",12,VAL1,", r",10, VAL2,"")
@@ -1031,31 +1032,31 @@
 	TEST_R("clz	r7, r",14,0x1,"")
 	TEST_R("clz	lr, r",7, 0xffffffff,"")
 
-	TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfa80f030) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfab0f000) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
 
 	TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
 
 	TEST_RR(    "mul	r0, r",1, VAL1,", r",2, VAL2,"")
 	TEST_RR(    "mul	r7, r",8, VAL2,", r",9, VAL2,"")
-	TEST_UNSUPPORTED(".short 0xfb08,0xff09	@ mul	pc, r8, r9")
-	TEST_UNSUPPORTED(".short 0xfb08,0xfd09	@ mul	sp, r8, r9")
-	TEST_UNSUPPORTED(".short 0xfb0f,0xf709	@ mul	r7, pc, r9")
-	TEST_UNSUPPORTED(".short 0xfb0d,0xf709	@ mul	r7, sp, r9")
-	TEST_UNSUPPORTED(".short 0xfb08,0xf70f	@ mul	r7, r8, pc")
-	TEST_UNSUPPORTED(".short 0xfb08,0xf70d	@ mul	r7, r8, sp")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08ff09) "	@ mul	pc, r8, r9")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08fd09) "	@ mul	sp, r8, r9")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb0ff709) "	@ mul	r7, pc, r9")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb0df709) "	@ mul	r7, sp, r9")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70f) "	@ mul	r7, r8, pc")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70d) "	@ mul	r7, r8, sp")
 
 	TEST_RRR(   "mla	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(   "mla	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
-	TEST_UNSUPPORTED(".short 0xfb08,0xaf09	@ mla	pc, r8, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb08,0xad09	@ mla	sp, r8, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb0f,0xa709	@ mla	r7, pc, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb0d,0xa709	@ mla	r7, sp, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb08,0xa70f	@ mla	r7, r8, pc, r10");
-	TEST_UNSUPPORTED(".short 0xfb08,0xa70d	@ mla	r7, r8, sp, r10");
-	TEST_UNSUPPORTED(".short 0xfb08,0xd709	@ mla	r7, r8, r9, sp");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08af09) "	@ mla	pc, r8, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08ad09) "	@ mla	sp, r8, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb0fa709) "	@ mla	r7, pc, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb0da709) "	@ mla	r7, sp, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70f) "	@ mla	r7, r8, pc, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70d) "	@ mla	r7, r8, sp, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb08d709) "	@ mla	r7, r8, r9, sp");
 
 	TEST_RRR(   "mls	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
 	TEST_RRR(   "mls	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
@@ -1123,25 +1124,25 @@
 	TEST_RR(    "usad8	r0, r",0,  VAL1,", r",1, VAL2,"")
 	TEST_RR(    "usad8	r14, r",12,VAL2,", r",10,VAL1,"")
 
-	TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */
-	TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb00f010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb0fff1f) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb70f010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb700010) "") /* Unallocated space */
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
 
 	TEST_GROUP("Long multiply, long multiply accumulate, and divide")
 
 	TEST_RR(   "smull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(   "smull	r7, r8, r",9, VAL2,", r",10, VAL1,"")
-	TEST_UNSUPPORTED(".short 0xfb89,0xf80a	@ smull	pc, r8, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb89,0xd80a	@ smull	sp, r8, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb89,0x7f0a	@ smull	r7, pc, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb89,0x7d0a	@ smull	r7, sp, r9, r10");
-	TEST_UNSUPPORTED(".short 0xfb8f,0x780a	@ smull	r7, r8, pc, r10");
-	TEST_UNSUPPORTED(".short 0xfb8d,0x780a	@ smull	r7, r8, sp, r10");
-	TEST_UNSUPPORTED(".short 0xfb89,0x780f	@ smull	r7, r8, r9, pc");
-	TEST_UNSUPPORTED(".short 0xfb89,0x780d	@ smull	r7, r8, r9, sp");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb89f80a) "	@ smull	pc, r8, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb89d80a) "	@ smull	sp, r8, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb897f0a) "	@ smull	r7, pc, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb897d0a) "	@ smull	r7, sp, r9, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb8f780a) "	@ smull	r7, r8, pc, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb8d780a) "	@ smull	r7, r8, sp, r10");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb89780f) "	@ smull	r7, r8, r9, pc");
+	TEST_UNSUPPORTED(__inst_thumb32(0xfb89780d) "	@ smull	r7, r8, r9, sp");
 
 	TEST_RR(   "umull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
 	TEST_RR(   "umull	r7, r8, r",9, VAL2,", r",10, VAL1,"")
@@ -1175,8 +1176,8 @@
 
 	TEST_GROUP("Coprocessor instructions")
 
-	TEST_UNSUPPORTED(".short 0xfc00,0x0000")
-	TEST_UNSUPPORTED(".short 0xffff,0xffff")
+	TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+	TEST_UNSUPPORTED(__inst_thumb32(0xffffffff) "")
 
 	TEST_GROUP("Testing instructions in IT blocks")
 
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index c2fd06b..3796399 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -113,7 +113,7 @@
  *	@ start of inline data...
  *	.ascii "mov r0, r7"	@ text title for test case
  *	.byte	0
- *	.align	2
+ *	.align	2, 0
  *
  *	@ TEST_ARG_REG
  *	.byte	ARG_TYPE_REG
@@ -1333,7 +1333,8 @@
 static unsigned long next_instruction(unsigned long pc)
 {
 #ifdef CONFIG_THUMB2_KERNEL
-	if ((pc & 1) && !is_wide_instruction(*(u16 *)(pc - 1)))
+	if ((pc & 1) &&
+	    !is_wide_instruction(__mem_to_opcode_thumb16(*(u16 *)(pc - 1))))
 		return pc + 2;
 	else
 #endif
@@ -1378,13 +1379,13 @@
 
 	if (test_case_is_thumb) {
 		u16 *p = (u16 *)(test_code & ~1);
-		current_instruction = p[0];
+		current_instruction = __mem_to_opcode_thumb16(p[0]);
 		if (is_wide_instruction(current_instruction)) {
-			current_instruction <<= 16;
-			current_instruction |= p[1];
+			u16 instr2 = __mem_to_opcode_thumb16(p[1]);
+			current_instruction = __opcode_thumb32_compose(current_instruction, instr2);
 		}
 	} else {
-		current_instruction = *(u32 *)test_code;
+		current_instruction = __mem_to_opcode_arm(*(u32 *)test_code);
 	}
 
 	if (current_title[0] == '.')
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index e28a869..eecc90a 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -115,7 +115,7 @@
 	/* multiple strings to be concatenated.  */		\
 	".ascii "#title"				\n\t"	\
 	".byte	0					\n\t"	\
-	".align	2					\n\t"
+	".align	2, 0					\n\t"
 
 #define	TEST_ARG_REG(reg, val)					\
 	".byte	"__stringify(ARG_TYPE_REG)"		\n\t"	\
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 6619188..9495d7f 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -149,9 +149,9 @@
 	enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);
 
 	/* Fixup modified instruction to have halfwords in correct order...*/
-	insn = asi->insn[0];
-	((u16 *)asi->insn)[0] = insn >> 16;
-	((u16 *)asi->insn)[1] = insn & 0xffff;
+	insn = __mem_to_opcode_arm(asi->insn[0]);
+	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
 
 	return ret;
 }
@@ -516,7 +516,7 @@
 {
 	insn &= ~0x00ff;
 	insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
-	((u16 *)asi->insn)[0] = insn;
+	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
 	asi->insn_handler = t16_emulate_hiregs;
 	return INSN_GOOD;
 }
@@ -547,8 +547,10 @@
 	 * and call it with R9=SP and LR in the register list represented
 	 * by R8.
 	 */
-	((u16 *)asi->insn)[0] = 0xe929;		/* 1st half STMDB R9!,{} */
-	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
+	/* 1st half STMDB R9!,{} */
+	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+	/* 2nd half (register list) */
+	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
 	asi->insn_handler = t16_emulate_push;
 	return INSN_GOOD;
 }
@@ -600,8 +602,10 @@
 	 * and call it with R9=SP and PC in the register list represented
 	 * by R8.
 	 */
-	((u16 *)asi->insn)[0] = 0xe8b9;		/* 1st half LDMIA R9!,{} */
-	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
+	/* 1st half LDMIA R9!,{} */
+	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+	/* 2nd half (register list) */
+	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
 	asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
 					 : t16_emulate_pop_nopc;
 	return INSN_GOOD;
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 8795f9f..6d64420 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -26,6 +26,7 @@
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
+#include <asm/opcodes.h>
 #include <asm/cacheflush.h>
 #include <linux/percpu.h>
 #include <linux/bug.h>
@@ -67,10 +68,10 @@
 #ifdef CONFIG_THUMB2_KERNEL
 	thumb = true;
 	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
-	insn = ((u16 *)addr)[0];
+	insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
 	if (is_wide_instruction(insn)) {
-		insn <<= 16;
-		insn |= ((u16 *)addr)[1];
+		u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+		insn = __opcode_thumb32_compose(insn, inst2);
 		decode_insn = thumb32_probes_decode_insn;
 		actions = kprobes_t32_actions;
 	} else {
@@ -81,7 +82,7 @@
 	thumb = false;
 	if (addr & 0x3)
 		return -EINVAL;
-	insn = *p->addr;
+	insn = __mem_to_opcode_arm(*p->addr);
 	decode_insn = arm_probes_decode_insn;
 	actions = kprobes_arm_actions;
 #endif
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 679cf4d..fc72086 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/thread_notify.h>
+#include <asm/cputype.h>
 
 static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 {
@@ -80,6 +81,9 @@
 {
 	u32 cp_access;
 
+	if (!cpu_is_pj4())
+		return 0;
+
 	cp_access = pj4_cp_access_read() & ~0xf;
 	pj4_cp_access_write(cp_access);
 
diff --git a/arch/arm/kernel/probes.c b/arch/arm/kernel/probes.c
index b41873f..a8ab540 100644
--- a/arch/arm/kernel/probes.c
+++ b/arch/arm/kernel/probes.c
@@ -202,13 +202,14 @@
 #ifdef CONFIG_THUMB2_KERNEL
 	if (thumb) {
 		u16 *thumb_insn = (u16 *)asi->insn;
-		thumb_insn[1] = 0x4770; /* Thumb bx lr */
-		thumb_insn[2] = 0x4770; /* Thumb bx lr */
+		/* Thumb bx lr */
+		thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+		thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
 		return insn;
 	}
-	asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+	asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
 #else
-	asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+	asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
 #endif
 	/* Make an ARM instruction unconditional */
 	if (insn < 0xe0000000)
@@ -228,12 +229,12 @@
 	if (thumb) {
 		u16 *ip = (u16 *)asi->insn;
 		if (is_wide_instruction(insn))
-			*ip++ = insn >> 16;
-		*ip++ = insn;
+			*ip++ = __opcode_to_mem_thumb16(insn >> 16);
+		*ip++ = __opcode_to_mem_thumb16(insn);
 		return;
 	}
 #endif
-	asi->insn[0] = insn;
+	asi->insn[0] = __opcode_to_mem_arm(insn);
 }
 
 /*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 806d287..81ef686 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>
 
@@ -99,7 +100,7 @@
 	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
 
 	/* Disable interrupts first */
-	local_irq_disable();
+	raw_local_irq_disable();
 	local_fiq_disable();
 
 	/* Disable the L2 if we're the last man standing. */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 172ee18..abd2fc0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -445,6 +445,7 @@
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
+		__show_regs(regs);
 		dump_instr(KERN_INFO, regs);
 	}
 #endif
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 14d4996..788495d 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -137,11 +137,16 @@
 		v7_exit_coherency_flush(all);
 
 		/*
-		 * This is a harmless no-op.  On platforms with a real
-		 * outer cache this might either be needed or not,
-		 * depending on where the outer cache sits.
+		 * A full outer cache flush could be needed at this point
+		 * on platforms with such a cache, depending on where the
+		 * outer cache sits. In some cases the notion of a "last
+		 * cluster standing" would need to be implemented if the
+		 * outer cache is shared across clusters. In any case, when
+		 * the outer cache needs flushing, there is no concurrent
+		 * access to the cache controller to worry about and no
+		 * special locking besides what is already provided by the
+		 * MCPM state machinery is needed.
 		 */
-		outer_flush_all();
 
 		/*
 		 * Disable cluster-level coherency by masking
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index ef69152..c508f41 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -120,25 +120,7 @@
 };
 
 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-	/* These are approximate */
-	{
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= 0,
-		.set	= "    ro",
-	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= PMD_SECT_AP_WRITE,
-		.set	= "    RW",
-	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= PMD_SECT_AP_READ,
-		.set	= "USR ro",
-	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.set	= "USR RW",
-#else
+#ifdef CONFIG_ARM_LPAE
 	{
 		.mask	= PMD_SECT_USER,
 		.val	= PMD_SECT_USER,
@@ -148,6 +130,41 @@
 		.val	= PMD_SECT_RDONLY,
 		.set	= "ro",
 		.clear	= "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
+	{
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.set	= "    ro",
+	}, {
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_WRITE,
+		.set	= "    RW",
+	}, {
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ,
+		.set	= "USR ro",
+	}, {
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set	= "USR RW",
+#else /* ARMv4/ARMv5  */
+	/* These are approximate */
+	{
+		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val    = 0,
+		.set    = "    ro",
+	}, {
+		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val    = PMD_SECT_AP_WRITE,
+		.set    = "    RW",
+	}, {
+		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val    = PMD_SECT_AP_READ,
+		.set    = "USR ro",
+	}, {
+		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set    = "USR RW",
 #endif
 	}, {
 		.mask	= PMD_SECT_XN,
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 46e1749..f0759e7 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -8,9 +8,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
 @ VFP entry point.
 @
@@ -22,11 +25,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	add	r11, r4, #1		@ increment it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	inc_preempt_count r10, r4
 	enable_irq
  	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
@@ -35,12 +34,7 @@
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr
 ENDPROC(vfp_null_entry)
 
@@ -53,12 +47,7 @@
 
 	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	ldr	r0, VFP_arch_address
 	str	r0, [r0]		@ set to non-zero value
 	mov	pc, r9			@ we have handled the fault
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 3e5d311..be807625 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -14,10 +14,13 @@
  * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
 	.macro	DBGSTR, str
 #ifdef DEBUG
@@ -179,12 +182,7 @@
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, r9			@ we think we have handled things
 
 
@@ -203,12 +201,7 @@
 	@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr
 
 process_exception:
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1325c3b..12c3afe 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -45,6 +45,7 @@
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select HAVE_ARCH_AUDITSYSCALL
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 79b9bcd..9ae0854 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,38 +1,38 @@
 config MICROBLAZE
 	def_bool y
 	select ARCH_MIGHT_HAVE_PC_PARPORT
-	select HAVE_MEMBLOCK
-	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE_MCOUNT_RECORD
-	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select HAVE_OPROFILE
-	select HAVE_ARCH_KGDB
-	select HAVE_DMA_ATTRS
-	select HAVE_DMA_API_DEBUG
-	select TRACING_SUPPORT
-	select OF
-	select OF_EARLY_FLATTREE
 	select ARCH_WANT_IPC_PARSE_VERSION
-	select HAVE_DEBUG_KMEMLEAK
-	select IRQ_DOMAIN
-	select VIRT_TO_BUS
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select BUILDTIME_EXTABLE_SORT
+	select CLKSRC_OF
+	select CLONE_BACKWARDS3
+	select COMMON_CLK
+	select GENERIC_ATOMIC64
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_CPU_DEVICES
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
-	select GENERIC_CPU_DEVICES
-	select GENERIC_ATOMIC64
-	select GENERIC_CLOCKEVENTS
-	select COMMON_CLK
 	select GENERIC_SCHED_CLOCK
-	select GENERIC_IDLE_POLL_SETUP
+	select HAVE_ARCH_KGDB
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_ATTRS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FUNCTION_TRACER
+	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_OPROFILE
+	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS3
-	select CLKSRC_OF
-	select BUILDTIME_EXTABLE_SORT
+	select OF
+	select OF_EARLY_FLATTREE
+	select TRACING_SUPPORT
+	select VIRT_TO_BUS
 
 config SWAP
 	def_bool n
@@ -74,7 +74,7 @@
 
 source "kernel/Kconfig.freezer"
 
-source "arch/microblaze/platform/Kconfig.platform"
+source "arch/microblaze/Kconfig.platform"
 
 menu "Processor type and features"
 
diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform
new file mode 100644
index 0000000..1b3d8c8
--- /dev/null
+++ b/arch/microblaze/Kconfig.platform
@@ -0,0 +1,69 @@
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+# Platform selection Kconfig menu for MicroBlaze targets
+#
+
+menu "Platform options"
+
+config OPT_LIB_FUNCTION
+	bool "Optimalized lib function"
+	default y
+	help
+	  Allows turning on optimized library functions (memcpy and memmove).
+	  They are optimized by using word alignment. This will work
+	  fine if both source and destination are aligned on the same
+	  boundary. However, if they are aligned on different boundaries
+	  shifts will be necessary. This might result in bad performance
+	  on MicroBlaze systems without a barrel shifter.
+
+config OPT_LIB_ASM
+	bool "Optimalized lib function ASM"
+	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+	default n
+	help
+	  Allows turning on optimized library functions (memcpy and memmove).
+	  Functions are written in asm code.
+
+# Definitions for MICROBLAZE0
+comment "Definitions for MICROBLAZE0"
+
+config KERNEL_BASE_ADDR
+	hex "Physical address where Linux Kernel is"
+	default "0x90000000"
+	help
+	  BASE Address for kernel
+
+config XILINX_MICROBLAZE0_FAMILY
+	string "Targeted FPGA family"
+	default "virtex5"
+
+config XILINX_MICROBLAZE0_USE_MSR_INSTR
+	int "USE_MSR_INSTR range (0:1)"
+	default 0
+
+config XILINX_MICROBLAZE0_USE_PCMP_INSTR
+	int "USE_PCMP_INSTR range (0:1)"
+	default 0
+
+config XILINX_MICROBLAZE0_USE_BARREL
+	int "USE_BARREL range (0:1)"
+	default 0
+
+config XILINX_MICROBLAZE0_USE_DIV
+	int "USE_DIV range (0:1)"
+	default 0
+
+config XILINX_MICROBLAZE0_USE_HW_MUL
+	int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
+	default 0
+
+config XILINX_MICROBLAZE0_USE_FPU
+	int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
+	default 0
+
+config XILINX_MICROBLAZE0_HW_VER
+	string "Core version number"
+	default 7.10.d
+
+endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index a69eaf2..740f2b8 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -48,7 +48,6 @@
 libs-y += arch/microblaze/lib/
 core-y += arch/microblaze/kernel/
 core-y += arch/microblaze/mm/
-core-y += arch/microblaze/platform/
 core-$(CONFIG_PCI) += arch/microblaze/pci/
 
 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
deleted file mode 120000
index 7cb6578..0000000
--- a/arch/microblaze/boot/dts/system.dts
+++ /dev/null
@@ -1 +0,0 @@
-../../platform/generic/system.dts
\ No newline at end of file
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/boot/dts/system.dts
similarity index 100%
rename from arch/microblaze/platform/generic/system.dts
rename to arch/microblaze/boot/dts/system.dts
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 3fbb7f1..1e4c332 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -15,7 +15,6 @@
 #include <asm/page.h>
 #include <linux/types.h>
 #include <linux/mm.h>          /* Get struct page {...} */
-#include <asm-generic/iomap.h>
 
 #ifndef CONFIG_PCI
 #define _IO_BASE	0
@@ -25,211 +24,32 @@
 #define _IO_BASE	isa_io_base
 #define _ISA_MEM_BASE	isa_mem_base
 #define PCI_DRAM_OFFSET	pci_dram_offset
-#endif
+struct pci_dev;
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+#define pci_iounmap pci_iounmap
 
 extern unsigned long isa_io_base;
-extern unsigned long pci_io_base;
 extern unsigned long pci_dram_offset;
-
 extern resource_size_t isa_mem_base;
+#endif
 
+#define PCI_IOBASE	((void __iomem *)_IO_BASE)
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
-/* the following is needed to support PCI with some drivers */
-
-#define mmiowb()
-
-static inline unsigned char __raw_readb(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned char __force *)addr;
-}
-static inline unsigned short __raw_readw(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned short __force *)addr;
-}
-static inline unsigned int __raw_readl(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned int __force *)addr;
-}
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned long __force *)addr;
-}
-static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
-{
-	*(volatile unsigned char __force *)addr = v;
-}
-static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
-{
-	*(volatile unsigned short __force *)addr = v;
-}
-static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
-{
-	*(volatile unsigned int __force *)addr = v;
-}
-static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
-{
-	*(volatile unsigned long __force *)addr = v;
-}
-
-/*
- * read (readb, readw, readl, readq) and write (writeb, writew,
- * writel, writeq) accessors are for PCI and thus little endian.
- * Linux 2.4 for Microblaze had this wrong.
- */
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned char __force *)addr;
-}
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-	return le16_to_cpu(*(volatile unsigned short __force *)addr);
-}
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-	return le32_to_cpu(*(volatile unsigned int __force *)addr);
-}
-#define readq readq
-static inline u64 readq(const volatile void __iomem *addr)
-{
-	return le64_to_cpu(__raw_readq(addr));
-}
-static inline void writeb(unsigned char v, volatile void __iomem *addr)
-{
-	*(volatile unsigned char __force *)addr = v;
-}
-static inline void writew(unsigned short v, volatile void __iomem *addr)
-{
-	*(volatile unsigned short __force *)addr = cpu_to_le16(v);
-}
-static inline void writel(unsigned int v, volatile void __iomem *addr)
-{
-	*(volatile unsigned int __force *)addr = cpu_to_le32(v);
-}
-#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
-
-/* ioread and iowrite variants. thease are for now same as __raw_
- * variants of accessors. we might check for endianess in the feature
- */
-#define ioread8(addr)		__raw_readb((u8 *)(addr))
-#define ioread16(addr)		__raw_readw((u16 *)(addr))
-#define ioread32(addr)		__raw_readl((u32 *)(addr))
-#define iowrite8(v, addr)	__raw_writeb((u8)(v), (u8 *)(addr))
-#define iowrite16(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
-#define iowrite32(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
-
-#define ioread16be(addr)	__raw_readw((u16 *)(addr))
-#define ioread32be(addr)	__raw_readl((u32 *)(addr))
-#define iowrite16be(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
-#define iowrite32be(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
-
-/* These are the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl, the "string" versions
- * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
- * inb_p/inw_p/...
- * The macros don't do byte-swapping.
- */
-#define inb(port)		readb((u8 *)((unsigned long)(port)))
-#define outb(val, port)		writeb((val), (u8 *)((unsigned long)(port)))
-#define inw(port)		readw((u16 *)((unsigned long)(port)))
-#define outw(val, port)		writew((val), (u16 *)((unsigned long)(port)))
-#define inl(port)		readl((u32 *)((unsigned long)(port)))
-#define outl(val, port)		writel((val), (u32 *)((unsigned long)(port)))
-
-#define inb_p(port)		inb((port))
-#define outb_p(val, port)	outb((val), (port))
-#define inw_p(port)		inw((port))
-#define outw_p(val, port)	outw((val), (port))
-#define inl_p(port)		inl((port))
-#define outl_p(val, port)	outl((val), (port))
-
-#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
-
 #ifdef CONFIG_MMU
-
-#define phys_to_virt(addr)	((void *)__phys_to_virt(addr))
-#define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
-#define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
-
 #define page_to_bus(page)	(page_to_phys(page))
-#define bus_to_virt(addr)	(phys_to_virt(addr))
 
 extern void iounmap(void __iomem *addr);
-/*extern void *__ioremap(phys_addr_t address, unsigned long size,
-		unsigned long flags);*/
+
 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#define ioremap_writethrough(addr, size) ioremap((addr), (size))
-#define ioremap_nocache(addr, size)      ioremap((addr), (size))
-#define ioremap_fullcache(addr, size)    ioremap((addr), (size))
-
-#else /* CONFIG_MMU */
-
-/**
- *	virt_to_phys - map virtual addresses to physical
- *	@address: address to remap
- *
- *	The returned physical address is the physical (CPU) mapping for
- *	the memory address given. It is only valid to use this function on
- *	addresses directly mapped or allocated via kmalloc.
- *
- *	This function does not give bus mappings for DMA transfers. In
- *	almost all conceivable cases a device driver should not be using
- *	this function
- */
-static inline unsigned long __iomem virt_to_phys(volatile void *address)
-{
-	return __pa((unsigned long)address);
-}
-
-#define virt_to_bus virt_to_phys
-
-/**
- *	phys_to_virt - map physical address to virtual
- *	@address: address to remap
- *
- *	The returned virtual address is a current CPU mapping for
- *	the memory address given. It is only valid to use this function on
- *	addresses that have a kernel mapping
- *
- *	This function does not handle bus mappings for DMA transfers. In
- *	almost all conceivable cases a device driver should not be using
- *	this function
- */
-static inline void *phys_to_virt(unsigned long address)
-{
-	return (void *)__va(address);
-}
-
-#define bus_to_virt(a) phys_to_virt(a)
-
-static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
-			unsigned long flags)
-{
-	return (void *)address;
-}
-
-#define ioremap(physaddr, size)	((void __iomem *)(unsigned long)(physaddr))
-#define iounmap(addr)		((void)0)
-#define ioremap_nocache(physaddr, size)	ioremap(physaddr, size)
+#define ioremap_writethrough(addr, size)	ioremap((addr), (size))
+#define ioremap_nocache(addr, size)		ioremap((addr), (size))
+#define ioremap_fullcache(addr, size)		ioremap((addr), (size))
+#define ioremap_wc(addr, size)			ioremap((addr), (size))
 
 #endif /* CONFIG_MMU */
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
-
-/*
- * Big Endian
- */
+/* Big Endian */
 #define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a))
 #define out_be16(a, v) __raw_writew((v), (a))
 
@@ -239,10 +59,7 @@
 #define writel_be(v, a)	out_be32((__force unsigned *)a, v)
 #define readl_be(a)	in_be32((__force unsigned *)a)
 
-/*
- * Little endian
- */
-
+/* Little endian */
 #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
@@ -253,100 +70,7 @@
 #define out_8(a, v) __raw_writeb((v), (a))
 #define in_8(a) __raw_readb(a)
 
-#define mmiowb()
-
-#define ioport_map(port, nr)	((void __iomem *)(port))
-#define ioport_unmap(addr)
-
-/* from asm-generic/io.h */
-#ifndef insb
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
-	if (count) {
-		u8 *buf = buffer;
-		do {
-			u8 x = inb(addr);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-#endif
-
-#ifndef insw
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
-	if (count) {
-		u16 *buf = buffer;
-		do {
-			u16 x = inw(addr);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-#endif
-
-#ifndef insl
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
-	if (count) {
-		u32 *buf = buffer;
-		do {
-			u32 x = inl(addr);
-			*buf++ = x;
-		} while (--count);
-	}
-}
-#endif
-
-#ifndef outsb
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
-	if (count) {
-		const u8 *buf = buffer;
-		do {
-			outb(*buf++, addr);
-		} while (--count);
-	}
-}
-#endif
-
-#ifndef outsw
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
-	if (count) {
-		const u16 *buf = buffer;
-		do {
-			outw(*buf++, addr);
-		} while (--count);
-	}
-}
-#endif
-
-#ifndef outsl
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
-	if (count) {
-		const u32 *buf = buffer;
-		do {
-			outl(*buf++, addr);
-		} while (--count);
-	}
-}
-#endif
-
-#define ioread8_rep(p, dst, count) \
-	insb((unsigned long) (p), (dst), (count))
-#define ioread16_rep(p, dst, count) \
-	insw((unsigned long) (p), (dst), (count))
-#define ioread32_rep(p, dst, count) \
-	insl((unsigned long) (p), (dst), (count))
-
-#define iowrite8_rep(p, src, count) \
-	outsb((unsigned long) (p), (src), (count))
-#define iowrite16_rep(p, src, count) \
-	outsw((unsigned long) (p), (src), (count))
-#define iowrite32_rep(p, src, count) \
-	outsl((unsigned long) (p), (src), (count))
+#include <asm-generic/io.h>
 
 #define readb_relaxed	readb
 #define readw_relaxed	readw
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index d6e0ffe..9d31b05 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -122,7 +122,7 @@
 }
 
 /* Free all resources held by a thread. */
-extern inline void release_thread(struct task_struct *dead_task)
+static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index f05df56..be84a4d 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -19,14 +19,12 @@
 
 extern char *klimit;
 
-void early_printk(const char *fmt, ...);
-
 int setup_early_printk(char *opt);
 void remap_early_printk(void);
 void disable_early_printk(void);
 
-void heartbeat(void);
-void setup_heartbeat(void);
+void microblaze_heartbeat(void);
+void microblaze_setup_heartbeat(void);
 
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 20043b6..8d0791b 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -93,7 +93,7 @@
 #define __NR_settimeofday	79 /* ok */
 #define __NR_getgroups		80 /* ok */
 #define __NR_setgroups		81 /* ok */
-#define __NR_select		82 /* obsolete -> sys_pselect7 */
+#define __NR_select		82 /* obsolete -> sys_pselect6 */
 #define __NR_symlink		83 /* symlinkat */
 #define __NR_oldlstat		84 /* remove */
 #define __NR_readlink		85 /* obsolete -> sys_readlinkat */
@@ -320,7 +320,7 @@
 #define __NR_readlinkat		305 /* ok */
 #define __NR_fchmodat		306 /* ok */
 #define __NR_faccessat		307 /* ok */
-#define __NR_pselect6		308 /* obsolete -> sys_pselect7 */
+#define __NR_pselect6		308 /* ok */
 #define __NR_ppoll		309 /* ok */
 #define __NR_unshare		310 /* ok */
 #define __NR_set_robust_list	311 /* ok */
@@ -396,5 +396,7 @@
 #define __NR_process_vm_writev	378
 #define __NR_kcmp		379
 #define __NR_finit_module	380
+#define __NR_sched_setattr	381
+#define __NR_sched_getattr	382
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 5b0e512..08d50cc 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -16,7 +16,7 @@
 
 obj-y += dma.o exceptions.o \
 	hw_exception_handler.o intc.o irq.o \
-	process.o prom.o prom_parse.o ptrace.o \
+	platform.o process.o prom.o prom_parse.o ptrace.o \
 	reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 1879a05..4643e3a 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -17,7 +17,7 @@
 
 static unsigned int base_addr;
 
-void heartbeat(void)
+void microblaze_heartbeat(void)
 {
 	static unsigned int cnt, period, dist;
 
@@ -42,7 +42,7 @@
 	}
 }
 
-void setup_heartbeat(void)
+void microblaze_setup_heartbeat(void)
 {
 	struct device_node *gpio = NULL;
 	int *prop;
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 581451a..15c7c12 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -32,6 +32,29 @@
 #define MER_ME (1<<0)
 #define MER_HIE (1<<1)
 
+static unsigned int (*read_fn)(void __iomem *);
+static void (*write_fn)(u32, void __iomem *);
+
+static void intc_write32(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
+
+static unsigned int intc_read32(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static void intc_write32_be(u32 val, void __iomem *addr)
+{
+	iowrite32be(val, addr);
+}
+
+static unsigned int intc_read32_be(void __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
 static void intc_enable_or_unmask(struct irq_data *d)
 {
 	unsigned long mask = 1 << d->hwirq;
@@ -43,21 +66,21 @@
 	 * acks the irq before calling the interrupt handler
 	 */
 	if (irqd_is_level_type(d))
-		out_be32(intc_baseaddr + IAR, mask);
+		write_fn(mask, intc_baseaddr + IAR);
 
-	out_be32(intc_baseaddr + SIE, mask);
+	write_fn(mask, intc_baseaddr + SIE);
 }
 
 static void intc_disable_or_mask(struct irq_data *d)
 {
 	pr_debug("disable: %ld\n", d->hwirq);
-	out_be32(intc_baseaddr + CIE, 1 << d->hwirq);
+	write_fn(1 << d->hwirq, intc_baseaddr + CIE);
 }
 
 static void intc_ack(struct irq_data *d)
 {
 	pr_debug("ack: %ld\n", d->hwirq);
-	out_be32(intc_baseaddr + IAR, 1 << d->hwirq);
+	write_fn(1 << d->hwirq, intc_baseaddr + IAR);
 }
 
 static void intc_mask_ack(struct irq_data *d)
@@ -65,8 +88,8 @@
 	unsigned long mask = 1 << d->hwirq;
 
 	pr_debug("disable_and_ack: %ld\n", d->hwirq);
-	out_be32(intc_baseaddr + CIE, mask);
-	out_be32(intc_baseaddr + IAR, mask);
+	write_fn(mask, intc_baseaddr + CIE);
+	write_fn(mask, intc_baseaddr + IAR);
 }
 
 static struct irq_chip intc_dev = {
@@ -83,7 +106,7 @@
 {
 	unsigned int hwirq, irq = -1;
 
-	hwirq = in_be32(intc_baseaddr + IVR);
+	hwirq = read_fn(intc_baseaddr + IVR);
 	if (hwirq != -1U)
 		irq = irq_find_mapping(root_domain, hwirq);
 
@@ -140,17 +163,25 @@
 	pr_info("%s: num_irq=%d, edge=0x%x\n",
 		intc->full_name, nr_irq, intr_mask);
 
+	write_fn = intc_write32;
+	read_fn = intc_read32;
+
 	/*
 	 * Disable all external interrupts until they are
 	 * explicity requested.
 	 */
-	out_be32(intc_baseaddr + IER, 0);
+	write_fn(0, intc_baseaddr + IER);
 
 	/* Acknowledge any pending interrupts just in case. */
-	out_be32(intc_baseaddr + IAR, 0xffffffff);
+	write_fn(0xffffffff, intc_baseaddr + IAR);
 
 	/* Turn on the Master Enable. */
-	out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
+	write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
+	if (!(read_fn(intc_baseaddr + MER) & (MER_HIE | MER_ME))) {
+		write_fn = intc_write32_be;
+		read_fn = intc_read32_be;
+		write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
+	}
 
 	/* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
 	 * lazy and Michal can clean it up to something nicer when he tests
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/kernel/platform.c
similarity index 100%
rename from arch/microblaze/platform/platform.c
rename to arch/microblaze/kernel/platform.c
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 7d1a9c8..b2dd371 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -8,6 +8,7 @@
  * for more details.
  */
 
+#include <linux/cpu.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/pm.h>
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index d26d7e7..49a07a4 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -216,7 +216,7 @@
 		/* MS: I need add offset in page */
 		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
 		/* MS address is virtual */
-		address = virt_to_phys(address);
+		address = __virt_to_phys(address);
 		invalidate_icache_range(address, address + 8);
 		flush_dcache_range(address, address + 8);
 	}
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index b882ad5..329dfba 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -308,7 +308,7 @@
 	.long sys_readlinkat		/* 305 */
 	.long sys_fchmodat
 	.long sys_faccessat
-	.long sys_ni_syscall /* pselect6 */
+	.long sys_pselect6
 	.long sys_ppoll
 	.long sys_unshare		/* 310 */
 	.long sys_set_robust_list
@@ -363,8 +363,8 @@
 	.long sys_sendmsg		/* 360 */
 	.long sys_recvmsg
 	.long sys_accept4
-	.long sys_ni_syscall
-	.long sys_ni_syscall
+	.long sys_preadv
+	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 365 */
 	.long sys_perf_event_open
 	.long sys_recvmmsg
@@ -381,3 +381,5 @@
 	.long sys_process_vm_writev
 	.long sys_kcmp
 	.long sys_finit_module
+	.long sys_sched_setattr
+	.long sys_sched_getattr
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index fb0c6144..dd96f0e 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -43,10 +43,33 @@
 #define TCSR_PWMA	(1<<9)
 #define TCSR_ENALL	(1<<10)
 
+static unsigned int (*read_fn)(void __iomem *);
+static void (*write_fn)(u32, void __iomem *);
+
+static void timer_write32(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
+
+static unsigned int timer_read32(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static void timer_write32_be(u32 val, void __iomem *addr)
+{
+	iowrite32be(val, addr);
+}
+
+static unsigned int timer_read32_be(void __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
 static inline void xilinx_timer0_stop(void)
 {
-	out_be32(timer_baseaddr + TCSR0,
-		 in_be32(timer_baseaddr + TCSR0) & ~TCSR_ENT);
+	write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT,
+		 timer_baseaddr + TCSR0);
 }
 
 static inline void xilinx_timer0_start_periodic(unsigned long load_val)
@@ -54,10 +77,10 @@
 	if (!load_val)
 		load_val = 1;
 	/* loading value to timer reg */
-	out_be32(timer_baseaddr + TLR0, load_val);
+	write_fn(load_val, timer_baseaddr + TLR0);
 
 	/* load the initial value */
-	out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
+	write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
 
 	/* see timer data sheet for detail
 	 * !ENALL - don't enable 'em all
@@ -72,8 +95,8 @@
 	 * UDT - set the timer as down counter
 	 * !MDT0 - generate mode
 	 */
-	out_be32(timer_baseaddr + TCSR0,
-			TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
+	write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+		 timer_baseaddr + TCSR0);
 }
 
 static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
@@ -81,13 +104,13 @@
 	if (!load_val)
 		load_val = 1;
 	/* loading value to timer reg */
-	out_be32(timer_baseaddr + TLR0, load_val);
+	write_fn(load_val, timer_baseaddr + TLR0);
 
 	/* load the initial value */
-	out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
+	write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
 
-	out_be32(timer_baseaddr + TCSR0,
-			TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
+	write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+		 timer_baseaddr + TCSR0);
 }
 
 static int xilinx_timer_set_next_event(unsigned long delta,
@@ -133,14 +156,14 @@
 
 static inline void timer_ack(void)
 {
-	out_be32(timer_baseaddr + TCSR0, in_be32(timer_baseaddr + TCSR0));
+	write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0);
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = &clockevent_xilinx_timer;
 #ifdef CONFIG_HEART_BEAT
-	heartbeat();
+	microblaze_heartbeat();
 #endif
 	timer_ack();
 	evt->event_handler(evt);
@@ -169,7 +192,7 @@
 
 static u64 xilinx_clock_read(void)
 {
-	return in_be32(timer_baseaddr + TCR1);
+	return read_fn(timer_baseaddr + TCR1);
 }
 
 static cycle_t xilinx_read(struct clocksource *cs)
@@ -217,10 +240,10 @@
 		panic("failed to register clocksource");
 
 	/* stop timer1 */
-	out_be32(timer_baseaddr + TCSR1,
-		 in_be32(timer_baseaddr + TCSR1) & ~TCSR_ENT);
+	write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
+		 timer_baseaddr + TCSR1);
 	/* start timer1 - up counting without interrupt */
-	out_be32(timer_baseaddr + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
+	write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
 
 	/* register timecounter - for ftrace support */
 	init_xilinx_timecounter();
@@ -245,6 +268,15 @@
 		BUG();
 	}
 
+	write_fn = timer_write32;
+	read_fn = timer_read32;
+
+	write_fn(TCSR_MDT, timer_baseaddr + TCSR0);
+	if (!(read_fn(timer_baseaddr + TCSR0) & TCSR_MDT)) {
+		write_fn = timer_write32_be;
+		read_fn = timer_read32_be;
+	}
+
 	irq = irq_of_parse_and_map(timer, 0);
 
 	of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
@@ -274,7 +306,7 @@
 
 	setup_irq(irq, &timer_irqaction);
 #ifdef CONFIG_HEART_BEAT
-	setup_heartbeat();
+	microblaze_setup_heartbeat();
 #endif
 	xilinx_clocksource_init();
 	xilinx_clockevent_init();
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index dbbf224..e10ad93 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -117,7 +117,7 @@
 	ret = (void *)va;
 
 	/* This gives us the real physical address of the first page. */
-	*dma_handle = pa = virt_to_bus((void *)vaddr);
+	*dma_handle = pa = __virt_to_phys(vaddr);
 #endif
 
 	/*
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 89077d3..77bc7c7 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -369,7 +369,7 @@
 	if (initrd_start) {
 		unsigned long size;
 		size = initrd_end - initrd_start;
-		memblock_reserve(virt_to_phys(initrd_start), size);
+		memblock_reserve(__virt_to_phys(initrd_start), size);
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 10b3bd0..4f4520e 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -69,10 +69,11 @@
 	 *
 	 * However, allow remap of rootfs: TBD
 	 */
+
 	if (mem_init_done &&
 		p >= memory_start && p < virt_to_phys(high_memory) &&
-		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
-		p < virt_to_phys((unsigned long)__bss_stop))) {
+		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
+		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
 		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
 			(unsigned long)p, __builtin_return_address(0));
 		return NULL;
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
deleted file mode 100644
index db1aa5c..0000000
--- a/arch/microblaze/platform/Kconfig.platform
+++ /dev/null
@@ -1,44 +0,0 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-# Platform selection Kconfig menu for MicroBlaze targets
-#
-
-menu "Platform options"
-choice
-	prompt "Platform"
-	default PLATFORM_MICROBLAZE_AUTO
-	help
-	  Choose which hardware board/platform you are targeting.
-
-config PLATFORM_GENERIC
-	bool "Generic"
-	help
-	  Choose this option for the Generic platform.
-
-endchoice
-
-config OPT_LIB_FUNCTION
-	bool "Optimalized lib function"
-	default y
-	help
-	  Allows turn on optimalized library function (memcpy and memmove).
-	  They are optimized by using word alignment. This will work
-	  fine if both source and destination are aligned on the same
-	  boundary. However, if they are aligned on different boundaries
-	  shifts will be necessary. This might result in bad performance
-	  on MicroBlaze systems without a barrel shifter.
-
-config OPT_LIB_ASM
-	bool "Optimalized lib function ASM"
-	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
-	default n
-	help
-	  Allows turn on optimalized library function (memcpy and memmove).
-	  Function are written in asm code.
-
-if PLATFORM_GENERIC=y
-	source "arch/microblaze/platform/generic/Kconfig.auto"
-endif
-
-endmenu
diff --git a/arch/microblaze/platform/Makefile b/arch/microblaze/platform/Makefile
deleted file mode 100644
index ea1b75c..0000000
--- a/arch/microblaze/platform/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for arch/microblaze/platform directory
-#
-#obj-$(CONFIG_PLATFORM_GENERIC) += generic/
-
-obj-y	+= platform.o
diff --git a/arch/microblaze/platform/generic/Kconfig.auto b/arch/microblaze/platform/generic/Kconfig.auto
deleted file mode 100644
index 25a6f01..0000000
--- a/arch/microblaze/platform/generic/Kconfig.auto
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# (C) Copyright 2007 Michal Simek
-#
-# Michal SIMEK <monstr@monstr.eu>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-# MA 02111-1307 USA
-#
-
-# Definitions for MICROBLAZE0
-comment "Definitions for MICROBLAZE0"
-
-config KERNEL_BASE_ADDR
-	hex "Physical address where Linux Kernel is"
-	default "0x90000000"
-	help
-	  BASE Address for kernel
-
-config XILINX_MICROBLAZE0_FAMILY
-	string "Targeted FPGA family"
-	default "virtex5"
-
-config XILINX_MICROBLAZE0_USE_MSR_INSTR
-	int "USE_MSR_INSTR range (0:1)"
-	default 0
-
-config XILINX_MICROBLAZE0_USE_PCMP_INSTR
-	int "USE_PCMP_INSTR range (0:1)"
-	default 0
-
-config XILINX_MICROBLAZE0_USE_BARREL
-	int "USE_BARREL range (0:1)"
-	default 0
-
-config XILINX_MICROBLAZE0_USE_DIV
-	int "USE_DIV range (0:1)"
-	default 0
-
-config XILINX_MICROBLAZE0_USE_HW_MUL
-	int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
-	default 0
-
-config XILINX_MICROBLAZE0_USE_FPU
-	int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
-	default 0
-
-config XILINX_MICROBLAZE0_HW_VER
-	string "Core version number"
-	default 7.10.d
diff --git a/arch/microblaze/platform/generic/Makefile b/arch/microblaze/platform/generic/Makefile
deleted file mode 100644
index 9a8b1bd..0000000
--- a/arch/microblaze/platform/generic/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-#
-# Empty Makefile to keep make clean happy
-#
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6c488c8..c6e9cd2 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -14,7 +14,7 @@
 #define __ASM_MIPS_SYSCALL_H
 
 #include <linux/compiler.h>
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/elf-em.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -127,12 +127,11 @@
 extern const unsigned long sys32_call_table[];
 extern const unsigned long sysn32_call_table[];
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-	if (!test_tsk_thread_flag(task, TIF_32BIT_REGS))
+	if (!test_thread_flag(TIF_32BIT_REGS))
 		arch |= __AUDIT_ARCH_64BIT;
 #endif
 #if defined(__LITTLE_ENDIAN)
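For reference, the resulting MIPS helper composes the audit arch value purely from the current task's flags plus compile-time endianness; a worked example of the values involved, assuming the usual uapi/linux/audit.h definitions (illustration only, not part of this patch):

/*
 * Worked example, assuming the usual uapi/linux/audit.h definitions:
 * a 64-bit little-endian task (TIF_32BIT_REGS clear) now reports
 *     EM_MIPS | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE == AUDIT_ARCH_MIPSEL64
 * while an o32 big-endian task reports plain EM_MIPS == AUDIT_ARCH_MIPS.
 */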
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7271e5a..71f85f4 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -649,7 +649,7 @@
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->regs[2]);
 
-	audit_syscall_entry(syscall_get_arch(current, regs),
+	audit_syscall_entry(syscall_get_arch(),
 			    syscall,
 			    regs->regs[4], regs->regs[5],
 			    regs->regs[6], regs->regs[7]);
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index aed32b8..e1f427f 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -28,16 +28,16 @@
 };
 
 struct cpufreq_frequency_table loongson2_clockmod_table[] = {
-	{DC_RESV, CPUFREQ_ENTRY_INVALID},
-	{DC_ZERO, CPUFREQ_ENTRY_INVALID},
-	{DC_25PT, 0},
-	{DC_37PT, 0},
-	{DC_50PT, 0},
-	{DC_62PT, 0},
-	{DC_75PT, 0},
-	{DC_87PT, 0},
-	{DC_DISABLE, 0},
-	{DC_RESV, CPUFREQ_TABLE_END},
+	{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+	{0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
+	{0, DC_25PT, 0},
+	{0, DC_37PT, 0},
+	{0, DC_50PT, 0},
+	{0, DC_62PT, 0},
+	{0, DC_75PT, 0},
+	{0, DC_87PT, 0},
+	{0, DC_DISABLE, 0},
+	{0, DC_RESV, CPUFREQ_TABLE_END},
 };
 EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
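The extra leading 0 in every initializer reflects cpufreq_frequency_table growing a new first member; on the assumption that the 3.15 layout is { flags, driver_data, frequency }, the same table spelled with designated initializers would read as below (illustration only, trimmed to a few rows, not part of this patch):

/* Same data as the table above, with designated initializers; assumes the
 * struct layout { flags, driver_data, frequency }.  The zero/invalid
 * frequency values simply mirror the rows above. */
static struct cpufreq_frequency_table loongson2_clockmod_table_example[] = {
	{ .flags = 0, .driver_data = DC_RESV, .frequency = CPUFREQ_ENTRY_INVALID },
	{ .flags = 0, .driver_data = DC_25PT, .frequency = 0 },
	{ .flags = 0, .driver_data = DC_50PT, .frequency = 0 },
	/* ... remaining divider steps as above ... */
	{ .flags = 0, .driver_data = DC_RESV, .frequency = CPUFREQ_TABLE_END },
};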
 
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index 7c137cd..2fbbe4d 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,7 +70,7 @@
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
@@ -89,7 +89,7 @@
 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 	local_flush_tlb_one(vaddr);
 
-	return vaddr;
+	return (void *)vaddr;
 }
 
 static inline void __kunmap_atomic(unsigned long vaddr)
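Returning void * brings the mn10300 helper in line with the generic kmap_atomic() prototype, so callers can treat the mapping directly as a pointer. A minimal caller sketch against the generic API (not mn10300-specific, illustration only):

#include <linux/highmem.h>
#include <linux/string.h>

/* Minimal sketch of a caller using the generic kmap_atomic()/kunmap_atomic()
 * pair; illustration only, not part of this patch. */
static void copy_into_page(struct page *page, size_t offset,
			   const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* now a pointer, no cast needed */

	memcpy(vaddr + offset, src, len);
	kunmap_atomic(vaddr);
}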
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bb2a8ec..1faefed 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -28,6 +28,7 @@
 	select CLONE_BACKWARDS
 	select TTY # Needed for pdc_cons.c
 	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_ARCH_AUDITSYSCALL
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6c03a94..e099899 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -144,6 +144,7 @@
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+	select HAVE_ARCH_AUDITSYSCALL
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index c2353bf..175a8b9 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1244,7 +1244,6 @@
 CONFIG_DEBUG_HIGHMEM=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 139a830..fdee37f 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -174,7 +174,6 @@
 CONFIG_PROVE_LOCKING=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 9ea8342b..a905063 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -306,3 +306,4 @@
 CONFIG_KVM_BOOK3S_64_HV=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index 3c84f9d..58e3dbf 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -301,3 +301,4 @@
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_DEV_NX=y
 CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 0dcc48a..e5d2e0b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -272,6 +272,10 @@
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
 #define SPRN_IC		0x350	/* Virtual Instruction Count */
 #define SPRN_VTB	0x351	/* Virtual Time Base */
+#define SPRN_PMICR	0x354   /* Power Management Idle Control Reg */
+#define SPRN_PMSR	0x355   /* Power Management Status Reg */
+#define SPRN_PMCR	0x374	/* Power Management Control Register */
+
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_TAR_LG	8	/* Enable Target Address Register */
 #define FSCR_EBB_LG	7	/* Enable Event Based Branching */
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 895e8a2..c252ee9 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -11,6 +11,12 @@
 	select PPC_UDBG_16550
 	select PPC_SCOM
 	select ARCH_RANDOM
+	select CPU_FREQ
+	select CPU_FREQ_GOV_PERFORMANCE
+	select CPU_FREQ_GOV_POWERSAVE
+	select CPU_FREQ_GOV_USERSPACE
+	select CPU_FREQ_GOV_ONDEMAND
+	select CPU_FREQ_GOV_CONSERVATIVE
 	default y
 
 config PPC_POWERNV_RTAS
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 346d216..d68fe34 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -103,6 +103,7 @@
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index ddaae2f..8df022c 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -581,7 +581,6 @@
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index cd29d2f..7776870 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,7 +12,7 @@
 #ifndef _ASM_SYSCALL_H
 #define _ASM_SYSCALL_H	1
 
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/ptrace.h>
@@ -89,11 +89,10 @@
 		regs->orig_gpr2 = args[0];
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_COMPAT
-	if (test_tsk_thread_flag(task, TIF_31BIT))
+	if (test_tsk_thread_flag(current, TIF_31BIT))
 		return AUDIT_ARCH_S390;
 #endif
 	return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ba55e93..834b67c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -42,6 +42,7 @@
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND
 	select OLD_SIGACTION
+	select HAVE_ARCH_AUDITSYSCALL
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
 	  and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 4e5229b..4723657 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -128,7 +128,6 @@
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_FRAME_POINTER=y
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7d8b7e9..29f2e98 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -77,6 +77,7 @@
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_C_RECORDMCOUNT
 	select NO_BOOTMEM
+	select HAVE_ARCH_AUDITSYSCALL
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 21ca44c..6915d28 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -1,6 +1,7 @@
 config UML
 	bool
 	default y
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_UID16
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5b8ec0f..25d2c6f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -129,6 +129,7 @@
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select GENERIC_CPU_AUTOPROBE
+	select HAVE_ARCH_AUDITSYSCALL
 
 config INSTRUCTION_DECODER
 	def_bool y
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 1e61461..4703a6c 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -112,7 +112,7 @@
 	efi_file_info_t *info;
 	efi_status_t status;
 	efi_guid_t info_guid = EFI_FILE_INFO_ID;
-	u32 info_sz;
+	u64 info_sz;
 
 	status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
 				 EFI_FILE_MODE_READ, (u64)0);
@@ -167,31 +167,31 @@
 }
 
 static inline efi_status_t
-efi_file_read(void *__fh, void *handle, unsigned long *size, void *addr)
+efi_file_read(void *handle, unsigned long *size, void *addr)
 {
 	unsigned long func;
 
 	if (efi_early->is64) {
-		efi_file_handle_64_t *fh = __fh;
+		efi_file_handle_64_t *fh = handle;
 
 		func = (unsigned long)fh->read;
 		return efi_early->call(func, handle, size, addr);
 	} else {
-		efi_file_handle_32_t *fh = __fh;
+		efi_file_handle_32_t *fh = handle;
 
 		func = (unsigned long)fh->read;
 		return efi_early->call(func, handle, size, addr);
 	}
 }
 
-static inline efi_status_t efi_file_close(void *__fh, void *handle)
+static inline efi_status_t efi_file_close(void *handle)
 {
 	if (efi_early->is64) {
-		efi_file_handle_64_t *fh = __fh;
+		efi_file_handle_64_t *fh = handle;
 
 		return efi_early->call((unsigned long)fh->close, handle);
 	} else {
-		efi_file_handle_32_t *fh = __fh;
+		efi_file_handle_32_t *fh = handle;
 
 		return efi_early->call((unsigned long)fh->close, handle);
 	}
@@ -1016,6 +1016,9 @@
  * Because the x86 boot code expects to be passed a boot_params we
  * need to create one ourselves (usually the bootloader would create
  * one for us).
+ *
+ * The caller is responsible for filling out ->code32_start in the
+ * returned boot_params.
  */
 struct boot_params *make_boot_params(struct efi_config *c)
 {
@@ -1081,8 +1084,6 @@
 	hdr->vid_mode = 0xffff;
 	hdr->boot_flag = 0xAA55;
 
-	hdr->code32_start = (__u64)(unsigned long)image->image_base;
-
 	hdr->type_of_loader = 0x21;
 
 	/* Convert unicode cmdline to ascii */
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index de9d420..cbed140 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -59,6 +59,7 @@
 	call	make_boot_params
 	cmpl	$0, %eax
 	je	fail
+	movl	%esi, BP_code32_start(%eax)
 	popl	%ecx
 	pushl	%eax
 	pushl	%ecx
@@ -90,12 +91,7 @@
 	hlt
 	jmp	fail
 2:
-	call	3f
-3:
-	popl	%eax
-	subl	$3b, %eax
-	subl	BP_pref_address(%esi), %eax
-	add	BP_code32_start(%esi), %eax
+	movl	BP_code32_start(%esi), %eax
 	leal	preferred_addr(%eax), %eax
 	jmp	*%eax
 
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 57e58a5..0d558ee 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -261,6 +261,8 @@
 	cmpq	$0,%rax
 	je	fail
 	mov	%rax, %rsi
+	leaq	startup_32(%rip), %rax
+	movl	%eax, BP_code32_start(%rsi)
 	jmp	2f		/* Skip the relocation */
 
 handover_entry:
@@ -284,12 +286,7 @@
 	hlt
 	jmp	fail
 2:
-	call	3f
-3:
-	popq	%rax
-	subq	$3b, %rax
-	subq	BP_pref_address(%rsi), %rax
-	add	BP_code32_start(%esi), %eax
+	movl	BP_code32_start(%esi), %eax
 	leaq	preferred_addr(%rax), %rax
 	jmp	*%rax
 
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index aea284b..d6a756a 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,7 +13,7 @@
 #ifndef _ASM_X86_SYSCALL_H
 #define _ASM_X86_SYSCALL_H
 
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/asm-offsets.h>	/* For NR_syscalls */
@@ -91,8 +91,7 @@
 	memcpy(&regs->bx + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	return AUDIT_ARCH_I386;
 }
@@ -221,8 +220,7 @@
 		}
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_IA32_EMULATION
 	/*
@@ -234,7 +232,7 @@
 	 *
 	 * x32 tasks should be considered AUDIT_ARCH_X86_64.
 	 */
-	if (task_thread_info(task)->status & TS_COMPAT)
+	if (task_thread_info(current)->status & TS_COMPAT)
 		return AUDIT_ARCH_I386;
 #endif
 	/* Both x32 and x86_64 are considered "64-bit". */
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index e69182f..4b28159 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -87,7 +87,9 @@
 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
 	retval = 0;
-	if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+	/* If the HW does not support any sub-states in this C-state */
+	if (num_cstate_subtype == 0) {
+		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
 		retval = -1;
 		goto out;
 	}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 481ae38..ad28db7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1996,7 +1996,8 @@
 	};
 
 	/* First tickle the hardware, only then report what went on. -- REW */
-	apic_write(APIC_ESR, 0);
+	if (lapic_get_maxlvt() > 3)	/* Due to the Pentium erratum 3AP. */
+		apic_write(APIC_ESR, 0);
 	v = apic_read(APIC_ESR);
 	ack_APIC_irq();
 	atomic_inc(&irq_err_count);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 9b7734b..eeee23f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -89,6 +89,9 @@
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int			cpu_missing;
 
+/* CMCI storm detection filter */
+static DEFINE_PER_CPU(unsigned long, mce_polled_error);
+
 /*
  * MCA banks polled by the period polling timer for corrected events.
  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
@@ -595,6 +598,7 @@
 {
 	struct mce m;
 	int i;
+	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -614,6 +618,8 @@
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
+		v = &get_cpu_var(mce_polled_error);
+		set_bit(0, v);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
@@ -1278,10 +1284,18 @@
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 	mce_adjust_timer_default;
 
+static int cmc_error_seen(void)
+{
+	unsigned long *v = &__get_cpu_var(mce_polled_error);
+
+	return test_and_clear_bit(0, v);
+}
+
 static void mce_timer_fn(unsigned long data)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
 	unsigned long iv;
+	int notify;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -1296,7 +1310,9 @@
 	 * polling interval, otherwise increase the polling interval.
 	 */
 	iv = __this_cpu_read(mce_next_interval);
-	if (mce_notify_irq()) {
+	notify = mce_notify_irq();
+	notify |= cmc_error_seen();
+	if (notify) {
 		iv = max(iv / 2, (unsigned long) HZ/100);
 	} else {
 		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
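The storm filter reduces to a per-CPU "saw a corrected error since the last tick" bit: the poll path sets it, and the timer test-and-clears it, treating it like mce_notify_irq() when deciding whether to shorten the polling interval. Stripped to its essentials (hypothetical names standing in for mce_polled_error and cmc_error_seen(), preemption assumed already disabled on both paths):

#include <linux/percpu.h>
#include <linux/bitops.h>

/* The producer/consumer handshake above, distilled; illustration only. */
static DEFINE_PER_CPU(unsigned long, saw_corrected_error);

static void poll_path_mark_error(void)		/* machine_check_poll() side */
{
	set_bit(0, this_cpu_ptr(&saw_corrected_error));
}

static bool timer_path_consume(void)		/* mce_timer_fn() side */
{
	return test_and_clear_bit(0, this_cpu_ptr(&saw_corrected_error));
}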
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index fb6156f..3bdb95a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -137,6 +138,22 @@
 	}
 }
 
+static void cmci_storm_disable_banks(void)
+{
+	unsigned long flags, *owned;
+	int bank;
+	u64 val;
+
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	owned = __get_cpu_var(mce_banks_owned);
+	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+		val &= ~MCI_CTL2_CMCI_EN;
+		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+	}
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static bool cmci_storm_detect(void)
 {
 	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -158,7 +175,7 @@
 	if (cnt <= CMCI_STORM_THRESHOLD)
 		return false;
 
-	cmci_clear();
+	cmci_storm_disable_banks();
 	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
 	r = atomic_add_return(1, &cmci_storm_on_cpus);
 	mce_timer_kick(CMCI_POLL_INTERVAL);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6d7d5a1..b0cc380 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -225,7 +225,7 @@
  *
  * And yes, so far on current devices the base addr is always under 4G.
  */
-static u32 __init intel_stolen_base(int num, int slot, int func)
+static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
 {
 	u32 base;
 
@@ -244,6 +244,114 @@
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
 
+static size_t __init i830_tseg_size(void)
+{
+	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+
+	if (!(tmp & TSEG_ENABLE))
+		return 0;
+
+	if (tmp & I830_TSEG_SIZE_1M)
+		return MB(1);
+	else
+		return KB(512);
+}
+
+static size_t __init i845_tseg_size(void)
+{
+	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+
+	if (!(tmp & TSEG_ENABLE))
+		return 0;
+
+	switch (tmp & I845_TSEG_SIZE_MASK) {
+	case I845_TSEG_SIZE_512K:
+		return KB(512);
+	case I845_TSEG_SIZE_1M:
+		return MB(1);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static size_t __init i85x_tseg_size(void)
+{
+	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+
+	if (!(tmp & TSEG_ENABLE))
+		return 0;
+
+	return MB(1);
+}
+
+static size_t __init i830_mem_size(void)
+{
+	return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
+}
+
+static size_t __init i85x_mem_size(void)
+{
+	return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
+}
+
+/*
+ * On 830/845/85x the stolen memory base isn't available in any
+ * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
+ */
+static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+	return i830_mem_size() - i830_tseg_size() - stolen_size;
+}
+
+static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+	return i830_mem_size() - i845_tseg_size() - stolen_size;
+}
+
+static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+	return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+}
+
+static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+	/*
+	 * FIXME is the graphics stolen memory region
+	 * always at TOUD? Ie. is it always the last
+	 * one to be allocated by the BIOS?
+	 */
+	return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+}
+
+static size_t __init i830_stolen_size(int num, int slot, int func)
+{
+	size_t stolen_size;
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+	case I830_GMCH_GMS_STOLEN_512:
+		stolen_size = KB(512);
+		break;
+	case I830_GMCH_GMS_STOLEN_1024:
+		stolen_size = MB(1);
+		break;
+	case I830_GMCH_GMS_STOLEN_8192:
+		stolen_size = MB(8);
+		break;
+	case I830_GMCH_GMS_LOCAL:
+		/* local memory isn't part of the normal address space */
+		stolen_size = 0;
+		break;
+	default:
+		return 0;
+	}
+
+	return stolen_size;
+}
+
 static size_t __init gen3_stolen_size(int num, int slot, int func)
 {
 	size_t stolen_size;
@@ -310,7 +418,7 @@
 	return gmch_ctrl << 25; /* 32 MB units */
 }
 
-static inline size_t gen8_stolen_size(int num, int slot, int func)
+static size_t gen8_stolen_size(int num, int slot, int func)
 {
 	u16 gmch_ctrl;
 
@@ -320,31 +428,74 @@
 	return gmch_ctrl << 25; /* 32 MB units */
 }
 
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+struct intel_stolen_funcs {
+	size_t (*size)(int num, int slot, int func);
+	u32 (*base)(int num, int slot, int func, size_t size);
+};
+
+static const struct intel_stolen_funcs i830_stolen_funcs = {
+	.base = i830_stolen_base,
+	.size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i845_stolen_funcs = {
+	.base = i845_stolen_base,
+	.size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i85x_stolen_funcs = {
+	.base = i85x_stolen_base,
+	.size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs i865_stolen_funcs = {
+	.base = i865_stolen_base,
+	.size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen3_stolen_funcs = {
+	.base = intel_stolen_base,
+	.size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen6_stolen_funcs = {
+	.base = intel_stolen_base,
+	.size = gen6_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen8_stolen_funcs = {
+	.base = intel_stolen_base,
+	.size = gen8_stolen_size,
+};
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
-	INTEL_I915G_IDS(gen3_stolen_size),
-	INTEL_I915GM_IDS(gen3_stolen_size),
-	INTEL_I945G_IDS(gen3_stolen_size),
-	INTEL_I945GM_IDS(gen3_stolen_size),
-	INTEL_VLV_M_IDS(gen6_stolen_size),
-	INTEL_VLV_D_IDS(gen6_stolen_size),
-	INTEL_PINEVIEW_IDS(gen3_stolen_size),
-	INTEL_I965G_IDS(gen3_stolen_size),
-	INTEL_G33_IDS(gen3_stolen_size),
-	INTEL_I965GM_IDS(gen3_stolen_size),
-	INTEL_GM45_IDS(gen3_stolen_size),
-	INTEL_G45_IDS(gen3_stolen_size),
-	INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
-	INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
-	INTEL_SNB_D_IDS(gen6_stolen_size),
-	INTEL_SNB_M_IDS(gen6_stolen_size),
-	INTEL_IVB_M_IDS(gen6_stolen_size),
-	INTEL_IVB_D_IDS(gen6_stolen_size),
-	INTEL_HSW_D_IDS(gen6_stolen_size),
-	INTEL_HSW_M_IDS(gen6_stolen_size),
-	INTEL_BDW_M_IDS(gen8_stolen_size),
-	INTEL_BDW_D_IDS(gen8_stolen_size)
+	INTEL_I830_IDS(&i830_stolen_funcs),
+	INTEL_I845G_IDS(&i845_stolen_funcs),
+	INTEL_I85X_IDS(&i85x_stolen_funcs),
+	INTEL_I865G_IDS(&i865_stolen_funcs),
+	INTEL_I915G_IDS(&gen3_stolen_funcs),
+	INTEL_I915GM_IDS(&gen3_stolen_funcs),
+	INTEL_I945G_IDS(&gen3_stolen_funcs),
+	INTEL_I945GM_IDS(&gen3_stolen_funcs),
+	INTEL_VLV_M_IDS(&gen6_stolen_funcs),
+	INTEL_VLV_D_IDS(&gen6_stolen_funcs),
+	INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
+	INTEL_I965G_IDS(&gen3_stolen_funcs),
+	INTEL_G33_IDS(&gen3_stolen_funcs),
+	INTEL_I965GM_IDS(&gen3_stolen_funcs),
+	INTEL_GM45_IDS(&gen3_stolen_funcs),
+	INTEL_G45_IDS(&gen3_stolen_funcs),
+	INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
+	INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
+	INTEL_SNB_D_IDS(&gen6_stolen_funcs),
+	INTEL_SNB_M_IDS(&gen6_stolen_funcs),
+	INTEL_IVB_M_IDS(&gen6_stolen_funcs),
+	INTEL_IVB_D_IDS(&gen6_stolen_funcs),
+	INTEL_HSW_D_IDS(&gen6_stolen_funcs),
+	INTEL_HSW_M_IDS(&gen6_stolen_funcs),
+	INTEL_BDW_M_IDS(&gen8_stolen_funcs),
+	INTEL_BDW_D_IDS(&gen8_stolen_funcs)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
@@ -361,11 +512,13 @@
 
 	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
 		if (intel_stolen_ids[i].device == device) {
-			stolen_size_fn stolen_size =
-				(stolen_size_fn)intel_stolen_ids[i].driver_data;
-			size = stolen_size(num, slot, func);
-			start = intel_stolen_base(num, slot, func);
+			const struct intel_stolen_funcs *stolen_funcs =
+				(const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
+			size = stolen_funcs->size(num, slot, func);
+			start = stolen_funcs->base(num, slot, func, size);
 			if (size && start) {
+				printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
+				       start, start + (u32)size - 1);
 				/* Mark this space as reserved */
 				e820_add_region(start, size, E820_RESERVED);
 				sanitize_e820_map(e820.map,
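With driver_data now pointing at a { .base, .size } ops structure instead of a bare size callback, the size is computed first and handed to the base callback, which lets chipsets that need something other than the common base register (830/845/85x derive it from TOM/TSEG/stolen size, 865 reads TOUD) share the same table. A hedged sketch of wiring up one more chipset within this file (chipset name, register offset and ID macro are hypothetical, illustration only):

/* Hypothetical extension: a new chipset provides its own base callback,
 * reuses an existing size helper, and adds one ID-table line. */
static u32 __init newchip_stolen_base(int num, int slot, int func,
				      size_t stolen_size)
{
	/* 0x5c is a made-up config register holding the stolen base */
	return read_pci_config(num, slot, func, 0x5c) & 0xfff00000;
}

static const struct intel_stolen_funcs newchip_stolen_funcs = {
	.base = newchip_stolen_base,
	.size = gen3_stolen_size,
};

/* ...and in intel_stolen_ids[]:
 *	INTEL_NEWCHIP_IDS(&newchip_stolen_funcs),
 */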
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 42805fa..283a76a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -125,7 +125,7 @@
 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
 	seq_printf(p, "  Machine check polls\n");
 #endif
-#if defined(CONFIG_HYPERV) || defined(CONFIG_XEN)
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
 	seq_printf(p, "%*s: ", prec, "THR");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
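IS_ENABLED() is true for both =y and =m, whereas the old defined() test only caught the built-in case, so this counter line was hidden whenever Hyper-V support was built as a module. For reference (illustration only):

/* Behaviour of the two tests:
 *   CONFIG_HYPERV=y     -> defined(CONFIG_HYPERV) true,  IS_ENABLED() true
 *   CONFIG_HYPERV=m     -> defined(CONFIG_HYPERV) false, IS_ENABLED() true
 *   CONFIG_HYPERV unset -> both false
 */
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	/* hypervisor callback counters are printed */
#endif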
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ebc9873..af1d14a 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -229,6 +229,17 @@
 		}
 	}
 
+	/*
+	 * On x86-64 we do not support 16-bit segments due to
+	 * IRET leaking the high bits of the kernel stack address.
+	 */
+#ifdef CONFIG_X86_64
+	if (!ldt_info.seg_32bit) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+#endif
+
 	fill_ldt(&ldt, &ldt_info);
 	if (oldmode)
 		ldt.avl = 0;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 299d493..0497f71 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1207,23 +1207,31 @@
 	return ret;
 }
 
-static inline int __init determine_tce_table_size(u64 ram)
+static inline int __init determine_tce_table_size(void)
 {
 	int ret;
 
 	if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
 		return specified_table_size;
 
-	/*
-	 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
-	 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
-	 * larger table size has twice as many entries, so shift the
-	 * max ram address by 13 to divide by 8K and then look at the
-	 * order of the result to choose between 0-7.
-	 */
-	ret = get_order(ram >> 13);
-	if (ret > TCE_TABLE_SIZE_8M)
+	if (is_kdump_kernel() && saved_max_pfn) {
+		/*
+		 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
+		 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
+		 * larger table size has twice as many entries, so shift the
+		 * max ram address by 13 to divide by 8K and then look at the
+		 * order of the result to choose between 0-7.
+		 */
+		ret = get_order((saved_max_pfn * PAGE_SIZE) >> 13);
+		if (ret > TCE_TABLE_SIZE_8M)
+			ret = TCE_TABLE_SIZE_8M;
+	} else {
+		/*
+		 * Use 8M by default (suggested by Muli) if it's not
+		 * kdump kernel and saved_max_pfn isn't set.
+		 */
 		ret = TCE_TABLE_SIZE_8M;
+	}
 
 	return ret;
 }
@@ -1418,8 +1426,7 @@
 		return -ENOMEM;
 	}
 
-	specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
-					saved_max_pfn : max_pfn) * PAGE_SIZE);
+	specified_table_size = determine_tce_table_size();
 
 	for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
 		struct calgary_bus_info *info = &bus_info[bus];
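The kdump branch keeps the old sizing arithmetic, which is easy to sanity-check by hand: shift the top of memory right by 13, take get_order() of the result, and clamp at the 8M table. Two worked cases, assuming 4K pages and the 0..7 size indices described in the comment (illustration only):

/*
 * Worked examples of the kdump-branch sizing (4K pages assumed):
 *
 *   8 GB:  (1ULL << 33) >> 13 = 1 MB   -> get_order(1 MB)   = 8
 *          8 > TCE_TABLE_SIZE_8M (7)   -> clamped to the 8M table
 *   1 GB:  (1ULL << 30) >> 13 = 128 KB -> get_order(128 KB) = 5
 *          -> table size index 5
 *
 * The non-kdump case now simply picks the 8M table.
 */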
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index d57d917..1493c68 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -627,7 +627,6 @@
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_MEMORY_INIT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 583c2b0..12a492a 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -569,7 +569,6 @@
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
 CONFIG_DEBUG_NOMMU_REGIONS=y
-# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_MEMORY_INIT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
diff --git a/block/blk-core.c b/block/blk-core.c
index 34d7c19..a0e3096 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1307,7 +1307,7 @@
 		struct request_list *rl = blk_rq_rl(req);
 
 		BUG_ON(!list_empty(&req->queuelist));
-		BUG_ON(!hlist_unhashed(&req->hash));
+		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
 		freed_request(rl, flags);
diff --git a/block/blk-map.c b/block/blk-map.c
index cca6356..f7b22bc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -188,7 +188,7 @@
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct rq_map_data *map_data, struct sg_iovec *iov,
+			struct rq_map_data *map_data, const struct sg_iovec *iov,
 			int iov_count, unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1bcc61..1d2a9bd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -956,6 +956,7 @@
 			       unsigned int cpu)
 {
 	struct blk_mq_hw_ctx *hctx = data;
+	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
@@ -965,7 +966,7 @@
 	/*
 	 * Move ctx entries to new CPU, if this one is going away.
 	 */
-	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+	ctx = __blk_mq_get_ctx(q, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@
 	if (list_empty(&tmp))
 		return;
 
-	ctx = blk_mq_get_ctx(hctx->queue);
+	ctx = blk_mq_get_ctx(q);
 	spin_lock(&ctx->lock);
 
 	while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@
 		list_move_tail(&rq->queuelist, &ctx->rq_list);
 	}
 
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
 	spin_unlock(&ctx->lock);
 	blk_mq_put_ctx(ctx);
+
+	blk_mq_run_hw_queue(hctx, true);
 }
 
 static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ebd6b6f..53b1737 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@
 	while (!list_empty(&local_list)) {
 		struct request *rq;
 
-		rq = list_entry(local_list.next, struct request, queuelist);
-		list_del_init(&rq->queuelist);
+		rq = list_entry(local_list.next, struct request, ipi_list);
+		list_del_init(&rq->ipi_list);
 		rq->q->softirq_done_fn(rq);
 	}
 }
@@ -45,14 +45,9 @@
 
 	local_irq_save(flags);
 	list = this_cpu_ptr(&blk_cpu_done);
-	/*
-	 * We reuse queuelist for a list of requests to process. Since the
-	 * queuelist is used by the block layer only for requests waiting to be
-	 * submitted to the device it is unused now.
-	 */
-	list_add_tail(&rq->queuelist, list);
+	list_add_tail(&rq->ipi_list, list);
 
-	if (list->next == &rq->queuelist)
+	if (list->next == &rq->ipi_list)
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
 	local_irq_restore(flags);
@@ -141,7 +136,7 @@
 		struct list_head *list;
 do_local:
 		list = this_cpu_ptr(&blk_cpu_done);
-		list_add_tail(&req->queuelist, list);
+		list_add_tail(&req->ipi_list, list);
 
 		/*
 		 * if the list only contains our just added request,
@@ -149,7 +144,7 @@
 		 * entries there, someone already raised the irq but it
 		 * hasn't run yet.
 		 */
-		if (list->next == &req->queuelist)
+		if (list->next == &req->ipi_list)
 			raise_softirq_irqoff(BLOCK_SOFTIRQ);
 	} else if (raise_blk_irq(ccpu, req))
 		goto do_local;
diff --git a/block/blk.h b/block/blk.h
index d23b415..1d880f1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7..1e01b66 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
+	rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+	rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c205653..ab686b3 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -31,10 +31,14 @@
 	  ACPI CA, see:
 	  <http://acpica.org/>
 
-	  ACPI is an open industry specification co-developed by
-	  Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba.
+	  ACPI is an open industry specification originally co-developed by
+	  Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba. Currently,
+	  it is developed by the ACPI Specification Working Group (ASWG) under
+	  the UEFI Forum and any UEFI member can join the ASWG and contribute
+	  to the ACPI specification.
 	  The specification is available at:
 	  <http://www.acpi.info>
+	  <http://www.uefi.org/acpi/specs>
 
 if ACPI
 
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index f0fc626..d9339b4 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -51,12 +51,6 @@
 	" the driver to wait for userspace to write the undock sysfs file "
 	" before undocking");
 
-static const struct acpi_device_id dock_device_ids[] = {
-	{"LNXDOCK", 0},
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, dock_device_ids);
-
 struct dock_station {
 	acpi_handle handle;
 	unsigned long last_dock_time;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f7fd72a..6776c59 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1219,10 +1219,9 @@
 {
 	struct semaphore *sem = NULL;
 
-	sem = acpi_os_allocate(sizeof(struct semaphore));
+	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
 	if (!sem)
 		return AE_NO_MEMORY;
-	memset(sem, 0, sizeof(struct semaphore));
 
 	sema_init(sem, initial_units);
 
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 9640685..c1e31a4 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -344,7 +344,7 @@
 			tz->trips.hot.flags.valid = 1;
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					"Found hot threshold [%lu]\n",
-					tz->trips.critical.temperature));
+					tz->trips.hot.temperature));
 		}
 	}
 
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0f5f78f..bba5261 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -164,11 +164,10 @@
 	 * Validate output buffer.
 	 */
 	if (buffer->length == ACPI_ALLOCATE_BUFFER) {
-		buffer->pointer = ACPI_ALLOCATE(size_required);
+		buffer->pointer = ACPI_ALLOCATE_ZEROED(size_required);
 		if (!buffer->pointer)
 			return AE_NO_MEMORY;
 		buffer->length = size_required;
-		memset(buffer->pointer, 0, size_required);
 	} else {
 		if (buffer->length < size_required) {
 			buffer->length = size_required;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 48c7e8a..8b6990e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -488,6 +488,14 @@
 		},
 	},
 	{
+	.callback = video_set_use_native_backlight,
+	.ident = "Thinkpad Helix",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+		},
+	},
+	{
 	 .callback = video_set_use_native_backlight,
 	 .ident = "Dell Inspiron 7520",
 	 .matches = {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6f54962..ae098a2 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -705,6 +705,14 @@
 	return 0;
 }
 
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+	pd_ignore_unused = true;
+	return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
 /**
  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
  */
@@ -712,6 +720,11 @@
 {
 	struct generic_pm_domain *genpd;
 
+	if (pd_ignore_unused) {
+		pr_warn("genpd: Not disabling unused power domains\n");
+		return;
+	}
+
 	mutex_lock(&gpd_list_lock);
 
 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
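pd_ignore_unused is a plain boot-time switch: passing "pd_ignore_unused" on the kernel command line makes pm_genpd_poweroff_unused() leave every registered domain powered, handy while bringing up drivers that do not yet claim their domain. The __setup() pattern itself, reduced to a toy flag (hypothetical name, illustration only):

#include <linux/init.h>

/* Generic shape of such an early command-line flag. */
static bool keep_domains_on;

static int __init keep_domains_on_setup(char *str)
{
	keep_domains_on = true;
	return 1;			/* non-zero: option was consumed */
}
__setup("keep_domains_on", keep_domains_on_setup);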
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d0a0724..63e30ef 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -761,10 +761,11 @@
 	if (ret != 0)
 		goto err_range;
 
-	if (dev)
+	if (dev) {
 		ret = regmap_attach_dev(dev, map, config);
 		if (ret != 0)
 			goto err_regcache;
+	}
 
 	return map;
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 18c76e8..68e3992 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -469,24 +469,14 @@
 
 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
 {
-	mm_segment_t oldfs;
 	struct kvec iov = {
 		.iov_base = buf,
 		.iov_len = size,
 	};
 	struct msghdr msg = {
-		.msg_iovlen = 1,
-		.msg_iov = (struct iovec *)&iov,
 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
 	};
-	int rv;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
-	set_fs(oldfs);
-
-	return rv;
+	return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
 }
 
 static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
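kernel_recvmsg() takes the kvec directly and handles the kernel/user address-space distinction internally, so the explicit get_fs()/set_fs(KERNEL_DS) save-and-restore disappears. The general shape of a kernel-side socket read after this conversion (sketch, not DRBD-specific):

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Blocking kernel-space read via kernel_recvmsg(); illustration only.
 * Returns bytes received or a negative error. */
static int recv_exact(struct socket *sock, void *buf, size_t size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_WAITALL | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
}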
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b..f70a230 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -237,7 +237,7 @@
 	file_end_write(file);
 	if (likely(bw == len))
 		return 0;
-	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+	printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
 			(unsigned long long)pos, len);
 	if (bw >= 0)
 		bw = -EIO;
@@ -277,7 +277,7 @@
 		return __do_lo_send_write(lo->lo_backing_file,
 				page_address(page), bvec->bv_len,
 				pos);
-	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+	printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
 			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
 	if (ret > 0)
 		ret = -EIO;
@@ -316,7 +316,7 @@
 out:
 	return ret;
 fail:
-	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+	printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
 	ret = -ENOMEM;
 	goto out;
 }
@@ -345,7 +345,7 @@
 		size = p->bsize;
 
 	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-		printk(KERN_ERR "loop: transfer error block %ld\n",
+		printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
 		       page->index);
 		size = -EINVAL;
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 55298db..3a70ea2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -630,37 +630,29 @@
 	}
  
 	case NBD_CLEAR_SOCK: {
-		struct file *file;
-
+		struct socket *sock = nbd->sock;
 		nbd->sock = NULL;
-		file = nbd->file;
-		nbd->file = NULL;
 		nbd_clear_que(nbd);
 		BUG_ON(!list_empty(&nbd->queue_head));
 		BUG_ON(!list_empty(&nbd->waiting_queue));
 		kill_bdev(bdev);
-		if (file)
-			fput(file);
+		if (sock)
+			sockfd_put(sock);
 		return 0;
 	}
 
 	case NBD_SET_SOCK: {
-		struct file *file;
-		if (nbd->file)
+		struct socket *sock;
+		int err;
+		if (nbd->sock)
 			return -EBUSY;
-		file = fget(arg);
-		if (file) {
-			struct inode *inode = file_inode(file);
-			if (S_ISSOCK(inode->i_mode)) {
-				nbd->file = file;
-				nbd->sock = SOCKET_I(inode);
-				if (max_part > 0)
-					bdev->bd_invalidated = 1;
-				nbd->disconnect = 0; /* we're connected now */
-				return 0;
-			} else {
-				fput(file);
-			}
+		sock = sockfd_lookup(arg, &err);
+		if (sock) {
+			nbd->sock = sock;
+			if (max_part > 0)
+				bdev->bd_invalidated = 1;
+			nbd->disconnect = 0; /* we're connected now */
+			return 0;
 		}
 		return -EINVAL;
 	}
@@ -697,12 +689,12 @@
 
 	case NBD_DO_IT: {
 		struct task_struct *thread;
-		struct file *file;
+		struct socket *sock;
 		int error;
 
 		if (nbd->pid)
 			return -EBUSY;
-		if (!nbd->file)
+		if (!nbd->sock)
 			return -EINVAL;
 
 		mutex_unlock(&nbd->tx_lock);
@@ -731,15 +723,15 @@
 		if (error)
 			return error;
 		sock_shutdown(nbd, 0);
-		file = nbd->file;
-		nbd->file = NULL;
+		sock = nbd->sock;
+		nbd->sock = NULL;
 		nbd_clear_que(nbd);
 		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
 		kill_bdev(bdev);
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 		set_device_ro(bdev, false);
-		if (file)
-			fput(file);
+		if (sock)
+			sockfd_put(sock);
 		nbd->flags = 0;
 		nbd->bytesize = 0;
 		bdev->bd_inode->i_size = 0;
@@ -875,9 +867,7 @@
 
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
-		nbd_dev[i].file = NULL;
 		nbd_dev[i].magic = NBD_MAGIC;
-		nbd_dev[i].flags = 0;
 		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
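sockfd_lookup()/sockfd_put() replace the open-coded fget() + S_ISSOCK() + SOCKET_I() sequence: the helper resolves the descriptor, rejects non-sockets, and manages the file reference. The pattern in isolation (sketch, not nbd-specific):

#include <linux/net.h>

/* sockfd_lookup() takes a reference on the socket's backing file, or returns
 * NULL and fills *err (e.g. -EBADF, -ENOTSOCK); sockfd_put() drops it.
 * Illustration only. */
static int with_socket_fd(int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;

	/* ... use sock ... */

	sockfd_put(sock);		/* drop the reference taken above */
	return 0;
}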
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da085ff..7c64fa7 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
 /*
  * NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
+#include <linux/hdreg.h>
 #include <linux/idr.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -35,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/percpu.h>
 #include <linux/poison.h>
 #include <linux/ptrace.h>
 #include <linux/sched.h>
@@ -47,6 +50,11 @@
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT	(60 * HZ)
+#define IOD_TIMEOUT	(4 * NVME_IO_TIMEOUT)
+
+unsigned char io_timeout = 30;
+module_param(io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
 static int nvme_major;
 module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 
@@ -74,6 +83,7 @@
  * commands and one for I/O commands).
  */
 struct nvme_queue {
+	struct rcu_head r_head;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@
 	wait_queue_head_t sq_full;
 	wait_queue_t sq_cong_wait;
 	struct bio_list sq_cong;
+	struct list_head iod_bio;
 	u32 __iomem *q_db;
 	u16 q_depth;
 	u16 cq_vector;
@@ -95,6 +106,7 @@
 	u8 cq_phase;
 	u8 cqe_seen;
 	u8 q_suspended;
+	cpumask_var_t cpu_mask;
 	struct async_cmd_info cmdinfo;
 	unsigned long cmdid_data[];
 };
@@ -118,7 +130,7 @@
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
 						struct nvme_completion *);
 
 struct nvme_cmd_info {
@@ -190,7 +202,7 @@
 #define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
 #define CMD_CTX_ABORT		(0x31C + CMD_CTX_BASE)
 
-static void special_completion(struct nvme_dev *dev, void *ctx,
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@
 	if (ctx == CMD_CTX_FLUSH)
 		return;
 	if (ctx == CMD_CTX_ABORT) {
-		++dev->abort_limit;
+		++nvmeq->dev->abort_limit;
 		return;
 	}
 	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(&dev->pci_dev->dev,
+		dev_warn(nvmeq->q_dmadev,
 				"completed id %d twice on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
 	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(&dev->pci_dev->dev,
+		dev_warn(nvmeq->q_dmadev,
 				"invalid id %d completed on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
 
-	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
 }
 
-static void async_completion(struct nvme_dev *dev, void *ctx,
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@
 	return ctx;
 }
 
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
 {
-	return dev->queues[get_cpu() + 1];
+	return rcu_dereference_raw(dev->queues[qid]);
 }
 
-void put_nvmeq(struct nvme_queue *nvmeq)
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
 {
-	put_cpu();
+	unsigned queue_id = get_cpu_var(*dev->io_queue);
+	rcu_read_lock();
+	return rcu_dereference(dev->queues[queue_id]);
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+	rcu_read_unlock();
+	put_cpu_var(nvmeq->dev->io_queue);
+}
+
+static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+							__acquires(RCU)
+{
+	rcu_read_lock();
+	return rcu_dereference(dev->queues[q_idx]);
+}
+
+static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+	rcu_read_unlock();
 }
 
 /**
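Queue lookup is now a per-CPU index into an RCU-protected array: readers pin a queue with rcu_read_lock()/rcu_dereference() and drop it with rcu_read_unlock(), while teardown (further down) NULLs the slot with rcu_assign_pointer() and frees through call_rcu(). The pattern in isolation, with hypothetical names (illustration only):

#include <linux/rcupdate.h>
#include <linux/slab.h>

#define NR_SLOTS 8			/* hypothetical array size */

struct slot {
	struct rcu_head r_head;
	/* ... */
};

static struct slot __rcu *slots[NR_SLOTS];

static struct slot *get_slot(int i)		/* reader side */
{
	rcu_read_lock();
	return rcu_dereference(slots[i]);	/* may be NULL after teardown */
}

static void put_slot(void)
{
	rcu_read_unlock();
}

static void slot_free_rcu(struct rcu_head *r)
{
	kfree(container_of(r, struct slot, r_head));
}

static void remove_slot(int i)			/* update side, suitably locked */
{
	struct slot *s = rcu_dereference_raw(slots[i]);

	rcu_assign_pointer(slots[i], NULL);
	if (s)
		call_rcu(&s->r_head, slot_free_rcu);
}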
@@ -284,6 +316,10 @@
 	unsigned long flags;
 	u16 tail;
 	spin_lock_irqsave(&nvmeq->q_lock, flags);
+	if (nvmeq->q_suspended) {
+		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+		return -EBUSY;
+	}
 	tail = nvmeq->sq_tail;
 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
 	if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@
 		iod->npages = -1;
 		iod->length = nbytes;
 		iod->nents = 0;
+		iod->first_dma = 0ULL;
 		iod->start_time = jiffies;
 	}
 
@@ -371,19 +408,31 @@
 	part_stat_unlock();
 }
 
-static void bio_completion(struct nvme_dev *dev, void *ctx,
+static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct nvme_iod *iod = ctx;
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
+	if (unlikely(status)) {
+		if (!(status & NVME_SC_DNR ||
+				bio->bi_rw & REQ_FAILFAST_MASK) &&
+				(jiffies - iod->start_time) < IOD_TIMEOUT) {
+			if (!waitqueue_active(&nvmeq->sq_full))
+				add_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
+			list_add_tail(&iod->node, &nvmeq->iod_bio);
+			wake_up(&nvmeq->sq_full);
+			return;
+		}
+	}
 	if (iod->nents) {
-		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+		dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		nvme_end_io_acct(bio, iod->start_time);
 	}
-	nvme_free_iod(dev, iod);
+	nvme_free_iod(nvmeq->dev, iod);
 	if (status)
 		bio_endio(bio, -EIO);
 	else
@@ -391,8 +440,8 @@
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-			struct nvme_iod *iod, int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+								gfp_t gfp)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -405,7 +454,6 @@
 	dma_addr_t prp_dma;
 	int nprps, i;
 
-	cmd->prp1 = cpu_to_le64(dma_addr);
 	length -= (PAGE_SIZE - offset);
 	if (length <= 0)
 		return total_len;
@@ -420,7 +468,7 @@
 	}
 
 	if (length <= PAGE_SIZE) {
-		cmd->prp2 = cpu_to_le64(dma_addr);
+		iod->first_dma = dma_addr;
 		return total_len;
 	}
 
@@ -435,13 +483,12 @@
 
 	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
 	if (!prp_list) {
-		cmd->prp2 = cpu_to_le64(dma_addr);
+		iod->first_dma = dma_addr;
 		iod->npages = -1;
 		return (total_len - length) + PAGE_SIZE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
-	cmd->prp2 = cpu_to_le64(prp_dma);
 	i = 0;
 	for (;;) {
 		if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@
 
 	bio_chain(split, bio);
 
-	if (bio_list_empty(&nvmeq->sq_cong))
+	if (!waitqueue_active(&nvmeq->sq_full))
 		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
 	bio_list_add(&nvmeq->sq_cong, split);
 	bio_list_add(&nvmeq->sq_cong, bio);
+	wake_up(&nvmeq->sq_full);
 
 	return 0;
 }
@@ -536,25 +584,13 @@
 	return length;
 }
 
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		struct bio *bio, struct nvme_iod *iod, int cmdid)
 {
-	struct nvme_dsm_range *range;
+	struct nvme_dsm_range *range =
+				(struct nvme_dsm_range *)iod_list(iod)[0];
 	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 
-	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
-							&iod->first_dma);
-	if (!range)
-		return -ENOMEM;
-
-	iod_list(iod)[0] = (__le64 *)range;
-	iod->npages = 0;
-
 	range->cattr = cpu_to_le32(0);
 	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
 	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@
 	return nvme_submit_flush(nvmeq, ns, cmdid);
 }
 
-/*
- * Called with local interrupts disabled and the q_lock held.  May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-								struct bio *bio)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 {
+	struct bio *bio = iod->private;
+	struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
 	struct nvme_command *cmnd;
-	struct nvme_iod *iod;
-	enum dma_data_direction dma_dir;
-	int cmdid, length, result;
+	int cmdid;
 	u16 control;
 	u32 dsmgmt;
-	int psegs = bio_phys_segments(ns->queue, bio);
 
-	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-		result = nvme_submit_flush_data(nvmeq, ns);
-		if (result)
-			return result;
-	}
-
-	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
-	if (!iod)
-		goto nomem;
-	iod->private = bio;
-
-	result = -EBUSY;
 	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
 	if (unlikely(cmdid < 0))
-		goto free_iod;
+		return cmdid;
 
-	if (bio->bi_rw & REQ_DISCARD) {
-		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-		if (result)
-			goto free_cmdid;
-		return result;
-	}
-	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+	if (bio->bi_rw & REQ_DISCARD)
+		return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+	if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
 	control = 0;
@@ -652,42 +666,85 @@
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
 	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
 	memset(cmnd, 0, sizeof(*cmnd));
-	if (bio_data_dir(bio)) {
-		cmnd->rw.opcode = nvme_cmd_write;
-		dma_dir = DMA_TO_DEVICE;
-	} else {
-		cmnd->rw.opcode = nvme_cmd_read;
-		dma_dir = DMA_FROM_DEVICE;
-	}
 
-	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
-	if (result <= 0)
-		goto free_cmdid;
-	length = result;
-
+	cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
 	cmnd->rw.command_id = cmdid;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
-								GFP_ATOMIC);
+	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
-	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+	cmnd->rw.length =
+		cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	nvme_start_io_acct(bio);
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	writel(nvmeq->sq_tail, nvmeq->q_db);
 
 	return 0;
+}
 
- free_cmdid:
-	free_cmdid(nvmeq, cmdid, NULL);
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+								struct bio *bio)
+{
+	struct nvme_iod *iod;
+	int psegs = bio_phys_segments(ns->queue, bio);
+	int result;
+
+	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+		result = nvme_submit_flush_data(nvmeq, ns);
+		if (result)
+			return result;
+	}
+
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+	if (!iod)
+		return -ENOMEM;
+
+	iod->private = bio;
+	if (bio->bi_rw & REQ_DISCARD) {
+		void *range;
+		/*
+		 * We reuse the small pool to allocate the 16-byte range here
+		 * as it is not worth having a special pool for these or
+		 * additional cases to handle freeing the iod.
+		 */
+		range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+						GFP_ATOMIC,
+						&iod->first_dma);
+		if (!range) {
+			result = -ENOMEM;
+			goto free_iod;
+		}
+		iod_list(iod)[0] = (__le64 *)range;
+		iod->npages = 0;
+	} else if (psegs) {
+		result = nvme_map_bio(nvmeq, iod, bio,
+			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+			psegs);
+		if (result <= 0)
+			goto free_iod;
+		if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+								result) {
+			result = -ENOMEM;
+			goto free_iod;
+		}
+		nvme_start_io_acct(bio);
+	}
+	if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+		if (!waitqueue_active(&nvmeq->sq_full))
+			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+		list_add_tail(&iod->node, &nvmeq->iod_bio);
+	}
+	return 0;
+
  free_iod:
 	nvme_free_iod(nvmeq->dev, iod);
- nomem:
 	return result;
 }
 
@@ -711,7 +768,7 @@
 		}
 
 		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
+		fn(nvmeq, ctx, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@
 	if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
 		result = nvme_submit_bio_queue(nvmeq, ns, bio);
 	if (unlikely(result)) {
-		if (bio_list_empty(&nvmeq->sq_cong))
+		if (!waitqueue_active(&nvmeq->sq_full))
 			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
 		bio_list_add(&nvmeq->sq_cong, bio);
 	}
@@ -791,7 +848,7 @@
 	int status;
 };
 
-static void sync_completion(struct nvme_dev *dev, void *ctx,
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+						struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int cmdid;
+	int cmdid, ret;
 	struct sync_cmd_info cmdinfo;
+	struct nvme_queue *nvmeq;
+
+	nvmeq = lock_nvmeq(dev, q_idx);
+	if (!nvmeq) {
+		unlock_nvmeq(nvmeq);
+		return -ENODEV;
+	}
 
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
 
-	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
-								timeout);
-	if (cmdid < 0)
+	cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+	if (cmdid < 0) {
+		unlock_nvmeq(nvmeq);
 		return cmdid;
+	}
 	cmd->common.command_id = cmdid;
 
 	set_current_state(TASK_KILLABLE);
-	nvme_submit_cmd(nvmeq, cmd);
+	ret = nvme_submit_cmd(nvmeq, cmd);
+	if (ret) {
+		free_cmdid(nvmeq, cmdid, NULL);
+		unlock_nvmeq(nvmeq);
+		set_current_state(TASK_RUNNING);
+		return ret;
+	}
+	unlock_nvmeq(nvmeq);
 	schedule_timeout(timeout);
 
 	if (cmdinfo.status == -EINTR) {
-		nvme_abort_command(nvmeq, cmdid);
+		nvmeq = lock_nvmeq(dev, q_idx);
+		if (nvmeq)
+			nvme_abort_command(nvmeq, cmdid);
+		unlock_nvmeq(nvmeq);
 		return -EINTR;
 	}
 
@@ -845,20 +921,26 @@
 		return cmdid;
 	cmdinfo->status = -EINTR;
 	cmd->common.command_id = cmdid;
-	nvme_submit_cmd(nvmeq, cmd);
-	return 0;
+	return nvme_submit_cmd(nvmeq, cmd);
 }
 
 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 								u32 *result)
 {
-	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+	return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+}
+
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+								u32 *result)
+{
+	return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+							NVME_IO_TIMEOUT);
 }
 
 static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
 		struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
 {
-	return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+	return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
 								ADMIN_TIMEOUT);
 }
 
@@ -985,6 +1067,7 @@
 	struct nvme_command cmd;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	struct nvme_queue *adminq;
 
 	if (!nvmeq->qid || info[cmdid].aborted) {
 		if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@
 	if (!dev->abort_limit)
 		return;
 
-	a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+	adminq = rcu_dereference(dev->queues[0]);
+	a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
 								ADMIN_TIMEOUT);
 	if (a_cmdid < 0)
 		return;
@@ -1018,7 +1102,7 @@
 
 	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
 							nvmeq->qid);
-	nvme_submit_cmd(dev->queues[0], &cmd);
+	nvme_submit_cmd(adminq, &cmd);
 }
 
 /**
@@ -1051,23 +1135,38 @@
 		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
 								nvmeq->qid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
+		fn(nvmeq, ctx, &cqe);
 	}
 }
 
-static void nvme_free_queue(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct rcu_head *r)
 {
+	struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+
 	spin_lock_irq(&nvmeq->q_lock);
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		bio_endio(bio, -EIO);
 	}
+	while (!list_empty(&nvmeq->iod_bio)) {
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(
+				(NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+		};
+		struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+							struct nvme_iod,
+							node);
+		list_del(&iod->node);
+		bio_completion(nvmeq, iod, &cqe);
+	}
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+	if (nvmeq->qid)
+		free_cpumask_var(nvmeq->cpu_mask);
 	kfree(nvmeq);
 }
 
@@ -1076,9 +1175,10 @@
 	int i;
 
 	for (i = dev->queue_count - 1; i >= lowest; i--) {
-		nvme_free_queue(dev->queues[i]);
+		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+		rcu_assign_pointer(dev->queues[i], NULL);
+		call_rcu(&nvmeq->r_head, nvme_free_queue);
 		dev->queue_count--;
-		dev->queues[i] = NULL;
 	}
 }
 
@@ -1098,6 +1198,7 @@
 		return 1;
 	}
 	nvmeq->q_suspended = 1;
+	nvmeq->dev->online_queues--;
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@
 
 static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 {
-	struct nvme_queue *nvmeq = dev->queues[qid];
+	struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
 
 	if (!nvmeq)
 		return;
@@ -1152,6 +1253,9 @@
 	if (!nvmeq->sq_cmds)
 		goto free_cqdma;
 
+	if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+		goto free_sqdma;
+
 	nvmeq->q_dmadev = dmadev;
 	nvmeq->dev = dev;
 	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@
 	init_waitqueue_head(&nvmeq->sq_full);
 	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
 	bio_list_init(&nvmeq->sq_cong);
+	INIT_LIST_HEAD(&nvmeq->iod_bio);
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	nvmeq->q_depth = depth;
 	nvmeq->cq_vector = vector;
 	nvmeq->qid = qid;
 	nvmeq->q_suspended = 1;
 	dev->queue_count++;
+	rcu_assign_pointer(dev->queues[qid], nvmeq);
 
 	return nvmeq;
 
+ free_sqdma:
+	dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+							nvmeq->sq_dma_addr);
  free_cqdma:
 	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@
 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
 	nvme_cancel_ios(nvmeq, false);
 	nvmeq->q_suspended = 0;
+	dev->online_queues++;
 }
 
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
+	nvmeq = raw_nvmeq(dev, 0);
 	if (!nvmeq) {
 		nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
 		if (!nvmeq)
 			return -ENOMEM;
-		dev->queues[0] = nvmeq;
 	}
 
 	aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	struct nvme_user_io io;
 	struct nvme_command c;
 	unsigned length, meta_len;
@@ -1492,22 +1600,14 @@
 		c.rw.metadata = cpu_to_le64(meta_dma_addr);
 	}
 
-	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
-	nvmeq = get_nvmeq(dev);
-	/*
-	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
-	 * disabled.  We may be preempted at any point, and be rescheduled
-	 * to a different CPU.  That will cause cacheline bouncing, but no
-	 * additional races since q_lock already protects against other CPUs.
-	 */
-	put_nvmeq(nvmeq);
 	if (length != (io.nblocks + 1) << ns->lba_shift)
 		status = -ENOMEM;
-	else if (!nvmeq || nvmeq->q_suspended)
-		status = -EBUSY;
 	else
-		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+		status = nvme_submit_io_cmd(dev, &c, NULL);
 
 	if (meta_len) {
 		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@
 								length);
 		if (IS_ERR(iod))
 			return PTR_ERR(iod);
-		length = nvme_setup_prps(dev, &c.common, iod, length,
-								GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+		c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.common.prp2 = cpu_to_le64(iod->first_dma);
 	}
 
 	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
-								timeout);
+		status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@
 	kref_put(&dev->kref, nvme_free_dev);
 }
 
+static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+	/* some standard values */
+	geo->heads = 1 << 6;
+	geo->sectors = 1 << 5;
+	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+	return 0;
+}
+
 static const struct block_device_operations nvme_fops = {
 	.owner		= THIS_MODULE,
 	.ioctl		= nvme_ioctl,
 	.compat_ioctl	= nvme_compat_ioctl,
 	.open		= nvme_open,
 	.release	= nvme_release,
+	.getgeo		= nvme_getgeo,
 };
 
+static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+{
+	struct nvme_iod *iod, *next;
+
+	list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+		if (unlikely(nvme_submit_iod(nvmeq, iod)))
+			break;
+		list_del(&iod->node);
+		if (bio_list_empty(&nvmeq->sq_cong) &&
+						list_empty(&nvmeq->iod_bio))
+			remove_wait_queue(&nvmeq->sq_full,
+						&nvmeq->sq_cong_wait);
+	}
+}
+
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
 
-		if (bio_list_empty(&nvmeq->sq_cong))
+		if (bio_list_empty(&nvmeq->sq_cong) &&
+						list_empty(&nvmeq->iod_bio))
 			remove_wait_queue(&nvmeq->sq_full,
 							&nvmeq->sq_cong_wait);
 		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
-			if (bio_list_empty(&nvmeq->sq_cong))
+			if (!waitqueue_active(&nvmeq->sq_full))
 				add_wait_queue(&nvmeq->sq_full,
 							&nvmeq->sq_cong_wait);
 			bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1700,8 +1826,10 @@
 				queue_work(nvme_workq, &dev->reset_work);
 				continue;
 			}
+			rcu_read_lock();
 			for (i = 0; i < dev->queue_count; i++) {
-				struct nvme_queue *nvmeq = dev->queues[i];
+				struct nvme_queue *nvmeq =
+						rcu_dereference(dev->queues[i]);
 				if (!nvmeq)
 					continue;
 				spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@
 				nvme_process_cq(nvmeq);
 				nvme_cancel_ios(nvmeq, true);
 				nvme_resubmit_bios(nvmeq);
+				nvme_resubmit_iods(nvmeq);
  unlock:
 				spin_unlock_irq(&nvmeq->q_lock);
 			}
+			rcu_read_unlock();
 		}
 		spin_unlock(&dev_list_lock);
 		schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@
 	return NULL;
 }
 
+static int nvme_find_closest_node(int node)
+{
+	int n, val, min_val = INT_MAX, best_node = node;
+
+	for_each_online_node(n) {
+		if (n == node)
+			continue;
+		val = node_distance(node, n);
+		if (val < min_val) {
+			min_val = val;
+			best_node = n;
+		}
+	}
+	return best_node;
+}
+
+static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+								int count)
+{
+	int cpu;
+	for_each_cpu(cpu, qmask) {
+		if (cpumask_weight(nvmeq->cpu_mask) >= count)
+			break;
+		if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+			*per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+	}
+}
+
+static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+	const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+{
+	int next_cpu;
+	for_each_cpu(next_cpu, new_mask) {
+		cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+		cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+		cpumask_and(mask, mask, unassigned_cpus);
+		nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+	}
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+	unsigned i, max;
+
+	max = min(dev->max_qid, num_online_cpus());
+	for (i = dev->queue_count; i <= max; i++)
+		if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+			break;
+
+	max = min(dev->queue_count - 1, num_online_cpus());
+	for (i = dev->online_queues; i <= max; i++)
+		if (nvme_create_queue(raw_nvmeq(dev, i), i))
+			break;
+}
+
+/*
+ * If there are fewer queues than online cpus, this will try to optimally
+ * assign a queue to multiple cpus by grouping cpus that are "close" together:
+ * thread siblings, core, socket, closest node, then whatever else is
+ * available.
+ */
+static void nvme_assign_io_queues(struct nvme_dev *dev)
+{
+	unsigned cpu, cpus_per_queue, queues, remainder, i;
+	cpumask_var_t unassigned_cpus;
+
+	nvme_create_io_queues(dev);
+
+	queues = min(dev->online_queues - 1, num_online_cpus());
+	if (!queues)
+		return;
+
+	cpus_per_queue = num_online_cpus() / queues;
+	remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+
+	if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+		return;
+
+	cpumask_copy(unassigned_cpus, cpu_online_mask);
+	cpu = cpumask_first(unassigned_cpus);
+	for (i = 1; i <= queues; i++) {
+		struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+		cpumask_t mask;
+
+		cpumask_clear(nvmeq->cpu_mask);
+		if (!cpumask_weight(unassigned_cpus)) {
+			unlock_nvmeq(nvmeq);
+			break;
+		}
+
+		mask = *get_cpu_mask(cpu);
+		nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				topology_thread_cpumask(cpu),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				topology_core_cpumask(cpu),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				cpumask_of_node(cpu_to_node(cpu)),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				cpumask_of_node(
+					nvme_find_closest_node(
+						cpu_to_node(cpu))),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				unassigned_cpus,
+				nvmeq, cpus_per_queue);
+
+		WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+			"nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+			dev->instance, i);
+
+		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+							nvmeq->cpu_mask);
+		cpumask_andnot(unassigned_cpus, unassigned_cpus,
+						nvmeq->cpu_mask);
+		cpu = cpumask_next(cpu, unassigned_cpus);
+		if (remainder && !--remainder)
+			cpus_per_queue++;
+		unlock_nvmeq(nvmeq);
+	}
+	WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+								dev->instance);
+	i = 0;
+	cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, unassigned_cpus)
+		*per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+	free_cpumask_var(unassigned_cpus);
+}
+
 static int set_queue_count(struct nvme_dev *dev, int count)
 {
 	int status;
@@ -1805,13 +2072,26 @@
 	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
+static int nvme_cpu_notify(struct notifier_block *self,
+				unsigned long action, void *hcpu)
+{
+	struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		nvme_assign_io_queues(dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = raw_nvmeq(dev, 0);
 	struct pci_dev *pdev = dev->pci_dev;
-	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = num_online_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = set_queue_count(dev, nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1830,7 +2110,7 @@
 			size = db_bar_size(dev, nr_io_queues);
 		} while (1);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
-		dev->queues[0]->q_db = dev->dbs;
+		adminq->q_db = dev->dbs;
 	}
 
 	/* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@
 	 * number of interrupts.
 	 */
 	nr_io_queues = vecs;
+	dev->max_qid = nr_io_queues;
 
 	result = queue_request_irq(dev, adminq, adminq->irqname);
 	if (result) {
@@ -1864,49 +2145,13 @@
 	}
 
 	/* Free previously allocated queues that are no longer usable */
-	spin_lock(&dev_list_lock);
-	for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
+	nvme_free_queues(dev, nr_io_queues + 1);
+	nvme_assign_io_queues(dev);
 
-		spin_lock_irq(&nvmeq->q_lock);
-		nvme_cancel_ios(nvmeq, false);
-		spin_unlock_irq(&nvmeq->q_lock);
-
-		nvme_free_queue(nvmeq);
-		dev->queue_count--;
-		dev->queues[i] = NULL;
-	}
-	spin_unlock(&dev_list_lock);
-
-	cpu = cpumask_first(cpu_online_mask);
-	for (i = 0; i < nr_io_queues; i++) {
-		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
-		cpu = cpumask_next(cpu, cpu_online_mask);
-	}
-
-	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
-								NVME_Q_DEPTH);
-	for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
-		dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
-		if (!dev->queues[i + 1]) {
-			result = -ENOMEM;
-			goto free_queues;
-		}
-	}
-
-	for (; i < num_possible_cpus(); i++) {
-		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
-		dev->queues[i + 1] = dev->queues[target + 1];
-	}
-
-	for (i = 1; i < dev->queue_count; i++) {
-		result = nvme_create_queue(dev->queues[i], i);
-		if (result) {
-			for (--i; i > 0; i--)
-				nvme_disable_queue(dev, i);
-			goto free_queues;
-		}
-	}
+	dev->nb.notifier_call = &nvme_cpu_notify;
+	result = register_hotcpu_notifier(&dev->nb);
+	if (result)
+		goto free_queues;
 
 	return 0;
 
@@ -1985,6 +2230,7 @@
 
 static int nvme_dev_map(struct nvme_dev *dev)
 {
+	u64 cap;
 	int bars, result = -ENOMEM;
 	struct pci_dev *pdev = dev->pci_dev;
 
@@ -2008,7 +2254,9 @@
 		result = -ENODEV;
 		goto unmap;
 	}
-	dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+	cap = readq(&dev->bar->cap);
+	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
 	return 0;
@@ -2164,7 +2412,7 @@
 	atomic_set(&dq.refcount, 0);
 	dq.worker = &worker;
 	for (i = dev->queue_count - 1; i > 0; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
+		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
 
 		if (nvme_suspend_queue(nvmeq))
 			continue;
@@ -2177,19 +2425,38 @@
 	kthread_stop(kworker_task);
 }
 
+/*
+ * Remove the node from the device list and check whether we need to
+ * stop the nvme_thread.
+ */
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+	struct task_struct *tmp = NULL;
+
+	spin_lock(&dev_list_lock);
+	list_del_init(&dev->node);
+	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+		tmp = nvme_thread;
+		nvme_thread = NULL;
+	}
+	spin_unlock(&dev_list_lock);
+
+	if (tmp)
+		kthread_stop(tmp);
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
 	dev->initialized = 0;
+	unregister_hotcpu_notifier(&dev->nb);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
 
 	if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
 		for (i = dev->queue_count - 1; i >= 0; i--) {
-			struct nvme_queue *nvmeq = dev->queues[i];
+			struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
 			nvme_suspend_queue(nvmeq);
 			nvme_clear_queue(nvmeq);
 		}
@@ -2282,6 +2549,7 @@
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
 	nvme_free_namespaces(dev);
+	free_percpu(dev->io_queue);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2325,6 +2593,7 @@
 static int nvme_dev_start(struct nvme_dev *dev)
 {
 	int result;
+	bool start_thread = false;
 
 	result = nvme_dev_map(dev);
 	if (result)
@@ -2335,9 +2604,24 @@
 		goto unmap;
 
 	spin_lock(&dev_list_lock);
+	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+		start_thread = true;
+		nvme_thread = NULL;
+	}
 	list_add(&dev->node, &dev_list);
 	spin_unlock(&dev_list_lock);
 
+	if (start_thread) {
+		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+		wake_up(&nvme_kthread_wait);
+	} else
+		wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+	if (IS_ERR_OR_NULL(nvme_thread)) {
+		result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+		goto disable;
+	}
+
 	result = nvme_setup_io_queues(dev);
 	if (result && result != -EBUSY)
 		goto disable;
@@ -2346,9 +2630,7 @@
 
  disable:
 	nvme_disable_queue(dev, 0);
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
  unmap:
 	nvme_dev_unmap(dev);
 	return result;
@@ -2367,18 +2649,10 @@
 
 static void nvme_remove_disks(struct work_struct *ws)
 {
-	int i;
 	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
 
 	nvme_dev_remove(dev);
-	spin_lock(&dev_list_lock);
-	for (i = dev->queue_count - 1; i > 0; i--) {
-		BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
-		nvme_free_queue(dev->queues[i]);
-		dev->queue_count--;
-		dev->queues[i] = NULL;
-	}
-	spin_unlock(&dev_list_lock);
+	nvme_free_queues(dev, 1);
 }
 
 static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@
 								GFP_KERNEL);
 	if (!dev->queues)
 		goto free;
+	dev->io_queue = alloc_percpu(unsigned short);
+	if (!dev->io_queue)
+		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@
 	if (result)
 		goto release;
 
+	kref_init(&dev->kref);
 	result = nvme_dev_start(dev);
 	if (result) {
 		if (result == -EBUSY)
@@ -2462,7 +2740,6 @@
 		goto release_pools;
 	}
 
-	kref_init(&dev->kref);
 	result = nvme_dev_add(dev);
 	if (result)
 		goto shutdown;
@@ -2491,6 +2768,7 @@
  release:
 	nvme_release_instance(dev);
  free:
+	free_percpu(dev->io_queue);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2517,6 +2795,7 @@
 	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
 	nvme_free_queues(dev, 0);
+	rcu_barrier();
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@
 #define nvme_slot_reset NULL
 #define nvme_error_resume NULL
 
+#ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@
 	}
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
 
@@ -2563,7 +2844,7 @@
 /* Move to pci_ids.h later */
 #define PCI_CLASS_STORAGE_EXPRESS	0x010802
 
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
@@ -2585,14 +2866,11 @@
 {
 	int result;
 
-	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-	if (IS_ERR(nvme_thread))
-		return PTR_ERR(nvme_thread);
+	init_waitqueue_head(&nvme_kthread_wait);
 
-	result = -ENOMEM;
 	nvme_workq = create_singlethread_workqueue("nvme");
 	if (!nvme_workq)
-		goto kill_kthread;
+		return -ENOMEM;
 
 	result = register_blkdev(nvme_major, "nvme");
 	if (result < 0)
@@ -2609,8 +2887,6 @@
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
 	destroy_workqueue(nvme_workq);
- kill_kthread:
-	kthread_stop(nvme_thread);
 	return result;
 }
 
@@ -2619,11 +2895,11 @@
 	pci_unregister_driver(&nvme_driver);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
-	kthread_stop(nvme_thread);
+	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
 module_init(nvme_init);
 module_exit(nvme_exit);
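
The per-CPU queue mapping added above routes each submitter to
dev->queues[*per_cpu_ptr(dev->io_queue, cpu)], and nvme_assign_io_queues()
splits the online CPUs across the available I/O queues.  A minimal,
standalone sketch of just the split arithmetic follows (the 8-CPU/3-queue
values are assumed for illustration; the real helper additionally groups
thread siblings, cores and NUMA nodes via cpumasks):

	/*
	 * Illustrative sketch (not kernel code): how nvme_assign_io_queues()
	 * divides N online CPUs among Q I/O queues.  The first "remainder"
	 * queues get the base share, the rest get one extra CPU, so every
	 * online CPU maps to exactly one queue.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned ncpus = 8, queues = 3;		/* example values */
		unsigned cpus_per_queue = ncpus / queues;
		unsigned remainder = queues - (ncpus - queues * cpus_per_queue);
		unsigned q, cpu = 0;

		for (q = 1; q <= queues; q++) {
			printf("queue %u gets cpus", q);
			for (unsigned n = 0; n < cpus_per_queue; n++)
				printf(" %u", cpu++);
			printf("\n");
			if (remainder && !--remainder)
				cpus_per_queue++;	/* later queues absorb the leftover CPUs */
		}
		return 0;
	}
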
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb6..2c3f5be 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1562,13 +1562,14 @@
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
-								GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
 		if (length != tot_len) {
 			res = -ENOMEM;
 			goto out_unmap;
 		}
 
+		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
 		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
 		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 	} else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	u32 num_cmds;
 	struct nvme_iod *iod;
 	u64 unit_len;
@@ -2045,7 +2045,7 @@
 	struct nvme_command c;
 	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
 	u16 control;
-	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+	u32 max_blocks = queue_max_hw_sectors(ns->queue);
 
 	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
 
@@ -2093,8 +2093,7 @@
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
-							GFP_KERNEL);
+		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
 		if (retcode != unit_len) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@
 			res = -ENOMEM;
 			goto out;
 		}
+		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
 		nvme_offset += unit_num_blocks;
 
-		nvmeq = get_nvmeq(dev);
-		/*
-		 * Since nvme_submit_sync_cmd sleeps, we can't keep
-		 * preemption disabled.  We may be preempted at any
-		 * point, and be rescheduled to a different CPU.  That
-		 * will cause cacheline bouncing, but no additional
-		 * races since q_lock already protects against other
-		 * CPUs.
-		 */
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-						NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
 
@@ -2671,10 +2660,7 @@
 			c.common.opcode = nvme_cmd_flush;
 			c.common.nsid = cpu_to_le32(ns->ns_id);
 
-			nvmeq = get_nvmeq(ns->dev);
-			put_nvmeq(nvmeq);
-			nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+			nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 			res = nvme_trans_status_code(hdr, nvme_sc);
 			if (res)
 				goto out;
@@ -2697,15 +2683,12 @@
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_command c;
-	struct nvme_queue *nvmeq;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvmeq = get_nvmeq(ns->dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -2872,7 +2855,6 @@
 	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvmeq = get_nvmeq(dev);
-	put_nvmeq(nvmeq);
-
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1386749..fbae63e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -408,7 +408,7 @@
 
 config SONYPI
 	tristate "Sony Vaio Programmable I/O Control Device support"
-	depends on X86 && PCI && INPUT && !64BIT
+	depends on X86_32 && PCI && INPUT
 	---help---
 	  This driver enables access to the Sony Programmable I/O Control
 	  Device which can be found in many (all ?) Sony Vaio laptops.
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6928d09..60aafb8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -901,9 +901,9 @@
 		if (len + offset > PAGE_SIZE)
 			len = PAGE_SIZE - offset;
 
-		src = buf->ops->map(pipe, buf, 1);
+		src = kmap_atomic(buf->page);
 		memcpy(page_address(page) + offset, src + buf->offset, len);
-		buf->ops->unmap(pipe, buf, src);
+		kunmap_atomic(src);
 
 		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
 	}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1e2b9db..0e9cce8 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -30,7 +30,7 @@
 
 config ARM_EXYNOS4210_CPUFREQ
 	bool "SAMSUNG EXYNOS4210"
-	depends on CPU_EXYNOS4210
+	depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM
 	default y
 	select ARM_EXYNOS_CPUFREQ
 	help
@@ -41,7 +41,7 @@
 
 config ARM_EXYNOS4X12_CPUFREQ
 	bool "SAMSUNG EXYNOS4x12"
-	depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+	depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
 	default y
 	select ARM_EXYNOS_CPUFREQ
 	help
@@ -52,7 +52,7 @@
 
 config ARM_EXYNOS5250_CPUFREQ
 	bool "SAMSUNG EXYNOS5250"
-	depends on SOC_EXYNOS5250
+	depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM
 	default y
 	select ARM_EXYNOS_CPUFREQ
 	help
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index ca0021a..72564b7 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -54,3 +54,11 @@
 	help
 	  This adds the support for frequency switching on PA Semi
 	  PWRficient processors.
+
+config POWERNV_CPUFREQ
+	tristate "CPU frequency scaling for IBM POWERNV platform"
+	depends on PPC_POWERNV
+	default y
+	help
+	  This adds support for CPU frequency switching on the IBM POWERNV
+	  platform.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 7494565..0dbb963 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -86,6 +86,7 @@
 obj-$(CONFIG_CPU_FREQ_PMAC)		+= pmac32-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC64)		+= pmac64-cpufreq.o
 obj-$(CONFIG_PPC_PASEMI_CPUFREQ)	+= pasemi-cpufreq.o
+obj-$(CONFIG_POWERNV_CPUFREQ)		+= powernv-cpufreq.o
 
 ##################################################################################
 # Other platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d5eaedb..000e4e0 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -754,7 +754,7 @@
 		goto err_unreg;
 	}
 
-	data->freq_table = kmalloc(sizeof(*data->freq_table) *
+	data->freq_table = kzalloc(sizeof(*data->freq_table) *
 		    (perf->state_count+1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index a1c79f5..7b612c8 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -52,7 +52,7 @@
 static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
 	unsigned int frequency, rate, min_freq;
-	static struct clk *cpuclk;
+	struct clk *cpuclk;
 	int retval, steps, i;
 
 	if (policy->cpu != 0)
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index d457303..601b88c 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -15,9 +15,9 @@
 };
 
 static struct cpufreq_frequency_table cris_freq_table[] = {
-	{0x01,	6000},
-	{0x02,	200000},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 0x01, 6000},
+	{0, 0x02, 200000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 13c3361..22b2cdd 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -15,9 +15,9 @@
 };
 
 static struct cpufreq_frequency_table cris_freq_table[] = {
-	{0x01, 6000},
-	{0x02, 200000},
-	{0, CPUFREQ_TABLE_END},
+	{0, 0x01, 6000},
+	{0, 0x02, 200000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index c987e94..7f5d2a6 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -56,15 +56,15 @@
 };
 
 static struct cpufreq_frequency_table elanfreq_table[] = {
-	{0,	1000},
-	{1,	2000},
-	{2,	4000},
-	{3,	8000},
-	{4,	16000},
-	{5,	33000},
-	{6,	66000},
-	{7,	99000},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 0,	1000},
+	{0, 1,	2000},
+	{0, 2,	4000},
+	{0, 3,	8000},
+	{0, 4,	16000},
+	{0, 5,	33000},
+	{0, 6,	66000},
+	{0, 7,	99000},
+	{0, 0,	CPUFREQ_TABLE_END},
 };
 
 
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 40d84c4..6384e5b 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -29,12 +29,12 @@
 };
 
 static struct cpufreq_frequency_table exynos4210_freq_table[] = {
-	{L0, 1200 * 1000},
-	{L1, 1000 * 1000},
-	{L2,  800 * 1000},
-	{L3,  500 * 1000},
-	{L4,  200 * 1000},
-	{0, CPUFREQ_TABLE_END},
+	{0, L0, 1200 * 1000},
+	{0, L1, 1000 * 1000},
+	{0, L2,  800 * 1000},
+	{0, L3,  500 * 1000},
+	{0, L4,  200 * 1000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq apll_freq_4210[] = {
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 7c11ace..466c76a 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -30,21 +30,21 @@
 };
 
 static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
-	{CPUFREQ_BOOST_FREQ, 1500 * 1000},
-	{L1, 1400 * 1000},
-	{L2, 1300 * 1000},
-	{L3, 1200 * 1000},
-	{L4, 1100 * 1000},
-	{L5, 1000 * 1000},
-	{L6,  900 * 1000},
-	{L7,  800 * 1000},
-	{L8,  700 * 1000},
-	{L9,  600 * 1000},
-	{L10, 500 * 1000},
-	{L11, 400 * 1000},
-	{L12, 300 * 1000},
-	{L13, 200 * 1000},
-	{0, CPUFREQ_TABLE_END},
+	{CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
+	{0, L1, 1400 * 1000},
+	{0, L2, 1300 * 1000},
+	{0, L3, 1200 * 1000},
+	{0, L4, 1100 * 1000},
+	{0, L5, 1000 * 1000},
+	{0, L6,  900 * 1000},
+	{0, L7,  800 * 1000},
+	{0, L8,  700 * 1000},
+	{0, L9,  600 * 1000},
+	{0, L10, 500 * 1000},
+	{0, L11, 400 * 1000},
+	{0, L12, 300 * 1000},
+	{0, L13, 200 * 1000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq *apll_freq_4x12;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 5f90b82..363a0b3 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -34,23 +34,23 @@
 };
 
 static struct cpufreq_frequency_table exynos5250_freq_table[] = {
-	{L0, 1700 * 1000},
-	{L1, 1600 * 1000},
-	{L2, 1500 * 1000},
-	{L3, 1400 * 1000},
-	{L4, 1300 * 1000},
-	{L5, 1200 * 1000},
-	{L6, 1100 * 1000},
-	{L7, 1000 * 1000},
-	{L8,  900 * 1000},
-	{L9,  800 * 1000},
-	{L10, 700 * 1000},
-	{L11, 600 * 1000},
-	{L12, 500 * 1000},
-	{L13, 400 * 1000},
-	{L14, 300 * 1000},
-	{L15, 200 * 1000},
-	{0, CPUFREQ_TABLE_END},
+	{0, L0, 1700 * 1000},
+	{0, L1, 1600 * 1000},
+	{0, L2, 1500 * 1000},
+	{0, L3, 1400 * 1000},
+	{0, L4, 1300 * 1000},
+	{0, L5, 1200 * 1000},
+	{0, L6, 1100 * 1000},
+	{0, L7, 1000 * 1000},
+	{0, L8,  900 * 1000},
+	{0, L9,  800 * 1000},
+	{0, L10, 700 * 1000},
+	{0, L11, 600 * 1000},
+	{0, L12, 500 * 1000},
+	{0, L13, 400 * 1000},
+	{0, L14, 300 * 1000},
+	{0, L15, 200 * 1000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq apll_freq_5250[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 65a4770..08e7bbc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -33,11 +33,10 @@
 			continue;
 		}
 		if (!cpufreq_boost_enabled()
-		    && table[i].driver_data == CPUFREQ_BOOST_FREQ)
+		    && (table[i].flags & CPUFREQ_BOOST_FREQ))
 			continue;
 
-		pr_debug("table entry %u: %u kHz, %u driver_data\n",
-					i, freq, table[i].driver_data);
+		pr_debug("table entry %u: %u kHz\n", i, freq);
 		if (freq < min_freq)
 			min_freq = freq;
 		if (freq > max_freq)
@@ -175,8 +174,8 @@
 	} else
 		*index = optimal.driver_data;
 
-	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
-		table[*index].driver_data);
+	pr_debug("target index is %u, freq is:%u kHz\n", *index,
+		 table[*index].frequency);
 
 	return 0;
 }
@@ -230,7 +229,7 @@
 		 * show_boost = false and driver_data != BOOST freq
 		 * display NON BOOST freqs
 		 */
-		if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
+		if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
 			continue;
 
 		count += sprintf(&buf[count], "%d ", table[i].frequency);
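
The freq_table.c hunks and the driver table conversions below follow the
new struct cpufreq_frequency_table layout, where a flags word precedes
driver_data and frequency and boost entries are marked with a flag instead
of a magic driver_data value.  A standalone sketch of the resulting table
shape (field names follow the hunks; the flag and end-marker values here
are illustrative stand-ins, not the kernel's definitions):

	#include <stdio.h>

	#define BOOST_FREQ	(1u << 0)	/* illustrative flag value */
	#define TABLE_END	~0u		/* illustrative end marker */

	struct freq_entry {
		unsigned int flags;
		unsigned int driver_data;
		unsigned int frequency;		/* kHz */
	};

	static struct freq_entry table[] = {
		{ BOOST_FREQ, 0, 1500000 },	/* boost state */
		{ 0,          1, 1200000 },
		{ 0,          2,  800000 },
		{ 0,          0, TABLE_END },
	};

	int main(void)
	{
		for (unsigned i = 0; table[i].frequency != TABLE_END; i++)
			printf("%u kHz%s\n", table[i].frequency,
			       (table[i].flags & BOOST_FREQ) ? " (boost)" : "");
		return 0;
	}
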
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index a22b5d1..c30aaa6 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -254,7 +254,7 @@
 	}
 
 	/* alloc freq_table */
-	data->freq_table = kmalloc(sizeof(*data->freq_table) *
+	data->freq_table = kzalloc(sizeof(*data->freq_table) *
 	                           (data->acpi_data.state_count + 1),
 	                           GFP_KERNEL);
 	if (!data->freq_table) {
@@ -275,7 +275,6 @@
 	/* table init */
 	for (i = 0; i <= data->acpi_data.state_count; i++)
 	{
-		data->freq_table[i].driver_data = i;
 		if (i < data->acpi_data.state_count) {
 			data->freq_table[i].frequency =
 			      data->acpi_data.states[i].core_frequency * 1000;
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 3d114bc..37a4806 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -43,9 +43,9 @@
  * table.
  */
 static struct cpufreq_frequency_table kirkwood_freq_table[] = {
-	{STATE_CPU_FREQ,	0}, /* CPU uses cpuclk */
-	{STATE_DDR_FREQ,	0}, /* CPU uses ddrclk */
-	{0,			CPUFREQ_TABLE_END},
+	{0, STATE_CPU_FREQ,	0}, /* CPU uses cpuclk */
+	{0, STATE_DDR_FREQ,	0}, /* CPU uses ddrclk */
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 5c440f8..d00e5d1 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -475,7 +475,7 @@
 		return -EINVAL;
 	}
 
-	longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
+	longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
 			GFP_KERNEL);
 	if (!longhaul_table)
 		return -ENOMEM;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index a3588d6..f0bc31f 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -69,7 +69,7 @@
 
 static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	static struct clk *cpuclk;
+	struct clk *cpuclk;
 	int i;
 	unsigned long rate;
 	int ret;
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index c4dfa42..cc3408f 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -59,9 +59,9 @@
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table maple_cpu_freqs[] = {
-	{CPUFREQ_HIGH,		0},
-	{CPUFREQ_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, CPUFREQ_HIGH,		0},
+	{0, CPUFREQ_LOW,		0},
+	{0, 0,				CPUFREQ_TABLE_END},
 };
 
 /* Power mode data is an array of the 32 bits PCR values to use for
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 74f593e..529cfd9 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -92,16 +92,16 @@
 
 
 static struct cpufreq_frequency_table p4clockmod_table[] = {
-	{DC_RESV, CPUFREQ_ENTRY_INVALID},
-	{DC_DFLT, 0},
-	{DC_25PT, 0},
-	{DC_38PT, 0},
-	{DC_50PT, 0},
-	{DC_64PT, 0},
-	{DC_75PT, 0},
-	{DC_88PT, 0},
-	{DC_DISABLE, 0},
-	{DC_RESV, CPUFREQ_TABLE_END},
+	{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+	{0, DC_DFLT, 0},
+	{0, DC_25PT, 0},
+	{0, DC_38PT, 0},
+	{0, DC_50PT, 0},
+	{0, DC_64PT, 0},
+	{0, DC_75PT, 0},
+	{0, DC_88PT, 0},
+	{0, DC_DISABLE, 0},
+	{0, DC_RESV, CPUFREQ_TABLE_END},
 };
 
 
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 6a2b7d3..84c84b5 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -60,12 +60,12 @@
 
 /* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
 static struct cpufreq_frequency_table pas_freqs[] = {
-	{0,	0},
-	{1,	0},
-	{2,	0},
-	{3,	0},
-	{4,	0},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 0,	0},
+	{0, 1,	0},
+	{0, 2,	0},
+	{0, 3,	0},
+	{0, 4,	0},
+	{0, 0,	CPUFREQ_TABLE_END},
 };
 
 /*
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index cf55d20..7615180 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -81,9 +81,9 @@
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
-	{CPUFREQ_HIGH, 		0},
-	{CPUFREQ_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, CPUFREQ_HIGH,	0},
+	{0, CPUFREQ_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 static inline void local_delay(unsigned long ms)
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 6a338f8..8bc4229 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -65,9 +65,9 @@
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table g5_cpu_freqs[] = {
-	{CPUFREQ_HIGH, 		0},
-	{CPUFREQ_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, CPUFREQ_HIGH,	0},
+	{0, CPUFREQ_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 /* Power mode data is an array of the 32 bits PCR values to use for
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 62c6f2e..49f120e 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -37,15 +37,15 @@
 
 /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
 static struct cpufreq_frequency_table clock_ratio[] = {
-	{60,  /* 110 -> 6.0x */ 0},
-	{55,  /* 011 -> 5.5x */ 0},
-	{50,  /* 001 -> 5.0x */ 0},
-	{45,  /* 000 -> 4.5x */ 0},
-	{40,  /* 010 -> 4.0x */ 0},
-	{35,  /* 111 -> 3.5x */ 0},
-	{30,  /* 101 -> 3.0x */ 0},
-	{20,  /* 100 -> 2.0x */ 0},
-	{0, CPUFREQ_TABLE_END}
+	{0, 60,  /* 110 -> 6.0x */ 0},
+	{0, 55,  /* 011 -> 5.5x */ 0},
+	{0, 50,  /* 001 -> 5.0x */ 0},
+	{0, 45,  /* 000 -> 4.5x */ 0},
+	{0, 40,  /* 010 -> 4.0x */ 0},
+	{0, 35,  /* 111 -> 3.5x */ 0},
+	{0, 30,  /* 101 -> 3.0x */ 0},
+	{0, 20,  /* 100 -> 2.0x */ 0},
+	{0, 0, CPUFREQ_TABLE_END}
 };
 
 static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 770a9e1..1b6ae6b 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -623,7 +623,7 @@
 	if (check_pst_table(data, pst, maxvid))
 		return -EINVAL;
 
-	powernow_table = kmalloc((sizeof(*powernow_table)
+	powernow_table = kzalloc((sizeof(*powernow_table)
 		* (data->numps + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@@ -793,7 +793,7 @@
 	}
 
 	/* fill in data->powernow_table */
-	powernow_table = kmalloc((sizeof(*powernow_table)
+	powernow_table = kzalloc((sizeof(*powernow_table)
 		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		pr_debug("powernow_table memory alloc failure\n");
@@ -810,7 +810,6 @@
 
 	powernow_table[data->acpi_data.state_count].frequency =
 		CPUFREQ_TABLE_END;
-	powernow_table[data->acpi_data.state_count].driver_data = 0;
 	data->powernow_table = powernow_table;
 
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
new file mode 100644
index 0000000..9edccc6
--- /dev/null
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -0,0 +1,341 @@
+/*
+ * POWERNV cpufreq driver for the IBM POWER processors
+ *
+ * (C) Copyright IBM 2014
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"powernv-cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/cputhreads.h>
+#include <asm/reg.h>
+
+#define POWERNV_MAX_PSTATES	256
+
+static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+/*
+ * Note: The set of pstates consists of contiguous integers, the
+ * smallest of which is indicated by powernv_pstate_info.min, the
+ * largest of which is indicated by powernv_pstate_info.max.
+ *
+ * The nominal pstate is the highest non-turbo pstate in this
+ * platform. This is indicated by powernv_pstate_info.nominal.
+ */
+static struct powernv_pstate_info {
+	int min;
+	int max;
+	int nominal;
+	int nr_pstates;
+} powernv_pstate_info;
+
+/*
+ * Initialize the freq table based on data obtained
+ * from the firmware passed via device-tree
+ */
+static int init_powernv_pstates(void)
+{
+	struct device_node *power_mgt;
+	int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+	const __be32 *pstate_ids, *pstate_freqs;
+	u32 len_ids, len_freqs;
+
+	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+	if (!power_mgt) {
+		pr_warn("power-mgt node not found\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
+		pr_warn("ibm,pstate-min node not found\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
+		pr_warn("ibm,pstate-max node not found\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
+				 &pstate_nominal)) {
+		pr_warn("ibm,pstate-nominal not found\n");
+		return -ENODEV;
+	}
+	pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+		pstate_nominal, pstate_max);
+
+	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
+	if (!pstate_ids) {
+		pr_warn("ibm,pstate-ids not found\n");
+		return -ENODEV;
+	}
+
+	pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
+				      &len_freqs);
+	if (!pstate_freqs) {
+		pr_warn("ibm,pstate-frequencies-mhz not found\n");
+		return -ENODEV;
+	}
+
+	WARN_ON(len_ids != len_freqs);
+	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+	if (!nr_pstates) {
+		pr_warn("No PStates found\n");
+		return -ENODEV;
+	}
+
+	pr_debug("NR PStates %d\n", nr_pstates);
+	for (i = 0; i < nr_pstates; i++) {
+		u32 id = be32_to_cpu(pstate_ids[i]);
+		u32 freq = be32_to_cpu(pstate_freqs[i]);
+
+		pr_debug("PState id %d freq %d MHz\n", id, freq);
+		powernv_freqs[i].frequency = freq * 1000; /* kHz */
+		powernv_freqs[i].driver_data = id;
+	}
+	/* End of list marker entry */
+	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+	powernv_pstate_info.min = pstate_min;
+	powernv_pstate_info.max = pstate_max;
+	powernv_pstate_info.nominal = pstate_nominal;
+	powernv_pstate_info.nr_pstates = nr_pstates;
+
+	return 0;
+}
+
+/* Returns the CPU frequency corresponding to the pstate_id. */
+static unsigned int pstate_id_to_freq(int pstate_id)
+{
+	int i;
+
+	i = powernv_pstate_info.max - pstate_id;
+	BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
+
+	return powernv_freqs[i].frequency;
+}
+
+/*
+ * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
+ * the firmware
+ */
+static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
+					char *buf)
+{
+	return sprintf(buf, "%u\n",
+		pstate_id_to_freq(powernv_pstate_info.nominal));
+}
+
+struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+	__ATTR_RO(cpuinfo_nominal_freq);
+
+static struct freq_attr *powernv_cpu_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	&cpufreq_freq_attr_cpuinfo_nominal_freq,
+	NULL,
+};
+
+/* Helper routines */
+
+/* Access helpers to power mgt SPR */
+
+static inline unsigned long get_pmspr(unsigned long sprn)
+{
+	switch (sprn) {
+	case SPRN_PMCR:
+		return mfspr(SPRN_PMCR);
+
+	case SPRN_PMICR:
+		return mfspr(SPRN_PMICR);
+
+	case SPRN_PMSR:
+		return mfspr(SPRN_PMSR);
+	}
+	BUG();
+}
+
+static inline void set_pmspr(unsigned long sprn, unsigned long val)
+{
+	switch (sprn) {
+	case SPRN_PMCR:
+		mtspr(SPRN_PMCR, val);
+		return;
+
+	case SPRN_PMICR:
+		mtspr(SPRN_PMICR, val);
+		return;
+	}
+	BUG();
+}
+
+/*
+ * Use objects of this type to query/update
+ * pstates on a remote CPU via smp_call_function.
+ */
+struct powernv_smp_call_data {
+	unsigned int freq;
+	int pstate_id;
+};
+
+/*
+ * powernv_read_cpu_freq: Reads the current frequency on this CPU.
+ *
+ * Called via smp_call_function.
+ *
+ * Note: The caller of smp_call_function should pass an argument of
+ * type 'struct powernv_smp_call_data *' along with this function.
+ *
+ * The current frequency on this CPU will be returned via
+ * ((struct powernv_smp_call_data *)arg)->freq;
+ */
+static void powernv_read_cpu_freq(void *arg)
+{
+	unsigned long pmspr_val;
+	s8 local_pstate_id;
+	struct powernv_smp_call_data *freq_data = arg;
+
+	pmspr_val = get_pmspr(SPRN_PMSR);
+
+	/*
+	 * The local pstate id corresponds to bits 48..55 in the PMSR.
+	 * Note: Watch out for the sign!
+	 */
+	local_pstate_id = (pmspr_val >> 48) & 0xFF;
+	freq_data->pstate_id = local_pstate_id;
+	freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+
+	pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
+		raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+		freq_data->freq);
+}
+
+/*
+ * powernv_cpufreq_get: Returns the CPU frequency as reported by the
+ * firmware for CPU 'cpu'. This value is reported through the sysfs
+ * file cpuinfo_cur_freq.
+ */
+unsigned int powernv_cpufreq_get(unsigned int cpu)
+{
+	struct powernv_smp_call_data freq_data;
+
+	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+			&freq_data, 1);
+
+	return freq_data.freq;
+}
+
+/*
+ * set_pstate: Sets the pstate on this CPU.
+ *
+ * This is called via an smp_call_function.
+ *
+ * The caller must ensure that freq_data is of the type
+ * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
+ * on this CPU should be present in freq_data->pstate_id.
+ */
+static void set_pstate(void *freq_data)
+{
+	unsigned long val;
+	unsigned long pstate_ul =
+		((struct powernv_smp_call_data *) freq_data)->pstate_id;
+
+	val = get_pmspr(SPRN_PMCR);
+	val = val & 0x0000FFFFFFFFFFFFULL;
+
+	pstate_ul = pstate_ul & 0xFF;
+
+	/* Set both global(bits 56..63) and local(bits 48..55) PStates */
+	val = val | (pstate_ul << 56) | (pstate_ul << 48);
+
+	pr_debug("Setting cpu %d pmcr to %016lX\n",
+			raw_smp_processor_id(), val);
+	set_pmspr(SPRN_PMCR, val);
+}
+
+/*
+ * powernv_cpufreq_target_index: Sets the frequency corresponding to
+ * the cpufreq table entry indexed by new_index on the cpus in the
+ * mask policy->cpus
+ */
+static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
+					unsigned int new_index)
+{
+	struct powernv_smp_call_data freq_data;
+
+	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+
+	/*
+	 * Use smp_call_function to send an IPI and execute the
+	 * mtspr on the target CPU.  We could avoid the IPI if the
+	 * current CPU is within policy->cpus (i.e. the same core).
+	 */
+	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+
+	return 0;
+}
+
+static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	int base, i;
+
+	base = cpu_first_thread_sibling(policy->cpu);
+
+	for (i = 0; i < threads_per_core; i++)
+		cpumask_set_cpu(base + i, policy->cpus);
+
+	return cpufreq_table_validate_and_show(policy, powernv_freqs);
+}
+
+static struct cpufreq_driver powernv_cpufreq_driver = {
+	.name		= "powernv-cpufreq",
+	.flags		= CPUFREQ_CONST_LOOPS,
+	.init		= powernv_cpufreq_cpu_init,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= powernv_cpufreq_target_index,
+	.get		= powernv_cpufreq_get,
+	.attr		= powernv_cpu_freq_attr,
+};
+
+static int __init powernv_cpufreq_init(void)
+{
+	int rc = 0;
+
+	/* Discover pstates from device tree and init */
+	rc = init_powernv_pstates();
+	if (rc) {
+		pr_info("powernv-cpufreq disabled. System does not support PState control\n");
+		return rc;
+	}
+
+	return cpufreq_register_driver(&powernv_cpufreq_driver);
+}
+module_init(powernv_cpufreq_init);
+
+static void __exit powernv_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&powernv_cpufreq_driver);
+}
+module_exit(powernv_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3bd9123..b7e677b 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/errno.h>
-#include <sysdev/fsl_soc.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index af7b1ca..5be8a48 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -32,15 +32,15 @@
 
 /* the CBE supports an 8 step frequency scaling */
 static struct cpufreq_frequency_table cbe_freqs[] = {
-	{1,	0},
-	{2,	0},
-	{3,	0},
-	{4,	0},
-	{5,	0},
-	{6,	0},
-	{8,	0},
-	{10,	0},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 1,	0},
+	{0, 2,	0},
+	{0, 3,	0},
+	{0, 4,	0},
+	{0, 5,	0},
+	{0, 6,	0},
+	{0, 8,	0},
+	{0, 10,	0},
+	{0, 0,	CPUFREQ_TABLE_END},
 };
 
 /*
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 826b8be..4626f90 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -72,19 +72,19 @@
 #endif
 
 static struct cpufreq_frequency_table s3c2416_freq_table[] = {
-	{ SOURCE_HCLK, FREQ_DVS },
-	{ SOURCE_ARMDIV, 133333 },
-	{ SOURCE_ARMDIV, 266666 },
-	{ SOURCE_ARMDIV, 400000 },
-	{ 0, CPUFREQ_TABLE_END },
+	{ 0, SOURCE_HCLK, FREQ_DVS },
+	{ 0, SOURCE_ARMDIV, 133333 },
+	{ 0, SOURCE_ARMDIV, 266666 },
+	{ 0, SOURCE_ARMDIV, 400000 },
+	{ 0, 0, CPUFREQ_TABLE_END },
 };
 
 static struct cpufreq_frequency_table s3c2450_freq_table[] = {
-	{ SOURCE_HCLK, FREQ_DVS },
-	{ SOURCE_ARMDIV, 133500 },
-	{ SOURCE_ARMDIV, 267000 },
-	{ SOURCE_ARMDIV, 534000 },
-	{ 0, CPUFREQ_TABLE_END },
+	{ 0, SOURCE_HCLK, FREQ_DVS },
+	{ 0, SOURCE_ARMDIV, 133500 },
+	{ 0, SOURCE_ARMDIV, 267000 },
+	{ 0, SOURCE_ARMDIV, 534000 },
+	{ 0, 0, CPUFREQ_TABLE_END },
 };
 
 static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index a3dc192..be1b2b5 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -586,7 +586,7 @@
 	size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
 	size++;
 
-	ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
+	ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
 	if (!ftab) {
 		printk(KERN_ERR "%s: no memory for tables\n", __func__);
 		return -ENOMEM;
@@ -664,7 +664,7 @@
 
 	size = sizeof(*vals) * (plls_no + 1);
 
-	vals = kmalloc(size, GFP_KERNEL);
+	vals = kzalloc(size, GFP_KERNEL);
 	if (vals) {
 		memcpy(vals, plls, size);
 		pll_reg = vals;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c4226de..ff7d3ec 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -37,19 +37,19 @@
 };
 
 static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
-	{ 0,  66000 },
-	{ 0, 100000 },
-	{ 0, 133000 },
-	{ 1, 200000 },
-	{ 1, 222000 },
-	{ 1, 266000 },
-	{ 2, 333000 },
-	{ 2, 400000 },
-	{ 2, 532000 },
-	{ 2, 533000 },
-	{ 3, 667000 },
-	{ 4, 800000 },
-	{ 0, CPUFREQ_TABLE_END },
+	{ 0, 0,  66000 },
+	{ 0, 0, 100000 },
+	{ 0, 0, 133000 },
+	{ 0, 1, 200000 },
+	{ 0, 1, 222000 },
+	{ 0, 1, 266000 },
+	{ 0, 2, 333000 },
+	{ 0, 2, 400000 },
+	{ 0, 2, 532000 },
+	{ 0, 2, 533000 },
+	{ 0, 3, 667000 },
+	{ 0, 4, 800000 },
+	{ 0, 0, CPUFREQ_TABLE_END },
 };
 #endif
 
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 7242153..ab2c1a4 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -64,12 +64,12 @@
 };
 
 static struct cpufreq_frequency_table s5pv210_freq_table[] = {
-	{L0, 1000*1000},
-	{L1, 800*1000},
-	{L2, 400*1000},
-	{L3, 200*1000},
-	{L4, 100*1000},
-	{0, CPUFREQ_TABLE_END},
+	{0, L0, 1000*1000},
+	{0, L1, 800*1000},
+	{0, L2, 400*1000},
+	{0, L3, 200*1000},
+	{0, L4, 100*1000},
+	{0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct regulator *arm_regulator;
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 69371bf..ac84e48 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -33,9 +33,9 @@
 #define PFX "sc520_freq: "
 
 static struct cpufreq_frequency_table sc520_freq_table[] = {
-	{0x01,	100000},
-	{0x02,	133000},
-	{0,	CPUFREQ_TABLE_END},
+	{0, 0x01,	100000},
+	{0, 0x02,	133000},
+	{0, 0,	CPUFREQ_TABLE_END},
 };
 
 static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4cfdcff..3867839 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -195,18 +195,15 @@
 	cnt = prop->length / sizeof(u32);
 	val = prop->value;
 
-	freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+	freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
 	if (!freq_tbl) {
 		ret = -ENOMEM;
 		goto out_put_node;
 	}
 
-	for (i = 0; i < cnt; i++) {
-		freq_tbl[i].driver_data = i;
+	for (i = 0; i < cnt; i++)
 		freq_tbl[i].frequency = be32_to_cpup(val++);
-	}
 
-	freq_tbl[i].driver_data = i;
 	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
 
 	spear_cpufreq.freq_tbl = freq_tbl;
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 394ac15..1a07b59 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -49,9 +49,9 @@
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-	{SPEEDSTEP_HIGH,	0},
-	{SPEEDSTEP_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, SPEEDSTEP_HIGH,	0},
+	{0, SPEEDSTEP_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index db5d274..8635eec 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -42,9 +42,9 @@
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-	{SPEEDSTEP_HIGH,	0},
-	{SPEEDSTEP_LOW,		0},
-	{0,			CPUFREQ_TABLE_END},
+	{0, SPEEDSTEP_HIGH,	0},
+	{0, SPEEDSTEP_LOW,	0},
+	{0, 0,			CPUFREQ_TABLE_END},
 };
 
 #define GET_SPEEDSTEP_OWNER 0
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 13be802..8d045af 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -45,7 +45,7 @@
 	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
-	ret = clk_set_rate(policy->mclk, target_freq * 1000);
+	ret = clk_set_rate(policy->clk, target_freq * 1000);
 	cpufreq_freq_transition_end(policy, &freqs, ret);
 
 	return ret;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index e918b6d..efe2f17 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -293,6 +293,7 @@
 }
 
 define_show_state_function(exit_latency)
+define_show_state_function(target_residency)
 define_show_state_function(power_usage)
 define_show_state_ull_function(usage)
 define_show_state_ull_function(time)
@@ -304,6 +305,7 @@
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
 define_one_state_ro(latency, show_state_exit_latency);
+define_one_state_ro(residency, show_state_target_residency);
 define_one_state_ro(power, show_state_power_usage);
 define_one_state_ro(usage, show_state_usage);
 define_one_state_ro(time, show_state_time);
@@ -313,6 +315,7 @@
 	&attr_name.attr,
 	&attr_desc.attr,
 	&attr_latency.attr,
+	&attr_residency.attr,
 	&attr_power.attr,
 	&attr_usage.attr,
 	&attr_time.attr,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 605b016..ba06d1d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,7 +308,7 @@
 
 config DMA_BCM2835
 	tristate "BCM2835 DMA engine support"
-	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	depends on ARCH_BCM2835
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
@@ -350,6 +350,16 @@
 	select DMA_VIRTUAL_CHANNELS
 	help
 	  Enable support for the MOXA ART SoC DMA controller.
+
+config FSL_EDMA
+	tristate "Freescale eDMA engine support"
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources (slots).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
 config DMA_ENGINE
 	bool
@@ -401,4 +411,13 @@
 config DMA_ENGINE_RAID
 	bool
 
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
 endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4..5150c82 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,5 @@
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 1e506af..de361a1 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -265,7 +266,7 @@
  */
 void devm_acpi_dma_controller_free(struct device *dev)
 {
-	WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+	WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
 }
 EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
 
@@ -343,7 +344,7 @@
  * @index:	index of FixedDMA descriptor for @dev
  *
  * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 		size_t index)
@@ -358,10 +359,10 @@
 
 	/* Check if the device was enumerated by ACPI */
 	if (!dev || !ACPI_HANDLE(dev))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	memset(&pdata, 0, sizeof(pdata));
 	pdata.index = index;
@@ -376,7 +377,7 @@
 	acpi_dev_free_resource_list(&resource_list);
 
 	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	mutex_lock(&acpi_dma_lock);
 
@@ -399,7 +400,7 @@
 	}
 
 	mutex_unlock(&acpi_dma_lock);
-	return chan;
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
 EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
 
@@ -413,7 +414,7 @@
  * the first FixedDMA descriptor is TX and second is RX.
  *
  * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 		const char *name)
@@ -425,7 +426,7 @@
 	else if (!strcmp(name, "rx"))
 		index = 1;
 	else
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	return acpi_dma_request_slave_chan_by_index(dev, index);
 }
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2c04dc..c13a3bb 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1569,7 +1569,6 @@
 
 		/* Disable interrupts */
 		atc_disable_chan_irq(atdma, chan->chan_id);
-		tasklet_disable(&atchan->tasklet);
 
 		tasklet_kill(&atchan->tasklet);
 		list_del(&chan->device_node);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c18aebf..d028f36 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -620,12 +620,15 @@
 	u32 desc_phys;
 	int ret;
 
+	desc_phys = lower_32_bits(c->desc_phys);
+	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+	if (!cdd->chan_busy[desc_num])
+		return 0;
+
 	ret = cppi41_tear_down_chan(c);
 	if (ret)
 		return ret;
 
-	desc_phys = lower_32_bits(c->desc_phys);
-	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b4..a886713 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -627,18 +627,13 @@
 struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 						  const char *name)
 {
-	struct dma_chan *chan;
-
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
 		return of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev)) {
-		chan = acpi_dma_request_slave_chan_by_name(dev, name);
-		if (chan)
-			return chan;
-	}
+	if (ACPI_HANDLE(dev))
+		return acpi_dma_request_slave_chan_by_name(dev, name);
 
 	return ERR_PTR(-ENODEV);
 }
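
With both the OF and ACPI lookup paths now returning ERR_PTR()-encoded errors, callers of dma_request_slave_channel_reason() can tell "provider not probed yet" apart from a hard failure. A minimal sketch of the client side (not part of this series; foo_request_dma() and the "rx" channel name are hypothetical):

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_request_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER; propagate it from probe() */

	*out = chan;
	return 0;
}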
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 05b6dea..e27cec2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -340,7 +340,7 @@
 static void result(const char *err, unsigned int n, unsigned int src_off,
 		   unsigned int dst_off, unsigned int len, unsigned long data)
 {
-	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
 		current->comm, n, err, src_off, dst_off, len, data);
 }
 
@@ -348,7 +348,7 @@
 		       unsigned int dst_off, unsigned int len,
 		       unsigned long data)
 {
-	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
 		   current->comm, n, err, src_off, dst_off, len, data);
 }
 
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 13ac3f2..cfdbb92 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -33,8 +33,8 @@
  * of which use ARM any more).  See the "Databook" from Synopsys for
  * information beyond what licensees probably provide.
  *
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
+ * The driver has been tested with the Atmel AT32AP7000, which does not
+ * support descriptor writeback.
  */
 
 static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@
 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 {
 	struct dw_dma		*dw;
-	size_t			size;
 	bool			autocfg;
 	unsigned int		dw_params;
 	unsigned int		nr_channels;
@@ -1487,6 +1486,13 @@
 	int			err;
 	int			i;
 
+	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	dw->regs = chip->regs;
+	chip->dw = dw;
+
 	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
@@ -1509,9 +1515,9 @@
 	else
 		nr_channels = pdata->nr_channels;
 
-	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
-	dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
-	if (!dw)
+	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+				GFP_KERNEL);
+	if (!dw->chan)
 		return -ENOMEM;
 
 	dw->clk = devm_clk_get(chip->dev, "hclk");
@@ -1519,9 +1525,6 @@
 		return PTR_ERR(dw->clk);
 	clk_prepare_enable(dw->clk);
 
-	dw->regs = chip->regs;
-	chip->dw = dw;
-
 	/* Get hardware configuration parameters */
 	if (autocfg) {
 		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e89fc24..fec59f1 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -75,6 +75,36 @@
 		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
 }
 
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_pci_suspend_late(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+	return dw_dma_suspend(chip);
+};
+
+static int dw_pci_resume_early(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+	return dw_dma_resume(chip);
+};
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_pci_suspend_late	NULL
+#define dw_pci_resume_early	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_pci_dev_pm_ops = {
+	.suspend_late = dw_pci_suspend_late,
+	.resume_early = dw_pci_resume_early,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
 	/* Medfield */
 	{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +113,9 @@
 	/* BayTrail */
 	{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
 	{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+
+	/* Haswell */
+	{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +125,9 @@
 	.id_table	= dw_pci_id_table,
 	.probe		= dw_pci_probe,
 	.remove		= dw_pci_remove,
+	.driver	= {
+		.pm	= &dw_pci_dev_pm_ops,
+	},
 };
 
 module_pci_driver(dw_pci_driver);
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index deb4274..bb98d3e 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,13 +252,13 @@
 	struct tasklet_struct	tasklet;
 	struct clk		*clk;
 
+	/* channels */
+	struct dw_dma_chan	*chan;
 	u8			all_chan_mask;
 
 	/* hardware configuration */
 	unsigned char		nr_masters;
 	unsigned char		data_width[4];
-
-	struct dw_dma_chan	chan[0];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd8da45..cd04eb7 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -539,6 +539,7 @@
 				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
+				kfree(edesc);
 				dev_err(dev, "Failed to allocate slot\n");
 				return NULL;
 			}
@@ -553,8 +554,10 @@
 		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
 				       dst_addr, burst, dev_width, period_len,
 				       direction);
-		if (ret < 0)
+		if (ret < 0) {
+			kfree(edesc);
 			return NULL;
+		}
 
 		if (direction == DMA_DEV_TO_MEM)
 			dst_addr += period_len;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 0000000..381e793
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,975 @@
+/*
+ * drivers/dma/fsl-edma.c
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale eDMA engine with flexible channel multiplexing
+ * capability for DMA request sources. The eDMA block can be found on some
+ * Vybrid and Layerscape SoCs.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CR			0x00
+#define EDMA_ES			0x04
+#define EDMA_ERQ		0x0C
+#define EDMA_EEI		0x14
+#define EDMA_SERQ		0x1B
+#define EDMA_CERQ		0x1A
+#define EDMA_SEEI		0x19
+#define EDMA_CEEI		0x18
+#define EDMA_CINT		0x1F
+#define EDMA_CERR		0x1E
+#define EDMA_SSRT		0x1D
+#define EDMA_CDNE		0x1C
+#define EDMA_INTR		0x24
+#define EDMA_ERR		0x2C
+
+#define EDMA_TCD_SADDR(x)	(0x1000 + 32 * (x))
+#define EDMA_TCD_SOFF(x)	(0x1004 + 32 * (x))
+#define EDMA_TCD_ATTR(x)	(0x1006 + 32 * (x))
+#define EDMA_TCD_NBYTES(x)	(0x1008 + 32 * (x))
+#define EDMA_TCD_SLAST(x)	(0x100C + 32 * (x))
+#define EDMA_TCD_DADDR(x)	(0x1010 + 32 * (x))
+#define EDMA_TCD_DOFF(x)	(0x1014 + 32 * (x))
+#define EDMA_TCD_CITER_ELINK(x)	(0x1016 + 32 * (x))
+#define EDMA_TCD_CITER(x)	(0x1016 + 32 * (x))
+#define EDMA_TCD_DLAST_SGA(x)	(0x1018 + 32 * (x))
+#define EDMA_TCD_CSR(x)		(0x101C + 32 * (x))
+#define EDMA_TCD_BITER_ELINK(x)	(0x101E + 32 * (x))
+#define EDMA_TCD_BITER(x)	(0x101E + 32 * (x))
+
+#define EDMA_CR_EDBG		BIT(1)
+#define EDMA_CR_ERCA		BIT(2)
+#define EDMA_CR_ERGA		BIT(3)
+#define EDMA_CR_HOE		BIT(4)
+#define EDMA_CR_HALT		BIT(5)
+#define EDMA_CR_CLM		BIT(6)
+#define EDMA_CR_EMLM		BIT(7)
+#define EDMA_CR_ECX		BIT(16)
+#define EDMA_CR_CX		BIT(17)
+
+#define EDMA_SEEI_SEEI(x)	((x) & 0x1F)
+#define EDMA_CEEI_CEEI(x)	((x) & 0x1F)
+#define EDMA_CINT_CINT(x)	((x) & 0x1F)
+#define EDMA_CERR_CERR(x)	((x) & 0x1F)
+
+#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & 0x0007))
+#define EDMA_TCD_ATTR_DMOD(x)		(((x) & 0x001F) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & 0x0007) << 8)
+#define EDMA_TCD_ATTR_SMOD(x)		(((x) & 0x001F) << 11)
+#define EDMA_TCD_ATTR_SSIZE_8BIT	(0x0000)
+#define EDMA_TCD_ATTR_SSIZE_16BIT	(0x0100)
+#define EDMA_TCD_ATTR_SSIZE_32BIT	(0x0200)
+#define EDMA_TCD_ATTR_SSIZE_64BIT	(0x0300)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE	(0x0500)
+#define EDMA_TCD_ATTR_DSIZE_8BIT	(0x0000)
+#define EDMA_TCD_ATTR_DSIZE_16BIT	(0x0001)
+#define EDMA_TCD_ATTR_DSIZE_32BIT	(0x0002)
+#define EDMA_TCD_ATTR_DSIZE_64BIT	(0x0003)
+#define EDMA_TCD_ATTR_DSIZE_32BYTE	(0x0005)
+
+#define EDMA_TCD_SOFF_SOFF(x)		(x)
+#define EDMA_TCD_NBYTES_NBYTES(x)	(x)
+#define EDMA_TCD_SLAST_SLAST(x)		(x)
+#define EDMA_TCD_DADDR_DADDR(x)		(x)
+#define EDMA_TCD_CITER_CITER(x)		((x) & 0x7FFF)
+#define EDMA_TCD_DOFF_DOFF(x)		(x)
+#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)	(x)
+#define EDMA_TCD_BITER_BITER(x)		((x) & 0x7FFF)
+
+#define EDMA_TCD_CSR_START		BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
+#define EDMA_TCD_CSR_INT_HALF		BIT(2)
+#define EDMA_TCD_CSR_D_REQ		BIT(3)
+#define EDMA_TCD_CSR_E_SG		BIT(4)
+#define EDMA_TCD_CSR_E_LINK		BIT(5)
+#define EDMA_TCD_CSR_ACTIVE		BIT(6)
+#define EDMA_TCD_CSR_DONE		BIT(7)
+
+#define EDMAMUX_CHCFG_DIS		0x0
+#define EDMAMUX_CHCFG_ENBL		0x80
+#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)
+
+#define DMAMUX_NR	2
+
+#define FSL_EDMA_BUSWIDTHS	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+struct fsl_edma_hw_tcd {
+	u32	saddr;
+	u16	soff;
+	u16	attr;
+	u32	nbytes;
+	u32	slast;
+	u32	daddr;
+	u16	doff;
+	u16	citer;
+	u32	dlast_sga;
+	u16	csr;
+	u16	biter;
+};
+
+struct fsl_edma_sw_tcd {
+	dma_addr_t			ptcd;
+	struct fsl_edma_hw_tcd		*vtcd;
+};
+
+struct fsl_edma_slave_config {
+	enum dma_transfer_direction	dir;
+	enum dma_slave_buswidth		addr_width;
+	u32				dev_addr;
+	u32				burst;
+	u32				attr;
+};
+
+struct fsl_edma_chan {
+	struct virt_dma_chan		vchan;
+	enum dma_status			status;
+	struct fsl_edma_engine		*edma;
+	struct fsl_edma_desc		*edesc;
+	struct fsl_edma_slave_config	fsc;
+	struct dma_pool			*tcd_pool;
+};
+
+struct fsl_edma_desc {
+	struct virt_dma_desc		vdesc;
+	struct fsl_edma_chan		*echan;
+	bool				iscyclic;
+	unsigned int			n_tcds;
+	struct fsl_edma_sw_tcd		tcd[];
+};
+
+struct fsl_edma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*membase;
+	void __iomem		*muxbase[DMAMUX_NR];
+	struct clk		*muxclk[DMAMUX_NR];
+	struct mutex		fsl_edma_mutex;
+	u32			n_chans;
+	int			txirq;
+	int			errirq;
+	bool			big_endian;
+	struct fsl_edma_chan	chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * the eDMA controller's endianness is independent of the CPU core's.
+ */
+
+static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+	if (edma->big_endian)
+		return ioread16be(addr);
+	else
+		return ioread16(addr);
+}
+
+static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+	if (edma->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
+{
+	iowrite8(val, addr);
+}
+
+static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
+{
+	if (edma->big_endian)
+		iowrite16be(val, addr);
+	else
+		iowrite16(val, addr);
+}
+
+static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
+{
+	if (edma->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
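
The helpers above encode one idea: which accessor pair is used is decided once, from the "big-endian" device-tree property read in probe(), because the eDMA block's byte order does not follow the CPU's. A condensed, illustrative restatement (not part of the patch; example_read_intr() is a made-up name):

static u32 example_read_intr(struct fsl_edma_engine *edma)
{
	/* same selection as edma_readl() above, shown for one register */
	return edma->big_endian ? ioread32be(edma->membase + EDMA_INTR)
				: ioread32(edma->membase + EDMA_INTR);
}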
+
+static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+	void __iomem *addr = fsl_chan->edma->membase;
+	u32 ch = fsl_chan->vchan.chan.chan_id;
+
+	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
+	edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
+}
+
+static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+	void __iomem *addr = fsl_chan->edma->membase;
+	u32 ch = fsl_chan->vchan.chan.chan_id;
+
+	edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
+	edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
+}
+
+static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+			unsigned int slot, bool enable)
+{
+	u32 ch = fsl_chan->vchan.chan.chan_id;
+	void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+	unsigned chans_per_mux, ch_off;
+
+	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+	if (enable)
+		edma_writeb(fsl_chan->edma,
+				EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
+				muxaddr + ch_off);
+	else
+		edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+	switch (addr_width) {
+	case 1:
+		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+	case 2:
+		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+	case 4:
+		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+	case 8:
+		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+	default:
+		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+	}
+}
+
+static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_edma_desc *fsl_desc;
+	int i;
+
+	fsl_desc = to_fsl_edma_desc(vdesc);
+	for (i = 0; i < fsl_desc->n_tcds; i++)
+		dma_pool_free(fsl_desc->echan->tcd_pool,
+				fsl_desc->tcd[i].vtcd,
+				fsl_desc->tcd[i].ptcd);
+	kfree(fsl_desc);
+}
+
+static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct dma_slave_config *cfg = (void *)arg;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+		fsl_edma_disable_request(fsl_chan);
+		fsl_chan->edesc = NULL;
+		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		fsl_chan->fsc.dir = cfg->direction;
+		if (cfg->direction == DMA_DEV_TO_MEM) {
+			fsl_chan->fsc.dev_addr = cfg->src_addr;
+			fsl_chan->fsc.addr_width = cfg->src_addr_width;
+			fsl_chan->fsc.burst = cfg->src_maxburst;
+			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+		} else if (cfg->direction == DMA_MEM_TO_DEV) {
+			fsl_chan->fsc.dev_addr = cfg->dst_addr;
+			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+			fsl_chan->fsc.burst = cfg->dst_maxburst;
+			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+		} else {
+			return -EINVAL;
+		}
+		return 0;
+
+	case DMA_PAUSE:
+		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+		if (fsl_chan->edesc) {
+			fsl_edma_disable_request(fsl_chan);
+			fsl_chan->status = DMA_PAUSED;
+		}
+		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+		return 0;
+
+	case DMA_RESUME:
+		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+		if (fsl_chan->edesc) {
+			fsl_edma_enable_request(fsl_chan);
+			fsl_chan->status = DMA_IN_PROGRESS;
+		}
+		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+		return 0;
+
+	default:
+		return -ENXIO;
+	}
+}
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+		struct virt_dma_desc *vdesc, bool in_progress)
+{
+	struct fsl_edma_desc *edesc = fsl_chan->edesc;
+	void __iomem *addr = fsl_chan->edma->membase;
+	u32 ch = fsl_chan->vchan.chan.chan_id;
+	enum dma_transfer_direction dir = fsl_chan->fsc.dir;
+	dma_addr_t cur_addr, dma_addr;
+	size_t len, size;
+	int i;
+
+	/* calculate the total size in this desc */
+	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+		len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+
+	if (!in_progress)
+		return len;
+
+	if (dir == DMA_MEM_TO_DEV)
+		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
+	else
+		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
+
+	/* figure out the finished and calculate the residue */
+	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+		size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+		if (dir == DMA_MEM_TO_DEV)
+			dma_addr = edma_readl(fsl_chan->edma,
+					&(edesc->tcd[i].vtcd->saddr));
+		else
+			dma_addr = edma_readl(fsl_chan->edma,
+					&(edesc->tcd[i].vtcd->daddr));
+
+		len -= size;
+		if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+			len += dma_addr + size - cur_addr;
+			break;
+		}
+	}
+
+	return len;
+}
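
Worked numbers for the residue walk above (illustrative only, not part of the patch): assume a two-TCD descriptor with nbytes = 32 and biter = 512, i.e. 16384 bytes per TCD, and an engine that is 4096 bytes into the second buffer.

static size_t example_residue(dma_addr_t cur_daddr, dma_addr_t tcd1_daddr)
{
	const size_t per_tcd = 32 * 512;	/* nbytes * biter = 16384 */
	size_t len = 2 * per_tcd;		/* whole descriptor: 32768 */

	len -= per_tcd;				/* TCD 0 already finished */
	len -= per_tcd;				/* assume TCD 1 finished too ... */
	len += tcd1_daddr + per_tcd - cur_daddr;/* ... then add back what is left */

	return len;	/* 4096 bytes into TCD 1 -> residue is 12288 */
}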
+
+static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(chan, cookie, txstate);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	if (!txstate)
+		return fsl_chan->status;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
+	else if (vdesc)
+		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
+	else
+		txstate->residue = 0;
+
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	return fsl_chan->status;
+}
+
+static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
+		u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
+		u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
+		u16 csr)
+{
+	void __iomem *addr = fsl_chan->edma->membase;
+	u32 ch = fsl_chan->vchan.chan.chan_id;
+
+	/*
+	 * TCD parameters were already put into the eDMA's byte order by
+	 * fill_tcd_params(), so just write them to the registers in plain
+	 * CPU byte order here.
+	 */
+	writew(0, addr + EDMA_TCD_CSR(ch));
+	writel(src, addr + EDMA_TCD_SADDR(ch));
+	writel(dst, addr + EDMA_TCD_DADDR(ch));
+	writew(attr, addr + EDMA_TCD_ATTR(ch));
+	writew(soff, addr + EDMA_TCD_SOFF(ch));
+	writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
+	writel(slast, addr + EDMA_TCD_SLAST(ch));
+	writew(citer, addr + EDMA_TCD_CITER(ch));
+	writew(biter, addr + EDMA_TCD_BITER(ch));
+	writew(doff, addr + EDMA_TCD_DOFF(ch));
+	writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
+	writew(csr, addr + EDMA_TCD_CSR(ch));
+}
+
+static void fill_tcd_params(struct fsl_edma_engine *edma,
+		struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+		u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+		u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+		bool disable_req, bool enable_sg)
+{
+	u16 csr = 0;
+
+	/*
+	 * eDMA hardware scatter-gather requires the TCD parameters to be
+	 * stored in memory in the same byte order as the eDMA module, so
+	 * that the engine can load them automatically.
+	 */
+	edma_writel(edma, src, &(tcd->saddr));
+	edma_writel(edma, dst, &(tcd->daddr));
+	edma_writew(edma, attr, &(tcd->attr));
+	edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
+	edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
+	edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
+	edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
+	edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
+	edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
+	edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+	if (major_int)
+		csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+	if (disable_req)
+		csr |= EDMA_TCD_CSR_D_REQ;
+
+	if (enable_sg)
+		csr |= EDMA_TCD_CSR_E_SG;
+
+	edma_writew(edma, csr, &(tcd->csr));
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+		int sg_len)
+{
+	struct fsl_edma_desc *fsl_desc;
+	int i;
+
+	fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
+				GFP_NOWAIT);
+	if (!fsl_desc)
+		return NULL;
+
+	fsl_desc->echan = fsl_chan;
+	fsl_desc->n_tcds = sg_len;
+	for (i = 0; i < sg_len; i++) {
+		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+		if (!fsl_desc->tcd[i].vtcd)
+			goto err;
+	}
+	return fsl_desc;
+
+err:
+	while (--i >= 0)
+		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+				fsl_desc->tcd[i].ptcd);
+	kfree(fsl_desc);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_desc *fsl_desc;
+	dma_addr_t dma_buf_next;
+	int sg_len, i;
+	u32 src_addr, dst_addr, last_sg, nbytes;
+	u16 soff, doff, iter;
+
+	if (!is_slave_direction(fsl_chan->fsc.dir))
+		return NULL;
+
+	sg_len = buf_len / period_len;
+	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+	if (!fsl_desc)
+		return NULL;
+	fsl_desc->iscyclic = true;
+
+	dma_buf_next = dma_addr;
+	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+	iter = period_len / nbytes;
+
+	for (i = 0; i < sg_len; i++) {
+		if (dma_buf_next >= dma_addr + buf_len)
+			dma_buf_next = dma_addr;
+
+		/* get next sg's physical address */
+		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+			src_addr = dma_buf_next;
+			dst_addr = fsl_chan->fsc.dev_addr;
+			soff = fsl_chan->fsc.addr_width;
+			doff = 0;
+		} else {
+			src_addr = fsl_chan->fsc.dev_addr;
+			dst_addr = dma_buf_next;
+			soff = 0;
+			doff = fsl_chan->fsc.addr_width;
+		}
+
+		fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
+				dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
+				iter, iter, doff, last_sg, true, false, true);
+		dma_buf_next += period_len;
+	}
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
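
To make the sizing above concrete, here is an illustrative (not part of the patch) set of numbers for a cyclic transfer: a 64 KiB buffer split into 16 KiB periods on a 4-byte wide device with a burst of 8.

static void example_cyclic_sizing(void)
{
	size_t buf_len = 65536, period_len = 16384;
	u32 nbytes = 4 * 8;			/* addr_width * burst */
	u16 iter = period_len / nbytes;		/* 512 minor loops per TCD */
	int sg_len = buf_len / period_len;	/* 4 TCDs in the ring */

	/*
	 * Each TCD's dlast_sga points at tcd[(i + 1) % sg_len].ptcd, so the
	 * last TCD links back to the first and the transfer loops forever;
	 * INT_MAJOR in every csr gives one interrupt per completed period.
	 */
	(void)iter;
	(void)sg_len;
}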
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_desc *fsl_desc;
+	struct scatterlist *sg;
+	u32 src_addr, dst_addr, last_sg, nbytes;
+	u16 soff, doff, iter;
+	int i;
+
+	if (!is_slave_direction(fsl_chan->fsc.dir))
+		return NULL;
+
+	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+	if (!fsl_desc)
+		return NULL;
+	fsl_desc->iscyclic = false;
+
+	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+	for_each_sg(sgl, sg, sg_len, i) {
+		/* get next sg's physical address */
+		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+			src_addr = sg_dma_address(sg);
+			dst_addr = fsl_chan->fsc.dev_addr;
+			soff = fsl_chan->fsc.addr_width;
+			doff = 0;
+		} else {
+			src_addr = fsl_chan->fsc.dev_addr;
+			dst_addr = sg_dma_address(sg);
+			soff = 0;
+			doff = fsl_chan->fsc.addr_width;
+		}
+
+		iter = sg_dma_len(sg) / nbytes;
+		if (i < sg_len - 1) {
+			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+					src_addr, dst_addr, fsl_chan->fsc.attr,
+					soff, nbytes, 0, iter, iter, doff, last_sg,
+					false, false, true);
+		} else {
+			last_sg = 0;
+			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+					src_addr, dst_addr, fsl_chan->fsc.attr,
+					soff, nbytes, 0, iter, iter, doff, last_sg,
+					true, true, false);
+		}
+	}
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+	struct fsl_edma_hw_tcd *tcd;
+	struct virt_dma_desc *vdesc;
+
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+	tcd = fsl_chan->edesc->tcd[0].vtcd;
+	fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
+			tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
+			tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+	fsl_edma_enable_request(fsl_chan);
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
+{
+	struct fsl_edma_engine *fsl_edma = dev_id;
+	unsigned int intr, ch;
+	void __iomem *base_addr;
+	struct fsl_edma_chan *fsl_chan;
+
+	base_addr = fsl_edma->membase;
+
+	intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+	if (!intr)
+		return IRQ_NONE;
+
+	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+		if (intr & (0x1 << ch)) {
+			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
+				base_addr + EDMA_CINT);
+
+			fsl_chan = &fsl_edma->chans[ch];
+
+			spin_lock(&fsl_chan->vchan.lock);
+			if (!fsl_chan->edesc->iscyclic) {
+				list_del(&fsl_chan->edesc->vdesc.node);
+				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+				fsl_chan->edesc = NULL;
+				fsl_chan->status = DMA_COMPLETE;
+			} else {
+				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+			}
+
+			if (!fsl_chan->edesc)
+				fsl_edma_xfer_desc(fsl_chan);
+
+			spin_unlock(&fsl_chan->vchan.lock);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
+{
+	struct fsl_edma_engine *fsl_edma = dev_id;
+	unsigned int err, ch;
+
+	err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+	if (!err)
+		return IRQ_NONE;
+
+	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+		if (err & (0x1 << ch)) {
+			fsl_edma_disable_request(&fsl_edma->chans[ch]);
+			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
+				fsl_edma->membase + EDMA_CERR);
+			fsl_edma->chans[ch].status = DMA_ERROR;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
+{
+	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
+		return IRQ_HANDLED;
+
+	return fsl_edma_err_handler(irq, dev_id);
+}
+
+static void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+		fsl_edma_xfer_desc(fsl_chan);
+
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *ofdma)
+{
+	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+	struct dma_chan *chan, *_chan;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	mutex_lock(&fsl_edma->fsl_edma_mutex);
+	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
+		if (chan->client_count)
+			continue;
+		if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+			chan = dma_get_slave_channel(chan);
+			if (chan) {
+				chan->device->privatecnt++;
+				fsl_edma_chan_mux(to_fsl_edma_chan(chan),
+					dma_spec->args[1], true);
+				mutex_unlock(&fsl_edma->fsl_edma_mutex);
+				return chan;
+			}
+		}
+	}
+	mutex_unlock(&fsl_edma->fsl_edma_mutex);
+	return NULL;
+}
+
+static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+				sizeof(struct fsl_edma_hw_tcd),
+				32, 0);
+	return 0;
+}
+
+static void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	fsl_edma_disable_request(fsl_chan);
+	fsl_edma_chan_mux(fsl_chan, 0, false);
+	fsl_chan->edesc = NULL;
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+	dma_pool_destroy(fsl_chan->tcd_pool);
+	fsl_chan->tcd_pool = NULL;
+}
+
+static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
+		struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
+	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = true;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
+static int
+fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+	int ret;
+
+	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
+	if (fsl_edma->txirq < 0) {
+		dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+		return fsl_edma->txirq;
+	}
+
+	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
+	if (fsl_edma->errirq < 0) {
+		dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+		return fsl_edma->errirq;
+	}
+
+	if (fsl_edma->txirq == fsl_edma->errirq) {
+		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
+			return ret;
+		}
+	} else {
+		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
+			return ret;
+		}
+
+		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int fsl_edma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_edma_engine *fsl_edma;
+	struct fsl_edma_chan *fsl_chan;
+	struct resource *res;
+	int len, chans;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "dma-channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get dma-channels.\n");
+		return ret;
+	}
+
+	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
+	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_edma)
+		return -ENOMEM;
+
+	fsl_edma->n_chans = chans;
+	mutex_init(&fsl_edma->fsl_edma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_edma->membase))
+		return PTR_ERR(fsl_edma->membase);
+
+	for (i = 0; i < DMAMUX_NR; i++) {
+		char clkname[32];
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(fsl_edma->muxbase[i]))
+			return PTR_ERR(fsl_edma->muxbase[i]);
+
+		sprintf(clkname, "dmamux%d", i);
+		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+		if (IS_ERR(fsl_edma->muxclk[i])) {
+			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+			return PTR_ERR(fsl_edma->muxclk[i]);
+		}
+
+		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable DMAMUX clock.\n");
+			return ret;
+		}
+
+	}
+
+	ret = fsl_edma_irq_init(pdev, fsl_edma);
+	if (ret)
+		return ret;
+
+	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+
+	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
+	for (i = 0; i < fsl_edma->n_chans; i++) {
+		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+		fsl_chan->edma = fsl_edma;
+
+		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+		fsl_edma_chan_mux(fsl_chan, 0, false);
+	}
+
+	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
+
+	fsl_edma->dma_dev.dev = &pdev->dev;
+	fsl_edma->dma_dev.device_alloc_chan_resources
+		= fsl_edma_alloc_chan_resources;
+	fsl_edma->dma_dev.device_free_chan_resources
+		= fsl_edma_free_chan_resources;
+	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+	fsl_edma->dma_dev.device_control = fsl_edma_control;
+	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+	platform_set_drvdata(pdev, fsl_edma);
+
+	ret = dma_async_device_register(&fsl_edma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+		dma_async_device_unregister(&fsl_edma->dma_dev);
+		return ret;
+	}
+
+	/* enable round robin arbitration */
+	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+
+	return 0;
+}
+
+static int fsl_edma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
+	int i;
+
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_edma->dma_dev);
+
+	for (i = 0; i < DMAMUX_NR; i++)
+		clk_disable_unprepare(fsl_edma->muxclk[i]);
+
+	return 0;
+}
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+	{ .compatible = "fsl,vf610-edma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
+static struct platform_driver fsl_edma_driver = {
+	.driver		= {
+		.name	= "fsl-edma",
+		.owner  = THIS_MODULE,
+		.of_match_table = fsl_edma_dt_ids,
+	},
+	.probe          = fsl_edma_probe,
+	.remove		= fsl_edma_remove,
+};
+
+module_platform_driver(fsl_edma_driver);
+
+MODULE_ALIAS("platform:fsl-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver");
+MODULE_LICENSE("GPL v2");
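
For context, a rough sketch of how a peripheral driver might consume a channel from this engine through the generic dmaengine slave API (not part of the patch; the "rx" name, addresses and sizes are hypothetical):

#include <linux/dmaengine.h>

static int example_start_rx(struct device *dev, dma_addr_t fifo,
			    dma_addr_t buf, size_t len, size_t period)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	/* a real driver would also set desc->callback before submitting */
	desc = dmaengine_prep_dma_cyclic(chan, buf, len, period,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		goto err;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;

err:
	dma_release_channel(chan);
	return -EINVAL;
}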
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 6f9ac20..286660a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -422,12 +422,12 @@
 		/* Tasklet error handler */
 		tasklet_schedule(&imxdma->channel[i].dma_tasklet);
 
-		printk(KERN_WARNING
-		       "DMA timeout on channel %d -%s%s%s%s\n", i,
-		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
-		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
-		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
-		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
+		dev_warn(imxdma->dev,
+			 "DMA timeout on channel %d -%s%s%s%s\n", i,
+			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
+			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
+			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
 	}
 	return IRQ_HANDLED;
 }
@@ -1236,6 +1236,7 @@
 static struct platform_driver imxdma_driver = {
 	.driver		= {
 		.name	= "imx-dma",
+		.owner	= THIS_MODULE,
 		.of_match_table = imx_dma_of_dev_id,
 	},
 	.id_table	= imx_dma_devtype,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b439679..bf02e7b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -867,8 +867,8 @@
 	phy->base = pdev->base;
 
 	if (irq) {
-		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
-				       "pdma", phy);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+				       IRQF_SHARED, "pdma", phy);
 		if (ret) {
 			dev_err(pdev->dev, "channel request irq fail!\n");
 			return ret;
@@ -957,8 +957,8 @@
 	if (irq_num != dma_channels) {
 		/* all chan share one irq, demux inside */
 		irq = platform_get_irq(op, 0);
-		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
-				       "pdma", pdev);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+				       IRQF_SHARED, "pdma", pdev);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 33f96aa..724f7f4 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -22,6 +22,7 @@
 #include <mach/regs-icu.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 
 #include "dmaengine.h"
 
@@ -541,6 +542,45 @@
 	return 0;
 }
 
+struct mmp_tdma_filter_param {
+	struct device_node *of_node;
+	unsigned int chan_id;
+};
+
+static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+	struct mmp_tdma_filter_param *param = fn_param;
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	struct dma_device *pdma_device = tdmac->chan.device;
+
+	if (pdma_device->dev->of_node != param->of_node)
+		return false;
+
+	if (chan->chan_id != param->chan_id)
+		return false;
+
+	return true;
+}
+
+struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+			       struct of_dma *ofdma)
+{
+	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
+	dma_cap_mask_t mask = tdev->device.cap_mask;
+	struct mmp_tdma_filter_param param;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	param.of_node = ofdma->of_node;
+	param.chan_id = dma_spec->args[0];
+
+	if (param.chan_id >= TDMA_CHANNEL_NUM)
+		return NULL;
+
+	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+}
+
 static struct of_device_id mmp_tdma_dt_ids[] = {
 	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
 	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@
 		return ret;
 	}
 
+	if (pdev->dev.of_node) {
+		ret = of_dma_controller_register(pdev->dev.of_node,
+							mmp_tdma_xlate, tdev);
+		if (ret) {
+			dev_err(tdev->device.dev,
+				"failed to register controller\n");
+			dma_async_device_unregister(&tdev->device);
+		}
+	}
+
 	dev_info(tdev->device.dev, "initialized\n");
 	return 0;
 }
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 64ceca2..b19f04f 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1088,6 +1088,23 @@
 	}
 }
 
+#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+				      struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+	caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = true;
+	caps->cmd_terminate = true;
+	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	return 0;
+}
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
 	struct omap_dmadev *od;
@@ -1118,6 +1135,7 @@
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
 	od->ddev.device_control = omap_dma_control;
+	od->ddev.device_slave_caps = omap_dma_device_slave_caps;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	INIT_LIST_HEAD(&od->pending);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 61fdc54..05fa548 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -964,16 +964,16 @@
 	if (pd) {
 		dma_async_device_unregister(&pd->dma);
 
+		free_irq(pdev->irq, pd);
+
 		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
 					 device_node) {
 			pd_chan = to_pd_chan(chan);
 
-			tasklet_disable(&pd_chan->tasklet);
 			tasklet_kill(&pd_chan->tasklet);
 		}
 
 		pci_pool_destroy(pd->pool);
-		free_irq(pdev->irq, pd);
 		pci_iounmap(pdev, pd->membase);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
new file mode 100644
index 0000000..82c9231
--- /dev/null
+++ b/drivers/dma/qcom_bam_dma.c
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QCOM BAM DMA engine driver
+ *
+ * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
+ * peripherals on the MSM 8x74.  The configuration of the channels is dependent
+ * on the way they are hard-wired to that specific peripheral.  The peripheral
+ * device tree entries specify the configuration of each channel.
+ *
+ * The DMA controller requires the use of external memory for storage of the
+ * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
+ * circular buffer and operations are managed according to the offset within the
+ * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
+ * are back to defaults.
+ *
+ * During DMA operations, we write descriptors to the FIFO, being careful to
+ * handle wrapping and then write the last FIFO offset to that channel's
+ * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
+ * indicates the current FIFO offset that is being processed, so there is some
+ * indication of where the hardware is currently working.
+ */
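
A hypothetical sketch of the offset bookkeeping described above (not part of the patch): the hardware reports byte offsets into the descriptor FIFO, software keeps head/tail ring indices, and the two convert through the 8-byte descriptor size; the literals correspond to P_SW_OFSTS_MASK, sizeof(struct bam_desc_hw) and MAX_DESCRIPTORS defined further down.

static unsigned int example_offset_to_index(u32 p_sw_ofsts)
{
	/* byte offset reported by the hardware -> ring index */
	return (p_sw_ofsts & 0xffff) / 8;
}

static unsigned int example_advance(unsigned int idx, unsigned int count)
{
	/* wrap inside the ring of 4095 usable slots */
	return (idx + count) % 4095;
}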
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+struct bam_desc_hw {
+	u32 addr;		/* Buffer physical address */
+	u16 size;		/* Buffer size in bytes */
+	u16 flags;
+};
+
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+
+struct bam_async_desc {
+	struct virt_dma_desc vd;
+
+	u32 num_desc;
+	u32 xfer_len;
+	struct bam_desc_hw *curr_desc;
+
+	enum dma_transfer_direction dir;
+	size_t length;
+	struct bam_desc_hw desc[0];
+};
+
+#define BAM_CTRL			0x0000
+#define BAM_REVISION			0x0004
+#define BAM_SW_REVISION			0x0080
+#define BAM_NUM_PIPES			0x003C
+#define BAM_TIMER			0x0040
+#define BAM_TIMER_CTRL			0x0044
+#define BAM_DESC_CNT_TRSHLD		0x0008
+#define BAM_IRQ_SRCS			0x000C
+#define BAM_IRQ_SRCS_MSK		0x0010
+#define BAM_IRQ_SRCS_UNMASKED		0x0030
+#define BAM_IRQ_STTS			0x0014
+#define BAM_IRQ_CLR			0x0018
+#define BAM_IRQ_EN			0x001C
+#define BAM_CNFG_BITS			0x007C
+#define BAM_IRQ_SRCS_EE(ee)		(0x0800 + ((ee) * 0x80))
+#define BAM_IRQ_SRCS_MSK_EE(ee)		(0x0804 + ((ee) * 0x80))
+#define BAM_P_CTRL(pipe)		(0x1000 + ((pipe) * 0x1000))
+#define BAM_P_RST(pipe)			(0x1004 + ((pipe) * 0x1000))
+#define BAM_P_HALT(pipe)		(0x1008 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_STTS(pipe)		(0x1010 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_CLR(pipe)		(0x1014 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_EN(pipe)		(0x1018 + ((pipe) * 0x1000))
+#define BAM_P_EVNT_DEST_ADDR(pipe)	(0x182C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_REG(pipe)		(0x1818 + ((pipe) * 0x1000))
+#define BAM_P_SW_OFSTS(pipe)		(0x1800 + ((pipe) * 0x1000))
+#define BAM_P_DATA_FIFO_ADDR(pipe)	(0x1824 + ((pipe) * 0x1000))
+#define BAM_P_DESC_FIFO_ADDR(pipe)	(0x181C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_TRSHLD(pipe)		(0x1828 + ((pipe) * 0x1000))
+#define BAM_P_FIFO_SIZES(pipe)		(0x1820 + ((pipe) * 0x1000))
+
+/* BAM CTRL */
+#define BAM_SW_RST			BIT(0)
+#define BAM_EN				BIT(1)
+#define BAM_EN_ACCUM			BIT(4)
+#define BAM_TESTBUS_SEL_SHIFT		5
+#define BAM_TESTBUS_SEL_MASK		0x3F
+#define BAM_DESC_CACHE_SEL_SHIFT	13
+#define BAM_DESC_CACHE_SEL_MASK		0x3
+#define BAM_CACHED_DESC_STORE		BIT(15)
+#define IBC_DISABLE			BIT(16)
+
+/* BAM REVISION */
+#define REVISION_SHIFT		0
+#define REVISION_MASK		0xFF
+#define NUM_EES_SHIFT		8
+#define NUM_EES_MASK		0xF
+#define CE_BUFFER_SIZE		BIT(13)
+#define AXI_ACTIVE		BIT(14)
+#define USE_VMIDMT		BIT(15)
+#define SECURED			BIT(16)
+#define BAM_HAS_NO_BYPASS	BIT(17)
+#define HIGH_FREQUENCY_BAM	BIT(18)
+#define INACTIV_TMRS_EXST	BIT(19)
+#define NUM_INACTIV_TMRS	BIT(20)
+#define DESC_CACHE_DEPTH_SHIFT	21
+#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
+#define CMD_DESC_EN		BIT(23)
+#define INACTIV_TMR_BASE_SHIFT	24
+#define INACTIV_TMR_BASE_MASK	0xFF
+
+/* BAM NUM PIPES */
+#define BAM_NUM_PIPES_SHIFT		0
+#define BAM_NUM_PIPES_MASK		0xFF
+#define PERIPH_NON_PIPE_GRP_SHIFT	16
+#define PERIPH_NON_PIP_GRP_MASK		0xFF
+#define BAM_NON_PIPE_GRP_SHIFT		24
+#define BAM_NON_PIPE_GRP_MASK		0xFF
+
+/* BAM CNFG BITS */
+#define BAM_PIPE_CNFG		BIT(2)
+#define BAM_FULL_PIPE		BIT(11)
+#define BAM_NO_EXT_P_RST	BIT(12)
+#define BAM_IBC_DISABLE		BIT(13)
+#define BAM_SB_CLK_REQ		BIT(14)
+#define BAM_PSM_CSW_REQ		BIT(15)
+#define BAM_PSM_P_RES		BIT(16)
+#define BAM_AU_P_RES		BIT(17)
+#define BAM_SI_P_RES		BIT(18)
+#define BAM_WB_P_RES		BIT(19)
+#define BAM_WB_BLK_CSW		BIT(20)
+#define BAM_WB_CSW_ACK_IDL	BIT(21)
+#define BAM_WB_RETR_SVPNT	BIT(22)
+#define BAM_WB_DSC_AVL_P_RST	BIT(23)
+#define BAM_REG_P_EN		BIT(24)
+#define BAM_PSM_P_HD_DATA	BIT(25)
+#define BAM_AU_ACCUMED		BIT(26)
+#define BAM_CMD_ENABLE		BIT(27)
+
+#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
+				 BAM_NO_EXT_P_RST |	\
+				 BAM_IBC_DISABLE |	\
+				 BAM_SB_CLK_REQ |	\
+				 BAM_PSM_CSW_REQ |	\
+				 BAM_PSM_P_RES |	\
+				 BAM_AU_P_RES |		\
+				 BAM_SI_P_RES |		\
+				 BAM_WB_P_RES |		\
+				 BAM_WB_BLK_CSW |	\
+				 BAM_WB_CSW_ACK_IDL |	\
+				 BAM_WB_RETR_SVPNT |	\
+				 BAM_WB_DSC_AVL_P_RST |	\
+				 BAM_REG_P_EN |		\
+				 BAM_PSM_P_HD_DATA |	\
+				 BAM_AU_ACCUMED |	\
+				 BAM_CMD_ENABLE)
+
+/* PIPE CTRL */
+#define P_EN			BIT(1)
+#define P_DIRECTION		BIT(3)
+#define P_SYS_STRM		BIT(4)
+#define P_SYS_MODE		BIT(5)
+#define P_AUTO_EOB		BIT(6)
+#define P_AUTO_EOB_SEL_SHIFT	7
+#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
+#define P_PREFETCH_LIMIT_SHIFT	9
+#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
+#define P_WRITE_NWD		BIT(11)
+#define P_LOCK_GROUP_SHIFT	16
+#define P_LOCK_GROUP_MASK	0x1F
+
+/* BAM_DESC_CNT_TRSHLD */
+#define CNT_TRSHLD		0xffff
+#define DEFAULT_CNT_THRSHLD	0x4
+
+/* BAM_IRQ_SRCS */
+#define BAM_IRQ			BIT(31)
+#define P_IRQ			0x7fffffff
+
+/* BAM_IRQ_SRCS_MSK */
+#define BAM_IRQ_MSK		BAM_IRQ
+#define P_IRQ_MSK		P_IRQ
+
+/* BAM_IRQ_STTS */
+#define BAM_TIMER_IRQ		BIT(4)
+#define BAM_EMPTY_IRQ		BIT(3)
+#define BAM_ERROR_IRQ		BIT(2)
+#define BAM_HRESP_ERR_IRQ	BIT(1)
+
+/* BAM_IRQ_CLR */
+#define BAM_TIMER_CLR		BIT(4)
+#define BAM_EMPTY_CLR		BIT(3)
+#define BAM_ERROR_CLR		BIT(2)
+#define BAM_HRESP_ERR_CLR	BIT(1)
+
+/* BAM_IRQ_EN */
+#define BAM_TIMER_EN		BIT(4)
+#define BAM_EMPTY_EN		BIT(3)
+#define BAM_ERROR_EN		BIT(2)
+#define BAM_HRESP_ERR_EN	BIT(1)
+
+/* BAM_P_IRQ_EN */
+#define P_PRCSD_DESC_EN		BIT(0)
+#define P_TIMER_EN		BIT(1)
+#define P_WAKE_EN		BIT(2)
+#define P_OUT_OF_DESC_EN	BIT(3)
+#define P_ERR_EN		BIT(4)
+#define P_TRNSFR_END_EN		BIT(5)
+#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
+
+/* BAM_P_SW_OFSTS */
+#define P_SW_OFSTS_MASK		0xffff
+
+#define BAM_DESC_FIFO_SIZE	SZ_32K
+#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
+
+struct bam_chan {
+	struct virt_dma_chan vc;
+
+	struct bam_device *bdev;
+
+	/* configuration from device tree */
+	u32 id;
+
+	struct bam_async_desc *curr_txd;	/* current running dma */
+
+	/* runtime configuration */
+	struct dma_slave_config slave;
+
+	/* fifo storage */
+	struct bam_desc_hw *fifo_virt;
+	dma_addr_t fifo_phys;
+
+	/* fifo markers */
+	unsigned short head;		/* start of active descriptor entries */
+	unsigned short tail;		/* end of active descriptor entries */
+
+	unsigned int initialized;	/* is the channel hw initialized? */
+	unsigned int paused;		/* is the channel paused? */
+	unsigned int reconfigure;	/* new slave config? */
+
+	struct list_head node;
+};
+
+static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
+{
+	return container_of(common, struct bam_chan, vc.chan);
+}
+
+struct bam_device {
+	void __iomem *regs;
+	struct device *dev;
+	struct dma_device common;
+	struct device_dma_parameters dma_parms;
+	struct bam_chan *channels;
+	u32 num_channels;
+
+	/* execution environment ID, from DT */
+	u32 ee;
+
+	struct clk *bamclk;
+	int irq;
+
+	/* dma start transaction tasklet */
+	struct tasklet_struct task;
+};
+
+/**
+ * bam_reset_channel - Reset individual BAM DMA channel
+ * @bchan: bam channel
+ *
+ * This function resets a specific BAM channel
+ */
+static void bam_reset_channel(struct bam_chan *bchan)
+{
+	struct bam_device *bdev = bchan->bdev;
+
+	lockdep_assert_held(&bchan->vc.lock);
+
+	/* reset channel */
+	writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
+	writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+
+	/* don't allow cpu to reorder BAM register accesses done after this */
+	wmb();
+
+	/* make sure hw is initialized when channel is used the first time  */
+	bchan->initialized = 0;
+}
+
+/**
+ * bam_chan_init_hw - Initialize channel hardware
+ * @bchan: bam channel
+ * @dir: DMA transfer direction
+ *
+ * This function resets and initializes the BAM channel
+ */
+static void bam_chan_init_hw(struct bam_chan *bchan,
+	enum dma_transfer_direction dir)
+{
+	struct bam_device *bdev = bchan->bdev;
+	u32 val;
+
+	/* Reset the channel to clear internal state of the FIFO */
+	bam_reset_channel(bchan);
+
+	/*
+	 * write out 8 byte aligned address.  We have enough space for this
+	 * because we allocated 1 more descriptor (8 bytes) than we can use
+	 */
+	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
+			bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
+	writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
+			BAM_P_FIFO_SIZES(bchan->id));
+
+	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
+	writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+
+	/* unmask the specific pipe and EE combo */
+	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+	val |= BIT(bchan->id);
+	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+	/* don't allow cpu to reorder the channel enable done below */
+	wmb();
+
+	/* set fixed direction and mode, then enable channel */
+	val = P_EN | P_SYS_MODE;
+	if (dir == DMA_DEV_TO_MEM)
+		val |= P_DIRECTION;
+
+	writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+
+	bchan->initialized = 1;
+
+	/* init FIFO pointers */
+	bchan->head = 0;
+	bchan->tail = 0;
+}
+
+/**
+ * bam_alloc_chan - Allocate channel resources for DMA channel.
+ * @chan: specified channel
+ *
+ * This function allocates the FIFO descriptor memory
+ */
+static int bam_alloc_chan(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+
+	if (bchan->fifo_virt)
+		return 0;
+
+	/* allocate FIFO descriptor space, but only if necessary */
+	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+				&bchan->fifo_phys, GFP_KERNEL);
+
+	if (!bchan->fifo_virt) {
+		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * bam_free_chan - Frees dma resources associated with specific channel
+ * @chan: specified channel
+ *
+ * Free the allocated fifo descriptor memory and channel resources
+ *
+ */
+static void bam_free_chan(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	u32 val;
+	unsigned long flags;
+
+	vchan_free_chan_resources(to_virt_chan(chan));
+
+	if (bchan->curr_txd) {
+		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+		return;
+	}
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+	bam_reset_channel(bchan);
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+				bchan->fifo_phys);
+	bchan->fifo_virt = NULL;
+
+	/* mask irq for pipe/channel */
+	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+	val &= ~BIT(bchan->id);
+	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+	/* disable irq */
+	writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+}
+
+/**
+ * bam_slave_config - set slave configuration for channel
+ * @bchan: bam dma channel
+ * @cfg: slave configuration
+ *
+ * Sets slave configuration for channel
+ *
+ */
+static void bam_slave_config(struct bam_chan *bchan,
+		struct dma_slave_config *cfg)
+{
+	memcpy(&bchan->slave, cfg, sizeof(*cfg));
+	bchan->reconfigure = 1;
+}
+
+/**
+ * bam_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+	struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc;
+	struct scatterlist *sg;
+	u32 i;
+	struct bam_desc_hw *desc;
+	unsigned int num_alloc = 0;
+
+	if (!is_slave_direction(direction)) {
+		dev_err(bdev->dev, "invalid dma direction\n");
+		return NULL;
+	}
+
+	/* calculate number of required entries */
+	for_each_sg(sgl, sg, sg_len, i)
+		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+
+	/* allocate enough room to accommodate the number of entries */
+	async_desc = kzalloc(sizeof(*async_desc) +
+			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+	if (!async_desc)
+		goto err_out;
+
+	async_desc->num_desc = num_alloc;
+	async_desc->curr_desc = async_desc->desc;
+	async_desc->dir = direction;
+
+	/* fill in temporary descriptors */
+	desc = async_desc->desc;
+	for_each_sg(sgl, sg, sg_len, i) {
+		unsigned int remainder = sg_dma_len(sg);
+		unsigned int curr_offset = 0;
+
+		do {
+			desc->addr = sg_dma_address(sg) + curr_offset;
+
+			if (remainder > BAM_MAX_DATA_SIZE) {
+				desc->size = BAM_MAX_DATA_SIZE;
+				remainder -= BAM_MAX_DATA_SIZE;
+				curr_offset += BAM_MAX_DATA_SIZE;
+			} else {
+				desc->size = remainder;
+				remainder = 0;
+			}
+
+			async_desc->length += desc->size;
+			desc++;
+		} while (remainder > 0);
+	}
+
+	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+	kfree(async_desc);
+	return NULL;
+}
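For context on the chunking above: each scatterlist entry is split into DIV_ROUND_UP(len, BAM_MAX_DATA_SIZE) hardware descriptors, with BAM_MAX_DATA_SIZE = 32K - 8. A standalone sketch with a made-up 100000-byte segment, just to spell out the numbers (not part of the driver):

#include <stdio.h>

#define SZ_32K			32768
#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Standalone check of the chunking above: a single 100000-byte
 * scatterlist entry needs DIV_ROUND_UP(100000, 32760) = 4 hardware
 * descriptors (three of 32760 bytes plus one of 1720 bytes).
 */
int main(void)
{
	unsigned int len = 100000, off = 0, n = 0;

	printf("need %u descriptors\n", DIV_ROUND_UP(len, BAM_MAX_DATA_SIZE));

	while (len) {
		unsigned int chunk = len > BAM_MAX_DATA_SIZE ?
				     BAM_MAX_DATA_SIZE : len;

		printf("desc %u: offset %u, size %u\n", n++, off, chunk);
		off += chunk;
		len -= chunk;
	}

	return 0;
}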
+
+/**
+ * bam_dma_terminate_all - terminate all transactions on a channel
+ * @bchan: bam dma channel
+ *
+ * Dequeues and frees all transactions
+ * No callbacks are done
+ *
+ */
+static void bam_dma_terminate_all(struct bam_chan *bchan)
+{
+	unsigned long flag;
+	LIST_HEAD(head);
+
+	/* remove all transactions, including active transaction */
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	if (bchan->curr_txd) {
+		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+		bchan->curr_txd = NULL;
+	}
+
+	vchan_get_all_descriptors(&bchan->vc, &head);
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+	vchan_dma_desc_free_list(&bchan->vc, &head);
+}
+
+/**
+ * bam_control - DMA device control
+ * @chan: dma channel
+ * @cmd: control cmd
+ * @arg: cmd argument
+ *
+ * Perform DMA control command
+ *
+ */
+static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	int ret = 0;
+	unsigned long flag;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+		spin_lock_irqsave(&bchan->vc.lock, flag);
+		writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+		bchan->paused = 1;
+		spin_unlock_irqrestore(&bchan->vc.lock, flag);
+		break;
+
+	case DMA_RESUME:
+		spin_lock_irqsave(&bchan->vc.lock, flag);
+		writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+		bchan->paused = 0;
+		spin_unlock_irqrestore(&bchan->vc.lock, flag);
+		break;
+
+	case DMA_TERMINATE_ALL:
+		bam_dma_terminate_all(bchan);
+		break;
+
+	case DMA_SLAVE_CONFIG:
+		spin_lock_irqsave(&bchan->vc.lock, flag);
+		bam_slave_config(bchan, (struct dma_slave_config *)arg);
+		spin_unlock_irqrestore(&bchan->vc.lock, flag);
+		break;
+
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * process_channel_irqs - processes the channel interrupts
+ * @bdev: bam controller
+ *
+ * This function processes the channel interrupts
+ *
+ */
+static u32 process_channel_irqs(struct bam_device *bdev)
+{
+	u32 i, srcs, pipe_stts;
+	unsigned long flags;
+	struct bam_async_desc *async_desc;
+
+	srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+
+	/* return early if no pipe/channel interrupts are present */
+	if (!(srcs & P_IRQ))
+		return srcs;
+
+	for (i = 0; i < bdev->num_channels; i++) {
+		struct bam_chan *bchan = &bdev->channels[i];
+
+		if (!(srcs & BIT(i)))
+			continue;
+
+		/* clear pipe irq */
+		pipe_stts = readl_relaxed(bdev->regs +
+			BAM_P_IRQ_STTS(i));
+
+		writel_relaxed(pipe_stts, bdev->regs +
+				BAM_P_IRQ_CLR(i));
+
+		spin_lock_irqsave(&bchan->vc.lock, flags);
+		async_desc = bchan->curr_txd;
+
+		if (async_desc) {
+			async_desc->num_desc -= async_desc->xfer_len;
+			async_desc->curr_desc += async_desc->xfer_len;
+			bchan->curr_txd = NULL;
+
+			/* manage FIFO */
+			bchan->head += async_desc->xfer_len;
+			bchan->head %= MAX_DESCRIPTORS;
+
+			/*
+			 * if complete, process cookie.  Otherwise
+			 * push back to front of desc_issued so that
+			 * it gets restarted by the tasklet
+			 */
+			if (!async_desc->num_desc)
+				vchan_cookie_complete(&async_desc->vd);
+			else
+				list_add(&async_desc->vd.node,
+					&bchan->vc.desc_issued);
+		}
+
+		spin_unlock_irqrestore(&bchan->vc.lock, flags);
+	}
+
+	return srcs;
+}
+
+/**
+ * bam_dma_irq - irq handler for bam controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the bam controller
+ */
+static irqreturn_t bam_dma_irq(int irq, void *data)
+{
+	struct bam_device *bdev = data;
+	u32 clr_mask = 0, srcs = 0;
+
+	srcs |= process_channel_irqs(bdev);
+
+	/* kick off tasklet to start next dma transfer */
+	if (srcs & P_IRQ)
+		tasklet_schedule(&bdev->task);
+
+	if (srcs & BAM_IRQ)
+		clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+
+	/* don't allow reorder of the various accesses to the BAM registers */
+	mb();
+
+	writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * bam_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct virt_dma_desc *vd;
+	int ret;
+	size_t residue = 0;
+	unsigned int i;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (!txstate)
+		return bchan->paused ? DMA_PAUSED : ret;
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+	vd = vchan_find_desc(&bchan->vc, cookie);
+	if (vd)
+		residue = container_of(vd, struct bam_async_desc, vd)->length;
+	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
+		for (i = 0; i < bchan->curr_txd->num_desc; i++)
+			residue += bchan->curr_txd->curr_desc[i].size;
+
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+	dma_set_residue(txstate, residue);
+
+	if (ret == DMA_IN_PROGRESS && bchan->paused)
+		ret = DMA_PAUSED;
+
+	return ret;
+}
+
+/**
+ * bam_apply_new_config - apply new slave configuration to channel
+ * @bchan: bam dma channel
+ * @dir: DMA direction
+ */
+static void bam_apply_new_config(struct bam_chan *bchan,
+	enum dma_transfer_direction dir)
+{
+	struct bam_device *bdev = bchan->bdev;
+	u32 maxburst;
+
+	if (dir == DMA_DEV_TO_MEM)
+		maxburst = bchan->slave.src_maxburst;
+	else
+		maxburst = bchan->slave.dst_maxburst;
+
+	writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+	bchan->reconfigure = 0;
+}
+
+/**
+ * bam_start_dma - start next transaction
+ * @bchan: bam dma channel
+ */
+static void bam_start_dma(struct bam_chan *bchan)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc;
+	struct bam_desc_hw *desc;
+	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+					sizeof(struct bam_desc_hw));
+
+	lockdep_assert_held(&bchan->vc.lock);
+
+	if (!vd)
+		return;
+
+	list_del(&vd->node);
+
+	async_desc = container_of(vd, struct bam_async_desc, vd);
+	bchan->curr_txd = async_desc;
+
+	/* on first use, initialize the channel hardware */
+	if (!bchan->initialized)
+		bam_chan_init_hw(bchan, async_desc->dir);
+
+	/* apply new slave config changes, if necessary */
+	if (bchan->reconfigure)
+		bam_apply_new_config(bchan, async_desc->dir);
+
+	desc = bchan->curr_txd->curr_desc;
+
+	if (async_desc->num_desc > MAX_DESCRIPTORS)
+		async_desc->xfer_len = MAX_DESCRIPTORS;
+	else
+		async_desc->xfer_len = async_desc->num_desc;
+
+	/* set INT on last descriptor */
+	desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+
+	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+		u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+		memcpy(&fifo[bchan->tail], desc,
+				partial * sizeof(struct bam_desc_hw));
+		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+				sizeof(struct bam_desc_hw));
+	} else {
+		memcpy(&fifo[bchan->tail], desc,
+			async_desc->xfer_len * sizeof(struct bam_desc_hw));
+	}
+
+	bchan->tail += async_desc->xfer_len;
+	bchan->tail %= MAX_DESCRIPTORS;
+
+	/* ensure descriptor writes and dma start not reordered */
+	wmb();
+	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+			bdev->regs + BAM_P_EVNT_REG(bchan->id));
+}
+
+/**
+ * dma_tasklet - DMA IRQ tasklet
+ * @data: tasklet argument (bam controller structure)
+ *
+ * Sets up next DMA operation and then processes all completed transactions
+ */
+static void dma_tasklet(unsigned long data)
+{
+	struct bam_device *bdev = (struct bam_device *)data;
+	struct bam_chan *bchan;
+	unsigned long flags;
+	unsigned int i;
+
+	/* go through the channels and kick off transactions */
+	for (i = 0; i < bdev->num_channels; i++) {
+		bchan = &bdev->channels[i];
+		spin_lock_irqsave(&bchan->vc.lock, flags);
+
+		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+			bam_start_dma(bchan);
+		spin_unlock_irqrestore(&bchan->vc.lock, flags);
+	}
+}
+
+/**
+ * bam_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Starts any pending transaction on the channel if it is currently idle
+ */
+static void bam_issue_pending(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+
+	/* if work pending and idle, start a transaction */
+	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+		bam_start_dma(bchan);
+
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+}
+
+/**
+ * bam_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void bam_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct bam_async_desc *async_desc = container_of(vd,
+			struct bam_async_desc, vd);
+
+	kfree(async_desc);
+}
+
+static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *of)
+{
+	struct bam_device *bdev = container_of(of->of_dma_data,
+					struct bam_device, common);
+	unsigned int request;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	request = dma_spec->args[0];
+	if (request >= bdev->num_channels)
+		return NULL;
+
+	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
+}
+
+/**
+ * bam_init
+ * @bdev: bam device
+ *
+ * Initialization helper for global bam registers
+ */
+static int bam_init(struct bam_device *bdev)
+{
+	u32 val;
+
+	/* read revision and configuration information */
+	val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+	val &= NUM_EES_MASK;
+
+	/* check that configured EE is within range */
+	if (bdev->ee >= val)
+		return -EINVAL;
+
+	val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+	bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+
+	/* s/w reset bam */
+	/* after reset all pipes are disabled and idle */
+	val = readl_relaxed(bdev->regs + BAM_CTRL);
+	val |= BAM_SW_RST;
+	writel_relaxed(val, bdev->regs + BAM_CTRL);
+	val &= ~BAM_SW_RST;
+	writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+	/* make sure previous stores are visible before enabling BAM */
+	wmb();
+
+	/* enable bam */
+	val |= BAM_EN;
+	writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+	/* set descriptor threshold, start with 4 bytes */
+	writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
+	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+
+	/* enable irqs for errors */
+	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+				bdev->regs + BAM_IRQ_EN);
+
+	/* unmask global bam interrupt */
+	writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+	return 0;
+}
+
+static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
+	u32 index)
+{
+	bchan->id = index;
+	bchan->bdev = bdev;
+
+	vchan_init(&bchan->vc, &bdev->common);
+	bchan->vc.desc_free = bam_dma_free_desc;
+}
+
+static int bam_dma_probe(struct platform_device *pdev)
+{
+	struct bam_device *bdev;
+	struct resource *iores;
+	int ret, i;
+
+	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+	if (!bdev)
+		return -ENOMEM;
+
+	bdev->dev = &pdev->dev;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(bdev->regs))
+		return PTR_ERR(bdev->regs);
+
+	bdev->irq = platform_get_irq(pdev, 0);
+	if (bdev->irq < 0)
+		return bdev->irq;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
+	if (ret) {
+		dev_err(bdev->dev, "Execution environment unspecified\n");
+		return ret;
+	}
+
+	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+	if (IS_ERR(bdev->bamclk))
+		return PTR_ERR(bdev->bamclk);
+
+	ret = clk_prepare_enable(bdev->bamclk);
+	if (ret) {
+		dev_err(bdev->dev, "failed to prepare/enable clock\n");
+		return ret;
+	}
+
+	ret = bam_init(bdev);
+	if (ret)
+		goto err_disable_clk;
+
+	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+
+	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
+				sizeof(*bdev->channels), GFP_KERNEL);
+
+	if (!bdev->channels) {
+		ret = -ENOMEM;
+		goto err_disable_clk;
+	}
+
+	/* allocate and initialize channels */
+	INIT_LIST_HEAD(&bdev->common.channels);
+
+	for (i = 0; i < bdev->num_channels; i++)
+		bam_channel_init(bdev, &bdev->channels[i], i);
+
+	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
+			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
+	if (ret)
+		goto err_disable_clk;
+
+	/* set max dma segment size */
+	bdev->common.dev = bdev->dev;
+	bdev->common.dev->dma_parms = &bdev->dma_parms;
+	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+	if (ret) {
+		dev_err(bdev->dev, "cannot set maximum segment size\n");
+		goto err_disable_clk;
+	}
+
+	platform_set_drvdata(pdev, bdev);
+
+	/* set capabilities */
+	dma_cap_zero(bdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
+
+	/* initialize dmaengine apis */
+	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+	bdev->common.device_free_chan_resources = bam_free_chan;
+	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+	bdev->common.device_control = bam_control;
+	bdev->common.device_issue_pending = bam_issue_pending;
+	bdev->common.device_tx_status = bam_tx_status;
+	bdev->common.dev = bdev->dev;
+
+	ret = dma_async_device_register(&bdev->common);
+	if (ret) {
+		dev_err(bdev->dev, "failed to register dma async device\n");
+		goto err_disable_clk;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
+					&bdev->common);
+	if (ret)
+		goto err_unregister_dma;
+
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&bdev->common);
+err_disable_clk:
+	clk_disable_unprepare(bdev->bamclk);
+	return ret;
+}
+
+static int bam_dma_remove(struct platform_device *pdev)
+{
+	struct bam_device *bdev = platform_get_drvdata(pdev);
+	u32 i;
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&bdev->common);
+
+	/* mask all interrupts for this execution environment */
+	writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+	devm_free_irq(bdev->dev, bdev->irq, bdev);
+
+	for (i = 0; i < bdev->num_channels; i++) {
+		bam_dma_terminate_all(&bdev->channels[i]);
+		tasklet_kill(&bdev->channels[i].vc.task);
+
+		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+			bdev->channels[i].fifo_virt,
+			bdev->channels[i].fifo_phys);
+	}
+
+	tasklet_kill(&bdev->task);
+
+	clk_disable_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static const struct of_device_id bam_of_match[] = {
+	{ .compatible = "qcom,bam-v1.4.0", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
+static struct platform_driver bam_dma_driver = {
+	.probe = bam_dma_probe,
+	.remove = bam_dma_remove,
+	.driver = {
+		.name = "bam-dma-engine",
+		.owner = THIS_MODULE,
+		.of_match_table = bam_of_match,
+	},
+};
+
+module_platform_driver(bam_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
+MODULE_LICENSE("GPL v2");
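As a usage illustration only: a peripheral driver would drive this controller through the generic dmaengine slave API rather than calling into it directly. A minimal sketch follows, assuming a device-tree channel name of "rx" and a made-up device FIFO address; src_maxburst is what bam_apply_new_config() above writes into BAM_DESC_CNT_TRSHLD.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical consumer of the BAM controller via the generic
 * dmaengine slave API.  The channel name "rx" and the FIFO address
 * are invented.
 */
static int example_bam_rx(struct device *dev, struct scatterlist *sgl,
			  unsigned int nents)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0xf9966000,	/* made-up device FIFO */
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);	/* ends up in bam_issue_pending() */

	/* ... wait for the completion callback, then release the channel ... */
	return dma_submit_error(cookie) ? -EIO : 0;
}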
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4eddedb..b209a0f 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -192,7 +192,7 @@
 	unsigned int			id;
 	bool				valid;
 	void __iomem			*base;
-	unsigned int			irq;
+	int				irq;
 	struct clk			*clk;
 	spinlock_t			lock;
 	struct s3c24xx_dma_chan		*serving;
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e01..b4c8138 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -29,6 +29,12 @@
 	help
 	  Enable support for the Renesas R-Car series DMA controllers.
 
+config RCAR_AUDMAC_PP
+	tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
 config SHDMA_R8A73A4
 	def_bool y
 	depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af2..1ce88b2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 0000000..2de7728
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on the drivers/dma/sh/shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-rcar-audmapp.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+/*
+ * DMA register
+ */
+#define PDMASAR		0x00
+#define PDMADAR		0x04
+#define PDMACHCR	0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE		(1 << 0)
+
+#define AUDMAPP_MAX_CHANNELS	29
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE	2
+#define AUDMAPP_SLAVE_NUMBER	256
+#define AUDMAPP_LEN_MAX		(16 * 1024 * 1024)
+
+struct audmapp_chan {
+	struct shdma_chan shdma_chan;
+	struct audmapp_slave_config *config;
+	void __iomem *base;
+};
+
+struct audmapp_device {
+	struct shdma_dev shdma_dev;
+	struct audmapp_pdata *pdata;
+	struct device *dev;
+	void __iomem *chan_reg;
+};
+
+#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device,	\
+				  struct audmapp_device, shdma_dev.dma_dev)
+
+static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
+{
+	struct audmapp_device *audev = to_dev(auchan);
+	struct device *dev = audev->dev;
+
+	dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
+
+	iowrite32(data, auchan->base + reg);
+}
+
+static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
+{
+	return ioread32(auchan->base + reg);
+}
+
+static void audmapp_halt(struct shdma_chan *schan)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	int i;
+
+	audmapp_write(auchan, 0, PDMACHCR);
+
+	for (i = 0; i < 1024; i++) {
+		if (0 == audmapp_read(auchan, PDMACHCR))
+			return;
+		udelay(1);
+	}
+}
+
+static void audmapp_start_xfer(struct shdma_chan *schan,
+			       struct shdma_desc *sdecs)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_device *audev = to_dev(auchan);
+	struct audmapp_slave_config *cfg = auchan->config;
+	struct device *dev = audev->dev;
+	u32 chcr = cfg->chcr | PDMACHCR_DE;
+
+	dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
+		&cfg->src, &cfg->dst, cfg->chcr);
+
+	audmapp_write(auchan, cfg->src,	PDMASAR);
+	audmapp_write(auchan, cfg->dst,	PDMADAR);
+	audmapp_write(auchan, chcr,	PDMACHCR);
+}
+
+static struct audmapp_slave_config *
+audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+{
+	struct audmapp_device *audev = to_dev(auchan);
+	struct audmapp_pdata *pdata = audev->pdata;
+	struct audmapp_slave_config *cfg;
+	int i;
+
+	if (slave_id >= AUDMAPP_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+		if (cfg->slave_id == slave_id)
+			return cfg;
+
+	return NULL;
+}
+
+static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
+			     dma_addr_t slave_addr, bool try)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_slave_config *cfg =
+		audmapp_find_slave(auchan, slave_id);
+
+	if (!cfg)
+		return -ENODEV;
+	if (try)
+		return 0;
+
+	auchan->config	= cfg;
+
+	return 0;
+}
+
+static int audmapp_desc_setup(struct shdma_chan *schan,
+			      struct shdma_desc *sdecs,
+			      dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_slave_config *cfg = auchan->config;
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*len > (size_t)AUDMAPP_LEN_MAX)
+		*len = (size_t)AUDMAPP_LEN_MAX;
+
+	return 0;
+}
+
+static void audmapp_setup_xfer(struct shdma_chan *schan,
+			       int slave_id)
+{
+}
+
+static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
+{
+	return 0; /* always fixed address */
+}
+
+static bool audmapp_channel_busy(struct shdma_chan *schan)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	u32 chcr = audmapp_read(auchan, PDMACHCR);
+
+	return chcr & ~PDMACHCR_DE;
+}
+
+static bool audmapp_desc_completed(struct shdma_chan *schan,
+				   struct shdma_desc *sdesc)
+{
+	return true;
+}
+
+static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
+{
+	return &((struct shdma_desc *)buf)[i];
+}
+
+static const struct shdma_ops audmapp_shdma_ops = {
+	.halt_channel	= audmapp_halt,
+	.desc_setup	= audmapp_desc_setup,
+	.set_slave	= audmapp_set_slave,
+	.start_xfer	= audmapp_start_xfer,
+	.embedded_desc	= audmapp_embedded_desc,
+	.setup_xfer	= audmapp_setup_xfer,
+	.slave_addr	= audmapp_slave_addr,
+	.channel_busy	= audmapp_channel_busy,
+	.desc_completed	= audmapp_desc_completed,
+};
+
+static int audmapp_chan_probe(struct platform_device *pdev,
+			      struct audmapp_device *audev, int id)
+{
+	struct shdma_dev *sdev = &audev->shdma_dev;
+	struct audmapp_chan *auchan;
+	struct shdma_chan *schan;
+	struct device *dev = audev->dev;
+
+	auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
+	if (!auchan)
+		return -ENOMEM;
+
+	schan = &auchan->shdma_chan;
+	schan->max_xfer_len = AUDMAPP_LEN_MAX;
+
+	shdma_chan_probe(sdev, schan, id);
+
+	auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
+	dev_dbg(dev, "%02d : %p / %p\n", id, auchan->base, audev->chan_reg);
+
+	return 0;
+}
+
+static void audmapp_chan_remove(struct audmapp_device *audev)
+{
+	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+	struct shdma_chan *schan;
+	int i;
+
+	shdma_for_each_chan(schan, &audev->shdma_dev, i) {
+		BUG_ON(!schan);
+		shdma_chan_remove(schan);
+	}
+	dma_dev->chancnt = 0;
+}
+
+static int audmapp_probe(struct platform_device *pdev)
+{
+	struct audmapp_pdata *pdata = pdev->dev.platform_data;
+	struct audmapp_device *audev;
+	struct shdma_dev *sdev;
+	struct dma_device *dma_dev;
+	struct resource *res;
+	int err, i;
+
+	if (!pdata)
+		return -ENODEV;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
+	if (!audev)
+		return -ENOMEM;
+
+	audev->dev	= &pdev->dev;
+	audev->pdata	= pdata;
+	audev->chan_reg	= devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(audev->chan_reg))
+		return PTR_ERR(audev->chan_reg);
+
+	sdev		= &audev->shdma_dev;
+	sdev->ops	= &audmapp_shdma_ops;
+	sdev->desc_size	= sizeof(struct shdma_desc);
+
+	dma_dev			= &sdev->dma_dev;
+	dma_dev->copy_align	= LOG2_DEFAULT_XFER_SIZE;
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+	err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
+	if (err < 0)
+		return err;
+
+	platform_set_drvdata(pdev, audev);
+
+	/* Create DMA Channel */
+	for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
+		err = audmapp_chan_probe(pdev, audev, i);
+		if (err)
+			goto chan_probe_err;
+	}
+
+	err = dma_async_device_register(dma_dev);
+	if (err < 0)
+		goto chan_probe_err;
+
+	return err;
+
+chan_probe_err:
+	audmapp_chan_remove(audev);
+	shdma_cleanup(sdev);
+
+	return err;
+}
+
+static int audmapp_remove(struct platform_device *pdev)
+{
+	struct audmapp_device *audev = platform_get_drvdata(pdev);
+	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+
+	dma_async_device_unregister(dma_dev);
+
+	audmapp_chan_remove(audev);
+	shdma_cleanup(&audev->shdma_dev);
+
+	return 0;
+}
+
+static struct platform_driver audmapp_driver = {
+	.probe		= audmapp_probe,
+	.remove		= audmapp_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "rcar-audmapp-engine",
+	},
+};
+module_platform_driver(audmapp_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
+MODULE_LICENSE("GPL");
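For orientation only: this driver takes its slave table from platform data rather than from the device tree. A hypothetical board-file fragment is sketched below, using only the fields the code above dereferences (slave/slave_num, slave_id, src, dst, chcr); the request ID, FIFO addresses, CHCR value and register window are all invented.

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_data/dma-rcar-audmapp.h>
#include <linux/platform_device.h>

/* Hypothetical board wiring for the peri-peri DMAC. */
static struct audmapp_slave_config example_audmapp_slaves[] = {
	{
		.slave_id	= 0x2d,		/* made-up request ID */
		.src		= 0xec541000,	/* made-up source FIFO */
		.dst		= 0xec740000,	/* made-up destination FIFO */
		.chcr		= 0x10000700,	/* made-up transfer setup */
	},
};

static struct audmapp_pdata example_audmapp_pdata = {
	.slave		= example_audmapp_slaves,
	.slave_num	= ARRAY_SIZE(example_audmapp_slaves),
};

static struct resource example_audmapp_resources[] = {
	DEFINE_RES_MEM(0xec700000, 0x400),	/* made-up register window */
};

static struct platform_device example_audmapp_device = {
	.name		= "rcar-audmapp-engine",
	.id		= -1,
	.resource	= example_audmapp_resources,
	.num_resources	= ARRAY_SIZE(example_audmapp_resources),
	.dev		= {
		.platform_data	= &example_audmapp_pdata,
	},
};
/* registered from board code with platform_device_register() */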
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2e7b394..5239677 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -227,7 +227,7 @@
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	int match = (int)arg;
+	int match = (long)arg;
 	int ret;
 
 	if (match < 0)
@@ -491,8 +491,8 @@
 	}
 
 	dev_dbg(schan->dev,
-		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
-		copy_size, *len, *src, *dst, &new->async_tx,
+		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+		copy_size, *len, src, dst, &new->async_tx,
 		new->async_tx.cookie);
 
 	new->mark = DESC_PREPARED;
@@ -555,8 +555,8 @@
 			goto err_get_desc;
 
 		do {
-			dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
-				i, sg, len, (unsigned long long)sg_addr);
+			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+				i, sg, len, &sg_addr);
 
 			if (direction == DMA_DEV_TO_MEM)
 				new = shdma_add_desc(schan, flags,
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a0..b4ff9d3 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@
 	/* Only slave DMA channels can be allocated via DT */
 	dma_cap_set(DMA_SLAVE, mask);
 
-	chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+	chan = dma_request_channel(mask, shdma_chan_filter,
+				   (void *)(uintptr_t)id);
 	if (chan)
 		to_shdma_chan(chan)->hw_req = id;
 
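The two casts fixed above ((long)arg in shdma-base.c and (void *)(uintptr_t)id here) both deal with carrying a small integer request ID through an opaque void * filter argument without truncation or int-to-pointer-cast warnings on 64-bit builds. A minimal sketch of that round trip, with invented helper names:

#include <linux/types.h>

/*
 * Minimal sketch (helper names invented): pass a small integer through
 * an opaque void * callback argument and recover it, widening through
 * uintptr_t in both directions so no bits are lost and no cast
 * warnings fire on 64-bit builds.
 */
static inline void *pack_request_id(int id)
{
	return (void *)(uintptr_t)id;
}

static inline int unpack_request_id(void *arg)
{
	return (int)(uintptr_t)arg;
}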
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 0d765c0..dda7e75 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,6 +443,7 @@
 	return ret;
 }
 
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
 	struct sh_dmae_device *shdev = data;
@@ -453,6 +454,7 @@
 	sh_dmae_reset(shdev);
 	return IRQ_HANDLED;
 }
+#endif
 
 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
 				   struct shdma_desc *sdesc)
@@ -637,7 +639,7 @@
 #define sh_dmae_resume NULL
 #endif
 
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
 	.suspend		= sh_dmae_suspend,
 	.resume			= sh_dmae_resume,
 	.runtime_suspend	= sh_dmae_runtime_suspend,
@@ -685,9 +687,12 @@
 static int sh_dmae_probe(struct platform_device *pdev)
 {
 	const struct sh_dmae_pdata *pdata;
-	unsigned long irqflags = 0,
-		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
-	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+	int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+	unsigned long irqflags = 0;
+	int errirq;
+#endif
 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
 	struct sh_dmae_device *shdev;
 	struct dma_device *dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdf..4e7df43 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -178,8 +178,8 @@
 	struct sudmac_chan *sc = to_chan(schan);
 	struct sudmac_desc *sd = to_desc(sdesc);
 
-	dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
-		__func__, src, dst, *len);
+	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+		__func__, &src, &dst, *len);
 
 	if (*len > schan->max_xfer_len)
 		*len = schan->max_xfer_len;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d4d3a31..a1bd829 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
+#include <linux/of_dma.h>
 #include <linux/sirfsoc_dma.h>
 
 #include "dmaengine.h"
@@ -659,6 +660,18 @@
 	return 0;
 }
 
+static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
+{
+	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= SIRFSOC_DMA_CHANNELS)
+		return NULL;
+
+	return dma_get_slave_channel(&sdma->channels[request].chan);
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@
 	if (ret)
 		goto free_irq;
 
+	/* Device-tree DMA controller registration */
+	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
+	if (ret) {
+		dev_err(dev, "failed to register DMA controller\n");
+		goto unreg_dma_dev;
+	}
+
 	pm_runtime_enable(&op->dev);
 	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 
 	return 0;
 
+unreg_dma_dev:
+	dma_async_device_unregister(dma);
 free_irq:
 	free_irq(sdma->irq, sdma);
 irq_dispose:
@@ -781,6 +803,7 @@
 	struct device *dev = &op->dev;
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
+	of_dma_controller_free(op->dev.of_node);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
 	irq_dispose_mapping(sdma->irq);
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
index ff50aee..2c41eae 100644
--- a/drivers/firmware/efi/efi-stub-helper.c
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -397,7 +397,7 @@
 				else
 					chunksize = size;
 
-				status = efi_file_read(fh, files[j].handle,
+				status = efi_file_read(files[j].handle,
 						       &chunksize,
 						       (void *)addr);
 				if (status != EFI_SUCCESS) {
@@ -408,7 +408,7 @@
 				size -= chunksize;
 			}
 
-			efi_file_close(fh, files[j].handle);
+			efi_file_close(files[j].handle);
 		}
 
 	}
@@ -425,7 +425,7 @@
 
 close_handles:
 	for (k = j; k < i; k++)
-		efi_file_close(fh, files[k].handle);
+		efi_file_close(files[k].handle);
 free_files:
 	efi_call_early(free_pool, files);
 fail:
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 51493ed..a43220c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -196,6 +196,53 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state byt_cstates[] = {
+	{
+		.name = "C1-BYT",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle },
+	{
+		.name = "C1E-BYT",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 15,
+		.target_residency = 30,
+		.enter = &intel_idle },
+	{
+		.name = "C6N-BYT",
+		.desc = "MWAIT 0x58",
+		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 40,
+		.target_residency = 275,
+		.enter = &intel_idle },
+	{
+		.name = "C6S-BYT",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{
+		.name = "C7-BYT",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 1500,
+		.enter = &intel_idle },
+	{
+		.name = "C7S-BYT",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state ivb_cstates[] = {
 	{
 		.name = "C1-IVB",
@@ -236,6 +283,105 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state ivt_cstates[] = {
+	{
+		.name = "C1-IVT",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle },
+	{
+		.name = "C1E-IVT",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 10,
+		.target_residency = 80,
+		.enter = &intel_idle },
+	{
+		.name = "C3-IVT",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 59,
+		.target_residency = 156,
+		.enter = &intel_idle },
+	{
+		.name = "C6-IVT",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 82,
+		.target_residency = 300,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_4s[] = {
+	{
+		.name = "C1-IVT-4S",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle },
+	{
+		.name = "C1E-IVT-4S",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 10,
+		.target_residency = 250,
+		.enter = &intel_idle },
+	{
+		.name = "C3-IVT-4S",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 59,
+		.target_residency = 300,
+		.enter = &intel_idle },
+	{
+		.name = "C6-IVT-4S",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 84,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_8s[] = {
+	{
+		.name = "C1-IVT-8S",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle },
+	{
+		.name = "C1E-IVT-8S",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 10,
+		.target_residency = 500,
+		.enter = &intel_idle },
+	{
+		.name = "C3-IVT-8S",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 59,
+		.target_residency = 600,
+		.enter = &intel_idle },
+	{
+		.name = "C6-IVT-8S",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 88,
+		.target_residency = 700,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state hsw_cstates[] = {
 	{
 		.name = "C1-HSW",
@@ -464,11 +610,21 @@
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_byt = {
+	.state_table = byt_cstates,
+	.disable_promotion_to_c1e = true,
+};
+
 static const struct idle_cpu idle_cpu_ivb = {
 	.state_table = ivb_cstates,
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_ivt = {
+	.state_table = ivt_cstates,
+	.disable_promotion_to_c1e = true,
+};
+
 static const struct idle_cpu idle_cpu_hsw = {
 	.state_table = hsw_cstates,
 	.disable_promotion_to_c1e = true,
@@ -494,8 +650,10 @@
 	ICPU(0x2f, idle_cpu_nehalem),
 	ICPU(0x2a, idle_cpu_snb),
 	ICPU(0x2d, idle_cpu_snb),
+	ICPU(0x36, idle_cpu_atom),
+	ICPU(0x37, idle_cpu_byt),
 	ICPU(0x3a, idle_cpu_ivb),
-	ICPU(0x3e, idle_cpu_ivb),
+	ICPU(0x3e, idle_cpu_ivt),
 	ICPU(0x3c, idle_cpu_hsw),
 	ICPU(0x3f, idle_cpu_hsw),
 	ICPU(0x45, idle_cpu_hsw),
@@ -572,6 +730,39 @@
 	free_percpu(intel_idle_cpuidle_devices);
 	return;
 }
+
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ *
+ * Currently used to access tuned IVT multi-socket targets
+ * Assumption: num_sockets == (max_package_num + 1)
+ */
+void intel_idle_state_table_update(void)
+{
+	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
+		int cpu, package_num, num_sockets = 1;
+
+		for_each_online_cpu(cpu) {
+			package_num = topology_physical_package_id(cpu);
+			if (package_num + 1 > num_sockets) {
+				num_sockets = package_num + 1;
+
+				if (num_sockets > 4) {
+					cpuidle_state_table = ivt_cstates_8s;
+					return;
+				}
+			}
+		}
+
+		if (num_sockets > 2)
+			cpuidle_state_table = ivt_cstates_4s;
+		/* else, 1 and 2 socket systems use default ivt_cstates */
+	}
+	return;
+}
+
 /*
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
@@ -581,10 +772,12 @@
 	int cstate;
 	struct cpuidle_driver *drv = &intel_idle_driver;
 
+	intel_idle_state_table_update();
+
 	drv->state_count = 1;
 
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-		int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+		int num_substates, mwait_hint, mwait_cstate;
 
 		if (cpuidle_state_table[cstate].enter == NULL)
 			break;
@@ -597,14 +790,13 @@
 
 		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
 		mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-		mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
-		/* does the state exist in CPUID.MWAIT? */
+		/* number of sub-states for this state in CPUID.MWAIT */
 		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
 					& MWAIT_SUBSTATE_MASK;
 
-		/* if sub-state in table is not enumerated by CPUID */
-		if ((mwait_substate + 1) > num_substates)
+		/* if NO sub-states for this state in CPUID, skip it */
+		if (num_substates == 0)
 			continue;
 
 		if (((mwait_cstate + 1) > 2) &&
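To make the sub-state check above concrete, here is a user-space style sketch of the hint decode, assuming the usual asm/mwait.h layout (the low nibble of an MWAIT hint is the sub-state, the next nibble the C-state index, and CPUID.MWAIT EDX carries one 4-bit sub-state count per C-state); the EDX value is made up.

#include <stdio.h>

/* Assumed masks/shifts matching asm/mwait.h. */
#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_CSTATE_MASK	0xf
#define MWAIT_SUBSTATE_SIZE	4

int main(void)
{
	unsigned int hint = 0x52;		/* "C6S-BYT" entry above */
	unsigned int cstate = (hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;
	unsigned int substate = hint & MWAIT_SUBSTATE_MASK;
	unsigned int edx = 0x03112120;		/* made-up CPUID.MWAIT EDX */
	unsigned int num_substates = (edx >> ((cstate + 1) * 4)) &
				     MWAIT_SUBSTATE_MASK;

	/* old rule: substate + 1 <= num_substates; new rule: num_substates != 0 */
	printf("hint 0x%02x -> cstate %u, substate %u, %u sub-states enumerated\n",
	       hint, cstate, substate, num_substates);

	return 0;
}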
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e..c98fdb1 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+		    u8 protection)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
+	if (protection)
+		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
 	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
 		 cma_id->device);
@@ -226,7 +231,8 @@
 		return ret;
 
 	/* assign function handlers */
-	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
 		device->use_fastreg = 1;
 		device->reg_rdma_mem = isert_reg_rdma;
 		device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
 
+	/* Check signature cap */
+	device->pi_capable = dev_attr->device_cap_flags &
+			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
 	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d\n",
+		 "Fast registration %d pi_capable %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg);
+		 device->ib_device->num_comp_vectors, device->use_fastreg,
+		 device->pi_capable);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -395,6 +406,12 @@
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
+		if (fr_desc->pi_ctx) {
+			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+			kfree(fr_desc->pi_ctx);
+		}
 		kfree(fr_desc);
 		++i;
 	}
@@ -406,8 +423,10 @@
 
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc)
+		     struct fast_reg_descriptor *fr_desc, u8 protection)
 {
+	int ret;
+
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 							 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@
 	if (IS_ERR(fr_desc->data_mr)) {
 		pr_err("Failed to allocate data frmr err=%ld\n",
 		       PTR_ERR(fr_desc->data_mr));
-		ib_free_fast_reg_page_list(fr_desc->data_frpl);
-		return PTR_ERR(fr_desc->data_mr);
+		ret = PTR_ERR(fr_desc->data_mr);
+		goto err_data_frpl;
 	}
 	pr_debug("Create fr_desc %p page_list %p\n",
 		 fr_desc, fr_desc->data_frpl->page_list);
+	fr_desc->ind |= ISERT_DATA_KEY_VALID;
 
-	fr_desc->valid = true;
+	if (protection) {
+		struct ib_mr_init_attr mr_init_attr = {0};
+		struct pi_context *pi_ctx;
+
+		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+		if (!fr_desc->pi_ctx) {
+			pr_err("Failed to allocate pi context\n");
+			ret = -ENOMEM;
+			goto err_data_mr;
+		}
+		pi_ctx = fr_desc->pi_ctx;
+
+		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+						    ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_frpl)) {
+			pr_err("Failed to allocate prot frpl err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_frpl));
+			ret = PTR_ERR(pi_ctx->prot_frpl);
+			goto err_pi_ctx;
+		}
+
+		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_mr)) {
+			pr_err("Failed to allocate prot frmr err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_mr));
+			ret = PTR_ERR(pi_ctx->prot_mr);
+			goto err_prot_frpl;
+		}
+		fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+		mr_init_attr.max_reg_descriptors = 2;
+		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+		if (IS_ERR(pi_ctx->sig_mr)) {
+			pr_err("Failed to allocate signature enabled mr err=%ld\n",
+			       PTR_ERR(pi_ctx->sig_mr));
+			ret = PTR_ERR(pi_ctx->sig_mr);
+			goto err_prot_mr;
+		}
+		fr_desc->ind |= ISERT_SIG_KEY_VALID;
+	}
+	fr_desc->ind &= ~ISERT_PROTECTED;
 
 	return 0;
+err_prot_mr:
+	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+	kfree(fr_desc->pi_ctx);
+err_data_mr:
+	ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+	ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+	return ret;
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
-	int i, ret;
+	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	int i, ret, tag_num;
+	/*
+	 * Setup the number of FRMRs based upon the number of tags
+	 * available to session in iscsi_target_locate_portal().
+	 */
+	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 
-	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 	isert_conn->conn_fr_pool_size = 0;
-	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
 			pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc);
+					   isert_conn->conn_pd, fr_desc,
+					   pi_support);
 		if (ret) {
 			pr_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
@@ -480,6 +561,7 @@
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
+	u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 
 	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
 		 cma_id, cma_id->context);
@@ -498,6 +580,7 @@
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@
 		goto out_mr;
 	}
 
-	if (device->use_fastreg) {
-		ret = isert_conn_create_fastreg_pool(isert_conn);
-		if (ret) {
-			pr_err("Conn: %p failed to create fastreg pool\n",
-			       isert_conn);
-			goto out_fastreg;
-		}
+	if (pi_support && !device->pi_capable) {
+		pr_err("Protection information requested but not supported\n");
+		ret = -EINVAL;
+		goto out_mr;
 	}
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
 	if (ret)
 		goto out_conn_dev;
 
@@ -591,9 +671,6 @@
 	return 0;
 
 out_conn_dev:
-	if (device->use_fastreg)
-		isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
 	ib_dereg_mr(isert_conn->conn_mr);
 out_mr:
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@
 	}
 	if (!login->login_failed) {
 		if (login->login_complete) {
+			if (isert_conn->conn_device->use_fastreg) {
+				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+				ret = isert_conn_create_fastreg_pool(isert_conn,
+								     pi_support);
+				if (ret) {
+					pr_err("Conn: %p failed to create"
+					       " fastreg pool\n", isert_conn);
+					return ret;
+				}
+			}
+
 			ret = isert_alloc_rx_descriptors(isert_conn);
 			if (ret)
 				return ret;
@@ -1392,19 +1481,60 @@
 	}
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+		   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+			      DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	data->len = length - offset;
+	data->offset = offset;
+	data->sg_off = data->offset / PAGE_SIZE;
+
+	data->sg = &sg[data->sg_off];
+	data->nents = min_t(unsigned int, nents - data->sg_off,
+					  ISCSI_ISER_SG_TABLESIZE);
+	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+					PAGE_SIZE);
+
+	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+					data->dma_dir);
+	if (unlikely(!data->dma_nents)) {
+		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		return -EINVAL;
+	}
+
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+	return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+	memset(data, 0, sizeof(*data));
+}
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-	if (wr->sge) {
+
+	if (wr->data.sg) {
 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	if (wr->send_wr) {
@@ -1424,7 +1554,6 @@
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@
 	if (wr->fr_desc) {
 		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
+		if (wr->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &wr->prot);
+			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+		}
 		spin_lock_bh(&isert_conn->conn_lock);
 		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
-	if (wr->sge) {
+	if (wr->data.sg) {
 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@
 }
 
 static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@
 			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
-		if (cmd->data_direction == DMA_TO_DEVICE)
+		if (cmd->data_direction == DMA_TO_DEVICE) {
 			iscsit_stop_dataout_timer(cmd);
+			/*
+			 * Check for special case during comp_err where
+			 * WRITE_PENDING has been handed off from core,
+			 * but requires an extra target_put_sess_cmd()
+			 * before transport_generic_free_cmd() below.
+			 */
+			if (comp_err &&
+			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+				struct se_cmd *se_cmd = &cmd->se_cmd;
+
+				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+			}
+		}
 
 		device->unreg_rdma_mem(isert_cmd, isert_conn);
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@
 
 static void
 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
-		     struct ib_device *ib_dev)
+		     struct ib_device *ib_dev, bool comp_err)
 {
 	if (isert_cmd->pdu_buf_dma != 0) {
 		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@
 	}
 
 	isert_unmap_tx_desc(tx_desc, ib_dev);
-	isert_put_cmd(isert_cmd);
+	isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+	struct ib_mr_status mr_status;
+	int ret;
+
+	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+	if (ret) {
+		pr_err("ib_check_mr_status failed, ret %d\n", ret);
+		goto fail_mr_status;
+	}
+
+	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+		u64 sec_offset_err;
+		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+		switch (mr_status.sig_err.err_type) {
+		case IB_SIG_BAD_GUARD:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_REFTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_APPTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+			break;
+		}
+		sec_offset_err = mr_status.sig_err.sig_err_offset;
+		do_div(sec_offset_err, block_size);
+		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+		pr_err("isert: PI error found type %d at sector 0x%llx "
+		       "expected 0x%x vs actual 0x%x\n",
+		       mr_status.sig_err.err_type,
+		       (unsigned long long)se_cmd->bad_sector,
+		       mr_status.sig_err.expected,
+		       mr_status.sig_err.actual);
+		ret = 1;
+	}
+
+fail_mr_status:
+	return ret;
+}
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+			    struct isert_cmd *isert_cmd)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+	wr->send_wr_num = 0;
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
@@ -1545,10 +1758,17 @@
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->cur_rdma_length;
+	cmd->write_data_done = wr->data.len;
 	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	target_execute_cmd(se_cmd);
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		target_execute_cmd(se_cmd);
 }
 
 static void
@@ -1577,14 +1801,14 @@
 		iscsit_tmr_post_handler(cmd, cmd->conn);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_REJECT:
 		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_LOGOUTRSP:
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@
 	case ISTATE_SEND_TEXTRSP:
 		atomic_dec(&isert_conn->post_send_buf_count);
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	default:
 		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+	/*
+	 * If send_wr_num is 0, the RDMA completion has already
+	 * arrived and cleared it, so only the response post needs
+	 * to be decremented.  Otherwise the response is already
+	 * included in send_wr_num, so subtract the whole count
+	 * at once.
+	 */
+	if (wr->send_wr_num)
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+	else
+		atomic_dec(&isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
-	isert_completion_put(tx_desc, isert_cmd, ib_dev);
+	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
@@ -1658,8 +1893,9 @@
 					  isert_conn, ib_dev);
 		break;
 	case ISER_IB_RDMA_WRITE:
-		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-		dump_stack();
+		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+		isert_completion_rdma_write(tx_desc, isert_cmd);
 		break;
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0, the RDMA completion has already
+		 * arrived and cleared it, so only the response post
+		 * needs to be decremented.  Otherwise the response is
+		 * already included in send_wr_num, so subtract the
+		 * whole count at once.
+		 */
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 }
 
@@ -1728,15 +1976,27 @@
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/*
+		 * If send_wr_num is 0, the RDMA completion has already
+		 * arrived and cleared it, so only the response post
+		 * needs to be decremented.  Otherwise the response is
+		 * already included in send_wr_num, so subtract the
+		 * whole count at once.
+		 */
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 	tx_desc->comp_llnode_batch = NULL;
 
 	if (!isert_cmd)
 		isert_unmap_tx_desc(tx_desc, ib_dev);
 	else
-		isert_completion_put(tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 }
 
 static void
@@ -1918,6 +2178,36 @@
 	return isert_post_response(isert_conn, isert_cmd);
 }
 
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (cmd->data_direction == DMA_TO_DEVICE)
+		iscsit_stop_dataout_timer(cmd);
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	if (device->pi_capable)
+		return TARGET_PROT_ALL;
+
+	return TARGET_PROT_NORMAL;
+}
+
 static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		bool nopout_response)
@@ -2099,54 +2389,39 @@
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_data_buf *data = &wr->data;
 	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, count, i, ib_sge_cnt;
+	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, i, ib_sge_cnt;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		offset = cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	wr->cur_rdma_length = data_left;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	data_left = data->len;
+	offset = data->offset;
 
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
 		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 	wr->ib_sge = ib_sge;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 				GFP_KERNEL);
 	if (!wr->send_wr) {
 		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 
 	wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@
 	}
 
 	return 0;
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+	isert_unmap_data_buf(isert_conn, data);
+
 	return ret;
 }
 
@@ -2232,49 +2506,70 @@
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-		  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+		  struct fast_reg_descriptor *fr_desc,
+		  struct isert_data_buf *mem,
+		  enum isert_indicator ind,
+		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_mr *mr;
+	struct ib_fast_reg_page_list *frpl;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, pagelist_len;
 	u32 page_off;
 	u8 key;
 
-	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-	page_off = offset % PAGE_SIZE;
+	if (mem->dma_nents == 1) {
+		sge->lkey = isert_conn->conn_mr->lkey;
+		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+			 __func__, __LINE__, sge->addr, sge->length,
+			 sge->lkey);
+		return 0;
+	}
+
+	if (ind == ISERT_DATA_KEY_VALID) {
+		/* Registering data buffer */
+		mr = fr_desc->data_mr;
+		frpl = fr_desc->data_frpl;
+	} else {
+		/* Registering protection buffer */
+		mr = fr_desc->pi_ctx->prot_mr;
+		frpl = fr_desc->pi_ctx->prot_frpl;
+	}
+
+	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-		 fr_desc, sg_nents, offset);
+		 fr_desc, mem->nents, mem->offset);
 
-	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
-					     &fr_desc->data_frpl->page_list[0]);
+	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+					     &frpl->page_list[0]);
 
-	if (!fr_desc->valid) {
+	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+		inv_wr.ex.invalidate_rkey = mr->rkey;
 		wr = &inv_wr;
 		/* Bump the key */
-		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+		key = (u8)(mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(mr, ++key);
 	}
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
 	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
-	fr_wr.wr.fast_reg.iova_start =
-		fr_desc->data_frpl->page_list[0] + page_off;
-	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+	fr_wr.wr.fast_reg.page_list = frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fr_wr.wr.fast_reg.length = data_len;
-	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+	fr_wr.wr.fast_reg.length = mem->len;
+	fr_wr.wr.fast_reg.rkey = mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
 	if (!wr)
@@ -2287,80 +2582,242 @@
 		pr_err("fast registration failed, ret:%d\n", ret);
 		return ret;
 	}
-	fr_desc->valid = false;
+	fr_desc->ind &= ~ind;
 
-	ib_sge->lkey = fr_desc->data_mr->lkey;
-	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-	ib_sge->length = data_len;
+	sge->lkey = mr->lkey;
+	sge->addr = frpl->page_list[0] + page_off;
+	sge->length = mem->len;
 
-	pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
-		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+	pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+		 __func__, __LINE__, sge->addr, sge->length,
+		 sge->lkey);
 
 	return ret;
 }
 
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+	switch (prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		return IB_T10DIF_NONE;
+	case TARGET_DIF_TYPE1_PROT:
+		return IB_T10DIF_TYPE1;
+	case TARGET_DIF_TYPE2_PROT:
+		return IB_T10DIF_TYPE2;
+	case TARGET_DIF_TYPE3_PROT:
+		return IB_T10DIF_TYPE3;
+	default:
+		return IB_T10DIF_NONE;
+	}
+}
+
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->mem.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+	sig_attrs->wire.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		break;
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	default:
+		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+		 struct fast_reg_descriptor *fr_desc,
+		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
+		 struct ib_sge *sig_sge)
+{
+	struct ib_send_wr sig_wr, inv_wr;
+	struct ib_send_wr *bad_wr, *wr = NULL;
+	struct pi_context *pi_ctx = fr_desc->pi_ctx;
+	struct ib_sig_attrs sig_attrs;
+	int ret;
+	u32 key;
+
+	memset(&sig_attrs, 0, sizeof(sig_attrs));
+	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+	if (ret)
+		goto err;
+
+	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.opcode = IB_WR_LOCAL_INV;
+		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+		wr = &inv_wr;
+		/* Bump the key */
+		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+	}
+
+	memset(&sig_wr, 0, sizeof(sig_wr));
+	sig_wr.opcode = IB_WR_REG_SIG_MR;
+	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+	sig_wr.sg_list = data_sge;
+	sig_wr.num_sge = 1;
+	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+	if (se_cmd->t_prot_sg)
+		sig_wr.wr.sig_handover.prot = prot_sge;
+
+	if (!wr)
+		wr = &sig_wr;
+	else
+		wr->next = &sig_wr;
+
+	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+	if (ret) {
+		pr_err("fast registration failed, ret:%d\n", ret);
+		goto err;
+	}
+	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+	sig_sge->lkey = pi_ctx->sig_mr->lkey;
+	sig_sge->addr = 0;
+	sig_sge->length = se_cmd->data_length;
+	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+		/*
+		 * We have protection guards on the wire
+		 * so we need to set a larger transfer
+		 */
+		sig_sge->length += se_cmd->prot_length;
+
+	pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
+		 sig_sge->addr, sig_sge->length,
+		 sig_sge->lkey);
+err:
+	return ret;
+}
+
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = conn->context;
+	struct ib_sge data_sge;
 	struct ib_send_wr *send_wr;
-	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	struct fast_reg_descriptor *fr_desc;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int ret = 0, count;
+	struct fast_reg_descriptor *fr_desc = NULL;
+	u32 offset;
+	int ret = 0;
 	unsigned long flags;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		offset = cmd->write_data_done;
-		sg_off = offset / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
+
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
+
+	if (wr->data.dma_nents != 1 ||
+	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 	}
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+				ISERT_DATA_KEY_VALID, &data_sge);
+	if (ret)
+		goto unmap_cmd;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		struct ib_sge prot_sge, sig_sge;
 
-	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-	ib_sge = &wr->s_ib_sge;
-	wr->ib_sge = ib_sge;
+		if (se_cmd->t_prot_sg) {
+			ret = isert_map_data_buf(isert_conn, isert_cmd,
+						 se_cmd->t_prot_sg,
+						 se_cmd->t_prot_nents,
+						 se_cmd->prot_length,
+						 0, wr->iser_ib_op, &wr->prot);
+			if (ret)
+				goto unmap_cmd;
 
+			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+						ISERT_PROT_KEY_VALID, &prot_sge);
+			if (ret)
+				goto unmap_prot_cmd;
+		}
+
+		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+				       &data_sge, &prot_sge, &sig_sge);
+		if (ret)
+			goto unmap_prot_cmd;
+
+		fr_desc->ind |= ISERT_PROTECTED;
+		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+	} else
+		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
+	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
 	wr->send_wr = &wr->s_send_wr;
-
 	wr->isert_cmd = isert_cmd;
-	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
-	send_wr->sg_list = ib_sge;
+	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-		send_wr->send_flags = 0;
-		send_wr->next = &isert_cmd->tx_desc.send_wr;
+		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+				      0 : IB_SEND_SIGNALED;
 	} else {
 		send_wr->opcode = IB_WR_RDMA_READ;
 		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@
 		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	data_len = min(data_left, rdma_write_max);
-	wr->cur_rdma_length = data_len;
-
-	/* if there is a single dma entry, dma mr is sufficient */
-	if (count == 1) {
-		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-		ib_sge->lkey = isert_conn->conn_mr->lkey;
-		wr->fr_desc = NULL;
-	} else {
-		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
-		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-		wr->fr_desc = fr_desc;
-
-		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-					ib_sge, sg_nents, offset, data_len);
-		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-			goto unmap_sg;
-		}
-	}
-
 	return 0;
+unmap_prot_cmd:
+	if (se_cmd->t_prot_sg)
+		isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+	if (fr_desc) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+	}
+	isert_unmap_data_buf(isert_conn, &wr->data);
 
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	return ret;
 }
 
@@ -2422,25 +2860,35 @@
 		return rc;
 	}
 
-	/*
-	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
-	 */
-	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-			     &isert_cmd->tx_desc.iscsi_header);
-	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd,
-			   &isert_cmd->tx_desc.send_wr, true);
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+		/*
+		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
+		 */
+		isert_create_send_desc(isert_conn, isert_cmd,
+				       &isert_cmd->tx_desc);
+		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+				     &isert_cmd->tx_desc.iscsi_header);
+		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+		isert_init_send_wr(isert_conn, isert_cmd,
+				   &isert_cmd->tx_desc.send_wr, true);
+		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+		wr->send_wr_num += 1;
+	}
 
-	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
-	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-		 isert_cmd);
+
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+			 "READ\n", isert_cmd);
+	else
+		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+			 isert_cmd);
 
 	return 1;
 }
@@ -2815,6 +3263,8 @@
 	.iscsit_get_dataout	= isert_get_dataout,
 	.iscsit_queue_data_in	= isert_put_datain,
 	.iscsit_queue_status	= isert_put_response,
+	.iscsit_aborted_task	= isert_aborted_task,
+	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
 static int __init isert_init(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5..4c072ae 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@
 	struct ib_send_wr send_wr;
 } __packed;
 
+enum isert_indicator {
+	ISERT_PROTECTED		= 1 << 0,
+	ISERT_DATA_KEY_VALID	= 1 << 1,
+	ISERT_PROT_KEY_VALID	= 1 << 2,
+	ISERT_SIG_KEY_VALID	= 1 << 3,
+};
+
+struct pi_context {
+	struct ib_mr		       *prot_mr;
+	struct ib_fast_reg_page_list   *prot_frpl;
+	struct ib_mr		       *sig_mr;
+};
+
 struct fast_reg_descriptor {
-	struct list_head	list;
-	struct ib_mr		*data_mr;
-	struct ib_fast_reg_page_list	*data_frpl;
-	bool			valid;
+	struct list_head		list;
+	struct ib_mr		       *data_mr;
+	struct ib_fast_reg_page_list   *data_frpl;
+	u8				ind;
+	struct pi_context	       *pi_ctx;
+};
+
+struct isert_data_buf {
+	struct scatterlist     *sg;
+	int			nents;
+	u32			sg_off;
+	u32			len; /* cur_rdma_length */
+	u32			offset;
+	unsigned int		dma_nents;
+	enum dma_data_direction dma_dir;
 };
 
 struct isert_rdma_wr {
@@ -63,12 +87,11 @@
 	enum iser_ib_op_code	iser_ib_op;
 	struct ib_sge		*ib_sge;
 	struct ib_sge		s_ib_sge;
-	int			num_sge;
-	struct scatterlist	*sge;
 	int			send_wr_num;
 	struct ib_send_wr	*send_wr;
 	struct ib_send_wr	s_send_wr;
-	u32			cur_rdma_length;
+	struct isert_data_buf	data;
+	struct isert_data_buf	prot;
 	struct fast_reg_descriptor *fr_desc;
 };
 
@@ -141,6 +164,7 @@
 
 struct isert_device {
 	int			use_fastreg;
+	bool			pi_capable;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8..fe09f27 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@
 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 				 struct srpt_send_ioctx *ioctx)
 {
+	struct ib_device *dev = ch->sport->sdev->device;
 	struct se_cmd *cmd;
 	struct scatterlist *sg, *sg_orig;
 	int sg_cnt;
@@ -1124,7 +1125,7 @@
 
 	db = ioctx->rbufs;
 	tsize = cmd->data_length;
-	dma_len = sg_dma_len(&sg[0]);
+	dma_len = ib_sg_dma_len(dev, &sg[0]);
 	riu = ioctx->rdma_ius;
 
 	/*
@@ -1155,7 +1156,8 @@
 					++j;
 					if (j < count) {
 						sg = sg_next(sg);
-						dma_len = sg_dma_len(sg);
+						dma_len = ib_sg_dma_len(
+								dev, sg);
 					}
 				}
 			} else {
@@ -1192,8 +1194,8 @@
 	tsize = cmd->data_length;
 	riu = ioctx->rdma_ius;
 	sg = sg_orig;
-	dma_len = sg_dma_len(&sg[0]);
-	dma_addr = sg_dma_address(&sg[0]);
+	dma_len = ib_sg_dma_len(dev, &sg[0]);
+	dma_addr = ib_sg_dma_address(dev, &sg[0]);
 
 	/* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
 	for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@
 					++j;
 					if (j < count) {
 						sg = sg_next(sg);
-						dma_len = sg_dma_len(sg);
-						dma_addr = sg_dma_address(sg);
+						dma_len = ib_sg_dma_len(
+								dev, sg);
+						dma_addr = ib_sg_dma_address(
+								dev, sg);
 					}
 				}
 			} else {
@@ -2580,7 +2584,7 @@
 		goto destroy_ib;
 	}
 
-	ch->sess = transport_init_session();
+	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(ch->sess)) {
 		rej->reason = __constant_cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@
 	srpt_queue_response(cmd);
 }
 
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+	struct srpt_send_ioctx *ioctx = container_of(cmd,
+				struct srpt_send_ioctx, cmd);
+
+	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
 static int srpt_queue_status(struct se_cmd *cmd)
 {
 	struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@
 	.queue_data_in			= srpt_queue_data_in,
 	.queue_status			= srpt_queue_status,
 	.queue_tm_rsp			= srpt_queue_tm_rsp,
+	.aborted_task			= srpt_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 44c358e..6de9dfb 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -416,7 +416,7 @@
 	depends on MFD_MC13XXX
 	help
 	  This option enable support for on-chip LED drivers found
-	  on Freescale Semiconductor MC13783/MC13892 PMIC.
+	  on Freescale Semiconductor MC13783/MC13892/MC34708 PMIC.
 
 config LEDS_NS2
 	tristate "LED support for Network Space v2 GPIO LEDs"
@@ -474,7 +474,7 @@
 
 config LEDS_OT200
 	tristate "LED support for the Bachmann OT200"
-	depends on LEDS_CLASS && HAS_IOMEM
+	depends on LEDS_CLASS && HAS_IOMEM && (X86_32 || COMPILE_TEST)
 	help
 	  This option enables support for the LEDs on the Bachmann OT200.
 	  Say Y to enable LEDs on the Bachmann OT200.
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index ce8921a..71b40d3 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -39,9 +39,11 @@
 	led_cdev->blink_delay_on = delay_on;
 	led_cdev->blink_delay_off = delay_off;
 
-	/* never on - don't blink */
-	if (!delay_on)
+	/* never on - just set to off */
+	if (!delay_on) {
+		__led_set_brightness(led_cdev, LED_OFF);
 		return;
+	}
 
 	/* never off - just set to brightness */
 	if (!delay_off) {
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e387f41..c3734f1 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -220,9 +219,12 @@
 {
 	struct led_classdev *led_cdev;
 
+	if (list_empty_careful(&trig->next_trig))
+		return;
+
 	/* Remove from the list of led triggers */
 	down_write(&triggers_list_lock);
-	list_del(&trig->next_trig);
+	list_del_init(&trig->next_trig);
 	up_write(&triggers_list_lock);
 
 	/* Remove anyone actively using this trigger */
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 5f588c0..d1e1bca 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 7e311a1..86b5bdb 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -15,7 +15,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 6de216a..70c74a7 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index 66d0a57..d0452b0 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
 #include <linux/i2c.h>
@@ -444,7 +443,7 @@
 {
 	int ret;
 	struct blinkm_led *led;
-	struct blinkm_data *data ;
+	struct blinkm_data *data;
 	struct blinkm_work *blm_work = work_to_blmwork(work);
 
 	led = blm_work->blinkm_led;
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index d93e245..f58a354 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -19,7 +19,7 @@
 MODULE_DESCRIPTION("Clevo mail LED driver");
 MODULE_LICENSE("GPL");
 
-static bool __initdata nodetect;
+static bool nodetect;
 module_param_named(nodetect, nodetect, bool, 0);
 MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
 
@@ -153,7 +153,7 @@
 	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-static int clevo_mail_led_probe(struct platform_device *pdev)
+static int __init clevo_mail_led_probe(struct platform_device *pdev)
 {
 	return led_classdev_register(&pdev->dev, &clevo_mail_led);
 }
@@ -165,7 +165,6 @@
 }
 
 static struct platform_driver clevo_mail_led_driver = {
-	.probe		= clevo_mail_led_probe,
 	.remove		= clevo_mail_led_remove,
 	.driver		= {
 		.name		= KBUILD_MODNAME,
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8abcb66..910339d 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -3,7 +3,6 @@
  *
  * Control the Cobalt Qube/RaQ front LED
  */
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/leds.h>
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 2a4b87f..35dffb1 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index 865d4fa..01486ad 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index b4d5a44..2b4dc73 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/module.h>
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 78b0e27..57ff20f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -11,7 +11,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
@@ -204,6 +203,9 @@
 				led.default_state = LEDS_GPIO_DEFSTATE_OFF;
 		}
 
+		if (of_get_property(child, "retain-state-suspended", NULL))
+			led.retain_state_suspended = 1;
+
 		ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
 				      &pdev->dev, NULL);
 		if (ret < 0) {
@@ -224,6 +226,8 @@
 	{ .compatible = "gpio-leds", },
 	{},
 };
+
+MODULE_DEVICE_TABLE(of, of_gpio_leds_match);
 #else /* CONFIG_OF_GPIO */
 static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
 {
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index 366b605..d61a988 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -12,7 +12,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <asm/hd64461.h>
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 027ede7..e2c642c11 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/mfd/core.h>
 #include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 2ec34cf..8ca197a 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 4ade66a..cb5ed82 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index bf006f4..ca85724 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -347,9 +346,9 @@
 /* check the size of program count */
 static inline bool _is_pc_overflow(struct lp55xx_predef_pattern *ptn)
 {
-	return (ptn->size_r >= LP5562_PROGRAM_LENGTH ||
-		ptn->size_g >= LP5562_PROGRAM_LENGTH ||
-		ptn->size_b >= LP5562_PROGRAM_LENGTH);
+	return ptn->size_r >= LP5562_PROGRAM_LENGTH ||
+	       ptn->size_g >= LP5562_PROGRAM_LENGTH ||
+	       ptn->size_b >= LP5562_PROGRAM_LENGTH;
 }
 
 static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 3417e5b..059f5b1 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index ca87a1b..f1db88e 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -1,5 +1,5 @@
 /*
- * LEDs driver for Freescale MC13783/MC13892
+ * LEDs driver for Freescale MC13783/MC13892/MC34708
  *
  * Copyright (C) 2010 Philippe Rétornaz
  *
@@ -17,57 +17,56 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
+#include <linux/of.h>
 #include <linux/workqueue.h>
 #include <linux/mfd/mc13xxx.h>
 
-#define MC13XXX_REG_LED_CONTROL(x)	(51 + (x))
-
 struct mc13xxx_led_devtype {
 	int	led_min;
 	int	led_max;
 	int	num_regs;
+	u32	ledctrl_base;
 };
 
 struct mc13xxx_led {
 	struct led_classdev	cdev;
 	struct work_struct	work;
-	struct mc13xxx		*master;
 	enum led_brightness	new_brightness;
 	int			id;
+	struct mc13xxx_leds	*leds;
 };
 
 struct mc13xxx_leds {
+	struct mc13xxx			*master;
 	struct mc13xxx_led_devtype	*devtype;
 	int				num_leds;
-	struct mc13xxx_led		led[0];
+	struct mc13xxx_led		*led;
 };
 
+static unsigned int mc13xxx_max_brightness(int id)
+{
+	if (id >= MC13783_LED_MD && id <= MC13783_LED_KP)
+		return 0x0f;
+	else if (id >= MC13783_LED_R1 && id <= MC13783_LED_B3)
+		return 0x1f;
+
+	return 0x3f;
+}
+
 static void mc13xxx_led_work(struct work_struct *work)
 {
 	struct mc13xxx_led *led = container_of(work, struct mc13xxx_led, work);
-	int reg, mask, value, bank, off, shift;
+	struct mc13xxx_leds *leds = led->leds;
+	unsigned int reg, bank, off, shift;
 
 	switch (led->id) {
 	case MC13783_LED_MD:
-		reg = MC13XXX_REG_LED_CONTROL(2);
-		shift = 9;
-		mask = 0x0f;
-		value = led->new_brightness >> 4;
-		break;
 	case MC13783_LED_AD:
-		reg = MC13XXX_REG_LED_CONTROL(2);
-		shift = 13;
-		mask = 0x0f;
-		value = led->new_brightness >> 4;
-		break;
 	case MC13783_LED_KP:
-		reg = MC13XXX_REG_LED_CONTROL(2);
-		shift = 17;
-		mask = 0x0f;
-		value = led->new_brightness >> 4;
+		reg = 2;
+		shift = 9 + (led->id - MC13783_LED_MD) * 4;
 		break;
 	case MC13783_LED_R1:
 	case MC13783_LED_G1:
@@ -80,44 +79,35 @@
 	case MC13783_LED_B3:
 		off = led->id - MC13783_LED_R1;
 		bank = off / 3;
-		reg = MC13XXX_REG_LED_CONTROL(3) + bank;
+		reg = 3 + bank;
 		shift = (off - bank * 3) * 5 + 6;
-		value = led->new_brightness >> 3;
-		mask = 0x1f;
 		break;
 	case MC13892_LED_MD:
-		reg = MC13XXX_REG_LED_CONTROL(0);
-		shift = 3;
-		mask = 0x3f;
-		value = led->new_brightness >> 2;
-		break;
 	case MC13892_LED_AD:
-		reg = MC13XXX_REG_LED_CONTROL(0);
-		shift = 15;
-		mask = 0x3f;
-		value = led->new_brightness >> 2;
-		break;
 	case MC13892_LED_KP:
-		reg = MC13XXX_REG_LED_CONTROL(1);
-		shift = 3;
-		mask = 0x3f;
-		value = led->new_brightness >> 2;
+		reg = (led->id - MC13892_LED_MD) / 2;
+		shift = 3 + (led->id - MC13892_LED_MD) * 12;
 		break;
 	case MC13892_LED_R:
 	case MC13892_LED_G:
 	case MC13892_LED_B:
 		off = led->id - MC13892_LED_R;
 		bank = off / 2;
-		reg = MC13XXX_REG_LED_CONTROL(2) + bank;
+		reg = 2 + bank;
 		shift = (off - bank * 2) * 12 + 3;
-		value = led->new_brightness >> 2;
-		mask = 0x3f;
+		break;
+	case MC34708_LED_R:
+	case MC34708_LED_G:
+		reg = 0;
+		shift = 3 + (led->id - MC34708_LED_R) * 12;
 		break;
 	default:
 		BUG();
 	}
 
-	mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
+	mc13xxx_reg_rmw(leds->master, leds->devtype->ledctrl_base + reg,
+			mc13xxx_max_brightness(led->id) << shift,
+			led->new_brightness << shift);
 }
 
 static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -130,47 +120,121 @@
 	schedule_work(&led->work);
 }
 
+#ifdef CONFIG_OF
+static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+	struct platform_device *pdev)
+{
+	struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
+	struct mc13xxx_leds_platform_data *pdata;
+	struct device_node *parent, *child;
+	struct device *dev = &pdev->dev;
+	int i = 0, ret = -ENODATA;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	of_node_get(dev->parent->of_node);
+
+	parent = of_find_node_by_name(dev->parent->of_node, "leds");
+	if (!parent)
+		goto out_node_put;
+
+	ret = of_property_read_u32_array(parent, "led-control",
+					 pdata->led_control,
+					 leds->devtype->num_regs);
+	if (ret)
+		goto out_node_put;
+
+	pdata->num_leds = of_get_child_count(parent);
+
+	pdata->led = devm_kzalloc(dev, pdata->num_leds * sizeof(*pdata->led),
+				  GFP_KERNEL);
+	if (!pdata->led) {
+		ret = -ENOMEM;
+		goto out_node_put;
+	}
+
+	for_each_child_of_node(parent, child) {
+		const char *str;
+		u32 tmp;
+
+		if (of_property_read_u32(child, "reg", &tmp))
+			continue;
+		pdata->led[i].id = leds->devtype->led_min + tmp;
+
+		if (!of_property_read_string(child, "label", &str))
+			pdata->led[i].name = str;
+		if (!of_property_read_string(child, "linux,default-trigger",
+					     &str))
+			pdata->led[i].default_trigger = str;
+
+		i++;
+	}
+
+	pdata->num_leds = i;
+	ret = i > 0 ? 0 : -ENODATA;
+
+out_node_put:
+	of_node_put(parent);
+
+	return ret ? ERR_PTR(ret) : pdata;
+}
+#else
+static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+	struct platform_device *pdev)
+{
+	return ERR_PTR(-ENOSYS);
+}
+#endif
+
 static int __init mc13xxx_led_probe(struct platform_device *pdev)
 {
-	struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
+	struct device *dev = &pdev->dev;
+	struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(dev);
+	struct mc13xxx *mcdev = dev_get_drvdata(dev->parent);
 	struct mc13xxx_led_devtype *devtype =
 		(struct mc13xxx_led_devtype *)pdev->id_entry->driver_data;
 	struct mc13xxx_leds *leds;
-	int i, id, num_leds, ret = -ENODATA;
-	u32 reg, init_led = 0;
+	int i, id, ret = -ENODATA;
+	u32 init_led = 0;
 
-	if (!pdata) {
-		dev_err(&pdev->dev, "Missing platform data\n");
-		return -ENODEV;
-	}
-
-	num_leds = pdata->num_leds;
-
-	if ((num_leds < 1) ||
-	    (num_leds > (devtype->led_max - devtype->led_min + 1))) {
-		dev_err(&pdev->dev, "Invalid LED count %d\n", num_leds);
-		return -EINVAL;
-	}
-
-	leds = devm_kzalloc(&pdev->dev, num_leds * sizeof(struct mc13xxx_led) +
-			    sizeof(struct mc13xxx_leds), GFP_KERNEL);
+	leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
 	if (!leds)
 		return -ENOMEM;
 
 	leds->devtype = devtype;
-	leds->num_leds = num_leds;
+	leds->master = mcdev;
 	platform_set_drvdata(pdev, leds);
 
+	if (dev->parent->of_node) {
+		pdata = mc13xxx_led_probe_dt(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+	} else if (!pdata)
+		return -ENODATA;
+
+	leds->num_leds = pdata->num_leds;
+
+	if ((leds->num_leds < 1) ||
+	    (leds->num_leds > (devtype->led_max - devtype->led_min + 1))) {
+		dev_err(dev, "Invalid LED count %d\n", leds->num_leds);
+		return -EINVAL;
+	}
+
+	leds->led = devm_kzalloc(dev, leds->num_leds * sizeof(*leds->led),
+				 GFP_KERNEL);
+	if (!leds->led)
+		return -ENOMEM;
+
 	for (i = 0; i < devtype->num_regs; i++) {
-		reg = pdata->led_control[i];
-		WARN_ON(reg >= (1 << 24));
-		ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
+		ret = mc13xxx_reg_write(mcdev, leds->devtype->ledctrl_base + i,
+					pdata->led_control[i]);
 		if (ret)
 			return ret;
 	}
 
-	for (i = 0; i < num_leds; i++) {
+	for (i = 0; i < leds->num_leds; i++) {
 		const char *name, *trig;
 
 		ret = -EINVAL;
@@ -180,30 +244,29 @@
 		trig = pdata->led[i].default_trigger;
 
 		if ((id > devtype->led_max) || (id < devtype->led_min)) {
-			dev_err(&pdev->dev, "Invalid ID %i\n", id);
+			dev_err(dev, "Invalid ID %i\n", id);
 			break;
 		}
 
 		if (init_led & (1 << id)) {
-			dev_warn(&pdev->dev,
-				 "LED %i already initialized\n", id);
+			dev_warn(dev, "LED %i already initialized\n", id);
 			break;
 		}
 
 		init_led |= 1 << id;
 		leds->led[i].id = id;
-		leds->led[i].master = mcdev;
+		leds->led[i].leds = leds;
 		leds->led[i].cdev.name = name;
 		leds->led[i].cdev.default_trigger = trig;
+		leds->led[i].cdev.flags = LED_CORE_SUSPENDRESUME;
 		leds->led[i].cdev.brightness_set = mc13xxx_led_set;
-		leds->led[i].cdev.brightness = LED_OFF;
+		leds->led[i].cdev.max_brightness = mc13xxx_max_brightness(id);
 
 		INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
 
-		ret = led_classdev_register(pdev->dev.parent,
-					    &leds->led[i].cdev);
+		ret = led_classdev_register(dev->parent, &leds->led[i].cdev);
 		if (ret) {
-			dev_err(&pdev->dev, "Failed to register LED %i\n", id);
+			dev_err(dev, "Failed to register LED %i\n", id);
 			break;
 		}
 	}
@@ -219,7 +282,6 @@
 
 static int mc13xxx_led_remove(struct platform_device *pdev)
 {
-	struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
 	struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
 	int i;
 
@@ -228,9 +290,6 @@
 		cancel_work_sync(&leds->led[i].work);
 	}
 
-	for (i = 0; i < leds->devtype->num_regs; i++)
-		mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
-
 	return 0;
 }
 
@@ -238,17 +297,27 @@
 	.led_min	= MC13783_LED_MD,
 	.led_max	= MC13783_LED_B3,
 	.num_regs	= 6,
+	.ledctrl_base	= 51,
 };
 
 static const struct mc13xxx_led_devtype mc13892_led_devtype = {
 	.led_min	= MC13892_LED_MD,
 	.led_max	= MC13892_LED_B,
 	.num_regs	= 4,
+	.ledctrl_base	= 51,
+};
+
+static const struct mc13xxx_led_devtype mc34708_led_devtype = {
+	.led_min	= MC34708_LED_R,
+	.led_max	= MC34708_LED_G,
+	.num_regs	= 1,
+	.ledctrl_base	= 54,
 };
 
 static const struct platform_device_id mc13xxx_led_id_table[] = {
 	{ "mc13783-led", (kernel_ulong_t)&mc13783_led_devtype, },
 	{ "mc13892-led", (kernel_ulong_t)&mc13892_led_devtype, },
+	{ "mc34708-led", (kernel_ulong_t)&mc34708_led_devtype, },
 	{ }
 };
 MODULE_DEVICE_TABLE(platform, mc13xxx_led_id_table);
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 2f9f141..e97f443 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index c7a4230..efa6258 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -23,7 +23,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index 98cae52..c9d9060 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/leds.h>
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 6050474..7d0aaed 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/fb.h>
@@ -84,6 +83,15 @@
 		      (sizeof(struct led_pwm_data) * num_leds);
 }
 
+static void led_pwm_cleanup(struct led_pwm_priv *priv)
+{
+	while (priv->num_leds--) {
+		led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+		if (priv->leds[priv->num_leds].can_sleep)
+			cancel_work_sync(&priv->leds[priv->num_leds].work);
+	}
+}
+
 static int led_pwm_create_of(struct platform_device *pdev,
 			     struct led_pwm_priv *priv)
 {
@@ -131,8 +139,7 @@
 
 	return 0;
 err:
-	while (priv->num_leds--)
-		led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+	led_pwm_cleanup(priv);
 
 	return ret;
 }
@@ -200,8 +207,8 @@
 	return 0;
 
 err:
-	while (i--)
-		led_classdev_unregister(&priv->leds[i].cdev);
+	priv->num_leds = i;
+	led_pwm_cleanup(priv);
 
 	return ret;
 }
@@ -209,13 +216,8 @@
 static int led_pwm_remove(struct platform_device *pdev)
 {
 	struct led_pwm_priv *priv = platform_get_drvdata(pdev);
-	int i;
 
-	for (i = 0; i < priv->num_leds; i++) {
-		led_classdev_unregister(&priv->leds[i].cdev);
-		if (priv->leds[i].can_sleep)
-			cancel_work_sync(&priv->leds[i].work);
-	}
+	led_pwm_cleanup(priv);
 
 	return 0;
 }
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 98174e7..28988b7 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -12,7 +12,6 @@
 */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/gpio.h>
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 5b8f938..2eb3ef6 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,7 +63,7 @@
 /*
  * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
  */
-static DEFINE_PCI_DEVICE_TABLE(ich7_lpc_pci_id) = {
+static const struct pci_device_id ich7_lpc_pci_id[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
@@ -78,7 +78,7 @@
 	return 1;
 }
 
-static bool __initdata nodetect;
+static bool nodetect;
 module_param_named(nodetect, nodetect, bool, 0);
 MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
 
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 0a1a13f..e72c974 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/leds.h>
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 3f75fd2..4133ffe 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/err.h>
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 118335e..1c3ee9f 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -26,6 +26,7 @@
 #include <linux/percpu.h>
 #include <linux/syscore_ops.h>
 #include <linux/rwsem.h>
+#include <linux/cpu.h>
 #include "../leds.h"
 
 #define MAX_NAME_LEN	8
@@ -92,6 +93,26 @@
 	.resume		= ledtrig_cpu_syscore_resume,
 };
 
+static int ledtrig_cpu_notify(struct notifier_block *self,
+					   unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		ledtrig_cpu(CPU_LED_START);
+		break;
+	case CPU_DYING:
+		ledtrig_cpu(CPU_LED_STOP);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block ledtrig_cpu_nb = {
+	.notifier_call = ledtrig_cpu_notify,
+};
+
 static int __init ledtrig_cpu_init(void)
 {
 	int cpu;
@@ -113,6 +134,7 @@
 	}
 
 	register_syscore_ops(&ledtrig_cpu_syscore_ops);
+	register_cpu_notifier(&ledtrig_cpu_nb);
 
 	pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
 
@@ -124,6 +146,8 @@
 {
 	int cpu;
 
+	unregister_cpu_notifier(&ledtrig_cpu_nb);
+
 	for_each_possible_cpu(cpu) {
 		struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4195a01..9a8e66a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1988,7 +1988,6 @@
 		if (mddev->bitmap_info.file) {
 			struct file *f = mddev->bitmap_info.file;
 			mddev->bitmap_info.file = NULL;
-			restore_bitmap_write_access(f);
 			fput(f);
 		}
 	} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ad5cc4..8fda38d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5181,32 +5181,6 @@
 	return 0;
 }
 
-/* similar to deny_write_access, but accounts for our holding a reference
- * to the file ourselves */
-static int deny_bitmap_write_access(struct file * file)
-{
-	struct inode *inode = file->f_mapping->host;
-
-	spin_lock(&inode->i_lock);
-	if (atomic_read(&inode->i_writecount) > 1) {
-		spin_unlock(&inode->i_lock);
-		return -ETXTBSY;
-	}
-	atomic_set(&inode->i_writecount, -1);
-	spin_unlock(&inode->i_lock);
-
-	return 0;
-}
-
-void restore_bitmap_write_access(struct file *file)
-{
-	struct inode *inode = file->f_mapping->host;
-
-	spin_lock(&inode->i_lock);
-	atomic_set(&inode->i_writecount, 1);
-	spin_unlock(&inode->i_lock);
-}
-
 static void md_clean(struct mddev *mddev)
 {
 	mddev->array_sectors = 0;
@@ -5427,7 +5401,6 @@
 
 		bitmap_destroy(mddev);
 		if (mddev->bitmap_info.file) {
-			restore_bitmap_write_access(mddev->bitmap_info.file);
 			fput(mddev->bitmap_info.file);
 			mddev->bitmap_info.file = NULL;
 		}
@@ -5979,7 +5952,7 @@
 
 static int set_bitmap_file(struct mddev *mddev, int fd)
 {
-	int err;
+	int err = 0;
 
 	if (mddev->pers) {
 		if (!mddev->pers->quiesce)
@@ -5991,6 +5964,7 @@
 
 
 	if (fd >= 0) {
+		struct inode *inode;
 		if (mddev->bitmap)
 			return -EEXIST; /* cannot add when bitmap is present */
 		mddev->bitmap_info.file = fget(fd);
@@ -6001,10 +5975,21 @@
 			return -EBADF;
 		}
 
-		err = deny_bitmap_write_access(mddev->bitmap_info.file);
-		if (err) {
+		inode = mddev->bitmap_info.file->f_mapping->host;
+		if (!S_ISREG(inode->i_mode)) {
+			printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
+			       mdname(mddev));
+			err = -EBADF;
+		} else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
+			printk(KERN_ERR "%s: error: bitmap file must be open for write\n",
+			       mdname(mddev));
+			err = -EBADF;
+		} else if (atomic_read(&inode->i_writecount) != 1) {
 			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
 			       mdname(mddev));
+			err = -EBUSY;
+		}
+		if (err) {
 			fput(mddev->bitmap_info.file);
 			mddev->bitmap_info.file = NULL;
 			return err;
@@ -6027,10 +6012,8 @@
 		mddev->pers->quiesce(mddev, 0);
 	}
 	if (fd < 0) {
-		if (mddev->bitmap_info.file) {
-			restore_bitmap_write_access(mddev->bitmap_info.file);
+		if (mddev->bitmap_info.file)
 			fput(mddev->bitmap_info.file);
-		}
 		mddev->bitmap_info.file = NULL;
 	}
 
@@ -7182,11 +7165,14 @@
 	return error;
 }
 
+static int md_unloading;
 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
 {
 	struct seq_file *seq = filp->private_data;
 	int mask;
 
+	if (md_unloading)
+		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
 	poll_wait(filp, &md_event_waiters, wait);
 
 	/* always allow read */
@@ -8672,6 +8658,7 @@
 {
 	struct mddev *mddev;
 	struct list_head *tmp;
+	int delay = 1;
 
 	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
 	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
@@ -8680,7 +8667,19 @@
 	unregister_blkdev(mdp_major, "mdp");
 	unregister_reboot_notifier(&md_notifier);
 	unregister_sysctl_table(raid_table_header);
+
+	/* We cannot unload the modules while some process is
+	 * waiting for us in select() or poll() - wake them up
+	 */
+	md_unloading = 1;
+	while (waitqueue_active(&md_event_waiters)) {
+		/* not safe to leave yet */
+		wake_up(&md_event_waiters);
+		msleep(delay);
+		delay += delay;
+	}
 	remove_proc_entry("mdstat", NULL);
+
 	for_each_mddev(mddev, tmp) {
 		export_array(mddev);
 		mddev->hold_active = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 07bba96..a49d991 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,7 +605,6 @@
 extern int md_integrity_register(struct mddev *mddev);
 extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
-extern void restore_bitmap_write_access(struct file *file);
 
 extern void mddev_init(struct mddev *mddev);
 extern int md_run(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a6ca1c..56e24c0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -97,6 +97,7 @@
 	struct pool_info *pi = data;
 	struct r1bio *r1_bio;
 	struct bio *bio;
+	int need_pages;
 	int i, j;
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@
 	 * RESYNC_PAGES for each bio.
 	 */
 	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
-		j = pi->raid_disks;
+		need_pages = pi->raid_disks;
 	else
-		j = 1;
-	while(j--) {
+		need_pages = 1;
+	for (j = 0; j < need_pages; j++) {
 		bio = r1_bio->bios[j];
 		bio->bi_vcnt = RESYNC_PAGES;
 
 		if (bio_alloc_pages(bio, gfp_flags))
-			goto out_free_bio;
+			goto out_free_pages;
 	}
 	/* If not user-requests, copy the page pointers to all bios */
 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@
 
 	return r1_bio;
 
+out_free_pages:
+	while (--j >= 0) {
+		struct bio_vec *bv;
+
+		bio_for_each_segment_all(bv, r1_bio->bios[j], i)
+			__free_page(bv->bv_page);
+	}
+
 out_free_bio:
 	while (++j < pi->raid_disks)
 		bio_put(r1_bio->bios[j]);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 16f5c21..25247a8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -679,14 +679,9 @@
 				init_stripe(sh, sector, previous);
 				atomic_inc(&sh->count);
 			}
-		} else {
+		} else if (!atomic_inc_not_zero(&sh->count)) {
 			spin_lock(&conf->device_lock);
-			if (atomic_read(&sh->count)) {
-				BUG_ON(!list_empty(&sh->lru)
-				    && !test_bit(STRIPE_EXPANDING, &sh->state)
-				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
-					);
-			} else {
+			if (!atomic_read(&sh->count)) {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
 				BUG_ON(list_empty(&sh->lru) &&
@@ -4552,6 +4547,8 @@
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
 	int remaining;
+	DEFINE_WAIT(w);
+	bool do_prepare;
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
@@ -4575,15 +4572,18 @@
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
+	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
-		DEFINE_WAIT(w);
 		int previous;
 		int seq;
 
+		do_prepare = false;
 	retry:
 		seq = read_seqcount_begin(&conf->gen_lock);
 		previous = 0;
-		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+		if (do_prepare)
+			prepare_to_wait(&conf->wait_for_overlap, &w,
+				TASK_UNINTERRUPTIBLE);
 		if (unlikely(conf->reshape_progress != MaxSector)) {
 			/* spinlock is needed as reshape_progress may be
 			 * 64bit on a 32bit platform, and so it might be
@@ -4604,6 +4604,7 @@
 				    : logical_sector >= conf->reshape_safe) {
 					spin_unlock_irq(&conf->device_lock);
 					schedule();
+					do_prepare = true;
 					goto retry;
 				}
 			}
@@ -4640,6 +4641,7 @@
 				if (must_retry) {
 					release_stripe(sh);
 					schedule();
+					do_prepare = true;
 					goto retry;
 				}
 			}
@@ -4663,8 +4665,10 @@
 				prepare_to_wait(&conf->wait_for_overlap,
 						&w, TASK_INTERRUPTIBLE);
 				if (logical_sector >= mddev->suspend_lo &&
-				    logical_sector < mddev->suspend_hi)
+				    logical_sector < mddev->suspend_hi) {
 					schedule();
+					do_prepare = true;
+				}
 				goto retry;
 			}
 
@@ -4677,9 +4681,9 @@
 				md_wakeup_thread(mddev->thread);
 				release_stripe(sh);
 				schedule();
+				do_prepare = true;
 				goto retry;
 			}
-			finish_wait(&conf->wait_for_overlap, &w);
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((bi->bi_rw & REQ_SYNC) &&
@@ -4689,10 +4693,10 @@
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
-			finish_wait(&conf->wait_for_overlap, &w);
 			break;
 		}
 	}
+	finish_wait(&conf->wait_for_overlap, &w);
 
 	remaining = raid5_dec_bi_active_stripes(bi);
 	if (remaining == 0) {
diff --git a/drivers/media/dvb-frontends/drx39xyj/Kconfig b/drivers/media/dvb-frontends/drx39xyj/Kconfig
index 15628eb..6c2ccb6 100644
--- a/drivers/media/dvb-frontends/drx39xyj/Kconfig
+++ b/drivers/media/dvb-frontends/drx39xyj/Kconfig
@@ -1,7 +1,7 @@
 config DVB_DRX39XYJ
 	tristate "Micronas DRX-J demodulator"
 	depends on DVB_CORE && I2C
-	default m if DVB_FE_CUSTOMISE
+	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
 	  to support this frontend.
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 1d2c473..92c891a 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1176,6 +1176,7 @@
 	},
 	.i2c_gate_ctrl        = lgdt3305_i2c_gate_ctrl,
 	.init                 = lgdt3305_init,
+	.sleep                = lgdt3305_sleep,
 	.set_frontend         = lgdt3304_set_parameters,
 	.get_frontend         = lgdt3305_get_frontend,
 	.get_tune_settings    = lgdt3305_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 32cffca..d63bc9c 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -297,7 +297,7 @@
 	u8 val;
 };
 
-struct inittab m88rs2000_setup[] = {
+static struct inittab m88rs2000_setup[] = {
 	{DEMOD_WRITE, 0x9a, 0x30},
 	{DEMOD_WRITE, 0x00, 0x01},
 	{WRITE_DELAY, 0x19, 0x00},
@@ -315,7 +315,7 @@
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab m88rs2000_shutdown[] = {
+static struct inittab m88rs2000_shutdown[] = {
 	{DEMOD_WRITE, 0x9a, 0x30},
 	{DEMOD_WRITE, 0xb0, 0x00},
 	{DEMOD_WRITE, 0xf1, 0x89},
@@ -325,7 +325,7 @@
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_reset[] = {
+static struct inittab fe_reset[] = {
 	{DEMOD_WRITE, 0x00, 0x01},
 	{DEMOD_WRITE, 0x20, 0x81},
 	{DEMOD_WRITE, 0x21, 0x80},
@@ -363,7 +363,7 @@
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_trigger[] = {
+static struct inittab fe_trigger[] = {
 	{DEMOD_WRITE, 0x97, 0x04},
 	{DEMOD_WRITE, 0x99, 0x77},
 	{DEMOD_WRITE, 0x9b, 0x64},
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 7a77a5b..5c42188 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -49,8 +49,8 @@
 #define VPE_MODULE_NAME "vpe"
 
 /* minimum and maximum frame sizes */
-#define MIN_W		128
-#define MIN_H		128
+#define MIN_W		32
+#define MIN_H		32
 #define MAX_W		1920
 #define MAX_H		1080
 
@@ -887,6 +887,9 @@
 	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
 		return 0;
 
+	if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+		return 0;
+
 	return 1;
 }
 
@@ -1277,18 +1280,17 @@
 	s_buf = &s_vb->v4l2_buf;
 	d_buf = &d_vb->v4l2_buf;
 
+	d_buf->flags = s_buf->flags;
+
 	d_buf->timestamp = s_buf->timestamp;
-	d_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-	d_buf->flags |= s_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
-		d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
 		d_buf->timecode = s_buf->timecode;
-	}
+
 	d_buf->sequence = ctx->sequence;
-	d_buf->field = ctx->field;
 
 	d_q_data = &ctx->q_data[Q_DATA_DST];
 	if (d_q_data->flags & Q_DATA_INTERLACED) {
+		d_buf->field = ctx->field;
 		if (ctx->field == V4L2_FIELD_BOTTOM) {
 			ctx->sequence++;
 			ctx->field = V4L2_FIELD_TOP;
@@ -1297,6 +1299,7 @@
 			ctx->field = V4L2_FIELD_BOTTOM;
 		}
 	} else {
+		d_buf->field = V4L2_FIELD_NONE;
 		ctx->sequence++;
 	}
 
@@ -1335,8 +1338,9 @@
 {
 	strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
 	strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
-	strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
-	cap->device_caps  = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		VPE_MODULE_NAME);
+	cap->device_caps  = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
@@ -1476,6 +1480,7 @@
 		}
 	}
 
+	memset(pix->reserved, 0, sizeof(pix->reserved));
 	for (i = 0; i < pix->num_planes; i++) {
 		plane_fmt = &pix->plane_fmt[i];
 		depth = fmt->vpdma_fmt[i]->depth;
@@ -1487,6 +1492,8 @@
 
 		plane_fmt->sizeimage =
 				(pix->height * pix->width * depth) >> 3;
+
+		memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
 	}
 
 	return 0;
@@ -1717,6 +1724,16 @@
 	q_data = get_q_data(ctx, vb->vb2_queue->type);
 	num_planes = q_data->fmt->coplanar ? 2 : 1;
 
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (!(q_data->flags & Q_DATA_INTERLACED)) {
+			vb->v4l2_buf.field = V4L2_FIELD_NONE;
+		} else {
+			if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
+					vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+				return -EINVAL;
+		}
+	}
+
 	for (i = 0; i < num_planes; i++) {
 		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
 			vpe_err(ctx->dev,
@@ -1866,9 +1883,11 @@
 	s_q_data->fmt = &vpe_formats[2];
 	s_q_data->width = 1920;
 	s_q_data->height = 1080;
-	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
+	s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
 			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
-	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
+			s_q_data->height);
+	s_q_data->colorspace = V4L2_COLORSPACE_REC709;
 	s_q_data->field = V4L2_FIELD_NONE;
 	s_q_data->c_rect.left = 0;
 	s_q_data->c_rect.top = 0;
@@ -2002,7 +2021,7 @@
 	.fops		= &vpe_fops,
 	.ioctl_ops	= &vpe_ioctl_ops,
 	.minor		= -1,
-	.release	= video_device_release,
+	.release	= video_device_release_empty,
 	.vfl_dir	= VFL_DIR_M2M,
 };
 
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 579a52b..0127dd2 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -504,6 +504,18 @@
 	return ret;
 }
 
+static int img_ir_set_normal_filter(struct rc_dev *dev,
+				    struct rc_scancode_filter *sc_filter)
+{
+	return img_ir_set_filter(dev, RC_FILTER_NORMAL, sc_filter);
+}
+
+static int img_ir_set_wakeup_filter(struct rc_dev *dev,
+				    struct rc_scancode_filter *sc_filter)
+{
+	return img_ir_set_filter(dev, RC_FILTER_WAKEUP, sc_filter);
+}
+
 /**
  * img_ir_set_decoder() - Set the current decoder.
  * @priv:	IR private data.
@@ -986,7 +998,8 @@
 	rdev->map_name = RC_MAP_EMPTY;
 	rc_set_allowed_protocols(rdev, img_ir_allowed_protos(priv));
 	rdev->input_name = "IMG Infrared Decoder";
-	rdev->s_filter = img_ir_set_filter;
+	rdev->s_filter = img_ir_set_normal_filter;
+	rdev->s_wakeup_filter = img_ir_set_wakeup_filter;
 
 	/* Register hardware decoder */
 	error = rc_register_device(rdev);
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index e7a731b..751d9d9 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -5,6 +5,7 @@
  */
 
 #include "img-ir-hw.h"
+#include <linux/bitrev.h>
 
 /* Convert NEC data to a scancode */
 static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
@@ -22,11 +23,11 @@
 	data_inv = (raw >> 24) & 0xff;
 	if ((data_inv ^ data) != 0xff) {
 		/* 32-bit NEC (used by Apple and TiVo remotes) */
-		/* scan encoding: aaAAddDD */
-		*scancode = addr_inv << 24 |
-			    addr     << 16 |
-			    data_inv <<  8 |
-			    data;
+		/* scan encoding: as transmitted, MSBit = first received bit */
+		*scancode = bitrev8(addr)     << 24 |
+			    bitrev8(addr_inv) << 16 |
+			    bitrev8(data)     <<  8 |
+			    bitrev8(data_inv);
 	} else if ((addr_inv ^ addr) != 0xff) {
 		/* Extended NEC */
 		/* scan encoding: AAaaDD */
@@ -54,13 +55,15 @@
 
 	if ((in->data | in->mask) & 0xff000000) {
 		/* 32-bit NEC (used by Apple and TiVo remotes) */
-		/* scan encoding: aaAAddDD */
-		addr_inv   = (in->data >> 24) & 0xff;
-		addr_inv_m = (in->mask >> 24) & 0xff;
-		addr       = (in->data >> 16) & 0xff;
-		addr_m     = (in->mask >> 16) & 0xff;
-		data_inv   = (in->data >>  8) & 0xff;
-		data_inv_m = (in->mask >>  8) & 0xff;
+		/* scan encoding: as transmitted, MSBit = first received bit */
+		addr       = bitrev8(in->data >> 24);
+		addr_m     = bitrev8(in->mask >> 24);
+		addr_inv   = bitrev8(in->data >> 16);
+		addr_inv_m = bitrev8(in->mask >> 16);
+		data       = bitrev8(in->data >>  8);
+		data_m     = bitrev8(in->mask >>  8);
+		data_inv   = bitrev8(in->data >>  0);
+		data_inv_m = bitrev8(in->mask >>  0);
 	} else if ((in->data | in->mask) & 0x00ff0000) {
 		/* Extended NEC */
 		/* scan encoding AAaaDD */
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 9de1791..35c42e5 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -172,10 +172,7 @@
 		if (send_32bits) {
 			/* NEC transport, but modified protocol, used by at
 			 * least Apple and TiVo remotes */
-			scancode = not_address << 24 |
-				   address     << 16 |
-				   not_command <<  8 |
-				   command;
+			scancode = data->bits;
 			IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
 		} else if ((address ^ not_address) != 0xff) {
 			/* Extended NEC */
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 5cc1b45..454e062 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -15,62 +15,62 @@
  * Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
  * which also ships with a TiVo-branded IR transceiver, supported by the mceusb
  * driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0x3085, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
  * NEC extended checksums do pass, so the table presently has the intended
  * values and the checksum-passed versions for those keys.
  */
 static struct rc_map_table tivo[] = {
-	{ 0x3085f009, KEY_MEDIA },	/* TiVo Button */
-	{ 0x3085e010, KEY_POWER2 },	/* TV Power */
-	{ 0x3085e011, KEY_TV },		/* Live TV/Swap */
-	{ 0x3085c034, KEY_VIDEO_NEXT },	/* TV Input */
-	{ 0x3085e013, KEY_INFO },
-	{ 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
+	{ 0xa10c900f, KEY_MEDIA },	/* TiVo Button */
+	{ 0xa10c0807, KEY_POWER2 },	/* TV Power */
+	{ 0xa10c8807, KEY_TV },		/* Live TV/Swap */
+	{ 0xa10c2c03, KEY_VIDEO_NEXT },	/* TV Input */
+	{ 0xa10cc807, KEY_INFO },
+	{ 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
 	{ 0x0085305f, KEY_CYCLEWINDOWS },
-	{ 0x3085c036, KEY_EPG },	/* Guide */
+	{ 0xa10c6c03, KEY_EPG },	/* Guide */
 
-	{ 0x3085e014, KEY_UP },
-	{ 0x3085e016, KEY_DOWN },
-	{ 0x3085e017, KEY_LEFT },
-	{ 0x3085e015, KEY_RIGHT },
+	{ 0xa10c2807, KEY_UP },
+	{ 0xa10c6807, KEY_DOWN },
+	{ 0xa10ce807, KEY_LEFT },
+	{ 0xa10ca807, KEY_RIGHT },
 
-	{ 0x3085e018, KEY_SCROLLDOWN },	/* Red Thumbs Down */
-	{ 0x3085e019, KEY_SELECT },
-	{ 0x3085e01a, KEY_SCROLLUP },	/* Green Thumbs Up */
+	{ 0xa10c1807, KEY_SCROLLDOWN },	/* Red Thumbs Down */
+	{ 0xa10c9807, KEY_SELECT },
+	{ 0xa10c5807, KEY_SCROLLUP },	/* Green Thumbs Up */
 
-	{ 0x3085e01c, KEY_VOLUMEUP },
-	{ 0x3085e01d, KEY_VOLUMEDOWN },
-	{ 0x3085e01b, KEY_MUTE },
-	{ 0x3085d020, KEY_RECORD },
-	{ 0x3085e01e, KEY_CHANNELUP },
-	{ 0x3085e01f, KEY_CHANNELDOWN },
+	{ 0xa10c3807, KEY_VOLUMEUP },
+	{ 0xa10cb807, KEY_VOLUMEDOWN },
+	{ 0xa10cd807, KEY_MUTE },
+	{ 0xa10c040b, KEY_RECORD },
+	{ 0xa10c7807, KEY_CHANNELUP },
+	{ 0xa10cf807, KEY_CHANNELDOWN },
 	{ 0x0085301f, KEY_CHANNELDOWN },
 
-	{ 0x3085d021, KEY_PLAY },
-	{ 0x3085d023, KEY_PAUSE },
-	{ 0x3085d025, KEY_SLOW },
-	{ 0x3085d022, KEY_REWIND },
-	{ 0x3085d024, KEY_FASTFORWARD },
-	{ 0x3085d026, KEY_PREVIOUS },
-	{ 0x3085d027, KEY_NEXT },	/* ->| */
+	{ 0xa10c840b, KEY_PLAY },
+	{ 0xa10cc40b, KEY_PAUSE },
+	{ 0xa10ca40b, KEY_SLOW },
+	{ 0xa10c440b, KEY_REWIND },
+	{ 0xa10c240b, KEY_FASTFORWARD },
+	{ 0xa10c640b, KEY_PREVIOUS },
+	{ 0xa10ce40b, KEY_NEXT },	/* ->| */
 
-	{ 0x3085b044, KEY_ZOOM },	/* Aspect */
-	{ 0x3085b048, KEY_STOP },
-	{ 0x3085b04a, KEY_DVD },	/* DVD Menu */
+	{ 0xa10c220d, KEY_ZOOM },	/* Aspect */
+	{ 0xa10c120d, KEY_STOP },
+	{ 0xa10c520d, KEY_DVD },	/* DVD Menu */
 
-	{ 0x3085d028, KEY_NUMERIC_1 },
-	{ 0x3085d029, KEY_NUMERIC_2 },
-	{ 0x3085d02a, KEY_NUMERIC_3 },
-	{ 0x3085d02b, KEY_NUMERIC_4 },
-	{ 0x3085d02c, KEY_NUMERIC_5 },
-	{ 0x3085d02d, KEY_NUMERIC_6 },
-	{ 0x3085d02e, KEY_NUMERIC_7 },
-	{ 0x3085d02f, KEY_NUMERIC_8 },
+	{ 0xa10c140b, KEY_NUMERIC_1 },
+	{ 0xa10c940b, KEY_NUMERIC_2 },
+	{ 0xa10c540b, KEY_NUMERIC_3 },
+	{ 0xa10cd40b, KEY_NUMERIC_4 },
+	{ 0xa10c340b, KEY_NUMERIC_5 },
+	{ 0xa10cb40b, KEY_NUMERIC_6 },
+	{ 0xa10c740b, KEY_NUMERIC_7 },
+	{ 0xa10cf40b, KEY_NUMERIC_8 },
 	{ 0x0085302f, KEY_NUMERIC_8 },
-	{ 0x3085c030, KEY_NUMERIC_9 },
-	{ 0x3085c031, KEY_NUMERIC_0 },
-	{ 0x3085c033, KEY_ENTER },
-	{ 0x3085c032, KEY_CLEAR },
+	{ 0xa10c0c03, KEY_NUMERIC_9 },
+	{ 0xa10c8c03, KEY_NUMERIC_0 },
+	{ 0xa10ccc03, KEY_ENTER },
+	{ 0xa10c4c03, KEY_CLEAR },
 };
 
 static struct rc_map_list tivo_map = {
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 99697aa..970b93d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -633,19 +633,13 @@
 static void ir_do_keydown(struct rc_dev *dev, int scancode,
 			  u32 keycode, u8 toggle)
 {
-	struct rc_scancode_filter *filter;
-	bool new_event = !dev->keypressed ||
-			 dev->last_scancode != scancode ||
-			 dev->last_toggle != toggle;
+	bool new_event = (!dev->keypressed		 ||
+			  dev->last_scancode != scancode ||
+			  dev->last_toggle != toggle);
 
 	if (new_event && dev->keypressed)
 		ir_do_keyup(dev, false);
 
-	/* Generic scancode filtering */
-	filter = &dev->scancode_filters[RC_FILTER_NORMAL];
-	if (filter->mask && ((scancode ^ filter->data) & filter->mask))
-		return;
-
 	input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
 
 	if (new_event && keycode != KEY_RESERVED) {
@@ -923,6 +917,7 @@
 	int rc, i, count = 0;
 	ssize_t ret;
 	int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
+	int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
 	struct rc_scancode_filter local_filter, *filter;
 
 	/* Device is being removed */
@@ -1007,24 +1002,23 @@
 	 * Fall back to clearing the filter.
 	 */
 	filter = &dev->scancode_filters[fattr->type];
-	if (old_type != type && filter->mask) {
+	set_filter = (fattr->type == RC_FILTER_NORMAL)
+		? dev->s_filter : dev->s_wakeup_filter;
+
+	if (set_filter && old_type != type && filter->mask) {
 		local_filter = *filter;
 		if (!type) {
 			/* no protocol => clear filter */
 			ret = -1;
-		} else if (!dev->s_filter) {
-			/* generic filtering => accept any filter */
-			ret = 0;
 		} else {
 			/* hardware filtering => try setting, otherwise clear */
-			ret = dev->s_filter(dev, fattr->type, &local_filter);
+			ret = set_filter(dev, &local_filter);
 		}
 		if (ret < 0) {
 			/* clear the filter */
 			local_filter.data = 0;
 			local_filter.mask = 0;
-			if (dev->s_filter)
-				dev->s_filter(dev, fattr->type, &local_filter);
+			set_filter(dev, &local_filter);
 		}
 
 		/* commit the new filter */
@@ -1068,7 +1062,10 @@
 		return -EINVAL;
 
 	mutex_lock(&dev->lock);
-	if (fattr->mask)
+	if ((fattr->type == RC_FILTER_NORMAL && !dev->s_filter) ||
+	    (fattr->type == RC_FILTER_WAKEUP && !dev->s_wakeup_filter))
+		val = 0;
+	else if (fattr->mask)
 		val = dev->scancode_filters[fattr->type].mask;
 	else
 		val = dev->scancode_filters[fattr->type].data;
@@ -1106,6 +1103,7 @@
 	struct rc_scancode_filter local_filter, *filter;
 	int ret;
 	unsigned long val;
+	int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
 
 	/* Device is being removed */
 	if (!dev)
@@ -1115,9 +1113,11 @@
 	if (ret < 0)
 		return ret;
 
-	/* Scancode filter not supported (but still accept 0) */
-	if (!dev->s_filter && fattr->type != RC_FILTER_NORMAL)
-		return val ? -EINVAL : count;
+	/* Can the scancode filter be set? */
+	set_filter = (fattr->type == RC_FILTER_NORMAL) ? dev->s_filter :
+							 dev->s_wakeup_filter;
+	if (!set_filter)
+		return -EINVAL;
 
 	mutex_lock(&dev->lock);
 
@@ -1128,16 +1128,16 @@
 		local_filter.mask = val;
 	else
 		local_filter.data = val;
+
 	if (!dev->enabled_protocols[fattr->type] && local_filter.mask) {
 		/* refuse to set a filter unless a protocol is enabled */
 		ret = -EINVAL;
 		goto unlock;
 	}
-	if (dev->s_filter) {
-		ret = dev->s_filter(dev, fattr->type, &local_filter);
-		if (ret < 0)
-			goto unlock;
-	}
+
+	ret = set_filter(dev, &local_filter);
+	if (ret < 0)
+		goto unlock;
 
 	/* Success, commit the new filter */
 	*filter = local_filter;
@@ -1189,27 +1189,45 @@
 static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
 		      show_filter, store_filter, RC_FILTER_WAKEUP, true);
 
-static struct attribute *rc_dev_attrs[] = {
+static struct attribute *rc_dev_protocol_attrs[] = {
 	&dev_attr_protocols.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_protocol_attr_grp = {
+	.attrs	= rc_dev_protocol_attrs,
+};
+
+static struct attribute *rc_dev_wakeup_protocol_attrs[] = {
 	&dev_attr_wakeup_protocols.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_protocol_attr_grp = {
+	.attrs	= rc_dev_wakeup_protocol_attrs,
+};
+
+static struct attribute *rc_dev_filter_attrs[] = {
 	&dev_attr_filter.attr.attr,
 	&dev_attr_filter_mask.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_filter_attr_grp = {
+	.attrs	= rc_dev_filter_attrs,
+};
+
+static struct attribute *rc_dev_wakeup_filter_attrs[] = {
 	&dev_attr_wakeup_filter.attr.attr,
 	&dev_attr_wakeup_filter_mask.attr.attr,
 	NULL,
 };
 
-static struct attribute_group rc_dev_attr_grp = {
-	.attrs	= rc_dev_attrs,
-};
-
-static const struct attribute_group *rc_dev_attr_groups[] = {
-	&rc_dev_attr_grp,
-	NULL
+static struct attribute_group rc_dev_wakeup_filter_attr_grp = {
+	.attrs	= rc_dev_wakeup_filter_attrs,
 };
 
 static struct device_type rc_dev_type = {
-	.groups		= rc_dev_attr_groups,
 	.release	= rc_dev_release,
 	.uevent		= rc_dev_uevent,
 };
@@ -1266,7 +1284,7 @@
 	static bool raw_init = false; /* raw decoders loaded? */
 	struct rc_map *rc_map;
 	const char *path;
-	int rc, devno;
+	int rc, devno, attr = 0;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1294,6 +1312,16 @@
 			return -ENOMEM;
 	} while (test_and_set_bit(devno, ir_core_dev_number));
 
+	dev->dev.groups = dev->sysfs_groups;
+	dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+	if (dev->s_filter)
+		dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
+	if (dev->s_wakeup_filter)
+		dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
+	if (dev->change_wakeup_protocol)
+		dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
+	dev->sysfs_groups[attr++] = NULL;
+
 	/*
 	 * Take the lock here, as the device sysfs node will appear
 	 * when device_add() is called, which may trigger an ir-keytable udev
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 319adc4..96ccfeb 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1468,7 +1468,8 @@
 static int r820t_multi_read(struct r820t_priv *priv)
 {
 	int rc, i;
-	u8 data[2], min = 0, max = 255, sum = 0;
+	u16 sum = 0;
+	u8 data[2], min = 255, max = 0;
 
 	usleep_range(5000, 6000);
 
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 76a8165..6ef93ee 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1107,6 +1107,7 @@
 				offset += 200000;
 		}
 #endif
+		break;
 	default:
 		tuner_err("Unsupported tuner type %d.\n", new_type);
 		break;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c83c16c..61d196e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1503,8 +1503,6 @@
 	/* RTL2832P devices: */
 	{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
 		&rtl2832u_props, "Astrometa DVB-T2", NULL) },
-	{ DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
-		&rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/gspca/jpeg.h b/drivers/media/usb/gspca/jpeg.h
index ab54910..0aa2b67 100644
--- a/drivers/media/usb/gspca/jpeg.h
+++ b/drivers/media/usb/gspca/jpeg.h
@@ -154,7 +154,9 @@
 {
 	int i, sc;
 
-	if (quality < 50)
+	if (quality <= 0)
+		sc = 5000;
+	else if (quality < 50)
 		sc = 5000 / quality;
 	else
 		sc = 200 - quality * 2;
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index c46c8be..2dd308f 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -108,7 +108,7 @@
 		 "stk1160-mixer");
 	snprintf(card->longname, sizeof(card->longname),
 		 "stk1160 ac97 codec mixer control");
-	strncpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
+	strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
 
 	rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
 	if (rc)
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index f3cdf64..63aa9d9 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -78,11 +78,19 @@
 	netdev_dbg(ndev, "Event %x, Link %x\n", status,
 		   ntb_transport_link_query(dev->qp));
 
-	/* Currently, only link status event is supported */
-	if (status)
-		netif_carrier_on(ndev);
-	else
+	switch (status) {
+	case NTB_LINK_DOWN:
 		netif_carrier_off(ndev);
+		break;
+	case NTB_LINK_UP:
+		if (!ntb_transport_link_query(dev->qp))
+			return;
+
+		netif_carrier_on(ndev);
+		break;
+	default:
+		netdev_warn(ndev, "Unsupported event type %d\n", status);
+	}
 }
 
 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
@@ -182,8 +190,10 @@
 
 		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
 					      ndev->mtu + ETH_HLEN);
-		if (rc == -EINVAL)
+		if (rc == -EINVAL) {
+			dev_kfree_skb(skb);
 			goto err;
+		}
 	}
 
 	netif_carrier_off(ndev);
@@ -367,12 +377,15 @@
 {
 	struct net_device *ndev;
 	struct ntb_netdev *dev;
+	bool found = false;
 
 	list_for_each_entry(dev, &dev_list, list) {
-		if (dev->pdev == pdev)
+		if (dev->pdev == pdev) {
+			found = true;
 			break;
+		}
 	}
-	if (dev == NULL)
+	if (!found)
 		return;
 
 	list_del(&dev->list);
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 170e8e6..372e08c 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -91,7 +91,7 @@
 /* Translate memory window 0,1 to BAR 2,4 */
 #define MW_TO_BAR(mw)	(mw * NTB_MAX_NUM_MW + 2)
 
-static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+static const struct pci_device_id ntb_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
@@ -120,7 +120,8 @@
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
 int ntb_register_event_callback(struct ntb_device *ndev,
-			    void (*func)(void *handle, enum ntb_hw_event event))
+				void (*func)(void *handle,
+					     enum ntb_hw_event event))
 {
 	if (ndev->event_cb)
 		return -EINVAL;
@@ -715,9 +716,9 @@
 			       SNB_PBAR4LMT_OFFSET);
 			/* HW errata on the Limit registers.  They can only be
 			 * written when the base register is 4GB aligned and
-			 * < 32bit.  This should already be the case based on the
-			 * driver defaults, but write the Limit registers first
-			 * just in case.
+			 * < 32bit.  This should already be the case based on
+			 * the driver defaults, but write the Limit registers
+			 * first just in case.
 			 */
 		} else {
 			ndev->limits.max_mw = SNB_MAX_MW;
@@ -739,9 +740,9 @@
 			writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
 			/* HW errata on the Limit registers.  They can only be
 			 * written when the base register is 4GB aligned and
-			 * < 32bit.  This should already be the case based on the
-			 * driver defaults, but write the Limit registers first
-			 * just in case.
+			 * < 32bit.  This should already be the case based on
+			 * the driver defaults, but write the Limit registers
+			 * first just in case.
 			 */
 		}
 
@@ -785,7 +786,7 @@
 				/* B2B_XLAT_OFFSET is a 64bit register, but can
 				 * only take 32bit writes
 				 */
-				writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+				writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
 				       ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
 				writel(SNB_MBAR01_USD_ADDR >> 32,
 				       ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
@@ -803,7 +804,7 @@
 		ndev->conn_type = NTB_CONN_RP;
 
 		if (xeon_errata_workaround) {
-			dev_err(&ndev->pdev->dev, 
+			dev_err(&ndev->pdev->dev,
 				"NTB-RP disabled due to hardware errata.  To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
 			return -EINVAL;
 		}
@@ -1079,25 +1080,104 @@
 	return IRQ_HANDLED;
 }
 
-static int ntb_setup_msix(struct ntb_device *ndev)
+static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
 {
 	struct pci_dev *pdev = ndev->pdev;
 	struct msix_entry *msix;
-	int msix_entries;
 	int rc, i;
-	u16 val;
 
-	if (!pdev->msix_cap) {
-		rc = -EIO;
-		goto err;
+	if (msix_entries < ndev->limits.msix_cnt)
+		return -ENOSPC;
+
+	rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
+	if (rc < 0)
+		return rc;
+
+	for (i = 0; i < msix_entries; i++) {
+		msix = &ndev->msix_entries[i];
+		WARN_ON(!msix->vector);
+
+		if (i == msix_entries - 1) {
+			rc = request_irq(msix->vector,
+					 xeon_event_msix_irq, 0,
+					 "ntb-event-msix", ndev);
+			if (rc)
+				goto err;
+		} else {
+			rc = request_irq(msix->vector,
+					 xeon_callback_msix_irq, 0,
+					 "ntb-callback-msix",
+					 &ndev->db_cb[i]);
+			if (rc)
+				goto err;
+		}
 	}
 
-	rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val);
-	if (rc)
-		goto err;
+	ndev->num_msix = msix_entries;
+	ndev->max_cbs = msix_entries - 1;
 
-	msix_entries = msix_table_size(val);
-	if (msix_entries > ndev->limits.msix_cnt) {
+	return 0;
+
+err:
+	while (--i >= 0) {
+		/* Code never reaches here for entry nr 'ndev->num_msix - 1' */
+		msix = &ndev->msix_entries[i];
+		free_irq(msix->vector, &ndev->db_cb[i]);
+	}
+
+	pci_disable_msix(pdev);
+	ndev->num_msix = 0;
+
+	return rc;
+}
+
+static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	struct msix_entry *msix;
+	int rc, i;
+
+	msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
+					     1, msix_entries);
+	if (msix_entries < 0)
+		return msix_entries;
+
+	for (i = 0; i < msix_entries; i++) {
+		msix = &ndev->msix_entries[i];
+		WARN_ON(!msix->vector);
+
+		rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+				 "ntb-callback-msix", &ndev->db_cb[i]);
+		if (rc)
+			goto err;
+	}
+
+	ndev->num_msix = msix_entries;
+	ndev->max_cbs = msix_entries;
+
+	return 0;
+
+err:
+	while (--i >= 0)
+		free_irq(msix->vector, &ndev->db_cb[i]);
+
+	pci_disable_msix(pdev);
+	ndev->num_msix = 0;
+
+	return rc;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	int msix_entries;
+	int rc, i;
+
+	msix_entries = pci_msix_vec_count(pdev);
+	if (msix_entries < 0) {
+		rc = msix_entries;
+		goto err;
+	} else if (msix_entries > ndev->limits.msix_cnt) {
 		rc = -EINVAL;
 		goto err;
 	}
@@ -1112,78 +1192,19 @@
 	for (i = 0; i < msix_entries; i++)
 		ndev->msix_entries[i].entry = i;
 
-	rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-	if (rc < 0)
-		goto err1;
-	if (rc > 0) {
-		/* On SNB, the link interrupt is always tied to 4th vector.  If
-		 * we can't get all 4, then we can't use MSI-X.
-		 */
-		if (ndev->hw_type != BWD_HW) {
-			rc = -EIO;
-			goto err1;
-		}
-
-		dev_warn(&pdev->dev,
-			 "Only %d MSI-X vectors.  Limiting the number of queues to that number.\n",
-			 rc);
-		msix_entries = rc;
-
-		rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-		if (rc)
-			goto err1;
-	}
-
-	for (i = 0; i < msix_entries; i++) {
-		msix = &ndev->msix_entries[i];
-		WARN_ON(!msix->vector);
-
-		/* Use the last MSI-X vector for Link status */
-		if (ndev->hw_type == BWD_HW) {
-			rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
-					 "ntb-callback-msix", &ndev->db_cb[i]);
-			if (rc)
-				goto err2;
-		} else {
-			if (i == msix_entries - 1) {
-				rc = request_irq(msix->vector,
-						 xeon_event_msix_irq, 0,
-						 "ntb-event-msix", ndev);
-				if (rc)
-					goto err2;
-			} else {
-				rc = request_irq(msix->vector,
-						 xeon_callback_msix_irq, 0,
-						 "ntb-callback-msix",
-						 &ndev->db_cb[i]);
-				if (rc)
-					goto err2;
-			}
-		}
-	}
-
-	ndev->num_msix = msix_entries;
 	if (ndev->hw_type == BWD_HW)
-		ndev->max_cbs = msix_entries;
+		rc = ntb_setup_bwd_msix(ndev, msix_entries);
 	else
-		ndev->max_cbs = msix_entries - 1;
+		rc = ntb_setup_snb_msix(ndev, msix_entries);
+	if (rc)
+		goto err1;
 
 	return 0;
 
-err2:
-	while (--i >= 0) {
-		msix = &ndev->msix_entries[i];
-		if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
-			free_irq(msix->vector, ndev);
-		else
-			free_irq(msix->vector, &ndev->db_cb[i]);
-	}
-	pci_disable_msix(pdev);
 err1:
 	kfree(ndev->msix_entries);
-	dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 err:
-	ndev->num_msix = 0;
+	dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 	return rc;
 }
 
@@ -1281,6 +1302,7 @@
 				free_irq(msix->vector, &ndev->db_cb[i]);
 		}
 		pci_disable_msix(pdev);
+		kfree(ndev->msix_entries);
 	} else {
 		free_irq(pdev->irq, ndev);
 
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index bbdb7ed..465517b 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -45,6 +45,7 @@
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
  */
+#include <linux/ntb.h>
 
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF		0x3725
 #define PCI_DEVICE_ID_INTEL_NTB_PS_JSF		0x3726
@@ -60,8 +61,6 @@
 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX		0x2F0F
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD		0x0C4E
 
-#define msix_table_size(control)	((control & PCI_MSIX_FLAGS_QSIZE)+1)
-
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
@@ -83,9 +82,6 @@
 #define NTB_BAR_MASK		((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
 				 (1 << NTB_BAR_45))
 
-#define NTB_LINK_DOWN		0
-#define NTB_LINK_UP		1
-
 #define NTB_HB_TIMEOUT		msecs_to_jiffies(1000)
 
 #define NTB_MAX_NUM_MW		2
@@ -233,7 +229,7 @@
 							   int db_num));
 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
 int ntb_register_event_callback(struct ntb_device *ndev,
-				void (*event_cb_func) (void *handle,
+				void (*event_cb_func)(void *handle,
 						      enum ntb_hw_event event));
 void ntb_unregister_event_callback(struct ntb_device *ndev);
 int ntb_get_max_spads(struct ntb_device *ndev);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3217f39..9dd63b8 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -56,7 +56,6 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/ntb.h>
 #include "ntb_hw.h"
 
 #define NTB_TRANSPORT_VERSION	3
@@ -107,8 +106,8 @@
 	struct ntb_rx_info __iomem *rx_info;
 	struct ntb_rx_info *remote_rx_info;
 
-	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-			    void *data, int len);
+	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
 	struct list_head tx_free_q;
 	spinlock_t ntb_tx_free_q_lock;
 	void __iomem *tx_mw;
@@ -117,8 +116,8 @@
 	unsigned int tx_max_entry;
 	unsigned int tx_max_frame;
 
-	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-			    void *data, int len);
+	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
 	spinlock_t ntb_rx_pend_q_lock;
@@ -129,7 +128,7 @@
 	unsigned int rx_max_frame;
 	dma_cookie_t last_cookie;
 
-	void (*event_handler) (void *data, int status);
+	void (*event_handler)(void *data, int status);
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
 
@@ -480,7 +479,7 @@
 }
 
 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
-						struct list_head *list)
+					   struct list_head *list)
 {
 	struct ntb_queue_entry *entry;
 	unsigned long flags;
@@ -839,7 +838,7 @@
 }
 
 static int ntb_transport_init_queue(struct ntb_transport *nt,
-				     unsigned int qp_num)
+				    unsigned int qp_num)
 {
 	struct ntb_transport_qp *qp;
 	unsigned int num_qps_mw, tx_size;
@@ -1055,7 +1054,7 @@
 	if (!chan)
 		goto err;
 
-	if (len < copy_bytes) 
+	if (len < copy_bytes)
 		goto err_wait;
 
 	device = chan->device;
@@ -1190,8 +1189,7 @@
 	return 0;
 
 err:
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-		     &qp->rx_pend_q);
+	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
 	/* Ensure that the data is fully copied out before clearing the flag */
 	wmb();
 	hdr->flags = 0;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5f67843..27df2c5 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -53,6 +53,18 @@
 	  If you have an Acer Aspire One netbook, say Y or M
 	  here.
 
+config ALIENWARE_WMI
+	tristate "Alienware Special feature control"
+	depends on ACPI
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on ACPI_WMI
+	---help---
+	 This is a driver for controlling Alienware BIOS driven
+	 features.  It exposes an interface for controlling the AlienFX
+	 zones on Alienware machines that don't contain a dedicated AlienFX
+	 USB MCU such as the X51 and X51-R2.
+
 config ASUS_LAPTOP
 	tristate "Asus Laptop Extras"
 	depends on ACPI
@@ -196,7 +208,7 @@
 	  be called hp_accel.
 
 config HP_WIRELESS
-	tristate "HP WIRELESS"
+	tristate "HP wireless button"
 	depends on ACPI
 	depends on INPUT
 	help
@@ -817,12 +829,4 @@
 	  a paravirtualized device provided by QEMU; it lets a virtual machine
 	  (guest) communicate panic events to the host.
 
-config INTEL_BAYTRAIL_MBI
-	tristate
-	depends on PCI
-	---help---
-	  Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
-	  Interface. This is a requirement for systems that need to configure
-	  the PUNIT for power management features such as RAPL.
-
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9b87cfc..1a2eafc 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -55,4 +55,4 @@
 obj-$(CONFIG_INTEL_SMARTCONNECT)	+= intel-smartconnect.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
-obj-$(CONFIG_INTEL_BAYTRAIL_MBI)	+= intel_baytrail.o
+obj-$(CONFIG_ALIENWARE_WMI)	+= alienware-wmi.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
new file mode 100644
index 0000000..541f951
--- /dev/null
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -0,0 +1,565 @@
+/*
+ * Alienware AlienFX control
+ *
+ * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/leds.h>
+
+#define LEGACY_CONTROL_GUID		"A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID	"A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID		"A70591CE-A997-11DA-B012-B622A1EF5492"
+
+#define WMAX_METHOD_HDMI_SOURCE		0x1
+#define WMAX_METHOD_HDMI_STATUS		0x2
+#define WMAX_METHOD_BRIGHTNESS		0x3
+#define WMAX_METHOD_ZONE_CONTROL	0x4
+
+MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
+MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
+
+enum INTERFACE_FLAGS {
+	LEGACY,
+	WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+	LEGACY_RUNNING = 1,
+	LEGACY_BOOTING = 0,
+	LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+	WMAX_RUNNING = 0xFF,
+	WMAX_BOOTING = 0,
+	WMAX_SUSPEND = 3,
+};
+
+struct quirk_entry {
+	u8 num_zones;
+};
+
+static struct quirk_entry *quirks;
+
+static struct quirk_entry quirk_unknown = {
+	.num_zones = 2,
+};
+
+static struct quirk_entry quirk_x51_family = {
+	.num_zones = 3,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+	quirks = dmi->driver_data;
+	return 1;
+}
+
+static struct dmi_system_id alienware_quirks[] = {
+	{
+	 .callback = dmi_matched,
+	 .ident = "Alienware X51 R1",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+		     },
+	 .driver_data = &quirk_x51_family,
+	 },
+	{
+	 .callback = dmi_matched,
+	 .ident = "Alienware X51 R2",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+		     },
+	 .driver_data = &quirk_x51_family,
+	 },
+	{}
+};
+
+struct color_platform {
+	u8 blue;
+	u8 green;
+	u8 red;
+} __packed;
+
+struct platform_zone {
+	u8 location;
+	struct device_attribute *attr;
+	struct color_platform colors;
+};
+
+struct wmax_brightness_args {
+	u32 led_mask;
+	u32 percentage;
+};
+
+struct hdmi_args {
+	u8 arg;
+};
+
+struct legacy_led_args {
+	struct color_platform colors;
+	u8 brightness;
+	u8 state;
+} __packed;
+
+struct wmax_led_args {
+	u32 led_mask;
+	struct color_platform colors;
+	u8 state;
+} __packed;
+
+static struct platform_device *platform_device;
+static struct device_attribute *zone_dev_attrs;
+static struct attribute **zone_attrs;
+static struct platform_zone *zone_data;
+
+static struct platform_driver platform_driver = {
+	.driver = {
+		   .name = "alienware-wmi",
+		   .owner = THIS_MODULE,
+		   }
+};
+
+static struct attribute_group zone_attribute_group = {
+	.name = "rgb_zones",
+};
+
+static u8 interface;
+static u8 lighting_control_state;
+static u8 global_brightness;
+
+/*
+ * Helpers used for zone control
+*/
+static int parse_rgb(const char *buf, struct platform_zone *zone)
+{
+	long unsigned int rgb;
+	int ret;
+	union color_union {
+		struct color_platform cp;
+		int package;
+	} repackager;
+
+	ret = kstrtoul(buf, 16, &rgb);
+	if (ret)
+		return ret;
+
+	/* RGB triplet notation is 24-bit hexadecimal */
+	if (rgb > 0xFFFFFF)
+		return -EINVAL;
+
+	repackager.package = rgb & 0x0f0f0f0f;
+	pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+		 repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+	zone->colors = repackager.cp;
+	return 0;
+}
+
+static struct platform_zone *match_zone(struct device_attribute *attr)
+{
+	int i;
+	for (i = 0; i < quirks->num_zones; i++) {
+		if ((struct device_attribute *)zone_data[i].attr == attr) {
+			pr_debug("alienware-wmi: matched zone location: %d\n",
+				 zone_data[i].location);
+			return &zone_data[i];
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Individual RGB zone control
+*/
+static int alienware_update_led(struct platform_zone *zone)
+{
+	int method_id;
+	acpi_status status;
+	char *guid;
+	struct acpi_buffer input;
+	struct legacy_led_args legacy_args;
+	struct wmax_led_args wmax_args;
+	if (interface == WMAX) {
+		wmax_args.led_mask = 1 << zone->location;
+		wmax_args.colors = zone->colors;
+		wmax_args.state = lighting_control_state;
+		guid = WMAX_CONTROL_GUID;
+		method_id = WMAX_METHOD_ZONE_CONTROL;
+
+		input.length = (acpi_size) sizeof(wmax_args);
+		input.pointer = &wmax_args;
+	} else {
+		legacy_args.colors = zone->colors;
+		legacy_args.brightness = global_brightness;
+		legacy_args.state = 0;
+		if (lighting_control_state == LEGACY_BOOTING ||
+		    lighting_control_state == LEGACY_SUSPEND) {
+			guid = LEGACY_POWER_CONTROL_GUID;
+			legacy_args.state = lighting_control_state;
+		} else
+			guid = LEGACY_CONTROL_GUID;
+		method_id = zone->location + 1;
+
+		input.length = (acpi_size) sizeof(legacy_args);
+		input.pointer = &legacy_args;
+	}
+	pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
+
+	status = wmi_evaluate_method(guid, 1, method_id, &input, NULL);
+	if (ACPI_FAILURE(status))
+		pr_err("alienware-wmi: zone set failure: %u\n", status);
+	return ACPI_FAILURE(status);
+}
+
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct platform_zone *target_zone;
+	target_zone = match_zone(attr);
+	if (target_zone == NULL)
+		return sprintf(buf, "red: -1, green: -1, blue: -1\n");
+	return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+		       target_zone->colors.red,
+		       target_zone->colors.green, target_zone->colors.blue);
+
+}
+
+static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_zone *target_zone;
+	int ret;
+	target_zone = match_zone(attr);
+	if (target_zone == NULL) {
+		pr_err("alienware-wmi: invalid target zone\n");
+		return 1;
+	}
+	ret = parse_rgb(buf, target_zone);
+	if (ret)
+		return ret;
+	ret = alienware_update_led(target_zone);
+	return ret ? ret : count;
+}
+
+/*
+ * LED Brightness (Global)
+*/
+static int wmax_brightness(int brightness)
+{
+	acpi_status status;
+	struct acpi_buffer input;
+	struct wmax_brightness_args args = {
+		.led_mask = 0xFF,
+		.percentage = brightness,
+	};
+	input.length = (acpi_size) sizeof(args);
+	input.pointer = &args;
+	status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+				     WMAX_METHOD_BRIGHTNESS, &input, NULL);
+	if (ACPI_FAILURE(status))
+		pr_err("alienware-wmi: brightness set failure: %u\n", status);
+	return ACPI_FAILURE(status);
+}
+
+static void global_led_set(struct led_classdev *led_cdev,
+			   enum led_brightness brightness)
+{
+	int ret;
+	global_brightness = brightness;
+	if (interface == WMAX)
+		ret = wmax_brightness(brightness);
+	else
+		ret = alienware_update_led(&zone_data[0]);
+	if (ret)
+		pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+	return global_brightness;
+}
+
+static struct led_classdev global_led = {
+	.brightness_set = global_led_set,
+	.brightness_get = global_led_get,
+	.name = "alienware::global_brightness",
+};
+
+/*
+ * Lighting control state device attribute (Global)
+*/
+static ssize_t show_control_state(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	if (lighting_control_state == LEGACY_BOOTING)
+		return scnprintf(buf, PAGE_SIZE, "[booting] running suspend\n");
+	else if (lighting_control_state == LEGACY_SUSPEND)
+		return scnprintf(buf, PAGE_SIZE, "booting running [suspend]\n");
+	return scnprintf(buf, PAGE_SIZE, "booting [running] suspend\n");
+}
+
+static ssize_t store_control_state(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	long unsigned int val;
+	if (strcmp(buf, "booting\n") == 0)
+		val = LEGACY_BOOTING;
+	else if (strcmp(buf, "suspend\n") == 0)
+		val = LEGACY_SUSPEND;
+	else if (interface == LEGACY)
+		val = LEGACY_RUNNING;
+	else
+		val = WMAX_RUNNING;
+	lighting_control_state = val;
+	pr_debug("alienware-wmi: updated control state to %d\n",
+		 lighting_control_state);
+	return count;
+}
+
+static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
+		   store_control_state);
+
+static int alienware_zone_init(struct platform_device *dev)
+{
+	int i;
+	char buffer[10];
+	char *name;
+
+	if (interface == WMAX) {
+		global_led.max_brightness = 100;
+		lighting_control_state = WMAX_RUNNING;
+	} else if (interface == LEGACY) {
+		global_led.max_brightness = 0x0F;
+		lighting_control_state = LEGACY_RUNNING;
+	}
+	global_brightness = global_led.max_brightness;
+
+	/*
+	 *      - zone_dev_attrs num_zones + 1 is for individual zones and then
+	 *        null terminated
+	 *      - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
+	 *        the lighting control + null terminated
+	 *      - zone_data num_zones is for the distinct zones
+	 */
+	zone_dev_attrs =
+	    kzalloc(sizeof(struct device_attribute) * (quirks->num_zones + 1),
+		    GFP_KERNEL);
+	if (!zone_dev_attrs)
+		return -ENOMEM;
+
+	zone_attrs =
+	    kzalloc(sizeof(struct attribute *) * (quirks->num_zones + 2),
+		    GFP_KERNEL);
+	if (!zone_attrs)
+		return -ENOMEM;
+
+	zone_data =
+	    kzalloc(sizeof(struct platform_zone) * (quirks->num_zones),
+		    GFP_KERNEL);
+	if (!zone_data)
+		return -ENOMEM;
+
+	for (i = 0; i < quirks->num_zones; i++) {
+		sprintf(buffer, "zone%02X", i);
+		name = kstrdup(buffer, GFP_KERNEL);
+		if (name == NULL)
+			return 1;
+		sysfs_attr_init(&zone_dev_attrs[i].attr);
+		zone_dev_attrs[i].attr.name = name;
+		zone_dev_attrs[i].attr.mode = 0644;
+		zone_dev_attrs[i].show = zone_show;
+		zone_dev_attrs[i].store = zone_set;
+		zone_data[i].location = i;
+		zone_attrs[i] = &zone_dev_attrs[i].attr;
+		zone_data[i].attr = &zone_dev_attrs[i];
+	}
+	zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
+	zone_attribute_group.attrs = zone_attrs;
+
+	led_classdev_register(&dev->dev, &global_led);
+
+	return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+}
+
+static void alienware_zone_exit(struct platform_device *dev)
+{
+	sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
+	led_classdev_unregister(&global_led);
+	if (zone_dev_attrs) {
+		int i;
+		for (i = 0; i < quirks->num_zones; i++)
+			kfree(zone_dev_attrs[i].attr.name);
+	}
+	kfree(zone_dev_attrs);
+	kfree(zone_data);
+	kfree(zone_attrs);
+}
+
+/*
+	The HDMI mux sysfs node indicates the status of the HDMI input mux.
+	It can toggle between standard system GPU output and HDMI input.
+*/
+static ssize_t show_hdmi(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	acpi_status status;
+	struct acpi_buffer input;
+	union acpi_object *obj;
+	u32 tmp = 0;
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct hdmi_args in_args = {
+		.arg = 0,
+	};
+	input.length = (acpi_size) sizeof(in_args);
+	input.pointer = &in_args;
+	status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+				     WMAX_METHOD_HDMI_STATUS, &input, &output);
+
+	if (ACPI_SUCCESS(status)) {
+		obj = (union acpi_object *)output.pointer;
+		if (obj && obj->type == ACPI_TYPE_INTEGER)
+			tmp = (u32) obj->integer.value;
+		if (tmp == 1)
+			return scnprintf(buf, PAGE_SIZE,
+					 "[input] gpu unknown\n");
+		else if (tmp == 2)
+			return scnprintf(buf, PAGE_SIZE,
+					 "input [gpu] unknown\n");
+	}
+	pr_err("alienware-wmi: unknown HDMI status: %d\n", status);
+	return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
+}
+
+static ssize_t toggle_hdmi(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct acpi_buffer input;
+	acpi_status status;
+	struct hdmi_args args;
+	if (strcmp(buf, "gpu\n") == 0)
+		args.arg = 1;
+	else if (strcmp(buf, "input\n") == 0)
+		args.arg = 2;
+	else
+		args.arg = 3;
+	pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+	input.length = (acpi_size) sizeof(args);
+	input.pointer = &args;
+	status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+				     WMAX_METHOD_HDMI_SOURCE, &input, NULL);
+	if (ACPI_FAILURE(status))
+		pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
+		       status);
+	return count;
+}
+
+static DEVICE_ATTR(hdmi, S_IRUGO | S_IWUSR, show_hdmi, toggle_hdmi);
+
+static void remove_hdmi(struct platform_device *device)
+{
+	device_remove_file(&device->dev, &dev_attr_hdmi);
+}
+
+static int create_hdmi(void)
+{
+	int ret = -ENOMEM;
+	ret = device_create_file(&platform_device->dev, &dev_attr_hdmi);
+	if (ret)
+		goto error_create_hdmi;
+	return 0;
+
+error_create_hdmi:
+	remove_hdmi(platform_device);
+	return ret;
+}
+
+static int __init alienware_wmi_init(void)
+{
+	int ret;
+
+	if (wmi_has_guid(LEGACY_CONTROL_GUID))
+		interface = LEGACY;
+	else if (wmi_has_guid(WMAX_CONTROL_GUID))
+		interface = WMAX;
+	else {
+		pr_warn("alienware-wmi: No known WMI GUID found\n");
+		return -ENODEV;
+	}
+
+	dmi_check_system(alienware_quirks);
+	if (quirks == NULL)
+		quirks = &quirk_unknown;
+
+	ret = platform_driver_register(&platform_driver);
+	if (ret)
+		goto fail_platform_driver;
+	platform_device = platform_device_alloc("alienware-wmi", -1);
+	if (!platform_device) {
+		ret = -ENOMEM;
+		goto fail_platform_device1;
+	}
+	ret = platform_device_add(platform_device);
+	if (ret)
+		goto fail_platform_device2;
+
+	if (interface == WMAX) {
+		ret = create_hdmi();
+		if (ret)
+			goto fail_prep_hdmi;
+	}
+
+	ret = alienware_zone_init(platform_device);
+	if (ret)
+		goto fail_prep_zones;
+
+	return 0;
+
+fail_prep_zones:
+	alienware_zone_exit(platform_device);
+fail_prep_hdmi:
+	platform_device_del(platform_device);
+fail_platform_device2:
+	platform_device_put(platform_device);
+fail_platform_device1:
+	platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+	return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+	if (platform_device) {
+		alienware_zone_exit(platform_device);
+		remove_hdmi(platform_device);
+		platform_device_unregister(platform_device);
+		platform_driver_unregister(&platform_driver);
+	}
+}
+
+module_exit(alienware_wmi_exit);
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 570926c..c3784ba 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -71,6 +71,44 @@
 	KEY_LEFTALT
 };
 
+static unsigned short keymap_Lifebook_T901[KEYMAP_LEN] __initdata = {
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_SCROLLDOWN,
+	KEY_SCROLLUP,
+	KEY_CYCLEWINDOWS,
+	KEY_LEFTCTRL,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_LEFTMETA
+};
+
+static unsigned short keymap_Lifebook_T902[KEYMAP_LEN] __initdata = {
+	KEY_RESERVED,
+	KEY_VOLUMEDOWN,
+	KEY_VOLUMEUP,
+	KEY_CYCLEWINDOWS,
+	KEY_PROG1,
+	KEY_PROG2,
+	KEY_LEFTMETA,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+};
+
 static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initdata = {
 	KEY_RESERVED,
 	KEY_RESERVED,
@@ -302,6 +340,33 @@
 static const struct dmi_system_id dmi_ids[] __initconst = {
 	{
 		.callback = fujitsu_dmi_lifebook,
+		.ident = "Fujitsu Lifebook T901",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T901")
+		},
+		.driver_data = keymap_Lifebook_T901
+	},
+	{
+		.callback = fujitsu_dmi_lifebook,
+		.ident = "Fujitsu Lifebook T901",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T901")
+		},
+		.driver_data = keymap_Lifebook_T901
+	},
+	{
+		.callback = fujitsu_dmi_lifebook,
+		.ident = "Fujitsu Lifebook T902",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T902")
+		},
+		.driver_data = keymap_Lifebook_T902
+	},
+	{
+		.callback = fujitsu_dmi_lifebook,
 		.ident = "Fujitsu Siemens P/T Series",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
deleted file mode 100644
index f96626b..0000000
--- a/drivers/platform/x86/intel_baytrail.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Baytrail IOSF-SB MailBox Interface Driver
- * Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- *
- * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
- * driver implements BayTrail-specific access to this interface.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-
-#include "intel_baytrail.h"
-
-static DEFINE_SPINLOCK(iosf_mbi_lock);
-
-static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
-{
-	return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
-}
-
-static struct pci_dev *mbi_pdev;	/* one mbi device */
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
-{
-	int result;
-
-	if (!mbi_pdev)
-		return -ENODEV;
-
-	if (mcrx) {
-		result = pci_write_config_dword(mbi_pdev,
-						BT_MBI_MCRX_OFFSET, mcrx);
-		if (result < 0)
-			goto iosf_mbi_read_err;
-	}
-
-	result = pci_write_config_dword(mbi_pdev,
-					BT_MBI_MCR_OFFSET, mcr);
-	if (result < 0)
-		goto iosf_mbi_read_err;
-
-	result = pci_read_config_dword(mbi_pdev,
-				       BT_MBI_MDR_OFFSET, mdr);
-	if (result < 0)
-		goto iosf_mbi_read_err;
-
-	return 0;
-
-iosf_mbi_read_err:
-	dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
-		result);
-	return result;
-}
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
-{
-	int result;
-
-	if (!mbi_pdev)
-		return -ENODEV;
-
-	result = pci_write_config_dword(mbi_pdev,
-					BT_MBI_MDR_OFFSET, mdr);
-	if (result < 0)
-		goto iosf_mbi_write_err;
-
-	if (mcrx) {
-		result = pci_write_config_dword(mbi_pdev,
-			 BT_MBI_MCRX_OFFSET, mcrx);
-		if (result < 0)
-			goto iosf_mbi_write_err;
-	}
-
-	result = pci_write_config_dword(mbi_pdev,
-					BT_MBI_MCR_OFFSET, mcr);
-	if (result < 0)
-		goto iosf_mbi_write_err;
-
-	return 0;
-
-iosf_mbi_write_err:
-	dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
-		result);
-	return result;
-}
-
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
-{
-	u32 mcr, mcrx;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	BUG_ON(port == BT_MBI_UNIT_GFX);
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-	mcrx = offset & BT_MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-	ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(bt_mbi_read);
-
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
-{
-	u32 mcr, mcrx;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	BUG_ON(port == BT_MBI_UNIT_GFX);
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-	mcrx = offset & BT_MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-	ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(bt_mbi_write);
-
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
-{
-	u32 mcr, mcrx;
-	u32 value;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	BUG_ON(port == BT_MBI_UNIT_GFX);
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-	mcrx = offset & BT_MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-
-	/* Read current mdr value */
-	ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
-	if (ret < 0) {
-		spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-		return ret;
-	}
-
-	/* Apply mask */
-	value &= ~mask;
-	mdr &= mask;
-	value |= mdr;
-
-	/* Write back */
-	ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
-
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(bt_mbi_modify);
-
-static int iosf_mbi_probe(struct pci_dev *pdev,
-			  const struct pci_device_id *unused)
-{
-	int ret;
-
-	ret = pci_enable_device(pdev);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "error: could not enable device\n");
-		return ret;
-	}
-
-	mbi_pdev = pci_dev_get(pdev);
-	return 0;
-}
-
-static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
-	{ 0, },
-};
-MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
-
-static struct pci_driver iosf_mbi_pci_driver = {
-	.name		= "iosf_mbi_pci",
-	.probe		= iosf_mbi_probe,
-	.id_table	= iosf_mbi_pci_ids,
-};
-
-static int __init bt_mbi_init(void)
-{
-	return pci_register_driver(&iosf_mbi_pci_driver);
-}
-
-static void __exit bt_mbi_exit(void)
-{
-	pci_unregister_driver(&iosf_mbi_pci_driver);
-	if (mbi_pdev) {
-		pci_dev_put(mbi_pdev);
-		mbi_pdev = NULL;
-	}
-}
-
-module_init(bt_mbi_init);
-module_exit(bt_mbi_exit);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
deleted file mode 100644
index 8bcc311..0000000
--- a/drivers/platform/x86/intel_baytrail.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
- */
-
-#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
-#define INTEL_BAYTRAIL_MBI_SYMS_H
-
-#define BT_MBI_MCR_OFFSET	0xD0
-#define BT_MBI_MDR_OFFSET	0xD4
-#define BT_MBI_MCRX_OFFSET	0xD8
-
-#define BT_MBI_RD_MASK		0xFEFFFFFF
-#define BT_MBI_WR_MASK		0X01000000
-
-#define BT_MBI_MASK_HI		0xFFFFFF00
-#define BT_MBI_MASK_LO		0x000000FF
-#define BT_MBI_ENABLE		0xF0
-
-/* BT-SB unit access methods */
-#define BT_MBI_UNIT_AUNIT	0x00
-#define BT_MBI_UNIT_SMC		0x01
-#define BT_MBI_UNIT_CPU		0x02
-#define BT_MBI_UNIT_BUNIT	0x03
-#define BT_MBI_UNIT_PMC		0x04
-#define BT_MBI_UNIT_GFX		0x06
-#define BT_MBI_UNIT_SMI		0x0C
-#define BT_MBI_UNIT_USB		0x43
-#define BT_MBI_UNIT_SATA	0xA3
-#define BT_MBI_UNIT_PCIE	0xA6
-
-/* Read/write opcodes */
-#define BT_MBI_AUNIT_READ	0x10
-#define BT_MBI_AUNIT_WRITE	0x11
-#define BT_MBI_SMC_READ		0x10
-#define BT_MBI_SMC_WRITE	0x11
-#define BT_MBI_CPU_READ		0x10
-#define BT_MBI_CPU_WRITE	0x11
-#define BT_MBI_BUNIT_READ	0x10
-#define BT_MBI_BUNIT_WRITE	0x11
-#define BT_MBI_PMC_READ		0x06
-#define BT_MBI_PMC_WRITE	0x07
-#define BT_MBI_GFX_READ		0x00
-#define BT_MBI_GFX_WRITE	0x01
-#define BT_MBI_SMIO_READ	0x06
-#define BT_MBI_SMIO_WRITE	0x07
-#define BT_MBI_USB_READ		0x06
-#define BT_MBI_USB_WRITE	0x07
-#define BT_MBI_SATA_READ	0x00
-#define BT_MBI_SATA_WRITE	0x01
-#define BT_MBI_PCIE_READ	0x00
-#define BT_MBI_PCIE_WRITE	0x01
-
-/**
- * bt_mbi_read() - MailBox Interface read command
- * @port:	port indicating subunit being accessed
- * @opcode:	port specific read or write opcode
- * @offset:	register address offset
- * @mdr:	register data to be read
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
-
-/**
- * bt_mbi_write() - MailBox unmasked write command
- * @port:	port indicating subunit being accessed
- * @opcode:	port specific read or write opcode
- * @offset:	register address offset
- * @mdr:	register data to be written
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
-
-/**
- * bt_mbi_modify() - MailBox masked write command
- * @port:	port indicating subunit being accessed
- * @opcode:	port specific read or write opcode
- * @offset:	register address offset
- * @mdr:	register data being modified
- * @mask:	mask indicating bits in mdr to be modified
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
-
-#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 609d387..3f87097 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -449,6 +449,7 @@
 
 /* hotkey input device driver */
 
+static int sleep_keydown_seen;
 static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
 {
 	struct input_dev *hotk_input_dev = pcc->input_dev;
@@ -462,6 +463,16 @@
 				 "error getting hotkey status\n"));
 		return;
 	}
+
+	/* hack: some firmware sends no key down for sleep / hibernate */
+	if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
+		if (result & 0x80)
+			sleep_keydown_seen = 1;
+		if (!sleep_keydown_seen)
+			sparse_keymap_report_event(hotk_input_dev,
+					result & 0xf, 0x80, false);
+	}
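+	/*
+	 * Illustrative reading of the encoding handled above (inferred from
+	 * this handler, not from firmware documentation): the low nibble of
+	 * result selects the key (0x7 and 0xa are the sleep/hibernate keys)
+	 * and bit 0x80 marks a key-down, so a bare release such as 0x07 with
+	 * no prior key-down gets a synthetic press reported first.
+	 */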
+
 	if (!sparse_keymap_report_event(hotk_input_dev,
 					result & 0xf, result & 0x80, false))
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8f8551a..9c5a074 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -76,8 +76,6 @@
 		pr_warn(fmt, ##__VA_ARGS__);	\
 } while (0)
 
-#define SONY_LAPTOP_DRIVER_VERSION	"0.6"
-
 #define SONY_NC_CLASS		"sony-nc"
 #define SONY_NC_HID		"SNY5001"
 #define SONY_NC_DRIVER_NAME	"Sony Notebook Control Driver"
@@ -89,7 +87,6 @@
 MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
 MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
 
 static int debug;
 module_param(debug, int, 0);
@@ -129,7 +126,8 @@
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
 		 "set this to 0 to disable keyboard backlight, "
-		 "1 to enable it (default: no change from current value)");
+		 "1 to enable it with automatic control and 2 to have it always "
+		 "on (default: no change from current value)");
 
 static int kbd_backlight_timeout = -1;
 module_param(kbd_backlight_timeout, int, 0444);
@@ -152,7 +150,8 @@
 static int sony_nc_thermal_setup(struct platform_device *pd);
 static void sony_nc_thermal_cleanup(struct platform_device *pd);
 
-static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+				    unsigned int handle);
 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
 
 static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -163,6 +162,21 @@
 static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
 static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
 
+static int sony_nc_lowbatt_setup(struct platform_device *pd);
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd);
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd);
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd);
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd);
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd);
+
+static int sony_nc_panelid_setup(struct platform_device *pd);
+static void sony_nc_panelid_cleanup(struct platform_device *pd);
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd);
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd);
+
 static int sony_nc_touchpad_setup(struct platform_device *pd,
 				  unsigned int handle);
 static void sony_nc_touchpad_cleanup(struct platform_device *pd);
@@ -1122,6 +1136,8 @@
 	{ 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
 	{ 0xa6, SONYPI_EVENT_HELP_PRESSED },
 	{ 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0xa8, SONYPI_EVENT_FNKEY_1 },
+	{ 0x28, SONYPI_EVENT_ANYBUTTON_RELEASED },
 	{ 0, 0 },
 };
 
@@ -1339,7 +1355,8 @@
 						result);
 			break;
 		case 0x0119:
-			result = sony_nc_lid_resume_setup(pf_device);
+		case 0x015D:
+			result = sony_nc_lid_resume_setup(pf_device, handle);
 			if (result)
 				pr_err("couldn't set up lid resume function (%d)\n",
 						result);
@@ -1381,6 +1398,36 @@
 				pr_err("couldn't set up keyboard backlight function (%d)\n",
 						result);
 			break;
+		case 0x0121:
+			result = sony_nc_lowbatt_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up low battery function (%d)\n",
+				       result);
+			break;
+		case 0x0149:
+			result = sony_nc_fanspeed_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up fan speed function (%d)\n",
+				       result);
+			break;
+		case 0x0155:
+			result = sony_nc_usb_charge_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up USB charge support (%d)\n",
+						result);
+			break;
+		case 0x011D:
+			result = sony_nc_panelid_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up panel ID function (%d)\n",
+				       result);
+			break;
+		case 0x0168:
+			result = sony_nc_smart_conn_setup(pf_device);
+			if (result)
+				pr_err("couldn't set up smart connect support (%d)\n",
+						result);
+			break;
 		default:
 			continue;
 		}
@@ -1420,6 +1467,7 @@
 			sony_nc_battery_care_cleanup(pd);
 			break;
 		case 0x0119:
+		case 0x015D:
 			sony_nc_lid_resume_cleanup(pd);
 			break;
 		case 0x0122:
@@ -1444,6 +1492,21 @@
 		case 0x0163:
 			sony_nc_kbd_backlight_cleanup(pd, handle);
 			break;
+		case 0x0121:
+			sony_nc_lowbatt_cleanup(pd);
+			break;
+		case 0x0149:
+			sony_nc_fanspeed_cleanup(pd);
+			break;
+		case 0x0155:
+			sony_nc_usb_charge_cleanup(pd);
+			break;
+		case 0x011D:
+			sony_nc_panelid_cleanup(pd);
+			break;
+		case 0x0168:
+			sony_nc_smart_conn_cleanup(pd);
+			break;
 		default:
 			continue;
 		}
@@ -1719,7 +1782,7 @@
 {
 	int result;
 
-	if (value > 1)
+	if (value > 2)
 		return -EINVAL;
 
 	if (sony_call_snc_handle(kbdbl_ctl->handle,
@@ -1727,8 +1790,10 @@
 		return -EIO;
 
 	/* Try to turn the light on/off immediately */
-	sony_call_snc_handle(kbdbl_ctl->handle,
-			(value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
+	if (value != 1)
+		sony_call_snc_handle(kbdbl_ctl->handle,
+				(value << 0x0f) | (kbdbl_ctl->base + 0x100),
+				&result);
 
 	kbdbl_ctl->mode = value;
 
@@ -2221,9 +2286,14 @@
 #endif
 
 /* resume on LID open */
+#define LID_RESUME_S5	0
+#define LID_RESUME_S4	1
+#define LID_RESUME_S3	2
+#define LID_RESUME_MAX	3
 struct snc_lid_resume_control {
-	struct device_attribute attrs[3];
+	struct device_attribute attrs[LID_RESUME_MAX];
 	unsigned int status;
+	int handle;
 };
 static struct snc_lid_resume_control *lid_ctl;
 
@@ -2231,8 +2301,9 @@
 					struct device_attribute *attr,
 					const char *buffer, size_t count)
 {
-	unsigned int result, pos;
+	unsigned int result;
 	unsigned long value;
+	unsigned int pos = LID_RESUME_S5;
 	if (count > 31)
 		return -EINVAL;
 
@@ -2245,21 +2316,21 @@
 	 * +--------------+
 	 *   2    1    0
 	 */
-	if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
-		pos = 2;
-	else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
-		pos = 1;
-	else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
-		pos = 0;
-	else
-               return -EINVAL;
+	while (pos < LID_RESUME_MAX) {
+		if (&lid_ctl->attrs[pos].attr == &attr->attr)
+			break;
+		pos++;
+	}
+	if (pos == LID_RESUME_MAX)
+		return -EINVAL;
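+	/*
+	 * pos now indexes the wake-up state (bit 0 = S5, bit 1 = S4,
+	 * bit 2 = S3, per the LID_RESUME_* defines), so e.g. a stored
+	 * status of 0x5 would mean "resume on lid open" is enabled for
+	 * S3 and S5 only (illustrative value).
+	 */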
 
 	if (value)
 		value = lid_ctl->status | (1 << pos);
 	else
 		value = lid_ctl->status & ~(1 << pos);
 
-	if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+	if (sony_call_snc_handle(lid_ctl->handle, value << 0x10 | 0x0100,
+				&result))
 		return -EIO;
 
 	lid_ctl->status = value;
@@ -2268,29 +2339,27 @@
 }
 
 static ssize_t sony_nc_lid_resume_show(struct device *dev,
-				       struct device_attribute *attr, char *buffer)
+					struct device_attribute *attr,
+					char *buffer)
 {
-	unsigned int pos;
+	unsigned int pos = LID_RESUME_S5;
 
-	if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
-		pos = 2;
-	else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
-		pos = 1;
-	else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
-		pos = 0;
-	else
-		return -EINVAL;
-	       
-	return snprintf(buffer, PAGE_SIZE, "%d\n",
-			(lid_ctl->status >> pos) & 0x01);
+	while (pos < LID_RESUME_MAX) {
+		if (&lid_ctl->attrs[pos].attr == &attr->attr)
+			return snprintf(buffer, PAGE_SIZE, "%d\n",
+					(lid_ctl->status >> pos) & 0x01);
+		pos++;
+	}
+	return -EINVAL;
 }
 
-static int sony_nc_lid_resume_setup(struct platform_device *pd)
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+					unsigned int handle)
 {
 	unsigned int result;
 	int i;
 
-	if (sony_call_snc_handle(0x0119, 0x0000, &result))
+	if (sony_call_snc_handle(handle, 0x0000, &result))
 		return -EIO;
 
 	lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
@@ -2298,26 +2367,29 @@
 		return -ENOMEM;
 
 	lid_ctl->status = result & 0x7;
+	lid_ctl->handle = handle;
 
 	sysfs_attr_init(&lid_ctl->attrs[0].attr);
-	lid_ctl->attrs[0].attr.name = "lid_resume_S3";
-	lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
-	lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
-	lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+	lid_ctl->attrs[LID_RESUME_S5].attr.name = "lid_resume_S5";
+	lid_ctl->attrs[LID_RESUME_S5].attr.mode = S_IRUGO | S_IWUSR;
+	lid_ctl->attrs[LID_RESUME_S5].show = sony_nc_lid_resume_show;
+	lid_ctl->attrs[LID_RESUME_S5].store = sony_nc_lid_resume_store;
 
-	sysfs_attr_init(&lid_ctl->attrs[1].attr);
-	lid_ctl->attrs[1].attr.name = "lid_resume_S4";
-	lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
-	lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
-	lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+	if (handle == 0x0119) {
+		sysfs_attr_init(&lid_ctl->attrs[1].attr);
+		lid_ctl->attrs[LID_RESUME_S4].attr.name = "lid_resume_S4";
+		lid_ctl->attrs[LID_RESUME_S4].attr.mode = S_IRUGO | S_IWUSR;
+		lid_ctl->attrs[LID_RESUME_S4].show = sony_nc_lid_resume_show;
+		lid_ctl->attrs[LID_RESUME_S4].store = sony_nc_lid_resume_store;
 
-	sysfs_attr_init(&lid_ctl->attrs[2].attr);
-	lid_ctl->attrs[2].attr.name = "lid_resume_S5";
-	lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
-	lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
-	lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
-
-	for (i = 0; i < 3; i++) {
+		sysfs_attr_init(&lid_ctl->attrs[2].attr);
+		lid_ctl->attrs[LID_RESUME_S3].attr.name = "lid_resume_S3";
+		lid_ctl->attrs[LID_RESUME_S3].attr.mode = S_IRUGO | S_IWUSR;
+		lid_ctl->attrs[LID_RESUME_S3].show = sony_nc_lid_resume_show;
+		lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store;
+	}
+	for (i = 0; i < LID_RESUME_MAX &&
+			lid_ctl->attrs[i].attr.name; i++) {
 		result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
 		if (result)
 			goto liderror;
@@ -2340,8 +2412,12 @@
 	int i;
 
 	if (lid_ctl) {
-		for (i = 0; i < 3; i++)
+		for (i = 0; i < LID_RESUME_MAX; i++) {
+			if (!lid_ctl->attrs[i].attr.name)
+				break;
+
 			device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+		}
 
 		kfree(lid_ctl);
 		lid_ctl = NULL;
@@ -2524,6 +2600,355 @@
 	}
 }
 
+/* low battery function */
+static struct device_attribute *lowbatt_handle;
+
+static ssize_t sony_nc_lowbatt_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0121, value << 8, &result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_lowbatt_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0121, 0x0200, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 1);
+}
+
+static int sony_nc_lowbatt_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	lowbatt_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!lowbatt_handle)
+		return -ENOMEM;
+
+	sysfs_attr_init(&lowbatt_handle->attr);
+	lowbatt_handle->attr.name = "lowbatt_hibernate";
+	lowbatt_handle->attr.mode = S_IRUGO | S_IWUSR;
+	lowbatt_handle->show = sony_nc_lowbatt_show;
+	lowbatt_handle->store = sony_nc_lowbatt_store;
+
+	result = device_create_file(&pd->dev, lowbatt_handle);
+	if (result) {
+		kfree(lowbatt_handle);
+		lowbatt_handle = NULL;
+		return result;
+	}
+
+	return 0;
+}
+
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
+{
+	if (lowbatt_handle) {
+		device_remove_file(&pd->dev, lowbatt_handle);
+		kfree(lowbatt_handle);
+		lowbatt_handle = NULL;
+	}
+}
+
+/* fan speed function */
+static struct device_attribute *fan_handle, *hsf_handle;
+
+static ssize_t sony_nc_hsfan_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0149, value << 0x10 | 0x0200, &result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_hsfan_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0149, 0x0100, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static ssize_t sony_nc_fanspeed_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0149, 0x0300, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0xff);
+}
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	fan_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!fan_handle)
+		return -ENOMEM;
+
+	hsf_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!hsf_handle) {
+		result = -ENOMEM;
+		goto out_hsf_handle_alloc;
+	}
+
+	sysfs_attr_init(&fan_handle->attr);
+	fan_handle->attr.name = "fanspeed";
+	fan_handle->attr.mode = S_IRUGO;
+	fan_handle->show = sony_nc_fanspeed_show;
+	fan_handle->store = NULL;
+
+	sysfs_attr_init(&hsf_handle->attr);
+	hsf_handle->attr.name = "fan_forced";
+	hsf_handle->attr.mode = S_IRUGO | S_IWUSR;
+	hsf_handle->show = sony_nc_hsfan_show;
+	hsf_handle->store = sony_nc_hsfan_store;
+
+	result = device_create_file(&pd->dev, fan_handle);
+	if (result)
+		goto out_fan_handle;
+
+	result = device_create_file(&pd->dev, hsf_handle);
+	if (result)
+		goto out_hsf_handle;
+
+	return 0;
+
+out_hsf_handle:
+	device_remove_file(&pd->dev, fan_handle);
+
+out_fan_handle:
+	kfree(hsf_handle);
+	hsf_handle = NULL;
+
+out_hsf_handle_alloc:
+	kfree(fan_handle);
+	fan_handle = NULL;
+	return result;
+}
+
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
+{
+	if (fan_handle) {
+		device_remove_file(&pd->dev, fan_handle);
+		kfree(fan_handle);
+		fan_handle = NULL;
+	}
+	if (hsf_handle) {
+		device_remove_file(&pd->dev, hsf_handle);
+		kfree(hsf_handle);
+		hsf_handle = NULL;
+	}
+}
+
+/* USB charge function */
+static struct device_attribute *uc_handle;
+
+static ssize_t sony_nc_usb_charge_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0155, value << 0x10 | 0x0100, &result))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_nc_usb_charge_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0155, 0x0000, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x0155, 0x0000, &result) || !(result & 0x01)) {
+		/* some models advertise the handle but have no implementation
+		 * for it
+		 */
+		pr_info("No USB Charge capability found\n");
+		return 0;
+	}
+
+	uc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!uc_handle)
+		return -ENOMEM;
+
+	sysfs_attr_init(&uc_handle->attr);
+	uc_handle->attr.name = "usb_charge";
+	uc_handle->attr.mode = S_IRUGO | S_IWUSR;
+	uc_handle->show = sony_nc_usb_charge_show;
+	uc_handle->store = sony_nc_usb_charge_store;
+
+	result = device_create_file(&pd->dev, uc_handle);
+	if (result) {
+		kfree(uc_handle);
+		uc_handle = NULL;
+		return result;
+	}
+
+	return 0;
+}
+
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
+{
+	if (uc_handle) {
+		device_remove_file(&pd->dev, uc_handle);
+		kfree(uc_handle);
+		uc_handle = NULL;
+	}
+}
+
+/* Panel ID function */
+static struct device_attribute *panel_handle;
+
+static ssize_t sony_nc_panelid_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	unsigned int result;
+
+	if (sony_call_snc_handle(0x011D, 0x0000, &result))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", result);
+}
+
+static int sony_nc_panelid_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	panel_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!panel_handle)
+		return -ENOMEM;
+
+	sysfs_attr_init(&panel_handle->attr);
+	panel_handle->attr.name = "panel_id";
+	panel_handle->attr.mode = S_IRUGO;
+	panel_handle->show = sony_nc_panelid_show;
+	panel_handle->store = NULL;
+
+	result = device_create_file(&pd->dev, panel_handle);
+	if (result) {
+		kfree(panel_handle);
+		panel_handle = NULL;
+		return result;
+	}
+
+	return 0;
+}
+
+static void sony_nc_panelid_cleanup(struct platform_device *pd)
+{
+	if (panel_handle) {
+		device_remove_file(&pd->dev, panel_handle);
+		kfree(panel_handle);
+		panel_handle = NULL;
+	}
+}
+
+/* smart connect function */
+static struct device_attribute *sc_handle;
+
+static ssize_t sony_nc_smart_conn_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned int result;
+	unsigned long value;
+
+	if (count > 31)
+		return -EINVAL;
+
+	if (kstrtoul(buffer, 10, &value) || value > 1)
+		return -EINVAL;
+
+	if (sony_call_snc_handle(0x0168, value << 0x10, &result))
+		return -EIO;
+
+	return count;
+}
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd)
+{
+	unsigned int result;
+
+	sc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+	if (!sc_handle)
+		return -ENOMEM;
+
+	sysfs_attr_init(&sc_handle->attr);
+	sc_handle->attr.name = "smart_connect";
+	sc_handle->attr.mode = S_IWUSR;
+	sc_handle->show = NULL;
+	sc_handle->store = sony_nc_smart_conn_store;
+
+	result = device_create_file(&pd->dev, sc_handle);
+	if (result) {
+		kfree(sc_handle);
+		sc_handle = NULL;
+		return result;
+	}
+
+	return 0;
+}
+
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd)
+{
+	if (sc_handle) {
+		device_remove_file(&pd->dev, sc_handle);
+		kfree(sc_handle);
+		sc_handle = NULL;
+	}
+}
+
 /* Touchpad enable/disable */
 struct touchpad_control {
 	struct device_attribute attr;
@@ -2726,8 +3151,6 @@
 	int result = 0;
 	struct sony_nc_value *item;
 
-	pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
 	sony_nc_acpi_device = device;
 	strcpy(acpi_device_class(device), "sony/hotkey");
 
@@ -2821,6 +3244,7 @@
 		}
 	}
 
+	pr_info("SNC setup done.\n");
 	return 0;
 
 out_sysfs:
@@ -4259,8 +4683,6 @@
 	struct sony_pic_ioport *io, *tmp_io;
 	struct sony_pic_irq *irq, *tmp_irq;
 
-	pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
 	spic_dev.acpi_dev = device;
 	strcpy(acpi_device_class(device), "sony/hotkey");
 	sony_pic_detect_device_type(&spic_dev);
@@ -4360,6 +4782,7 @@
 	if (result)
 		goto err_remove_pf;
 
+	pr_info("SPIC setup done.\n");
 	return 0;
 
 err_remove_pf:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f21e109..15e61c1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3441,6 +3441,106 @@
 	return (res < 0)? res : 1;
 }
 
+/* The ThinkPad X1 Carbon supports 5 modes: Home mode, Web browser
+ * mode, Web conference mode, Function mode and Lay-flat mode.
+ * We currently support only Home mode and Function mode.
+ *
+ * Support for the remaining modes may be added in the future.
+ */
+enum ADAPTIVE_KEY_MODE {
+	HOME_MODE,
+	WEB_BROWSER_MODE,
+	WEB_CONFERENCE_MODE,
+	FUNCTION_MODE,
+	LAYFLAT_MODE
+};
+
+const int adaptive_keyboard_modes[] = {
+	HOME_MODE,
+/*	WEB_BROWSER_MODE = 2,
+	WEB_CONFERENCE_MODE = 3, */
+	FUNCTION_MODE
+};
+
+#define DFR_CHANGE_ROW			0x101
+#define DFR_SHOW_QUICKVIEW_ROW		0x102
+
+/* Holding the Fn key down for a moment switches to Function mode;
+ * releasing it restores the previous mode.
+ */
+static bool adaptive_keyboard_mode_is_saved;
+static int adaptive_keyboard_prev_mode;
+
+static int adaptive_keyboard_get_next_mode(int mode)
+{
+	size_t i;
+	size_t max_mode = ARRAY_SIZE(adaptive_keyboard_modes) - 1;
+
+	for (i = 0; i <= max_mode; i++) {
+		if (adaptive_keyboard_modes[i] == mode)
+			break;
+	}
+
+	if (i >= max_mode)
+		i = 0;
+	else
+		i++;
+
+	return adaptive_keyboard_modes[i];
+}
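+
+/*
+ * Example walk-through (with only the two entries enabled above): from
+ * HOME_MODE the next mode is FUNCTION_MODE, from FUNCTION_MODE it wraps
+ * back to HOME_MODE, and any mode not listed in adaptive_keyboard_modes[]
+ * also falls back to HOME_MODE.
+ */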
+
+static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
+{
+	u32 current_mode = 0;
+	int new_mode = 0;
+
+	switch (scancode) {
+	case DFR_CHANGE_ROW:
+		if (adaptive_keyboard_mode_is_saved) {
+			new_mode = adaptive_keyboard_prev_mode;
+			adaptive_keyboard_mode_is_saved = false;
+		} else {
+			if (!acpi_evalf(
+					hkey_handle, &current_mode,
+					"GTRW", "dd", 0)) {
+				pr_err("Cannot read adaptive keyboard mode\n");
+				return false;
+			} else {
+				new_mode = adaptive_keyboard_get_next_mode(
+						current_mode);
+			}
+		}
+
+		if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+			pr_err("Cannot set adaptive keyboard mode\n");
+			return false;
+		}
+
+		return true;
+
+	case DFR_SHOW_QUICKVIEW_ROW:
+		if (!acpi_evalf(hkey_handle,
+				&adaptive_keyboard_prev_mode,
+				"GTRW", "dd", 0)) {
+			pr_err("Cannot read adaptive keyboard mode\n");
+			return false;
+		} else {
+			adaptive_keyboard_mode_is_saved = true;
+
+			if (!acpi_evalf(hkey_handle,
+					NULL, "STRW", "vd", FUNCTION_MODE)) {
+				pr_err("Cannot set adaptive keyboard mode\n");
+				return false;
+			}
+		}
+		return true;
+
+	default:
+		return false;
+	}
+}
+
 static bool hotkey_notify_hotkey(const u32 hkey,
 				 bool *send_acpi_ev,
 				 bool *ignore_acpi_ev)
@@ -3460,6 +3560,8 @@
 			*ignore_acpi_ev = true;
 		}
 		return true;
+	} else {
+		return adaptive_keyboard_hotkey_notify_hotkey(scancode);
 	}
 	return false;
 }
@@ -3732,13 +3834,28 @@
 
 static void hotkey_suspend(void)
 {
+	int hkeyv;
+
 	/* Do these on suspend, we get the events on early resume! */
 	hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
 	hotkey_autosleep_ack = 0;
+
+	/* save the previous adaptive keyboard mode of the X1 Carbon */
+	if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+		if ((hkeyv >> 8) == 2) {
+			if (!acpi_evalf(hkey_handle,
+						&adaptive_keyboard_prev_mode,
+						"GTRW", "dd", 0)) {
+				pr_err("Cannot read adaptive keyboard mode.\n");
+			}
+		}
+	}
 }
 
 static void hotkey_resume(void)
 {
+	int hkeyv;
+
 	tpacpi_disable_brightness_delay();
 
 	if (hotkey_status_set(true) < 0 ||
@@ -3751,6 +3868,18 @@
 	hotkey_wakeup_reason_notify_change();
 	hotkey_wakeup_hotunplug_complete_notify_change();
 	hotkey_poll_setup_safe(false);
+
+	/* restore the previous adaptive keyboard mode of the X1 Carbon */
+	if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+		if ((hkeyv >> 8) == 2) {
+			if (!acpi_evalf(hkey_handle,
+						NULL,
+						"STRW", "vd",
+						adaptive_keyboard_prev_mode)) {
+				pr_err("Cannot set adaptive keyboard mode.\n");
+			}
+		}
+	}
 }
 
 /* procfs -------------------------------------------------------------- */
@@ -8451,9 +8580,21 @@
 		tpacpi_led_set(i, false);
 }
 
+static void mute_led_resume(void)
+{
+	int i;
+
+	for (i = 0; i < TPACPI_LED_MAX; i++) {
+		struct tp_led_table *t = &led_tables[i];
+		if (t->state >= 0)
+			mute_led_on_off(t, t->state);
+	}
+}
+
 static struct ibm_struct mute_led_driver_data = {
 	.name = "mute_led",
 	.exit = mute_led_exit,
+	.resume = mute_led_resume,
 };
 
 /****************************************************************************
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 90dd764..46473ca 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -5,6 +5,7 @@
  *  Copyright (C) 2002-2004 John Belmonte
  *  Copyright (C) 2008 Philip Langdale
  *  Copyright (C) 2010 Pierre Ducroquet
+ *  Copyright (C) 2014 Azael Avalos
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -37,7 +38,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION	"0.19"
+#define TOSHIBA_ACPI_VERSION	"0.20"
 #define PROC_INTERFACE_VERSION	1
 
 #include <linux/kernel.h>
@@ -77,6 +78,9 @@
  * However the ACPI methods seem to be incomplete in some areas (for
  * example they allow setting, but not reading, the LCD brightness value),
  * so this is still useful.
+ *
+ * SCI stands for "System Configuration Interface", whose aim is to
+ * conceal hardware differences between different models.
  */
 
 #define HCI_WORDS			6
@@ -84,12 +88,23 @@
 /* operations */
 #define HCI_SET				0xff00
 #define HCI_GET				0xfe00
+#define SCI_OPEN			0xf100
+#define SCI_CLOSE			0xf200
+#define SCI_GET				0xf300
+#define SCI_SET				0xf400
 
 /* return codes */
 #define HCI_SUCCESS			0x0000
 #define HCI_FAILURE			0x1000
 #define HCI_NOT_SUPPORTED		0x8000
 #define HCI_EMPTY			0x8c00
+#define HCI_DATA_NOT_AVAILABLE		0x8d20
+#define HCI_NOT_INITIALIZED		0x8d50
+#define SCI_OPEN_CLOSE_OK		0x0044
+#define SCI_ALREADY_OPEN		0x8100
+#define SCI_NOT_OPENED			0x8200
+#define SCI_INPUT_DATA_ERROR		0x8300
+#define SCI_NOT_PRESENT			0x8600
 
 /* registers */
 #define HCI_FAN				0x0004
@@ -99,13 +114,22 @@
 #define HCI_HOTKEY_EVENT		0x001e
 #define HCI_LCD_BRIGHTNESS		0x002a
 #define HCI_WIRELESS			0x0056
+#define HCI_ACCELEROMETER		0x006d
+#define HCI_KBD_ILLUMINATION		0x0095
+#define HCI_ECO_MODE			0x0097
+#define HCI_ACCELEROMETER2		0x00a6
+#define SCI_ILLUMINATION		0x014e
+#define SCI_KBD_ILLUM_STATUS		0x015c
+#define SCI_TOUCHPAD			0x050e
 
 /* field definitions */
+#define HCI_ACCEL_MASK			0x7fff
 #define HCI_HOTKEY_DISABLE		0x0b
 #define HCI_HOTKEY_ENABLE		0x09
 #define HCI_LCD_BRIGHTNESS_BITS		3
 #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
 #define HCI_LCD_BRIGHTNESS_LEVELS	(1 << HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_MISC_SHIFT			0x10
 #define HCI_VIDEO_OUT_LCD		0x1
 #define HCI_VIDEO_OUT_CRT		0x2
 #define HCI_VIDEO_OUT_TV		0x4
@@ -113,6 +137,8 @@
 #define HCI_WIRELESS_BT_PRESENT		0x0f
 #define HCI_WIRELESS_BT_ATTACH		0x40
 #define HCI_WIRELESS_BT_POWER		0x80
+#define SCI_KBD_MODE_FNZ		0x1
+#define SCI_KBD_MODE_AUTO		0x2
 
 struct toshiba_acpi_dev {
 	struct acpi_device *acpi_dev;
@@ -122,10 +148,14 @@
 	struct work_struct hotkey_work;
 	struct backlight_device *backlight_dev;
 	struct led_classdev led_dev;
+	struct led_classdev kbd_led;
+	struct led_classdev eco_led;
 
 	int force_fan;
 	int last_key_event;
 	int key_event_valid;
+	int kbd_mode;
+	int kbd_time;
 
 	unsigned int illumination_supported:1;
 	unsigned int video_supported:1;
@@ -134,6 +164,12 @@
 	unsigned int ntfy_supported:1;
 	unsigned int info_supported:1;
 	unsigned int tr_backlight_supported:1;
+	unsigned int kbd_illum_supported:1;
+	unsigned int kbd_led_registered:1;
+	unsigned int touchpad_supported:1;
+	unsigned int eco_supported:1;
+	unsigned int accelerometer_supported:1;
+	unsigned int sysfs_created:1;
 
 	struct mutex mutex;
 };
@@ -280,21 +316,94 @@
 	return status;
 }
 
-/* Illumination support */
-static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
+/* Common SCI tasks */
+
+static int sci_open(struct toshiba_acpi_dev *dev)
 {
-	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
+	u32 in[HCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
 	u32 out[HCI_WORDS];
 	acpi_status status;
 
-	in[0] = 0xf100;
 	status = hci_raw(dev, in, out);
-	if (ACPI_FAILURE(status)) {
+	if  (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+		pr_err("ACPI call to open SCI failed\n");
+		return 0;
+	}
+
+	if (out[0] == SCI_OPEN_CLOSE_OK) {
+		return 1;
+	} else if (out[0] == SCI_ALREADY_OPEN) {
+		pr_info("Toshiba SCI already opened\n");
+		return 1;
+	} else if (out[0] == SCI_NOT_PRESENT) {
+		pr_info("Toshiba SCI is not present\n");
+	}
+
+	return 0;
+}
+
+static void sci_close(struct toshiba_acpi_dev *dev)
+{
+	u32 in[HCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+		pr_err("ACPI call to close SCI failed\n");
+		return;
+	}
+
+	if (out[0] == SCI_OPEN_CLOSE_OK)
+		return;
+	else if (out[0] == SCI_NOT_OPENED)
+		pr_info("Toshiba SCI not opened\n");
+	else if (out[0] == SCI_NOT_PRESENT)
+		pr_info("Toshiba SCI is not present\n");
+}
+
+static acpi_status sci_read(struct toshiba_acpi_dev *dev, u32 reg,
+			    u32 *out1, u32 *result)
+{
+	u32 in[HCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status = hci_raw(dev, in, out);
+	*out1 = out[2];
+	*result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+	return status;
+}
+
+static acpi_status sci_write(struct toshiba_acpi_dev *dev, u32 reg,
+			     u32 in1, u32 *result)
+{
+	u32 in[HCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status = hci_raw(dev, in, out);
+	*result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+	return status;
+}
+
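+/*
+ * Worked example (values taken from the defines above, layout inferred
+ * from sci_read()/sci_write()): reading SCI_ILLUMINATION sends the words
+ * { SCI_GET, 0x014e, 0, 0, 0, 0 } through hci_raw(); the status code
+ * comes back in out[0] and the register value in out[2].
+ */
+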
+/* Illumination support */
+static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
+{
+	u32 in[HCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	if (!sci_open(dev))
+		return 0;
+
+	status = hci_raw(dev, in, out);
+	sci_close(dev);
+	if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+		pr_err("ACPI call to query Illumination support failed\n");
+		return 0;
+	} else if (out[0] == HCI_NOT_SUPPORTED || out[1] != 1) {
 		pr_info("Illumination device not available\n");
 		return 0;
 	}
-	in[0] = 0xf400;
-	status = hci_raw(dev, in, out);
+
 	return 1;
 }
 
@@ -303,82 +412,270 @@
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, led_dev);
-	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
-	u32 out[HCI_WORDS];
+	u32 state, result;
 	acpi_status status;
 
 	/* First request : initialize communication. */
-	in[0] = 0xf100;
-	status = hci_raw(dev, in, out);
+	if (!sci_open(dev))
+		return;
+
+	/* Switch the illumination on/off */
+	state = brightness ? 1 : 0;
+	status = sci_write(dev, SCI_ILLUMINATION, state, &result);
+	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
-		pr_info("Illumination device not available\n");
+		pr_err("ACPI call for illumination failed\n");
+		return;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Illumination not supported\n");
 		return;
 	}
-
-	if (brightness) {
-		/* Switch the illumination on */
-		in[0] = 0xf400;
-		in[1] = 0x14e;
-		in[2] = 1;
-		status = hci_raw(dev, in, out);
-		if (ACPI_FAILURE(status)) {
-			pr_info("ACPI call for illumination failed\n");
-			return;
-		}
-	} else {
-		/* Switch the illumination off */
-		in[0] = 0xf400;
-		in[1] = 0x14e;
-		in[2] = 0;
-		status = hci_raw(dev, in, out);
-		if (ACPI_FAILURE(status)) {
-			pr_info("ACPI call for illumination failed.\n");
-			return;
-		}
-	}
-
-	/* Last request : close communication. */
-	in[0] = 0xf200;
-	in[1] = 0;
-	in[2] = 0;
-	hci_raw(dev, in, out);
 }
 
 static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, led_dev);
-	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
-	u32 out[HCI_WORDS];
+	u32 state, result;
 	acpi_status status;
-	enum led_brightness result;
 
 	/* First request : initialize communication. */
-	in[0] = 0xf100;
-	status = hci_raw(dev, in, out);
-	if (ACPI_FAILURE(status)) {
-		pr_info("Illumination device not available\n");
+	if (!sci_open(dev))
 		return LED_OFF;
-	}
 
 	/* Check the illumination */
-	in[0] = 0xf300;
-	in[1] = 0x14e;
-	status = hci_raw(dev, in, out);
-	if (ACPI_FAILURE(status)) {
-		pr_info("ACPI call for illumination failed.\n");
+	status = sci_read(dev, SCI_ILLUMINATION, &state, &result);
+	sci_close(dev);
+	if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call for illumination failed\n");
+		return LED_OFF;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Illumination not supported\n");
 		return LED_OFF;
 	}
 
-	result = out[2] ? LED_FULL : LED_OFF;
+	return state ? LED_FULL : LED_OFF;
+}
 
-	/* Last request : close communication. */
-	in[0] = 0xf200;
-	in[1] = 0;
-	in[2] = 0;
-	hci_raw(dev, in, out);
+/* KBD Illumination */
+static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
+{
+	u32 result;
+	acpi_status status;
 
-	return result;
+	if (!sci_open(dev))
+		return -EIO;
+
+	status = sci_write(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+	sci_close(dev);
+	if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to set KBD backlight status failed\n");
+		return -EIO;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Keyboard backlight status not supported\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
+{
+	u32 result;
+	acpi_status status;
+
+	if (!sci_open(dev))
+		return -EIO;
+
+	status = sci_read(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+	sci_close(dev);
+	if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to get KBD backlight status failed\n");
+		return -EIO;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Keyboard backlight status not supported\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
+{
+	struct toshiba_acpi_dev *dev = container_of(cdev,
+			struct toshiba_acpi_dev, kbd_led);
+	u32 state, result;
+	acpi_status status;
+
+	/* Check the keyboard backlight state */
+	status = hci_read1(dev, HCI_KBD_ILLUMINATION, &state, &result);
+	if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to get the keyboard backlight failed\n");
+		return LED_OFF;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Keyboard backlight not supported\n");
+		return LED_OFF;
+	}
+
+	return state ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
+				     enum led_brightness brightness)
+{
+	struct toshiba_acpi_dev *dev = container_of(cdev,
+			struct toshiba_acpi_dev, kbd_led);
+	u32 state, result;
+	acpi_status status;
+
+	/* Set the keyboard backlight state */
+	state = brightness ? 1 : 0;
+	status = hci_write1(dev, HCI_KBD_ILLUMINATION, state, &result);
+	if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to set KBD Illumination mode failed\n");
+		return;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		pr_info("Keyboard backlight not supported\n");
+		return;
+	}
+}
+
+/* TouchPad support */
+static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+	u32 result;
+	acpi_status status;
+
+	if (!sci_open(dev))
+		return -EIO;
+
+	status = sci_write(dev, SCI_TOUCHPAD, state, &result);
+	sci_close(dev);
+	if (ACPI_FAILURE(status)) {
+		pr_err("ACPI call to set the touchpad failed\n");
+		return -EIO;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+	u32 result;
+	acpi_status status;
+
+	if (!sci_open(dev))
+		return -EIO;
+
+	status = sci_read(dev, SCI_TOUCHPAD, state, &result);
+	sci_close(dev);
+	if (ACPI_FAILURE(status)) {
+		pr_err("ACPI call to query the touchpad failed\n");
+		return -EIO;
+	} else if (result == HCI_NOT_SUPPORTED) {
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* Eco Mode support */
+static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+{
+	acpi_status status;
+	u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+	u32 out[HCI_WORDS];
+
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+		pr_info("ACPI call to get ECO led failed\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev)
+{
+	struct toshiba_acpi_dev *dev = container_of(cdev,
+			struct toshiba_acpi_dev, eco_led);
+	u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to get ECO led failed\n");
+		return LED_OFF;
+	}
+
+	return out[2] ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
+				     enum led_brightness brightness)
+{
+	struct toshiba_acpi_dev *dev = container_of(cdev,
+			struct toshiba_acpi_dev, eco_led);
+	u32 in[HCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	/* Switch the Eco Mode led on/off */
+	in[2] = (brightness) ? 1 : 0;
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to set ECO led failed\n");
+		return;
+	}
+}
+
+/* Accelerometer support */
+static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
+{
+	u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	/* Check if the accelerometer call exists;
+	 * this call also serves as initialization.
+	 */
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to query the accelerometer failed\n");
+		return -EIO;
+	} else if (out[0] == HCI_DATA_NOT_AVAILABLE ||
+		   out[0] == HCI_NOT_INITIALIZED) {
+		pr_err("Accelerometer not initialized\n");
+		return -EIO;
+	} else if (out[0] == HCI_NOT_SUPPORTED) {
+		pr_info("Accelerometer not supported\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
+				      u32 *xy, u32 *z)
+{
+	u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
+	u32 out[HCI_WORDS];
+	acpi_status status;
+
+	/* Check the Accelerometer status */
+	status = hci_raw(dev, in, out);
+	if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+		pr_err("ACPI call to query the accelerometer failed\n");
+		return -EIO;
+	}
+
+	*xy = out[2];
+	*z = out[4];
+
+	return 0;
 }
 
 /* Bluetooth rfkill handlers */
@@ -904,6 +1201,177 @@
 	.update_status  = set_lcd_status,
 };
 
+/*
+ * Sysfs files
+ */
+
+static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	int mode = -1;
+	int time = -1;
+
+	if (sscanf(buf, "%i", &mode) != 1 || (mode != 1 && mode != 2))
+		return -EINVAL;
+
+	/* Set the Keyboard Backlight Mode where:
+	 * Mode - Auto (2) | FN-Z (1)
+	 *	Auto - KBD backlight turns off automatically after the given time
+	 *	FN-Z - KBD backlight "toggles" when the hotkey is pressed
+	 */
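+	/*
+	 * The status word packs both settings (see the _show callbacks
+	 * below): bits 0-2 hold the mode and bits 16 and up the timeout
+	 * in seconds, so e.g. a 30 second timeout in FN-Z mode reads back
+	 * as (30 << HCI_MISC_SHIFT) | SCI_KBD_MODE_FNZ (illustrative
+	 * values only).
+	 */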
+	if (mode != -1 && toshiba->kbd_mode != mode) {
+		time = toshiba->kbd_time << HCI_MISC_SHIFT;
+		time = time + toshiba->kbd_mode;
+		if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+			return -EIO;
+		toshiba->kbd_mode = mode;
+	}
+
+	return count;
+}
+
+static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	u32 time;
+
+	if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+		return -EIO;
+
+	return sprintf(buf, "%i\n", time & 0x07);
+}
+
+static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	int time = -1;
+
+	if (sscanf(buf, "%i", &time) != 1 || time < 0 || time > 60)
+		return -EINVAL;
+
+	/* Set the Keyboard Backlight Timeout: 0-60 seconds */
+	if (time != -1 && toshiba->kbd_time != time) {
+		time = time << HCI_MISC_SHIFT;
+		time = (toshiba->kbd_mode == SCI_KBD_MODE_AUTO) ?
+							time + 1 : time + 2;
+		if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+			return -EIO;
+		toshiba->kbd_time = time >> HCI_MISC_SHIFT;
+	}
+
+	return count;
+}
+
+static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	u32 time;
+
+	if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+		return -EIO;
+
+	return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
+}
+
+static ssize_t toshiba_touchpad_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	int state;
+
+	/* Set the TouchPad on/off, 0 - Disable | 1 - Enable */
+	if (sscanf(buf, "%i", &state) == 1 && (state == 0 || state == 1)) {
+		if (toshiba_touchpad_set(toshiba, state) < 0)
+			return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t toshiba_touchpad_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	u32 state;
+	int ret;
+
+	ret = toshiba_touchpad_get(toshiba, &state);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", state);
+}
+
+static ssize_t toshiba_position_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	u32 xyval, zval, tmp;
+	u16 x, y, z;
+	int ret;
+
+	xyval = zval = 0;
+	ret = toshiba_accelerometer_get(toshiba, &xyval, &zval);
+	if (ret < 0)
+		return ret;
+
+	x = xyval & HCI_ACCEL_MASK;
+	tmp = xyval >> HCI_MISC_SHIFT;
+	y = tmp & HCI_ACCEL_MASK;
+	z = zval & HCI_ACCEL_MASK;
+
+	return sprintf(buf, "%d %d %d\n", x, y, z);
+}
+
+static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
+		   toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
+static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
+		   toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
+static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
+		   toshiba_touchpad_show, toshiba_touchpad_store);
+static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+
+static struct attribute *toshiba_attributes[] = {
+	&dev_attr_kbd_backlight_mode.attr,
+	&dev_attr_kbd_backlight_timeout.attr,
+	&dev_attr_touchpad.attr,
+	&dev_attr_position.attr,
+	NULL,
+};
+
+static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
+					struct attribute *attr, int idx)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
+	bool exists = true;
+
+	if (attr == &dev_attr_kbd_backlight_mode.attr)
+		exists = (drv->kbd_illum_supported) ? true : false;
+	else if (attr == &dev_attr_kbd_backlight_timeout.attr)
+		exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
+	else if (attr == &dev_attr_touchpad.attr)
+		exists = (drv->touchpad_supported) ? true : false;
+	else if (attr == &dev_attr_position.attr)
+		exists = (drv->accelerometer_supported) ? true : false;
+
+	return exists ? attr->mode : 0;
+}
+
+static struct attribute_group toshiba_attr_group = {
+	.is_visible = toshiba_sysfs_is_visible,
+	.attrs = toshiba_attributes,
+};
+
 static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
 				      struct serio *port)
 {
@@ -1106,6 +1574,10 @@
 
 	remove_toshiba_proc_entries(dev);
 
+	if (dev->sysfs_created)
+		sysfs_remove_group(&dev->acpi_dev->dev.kobj,
+				   &toshiba_attr_group);
+
 	if (dev->ntfy_supported) {
 		i8042_remove_filter(toshiba_acpi_i8042_filter);
 		cancel_work_sync(&dev->hotkey_work);
@@ -1127,6 +1599,12 @@
 	if (dev->illumination_supported)
 		led_classdev_unregister(&dev->led_dev);
 
+	if (dev->kbd_led_registered)
+		led_classdev_unregister(&dev->kbd_led);
+
+	if (dev->eco_supported)
+		led_classdev_unregister(&dev->eco_led);
+
 	if (toshiba_acpi)
 		toshiba_acpi = NULL;
 
@@ -1172,6 +1650,7 @@
 	dev->acpi_dev = acpi_dev;
 	dev->method_hci = hci_method;
 	acpi_dev->driver_data = dev;
+	dev_set_drvdata(&acpi_dev->dev, dev);
 
 	if (toshiba_acpi_setup_keyboard(dev))
 		pr_info("Unable to activate hotkeys\n");
@@ -1212,6 +1691,40 @@
 			dev->illumination_supported = 1;
 	}
 
+	if (toshiba_eco_mode_available(dev)) {
+		dev->eco_led.name = "toshiba::eco_mode";
+		dev->eco_led.max_brightness = 1;
+		dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
+		dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
+		if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
+			dev->eco_supported = 1;
+	}
+
+	ret = toshiba_kbd_illum_status_get(dev, &dummy);
+	if (!ret) {
+		dev->kbd_time = dummy >> HCI_MISC_SHIFT;
+		dev->kbd_mode = dummy & 0x07;
+	}
+	dev->kbd_illum_supported = !ret;
+	/*
+	 * Only register the LED if KBD illumination is supported
+	 * and the keyboard backlight operation mode is set to FN-Z
+	 */
+	if (dev->kbd_illum_supported && dev->kbd_mode == SCI_KBD_MODE_FNZ) {
+		dev->kbd_led.name = "toshiba::kbd_backlight";
+		dev->kbd_led.max_brightness = 1;
+		dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
+		dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
+		if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
+			dev->kbd_led_registered = 1;
+	}
+
+	ret = toshiba_touchpad_get(dev, &dummy);
+	dev->touchpad_supported = !ret;
+
+	ret = toshiba_accelerometer_supported(dev);
+	dev->accelerometer_supported = !ret;
+
 	/* Determine whether or not BIOS supports fan and video interfaces */
 
 	ret = get_video_status(dev, &dummy);
@@ -1220,6 +1733,14 @@
 	ret = get_fan_status(dev, &dummy);
 	dev->fan_supported = !ret;
 
+	ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
+				 &toshiba_attr_group);
+	if (ret) {
+		dev->sysfs_created = 0;
+		goto error;
+	}
+	dev->sysfs_created = !ret;
+
 	create_toshiba_proc_entries(dev);
 
 	toshiba_acpi = dev;
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index ab08ca7..c3750c5 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -123,6 +123,7 @@
 #define BCM590XX_REG_RANGES(_name, _ranges) \
 	{ \
 		.name = #_name, \
+		.n_voltages = 64, \
 		.n_linear_ranges = ARRAY_SIZE(_ranges), \
 		.linear_ranges = _ranges, \
 	}
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 808b3aa..f19a30f 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -192,13 +192,11 @@
 	if (!ramp_enable)
 		goto ramp_disable;
 
-	if (enable_shift) {
-		ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
-					1 << enable_shift, 1 << enable_shift);
-		if (ret) {
-			dev_err(&rdev->dev, "failed to enable ramp rate\n");
-			return ret;
-		}
+	ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+				 1 << enable_shift, 1 << enable_shift);
+	if (ret) {
+		dev_err(&rdev->dev, "failed to enable ramp rate\n");
+		return ret;
 	}
 
 	ramp_val = get_ramp_delay(ramp_delay);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 68fd547..e713c16 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -202,13 +202,11 @@
 	if (!ramp_enable)
 		goto ramp_disable;
 
-	if (enable_shift) {
-		ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
-					1 << enable_shift, 1 << enable_shift);
-		if (ret) {
-			dev_err(&rdev->dev, "failed to enable ramp rate\n");
-			return ret;
-		}
+	ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+				 1 << enable_shift, 1 << enable_shift);
+	if (ret) {
+		dev_err(&rdev->dev, "failed to enable ramp rate\n");
+		return ret;
 	}
 
 	ramp_val = get_ramp_delay(ramp_delay);
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index f05bada..92f19a0 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -964,6 +964,7 @@
 		config.driver_data = s5m8767;
 		config.regmap = iodev->regmap_pmic;
 		config.of_node = pdata->regulators[i].reg_node;
+		config.ena_gpio = config.ena_gpio_flags = 0;
 		if (pdata->regulators[i].ext_control_gpio)
 			s5m8767_regulator_config_ext_control(s5m8767,
 					&pdata->regulators[i], &config);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c8bd092..02832d6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,9 @@
 	  You can override this choice by specifying "scsi_mod.scan=sync"
 	  or async on the kernel's command line.
 
+	  Note that this setting also affects whether resuming from
+	  system suspend will be performed asynchronously.
+
 menu "SCSI Transports"
 	depends on SCSI
 
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 788c4fe..68fb66f 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -684,6 +684,20 @@
 	qlt_xmit_tm_rsp(mcmd);
 }
 
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!cmd->sg_mapped)
+		return;
+
+	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+	cmd->sg_mapped = 0;
+}
+
 /* Local pointer to allocated TCM configfs fabric module */
 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@
 	}
 	se_tpg = &tpg->se_tpg;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(se_sess)) {
 		pr_err("Unable to initialize struct se_session\n");
 		return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
 	.queue_status			= tcm_qla2xxx_queue_status,
 	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
 	.queue_status			= tcm_qla2xxx_queue_status,
 	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c4d632c..88d46fe 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -91,6 +91,15 @@
 ASYNC_DOMAIN(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
 
+/*
+ * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+ * asynchronous system resume operations.  It is marked 'exclusive' to avoid
+ * being included in the async_synchronize_full() that is invoked by
+ * dpm_resume()
+ */
+ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+EXPORT_SYMBOL(scsi_sd_pm_domain);
+
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
  * encouraged once assigned by ANSI/INCITS T10
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5681c05..65a123d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,7 +184,7 @@
  */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 		 int data_direction, void *buffer, unsigned bufflen,
-		 unsigned char *sense, int timeout, int retries, int flags,
+		 unsigned char *sense, int timeout, int retries, u64 flags,
 		 int *resid)
 {
 	struct request *req;
@@ -235,7 +235,7 @@
 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
 		     int data_direction, void *buffer, unsigned bufflen,
 		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-		     int *resid, int flags)
+		     int *resid, u64 flags)
 {
 	char *sense = NULL;
 	int result;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 001e9ce..7454498 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -18,35 +18,77 @@
 
 #ifdef CONFIG_PM_SLEEP
 
-static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
+static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
 {
+	return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+
+static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
+{
+	return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+
+static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
+{
+	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+
+static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
+{
+	return pm && pm->resume ? pm->resume(dev) : 0;
+}
+
+static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
+{
+	return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+
+static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
+{
+	return pm && pm->restore ? pm->restore(dev) : 0;
+}
+
+static int scsi_dev_type_suspend(struct device *dev,
+		int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err;
 
+	/* flush pending in-flight resume operations, suspend is synchronous */
+	async_synchronize_full_domain(&scsi_sd_pm_domain);
+
 	err = scsi_device_quiesce(to_scsi_device(dev));
 	if (err == 0) {
-		if (cb) {
-			err = cb(dev);
-			if (err)
-				scsi_device_resume(to_scsi_device(dev));
-		}
+		err = cb(dev, pm);
+		if (err)
+			scsi_device_resume(to_scsi_device(dev));
 	}
 	dev_dbg(dev, "scsi suspend: %d\n", err);
 	return err;
 }
 
-static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
+static int scsi_dev_type_resume(struct device *dev,
+		int (*cb)(struct device *, const struct dev_pm_ops *))
 {
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err = 0;
 
-	if (cb)
-		err = cb(dev);
+	err = cb(dev, pm);
 	scsi_device_resume(to_scsi_device(dev));
 	dev_dbg(dev, "scsi resume: %d\n", err);
+
+	if (err == 0) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
 	return err;
 }
 
 static int
-scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
+scsi_bus_suspend_common(struct device *dev,
+		int (*cb)(struct device *, const struct dev_pm_ops *))
 {
 	int err = 0;
 
@@ -66,20 +108,54 @@
 	return err;
 }
 
-static int
-scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
+static void async_sdev_resume(void *dev, async_cookie_t cookie)
 {
-	int err = 0;
+	scsi_dev_type_resume(dev, do_scsi_resume);
+}
 
-	if (scsi_is_sdev_device(dev))
-		err = scsi_dev_type_resume(dev, cb);
+static void async_sdev_thaw(void *dev, async_cookie_t cookie)
+{
+	scsi_dev_type_resume(dev, do_scsi_thaw);
+}
 
-	if (err == 0) {
+static void async_sdev_restore(void *dev, async_cookie_t cookie)
+{
+	scsi_dev_type_resume(dev, do_scsi_restore);
+}
+
+static int scsi_bus_resume_common(struct device *dev,
+		int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+	async_func_t fn;
+
+	if (!scsi_is_sdev_device(dev))
+		fn = NULL;
+	else if (cb == do_scsi_resume)
+		fn = async_sdev_resume;
+	else if (cb == do_scsi_thaw)
+		fn = async_sdev_thaw;
+	else if (cb == do_scsi_restore)
+		fn = async_sdev_restore;
+	else
+		fn = NULL;
+
+	if (fn) {
+		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+		/*
+		 * If a user has disabled async probing a likely reason
+		 * is due to a storage enclosure that does not inject
+		 * staggered spin-ups.  For safety, make resume
+		 * synchronous as well in that case.
+		 */
+		if (strncmp(scsi_scan_type, "async", 5) != 0)
+			async_synchronize_full_domain(&scsi_sd_pm_domain);
+	} else {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
 	}
-	return err;
+	return 0;
 }
 
 static int scsi_bus_prepare(struct device *dev)
@@ -97,38 +173,32 @@
 
 static int scsi_bus_suspend(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+	return scsi_bus_suspend_common(dev, do_scsi_suspend);
 }
 
 static int scsi_bus_resume(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
+	return scsi_bus_resume_common(dev, do_scsi_resume);
 }
 
 static int scsi_bus_freeze(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+	return scsi_bus_suspend_common(dev, do_scsi_freeze);
 }
 
 static int scsi_bus_thaw(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
+	return scsi_bus_resume_common(dev, do_scsi_thaw);
 }
 
 static int scsi_bus_poweroff(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+	return scsi_bus_suspend_common(dev, do_scsi_poweroff);
 }
 
 static int scsi_bus_restore(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
+	return scsi_bus_resume_common(dev, do_scsi_restore);
 }
 
 #else /* CONFIG_PM_SLEEP */
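
The scsi_pm.c rework routes every sleep transition through a callback of type int (*)(struct device *, const struct dev_pm_ops *), then schedules resume, thaw and restore asynchronously in the dedicated scsi_sd_pm_domain so that disks can spin up in parallel, while suspend stays synchronous and first drains any in-flight resume work. A minimal sketch of the dispatch idea, picking an async handler by comparing the callback pointer (the async scheduling itself is not modeled here):

#include <stdio.h>

struct device { const char *name; };
struct dev_pm_ops {
	int (*resume)(struct device *);
	int (*thaw)(struct device *);
};

typedef int (*pm_cb)(struct device *, const struct dev_pm_ops *);

static int do_resume(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->thaw ? pm->thaw(dev) : 0;
}

/* pick a handler by comparing the callback pointer, as scsi_bus_resume_common() does */
static const char *classify(pm_cb cb)
{
	if (cb == do_resume)
		return "async_sdev_resume";
	if (cb == do_thaw)
		return "async_sdev_thaw";
	return "run synchronously";
}

int main(void)
{
	printf("resume -> %s\n", classify(do_resume));
	printf("thaw   -> %s\n", classify(do_thaw));
	return 0;
}
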
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f079a59..48e5b65 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -112,6 +112,7 @@
 #endif /* CONFIG_PROC_FS */
 
 /* scsi_scan.c */
+extern char scsi_scan_type[];
 extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
 				   unsigned int, unsigned int, int);
@@ -166,6 +167,7 @@
 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
 #endif /* CONFIG_PM_RUNTIME */
 
+extern struct async_domain scsi_sd_pm_domain;
 extern struct async_domain scsi_sd_probe_domain;
 
 /* 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 27f96d5..e02b3aa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@
 #define SCSI_SCAN_TYPE_DEFAULT "sync"
 #endif
 
-static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
 
 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
 MODULE_PARM_DESC(scan, "sync, async or none");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 89e6c04..efcbcd1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3026,6 +3026,7 @@
 	devt = disk_devt(sdkp->disk);
 	scsi_autopm_get_device(sdkp->device);
 
+	async_synchronize_full_domain(&scsi_sd_pm_domain);
 	async_synchronize_full_domain(&scsi_sd_probe_domain);
 	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
 	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index efe1960..60f2b41 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -383,7 +383,7 @@
 
 config SPI_QUP
 	tristate "Qualcomm SPI controller with QUP interface"
-	depends on ARCH_MSM_DT || (ARM && COMPILE_TEST)
+	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	help
 	  Qualcomm Universal Peripheral (QUP) core is an AHB slave that
 	  provides a common data path (an output FIFO and an input FIFO)
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 6fb2b75..e767f58 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -441,7 +441,8 @@
 
 	m->actual_length = espi_trans.actual_length;
 	m->status = espi_trans.status;
-	m->complete(m->context);
+	if (m->complete)
+		m->complete(m->context);
 }
 
 static int fsl_espi_setup(struct spi_device *spi)
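
This and the following spi-fsl-spi, spi-mpc512x-psc, spi-mpc52xx-psc, spi-mpc52xx, spi-sh and spi-txx9 hunks all make the same fix: spi_message.complete is an optional callback, so it must be checked before being invoked. A minimal runnable form of the guarded-call pattern (spi_message here is a reduced stand-in for the kernel structure):

#include <stdio.h>
#include <stddef.h>

struct spi_message {
	void (*complete)(void *context);	/* optional completion callback */
	void *context;
	int status;
};

static void finish_message(struct spi_message *m, int status)
{
	m->status = status;
	if (m->complete)		/* only invoke the callback when one was supplied */
		m->complete(m->context);
}

static void my_complete(void *context)
{
	printf("done: %s\n", (const char *)context);
}

int main(void)
{
	struct spi_message with_cb = { my_complete, "msg with callback", 0 };
	struct spi_message without_cb = { NULL, NULL, 0 };

	finish_message(&with_cb, 0);
	finish_message(&without_cb, 0);	/* would have crashed without the NULL check */
	return 0;
}
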
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index f35488e..b3e7775 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -408,7 +408,8 @@
 	}
 
 	m->status = status;
-	m->complete(m->context);
+	if (m->complete)
+		m->complete(m->context);
 
 	if (status || !cs_change) {
 		ndelay(nsecs);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 3822eef..577d23a 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -300,7 +300,8 @@
 	}
 
 	m->status = status;
-	m->complete(m->context);
+	if (m->complete)
+		m->complete(m->context);
 
 	if (status || !cs_change)
 		mpc512x_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 3d18d93..de532aa 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -247,7 +247,8 @@
 		}
 
 		m->status = status;
-		m->complete(m->context);
+		if (m->complete)
+			m->complete(m->context);
 
 		if (status || !cs_change)
 			mpc52xx_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index aac2a5d..b07db4b 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -234,7 +234,8 @@
 		dev_err(&ms->master->dev, "mode fault\n");
 		mpc52xx_spi_chipsel(ms, 0);
 		ms->message->status = -EIO;
-		ms->message->complete(ms->message->context);
+		if (ms->message->complete)
+			ms->message->complete(ms->message->context);
 		ms->state = mpc52xx_spi_fsmstate_idle;
 		return FSM_CONTINUE;
 	}
@@ -288,7 +289,8 @@
 		ms->msg_count++;
 		mpc52xx_spi_chipsel(ms, 0);
 		ms->message->status = 0;
-		ms->message->complete(ms->message->context);
+		if (ms->message->complete)
+			ms->message->complete(ms->message->context);
 		ms->state = mpc52xx_spi_fsmstate_idle;
 		return FSM_CONTINUE;
 	}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 2941c5b..4dc77df 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1379,12 +1379,13 @@
 
 	INIT_LIST_HEAD(&mcspi->ctx.cs);
 
-	mcspi->dma_channels = kcalloc(master->num_chipselect,
-			sizeof(struct omap2_mcspi_dma),
-			GFP_KERNEL);
-
-	if (mcspi->dma_channels == NULL)
+	mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+					   sizeof(struct omap2_mcspi_dma),
+					   GFP_KERNEL);
+	if (mcspi->dma_channels == NULL) {
+		status = -ENOMEM;
 		goto free_master;
+	}
 
 	for (i = 0; i < master->num_chipselect; i++) {
 		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
@@ -1426,7 +1427,7 @@
 	}
 
 	if (status < 0)
-		goto dma_chnl_free;
+		goto free_master;
 
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
@@ -1444,8 +1445,6 @@
 
 disable_pm:
 	pm_runtime_disable(&pdev->dev);
-dma_chnl_free:
-	kfree(mcspi->dma_channels);
 free_master:
 	spi_master_put(master);
 	return status;
@@ -1453,19 +1452,12 @@
 
 static int omap2_mcspi_remove(struct platform_device *pdev)
 {
-	struct spi_master	*master;
-	struct omap2_mcspi	*mcspi;
-	struct omap2_mcspi_dma	*dma_channels;
-
-	master = platform_get_drvdata(pdev);
-	mcspi = spi_master_get_devdata(master);
-	dma_channels = mcspi->dma_channels;
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
 
 	pm_runtime_put_sync(mcspi->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	kfree(dma_channels);
-
 	return 0;
 }
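
Switching omap2-mcspi to devm_kcalloc() ties the dma_channels array to the device's lifetime, which is what lets the dma_chnl_free error label and the kfree() in omap2_mcspi_remove() disappear. A toy sketch of that ownership model, with a per-parent list of allocations released in one place:

#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 8

/* toy stand-in for devres: allocations are remembered per parent and freed together */
struct parent {
	void *res[MAX_RES];
	int nres;
};

static void *managed_calloc(struct parent *p, size_t n, size_t size)
{
	void *ptr;

	if (p->nres == MAX_RES)
		return NULL;
	ptr = calloc(n, size);
	if (ptr)
		p->res[p->nres++] = ptr;
	return ptr;
}

static void parent_teardown(struct parent *p)
{
	int i;

	/* no per-caller free() needed; everything is released here */
	for (i = 0; i < p->nres; i++)
		free(p->res[i]);
	p->nres = 0;
}

int main(void)
{
	struct parent dev = { .nres = 0 };
	int *dma_channels = managed_calloc(&dev, 4, sizeof(*dma_channels));

	printf("allocated 4 channel slots at %p\n", (void *)dma_channels);
	parent_teardown(&dev);		/* plays the role of device unbind */
	return 0;
}
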
 
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index f6f2c70..03edf5e 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -322,7 +322,8 @@
 		spin_lock_irqsave(&ss->lock, flags);
 
 		mesg->status = 0;
-		mesg->complete(mesg->context);
+		if (mesg->complete)
+			mesg->complete(mesg->context);
 	}
 
 	clear_fifo(ss);
@@ -340,7 +341,8 @@
 
  error:
 	mesg->status = ret;
-	mesg->complete(mesg->context);
+	if (mesg->complete)
+		mesg->complete(mesg->context);
 
 	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
 			 SPI_SH_CR1);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 820b499..5f183ba 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -262,7 +262,8 @@
 
 exit:
 	m->status = status;
-	m->complete(m->context);
+	if (m->complete)
+		m->complete(m->context);
 
 	/* normally deactivate chipselect ... unless no error and
 	 * cs_change has hinted that the next message will probably
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index a54b506..b87b246 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -99,16 +99,7 @@
 		struct iovec   *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
 		unsigned int    niov = tx->tx_niov;
 #endif
-		struct msghdr msg = {
-			.msg_name       = NULL,
-			.msg_namelen    = 0,
-			.msg_iov	= scratchiov,
-			.msg_iovlen     = niov,
-			.msg_control    = NULL,
-			.msg_controllen = 0,
-			.msg_flags      = MSG_DONTWAIT
-		};
-		mm_segment_t oldmm = get_fs();
+		struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
 		int  i;
 
 		for (nob = i = 0; i < niov; i++) {
@@ -120,9 +111,7 @@
 		    nob < tx->tx_resid)
 			msg.msg_flags |= MSG_MORE;
 
-		set_fs (KERNEL_DS);
-		rc = sock_sendmsg(sock, &msg, nob);
-		set_fs (oldmm);
+		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
 	}
 	return rc;
 }
@@ -174,16 +163,7 @@
 		struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
 		unsigned int  niov = tx->tx_nkiov;
 #endif
-		struct msghdr msg = {
-			.msg_name       = NULL,
-			.msg_namelen    = 0,
-			.msg_iov	= scratchiov,
-			.msg_iovlen     = niov,
-			.msg_control    = NULL,
-			.msg_controllen = 0,
-			.msg_flags      = MSG_DONTWAIT
-		};
-		mm_segment_t  oldmm = get_fs();
+		struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
 		int	   i;
 
 		for (nob = i = 0; i < niov; i++) {
@@ -196,9 +176,7 @@
 		    nob < tx->tx_resid)
 			msg.msg_flags |= MSG_MORE;
 
-		set_fs (KERNEL_DS);
-		rc = sock_sendmsg(sock, &msg, nob);
-		set_fs (oldmm);
+		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
 
 		for (i = 0; i < niov; i++)
 			kunmap(kiov[i].kiov_page);
@@ -237,15 +215,8 @@
 #endif
 	struct iovec *iov = conn->ksnc_rx_iov;
 	struct msghdr msg = {
-		.msg_name       = NULL,
-		.msg_namelen    = 0,
-		.msg_iov	= scratchiov,
-		.msg_iovlen     = niov,
-		.msg_control    = NULL,
-		.msg_controllen = 0,
 		.msg_flags      = 0
 	};
-	mm_segment_t oldmm = get_fs();
 	int	  nob;
 	int	  i;
 	int	  rc;
@@ -263,10 +234,8 @@
 	}
 	LASSERT (nob <= conn->ksnc_rx_nob_wanted);
 
-	set_fs (KERNEL_DS);
-	rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
-	/* NB this is just a boolean..........................^ */
-	set_fs (oldmm);
+	rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+		(struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
 
 	saved_csum = 0;
 	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -355,14 +324,8 @@
 #endif
 	lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
 	struct msghdr msg = {
-		.msg_name       = NULL,
-		.msg_namelen    = 0,
-		.msg_iov	= scratchiov,
-		.msg_control    = NULL,
-		.msg_controllen = 0,
 		.msg_flags      = 0
 	};
-	mm_segment_t oldmm = get_fs();
 	int	  nob;
 	int	  i;
 	int	  rc;
@@ -370,13 +333,14 @@
 	void	*addr;
 	int	  sum;
 	int	  fragnob;
+	int n;
 
 	/* NB we can't trust socket ops to either consume our iovs
 	 * or leave them alone. */
 	addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
 	if (addr != NULL) {
 		nob = scratchiov[0].iov_len;
-		msg.msg_iovlen = 1;
+		n = 1;
 
 	} else {
 		for (nob = i = 0; i < niov; i++) {
@@ -384,15 +348,13 @@
 			scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
 						 kiov[i].kiov_offset;
 		}
-		msg.msg_iovlen = niov;
+		n = niov;
 	}
 
 	LASSERT (nob <= conn->ksnc_rx_nob_wanted);
 
-	set_fs (KERNEL_DS);
-	rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
-	/* NB this is just a boolean.......................^ */
-	set_fs (oldmm);
+	rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+			(struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
 
 	if (conn->ksnc_msg.ksm_csum != 0) {
 		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index e6069d7..7539fe1 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -265,17 +265,11 @@
 	 * empty enough to take the whole message immediately */
 
 	for (;;) {
-		struct iovec  iov = {
+		struct kvec  iov = {
 			.iov_base = buffer,
 			.iov_len  = nob
 		};
 		struct msghdr msg = {
-			.msg_name       = NULL,
-			.msg_namelen    = 0,
-			.msg_iov	= &iov,
-			.msg_iovlen     = 1,
-			.msg_control    = NULL,
-			.msg_controllen = 0,
 			.msg_flags      = (timeout == 0) ? MSG_DONTWAIT : 0
 		};
 
@@ -297,11 +291,9 @@
 			}
 		}
 
-		set_fs (KERNEL_DS);
 		then = jiffies;
-		rc = sock_sendmsg (sock, &msg, iov.iov_len);
+		rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
 		ticks -= jiffies - then;
-		set_fs (oldmm);
 
 		if (rc == nob)
 			return 0;
@@ -338,17 +330,11 @@
 	LASSERT (ticks > 0);
 
 	for (;;) {
-		struct iovec  iov = {
+		struct kvec  iov = {
 			.iov_base = buffer,
 			.iov_len  = nob
 		};
 		struct msghdr msg = {
-			.msg_name       = NULL,
-			.msg_namelen    = 0,
-			.msg_iov	= &iov,
-			.msg_iovlen     = 1,
-			.msg_control    = NULL,
-			.msg_controllen = 0,
 			.msg_flags      = 0
 		};
 
@@ -367,11 +353,9 @@
 			return rc;
 		}
 
-		set_fs(KERNEL_DS);
 		then = jiffies;
-		rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
+		rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
 		ticks -= jiffies - then;
-		set_fs(oldmm);
 
 		if (rc < 0)
 			return rc;
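
Both Lustre files replace the set_fs(KERNEL_DS)/sock_sendmsg() combination with kernel_sendmsg()/kernel_recvmsg(), which take a kvec describing kernel buffers directly and therefore need no address-space override. The calling convention is the same scatter/gather shape as userspace writev(); a tiny runnable illustration of gathering two buffers into one call:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	/* two separate buffers gathered into one call, like a kvec handed to kernel_sendmsg() */
	struct iovec iov[2] = {
		{ .iov_base = "hello, ", .iov_len = 7 },
		{ .iov_base = "world\n",  .iov_len = 6 },
	};

	if (writev(STDOUT_FILENO, iov, 2) < 0)
		perror("writev");
	return 0;
}
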
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index ab06891..80d48b5 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -115,27 +115,6 @@
 	return rc;
 }
 
-static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
-{
-	struct inode *inode = dentry->d_inode;
-	struct ptlrpc_request *request;
-	char *symname;
-	int rc;
-
-	CDEBUG(D_VFSTRACE, "VFS Op\n");
-
-	ll_inode_size_lock(inode);
-	rc = ll_readlink_internal(inode, &request, &symname);
-	if (rc)
-		GOTO(out, rc);
-
-	rc = vfs_readlink(dentry, buffer, buflen, symname);
- out:
-	ptlrpc_req_finished(request);
-	ll_inode_size_unlock(inode);
-	return rc;
-}
-
 static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -175,7 +154,7 @@
 }
 
 struct inode_operations ll_fast_symlink_inode_operations = {
-	.readlink	= ll_readlink,
+	.readlink	= generic_readlink,
 	.setattr	= ll_setattr,
 	.follow_link	= ll_follow_link,
 	.put_link	= ll_put_link,
diff --git a/drivers/staging/media/msi3101/msi001.c b/drivers/staging/media/msi3101/msi001.c
index ac43bae..bd0b93c 100644
--- a/drivers/staging/media/msi3101/msi001.c
+++ b/drivers/staging/media/msi3101/msi001.c
@@ -201,7 +201,7 @@
 	dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
 			__func__, bandwidth_lut[i].freq);
 
-	f_vco = (f_rf + f_if + f_if1) * lo_div;
+	f_vco = (u64) (f_rf + f_if + f_if1) * lo_div;
 	tmp64 = f_vco;
 	m = do_div(tmp64, F_REF * R_REF);
 	n = (unsigned int) tmp64;
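
The msi001 change widens the arithmetic before multiplying: f_rf, f_if, f_if1 and lo_div are 32-bit, and their product can exceed 32 bits at high RF frequencies, so without the (u64) cast the multiplication wraps before the result ever reaches the 64-bit f_vco. A short demonstration with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t f_rf_sum = 1000000000u;	/* ~1 GHz, illustrative */
	uint32_t lo_div = 16;

	uint64_t truncated = f_rf_sum * lo_div;		  /* 32-bit multiply, wraps */
	uint64_t widened   = (uint64_t)f_rf_sum * lo_div; /* 64-bit multiply, correct */

	printf("truncated: %llu\n", (unsigned long long)truncated);
	printf("widened:   %llu\n", (unsigned long long)widened);
	return 0;
}
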
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 260d1b7..65d351f 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -913,7 +913,6 @@
 
 	/* set tuner, subdev, filters according to sampling rate */
 	bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
-	bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
 	if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
 		bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
 		v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
@@ -1078,6 +1077,7 @@
 static int msi3101_stop_streaming(struct vb2_queue *vq)
 {
 	struct msi3101_state *s = vb2_get_drv_priv(vq);
+	int ret;
 	dev_dbg(&s->udev->dev, "%s:\n", __func__);
 
 	if (mutex_lock_interruptible(&s->v4l2_lock))
@@ -1090,17 +1090,22 @@
 
 	/* according to tests, at least 700us delay is required  */
 	msleep(20);
-	msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+	ret = msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+	if (ret)
+		goto err_sleep_tuner;
 
 	/* sleep USB IF / ADC */
-	msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+	ret = msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+	if (ret)
+		goto err_sleep_tuner;
 
+err_sleep_tuner:
 	/* sleep tuner */
-	v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
+	ret = v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
 
 	mutex_unlock(&s->v4l2_lock);
 
-	return 0;
+	return ret;
 }
 
 static struct vb2_ops msi3101_vb2_ops = {
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 773d8ca..de692d7 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -86,7 +86,6 @@
 	struct stub_device *sdev = dev_get_drvdata(dev);
 	int sockfd = 0;
 	struct socket *socket;
-	ssize_t err = -EINVAL;
 	int rv;
 
 	if (!sdev) {
@@ -99,6 +98,7 @@
 		return -EINVAL;
 
 	if (sockfd != -1) {
+		int err;
 		dev_info(dev, "stub up\n");
 
 		spin_lock_irq(&sdev->ud.lock);
@@ -108,7 +108,7 @@
 			goto err;
 		}
 
-		socket = sockfd_to_socket(sockfd);
+		socket = sockfd_lookup(sockfd, &err);
 		if (!socket)
 			goto err;
 
@@ -141,7 +141,7 @@
 
 err:
 	spin_unlock_irq(&sdev->ud.lock);
-	return err;
+	return -EINVAL;
 }
 static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
 
@@ -211,7 +211,7 @@
 	 * not touch NULL socket.
 	 */
 	if (ud->tcp_socket) {
-		fput(ud->tcp_socket->file);
+		sockfd_put(ud->tcp_socket);
 		ud->tcp_socket = NULL;
 	}
 
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 184fa70..facaaf0 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -382,31 +382,6 @@
 }
 EXPORT_SYMBOL_GPL(usbip_recv);
 
-struct socket *sockfd_to_socket(unsigned int sockfd)
-{
-	struct socket *socket;
-	struct file *file;
-	struct inode *inode;
-
-	file = fget(sockfd);
-	if (!file) {
-		pr_err("invalid sockfd\n");
-		return NULL;
-	}
-
-	inode = file_inode(file);
-
-	if (!inode || !S_ISSOCK(inode->i_mode)) {
-		fput(file);
-		return NULL;
-	}
-
-	socket = SOCKET_I(inode);
-
-	return socket;
-}
-EXPORT_SYMBOL_GPL(sockfd_to_socket);
-
 /* there may be more cases to tweak the flags. */
 static unsigned int tweak_transfer_flags(unsigned int flags)
 {
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 732fb63..f555d83 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -299,7 +299,6 @@
 void usbip_dump_header(struct usbip_header *pdu);
 
 int usbip_recv(struct socket *sock, void *buf, int size);
-struct socket *sockfd_to_socket(unsigned int sockfd);
 
 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
 		    int pack);
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 1e84577..70e1755 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -788,7 +788,7 @@
 
 	/* active connection is closed */
 	if (vdev->ud.tcp_socket) {
-		fput(vdev->ud.tcp_socket->file);
+		sockfd_put(vdev->ud.tcp_socket);
 		vdev->ud.tcp_socket = NULL;
 	}
 	pr_info("release socket\n");
@@ -835,7 +835,7 @@
 	vdev->udev = NULL;
 
 	if (ud->tcp_socket) {
-		fput(ud->tcp_socket->file);
+		sockfd_put(ud->tcp_socket);
 		ud->tcp_socket = NULL;
 	}
 	ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index e098032..47bddcd 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -176,6 +176,7 @@
 	struct socket *socket;
 	int sockfd = 0;
 	__u32 rhport = 0, devid = 0, speed = 0;
+	int err;
 
 	/*
 	 * @rhport: port number of vhci_hcd
@@ -194,8 +195,7 @@
 		return -EINVAL;
 
 	/* Extract socket from fd. */
-	/* The correct way to clean this up is to fput(socket->file). */
-	socket = sockfd_to_socket(sockfd);
+	socket = sockfd_lookup(sockfd, &err);
 	if (!socket)
 		return -EINVAL;
 
@@ -211,7 +211,7 @@
 		spin_unlock(&vdev->ud.lock);
 		spin_unlock(&the_controller->lock);
 
-		fput(socket->file);
+		sockfd_put(socket);
 
 		dev_err(dev, "port %d already used\n", rhport);
 		return -EINVAL;
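
The usbip conversions replace the driver's own sockfd_to_socket() with sockfd_lookup()/sockfd_put(). sockfd_lookup() returns the socket with a reference held, and every path that stops using it, including the error paths in the sysfs store functions above, must drop that reference with sockfd_put() instead of reaching into socket->file by hand. A minimal sketch of the acquire/release discipline with a toy refcount:

#include <stdio.h>

struct socket {
	int refcount;
};

/* stand-in for sockfd_lookup(): the caller gets the socket with a reference held */
static struct socket *sock_lookup(struct socket *s)
{
	s->refcount++;
	return s;
}

/* stand-in for sockfd_put(): every exit path must drop the reference exactly once */
static void sock_put_ref(struct socket *s)
{
	s->refcount--;
}

static int attach_port(struct socket *s, int port_busy)
{
	struct socket *sock = sock_lookup(s);

	if (port_busy) {
		sock_put_ref(sock);	/* error path still releases the reference */
		return -1;
	}
	/* success: keep the reference for as long as the connection lives */
	return 0;
}

int main(void)
{
	struct socket s = { .refcount = 1 };

	attach_port(&s, 1);
	printf("refcount after failed attach: %d\n", s.refcount);	/* back to 1 */
	return 0;
}
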
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b83ec37..78cab13 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -499,6 +499,23 @@
 	return 0;
 }
 
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	__iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
+
 static struct iscsit_transport iscsi_target_transport = {
 	.name			= "iSCSI/TCP",
 	.transport_type		= ISCSI_TCP,
@@ -513,6 +530,8 @@
 	.iscsit_response_queue	= iscsit_response_queue,
 	.iscsit_queue_data_in	= iscsit_queue_rsp,
 	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
 static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@
 {
 	u32 payload_length = ntoh24(hdr->dlength);
 
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
 			" not set, protocol error.\n");
@@ -2468,6 +2497,7 @@
 {
 	struct iscsi_cmd *cmd;
 	struct iscsi_conn *conn_p;
+	bool found = false;
 
 	/*
 	 * Only send a Asynchronous Message on connections whos network
@@ -2476,11 +2506,12 @@
 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
 			iscsit_inc_conn_usage_count(conn_p);
+			found = true;
 			break;
 		}
 	}
 
-	if (!conn_p)
+	if (!found)
 		return;
 
 	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
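
iscsit_aborted_task() is the iSCSI/TCP implementation of the new .aborted_task fabric callback added throughout this series (tcm_qla2xxx, loopback and sbp gain one as well): when the core aborts a command, the fabric unlinks it from its connection list and releases per-command resources exactly once. The qla2xxx variant guards its unmap with sg_mapped so repeated calls are harmless; a small sketch of that idempotent-cleanup guard:

#include <stdio.h>

struct cmd {
	int sg_mapped;
};

/* cleanup that is safe to call more than once: the flag records whether work remains */
static void aborted_task(struct cmd *c)
{
	if (!c->sg_mapped)
		return;
	printf("unmapping scatterlist\n");
	c->sg_mapped = 0;
}

int main(void)
{
	struct cmd c = { .sg_mapped = 1 };

	aborted_task(&c);	/* performs the unmap */
	aborted_task(&c);	/* second call is a no-op */
	return 0;
}
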
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1c0088f..ae03f3e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1052,6 +1052,11 @@
  */
 DEF_TPG_ATTRIB(default_erl);
 TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@
 	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
 	&iscsi_tpg_attrib_demo_mode_discovery.attr,
 	&iscsi_tpg_attrib_default_erl.attr,
+	&iscsi_tpg_attrib_t10_pi.attr,
 	NULL,
 };
 
@@ -1815,6 +1821,13 @@
 	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@
 	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
 	fabric->tf_ops.queue_status = &lio_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+	fabric->tf_ops.aborted_task = &lio_aborted_task;
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
 	 */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 48f7b3b..886d74d 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -58,7 +58,8 @@
 #define TA_DEMO_MODE_DISCOVERY		1
 #define TA_DEFAULT_ERL			0
 #define TA_CACHE_CORE_NPS		0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI		0
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -765,6 +766,7 @@
 	u32			prod_mode_write_protect;
 	u32			demo_mode_discovery;
 	u32			default_erl;
+	u8			t10_pi;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -787,6 +789,7 @@
 	void			*np_context;
 	struct iscsit_transport *np_transport;
 	struct list_head	np_list;
+	struct iscsi_tpg_np	*tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e29279e..8739b98 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -259,6 +259,7 @@
 {
 	struct iscsi_session *sess = NULL;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	enum target_prot_op sup_pro_ops;
 	int ret;
 
 	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@
 		kfree(sess);
 		return -ENOMEM;
 	}
+	sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session(sup_pro_ops);
 	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 44a5471..eb96b20 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -225,6 +225,7 @@
 	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
 	a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
 	a->default_erl = TA_DEFAULT_ERL;
+	a->t10_pi = TA_DEFAULT_T10_PI;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@
 	init_completion(&tpg_np->tpg_np_comp);
 	kref_init(&tpg_np->tpg_np_kref);
 	tpg_np->tpg_np		= np;
+	np->tpg_np		= tpg_np;
 	tpg_np->tpg		= tpg;
 
 	spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@
 
 	return 0;
 }
+
+int iscsit_ta_t10_pi(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->t10_pi = flag;
+	pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+		" %s\n", tpg->tpgt, (a->t10_pi) ?
+		"ON" : "OFF");
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 213c0fc..0a182f2 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@
 extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index e655b04..53e157c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -705,8 +705,8 @@
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
-			      bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+		       bool check_queues)
 {
 	struct iscsi_conn *conn = cmd->conn;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 561a424..a68508c 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fadad7c..c886ad1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -212,6 +212,10 @@
 		se_cmd->se_cmd_flags |= SCF_BIDI;
 
 	}
+
+	if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+		se_cmd->prot_pto = true;
+
 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@
 	wake_up(&tl_tmr->tl_tmr_wait);
 }
 
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 {
 	switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@
 	/*
 	 * Initialize the struct se_session pointer
 	 */
-	tl_nexus->se_sess = transport_init_session();
+	tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
 	if (IS_ERR(tl_nexus->se_sess)) {
 		ret = PTR_ERR(tl_nexus->se_sess);
 		goto out;
@@ -1483,6 +1492,7 @@
 	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
 	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+	fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
 
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 24884ca..e7e9372 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -210,7 +210,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		pr_err("failed to init se_session\n");
 
@@ -1846,6 +1846,11 @@
 {
 }
 
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static int sbp_check_stop_free(struct se_cmd *se_cmd)
 {
 	struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@
 	.queue_data_in			= sbp_queue_data_in,
 	.queue_status			= sbp_queue_status,
 	.queue_tm_rsp			= sbp_queue_tm_rsp,
+	.aborted_task			= sbp_aborted_task,
 	.check_stop_free		= sbp_check_stop_free,
 
 	.fabric_make_wwn		= sbp_make_tport,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c3d9df6..fcbe612 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -455,11 +455,26 @@
 	return rc;
 }
 
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+	/*
+	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+	 * The ALUA additional sense code qualifier (ASCQ) is determined
+	 * by the ALUA primary or secondary access state..
+	 */
+	pr_debug("[%s]: ALUA TG Port not available, "
+		"SenseKey: NOT_READY, ASC/ASCQ: "
+		"0x04/0x%02x\n",
+		cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+	cmd->scsi_asc = 0x04;
+	cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
-	int nonop_delay_msecs,
-	u8 *alua_ascq)
+	int nonop_delay_msecs)
 {
 	/*
 	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@
 	 */
 	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 	cmd->alua_nonop_delay = nonop_delay_msecs;
-	return 0;
 }
 
 static inline int core_alua_state_lba_dependent(
 	struct se_cmd *cmd,
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	u8 *alua_ascq)
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
 	struct se_device *dev = cmd->se_dev;
 	u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@
 		}
 		if (!cur_map) {
 			spin_unlock(&dev->t10_alua.lba_map_lock);
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@
 			switch(map_mem->lba_map_mem_alua_state) {
 			case ALUA_ACCESS_STATE_STANDBY:
 				spin_unlock(&dev->t10_alua.lba_map_lock);
-				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 				return 1;
 			case ALUA_ACCESS_STATE_UNAVAILABLE:
 				spin_unlock(&dev->t10_alua.lba_map_lock);
-				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 				return 1;
 			default:
 				break;
@@ -548,8 +561,7 @@
 
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 			return 1;
 		}
 	case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -588,7 +600,7 @@
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 		return 1;
 	}
 
@@ -597,8 +609,7 @@
 
 static inline int core_alua_state_unavailable(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 	case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -629,7 +640,7 @@
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 		return 1;
 	}
 
@@ -638,8 +649,7 @@
 
 static inline int core_alua_state_transition(
 	struct se_cmd *cmd,
-	unsigned char *cdb,
-	u8 *alua_ascq)
+	unsigned char *cdb)
 {
 	/*
 	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 			return 1;
 		}
 	case REQUEST_SENSE:
@@ -662,7 +672,7 @@
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 		return 1;
 	}
 
@@ -684,8 +694,6 @@
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
-	u8 alua_ascq;
-	int ret;
 
 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 		return 0;
@@ -701,9 +709,8 @@
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
-		alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		ret = 1;
-		goto out;
+		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+		return TCM_CHECK_CONDITION_NOT_READY;
 	}
 	 /*
 	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@
 
 	switch (out_alua_state) {
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-		ret = core_alua_state_nonoptimized(cmd, cdb,
-					nonop_delay_msecs, &alua_ascq);
+		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 		break;
 	case ALUA_ACCESS_STATE_STANDBY:
-		ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+		if (core_alua_state_standby(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+		if (core_alua_state_unavailable(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+		if (core_alua_state_transition(cmd, cdb))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+			return TCM_CHECK_CONDITION_NOT_READY;
 		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@
 		return TCM_INVALID_CDB_FIELD;
 	}
 
-out:
-	if (ret > 0) {
-		/*
-		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-		 * The ALUA additional sense code qualifier (ASCQ) is determined
-		 * by the ALUA primary or secondary access state..
-		 */
-		pr_debug("[%s]: ALUA TG Port not available, "
-			"SenseKey: NOT_READY, ASC/ASCQ: "
-			"0x04/0x%02x\n",
-			cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-		cmd->scsi_asc = 0x04;
-		cmd->scsi_ascq = alua_ascq;
-		return TCM_CHECK_CONDITION_NOT_READY;
-	}
-
 	return 0;
 }
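
The ALUA rework drops the u8 *alua_ascq out-parameter threaded through every state check in favour of a set_ascq() helper that stores the ASC/ASCQ on the command itself, so each handler can make the caller return TCM_CHECK_CONDITION_NOT_READY directly instead of funnelling through the old out: label. A condensed sketch of the resulting control flow (sense values reduced to plain ints, the ASCQ chosen as an example):

#include <stdio.h>

#define NOT_READY 1

struct cmd { unsigned char asc, ascq; };

static void set_ascq(struct cmd *cmd, unsigned char ascq)
{
	cmd->asc = 0x04;	/* LUN not accessible */
	cmd->ascq = ascq;
}

/* the state check stores the sense data itself and the caller returns early */
static int state_standby(struct cmd *cmd, int cdb_allowed)
{
	if (cdb_allowed)
		return 0;
	set_ascq(cmd, 0x0b);	/* example ASCQ: target port in standby state */
	return 1;
}

static int check_state(struct cmd *cmd, int cdb_allowed)
{
	if (state_standby(cmd, cdb_allowed))
		return NOT_READY;
	return 0;
}

int main(void)
{
	struct cmd cmd = { 0, 0 };

	printf("result=%d asc=0x%02x ascq=0x%02x\n",
	       check_state(&cmd, 0), cmd.asc, cmd.ascq);
	return 0;
}
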
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f0e85b1..60a9ae6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,6 +457,10 @@
 		pr_err("Missing tfo->queue_tm_rsp()\n");
 		return -EINVAL;
 	}
+	if (!tfo->aborted_task) {
+		pr_err("Missing tfo->aborted_task()\n");
+		return -EINVAL;
+	}
 	/*
 	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cf991a9..7d6cdda 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -854,25 +854,6 @@
 	return 0;
 }
 
-static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
-			       u32 unit_size, u32 *ref_tag, u16 app_tag,
-			       bool inc_reftag)
-{
-	unsigned char *p = buf;
-	int i;
-
-	for (i = 0; i < unit_size; i += dev->prot_length) {
-		*((u16 *)&p[0]) = 0xffff;
-		*((__be16 *)&p[2]) = cpu_to_be16(app_tag);
-		*((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
-
-		if (inc_reftag)
-			(*ref_tag)++;
-
-		p += dev->prot_length;
-	}
-}
-
 static int fd_format_prot(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@
 	sector_t prot_length, prot;
 	unsigned char *buf;
 	loff_t pos = 0;
-	u32 ref_tag = 0;
 	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
 	int rc, ret = 0, size, len;
-	bool inc_reftag = false;
 
 	if (!dev->dev_attrib.pi_prot_type) {
 		pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@
 		return -ENODEV;
 	}
 
-	switch (dev->dev_attrib.pi_prot_type) {
-	case TARGET_DIF_TYPE3_PROT:
-		ref_tag = 0xffffffff;
-		break;
-	case TARGET_DIF_TYPE2_PROT:
-	case TARGET_DIF_TYPE1_PROT:
-		inc_reftag = true;
-		break;
-	default:
-		break;
-	}
-
 	buf = vzalloc(unit_size);
 	if (!buf) {
 		pr_err("Unable to allocate FILEIO prot buf\n");
 		return -ENOMEM;
 	}
-
 	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
 	size = prot_length;
 
 	pr_debug("Using FILEIO prot_length: %llu\n",
 		 (unsigned long long)prot_length);
 
+	memset(buf, 0xff, unit_size);
 	for (prot = 0; prot < prot_length; prot += unit_size) {
-
-		fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
-				   inc_reftag);
-
 		len = min(unit_size, size);
-
 		rc = kernel_write(prot_fd, buf, len, pos);
 		if (rc != len) {
 			pr_err("vfs_write to prot file failed: %d\n", rc);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 554d4f7..9e0232c 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -203,10 +203,9 @@
 
 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	if (ib_dev->ibd_bio_set != NULL) {
-		bioset_integrity_free(ib_dev->ibd_bio_set);
+	if (ib_dev->ibd_bio_set != NULL)
 		bioset_free(ib_dev->ibd_bio_set);
-	}
+
 	kfree(ib_dev);
 }
 
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 66a5aba..b920db3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -242,7 +242,7 @@
 	rd_dev->sg_prot_count = 0;
 }
 
-static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
 {
 	struct rd_dev_sg_table *sg_table;
 	u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@
 
 	if (rd_dev->rd_flags & RDF_NULLIO)
 		return 0;
-
-	total_sg_needed = rd_dev->rd_page_count / prot_length;
+	/*
+	 * prot_length = 8 bytes of DIF data per block
+	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
+	 *		   (prot_length/PGSZ) + pad
+	 * The PGSZ factors cancel each other out.
+	 */
+	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
 
 	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
 
@@ -606,7 +611,8 @@
         if (!dev->dev_attrib.pi_prot_type)
 		return 0;
 
-	return rd_build_prot_space(rd_dev, dev->prot_length);
+	return rd_build_prot_space(rd_dev, dev->prot_length,
+				   dev->dev_attrib.block_size);
 }
 
 static void rd_free_prot(struct se_device *dev)
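
With the block-size-aware sizing, the protection area is derived from the number of data blocks: rd_page_count pages hold rd_page_count * (PAGE_SIZE / block_size) blocks, each block needs prot_length bytes of DIF, and dividing the resulting byte count by PAGE_SIZE to get sg pages cancels the PAGE_SIZE factors, leaving rd_page_count * prot_length / block_size plus one page of padding. A quick numeric check with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, block_size = 512, prot_length = 8;
	unsigned int rd_page_count = 1024;	/* illustrative ramdisk size: 4 MiB */

	unsigned int blocks = rd_page_count * (page_size / block_size);
	unsigned int prot_bytes = blocks * prot_length;
	unsigned int prot_pages = prot_bytes / page_size;
	unsigned int shortcut = rd_page_count * prot_length / block_size;

	/* prot_pages and shortcut print the same value: 16 */
	printf("blocks=%u prot_bytes=%u prot_pages=%u shortcut=%u\n",
	       blocks, prot_bytes, prot_pages, shortcut);
	return 0;
}
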
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 77e6531..e022959 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -89,6 +89,7 @@
 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 	unsigned char *rbuf;
 	unsigned char buf[32];
 	unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@
 	/*
 	 * Set P_TYPE and PROT_EN bits for DIF support
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+	}
 
 	if (dev->transport->get_lbppbe)
 		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -425,13 +428,14 @@
 		goto out;
 	}
 
-	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
 			   GFP_KERNEL);
 	if (!write_sg) {
 		pr_err("Unable to allocate compare_and_write sg\n");
 		ret = TCM_OUT_OF_RESOURCES;
 		goto out;
 	}
+	sg_init_table(write_sg, cmd->t_data_nents);
 	/*
 	 * Setup verify and write data payloads from total NumberLBAs.
 	 */
@@ -569,30 +573,85 @@
 	return TCM_NO_SENSE;
 }
 
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+		       bool is_write, struct se_cmd *cmd)
+{
+	if (is_write) {
+		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+					 TARGET_PROT_DOUT_INSERT;
+		switch (protect) {
+		case 0x0:
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	} else {
+		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+					 TARGET_PROT_DIN_STRIP;
+		switch (protect) {
+		case 0x0:
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static bool
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
-	       u32 sectors)
+	       u32 sectors, bool is_write)
 {
-	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+	u8 protect = cdb[1] >> 5;
+
+	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
 		return true;
 
 	switch (dev->dev_attrib.pi_prot_type) {
 	case TARGET_DIF_TYPE3_PROT:
-		if (!(cdb[1] & 0xe0))
-			return true;
-
 		cmd->reftag_seed = 0xffffffff;
 		break;
 	case TARGET_DIF_TYPE2_PROT:
-		if (cdb[1] & 0xe0)
+		if (protect)
 			return false;
 
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
 	case TARGET_DIF_TYPE1_PROT:
-		if (!(cdb[1] & 0xe0))
-			return true;
-
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
 	case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@
 		return true;
 	}
 
+	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+				   is_write, cmd))
+		return false;
+
 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
 	cmd->prot_length = dev->prot_length * sectors;
-	cmd->prot_handover = PROT_SEPERATED;
+	pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
+		 __func__, cmd->prot_type, cmd->prot_length,
+		 cmd->prot_op, cmd->prot_checks);
 
 	return true;
 }
@@ -628,7 +693,7 @@
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
 		if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
 
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = cmd->t_task_lba;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+						dev->dev_attrib.block_size));
+			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+			sdt->app_tag = 0;
+
+			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			sector++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+}
+
 static sense_reason_t
 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
 		  const void *p, sector_t sector, unsigned int ei_lba)
@@ -1162,9 +1271,9 @@
 }
 EXPORT_SYMBOL(sbc_dif_verify_write);
 
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@
 		kunmap_atomic(paddr);
 		kunmap_atomic(daddr);
 	}
-	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
 
 	return 0;
 }
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 sectors = cmd->prot_length / dev->prot_length;
+
+	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+				     cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	sense_reason_t rc;
+
+	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+	if (rc)
+		return rc;
+
+	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+	return 0;
+}
 EXPORT_SYMBOL(sbc_dif_verify_read);
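
For readers following the new sbc_dif_generate() path above: each logical block gets an 8-byte DIF tuple holding a big-endian CRC16 guard tag computed over the block, an application tag (left at zero here) and, for Type 1, a reference tag carrying the low 32 bits of the LBA. The user-space sketch below mirrors that layout; crc_t10dif_sw() is a plain bitwise stand-in for the kernel's crc_t10dif() helper, and the type names are illustrative only, not kernel API.

#include <arpa/inet.h>		/* htons()/htonl() stand in for cpu_to_be16/32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same on-wire layout as struct se_dif_v1_tuple: guard, app, ref. */
struct dif_tuple {
	uint16_t guard_tag;	/* CRC16 (T10 DIF) over the data block */
	uint16_t app_tag;	/* left at 0 by the generate path */
	uint32_t ref_tag;	/* low 32 bits of the LBA for Type 1 */
};

/* Bitwise CRC-16/T10-DIF: polynomial 0x8BB7, init 0, no reflection. */
static uint16_t crc_t10dif_sw(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int j = 0; j < 8; j++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t block[512];
	struct dif_tuple t;
	uint64_t lba = 0x1234;

	memset(block, 0xa5, sizeof(block));	/* pretend 512-byte data block */

	t.guard_tag = htons(crc_t10dif_sw(block, sizeof(block)));
	t.app_tag = 0;
	t.ref_tag = htonl((uint32_t)lba);	/* Type 1: ref tag follows the LBA */

	printf("lba %llu guard 0x%04x ref %u\n",
	       (unsigned long long)lba, ntohs(t.guard_tag), ntohl(t.ref_tag));
	return 0;
}
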
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 3bebc71..8653666 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,7 @@
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 
 	/* Set RMB (removable media) for tape devices */
 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@
 	if (dev->dev_attrib.emulate_3pc)
 		buf[5] |= 0x8;
 	/*
-	 * Set Protection (PROTECT) bit when DIF has been enabled.
+	 * Set Protection (PROTECT) bit when DIF has been enabled on the
+	 * device, and the transport supports VERIFY + PASS.
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		buf[5] |= 0x1;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			buf[5] |= 0x1;
+	}
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
@@ -473,16 +477,19 @@
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 
 	buf[3] = 0x3c;
 	/*
 	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
 	 * only for TYPE3 protection.
 	 */
-	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
-		buf[4] = 0x5;
-	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
-		buf[4] = 0x4;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+			buf[4] = 0x5;
+		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+			buf[4] = 0x4;
+	}
 
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
@@ -762,7 +769,7 @@
 	return ret;
 }
 
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
 {
 	p[0] = 0x01;
 	p[1] = 0x0a;
@@ -775,8 +782,11 @@
 	return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
 	p[0] = 0x0a;
 	p[1] = 0x0a;
 
@@ -868,8 +878,10 @@
 	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
 	 * TAG field.
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		p[5] |= 0x80;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			p[5] |= 0x80;
+	}
 
 	p[8] = 0xff;
 	p[9] = 0xff;
@@ -879,8 +891,10 @@
 	return 12;
 }
 
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+
 	p[0] = 0x08;
 	p[1] = 0x12;
 
@@ -896,7 +910,7 @@
 	return 20;
 }
 
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
 {
 	p[0] = 0x1c;
 	p[1] = 0x0a;
@@ -912,7 +926,7 @@
 static struct {
 	uint8_t		page;
 	uint8_t		subpage;
-	int		(*emulate)(struct se_device *, u8, unsigned char *);
+	int		(*emulate)(struct se_cmd *, u8, unsigned char *);
 } modesense_handlers[] = {
 	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
 	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@
 			 * the only two possibilities).
 			 */
 			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
-				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 				if (!ten && length + ret >= 255)
 					break;
 				length += ret;
@@ -1063,7 +1077,7 @@
 	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
-			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 			goto set_length;
 		}
 
@@ -1095,7 +1109,6 @@
 
 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task_cdb;
 	bool ten = cdb[0] == MODE_SELECT_10;
 	int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
 			memset(tbuf, 0, SE_MODE_PAGE_BUF);
-			length = modesense_handlers[i].emulate(dev, 0, tbuf);
+			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
 			goto check_contents;
 		}
 
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 70c638f..f7cd95e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -87,14 +87,17 @@
 	struct se_cmd *cmd,
 	int tas)
 {
+	bool remove = true;
 	/*
 	 * TASK ABORTED status (TAS) bit support
 	*/
 	if ((tmr_nacl &&
-	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+	     (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+		remove = false;
 		transport_send_task_abort(cmd);
+	}
 
-	transport_cmd_finish_abort(cmd, 0);
+	transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -127,6 +130,11 @@
 
 		if (dev != se_cmd->se_dev)
 			continue;
+
+		/* skip se_cmd associated with tmr */
+		if (tmr->task_cmd == se_cmd)
+			continue;
+
 		ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
@@ -150,18 +158,9 @@
 
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
-		/*
-		 * Now send SAM_STAT_TASK_ABORTED status for the referenced
-		 * se_cmd descriptor..
-		 */
-		transport_send_task_abort(se_cmd);
-		/*
-		 * Also deal with possible extra acknowledge reference..
-		 */
-		if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
-			target_put_sess_cmd(se_sess, se_cmd);
 
 		target_put_sess_cmd(se_sess, se_cmd);
+		transport_cmd_finish_abort(se_cmd, true);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %d\n", ref_tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2956250..d4b9869 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -235,7 +235,7 @@
 	sub_api_initialized = 1;
 }
 
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 
@@ -251,6 +251,7 @@
 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	kref_init(&se_sess->sess_kref);
+	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
 }
@@ -288,12 +289,13 @@
 EXPORT_SYMBOL(transport_alloc_session_tags);
 
 struct se_session *transport_init_session_tags(unsigned int tag_num,
-					       unsigned int tag_size)
+					       unsigned int tag_size,
+					       enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 	int rc;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(sup_prot_ops);
 	if (IS_ERR(se_sess))
 		return se_sess;
 
@@ -603,6 +605,15 @@
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+		transport_lun_remove_cmd(cmd);
+	/*
+	 * Allow the fabric driver to unmap any resources before
+	 * releasing the descriptor via TFO->release_cmd()
+	 */
+	if (remove)
+		cmd->se_tfo->aborted_task(cmd);
+
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
 	if (remove)
@@ -1365,6 +1376,13 @@
 		target_put_sess_cmd(se_sess, se_cmd);
 		return 0;
 	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+	if (rc != 0) {
+		transport_generic_request_failure(se_cmd, rc);
+		return 0;
+	}
+
 	/*
 	 * Save pointers for SGLs containing protection information,
 	 * if present.
@@ -1374,11 +1392,6 @@
 		se_cmd->t_prot_nents = sgl_prot_count;
 	}
 
-	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
-	if (rc != 0) {
-		transport_generic_request_failure(se_cmd, rc);
-		return 0;
-	}
 	/*
 	 * When a non zero sgl_count has been passed perform SGL passthrough
 	 * mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@
 	cmd->t_state = TRANSPORT_PROCESSING;
 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
+	/*
+	 * Perform WRITE_INSERT of PI using software emulation when backend
+	 * device has PI enabled, if the transport has not already generated
+	 * PI using hardware WRITE_INSERT offload.
+	 */
+	if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+			sbc_dif_generate(cmd);
+	}
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
@@ -1883,6 +1905,21 @@
 	schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+	sense_reason_t rc;
+
+	if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+		rc = sbc_dif_read_strip(cmd);
+		if (rc) {
+			cmd->pi_err = rc;
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@
 					cmd->data_length;
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Perform READ_STRIP of PI using software emulation when
+		 * backend had PI enabled, if the transport will not be
+		 * performing hardware READ_STRIP offload.
+		 */
+		if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+		    target_check_read_strip(cmd)) {
+			ret = transport_send_check_condition_and_sense(cmd,
+						cmd->pi_err, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@
 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
+
+	transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+	cmd->t_prot_sg = NULL;
+	cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2202,6 +2259,14 @@
 				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
 
+		if (cmd->prot_op != TARGET_PROT_NORMAL) {
+			ret = target_alloc_sgl(&cmd->t_prot_sg,
+					       &cmd->t_prot_nents,
+					       cmd->prot_length, true);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
 		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 				       cmd->data_length, zero_flag);
 		if (ret < 0)
@@ -2770,13 +2835,17 @@
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
 		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2805,6 +2874,7 @@
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
 			smp_mb__after_atomic_inc();
 			return;
 		}
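
Taken together with the sbc.c changes earlier in this series, the rule added here is: software DIF emulation (sbc_dif_generate() before dispatch on the write path, sbc_dif_read_strip() at completion on the read path) only runs when the fabric session's sup_prot_ops does not advertise the matching hardware offload. A minimal sketch of that decision, with made-up flag values standing in for enum target_prot_op:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capability bits; the real values live in enum target_prot_op. */
enum {
	PROT_DOUT_INSERT = 1 << 0,	/* HW inserts PI on writes */
	PROT_DIN_STRIP   = 1 << 1,	/* HW strips PI on reads  */
};

/*
 * Sketch of the checks added to target_execute_cmd() and
 * target_complete_ok_work(): fall back to software DIF emulation only when
 * the session lacks the corresponding hardware offload.
 */
static bool need_sw_write_insert(unsigned int sup_prot_ops)
{
	return !(sup_prot_ops & PROT_DOUT_INSERT);
}

static bool need_sw_read_strip(unsigned int sup_prot_ops)
{
	return !(sup_prot_ops & PROT_DIN_STRIP);
}

int main(void)
{
	unsigned int none = 0;				/* e.g. TARGET_PROT_NORMAL */
	unsigned int offload = PROT_DOUT_INSERT | PROT_DIN_STRIP;

	printf("no offload : sw insert=%d sw strip=%d\n",
	       need_sw_write_insert(none), need_sw_read_strip(none));
	printf("hw offload : sw insert=%d sw strip=%d\n",
	       need_sw_write_insert(offload), need_sw_read_strip(offload));
	return 0;
}
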
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863a..a0bcfd3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@
  */
 struct ft_tpg {
 	u32 index;
-	struct ft_lport_acl *lport_acl;
+	struct ft_lport_wwn *lport_wwn;
 	struct ft_tport *tport;		/* active tport or NULL */
-	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
 	struct list_head lun_list;	/* head of LUNs */
 	struct se_portal_group se_tpg;
 	struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
 	u64 wwpn;
 	char name[FT_NAMELEN];
-	struct list_head list;
-	struct list_head tpg_list;
-	struct se_wwn fc_lport_wwn;
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
 };
 
 /*
@@ -128,7 +127,6 @@
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 8b2c1aa..01cf37f 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -426,6 +426,11 @@
 	ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e879da8..efdcb96 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
+	struct ft_lport_wwn *ft_wwn;
 	struct ft_tpg *tpg;
 	struct workqueue_struct *wq;
 	unsigned long index;
@@ -318,12 +318,17 @@
 	if (index > UINT_MAX)
 		return NULL;
 
-	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	if (index != 1) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 	if (!tpg)
 		return NULL;
 	tpg->index = index;
-	tpg->lport_acl = lacl;
+	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
 	wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@
 	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
-	list_add_tail(&tpg->list, &lacl->tpg_list);
+	ft_wwn->tpg = tpg;
 	mutex_unlock(&ft_lport_lock);
 
 	return &tpg->se_tpg;
@@ -351,6 +356,7 @@
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
 	pr_debug("del tpg %s\n",
 		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@
 	synchronize_rcu();
 
 	mutex_lock(&ft_lport_lock);
-	list_del(&tpg->list);
+	ft_wwn->tpg = NULL;
 	if (tpg->tport) {
 		tpg->tport->tpg = NULL;
 		tpg->tport = NULL;
@@ -380,15 +386,11 @@
  */
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_tpg *tpg;
+	struct ft_lport_wwn *ft_wwn;
 
-	list_for_each_entry(lacl, &ft_lport_list, list) {
-		if (lacl->wwpn == lport->wwpn) {
-			list_for_each_entry(tpg, &lacl->tpg_list, list)
-				return tpg; /* XXX for now return first entry */
-			return NULL;
-		}
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
 	}
 	return NULL;
 }
@@ -401,50 +403,49 @@
  * Add lport to allowed config.
  * The name is the WWPN in lower-case ASCII, colon-separated bytes.
  */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_lport_acl *old_lacl;
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
 	u64 wwpn;
 
-	pr_debug("add lport %s\n", name);
+	pr_debug("add wwn %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
-	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-	if (!lacl)
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
 		return NULL;
-	lacl->wwpn = wwpn;
-	INIT_LIST_HEAD(&lacl->tpg_list);
+	ft_wwn->wwpn = wwpn;
 
 	mutex_lock(&ft_lport_lock);
-	list_for_each_entry(old_lacl, &ft_lport_list, list) {
-		if (old_lacl->wwpn == wwpn) {
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
 			mutex_unlock(&ft_lport_lock);
-			kfree(lacl);
+			kfree(ft_wwn);
 			return NULL;
 		}
 	}
-	list_add_tail(&lacl->list, &ft_lport_list);
-	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
 	mutex_unlock(&ft_lport_lock);
 
-	return &lacl->fc_lport_wwn;
+	return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-	struct ft_lport_acl *lacl = container_of(wwn,
-				struct ft_lport_acl, fc_lport_wwn);
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+				struct ft_lport_wwn, se_wwn);
 
-	pr_debug("del lport %s\n", lacl->name);
+	pr_debug("del wwn %s\n", ft_wwn->name);
 	mutex_lock(&ft_lport_lock);
-	list_del(&lacl->list);
+	list_del(&ft_wwn->ft_wwn_node);
 	mutex_unlock(&ft_lport_lock);
 
-	kfree(lacl);
+	kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@
 {
 	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return tpg->lport_acl->name;
+	return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@
 	.queue_data_in =		ft_queue_data_in,
 	.queue_status =			ft_queue_status,
 	.queue_tm_rsp =			ft_queue_tm_resp,
+	.aborted_task =			ft_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
 	 */
-	.fabric_make_wwn =		&ft_add_lport,
-	.fabric_drop_wwn =		&ft_del_lport,
+	.fabric_make_wwn =		&ft_add_wwn,
+	.fabric_drop_wwn =		&ft_del_wwn,
 	.fabric_make_tpg =		&ft_add_tpg,
 	.fabric_drop_tpg =		&ft_del_tpg,
 	.fabric_post_link =		NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08..21ce508 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
 	struct ft_tpg *tpg;
 	struct ft_tport *tport;
@@ -68,6 +68,7 @@
 
 	if (tport) {
 		tport->tpg = tpg;
+		tpg->tport = tport;
 		return tport;
 	}
 
@@ -114,7 +115,7 @@
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
 	mutex_lock(&ft_lport_lock);
-	ft_tport_create(lport);
+	ft_tport_get(lport);
 	mutex_unlock(&ft_lport_lock);
 }
 
@@ -211,7 +212,8 @@
 		return NULL;
 
 	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-						    sizeof(struct ft_cmd));
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
@@ -350,7 +352,7 @@
 	struct ft_node_acl *acl;
 	u32 fcp_parm;
 
-	tport = ft_tport_create(rdata->local_port);
+	tport = ft_tport_get(rdata->local_port);
 	if (!tport)
 		goto not_target;	/* not a target for this local port */
 
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 45af765..a99c631 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -62,12 +62,16 @@
 #define IMX_POLLING_DELAY		2000 /* millisecond */
 #define IMX_PASSIVE_DELAY		1000
 
+#define FACTOR0				10000000
+#define FACTOR1				15976
+#define FACTOR2				4297157
+
 struct imx_thermal_data {
 	struct thermal_zone_device *tz;
 	struct thermal_cooling_device *cdev;
 	enum thermal_device_mode mode;
 	struct regmap *tempmon;
-	int c1, c2; /* See formula in imx_get_sensor_data() */
+	u32 c1, c2; /* See formula in imx_get_sensor_data() */
 	unsigned long temp_passive;
 	unsigned long temp_critical;
 	unsigned long alarm_temp;
@@ -84,7 +88,7 @@
 	int alarm_value;
 
 	data->alarm_temp = alarm_temp;
-	alarm_value = (alarm_temp - data->c2) / data->c1;
+	alarm_value = (data->c2 - alarm_temp) / data->c1;
 	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
 	regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
 			TEMPSENSE0_ALARM_VALUE_SHIFT);
@@ -136,7 +140,7 @@
 	n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
 
 	/* See imx_get_sensor_data() for formula derivation */
-	*temp = data->c2 + data->c1 * n_meas;
+	*temp = data->c2 - n_meas * data->c1;
 
 	/* Update alarm value to next higher trip point */
 	if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
@@ -305,6 +309,7 @@
 	int t1, t2, n1, n2;
 	int ret;
 	u32 val;
+	u64 temp64;
 
 	map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 					      "fsl,tempmon-data");
@@ -330,6 +335,8 @@
 	 *   [31:20] - sensor value @ 25C
 	 *    [19:8] - sensor value of hot
 	 *     [7:0] - hot temperature value
+	 * Use the universal formula now; only the sensor value @ 25C is needed:
+	 * slope = 0.4297157 - (0.0015976 * 25C fuse)
 	 */
 	n1 = val >> 20;
 	n2 = (val & 0xfff00) >> 8;
@@ -337,20 +344,26 @@
 	t1 = 25; /* t1 always 25C */
 
 	/*
-	 * Derived from linear interpolation,
-	 * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+	 * Derived from linear interpolation:
+	 * slope = 0.4297157 - (0.0015976 * 25C fuse)
+	 * slope = (FACTOR2 - FACTOR1 * n1) / FACTOR0
+	 * (Nmeas - n1) / (Tmeas - t1) = slope
 	 * We want to reduce this down to the minimum computation necessary
 	 * for each temperature read.  Also, we want Tmeas in millicelsius
 	 * and we don't want to lose precision from integer division. So...
-	 * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
-	 * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
-	 * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
-	 * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
-	 * Let constant c2 = (1000 * T2) - (c1 * N2)
-	 * milli_Tmeas = c2 + (c1 * Nmeas)
+	 * Tmeas = (Nmeas - n1) / slope + t1
+	 * milli_Tmeas = 1000 * (Nmeas - n1) / slope + 1000 * t1
+	 * milli_Tmeas = -1000 * (n1 - Nmeas) / slope + 1000 * t1
+	 * Let constant c1 = (-1000 / slope)
+	 * milli_Tmeas = (n1 - Nmeas) * c1 + 1000 * t1
+	 * Let constant c2 = n1 * c1 + 1000 * t1
+	 * milli_Tmeas = c2 - Nmeas * c1
 	 */
-	data->c1 = 1000 * (t1 - t2) / (n1 - n2);
-	data->c2 = 1000 * t2 - data->c1 * n2;
+	temp64 = FACTOR0;
+	temp64 *= 1000;
+	do_div(temp64, FACTOR1 * n1 - FACTOR2);
+	data->c1 = temp64;
+	data->c2 = n1 * data->c1 + 1000 * t1;
 
 	/*
 	 * Set the default passive cooling trip point to 20 °C below the
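
As a sanity check of the new integer math, the same computation can be reproduced in user space. The FACTOR constants are the ones added above; the 25 C fuse count n1 = 1600 is purely a made-up example value, not data read from real silicon:

#include <stdint.h>
#include <stdio.h>

#define FACTOR0	10000000
#define FACTOR1	15976
#define FACTOR2	4297157

int main(void)
{
	uint32_t n1 = 1600, t1 = 25;	/* hypothetical 25 C calibration fuse */
	uint32_t c1, c2, n_meas;
	uint64_t temp64 = (uint64_t)FACTOR0 * 1000;

	/* Mirrors imx_get_sensor_data(): c1 = -1000 / slope (slope < 0). */
	temp64 /= (uint64_t)FACTOR1 * n1 - FACTOR2;
	c1 = (uint32_t)temp64;			/* 470 for this fuse value */
	c2 = n1 * c1 + 1000 * t1;		/* 777000 */

	/* Mirrors imx_get_temp(): lower counts mean higher temperature. */
	for (n_meas = 1400; n_meas <= 1600; n_meas += 100)
		printf("count %u -> %u mC\n", n_meas, c2 - n_meas * c1);
	return 0;
}

Plugging the 25 C count back in gives 25000 mC, as expected; lower counts map to correspondingly higher millicelsius values.
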
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 79a09d0..5a37940 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -299,12 +299,17 @@
 static void rcar_thermal_work(struct work_struct *work)
 {
 	struct rcar_thermal_priv *priv;
+	unsigned long cctemp, nctemp;
 
 	priv = container_of(work, struct rcar_thermal_priv, work.work);
 
+	rcar_thermal_get_temp(priv->zone, &cctemp);
 	rcar_thermal_update_temp(priv);
 	rcar_thermal_irq_enable(priv);
-	thermal_zone_device_update(priv->zone);
+
+	rcar_thermal_get_temp(priv->zone, &nctemp);
+	if (nctemp != cctemp)
+		thermal_zone_device_update(priv->zone);
 }
 
 static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -313,7 +318,7 @@
 
 	status = (status >> rcar_id_to_shift(priv)) & 0x3;
 
-	if (status & 0x3) {
+	if (status) {
 		dev_dbg(dev, "thermal%d %s%s\n",
 			priv->id,
 			(status & 0x2) ? "Rising " : "",
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 74c0e34..3ab12ee 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1500,10 +1500,8 @@
 
 	return ti_bandgap_restore_ctxt(bgp);
 }
-static const struct dev_pm_ops ti_bandgap_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(ti_bandgap_suspend,
-				ti_bandgap_resume)
-};
+static SIMPLE_DEV_PM_OPS(ti_bandgap_dev_pm_ops, ti_bandgap_suspend,
+			 ti_bandgap_resume);
 
 #define DEV_PM_OPS	(&ti_bandgap_dev_pm_ops)
 #else
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index b0e5401..90ca082 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -65,6 +65,7 @@
 {
 	struct audit_buffer *ab;
 	struct task_struct *tsk = current;
+	pid_t pid = task_pid_nr(tsk);
 	uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
 	uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
 	unsigned int sessionid = audit_get_sessionid(tsk);
@@ -74,7 +75,7 @@
 		char name[sizeof(tsk->comm)];
 
 		audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
-				 " minor=%d comm=", description, tsk->pid, uid,
+				 " minor=%d comm=", description, pid, uid,
 				 loginuid, sessionid, major, minor);
 		get_task_comm(name, tsk);
 		audit_log_untrustedstring(ab, name);
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 460c266..f058c03 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1471,6 +1471,11 @@
 {
 }
 
+static void usbg_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static const char *usbg_check_wwn(const char *name)
 {
 	const char *n;
@@ -1726,7 +1731,7 @@
 		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
 		goto err_unlock;
 	}
-	tv_nexus->tvn_se_sess = transport_init_session();
+	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess))
 		goto err_free;
 
@@ -1897,6 +1902,7 @@
 	.queue_data_in			= usbg_send_read_response,
 	.queue_status			= usbg_send_status_response,
 	.queue_tm_rsp			= usbg_queue_tm_rsp,
+	.aborted_task			= usbg_aborted_task,
 	.check_stop_free		= usbg_check_stop_free,
 
 	.fabric_make_wwn		= usbg_make_tport,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e1e22e0..be414d2 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -818,9 +818,9 @@
 	vhost_dev_cleanup(&n->dev, false);
 	vhost_net_vq_reset(n);
 	if (tx_sock)
-		fput(tx_sock->file);
+		sockfd_put(tx_sock);
 	if (rx_sock)
-		fput(rx_sock->file);
+		sockfd_put(rx_sock);
 	/* Make sure no callbacks are outstanding */
 	synchronize_rcu_bh();
 	/* We do an extra flush before freeing memory,
@@ -860,7 +860,7 @@
 	}
 	return sock;
 err:
-	fput(sock->file);
+	sockfd_put(sock);
 	return ERR_PTR(r);
 }
 
@@ -966,7 +966,7 @@
 
 	if (oldsock) {
 		vhost_net_flush_vq(n, index);
-		fput(oldsock->file);
+		sockfd_put(oldsock);
 	}
 
 	mutex_unlock(&n->dev.mutex);
@@ -978,7 +978,7 @@
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-	fput(sock->file);
+	sockfd_put(sock);
 err_vq:
 	mutex_unlock(&vq->mutex);
 err:
@@ -1009,9 +1009,9 @@
 done:
 	mutex_unlock(&n->dev.mutex);
 	if (tx_sock)
-		fput(tx_sock->file);
+		sockfd_put(tx_sock);
 	if (rx_sock)
-		fput(rx_sock->file);
+		sockfd_put(rx_sock);
 	return err;
 }
 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e48d4a6..cf50ce9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -539,6 +539,11 @@
 	return;
 }
 
+static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 {
 	vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@
 	 */
 	tv_nexus->tvn_se_sess = transport_init_session_tags(
 					TCM_VHOST_DEFAULT_TAGS,
-					sizeof(struct tcm_vhost_cmd));
+					sizeof(struct tcm_vhost_cmd),
+					TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
@@ -2131,6 +2137,7 @@
 	.queue_data_in			= tcm_vhost_queue_data_in,
 	.queue_status			= tcm_vhost_queue_status,
 	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
+	.aborted_task			= tcm_vhost_aborted_task,
 	/*
 	 * Setup callers for generic logic in target_core_fabric_configfs.c
 	 */
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 27d3cf2..bd2172c 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -347,7 +347,7 @@
 
 	rc = device_register(&new_bd->dev);
 	if (rc) {
-		kfree(new_bd);
+		put_device(&new_bd->dev);
 		return ERR_PTR(rc);
 	}
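
The one-line backlight.c change above follows the driver-core rule that once device_register() has been called the struct device is reference counted, so the error path must use put_device() and let the release callback free the object; a bare kfree() would bypass that. A small user-space analogue of the same rule, using made-up types rather than the real driver core:

#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal analogue of the device_register()/put_device() rule: once an
 * object is refcounted and owns a release callback, error paths drop the
 * reference instead of freeing the memory directly, so the release work
 * still runs exactly once.
 */
struct ref_obj {
	int refcount;
	void (*release)(struct ref_obj *);
};

static void obj_release(struct ref_obj *obj)
{
	printf("release callback: freeing object\n");
	free(obj);
}

static void obj_put(struct ref_obj *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);
}

int main(void)
{
	struct ref_obj *obj = malloc(sizeof(*obj));

	obj->refcount = 1;		/* as if device_initialize() ran */
	obj->release = obj_release;

	/* pretend device_register() failed: drop the ref, do not free() */
	obj_put(obj);
	return 0;
}
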
 
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 81fb127..a2eba12 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -13,6 +13,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/platform_data/gpio_backlight.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -23,6 +25,7 @@
 
 	int gpio;
 	int active;
+	int def_value;
 };
 
 static int gpio_backlight_update_status(struct backlight_device *bl)
@@ -60,6 +63,29 @@
 	.check_fb	= gpio_backlight_check_fb,
 };
 
+static int gpio_backlight_probe_dt(struct platform_device *pdev,
+				   struct gpio_backlight *gbl)
+{
+	struct device_node *np = pdev->dev.of_node;
+	enum of_gpio_flags gpio_flags;
+
+	gbl->gpio = of_get_gpio_flags(np, 0, &gpio_flags);
+
+	if (!gpio_is_valid(gbl->gpio)) {
+		if (gbl->gpio != -EPROBE_DEFER) {
+			dev_err(&pdev->dev,
+				"Error: The gpios parameter is missing or invalid.\n");
+		}
+		return gbl->gpio;
+	}
+
+	gbl->active = (gpio_flags & OF_GPIO_ACTIVE_LOW) ? 0 : 1;
+
+	gbl->def_value = of_property_read_bool(np, "default-on");
+
+	return 0;
+}
+
 static int gpio_backlight_probe(struct platform_device *pdev)
 {
 	struct gpio_backlight_platform_data *pdata =
@@ -67,10 +93,12 @@
 	struct backlight_properties props;
 	struct backlight_device *bl;
 	struct gpio_backlight *gbl;
+	struct device_node *np = pdev->dev.of_node;
 	int ret;
 
-	if (!pdata) {
-		dev_err(&pdev->dev, "failed to find platform data\n");
+	if (!pdata && !np) {
+		dev_err(&pdev->dev,
+			"failed to find platform data or device tree node.\n");
 		return -ENODEV;
 	}
 
@@ -79,14 +107,22 @@
 		return -ENOMEM;
 
 	gbl->dev = &pdev->dev;
-	gbl->fbdev = pdata->fbdev;
-	gbl->gpio = pdata->gpio;
-	gbl->active = pdata->active_low ? 0 : 1;
+
+	if (np) {
+		ret = gpio_backlight_probe_dt(pdev, gbl);
+		if (ret)
+			return ret;
+	} else {
+		gbl->fbdev = pdata->fbdev;
+		gbl->gpio = pdata->gpio;
+		gbl->active = pdata->active_low ? 0 : 1;
+		gbl->def_value = pdata->def_value;
+	}
 
 	ret = devm_gpio_request_one(gbl->dev, gbl->gpio, GPIOF_DIR_OUT |
 				    (gbl->active ? GPIOF_INIT_LOW
 						 : GPIOF_INIT_HIGH),
-				    pdata->name);
+				    pdata ? pdata->name : "backlight");
 	if (ret < 0) {
 		dev_err(&pdev->dev, "unable to request GPIO\n");
 		return ret;
@@ -103,17 +139,25 @@
 		return PTR_ERR(bl);
 	}
 
-	bl->props.brightness = pdata->def_value;
+	bl->props.brightness = gbl->def_value;
 	backlight_update_status(bl);
 
 	platform_set_drvdata(pdev, bl);
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id gpio_backlight_of_match[] = {
+	{ .compatible = "gpio-backlight" },
+	{ /* sentinel */ }
+};
+#endif
+
 static struct platform_driver gpio_backlight_driver = {
 	.driver		= {
 		.name		= "gpio-backlight",
 		.owner		= THIS_MODULE,
+		.of_match_table = of_match_ptr(gpio_backlight_of_match),
 	},
 	.probe		= gpio_backlight_probe,
 };
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 6fd60ad..5f36808 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -349,8 +349,9 @@
 	props.brightness = pdata->init_brt_led;
 	props.max_brightness = pdata->max_brt_led;
 	pchip->bled =
-	    backlight_device_register("lm3639_bled", pchip->dev, pchip,
-				      &lm3639_bled_ops, &props);
+	    devm_backlight_device_register(pchip->dev, "lm3639_bled",
+					   pchip->dev, pchip, &lm3639_bled_ops,
+					   &props);
 	if (IS_ERR(pchip->bled)) {
 		dev_err(&client->dev, "fail : backlight register\n");
 		ret = PTR_ERR(pchip->bled);
@@ -360,7 +361,7 @@
 	ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode);
 	if (ret < 0) {
 		dev_err(&client->dev, "failed : add sysfs entries\n");
-		goto err_bled_mode;
+		goto err_out;
 	}
 
 	/* flash */
@@ -391,8 +392,6 @@
 	led_classdev_unregister(&pchip->cdev_flash);
 err_flash:
 	device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
-err_bled_mode:
-	backlight_device_unregister(pchip->bled);
 err_out:
 	return ret;
 }
@@ -407,10 +406,8 @@
 		led_classdev_unregister(&pchip->cdev_torch);
 	if (&pchip->cdev_flash)
 		led_classdev_unregister(&pchip->cdev_flash);
-	if (pchip->bled) {
+	if (pchip->bled)
 		device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
-		backlight_device_unregister(pchip->bled);
-	}
 	return 0;
 }
 
@@ -432,6 +429,6 @@
 module_i2c_driver(lm3639_i2c_driver);
 
 MODULE_DESCRIPTION("Texas Instruments Backlight+Flash LED driver for LM3639");
-MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
-MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/fs/aio.c b/fs/aio.c
index 062a5f6..12a3de0e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -52,7 +52,8 @@
 struct aio_ring {
 	unsigned	id;	/* kernel internal index number */
 	unsigned	nr;	/* number of io_events */
-	unsigned	head;
+	unsigned	head;	/* Written to by userland or under ring_lock
+				 * mutex by aio_read_events_ring(). */
 	unsigned	tail;
 
 	unsigned	magic;
@@ -243,6 +244,11 @@
 {
 	int i;
 
+	/* Disconnect the kioctx from the ring file.  This prevents future
+	 * accesses to the kioctx from page migration.
+	 */
+	put_aio_ring_file(ctx);
+
 	for (i = 0; i < ctx->nr_pages; i++) {
 		struct page *page;
 		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -254,8 +260,6 @@
 		put_page(page);
 	}
 
-	put_aio_ring_file(ctx);
-
 	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
 		kfree(ctx->ring_pages);
 		ctx->ring_pages = NULL;
@@ -283,29 +287,38 @@
 {
 	struct kioctx *ctx;
 	unsigned long flags;
+	pgoff_t idx;
 	int rc;
 
 	rc = 0;
 
-	/* Make sure the old page hasn't already been changed */
+	/* mapping->private_lock here protects against the kioctx teardown.  */
 	spin_lock(&mapping->private_lock);
 	ctx = mapping->private_data;
-	if (ctx) {
-		pgoff_t idx;
-		spin_lock_irqsave(&ctx->completion_lock, flags);
-		idx = old->index;
-		if (idx < (pgoff_t)ctx->nr_pages) {
-			if (ctx->ring_pages[idx] != old)
-				rc = -EAGAIN;
-		} else
-			rc = -EINVAL;
-		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	if (!ctx) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* The ring_lock mutex.  This prevents aio_read_events() from writing
+	 * to the ring's head, and prevents page migration from mucking with
+	 * a partially initialized kioctx.
+	 */
+	if (!mutex_trylock(&ctx->ring_lock)) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	idx = old->index;
+	if (idx < (pgoff_t)ctx->nr_pages) {
+		/* Make sure the old page hasn't already been changed */
+		if (ctx->ring_pages[idx] != old)
+			rc = -EAGAIN;
 	} else
 		rc = -EINVAL;
-	spin_unlock(&mapping->private_lock);
 
 	if (rc != 0)
-		return rc;
+		goto out_unlock;
 
 	/* Writeback must be complete */
 	BUG_ON(PageWriteback(old));
@@ -314,38 +327,26 @@
 	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
 		put_page(new);
-		return rc;
+		goto out_unlock;
 	}
 
-	/* We can potentially race against kioctx teardown here.  Use the
-	 * address_space's private data lock to protect the mapping's
-	 * private_data.
+	/* Take completion_lock to prevent other writes to the ring buffer
+	 * while the old page is copied to the new.  This prevents new
+	 * events from being lost.
 	 */
-	spin_lock(&mapping->private_lock);
-	ctx = mapping->private_data;
-	if (ctx) {
-		pgoff_t idx;
-		spin_lock_irqsave(&ctx->completion_lock, flags);
-		migrate_page_copy(new, old);
-		idx = old->index;
-		if (idx < (pgoff_t)ctx->nr_pages) {
-			/* And only do the move if things haven't changed */
-			if (ctx->ring_pages[idx] == old)
-				ctx->ring_pages[idx] = new;
-			else
-				rc = -EAGAIN;
-		} else
-			rc = -EINVAL;
-		spin_unlock_irqrestore(&ctx->completion_lock, flags);
-	} else
-		rc = -EBUSY;
+	spin_lock_irqsave(&ctx->completion_lock, flags);
+	migrate_page_copy(new, old);
+	BUG_ON(ctx->ring_pages[idx] != old);
+	ctx->ring_pages[idx] = new;
+	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+	/* The old page is no longer accessible. */
+	put_page(old);
+
+out_unlock:
+	mutex_unlock(&ctx->ring_lock);
+out:
 	spin_unlock(&mapping->private_lock);
-
-	if (rc == MIGRATEPAGE_SUCCESS)
-		put_page(old);
-	else
-		put_page(new);
-
 	return rc;
 }
 #endif
@@ -380,7 +381,7 @@
 	file = aio_private_file(ctx, nr_pages);
 	if (IS_ERR(file)) {
 		ctx->aio_ring_file = NULL;
-		return -EAGAIN;
+		return -ENOMEM;
 	}
 
 	ctx->aio_ring_file = file;
@@ -415,7 +416,7 @@
 
 	if (unlikely(i != nr_pages)) {
 		aio_free_ring(ctx);
-		return -EAGAIN;
+		return -ENOMEM;
 	}
 
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -429,7 +430,7 @@
 	if (IS_ERR((void *)ctx->mmap_base)) {
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
-		return -EAGAIN;
+		return -ENOMEM;
 	}
 
 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
@@ -556,6 +557,10 @@
 					rcu_read_unlock();
 					spin_unlock(&mm->ioctx_lock);
 
+					/* While kioctx setup is in progress,
+					 * we are protected from page migration
+					 * changing ring_pages by ->ring_lock.
+					 */
 					ring = kmap_atomic(ctx->ring_pages[0]);
 					ring->id = ctx->id;
 					kunmap_atomic(ring);
@@ -640,24 +645,28 @@
 
 	ctx->max_reqs = nr_events;
 
+	spin_lock_init(&ctx->ctx_lock);
+	spin_lock_init(&ctx->completion_lock);
+	mutex_init(&ctx->ring_lock);
+	/* Protect against page migration throughout kioctx setup by keeping
+	 * the ring_lock mutex held until setup is complete. */
+	mutex_lock(&ctx->ring_lock);
+	init_waitqueue_head(&ctx->wait);
+
+	INIT_LIST_HEAD(&ctx->active_reqs);
+
 	if (percpu_ref_init(&ctx->users, free_ioctx_users))
 		goto err;
 
 	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
 		goto err;
 
-	spin_lock_init(&ctx->ctx_lock);
-	spin_lock_init(&ctx->completion_lock);
-	mutex_init(&ctx->ring_lock);
-	init_waitqueue_head(&ctx->wait);
-
-	INIT_LIST_HEAD(&ctx->active_reqs);
-
 	ctx->cpu = alloc_percpu(struct kioctx_cpu);
 	if (!ctx->cpu)
 		goto err;
 
-	if (aio_setup_ring(ctx) < 0)
+	err = aio_setup_ring(ctx);
+	if (err < 0)
 		goto err;
 
 	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
@@ -683,6 +692,9 @@
 	if (err)
 		goto err_cleanup;
 
+	/* Release the ring_lock mutex now that all setup is complete. */
+	mutex_unlock(&ctx->ring_lock);
+
 	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 		 ctx, ctx->user_id, mm, ctx->nr_events);
 	return ctx;
@@ -692,6 +704,7 @@
 err_ctx:
 	aio_free_ring(ctx);
 err:
+	mutex_unlock(&ctx->ring_lock);
 	free_percpu(ctx->cpu);
 	free_percpu(ctx->reqs.pcpu_count);
 	free_percpu(ctx->users.pcpu_count);
@@ -1024,6 +1037,7 @@
 
 	mutex_lock(&ctx->ring_lock);
 
+	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
 	ring = kmap_atomic(ctx->ring_pages[0]);
 	head = ring->head;
 	tail = ring->tail;
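
The locking order established by the aio_migratepage() rewrite above is mapping->private_lock, then a mutex_trylock() of ctx->ring_lock, then completion_lock around the actual page copy; trylock is used so migration backs off with -EAGAIN instead of sleeping behind a long kioctx setup while holding private_lock. A compact pthread analogue of that trylock-and-bail pattern (illustrative only, not kernel locking primitives):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* private_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;  /* ring_lock    */

/* Take the outer lock, *try* the inner one, and back off rather than block. */
static int migrate_one_page(void)
{
	int rc = 0;

	pthread_mutex_lock(&outer);
	if (pthread_mutex_trylock(&inner) != 0) {
		rc = -EAGAIN;		/* setup owns ring_lock; retry later */
		goto out;
	}

	/* ... copy the page and update ring_pages[] here ... */

	pthread_mutex_unlock(&inner);
out:
	pthread_mutex_unlock(&outer);
	return rc;
}

int main(void)
{
	printf("migrate: %d\n", migrate_one_page());

	pthread_mutex_lock(&inner);	/* pretend setup holds ring_lock */
	printf("migrate while setup holds ring_lock: %d\n", migrate_one_page());
	pthread_mutex_unlock(&inner);
	return 0;
}
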
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 29696b7..1c2ce0c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -182,6 +182,9 @@
  */
 int bio_integrity_enabled(struct bio *bio)
 {
+	if (!bio_is_rw(bio))
+		return 0;
+
 	/* Already protected? */
 	if (bio_integrity(bio))
 		return 0;
@@ -309,10 +312,9 @@
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 	struct blk_integrity_exchg bix;
-	struct bio_vec bv;
-	struct bvec_iter iter;
+	struct bio_vec *bv;
 	sector_t sector;
-	unsigned int sectors, ret = 0;
+	unsigned int sectors, ret = 0, i;
 	void *prot_buf = bio->bi_integrity->bip_buf;
 
 	if (operate)
@@ -323,16 +325,16 @@
 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	bix.sector_size = bi->sector_size;
 
-	bio_for_each_segment(bv, bio, iter) {
-		void *kaddr = kmap_atomic(bv.bv_page);
-		bix.data_buf = kaddr + bv.bv_offset;
-		bix.data_size = bv.bv_len;
+	bio_for_each_segment_all(bv, bio, i) {
+		void *kaddr = kmap_atomic(bv->bv_page);
+		bix.data_buf = kaddr + bv->bv_offset;
+		bix.data_size = bv->bv_len;
 		bix.prot_buf = prot_buf;
 		bix.sector = sector;
 
-		if (operate) {
+		if (operate)
 			bi->generate_fn(&bix);
-		} else {
+		else {
 			ret = bi->verify_fn(&bix);
 			if (ret) {
 				kunmap_atomic(kaddr);
@@ -340,7 +342,7 @@
 			}
 		}
 
-		sectors = bv.bv_len / bi->sector_size;
+		sectors = bv->bv_len / bi->sector_size;
 		sector += sectors;
 		prot_buf += sectors * bi->tuple_size;
 
diff --git a/fs/bio.c b/fs/bio.c
index b1bc722..6f0362b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1002,7 +1002,7 @@
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-			     struct sg_iovec *iov, int iov_count,
+			     const struct sg_iovec *iov, int iov_count,
 			     int is_our_pages)
 {
 	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
@@ -1022,7 +1022,7 @@
 		       sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
 			  int to_user, int from_user, int do_free_page)
 {
 	int ret = 0, i;
@@ -1120,7 +1120,7 @@
  */
 struct bio *bio_copy_user_iov(struct request_queue *q,
 			      struct rq_map_data *map_data,
-			      struct sg_iovec *iov, int iov_count,
+			      const struct sg_iovec *iov, int iov_count,
 			      int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
@@ -1259,7 +1259,7 @@
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
-				      struct sg_iovec *iov, int iov_count,
+				      const struct sg_iovec *iov, int iov_count,
 				      int write_to_vm, gfp_t gfp_mask)
 {
 	int i, j;
@@ -1407,7 +1407,7 @@
  *	device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
-			     struct sg_iovec *iov, int iov_count,
+			     const struct sg_iovec *iov, int iov_count,
 			     int write_to_vm, gfp_t gfp_mask)
 {
 	struct bio *bio;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba0d2b0..552a8d1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1518,7 +1518,7 @@
 	BUG_ON(iocb->ki_pos != pos);
 
 	blk_start_plug(&plug);
-	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+	ret = __generic_file_aio_write(iocb, iov, nr_segs);
 	if (ret > 0) {
 		ssize_t err;
 
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index ecb5832..5a201d8 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -323,6 +323,8 @@
 
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
 {
+	if (!wq)
+		return;
 	wq->normal->max_active = max;
 	if (wq->high)
 		wq->high->max_active = max;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index aad7201..10db21f 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -330,7 +330,10 @@
 		goto out;
 	}
 
-	root_level = btrfs_old_root_level(root, time_seq);
+	if (path->search_commit_root)
+		root_level = btrfs_header_level(root->commit_root);
+	else
+		root_level = btrfs_old_root_level(root, time_seq);
 
 	if (root_level + 1 == level) {
 		srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -1099,9 +1102,9 @@
  *
  * returns 0 on success, < 0 on error.
  */
-int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
-				struct btrfs_fs_info *fs_info, u64 bytenr,
-				u64 time_seq, struct ulist **roots)
+static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info, u64 bytenr,
+				  u64 time_seq, struct ulist **roots)
 {
 	struct ulist *tmp;
 	struct ulist_node *node = NULL;
@@ -1137,6 +1140,20 @@
 	return 0;
 }
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+			 struct btrfs_fs_info *fs_info, u64 bytenr,
+			 u64 time_seq, struct ulist **roots)
+{
+	int ret;
+
+	if (!trans)
+		down_read(&fs_info->commit_root_sem);
+	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
+	if (!trans)
+		up_read(&fs_info->commit_root_sem);
+	return ret;
+}
+
 /*
  * this makes the path point to (inum INODE_ITEM ioff)
  */
@@ -1516,6 +1533,8 @@
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+	} else {
+		down_read(&fs_info->commit_root_sem);
 	}
 
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
@@ -1526,8 +1545,8 @@
 
 	ULIST_ITER_INIT(&ref_uiter);
 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
-		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
-					   tree_mod_seq_elem.seq, &roots);
+		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
+					     tree_mod_seq_elem.seq, &roots);
 		if (ret)
 			break;
 		ULIST_ITER_INIT(&root_uiter);
@@ -1549,6 +1568,8 @@
 	if (!search_commit_root) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 		btrfs_end_transaction(trans, fs_info->extent_root);
+	} else {
+		up_read(&fs_info->commit_root_sem);
 	}
 
 	return ret;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 88d1b1e..1bcfcdb 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2769,9 +2769,13 @@
 		 * the commit roots are read only
 		 * so we always do read locks
 		 */
+		if (p->need_commit_sem)
+			down_read(&root->fs_info->commit_root_sem);
 		b = root->commit_root;
 		extent_buffer_get(b);
 		level = btrfs_header_level(b);
+		if (p->need_commit_sem)
+			up_read(&root->fs_info->commit_root_sem);
 		if (!p->skip_locking)
 			btrfs_tree_read_lock(b);
 	} else {
@@ -5360,7 +5364,6 @@
 {
 	int ret;
 	int cmp;
-	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_path *left_path = NULL;
 	struct btrfs_path *right_path = NULL;
 	struct btrfs_key left_key;
@@ -5378,9 +5381,6 @@
 	u64 right_blockptr;
 	u64 left_gen;
 	u64 right_gen;
-	u64 left_start_ctransid;
-	u64 right_start_ctransid;
-	u64 ctransid;
 
 	left_path = btrfs_alloc_path();
 	if (!left_path) {
@@ -5404,21 +5404,6 @@
 	right_path->search_commit_root = 1;
 	right_path->skip_locking = 1;
 
-	spin_lock(&left_root->root_item_lock);
-	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
-	spin_unlock(&left_root->root_item_lock);
-
-	spin_lock(&right_root->root_item_lock);
-	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
-	spin_unlock(&right_root->root_item_lock);
-
-	trans = btrfs_join_transaction(left_root);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		trans = NULL;
-		goto out;
-	}
-
 	/*
 	 * Strategy: Go to the first items of both trees. Then do
 	 *
@@ -5455,6 +5440,7 @@
 	 *   the right if possible or go up and right.
 	 */
 
+	down_read(&left_root->fs_info->commit_root_sem);
 	left_level = btrfs_header_level(left_root->commit_root);
 	left_root_level = left_level;
 	left_path->nodes[left_level] = left_root->commit_root;
@@ -5464,6 +5450,7 @@
 	right_root_level = right_level;
 	right_path->nodes[right_level] = right_root->commit_root;
 	extent_buffer_get(right_path->nodes[right_level]);
+	up_read(&left_root->fs_info->commit_root_sem);
 
 	if (left_level == 0)
 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5482,67 +5469,6 @@
 	advance_left = advance_right = 0;
 
 	while (1) {
-		/*
-		 * We need to make sure the transaction does not get committed
-		 * while we do anything on commit roots. This means, we need to
-		 * join and leave transactions for every item that we process.
-		 */
-		if (trans && btrfs_should_end_transaction(trans, left_root)) {
-			btrfs_release_path(left_path);
-			btrfs_release_path(right_path);
-
-			ret = btrfs_end_transaction(trans, left_root);
-			trans = NULL;
-			if (ret < 0)
-				goto out;
-		}
-		/* now rejoin the transaction */
-		if (!trans) {
-			trans = btrfs_join_transaction(left_root);
-			if (IS_ERR(trans)) {
-				ret = PTR_ERR(trans);
-				trans = NULL;
-				goto out;
-			}
-
-			spin_lock(&left_root->root_item_lock);
-			ctransid = btrfs_root_ctransid(&left_root->root_item);
-			spin_unlock(&left_root->root_item_lock);
-			if (ctransid != left_start_ctransid)
-				left_start_ctransid = 0;
-
-			spin_lock(&right_root->root_item_lock);
-			ctransid = btrfs_root_ctransid(&right_root->root_item);
-			spin_unlock(&right_root->root_item_lock);
-			if (ctransid != right_start_ctransid)
-				right_start_ctransid = 0;
-
-			if (!left_start_ctransid || !right_start_ctransid) {
-				WARN(1, KERN_WARNING
-					"BTRFS: btrfs_compare_tree detected "
-					"a change in one of the trees while "
-					"iterating. This is probably a "
-					"bug.\n");
-				ret = -EIO;
-				goto out;
-			}
-
-			/*
-			 * the commit root may have changed, so start again
-			 * where we stopped
-			 */
-			left_path->lowest_level = left_level;
-			right_path->lowest_level = right_level;
-			ret = btrfs_search_slot(NULL, left_root,
-					&left_key, left_path, 0, 0);
-			if (ret < 0)
-				goto out;
-			ret = btrfs_search_slot(NULL, right_root,
-					&right_key, right_path, 0, 0);
-			if (ret < 0)
-				goto out;
-		}
-
 		if (advance_left && !left_end_reached) {
 			ret = tree_advance(left_root, left_path, &left_level,
 					left_root_level,
@@ -5672,14 +5598,6 @@
 	btrfs_free_path(left_path);
 	btrfs_free_path(right_path);
 	kfree(tmp_buf);
-
-	if (trans) {
-		if (!ret)
-			ret = btrfs_end_transaction(trans, left_root);
-		else
-			btrfs_end_transaction(trans, left_root);
-	}
-
 	return ret;
 }
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index bc96c03..4c48df5 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -609,6 +609,7 @@
 	unsigned int skip_locking:1;
 	unsigned int leave_spinning:1;
 	unsigned int search_commit_root:1;
+	unsigned int need_commit_sem:1;
 };
 
 /*
@@ -986,7 +987,8 @@
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
 #define BTRFS_BLOCK_GROUP_RAID5         (1ULL << 7)
 #define BTRFS_BLOCK_GROUP_RAID6         (1ULL << 8)
-#define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE
+#define BTRFS_BLOCK_GROUP_RESERVED	(BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
+					 BTRFS_SPACE_INFO_GLOBAL_RSV)
 
 enum btrfs_raid_types {
 	BTRFS_RAID_RAID10,
@@ -1018,6 +1020,12 @@
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
 
+/*
+ * A fake block group type that is used to communicate global block reserve
+ * size to userspace via the SPACE_INFO ioctl.
+ */
+#define BTRFS_SPACE_INFO_GLOBAL_RSV	(1ULL << 49)
+
 #define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
 					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
 
@@ -1440,7 +1448,7 @@
 	 */
 	struct mutex ordered_extent_flush_mutex;
 
-	struct rw_semaphore extent_commit_sem;
+	struct rw_semaphore commit_root_sem;
 
 	struct rw_semaphore cleanup_work_sem;
 
@@ -1711,7 +1719,6 @@
 	struct btrfs_block_rsv *block_rsv;
 
 	/* free ino cache stuff */
-	struct mutex fs_commit_mutex;
 	struct btrfs_free_space_ctl *free_ino_ctl;
 	enum btrfs_caching_type cached;
 	spinlock_t cache_lock;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index bd0f752..029d46c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -329,6 +329,8 @@
 {
 	struct extent_state *cached_state = NULL;
 	int ret;
+	bool need_lock = (current->journal_info ==
+			  (void *)BTRFS_SEND_TRANS_STUB);
 
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 		return 0;
@@ -336,6 +338,11 @@
 	if (atomic)
 		return -EAGAIN;
 
+	if (need_lock) {
+		btrfs_tree_read_lock(eb);
+		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+	}
+
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 			 0, &cached_state);
 	if (extent_buffer_uptodate(eb) &&
@@ -347,10 +354,21 @@
 		       "found %llu\n",
 		       eb->start, parent_transid, btrfs_header_generation(eb));
 	ret = 1;
-	clear_extent_buffer_uptodate(eb);
+
+	/*
+	 * Things reading via commit roots that don't have normal protection,
+	 * like send, can have a really old block in cache that may point at a
+	 * block that has been free'd and re-allocated.  So don't clear uptodate
+	 * if we find an eb that is under IO (dirty/writeback) because we could
+	 * end up reading in the stale data and then writing it back out and
+	 * making everybody very sad.
+	 */
+	if (!extent_buffer_under_io(eb))
+		clear_extent_buffer_uptodate(eb);
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state, GFP_NOFS);
+	btrfs_tree_read_unlock_blocking(eb);
 	return ret;
 }
 
@@ -1546,7 +1564,6 @@
 	root->subv_writers = writers;
 
 	btrfs_init_free_ino_ctl(root);
-	mutex_init(&root->fs_commit_mutex);
 	spin_lock_init(&root->cache_lock);
 	init_waitqueue_head(&root->cache_wait);
 
@@ -2324,7 +2341,7 @@
 	mutex_init(&fs_info->transaction_kthread_mutex);
 	mutex_init(&fs_info->cleaner_mutex);
 	mutex_init(&fs_info->volume_mutex);
-	init_rwsem(&fs_info->extent_commit_sem);
+	init_rwsem(&fs_info->commit_root_sem);
 	init_rwsem(&fs_info->cleanup_work_sem);
 	init_rwsem(&fs_info->subvol_sem);
 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c6b6a6e..1306487 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -419,7 +419,7 @@
 again:
 	mutex_lock(&caching_ctl->mutex);
 	/* need to make sure the commit_root doesn't disappear */
-	down_read(&fs_info->extent_commit_sem);
+	down_read(&fs_info->commit_root_sem);
 
 next:
 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -443,10 +443,10 @@
 				break;
 
 			if (need_resched() ||
-			    rwsem_is_contended(&fs_info->extent_commit_sem)) {
+			    rwsem_is_contended(&fs_info->commit_root_sem)) {
 				caching_ctl->progress = last;
 				btrfs_release_path(path);
-				up_read(&fs_info->extent_commit_sem);
+				up_read(&fs_info->commit_root_sem);
 				mutex_unlock(&caching_ctl->mutex);
 				cond_resched();
 				goto again;
@@ -513,7 +513,7 @@
 
 err:
 	btrfs_free_path(path);
-	up_read(&fs_info->extent_commit_sem);
+	up_read(&fs_info->commit_root_sem);
 
 	free_excluded_extents(extent_root, block_group);
 
@@ -633,10 +633,10 @@
 		return 0;
 	}
 
-	down_write(&fs_info->extent_commit_sem);
+	down_write(&fs_info->commit_root_sem);
 	atomic_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
-	up_write(&fs_info->extent_commit_sem);
+	up_write(&fs_info->commit_root_sem);
 
 	btrfs_get_block_group(cache);
 
@@ -2444,7 +2444,8 @@
 			spin_unlock(&locked_ref->lock);
 			spin_lock(&delayed_refs->lock);
 			spin_lock(&locked_ref->lock);
-			if (rb_first(&locked_ref->ref_root)) {
+			if (rb_first(&locked_ref->ref_root) ||
+			    locked_ref->extent_op) {
 				spin_unlock(&locked_ref->lock);
 				spin_unlock(&delayed_refs->lock);
 				continue;
@@ -5470,7 +5471,7 @@
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_space_info *space_info;
 
-	down_write(&fs_info->extent_commit_sem);
+	down_write(&fs_info->commit_root_sem);
 
 	list_for_each_entry_safe(caching_ctl, next,
 				 &fs_info->caching_block_groups, list) {
@@ -5489,7 +5490,7 @@
 	else
 		fs_info->pinned_extents = &fs_info->freed_extents[0];
 
-	up_write(&fs_info->extent_commit_sem);
+	up_write(&fs_info->commit_root_sem);
 
 	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
 		percpu_counter_set(&space_info->total_bytes_pinned, 0);
@@ -5744,6 +5745,8 @@
 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
 			bytenr, parent, root_objectid, owner_objectid,
 			owner_offset);
+		btrfs_abort_transaction(trans, extent_root, ret);
+		goto out;
 	} else {
 		btrfs_abort_transaction(trans, extent_root, ret);
 		goto out;
@@ -8255,14 +8258,14 @@
 	struct btrfs_caching_control *caching_ctl;
 	struct rb_node *n;
 
-	down_write(&info->extent_commit_sem);
+	down_write(&info->commit_root_sem);
 	while (!list_empty(&info->caching_block_groups)) {
 		caching_ctl = list_entry(info->caching_block_groups.next,
 					 struct btrfs_caching_control, list);
 		list_del(&caching_ctl->list);
 		put_caching_control(caching_ctl);
 	}
-	up_write(&info->extent_commit_sem);
+	up_write(&info->commit_root_sem);
 
 	spin_lock(&info->block_group_cache_lock);
 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
@@ -8336,9 +8339,15 @@
 			       struct btrfs_block_group_cache *cache)
 {
 	int index = get_block_group_index(cache);
+	bool first = false;
 
 	down_write(&space_info->groups_sem);
-	if (list_empty(&space_info->block_groups[index])) {
+	if (list_empty(&space_info->block_groups[index]))
+		first = true;
+	list_add_tail(&cache->list, &space_info->block_groups[index]);
+	up_write(&space_info->groups_sem);
+
+	if (first) {
 		struct kobject *kobj = &space_info->block_group_kobjs[index];
 		int ret;
 
@@ -8350,8 +8359,6 @@
 			kobject_put(&space_info->kobj);
 		}
 	}
-	list_add_tail(&cache->list, &space_info->block_groups[index]);
-	up_write(&space_info->groups_sem);
 }
 
 static struct btrfs_block_group_cache *
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ae69a00..3955e47 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -749,6 +749,7 @@
 		 * our range starts
 		 */
 		node = tree_search(tree, start);
+process_node:
 		if (!node)
 			break;
 
@@ -769,7 +770,10 @@
 		if (start > end)
 			break;
 
-		cond_resched_lock(&tree->lock);
+		if (!cond_resched_lock(&tree->lock)) {
+			node = rb_next(node);
+			goto process_node;
+		}
 	}
 out:
 	spin_unlock(&tree->lock);
@@ -4306,7 +4310,7 @@
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
-static int extent_buffer_under_io(struct extent_buffer *eb)
+int extent_buffer_under_io(struct extent_buffer *eb)
 {
 	return (atomic_read(&eb->io_pages) ||
 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 58b27e5..c488b45 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -320,6 +320,7 @@
 int set_extent_buffer_uptodate(struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_buffer *eb);
 int extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_under_io(struct extent_buffer *eb);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 		      unsigned long min_len, char **map,
 		      unsigned long *map_start,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c660527..eb742c0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -425,13 +425,8 @@
 		struct page *page = prepared_pages[pg];
 		/*
 		 * Copy data from userspace to the current page
-		 *
-		 * Disable pagefault to avoid recursive lock since
-		 * the pages are already locked
 		 */
-		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
-		pagefault_enable();
 
 		/* Flush processor's dcache for this page */
 		flush_dcache_page(page);
@@ -1665,7 +1660,7 @@
 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 				    const struct iovec *iov,
 				    unsigned long nr_segs, loff_t pos,
-				    loff_t *ppos, size_t count, size_t ocount)
+				    size_t count, size_t ocount)
 {
 	struct file *file = iocb->ki_filp;
 	struct iov_iter i;
@@ -1674,7 +1669,7 @@
 	loff_t endbyte;
 	int err;
 
-	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
+	written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
 					    count, ocount);
 
 	if (written < 0 || written == count)
@@ -1693,7 +1688,7 @@
 	if (err)
 		goto out;
 	written += written_buffered;
-	*ppos = pos + written_buffered;
+	iocb->ki_pos = pos + written_buffered;
 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
 				 endbyte >> PAGE_CACHE_SHIFT);
 out:
@@ -1725,8 +1720,8 @@
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	loff_t *ppos = &iocb->ki_pos;
 	u64 start_pos;
+	u64 end_pos;
 	ssize_t num_written = 0;
 	ssize_t err = 0;
 	size_t count, ocount;
@@ -1781,7 +1776,9 @@
 
 	start_pos = round_down(pos, root->sectorsize);
 	if (start_pos > i_size_read(inode)) {
-		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
+		/* Expand the hole to cover the write data, preventing an empty gap */
+		end_pos = round_up(pos + iov->iov_len, root->sectorsize);
+		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
 		if (err) {
 			mutex_unlock(&inode->i_mutex);
 			goto out;
@@ -1793,7 +1790,7 @@
 
 	if (unlikely(file->f_flags & O_DIRECT)) {
 		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
-						   pos, ppos, count, ocount);
+						   pos, count, ocount);
 	} else {
 		struct iov_iter i;
 
@@ -1801,7 +1798,7 @@
 
 		num_written = __btrfs_buffered_write(file, &i, pos);
 		if (num_written > 0)
-			*ppos = pos + num_written;
+			iocb->ki_pos = pos + num_written;
 	}
 
 	mutex_unlock(&inode->i_mutex);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ab485e5..cc8ca19 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -55,7 +55,7 @@
 	key.type = BTRFS_INODE_ITEM_KEY;
 again:
 	/* need to make sure the commit_root doesn't disappear */
-	mutex_lock(&root->fs_commit_mutex);
+	down_read(&fs_info->commit_root_sem);
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
@@ -88,7 +88,7 @@
 				btrfs_item_key_to_cpu(leaf, &key, 0);
 				btrfs_release_path(path);
 				root->cache_progress = last;
-				mutex_unlock(&root->fs_commit_mutex);
+				up_read(&fs_info->commit_root_sem);
 				schedule_timeout(1);
 				goto again;
 			} else
@@ -127,7 +127,7 @@
 	btrfs_unpin_free_ino(root);
 out:
 	wake_up(&root->cache_wait);
-	mutex_unlock(&root->fs_commit_mutex);
+	up_read(&fs_info->commit_root_sem);
 
 	btrfs_free_path(path);
 
@@ -223,11 +223,11 @@
 		 * or the caching work is done.
 		 */
 
-		mutex_lock(&root->fs_commit_mutex);
+		down_write(&root->fs_info->commit_root_sem);
 		spin_lock(&root->cache_lock);
 		if (root->cached == BTRFS_CACHE_FINISHED) {
 			spin_unlock(&root->cache_lock);
-			mutex_unlock(&root->fs_commit_mutex);
+			up_write(&root->fs_info->commit_root_sem);
 			goto again;
 		}
 		spin_unlock(&root->cache_lock);
@@ -240,7 +240,7 @@
 		else
 			__btrfs_add_free_space(pinned, objectid, 1);
 
-		mutex_unlock(&root->fs_commit_mutex);
+		up_write(&root->fs_info->commit_root_sem);
 	}
 }
 
@@ -250,7 +250,7 @@
  * and others will just be dropped, because the commit root we were
  * searching has changed.
  *
- * Must be called with root->fs_commit_mutex held
+ * Must be called with root->fs_info->commit_root_sem held
  */
 void btrfs_unpin_free_ino(struct btrfs_root *root)
 {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 06e9a41..5f805bc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -394,6 +394,14 @@
 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(NULL, inode);
 
+	/*
+	 * skip compression for a small file range (<=blocksize) that
+	 * isn't an inline extent, since it doesn't save disk space at all.
+	 */
+	if ((end - start + 1) <= blocksize &&
+	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
+		goto cleanup_and_bail_uncompressed;
+
 	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
@@ -1271,6 +1279,15 @@
 			disk_bytenr += cur_offset - found_key.offset;
 			num_bytes = min(end + 1, extent_end) - cur_offset;
 			/*
+			 * If there are pending snapshots for this root,
+			 * fall back to the normal COW path.
+			 */
+			if (!nolock) {
+				err = btrfs_start_nocow_write(root);
+				if (!err)
+					goto out_check;
+			}
+			/*
 			 * force cow if csum exists in the range.
 			 * this ensure that csum for a given extent are
 			 * either valid or do not exist.
@@ -1289,6 +1306,8 @@
 out_check:
 		if (extent_end <= start) {
 			path->slots[0]++;
+			if (!nolock && nocow)
+				btrfs_end_nocow_write(root);
 			goto next_slot;
 		}
 		if (!nocow) {
@@ -1306,8 +1325,11 @@
 			ret = cow_file_range(inode, locked_page,
 					     cow_start, found_key.offset - 1,
 					     page_started, nr_written, 1);
-			if (ret)
+			if (ret) {
+				if (!nolock && nocow)
+					btrfs_end_nocow_write(root);
 				goto error;
+			}
 			cow_start = (u64)-1;
 		}
 
@@ -1354,8 +1376,11 @@
 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
 						      num_bytes);
-			if (ret)
+			if (ret) {
+				if (!nolock && nocow)
+					btrfs_end_nocow_write(root);
 				goto error;
+			}
 		}
 
 		extent_clear_unlock_delalloc(inode, cur_offset,
@@ -1363,6 +1388,8 @@
 					     locked_page, EXTENT_LOCKED |
 					     EXTENT_DELALLOC, PAGE_UNLOCK |
 					     PAGE_SET_PRIVATE2);
+		if (!nolock && nocow)
+			btrfs_end_nocow_write(root);
 		cur_offset = extent_end;
 		if (cur_offset > end)
 			break;
@@ -8476,19 +8503,20 @@
 			else
 				iput(inode);
 			ret = -ENOMEM;
-			break;
+			goto out;
 		}
 		list_add_tail(&work->list, &works);
 		btrfs_queue_work(root->fs_info->flush_workers,
 				 &work->work);
 		ret++;
 		if (nr != -1 && ret >= nr)
-			break;
+			goto out;
 		cond_resched();
 		spin_lock(&root->delalloc_lock);
 	}
 	spin_unlock(&root->delalloc_lock);
 
+out:
 	list_for_each_entry_safe(work, next, &works, list) {
 		list_del_init(&work->list);
 		btrfs_wait_and_free_delalloc_work(work);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0401397..e79ff6b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1472,6 +1472,7 @@
 	struct btrfs_trans_handle *trans;
 	struct btrfs_device *device = NULL;
 	char *sizestr;
+	char *retptr;
 	char *devstr = NULL;
 	int ret = 0;
 	int mod = 0;
@@ -1539,8 +1540,8 @@
 			mod = 1;
 			sizestr++;
 		}
-		new_size = memparse(sizestr, NULL);
-		if (new_size == 0) {
+		new_size = memparse(sizestr, &retptr);
+		if (*retptr != '\0' || new_size == 0) {
 			ret = -EINVAL;
 			goto out_free;
 		}
@@ -3140,8 +3141,9 @@
 							 new_key.offset + datal,
 							 1);
 				if (ret) {
-					btrfs_abort_transaction(trans, root,
-								ret);
+					if (ret != -EINVAL)
+						btrfs_abort_transaction(trans,
+							root, ret);
 					btrfs_end_transaction(trans, root);
 					goto out;
 				}
@@ -3538,6 +3540,11 @@
 		up_read(&info->groups_sem);
 	}
 
+	/*
+	 * Global block reserve, exported as a space_info
+	 */
+	slot_count++;
+
 	/* space_slots == 0 means they are asking for a count */
 	if (space_args.space_slots == 0) {
 		space_args.total_spaces = slot_count;
@@ -3596,6 +3603,21 @@
 		up_read(&info->groups_sem);
 	}
 
+	/*
+	 * Add global block reserve
+	 */
+	if (slot_count) {
+		struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
+
+		spin_lock(&block_rsv->lock);
+		space.total_bytes = block_rsv->size;
+		space.used_bytes = block_rsv->size - block_rsv->reserved;
+		spin_unlock(&block_rsv->lock);
+		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
+		memcpy(dest, &space, sizeof(space));
+		space_args.total_spaces++;
+	}
+
 	user_dest = (struct btrfs_ioctl_space_info __user *)
 		(arg + sizeof(struct btrfs_ioctl_space_args));
 
@@ -4531,9 +4553,8 @@
 	}
 
 	args64 = kmalloc(sizeof(*args64), GFP_NOFS);
-	if (IS_ERR(args64)) {
-		ret = PTR_ERR(args64);
-		args64 = NULL;
+	if (!args64) {
+		ret = -ENOMEM;
 		goto out;
 	}
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index def428a..7f92ab1 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2317,7 +2317,6 @@
 static noinline_for_stack
 int merge_reloc_roots(struct reloc_control *rc)
 {
-	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root;
 	struct btrfs_root *reloc_root;
 	u64 last_snap;
@@ -2375,26 +2374,6 @@
 				list_add_tail(&reloc_root->root_list,
 					      &reloc_roots);
 			goto out;
-		} else if (!ret) {
-			/*
-			 * recover the last snapshot tranid to avoid
-			 * the space balance break NOCOW.
-			 */
-			root = read_fs_root(rc->extent_root->fs_info,
-					    objectid);
-			if (IS_ERR(root))
-				continue;
-
-			trans = btrfs_join_transaction(root);
-			BUG_ON(IS_ERR(trans));
-
-			/* Check if the fs/file tree was snapshoted or not. */
-			if (btrfs_root_last_snapshot(&root->root_item) ==
-			    otransid - 1)
-				btrfs_set_root_last_snapshot(&root->root_item,
-							     last_snap);
-				
-			btrfs_end_transaction(trans, root);
 		}
 	}
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 93e6d71..0be7799 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2235,6 +2235,47 @@
 	return 0;
 }
 
+/*
+ * Given a physical address, this will calculate its
+ * logical offset.  If this is a parity stripe, it will return
+ * the leftmost data stripe's logical offset.
+ *
+ * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
+ */
+static int get_raid56_logic_offset(u64 physical, int num,
+				   struct map_lookup *map, u64 *offset)
+{
+	int i;
+	int j = 0;
+	u64 stripe_nr;
+	u64 last_offset;
+	int stripe_index;
+	int rot;
+
+	last_offset = (physical - map->stripes[num].physical) *
+		      nr_data_stripes(map);
+	*offset = last_offset;
+	for (i = 0; i < nr_data_stripes(map); i++) {
+		*offset = last_offset + i * map->stripe_len;
+
+		stripe_nr = *offset;
+		do_div(stripe_nr, map->stripe_len);
+		do_div(stripe_nr, nr_data_stripes(map));
+
+		/* Work out the disk rotation on this stripe-set */
+		rot = do_div(stripe_nr, map->num_stripes);
+		/* calculate which stripe this data is located on */
+		rot += i;
+		stripe_index = rot % map->num_stripes;
+		if (stripe_index == num)
+			return 0;
+		if (stripe_index < num)
+			j++;
+	}
+	*offset = last_offset + j * map->stripe_len;
+	return 1;
+}
+
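
For readers following the new helper, here is a standalone worked example of the same mapping (illustration only, not part of the patch): do_div() is replaced by plain 64-bit division, the physical address is taken relative to the start of this device's slice of the chunk, and the RAID5 layout used below (three devices, two data stripes plus parity, 64K stripe_len) is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* returns 0 for a data stripe, 1 for a parity stripe */
static int raid56_logic_offset(uint64_t dev_off, int num, int num_stripes,
			       int nr_data, uint64_t stripe_len,
			       uint64_t *offset)
{
	uint64_t last_offset = dev_off * nr_data;
	int i, j = 0;

	for (i = 0; i < nr_data; i++) {
		uint64_t off = last_offset + i * stripe_len;
		/* full stripe number, then rotate the layout by it */
		uint64_t stripe_nr = off / stripe_len / nr_data;
		int rot = (int)(stripe_nr % num_stripes) + i;

		if (rot % num_stripes == num) {
			*offset = off;	/* chunk-relative logical offset */
			return 0;
		}
		if (rot % num_stripes < num)
			j++;
	}
	*offset = last_offset + j * stripe_len;
	return 1;
}

int main(void)
{
	uint64_t logical;
	/*
	 * RAID5 over 3 devices, stripe_len = 64K: the second 64K stripe on
	 * device 0 belongs to full stripe 1, whose parity rotates onto
	 * device 0, so this reports a parity stripe and the leftmost data
	 * stripe of that full stripe (chunk-relative logical 128K).
	 */
	int parity = raid56_logic_offset(65536, 0, 3, 2, 65536, &logical);

	printf("parity=%d logical=%llu\n",
	       parity, (unsigned long long)logical);
	return 0;
}
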
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   struct map_lookup *map,
 					   struct btrfs_device *scrub_dev,
@@ -2256,6 +2297,7 @@
 	u64 physical;
 	u64 logical;
 	u64 logic_end;
+	u64 physical_end;
 	u64 generation;
 	int mirror_num;
 	struct reada_control *reada1;
@@ -2269,16 +2311,10 @@
 	u64 extent_len;
 	struct btrfs_device *extent_dev;
 	int extent_mirror_num;
-	int stop_loop;
-
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6)) {
-		if (num >= nr_data_stripes(map)) {
-			return 0;
-		}
-	}
+	int stop_loop = 0;
 
 	nstripes = length;
+	physical = map->stripes[num].physical;
 	offset = 0;
 	do_div(nstripes, map->stripe_len);
 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
@@ -2296,6 +2332,11 @@
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
 		increment = map->stripe_len;
 		mirror_num = num % map->num_stripes + 1;
+	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+				BTRFS_BLOCK_GROUP_RAID6)) {
+		get_raid56_logic_offset(physical, num, map, &offset);
+		increment = map->stripe_len * nr_data_stripes(map);
+		mirror_num = 1;
 	} else {
 		increment = map->stripe_len;
 		mirror_num = 1;
@@ -2319,7 +2360,15 @@
 	 * to not hold off transaction commits
 	 */
 	logical = base + offset;
-
+	physical_end = physical + nstripes * map->stripe_len;
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+			 BTRFS_BLOCK_GROUP_RAID6)) {
+		get_raid56_logic_offset(physical_end, num,
+					map, &logic_end);
+		logic_end += base;
+	} else {
+		logic_end = logical + increment * nstripes;
+	}
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
 	scrub_blocked_if_needed(fs_info);
@@ -2328,7 +2377,7 @@
 	key_start.objectid = logical;
 	key_start.type = BTRFS_EXTENT_ITEM_KEY;
 	key_start.offset = (u64)0;
-	key_end.objectid = base + offset + nstripes * increment;
+	key_end.objectid = logic_end;
 	key_end.type = BTRFS_METADATA_ITEM_KEY;
 	key_end.offset = (u64)-1;
 	reada1 = btrfs_reada_add(root, &key_start, &key_end);
@@ -2338,7 +2387,7 @@
 	key_start.offset = logical;
 	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
 	key_end.type = BTRFS_EXTENT_CSUM_KEY;
-	key_end.offset = base + offset + nstripes * increment;
+	key_end.offset = logic_end;
 	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
 
 	if (!IS_ERR(reada1))
@@ -2356,11 +2405,17 @@
 	/*
 	 * now find all extents for each stripe and scrub them
 	 */
-	logical = base + offset;
-	physical = map->stripes[num].physical;
-	logic_end = logical + increment * nstripes;
 	ret = 0;
-	while (logical < logic_end) {
+	while (physical < physical_end) {
+		/* for raid56, we skip parity stripe */
+		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+				BTRFS_BLOCK_GROUP_RAID6)) {
+			ret = get_raid56_logic_offset(physical, num,
+					map, &logical);
+			logical += base;
+			if (ret)
+				goto skip;
+		}
 		/*
 		 * canceled?
 		 */
@@ -2504,15 +2559,29 @@
 			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
-				logical += increment;
-				physical += map->stripe_len;
-
+				if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+					BTRFS_BLOCK_GROUP_RAID6)) {
+					/*
+					 * loop until we find next data stripe
+					 * or we have finished all stripes.
+					 */
+					do {
+						physical += map->stripe_len;
+						ret = get_raid56_logic_offset(
+								physical, num,
+								map, &logical);
+						logical += base;
+					} while (physical < physical_end && ret);
+				} else {
+					physical += map->stripe_len;
+					logical += increment;
+				}
 				if (logical < key.objectid + bytes) {
 					cond_resched();
 					goto again;
 				}
 
-				if (logical >= logic_end) {
+				if (physical >= physical_end) {
 					stop_loop = 1;
 					break;
 				}
@@ -2521,6 +2590,7 @@
 			path->slots[0]++;
 		}
 		btrfs_release_path(path);
+skip:
 		logical += increment;
 		physical += map->stripe_len;
 		spin_lock(&sctx->stat_lock);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9b6da9d..1ac3ca9 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -493,6 +493,7 @@
 		return NULL;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
+	path->need_commit_sem = 1;
 	return path;
 }
 
@@ -771,29 +772,22 @@
 /*
  * Helper function to retrieve some fields from an inode item.
  */
-static int get_inode_info(struct btrfs_root *root,
-			  u64 ino, u64 *size, u64 *gen,
-			  u64 *mode, u64 *uid, u64 *gid,
-			  u64 *rdev)
+static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
+			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
+			  u64 *gid, u64 *rdev)
 {
 	int ret;
 	struct btrfs_inode_item *ii;
 	struct btrfs_key key;
-	struct btrfs_path *path;
-
-	path = alloc_path_for_send();
-	if (!path)
-		return -ENOMEM;
 
 	key.objectid = ino;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	if (ret < 0)
-		goto out;
 	if (ret) {
-		ret = -ENOENT;
-		goto out;
+		if (ret > 0)
+			ret = -ENOENT;
+		return ret;
 	}
 
 	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -811,7 +805,22 @@
 	if (rdev)
 		*rdev = btrfs_inode_rdev(path->nodes[0], ii);
 
-out:
+	return ret;
+}
+
+static int get_inode_info(struct btrfs_root *root,
+			  u64 ino, u64 *size, u64 *gen,
+			  u64 *mode, u64 *uid, u64 *gid,
+			  u64 *rdev)
+{
+	struct btrfs_path *path;
+	int ret;
+
+	path = alloc_path_for_send();
+	if (!path)
+		return -ENOMEM;
+	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
+			       rdev);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -1085,6 +1094,7 @@
 struct backref_ctx {
 	struct send_ctx *sctx;
 
+	struct btrfs_path *path;
 	/* number of total found references */
 	u64 found;
 
@@ -1155,8 +1165,9 @@
 	 * There are inodes that have extents that lie behind its i_size. Don't
 	 * accept clones from these extents.
 	 */
-	ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
-			NULL);
+	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
+			       NULL, NULL, NULL);
+	btrfs_release_path(bctx->path);
 	if (ret < 0)
 		return ret;
 
@@ -1235,12 +1246,17 @@
 	if (!tmp_path)
 		return -ENOMEM;
 
+	/* We only use this path under the commit sem */
+	tmp_path->need_commit_sem = 0;
+
 	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
 	if (!backref_ctx) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	backref_ctx->path = tmp_path;
+
 	if (data_offset >= ino_size) {
 		/*
 		 * There may be extents that lie behind the file's size.
@@ -1268,8 +1284,10 @@
 	}
 	logical = disk_byte + btrfs_file_extent_offset(eb, fi);
 
+	down_read(&sctx->send_root->fs_info->commit_root_sem);
 	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
 				  &found_key, &flags);
+	up_read(&sctx->send_root->fs_info->commit_root_sem);
 	btrfs_release_path(tmp_path);
 
 	if (ret < 0)
@@ -4418,6 +4436,9 @@
 	p = fs_path_alloc();
 	if (!p)
 		return -ENOMEM;
+	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+	if (ret < 0)
+		goto tlv_put_failure;
 	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
 	while (offset < end) {
 		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
@@ -4425,9 +4446,6 @@
 		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
 		if (ret < 0)
 			break;
-		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
-		if (ret < 0)
-			break;
 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
 		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
 		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
@@ -4968,7 +4986,9 @@
 
 	if (S_ISREG(sctx->cur_inode_mode)) {
 		if (need_send_hole(sctx)) {
-			if (sctx->cur_inode_last_extent == (u64)-1) {
+			if (sctx->cur_inode_last_extent == (u64)-1 ||
+			    sctx->cur_inode_last_extent <
+			    sctx->cur_inode_size) {
 				ret = get_last_extent(sctx, (u64)-1);
 				if (ret)
 					goto out;
@@ -5367,57 +5387,21 @@
 static int full_send_tree(struct send_ctx *sctx)
 {
 	int ret;
-	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_root *send_root = sctx->send_root;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 	struct btrfs_path *path;
 	struct extent_buffer *eb;
 	int slot;
-	u64 start_ctransid;
-	u64 ctransid;
 
 	path = alloc_path_for_send();
 	if (!path)
 		return -ENOMEM;
 
-	spin_lock(&send_root->root_item_lock);
-	start_ctransid = btrfs_root_ctransid(&send_root->root_item);
-	spin_unlock(&send_root->root_item_lock);
-
 	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 
-join_trans:
-	/*
-	 * We need to make sure the transaction does not get committed
-	 * while we do anything on commit roots. Join a transaction to prevent
-	 * this.
-	 */
-	trans = btrfs_join_transaction(send_root);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		trans = NULL;
-		goto out;
-	}
-
-	/*
-	 * Make sure the tree has not changed after re-joining. We detect this
-	 * by comparing start_ctransid and ctransid. They should always match.
-	 */
-	spin_lock(&send_root->root_item_lock);
-	ctransid = btrfs_root_ctransid(&send_root->root_item);
-	spin_unlock(&send_root->root_item_lock);
-
-	if (ctransid != start_ctransid) {
-		WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
-				     "send was modified in between. This is "
-				     "probably a bug.\n");
-		ret = -EIO;
-		goto out;
-	}
-
 	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
 	if (ret < 0)
 		goto out;
@@ -5425,19 +5409,6 @@
 		goto out_finish;
 
 	while (1) {
-		/*
-		 * When someone want to commit while we iterate, end the
-		 * joined transaction and rejoin.
-		 */
-		if (btrfs_should_end_transaction(trans, send_root)) {
-			ret = btrfs_end_transaction(trans, send_root);
-			trans = NULL;
-			if (ret < 0)
-				goto out;
-			btrfs_release_path(path);
-			goto join_trans;
-		}
-
 		eb = path->nodes[0];
 		slot = path->slots[0];
 		btrfs_item_key_to_cpu(eb, &found_key, slot);
@@ -5465,12 +5436,6 @@
 
 out:
 	btrfs_free_path(path);
-	if (trans) {
-		if (!ret)
-			ret = btrfs_end_transaction(trans, send_root);
-		else
-			btrfs_end_transaction(trans, send_root);
-	}
 	return ret;
 }
 
@@ -5718,7 +5683,9 @@
 			NULL);
 	sort_clone_roots = 1;
 
+	current->journal_info = (void *)BTRFS_SEND_TRANS_STUB;
 	ret = send_subvol(sctx);
+	current->journal_info = NULL;
 	if (ret < 0)
 		goto out;
 
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9dbf423..5011aad 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,6 +66,8 @@
 static const struct super_operations btrfs_super_ops;
 static struct file_system_type btrfs_fs_type;
 
+static int btrfs_remount(struct super_block *sb, int *flags, char *data);
+
 static const char *btrfs_decode_error(int errno)
 {
 	char *errstr = "unknown";
@@ -1185,6 +1187,26 @@
 	mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
 			     newargs);
 	kfree(newargs);
+
+	if (PTR_RET(mnt) == -EBUSY) {
+		if (flags & MS_RDONLY) {
+			mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, device_name,
+					     newargs);
+		} else {
+			int r;
+			mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
+					     newargs);
+			if (IS_ERR(mnt))
+				return ERR_CAST(mnt);
+
+			r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
+			if (r < 0) {
+				/* FIXME: release vfsmount mnt ??*/
+				return ERR_PTR(r);
+			}
+		}
+	}
+
 	if (IS_ERR(mnt))
 		return ERR_CAST(mnt);
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a04707f..7579f6d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -75,10 +75,21 @@
 	}
 }
 
-static noinline void switch_commit_root(struct btrfs_root *root)
+static noinline void switch_commit_roots(struct btrfs_transaction *trans,
+					 struct btrfs_fs_info *fs_info)
 {
-	free_extent_buffer(root->commit_root);
-	root->commit_root = btrfs_root_node(root);
+	struct btrfs_root *root, *tmp;
+
+	down_write(&fs_info->commit_root_sem);
+	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
+				 dirty_list) {
+		list_del_init(&root->dirty_list);
+		free_extent_buffer(root->commit_root);
+		root->commit_root = btrfs_root_node(root);
+		if (is_fstree(root->objectid))
+			btrfs_unpin_free_ino(root);
+	}
+	up_write(&fs_info->commit_root_sem);
 }
 
 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
@@ -208,6 +219,7 @@
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->ordered_operations);
 	INIT_LIST_HEAD(&cur_trans->pending_chunks);
+	INIT_LIST_HEAD(&cur_trans->switch_commits);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -375,7 +387,8 @@
 	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 		return ERR_PTR(-EROFS);
 
-	if (current->journal_info) {
+	if (current->journal_info &&
+	    current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
 		WARN_ON(type & TRANS_EXTWRITERS);
 		h = current->journal_info;
 		h->use_count++;
@@ -919,9 +932,6 @@
 			return ret;
 	}
 
-	if (root != root->fs_info->extent_root)
-		switch_commit_root(root);
-
 	return 0;
 }
 
@@ -977,15 +987,16 @@
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
 
+		if (root != fs_info->extent_root)
+			list_add_tail(&root->dirty_list,
+				      &trans->transaction->switch_commits);
 		ret = update_cowonly_root(trans, root);
 		if (ret)
 			return ret;
 	}
 
-	down_write(&fs_info->extent_commit_sem);
-	switch_commit_root(fs_info->extent_root);
-	up_write(&fs_info->extent_commit_sem);
-
+	list_add_tail(&fs_info->extent_root->dirty_list,
+		      &trans->transaction->switch_commits);
 	btrfs_after_dev_replace_commit(fs_info);
 
 	return 0;
@@ -1042,11 +1053,8 @@
 			smp_wmb();
 
 			if (root->commit_root != root->node) {
-				mutex_lock(&root->fs_commit_mutex);
-				switch_commit_root(root);
-				btrfs_unpin_free_ino(root);
-				mutex_unlock(&root->fs_commit_mutex);
-
+				list_add_tail(&root->dirty_list,
+					&trans->transaction->switch_commits);
 				btrfs_set_root_node(&root->root_item,
 						    root->node);
 			}
@@ -1857,11 +1865,15 @@
 
 	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
 			    root->fs_info->tree_root->node);
-	switch_commit_root(root->fs_info->tree_root);
+	list_add_tail(&root->fs_info->tree_root->dirty_list,
+		      &cur_trans->switch_commits);
 
 	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
 			    root->fs_info->chunk_root->node);
-	switch_commit_root(root->fs_info->chunk_root);
+	list_add_tail(&root->fs_info->chunk_root->dirty_list,
+		      &cur_trans->switch_commits);
+
+	switch_commit_roots(cur_trans, root->fs_info);
 
 	assert_qgroups_uptodate(trans);
 	update_super_roots(root);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6ac037e..b57b924 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -57,6 +57,7 @@
 	struct list_head pending_snapshots;
 	struct list_head ordered_operations;
 	struct list_head pending_chunks;
+	struct list_head switch_commits;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 };
@@ -78,6 +79,8 @@
 #define TRANS_EXTWRITERS	(__TRANS_USERSPACE | __TRANS_START |	\
 				 __TRANS_ATTACH)
 
+#define BTRFS_SEND_TRANS_STUB	1
+
 struct btrfs_trans_handle {
 	u64 transid;
 	u64 bytes_reserved;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d241130a..49d7fab 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -448,6 +448,14 @@
 	run_scheduled_bios(device);
 }
 
+/*
+ * Add new device to list of registered devices
+ *
+ * Returns:
+ * 1   - first time device is seen
+ * 0   - device already known
+ * < 0 - error
+ */
 static noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
@@ -455,6 +463,7 @@
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *fs_devices;
 	struct rcu_string *name;
+	int ret = 0;
 	u64 found_transid = btrfs_super_generation(disk_super);
 
 	fs_devices = find_fsid(disk_super->fsid);
@@ -495,6 +504,7 @@
 		fs_devices->num_devices++;
 		mutex_unlock(&fs_devices->device_list_mutex);
 
+		ret = 1;
 		device->fs_devices = fs_devices;
 	} else if (!device->name || strcmp(device->name->str, path)) {
 		name = rcu_string_strdup(path, GFP_NOFS);
@@ -513,7 +523,8 @@
 		fs_devices->latest_trans = found_transid;
 	}
 	*fs_devices_ret = fs_devices;
-	return 0;
+
+	return ret;
 }
 
 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
@@ -910,17 +921,19 @@
 	transid = btrfs_super_generation(disk_super);
 	total_devices = btrfs_super_num_devices(disk_super);
 
-	if (disk_super->label[0]) {
-		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
-			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
-		printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
-	} else {
-		printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
-	}
-
-	printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
-
 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
+	if (ret > 0) {
+		if (disk_super->label[0]) {
+			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
+				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
+			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
+		} else {
+			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
+		}
+
+		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
+		ret = 0;
+	}
 	if (!ret && fs_devices_ret)
 		(*fs_devices_ret)->total_devices = total_devices;
 
diff --git a/fs/buffer.c b/fs/buffer.c
index 8c53a2b..9ddb9fc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2114,8 +2114,8 @@
  * Returns true if all buffers which correspond to a file portion
  * we want to read are uptodate.
  */
-int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
-					unsigned long from)
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+					unsigned long count)
 {
 	unsigned block_start, block_end, blocksize;
 	unsigned to;
@@ -2127,7 +2127,7 @@
 
 	head = page_buffers(page);
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+	to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
 	to = from + to;
 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
 		return 0;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 622f469..5b99baf 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -124,7 +124,6 @@
 	/* check parameters */
 	ret = -EOPNOTSUPP;
 	if (!root->d_inode ||
-	    !root->d_inode->i_op ||
 	    !root->d_inode->i_op->lookup ||
 	    !root->d_inode->i_op->mkdir ||
 	    !root->d_inode->i_op->setxattr ||
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 6494d9f..c0a6817 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -779,8 +779,7 @@
 	}
 
 	ret = -EPERM;
-	if (!subdir->d_inode->i_op ||
-	    !subdir->d_inode->i_op->setxattr ||
+	if (!subdir->d_inode->i_op->setxattr ||
 	    !subdir->d_inode->i_op->getxattr ||
 	    !subdir->d_inode->i_op->lookup ||
 	    !subdir->d_inode->i_op->mkdir ||
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 66075a4..39da1c2 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -601,7 +601,7 @@
 					    false);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			goto out;
+			break;
 		}
 
 		num_pages = calc_pages_for(page_align, len);
@@ -719,7 +719,7 @@
 					    false);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			goto out;
+			break;
 		}
 
 		/*
@@ -972,6 +972,7 @@
 		}
 	} else {
 		loff_t old_size = inode->i_size;
+		struct iov_iter from;
 		/*
 		 * No need to acquire the i_truncate_mutex. Because
 		 * the MDS revokes Fwb caps before sending truncate
@@ -979,9 +980,10 @@
 		 * are pending vmtruncate. So write and vmtruncate
 		 * can not run at the same time
 		 */
-		written = generic_file_buffered_write(iocb, iov, nr_segs,
-						      pos, &iocb->ki_pos,
-						      count, 0);
+		iov_iter_init(&from, iov, nr_segs, count, 0);
+		written = generic_perform_write(file, &from, pos);
+		if (likely(written >= 0))
+			iocb->ki_pos = pos + written;
 		if (inode->i_size > old_size)
 			ceph_fscache_update_objectsize(inode);
 		mutex_unlock(&inode->i_mutex);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index efbe082..fdf941b 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -1,9 +1,8 @@
+#include <linux/ceph/ceph_debug.h>
 #include <linux/in.h>
 
 #include "super.h"
 #include "mds_client.h"
-#include <linux/ceph/ceph_debug.h>
-
 #include "ioctl.h"
 
 
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2c70cbe..df9c9141 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -850,7 +850,6 @@
 /*	revalidate:cifs_revalidate, */
 	.setattr = cifs_setattr,
 	.getattr = cifs_getattr, /* do we need this anymore? */
-	.rename = cifs_rename,
 	.permission = cifs_permission,
 #ifdef CONFIG_CIFS_XATTR
 	.setxattr = cifs_setxattr,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 216d7e9..8807442 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,19 +2579,32 @@
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 	ssize_t rc = -EACCES;
-	loff_t lock_pos = pos;
+	loff_t lock_pos = iocb->ki_pos;
 
-	if (file->f_flags & O_APPEND)
-		lock_pos = i_size_read(inode);
 	/*
 	 * We need to hold the sem to be sure nobody modifies lock list
 	 * with a brlock that prevents writing.
 	 */
 	down_read(&cinode->lock_sem);
+	mutex_lock(&inode->i_mutex);
+	if (file->f_flags & O_APPEND)
+		lock_pos = i_size_read(inode);
 	if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
 				     server->vals->exclusive_lock_type, NULL,
-				     CIFS_WRITE_OP))
-		rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
+				     CIFS_WRITE_OP)) {
+		rc = __generic_file_aio_write(iocb, iov, nr_segs);
+		mutex_unlock(&inode->i_mutex);
+
+		if (rc > 0) {
+			ssize_t err;
+
+			err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+			if (err < 0)
+				rc = err;
+		}
+	} else {
+		mutex_unlock(&inode->i_mutex);
+	}
 	up_read(&cinode->lock_sem);
 	return rc;
 }
@@ -2727,56 +2740,27 @@
 /**
  * cifs_readdata_to_iov - copy data from pages in response to an iovec
  * @rdata:	the readdata response with list of pages holding data
- * @iov:	vector in which we should copy the data
- * @nr_segs:	number of segments in vector
- * @offset:	offset into file of the first iovec
- * @copied:	used to return the amount of data copied to the iov
+ * @iter:	destination for our data
  *
  * This function copies data from a list of pages in a readdata response into
  * an array of iovecs. It will first calculate where the data should go
  * based on the info in the readdata and then copy the data into that spot.
  */
-static ssize_t
-cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
-			unsigned long nr_segs, loff_t offset, ssize_t *copied)
+static int
+cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
 {
-	int rc = 0;
-	struct iov_iter ii;
-	size_t pos = rdata->offset - offset;
-	ssize_t remaining = rdata->bytes;
-	unsigned char *pdata;
+	size_t remaining = rdata->bytes;
 	unsigned int i;
 
-	/* set up iov_iter and advance to the correct offset */
-	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
-	iov_iter_advance(&ii, pos);
-
-	*copied = 0;
 	for (i = 0; i < rdata->nr_pages; i++) {
-		ssize_t copy;
 		struct page *page = rdata->pages[i];
-
-		/* copy a whole page or whatever's left */
-		copy = min_t(ssize_t, remaining, PAGE_SIZE);
-
-		/* ...but limit it to whatever space is left in the iov */
-		copy = min_t(ssize_t, copy, iov_iter_count(&ii));
-
-		/* go while there's data to be copied and no errors */
-		if (copy && !rc) {
-			pdata = kmap(page);
-			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-						(int)copy);
-			kunmap(page);
-			if (!rc) {
-				*copied += copy;
-				remaining -= copy;
-				iov_iter_advance(&ii, copy);
-			}
-		}
+		size_t copy = min(remaining, PAGE_SIZE);
+		size_t written = copy_page_to_iter(page, 0, copy, iter);
+		remaining -= written;
+		if (written < copy && iov_iter_count(iter) > 0)
+			break;
 	}
-
-	return rc;
+	return remaining ? -EFAULT : 0;
 }
 
 static void
@@ -2837,20 +2821,21 @@
 	return total_read > 0 ? total_read : result;
 }
 
-static ssize_t
-cifs_iovec_read(struct file *file, const struct iovec *iov,
-		 unsigned long nr_segs, loff_t *poffset)
+ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+			       unsigned long nr_segs, loff_t pos)
 {
+	struct file *file = iocb->ki_filp;
 	ssize_t rc;
 	size_t len, cur_len;
 	ssize_t total_read = 0;
-	loff_t offset = *poffset;
+	loff_t offset = pos;
 	unsigned int npages;
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_tcon *tcon;
 	struct cifsFileInfo *open_file;
 	struct cifs_readdata *rdata, *tmp;
 	struct list_head rdata_list;
+	struct iov_iter to;
 	pid_t pid;
 
 	if (!nr_segs)
@@ -2860,6 +2845,8 @@
 	if (!len)
 		return 0;
 
+	iov_iter_init(&to, iov, nr_segs, len, 0);
+
 	INIT_LIST_HEAD(&rdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	open_file = file->private_data;
@@ -2917,55 +2904,44 @@
 	if (!list_empty(&rdata_list))
 		rc = 0;
 
+	len = iov_iter_count(&to);
 	/* the loop below should proceed in the order of increasing offsets */
-restart_loop:
 	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+	again:
 		if (!rc) {
-			ssize_t copied;
-
 			/* FIXME: freezable sleep too? */
 			rc = wait_for_completion_killable(&rdata->done);
 			if (rc)
 				rc = -EINTR;
-			else if (rdata->result)
+			else if (rdata->result) {
 				rc = rdata->result;
-			else {
-				rc = cifs_readdata_to_iov(rdata, iov,
-							nr_segs, *poffset,
-							&copied);
-				total_read += copied;
+				/* resend call if it's a retryable error */
+				if (rc == -EAGAIN) {
+					rc = cifs_retry_async_readv(rdata);
+					goto again;
+				}
+			} else {
+				rc = cifs_readdata_to_iov(rdata, &to);
 			}
 
-			/* resend call if it's a retryable error */
-			if (rc == -EAGAIN) {
-				rc = cifs_retry_async_readv(rdata);
-				goto restart_loop;
-			}
 		}
 		list_del_init(&rdata->list);
 		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
 	}
 
+	total_read = len - iov_iter_count(&to);
+
 	cifs_stats_bytes_read(tcon, total_read);
-	*poffset += total_read;
 
 	/* mask nodata case */
 	if (rc == -ENODATA)
 		rc = 0;
 
-	return total_read ? total_read : rc;
-}
-
-ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t pos)
-{
-	ssize_t read;
-
-	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
-	if (read > 0)
-		iocb->ki_pos = pos;
-
-	return read;
+	if (total_read) {
+		iocb->ki_pos = pos + total_read;
+		return total_read;
+	}
+	return rc;
 }
 
 ssize_t
diff --git a/fs/exec.c b/fs/exec.c
index 9e81c63..476f3eb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -813,7 +813,7 @@
 
 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
 {
-	ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos);
+	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
 	if (res > 0)
 		flush_icache_range(addr, addr + len);
 	return res;
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index 7682b97..4e2c032 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -21,12 +21,12 @@
 #undef ORE_DBGMSG2
 #define ORE_DBGMSG2 ORE_DBGMSG
 
-struct page *_raid_page_alloc(void)
+static struct page *_raid_page_alloc(void)
 {
 	return alloc_page(GFP_KERNEL);
 }
 
-void _raid_page_free(struct page *p)
+static void _raid_page_free(struct page *p)
 {
 	__free_page(p);
 }
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 9d97633..ed73ed8 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -543,7 +543,7 @@
 	return !(odi->systemid_len || odi->osdname_len);
 }
 
-int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
+static int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
 		      struct exofs_dev **peds)
 {
 	struct __alloc_ore_devs_and_exofs_devs {
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4e508fc..ca7502d 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -146,7 +146,7 @@
 			overwrite = 1;
 	}
 
-	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+	ret = __generic_file_aio_write(iocb, iov, nr_segs);
 	mutex_unlock(&inode->i_mutex);
 
 	if (ret > 0) {
diff --git a/fs/file.c b/fs/file.c
index b61293b..8f294cf 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -25,7 +25,10 @@
 
 int sysctl_nr_open __read_mostly = 1024*1024;
 int sysctl_nr_open_min = BITS_PER_LONG;
-int sysctl_nr_open_max = 1024 * 1024; /* raised later */
+/* our max() is unusable in constant expressions ;-/ */
+#define __const_max(x, y) ((x) < (y) ? (x) : (y))
+int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
+			 -BITS_PER_LONG;
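
A file-scope initializer has to be a compile-time constant expression; the kernel's min()/max() helpers expand to statement expressions with typeof temporaries (hence the comment above), which cannot appear there, so a plain ternary is open-coded instead. A standalone sketch of the same idea (illustration only; CONST_MIN and the rounding mask are made-up stand-ins):

#include <limits.h>
#include <stdio.h>

/* a plain ternary over constants is an integer constant expression */
#define CONST_MIN(x, y)	((x) < (y) ? (x) : (y))

/* fine at file scope: the whole right-hand side folds at compile time */
static const size_t nr_open_cap =
	CONST_MIN((size_t)INT_MAX, ~(size_t)0 / sizeof(void *)) & ~(size_t)63;

int main(void)
{
	printf("%zu\n", nr_open_cap);	/* 2147483584 on LP64 */
	return 0;
}
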
 
 static void *alloc_fdmem(size_t size)
 {
@@ -429,12 +432,6 @@
 	}
 }
 
-void __init files_defer_init(void)
-{
-	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
-			     -BITS_PER_LONG;
-}
-
 struct files_struct init_files = {
 	.count		= ATOMIC_INIT(1),
 	.fdt		= &init_files.fdtab,
diff --git a/fs/file_table.c b/fs/file_table.c
index 01071c4..a374f50 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -52,7 +52,6 @@
 static inline void file_free(struct file *f)
 {
 	percpu_counter_dec(&nr_files);
-	file_check_state(f);
 	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
 }
 
@@ -178,47 +177,12 @@
 	file->f_mapping = path->dentry->d_inode->i_mapping;
 	file->f_mode = mode;
 	file->f_op = fop;
-
-	/*
-	 * These mounts don't really matter in practice
-	 * for r/o bind mounts.  They aren't userspace-
-	 * visible.  We do this for consistency, and so
-	 * that we can do debugging checks at __fput()
-	 */
-	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
-		file_take_write(file);
-		WARN_ON(mnt_clone_write(path->mnt));
-	}
 	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
 		i_readcount_inc(path->dentry->d_inode);
 	return file;
 }
 EXPORT_SYMBOL(alloc_file);
 
-/**
- * drop_file_write_access - give up ability to write to a file
- * @file: the file to which we will stop writing
- *
- * This is a central place which will give up the ability
- * to write to @file, along with access to write through
- * its vfsmount.
- */
-static void drop_file_write_access(struct file *file)
-{
-	struct vfsmount *mnt = file->f_path.mnt;
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
-
-	put_write_access(inode);
-
-	if (special_file(inode->i_mode))
-		return;
-	if (file_check_writeable(file) != 0)
-		return;
-	__mnt_drop_write(mnt);
-	file_release_write(file);
-}
-
 /* the real guts of fput() - releasing the last reference to file
  */
 static void __fput(struct file *file)
@@ -253,8 +217,10 @@
 	put_pid(file->f_owner.pid);
 	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
 		i_readcount_dec(inode);
-	if (file->f_mode & FMODE_WRITE)
-		drop_file_write_access(file);
+	if (file->f_mode & FMODE_WRITER) {
+		put_write_access(inode);
+		__mnt_drop_write(mnt);
+	}
 	file->f_path.dentry = NULL;
 	file->f_path.mnt = NULL;
 	file->f_inode = NULL;
@@ -359,6 +325,5 @@
 
 	n = (mempages * (PAGE_SIZE / 1024)) / 10;
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-	files_defer_init();
 	percpu_counter_init(&nr_files, 0);
 } 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 0a648bb..aac71ce 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -667,15 +667,15 @@
 		struct pipe_buffer *buf = cs->currbuf;
 
 		if (!cs->write) {
-			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+			kunmap_atomic(cs->mapaddr);
 		} else {
-			kunmap(buf->page);
+			kunmap_atomic(cs->mapaddr);
 			buf->len = PAGE_SIZE - cs->len;
 		}
 		cs->currbuf = NULL;
 		cs->mapaddr = NULL;
 	} else if (cs->mapaddr) {
-		kunmap(cs->pg);
+		kunmap_atomic(cs->mapaddr);
 		if (cs->write) {
 			flush_dcache_page(cs->pg);
 			set_page_dirty_lock(cs->pg);
@@ -706,7 +706,7 @@
 
 			BUG_ON(!cs->nr_segs);
 			cs->currbuf = buf;
-			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
+			cs->mapaddr = kmap_atomic(buf->page);
 			cs->len = buf->len;
 			cs->buf = cs->mapaddr + buf->offset;
 			cs->pipebufs++;
@@ -726,7 +726,7 @@
 			buf->len = 0;
 
 			cs->currbuf = buf;
-			cs->mapaddr = kmap(page);
+			cs->mapaddr = kmap_atomic(page);
 			cs->buf = cs->mapaddr;
 			cs->len = PAGE_SIZE;
 			cs->pipebufs++;
@@ -745,7 +745,7 @@
 			return err;
 		BUG_ON(err != 1);
 		offset = cs->addr % PAGE_SIZE;
-		cs->mapaddr = kmap(cs->pg);
+		cs->mapaddr = kmap_atomic(cs->pg);
 		cs->buf = cs->mapaddr + offset;
 		cs->len = min(PAGE_SIZE - offset, cs->seglen);
 		cs->seglen -= cs->len;
@@ -874,7 +874,7 @@
 out_fallback_unlock:
 	unlock_page(newpage);
 out_fallback:
-	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+	cs->mapaddr = kmap_atomic(buf->page);
 	cs->buf = cs->mapaddr + buf->offset;
 
 	err = lock_request(cs->fc, cs->req);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 48992ca..13f8bde 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1086,9 +1086,7 @@
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-		pagefault_disable();
 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
-		pagefault_enable();
 		flush_dcache_page(page);
 
 		mark_page_accessed(page);
@@ -1237,8 +1235,7 @@
 		goto out;
 
 	if (file->f_flags & O_DIRECT) {
-		written = generic_file_direct_write(iocb, iov, &nr_segs,
-						    pos, &iocb->ki_pos,
+		written = generic_file_direct_write(iocb, iov, &nr_segs, pos, 
 						    count, ocount);
 		if (written < 0 || written == count)
 			goto out;
diff --git a/fs/mount.h b/fs/mount.h
index b29e42f..d55297f 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -10,7 +10,7 @@
 	struct user_namespace	*user_ns;
 	u64			seq;	/* Sequence number to prevent loops */
 	wait_queue_head_t poll;
-	int event;
+	u64 event;
 };
 
 struct mnt_pcp {
@@ -104,6 +104,9 @@
 	struct mnt_namespace *ns;
 	struct path root;
 	int (*show)(struct seq_file *, struct vfsmount *);
+	void *cached_mount;
+	u64 cached_event;
+	loff_t cached_index;
 };
 
 #define proc_mounts(p) (container_of((p), struct proc_mounts, m))
diff --git a/fs/namei.c b/fs/namei.c
index 88339f5..c6157c8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -358,6 +358,7 @@
 
 	return -EACCES;
 }
+EXPORT_SYMBOL(generic_permission);
 
 /*
  * We _really_ want to just do "generic_permission()" without
@@ -455,6 +456,7 @@
 		return retval;
 	return __inode_permission(inode, mask);
 }
+EXPORT_SYMBOL(inode_permission);
 
 /**
  * path_get - get a reference to a path
@@ -924,6 +926,7 @@
 	path->mnt = &parent->mnt;
 	return 1;
 }
+EXPORT_SYMBOL(follow_up);
 
 /*
  * Perform an automount
@@ -1085,6 +1088,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(follow_down_one);
 
 static inline bool managed_dentry_might_block(struct dentry *dentry)
 {
@@ -1223,6 +1227,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(follow_down);
 
 /*
  * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
@@ -2025,6 +2030,7 @@
 		*path = nd.path;
 	return res;
 }
+EXPORT_SYMBOL(kern_path);
 
 /**
  * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
@@ -2049,6 +2055,7 @@
 		*path = nd.path;
 	return err;
 }
+EXPORT_SYMBOL(vfs_path_lookup);
 
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
@@ -2111,6 +2118,7 @@
 
 	return __lookup_hash(&this, base, 0);
 }
+EXPORT_SYMBOL(lookup_one_len);
 
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 		 struct path *path, int *empty)
@@ -2135,6 +2143,7 @@
 {
 	return user_path_at_empty(dfd, name, flags, path, NULL);
 }
+EXPORT_SYMBOL(user_path_at);
 
 /*
  * NB: most callers don't do anything directly with the reference to the
@@ -2477,6 +2486,7 @@
 	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
 	return NULL;
 }
+EXPORT_SYMBOL(lock_rename);
 
 void unlock_rename(struct dentry *p1, struct dentry *p2)
 {
@@ -2486,6 +2496,7 @@
 		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
 	}
 }
+EXPORT_SYMBOL(unlock_rename);
 
 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		bool want_excl)
@@ -2506,6 +2517,7 @@
 		fsnotify_create(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_create);
 
 static int may_open(struct path *path, int acc_mode, int flag)
 {
@@ -3375,6 +3387,7 @@
 		fsnotify_create(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_mknod);
 
 static int may_mknod(umode_t mode)
 {
@@ -3464,6 +3477,7 @@
 		fsnotify_mkdir(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_mkdir);
 
 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
 {
@@ -3518,6 +3532,7 @@
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 }
+EXPORT_SYMBOL(dentry_unhash);
 
 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -3555,6 +3570,7 @@
 		d_delete(dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_rmdir);
 
 static long do_rmdir(int dfd, const char __user *pathname)
 {
@@ -3672,6 +3688,7 @@
 
 	return error;
 }
+EXPORT_SYMBOL(vfs_unlink);
 
 /*
  * Make sure that the actual truncation of the file will occur outside its
@@ -3785,6 +3802,7 @@
 		fsnotify_create(dir, dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_symlink);
 
 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
 		int, newdfd, const char __user *, newname)
@@ -3893,6 +3911,7 @@
 		fsnotify_link(dir, inode, new_dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_link);
 
 /*
  * Hardlinks are often used in delicate situations.  We avoid
@@ -4152,6 +4171,7 @@
 
 	return error;
 }
+EXPORT_SYMBOL(vfs_rename);
 
 SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
 		int, newdfd, const char __user *, newname, unsigned int, flags)
@@ -4304,11 +4324,9 @@
 	return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
 }
 
-int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+int readlink_copy(char __user *buffer, int buflen, const char *link)
 {
-	int len;
-
-	len = PTR_ERR(link);
+	int len = PTR_ERR(link);
 	if (IS_ERR(link))
 		goto out;
 
@@ -4320,6 +4338,7 @@
 out:
 	return len;
 }
+EXPORT_SYMBOL(readlink_copy);
 
 /*
  * A helper for ->readlink().  This should be used *ONLY* for symlinks that
@@ -4337,11 +4356,12 @@
 	if (IS_ERR(cookie))
 		return PTR_ERR(cookie);
 
-	res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
+	res = readlink_copy(buffer, buflen, nd_get_link(&nd));
 	if (dentry->d_inode->i_op->put_link)
 		dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
 	return res;
 }
+EXPORT_SYMBOL(generic_readlink);
 
 /* get the link contents into pagecache */
 static char *page_getlink(struct dentry * dentry, struct page **ppage)
@@ -4361,14 +4381,14 @@
 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
 	struct page *page = NULL;
-	char *s = page_getlink(dentry, &page);
-	int res = vfs_readlink(dentry,buffer,buflen,s);
+	int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
 	if (page) {
 		kunmap(page);
 		page_cache_release(page);
 	}
 	return res;
 }
+EXPORT_SYMBOL(page_readlink);
 
 void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
 {
@@ -4376,6 +4396,7 @@
 	nd_set_link(nd, page_getlink(dentry, &page));
 	return page;
 }
+EXPORT_SYMBOL(page_follow_link_light);
 
 void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
@@ -4386,6 +4407,7 @@
 		page_cache_release(page);
 	}
 }
+EXPORT_SYMBOL(page_put_link);
 
 /*
  * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
@@ -4423,45 +4445,18 @@
 fail:
 	return err;
 }
+EXPORT_SYMBOL(__page_symlink);
 
 int page_symlink(struct inode *inode, const char *symname, int len)
 {
 	return __page_symlink(inode, symname, len,
 			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
 }
+EXPORT_SYMBOL(page_symlink);
 
 const struct inode_operations page_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
 };
-
-EXPORT_SYMBOL(user_path_at);
-EXPORT_SYMBOL(follow_down_one);
-EXPORT_SYMBOL(follow_down);
-EXPORT_SYMBOL(follow_up);
-EXPORT_SYMBOL(get_write_access); /* nfsd */
-EXPORT_SYMBOL(lock_rename);
-EXPORT_SYMBOL(lookup_one_len);
-EXPORT_SYMBOL(page_follow_link_light);
-EXPORT_SYMBOL(page_put_link);
-EXPORT_SYMBOL(page_readlink);
-EXPORT_SYMBOL(__page_symlink);
-EXPORT_SYMBOL(page_symlink);
 EXPORT_SYMBOL(page_symlink_inode_operations);
-EXPORT_SYMBOL(kern_path);
-EXPORT_SYMBOL(vfs_path_lookup);
-EXPORT_SYMBOL(inode_permission);
-EXPORT_SYMBOL(unlock_rename);
-EXPORT_SYMBOL(vfs_create);
-EXPORT_SYMBOL(vfs_link);
-EXPORT_SYMBOL(vfs_mkdir);
-EXPORT_SYMBOL(vfs_mknod);
-EXPORT_SYMBOL(generic_permission);
-EXPORT_SYMBOL(vfs_readlink);
-EXPORT_SYMBOL(vfs_rename);
-EXPORT_SYMBOL(vfs_rmdir);
-EXPORT_SYMBOL(vfs_symlink);
-EXPORT_SYMBOL(vfs_unlink);
-EXPORT_SYMBOL(dentry_unhash);
-EXPORT_SYMBOL(generic_readlink);
diff --git a/fs/namespace.c b/fs/namespace.c
index 2ffc5a2..182bc41 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -52,7 +52,7 @@
 }
 __setup("mphash_entries=", set_mphash_entries);
 
-static int event;
+static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
 static DEFINE_SPINLOCK(mnt_id_lock);
@@ -414,9 +414,7 @@
  */
 int __mnt_want_write_file(struct file *file)
 {
-	struct inode *inode = file_inode(file);
-
-	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
+	if (!(file->f_mode & FMODE_WRITER))
 		return __mnt_want_write(file->f_path.mnt);
 	else
 		return mnt_clone_write(file->f_path.mnt);
@@ -570,13 +568,17 @@
 static void free_vfsmnt(struct mount *mnt)
 {
 	kfree(mnt->mnt_devname);
-	mnt_free_id(mnt);
 #ifdef CONFIG_SMP
 	free_percpu(mnt->mnt_pcp);
 #endif
 	kmem_cache_free(mnt_cache, mnt);
 }
 
+static void delayed_free_vfsmnt(struct rcu_head *head)
+{
+	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
+}
+
 /* call under rcu_read_lock */
 bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 {
@@ -848,6 +850,7 @@
 
 	root = mount_fs(type, flags, name, data);
 	if (IS_ERR(root)) {
+		mnt_free_id(mnt);
 		free_vfsmnt(mnt);
 		return ERR_CAST(root);
 	}
@@ -885,7 +888,7 @@
 			goto out_free;
 	}
 
-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
 	/* Don't allow unprivileged users to change mount flags */
 	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
 		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
@@ -928,20 +931,11 @@
 	return mnt;
 
  out_free:
+	mnt_free_id(mnt);
 	free_vfsmnt(mnt);
 	return ERR_PTR(err);
 }
 
-static void delayed_free(struct rcu_head *head)
-{
-	struct mount *mnt = container_of(head, struct mount, mnt_rcu);
-	kfree(mnt->mnt_devname);
-#ifdef CONFIG_SMP
-	free_percpu(mnt->mnt_pcp);
-#endif
-	kmem_cache_free(mnt_cache, mnt);
-}
-
 static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
@@ -991,7 +985,7 @@
 	dput(mnt->mnt.mnt_root);
 	deactivate_super(mnt->mnt.mnt_sb);
 	mnt_free_id(mnt);
-	call_rcu(&mnt->mnt_rcu, delayed_free);
+	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
 }
 
 void mntput(struct vfsmount *mnt)
@@ -1100,14 +1094,29 @@
 	struct proc_mounts *p = proc_mounts(m);
 
 	down_read(&namespace_sem);
-	return seq_list_start(&p->ns->list, *pos);
+	if (p->cached_event == p->ns->event) {
+		void *v = p->cached_mount;
+		if (*pos == p->cached_index)
+			return v;
+		if (*pos == p->cached_index + 1) {
+			v = seq_list_next(v, &p->ns->list, &p->cached_index);
+			return p->cached_mount = v;
+		}
+	}
+
+	p->cached_event = p->ns->event;
+	p->cached_mount = seq_list_start(&p->ns->list, *pos);
+	p->cached_index = *pos;
+	return p->cached_mount;
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct proc_mounts *p = proc_mounts(m);
 
-	return seq_list_next(v, &p->ns->list, pos);
+	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
+	p->cached_index = *pos;
+	return p->cached_mount;
 }
 
 static void m_stop(struct seq_file *m, void *v)
@@ -1661,9 +1670,9 @@
 		if (err)
 			goto out;
 		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+		lock_mount_hash();
 		if (err)
 			goto out_cleanup_ids;
-		lock_mount_hash();
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	} else {
@@ -1690,6 +1699,11 @@
 	return 0;
 
  out_cleanup_ids:
+	while (!hlist_empty(&tree_list)) {
+		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+		umount_tree(child, 0);
+	}
+	unlock_mount_hash();
 	cleanup_group_ids(source_mnt, NULL);
  out:
 	return err;
@@ -2044,7 +2058,7 @@
 	struct mount *parent;
 	int err;
 
-	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
+	mnt_flags &= ~MNT_INTERNAL_FLAGS;
 
 	mp = lock_mount(path);
 	if (IS_ERR(mp))
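
The fs/namespace.c side folds the devname and percpu freeing back into free_vfsmnt() and defers the final free of a mount past an RCU grace period via delayed_free_vfsmnt(), so lockless readers under rcu_read_lock() never see freed memory. The same deferred-free pattern on a hypothetical structure (the struct name and fields are illustrative only):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int payload;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	/* runs only after every pre-existing RCU read-side section has ended */
	kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
	/* lockless readers may still hold a pointer to f; never kfree() it directly here */
	call_rcu(&f->rcu, foo_free_rcu);
}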
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 81b4f64..e31e589 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -470,9 +470,7 @@
 {
 	struct ncp_mount_data_kernel data;
 	struct ncp_server *server;
-	struct file *ncp_filp;
 	struct inode *root_inode;
-	struct inode *sock_inode;
 	struct socket *sock;
 	int error;
 	int default_bufsize;
@@ -541,18 +539,10 @@
 	if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
 	    !gid_valid(data.gid))
 		goto out;
-	error = -EBADF;
-	ncp_filp = fget(data.ncp_fd);
-	if (!ncp_filp)
-		goto out;
-	error = -ENOTSOCK;
-	sock_inode = file_inode(ncp_filp);
-	if (!S_ISSOCK(sock_inode->i_mode))
-		goto out_fput;
-	sock = SOCKET_I(sock_inode);
+	sock = sockfd_lookup(data.ncp_fd, &error);
 	if (!sock)
-		goto out_fput;
-		
+		goto out;
+
 	if (sock->type == SOCK_STREAM)
 		default_bufsize = 0xF000;
 	else
@@ -574,27 +564,16 @@
 	if (error)
 		goto out_fput;
 
-	server->ncp_filp = ncp_filp;
 	server->ncp_sock = sock;
 	
 	if (data.info_fd != -1) {
-		struct socket *info_sock;
-
-		error = -EBADF;
-		server->info_filp = fget(data.info_fd);
-		if (!server->info_filp)
-			goto out_bdi;
-		error = -ENOTSOCK;
-		sock_inode = file_inode(server->info_filp);
-		if (!S_ISSOCK(sock_inode->i_mode))
-			goto out_fput2;
-		info_sock = SOCKET_I(sock_inode);
+		struct socket *info_sock = sockfd_lookup(data.info_fd, &error);
 		if (!info_sock)
-			goto out_fput2;
+			goto out_bdi;
+		server->info_sock = info_sock;
 		error = -EBADFD;
 		if (info_sock->type != SOCK_STREAM)
 			goto out_fput2;
-		server->info_sock = info_sock;
 	}
 
 /*	server->lock = 0;	*/
@@ -766,17 +745,12 @@
 	mutex_destroy(&server->root_setup_lock);
 	mutex_destroy(&server->mutex);
 out_fput2:
-	if (server->info_filp)
-		fput(server->info_filp);
+	if (server->info_sock)
+		sockfd_put(server->info_sock);
 out_bdi:
 	bdi_destroy(&server->bdi);
 out_fput:
-	/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
-	 * 
-	 * The previously used put_filp(ncp_filp); was bogus, since
-	 * it doesn't perform proper unlocking.
-	 */
-	fput(ncp_filp);
+	sockfd_put(sock);
 out:
 	put_pid(data.wdog_pid);
 	sb->s_fs_info = NULL;
@@ -809,9 +783,9 @@
 	mutex_destroy(&server->root_setup_lock);
 	mutex_destroy(&server->mutex);
 
-	if (server->info_filp)
-		fput(server->info_filp);
-	fput(server->ncp_filp);
+	if (server->info_sock)
+		sockfd_put(server->info_sock);
+	sockfd_put(server->ncp_sock);
 	kill_pid(server->m.wdog_pid, SIGTERM, 1);
 	put_pid(server->m.wdog_pid);
 
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index b81e97a..7fa17e4 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -45,9 +45,7 @@
 
 	__u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
 
-	struct file *ncp_filp;	/* File pointer to ncp socket */
 	struct socket *ncp_sock;/* ncp socket */
-	struct file *info_filp;
 	struct socket *info_sock;
 
 	u8 sequence;
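
The ncpfs conversion above replaces the open-coded fget() + S_ISSOCK() + SOCKET_I() sequence with sockfd_lookup()/sockfd_put(), so struct ncp_server only has to keep the struct socket pointers. The pairing, sketched with illustrative names:

#include <linux/net.h>

static int use_socket_fd(int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);	/* pins the backing file */

	if (!sock)
		return err;	/* sockfd_lookup() set -EBADF or -ENOTSOCK */

	/* ... exchange data over sock ... */

	sockfd_put(sock);	/* drop the file reference taken above */
	return 0;
}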
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 9d8153e..f47af5e 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1704,8 +1704,6 @@
 	iput(bvi);
 skip_large_index_stuff:
 	/* Setup the operations for this index inode. */
-	vi->i_op = NULL;
-	vi->i_fop = NULL;
 	vi->i_mapping->a_ops = &ntfs_mst_aops;
 	vi->i_blocks = ni->allocated_size >> 9;
 	/*
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index eb649d2..dfda2ff 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -916,57 +916,30 @@
 
 static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
 {
-	int ret;
-	mm_segment_t oldfs;
-	struct kvec vec = {
-		.iov_len = len,
-		.iov_base = data,
-	};
-	struct msghdr msg = {
-		.msg_iovlen = 1,
-		.msg_iov = (struct iovec *)&vec,
-       		.msg_flags = MSG_DONTWAIT,
-	};
-
-	oldfs = get_fs();
-	set_fs(get_ds());
-	ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
-	set_fs(oldfs);
-
-	return ret;
+	struct kvec vec = { .iov_len = len, .iov_base = data, };
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
+	return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
 }
 
 static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
 			      size_t veclen, size_t total)
 {
 	int ret;
-	mm_segment_t oldfs;
-	struct msghdr msg = {
-		.msg_iov = (struct iovec *)vec,
-		.msg_iovlen = veclen,
-	};
+	struct msghdr msg;
 
 	if (sock == NULL) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	oldfs = get_fs();
-	set_fs(get_ds());
-	ret = sock_sendmsg(sock, &msg, total);
-	set_fs(oldfs);
-	if (ret != total) {
-		mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
-		     total);
-		if (ret >= 0)
-			ret = -EPIPE; /* should be smarter, I bet */
-		goto out;
-	}
-
-	ret = 0;
+	ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
+	if (likely(ret == total))
+		return 0;
+	mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
+	if (ret >= 0)
+		ret = -EPIPE; /* should be smarter, I bet */
 out:
-	if (ret < 0)
-		mlog(0, "returning error: %d\n", ret);
+	mlog(0, "returning error: %d\n", ret);
 	return ret;
 }
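
o2net's helpers above drop the get_fs()/set_fs() address-limit dance in favour of kernel_recvmsg() and kernel_sendmsg(), which accept kernel-space kvecs directly. A minimal sketch of the receive side (names are illustrative):

#include <linux/net.h>
#include <linux/socket.h>

static int recv_into_kernel_buf(struct socket *sock, void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len, };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };

	/* fills a kernel buffer without temporarily widening the user address limit */
	return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
}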
 
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index ff33c5e..8970dcf 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2367,15 +2367,18 @@
 
 	if (direct_io) {
 		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
-						    ppos, count, ocount);
+						    count, ocount);
 		if (written < 0) {
 			ret = written;
 			goto out_dio;
 		}
 	} else {
+		struct iov_iter from;
+		iov_iter_init(&from, iov, nr_segs, count, 0);
 		current->backing_dev_info = file->f_mapping->backing_dev_info;
-		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
-						      ppos, count, 0);
+		written = generic_perform_write(file, &from, *ppos);
+		if (likely(written >= 0))
+			iocb->ki_pos = *ppos + written;
 		current->backing_dev_info = NULL;
 	}
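
ocfs2 here (and xfs further down) stops calling generic_file_buffered_write() and instead builds an iov_iter and hands it to generic_perform_write(), updating ki_pos itself. The calling convention, assuming the 3.15-era five-argument iov_iter_init() visible in the include/linux/fs.h hunk below (sketch only, helper name is made up):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t buffered_write_sketch(struct kiocb *iocb, const struct iovec *iov,
				     unsigned long nr_segs, size_t count, loff_t pos)
{
	struct iov_iter from;
	ssize_t written;

	iov_iter_init(&from, iov, nr_segs, count, 0);	/* 0 bytes copied so far */
	written = generic_perform_write(iocb->ki_filp, &from, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;		/* caller advances the position */
	return written;
}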
 
diff --git a/fs/open.c b/fs/open.c
index 631aea8..3d30eb1 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -655,35 +655,6 @@
 	return error;
 }
 
-/*
- * You have to be very careful that these write
- * counts get cleaned up in error cases and
- * upon __fput().  This should probably never
- * be called outside of __dentry_open().
- */
-static inline int __get_file_write_access(struct inode *inode,
-					  struct vfsmount *mnt)
-{
-	int error;
-	error = get_write_access(inode);
-	if (error)
-		return error;
-	/*
-	 * Do not take mount writer counts on
-	 * special files since no writes to
-	 * the mount itself will occur.
-	 */
-	if (!special_file(inode->i_mode)) {
-		/*
-		 * Balanced in __fput()
-		 */
-		error = __mnt_want_write(mnt);
-		if (error)
-			put_write_access(inode);
-	}
-	return error;
-}
-
 int open_check_o_direct(struct file *f)
 {
 	/* NB: we're sure to have correct a_ops only after f_op->open */
@@ -708,26 +679,28 @@
 	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
 				FMODE_PREAD | FMODE_PWRITE;
 
-	if (unlikely(f->f_flags & O_PATH))
-		f->f_mode = FMODE_PATH;
-
 	path_get(&f->f_path);
 	inode = f->f_inode = f->f_path.dentry->d_inode;
-	if (f->f_mode & FMODE_WRITE) {
-		error = __get_file_write_access(inode, f->f_path.mnt);
-		if (error)
-			goto cleanup_file;
-		if (!special_file(inode->i_mode))
-			file_take_write(f);
-	}
-
 	f->f_mapping = inode->i_mapping;
 
-	if (unlikely(f->f_mode & FMODE_PATH)) {
+	if (unlikely(f->f_flags & O_PATH)) {
+		f->f_mode = FMODE_PATH;
 		f->f_op = &empty_fops;
 		return 0;
 	}
 
+	if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
+		error = get_write_access(inode);
+		if (unlikely(error))
+			goto cleanup_file;
+		error = __mnt_want_write(f->f_path.mnt);
+		if (unlikely(error)) {
+			put_write_access(inode);
+			goto cleanup_file;
+		}
+		f->f_mode |= FMODE_WRITER;
+	}
+
 	/* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
 	if (S_ISREG(inode->i_mode))
 		f->f_mode |= FMODE_ATOMIC_POS;
@@ -764,18 +737,9 @@
 
 cleanup_all:
 	fops_put(f->f_op);
-	if (f->f_mode & FMODE_WRITE) {
+	if (f->f_mode & FMODE_WRITER) {
 		put_write_access(inode);
-		if (!special_file(inode->i_mode)) {
-			/*
-			 * We don't consider this a real
-			 * mnt_want/drop_write() pair
-			 * because it all happenend right
-			 * here, so just reset the state.
-			 */
-			file_reset_write(f);
-			__mnt_drop_write(f->f_path.mnt);
-		}
+		__mnt_drop_write(f->f_path.mnt);
 	}
 cleanup_file:
 	path_put(&f->f_path);
diff --git a/fs/pipe.c b/fs/pipe.c
index 78fd0d0..034bffa 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -142,55 +142,6 @@
 	return 0;
 }
 
-static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-		      int atomic)
-{
-	unsigned long copy;
-
-	while (len > 0) {
-		while (!iov->iov_len)
-			iov++;
-		copy = min_t(unsigned long, len, iov->iov_len);
-
-		if (atomic) {
-			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
-				return -EFAULT;
-		} else {
-			if (copy_to_user(iov->iov_base, from, copy))
-				return -EFAULT;
-		}
-		from += copy;
-		len -= copy;
-		iov->iov_base += copy;
-		iov->iov_len -= copy;
-	}
-	return 0;
-}
-
-/*
- * Attempt to pre-fault in the user memory, so we can use atomic copies.
- * Returns the number of bytes not faulted in.
- */
-static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
-{
-	while (!iov->iov_len)
-		iov++;
-
-	while (len > 0) {
-		unsigned long this_len;
-
-		this_len = min_t(unsigned long, len, iov->iov_len);
-		if (fault_in_pages_writeable(iov->iov_base, this_len))
-			break;
-
-		len -= this_len;
-		iov++;
-	}
-
-	return len;
-}
-
 /*
  * Pre-fault in the user memory, so we can use atomic copies.
  */
@@ -226,52 +177,6 @@
 }
 
 /**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe:	the pipe that the buffer belongs to
- * @buf:	the buffer that should be mapped
- * @atomic:	whether to use an atomic map
- *
- * Description:
- *	This function returns a kernel virtual address mapping for the
- *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *	and the caller has to be careful not to fault before calling
- *	the unmap function.
- *
- *	Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-			   struct pipe_buffer *buf, int atomic)
-{
-	if (atomic) {
-		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-		return kmap_atomic(buf->page);
-	}
-
-	return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe:	the pipe that the buffer belongs to
- * @buf:	the buffer that should be unmapped
- * @map_data:	the data that the mapping function returned
- *
- * Description:
- *	This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-			    struct pipe_buffer *buf, void *map_data)
-{
-	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-		kunmap_atomic(map_data);
-	} else
-		kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
-/**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe:	the pipe that the buffer belongs to
  * @buf:	the buffer to attempt to steal
@@ -351,8 +256,6 @@
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
 	.can_merge = 1,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = anon_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -361,8 +264,6 @@
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = anon_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -379,12 +280,15 @@
 	ssize_t ret;
 	struct iovec *iov = (struct iovec *)_iov;
 	size_t total_len;
+	struct iov_iter iter;
 
 	total_len = iov_length(iov, nr_segs);
 	/* Null read succeeds. */
 	if (unlikely(total_len == 0))
 		return 0;
 
+	iov_iter_init(&iter, iov, nr_segs, total_len, 0);
+
 	do_wakeup = 0;
 	ret = 0;
 	__pipe_lock(pipe);
@@ -394,9 +298,9 @@
 			int curbuf = pipe->curbuf;
 			struct pipe_buffer *buf = pipe->bufs + curbuf;
 			const struct pipe_buf_operations *ops = buf->ops;
-			void *addr;
 			size_t chars = buf->len;
-			int error, atomic;
+			size_t written;
+			int error;
 
 			if (chars > total_len)
 				chars = total_len;
@@ -408,21 +312,10 @@
 				break;
 			}
 
-			atomic = !iov_fault_in_pages_write(iov, chars);
-redo:
-			addr = ops->map(pipe, buf, atomic);
-			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-			ops->unmap(pipe, buf, addr);
-			if (unlikely(error)) {
-				/*
-				 * Just retry with the slow path if we failed.
-				 */
-				if (atomic) {
-					atomic = 0;
-					goto redo;
-				}
+			written = copy_page_to_iter(buf->page, buf->offset, chars, &iter);
+			if (unlikely(written < chars)) {
 				if (!ret)
-					ret = error;
+					ret = -EFAULT;
 				break;
 			}
 			ret += chars;
@@ -538,10 +431,16 @@
 
 			iov_fault_in_pages_read(iov, chars);
 redo1:
-			addr = ops->map(pipe, buf, atomic);
+			if (atomic)
+				addr = kmap_atomic(buf->page);
+			else
+				addr = kmap(buf->page);
 			error = pipe_iov_copy_from_user(offset + addr, iov,
 							chars, atomic);
-			ops->unmap(pipe, buf, addr);
+			if (atomic)
+				kunmap_atomic(addr);
+			else
+				kunmap(buf->page);
 			ret = error;
 			do_wakeup = 1;
 			if (error) {
diff --git a/fs/pnode.c b/fs/pnode.c
index 88396df..302bf22 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -164,46 +164,94 @@
 	}
 }
 
-/*
- * return the source mount to be used for cloning
- *
- * @dest 	the current destination mount
- * @last_dest  	the last seen destination mount
- * @last_src  	the last seen source mount
- * @type	return CL_SLAVE if the new mount has to be
- * 		cloned as a slave.
- */
-static struct mount *get_source(struct mount *dest,
-				struct mount *last_dest,
-				struct mount *last_src,
-				int *type)
+static struct mount *next_group(struct mount *m, struct mount *origin)
 {
-	struct mount *p_last_src = NULL;
-	struct mount *p_last_dest = NULL;
-
-	while (last_dest != dest->mnt_master) {
-		p_last_dest = last_dest;
-		p_last_src = last_src;
-		last_dest = last_dest->mnt_master;
-		last_src = last_src->mnt_master;
-	}
-
-	if (p_last_dest) {
-		do {
-			p_last_dest = next_peer(p_last_dest);
-		} while (IS_MNT_NEW(p_last_dest));
-		/* is that a peer of the earlier? */
-		if (dest == p_last_dest) {
-			*type = CL_MAKE_SHARED;
-			return p_last_src;
+	while (1) {
+		while (1) {
+			struct mount *next;
+			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+				return first_slave(m);
+			next = next_peer(m);
+			if (m->mnt_group_id == origin->mnt_group_id) {
+				if (next == origin)
+					return NULL;
+			} else if (m->mnt_slave.next != &next->mnt_slave)
+				break;
+			m = next;
 		}
+		/* m is the last peer */
+		while (1) {
+			struct mount *master = m->mnt_master;
+			if (m->mnt_slave.next != &master->mnt_slave_list)
+				return next_slave(m);
+			m = next_peer(master);
+			if (master->mnt_group_id == origin->mnt_group_id)
+				break;
+			if (master->mnt_slave.next == &m->mnt_slave)
+				break;
+			m = master;
+		}
+		if (m == origin)
+			return NULL;
 	}
-	/* slave of the earlier, then */
-	*type = CL_SLAVE;
-	/* beginning of peer group among the slaves? */
-	if (IS_MNT_SHARED(dest))
-		*type |= CL_MAKE_SHARED;
-	return last_src;
+}
+
+/* all accesses are serialized by namespace_sem */
+static struct user_namespace *user_ns;
+static struct mount *last_dest, *last_source, *dest_master;
+static struct mountpoint *mp;
+static struct hlist_head *list;
+
+static int propagate_one(struct mount *m)
+{
+	struct mount *child;
+	int type;
+	/* skip ones added by this propagate_mnt() */
+	if (IS_MNT_NEW(m))
+		return 0;
+	/* skip if mountpoint isn't covered by it */
+	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+		return 0;
+	if (m->mnt_group_id == last_dest->mnt_group_id) {
+		type = CL_MAKE_SHARED;
+	} else {
+		struct mount *n, *p;
+		for (n = m; ; n = p) {
+			p = n->mnt_master;
+			if (p == dest_master || IS_MNT_MARKED(p)) {
+				while (last_dest->mnt_master != p) {
+					last_source = last_source->mnt_master;
+					last_dest = last_source->mnt_parent;
+				}
+				if (n->mnt_group_id != last_dest->mnt_group_id) {
+					last_source = last_source->mnt_master;
+					last_dest = last_source->mnt_parent;
+				}
+				break;
+			}
+		}
+		type = CL_SLAVE;
+		/* beginning of peer group among the slaves? */
+		if (IS_MNT_SHARED(m))
+			type |= CL_MAKE_SHARED;
+	}
+		
+	/* Notice when we are propagating across user namespaces */
+	if (m->mnt_ns->user_ns != user_ns)
+		type |= CL_UNPRIVILEGED;
+	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
+	if (IS_ERR(child))
+		return PTR_ERR(child);
+	mnt_set_mountpoint(m, mp, child);
+	last_dest = m;
+	last_source = child;
+	if (m->mnt_master != dest_master) {
+		read_seqlock_excl(&mount_lock);
+		SET_MNT_MARK(m->mnt_master);
+		read_sequnlock_excl(&mount_lock);
+	}
+	hlist_add_head(&child->mnt_hash, list);
+	return 0;
 }
 
 /*
@@ -222,56 +270,48 @@
 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
 		    struct mount *source_mnt, struct hlist_head *tree_list)
 {
-	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
-	struct mount *m, *child;
+	struct mount *m, *n;
 	int ret = 0;
-	struct mount *prev_dest_mnt = dest_mnt;
-	struct mount *prev_src_mnt  = source_mnt;
-	HLIST_HEAD(tmp_list);
 
-	for (m = propagation_next(dest_mnt, dest_mnt); m;
-			m = propagation_next(m, dest_mnt)) {
-		int type;
-		struct mount *source;
+	/*
+	 * we don't want to bother passing tons of arguments to
+	 * propagate_one(); everything is serialized by namespace_sem,
+	 * so globals will do just fine.
+	 */
+	user_ns = current->nsproxy->mnt_ns->user_ns;
+	last_dest = dest_mnt;
+	last_source = source_mnt;
+	mp = dest_mp;
+	list = tree_list;
+	dest_master = dest_mnt->mnt_master;
 
-		if (IS_MNT_NEW(m))
-			continue;
-
-		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
-
-		/* Notice when we are propagating across user namespaces */
-		if (m->mnt_ns->user_ns != user_ns)
-			type |= CL_UNPRIVILEGED;
-
-		child = copy_tree(source, source->mnt.mnt_root, type);
-		if (IS_ERR(child)) {
-			ret = PTR_ERR(child);
-			tmp_list = *tree_list;
-			tmp_list.first->pprev = &tmp_list.first;
-			INIT_HLIST_HEAD(tree_list);
+	/* all peers of dest_mnt, except dest_mnt itself */
+	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
+		ret = propagate_one(n);
+		if (ret)
 			goto out;
-		}
+	}
 
-		if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
-			mnt_set_mountpoint(m, dest_mp, child);
-			hlist_add_head(&child->mnt_hash, tree_list);
-		} else {
-			/*
-			 * This can happen if the parent mount was bind mounted
-			 * on some subdirectory of a shared/slave mount.
-			 */
-			hlist_add_head(&child->mnt_hash, &tmp_list);
-		}
-		prev_dest_mnt = m;
-		prev_src_mnt  = child;
+	/* all slave groups */
+	for (m = next_group(dest_mnt, dest_mnt); m;
+			m = next_group(m, dest_mnt)) {
+		/* everything in that slave group */
+		n = m;
+		do {
+			ret = propagate_one(n);
+			if (ret)
+				goto out;
+			n = next_peer(n);
+		} while (n != m);
 	}
 out:
-	lock_mount_hash();
-	while (!hlist_empty(&tmp_list)) {
-		child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
-		umount_tree(child, 0);
+	read_seqlock_excl(&mount_lock);
+	hlist_for_each_entry(n, tree_list, mnt_hash) {
+		m = n->mnt_parent;
+		if (m->mnt_master != dest_mnt->mnt_master)
+			CLEAR_MNT_MARK(m->mnt_master);
 	}
-	unlock_mount_hash();
+	read_sequnlock_excl(&mount_lock);
 	return ret;
 }
 
diff --git a/fs/pnode.h b/fs/pnode.h
index fc28a27..4a24635 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -16,6 +16,9 @@
 #define IS_MNT_NEW(m)  (!(m)->mnt_ns)
 #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
 #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 
 #define CL_EXPIRE    		0x01
 #define CL_SLAVE     		0x02
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6b7087e..2d696b0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -200,41 +200,9 @@
 	return result;
 }
 
-static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+static int proc_pid_cmdline(struct task_struct *task, char *buffer)
 {
-	int res = 0;
-	unsigned int len;
-	struct mm_struct *mm = get_task_mm(task);
-	if (!mm)
-		goto out;
-	if (!mm->arg_end)
-		goto out_mm;	/* Shh! No looking before we're done */
-
- 	len = mm->arg_end - mm->arg_start;
- 
-	if (len > PAGE_SIZE)
-		len = PAGE_SIZE;
- 
-	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
-
-	// If the nul at the end of args has been overwritten, then
-	// assume application is using setproctitle(3).
-	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
-		len = strnlen(buffer, res);
-		if (len < res) {
-		    res = len;
-		} else {
-			len = mm->env_end - mm->env_start;
-			if (len > PAGE_SIZE - res)
-				len = PAGE_SIZE - res;
-			res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
-			res = strnlen(buffer, res);
-		}
-	}
-out_mm:
-	mmput(mm);
-out:
-	return res;
+	return get_cmdline(task, buffer, PAGE_SIZE);
 }
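
proc_pid_cmdline() now delegates to get_cmdline() (prototype added in the include/linux/mm.h hunk below), which hides the arg_start/arg_end walk and the setproctitle(3) heuristic behind one helper. A sketch of a caller, using a page-sized buffer as this file does (the wrapper is illustrative):

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>

static void dump_cmdline(struct task_struct *task)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int len;

	if (!buf)
		return;
	len = get_cmdline(task, buf, PAGE_SIZE);	/* 0 if the task has no mm */
	if (len > 0)
		/* arguments are NUL-separated, so %.*s prints up to argv[0]'s end */
		pr_info("cmdline: %.*s\n", len, buf);
	kfree(buf);
}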
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 9ae46b8..8902609 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -146,7 +146,7 @@
 	struct task_struct *task;
 	void *ns;
 	char name[50];
-	int len = -EACCES;
+	int res = -EACCES;
 
 	task = get_proc_task(inode);
 	if (!task)
@@ -155,24 +155,18 @@
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 		goto out_put_task;
 
-	len = -ENOENT;
+	res = -ENOENT;
 	ns = ns_ops->get(task);
 	if (!ns)
 		goto out_put_task;
 
 	snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
-	len = strlen(name);
-
-	if (len > buflen)
-		len = buflen;
-	if (copy_to_user(buffer, name, len))
-		len = -EFAULT;
-
+	res = readlink_copy(buffer, buflen, name);
 	ns_ops->put(ns);
 out_put_task:
 	put_task_struct(task);
 out:
-	return len;
+	return res;
 }
 
 static const struct inode_operations proc_ns_link_inode_operations = {
diff --git a/fs/proc/self.c b/fs/proc/self.c
index ffeb202..4348bb8 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -16,7 +16,7 @@
 	if (!tgid)
 		return -ENOENT;
 	sprintf(tmp, "%d", tgid);
-	return vfs_readlink(dentry,buffer,buflen,tmp);
+	return readlink_copy(buffer, buflen, tmp);
 }
 
 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 7be26f0..1a81373 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -267,6 +267,7 @@
 	p->root = root;
 	p->m.poll_event = ns->event;
 	p->show = show;
+	p->cached_event = ~0ULL;
 
 	return 0;
 
diff --git a/fs/splice.c b/fs/splice.c
index 12028fa..9bc07d2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -136,8 +136,6 @@
 
 const struct pipe_buf_operations page_cache_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = page_cache_pipe_buf_confirm,
 	.release = page_cache_pipe_buf_release,
 	.steal = page_cache_pipe_buf_steal,
@@ -156,8 +154,6 @@
 
 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = page_cache_pipe_buf_release,
 	.steal = user_page_pipe_buf_steal,
@@ -547,8 +543,6 @@
 
 static const struct pipe_buf_operations default_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = generic_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -564,8 +558,6 @@
 /* Pipe buffer operations for a socket and similar. */
 const struct pipe_buf_operations nosteal_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = generic_pipe_buf_release,
 	.steal = generic_pipe_buf_nosteal,
@@ -767,13 +759,13 @@
 		goto out;
 
 	if (buf->page != page) {
-		char *src = buf->ops->map(pipe, buf, 1);
+		char *src = kmap_atomic(buf->page);
 		char *dst = kmap_atomic(page);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
 		flush_dcache_page(page);
 		kunmap_atomic(dst);
-		buf->ops->unmap(pipe, buf, src);
+		kunmap_atomic(src);
 	}
 	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
 				page, fsdata);
@@ -1067,9 +1059,9 @@
 	void *data;
 	loff_t tmp = sd->pos;
 
-	data = buf->ops->map(pipe, buf, 0);
+	data = kmap(buf->page);
 	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
-	buf->ops->unmap(pipe, buf, data);
+	kunmap(buf->page);
 
 	return ret;
 }
@@ -1528,116 +1520,48 @@
 static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 			struct splice_desc *sd)
 {
-	char *src;
-	int ret;
-
-	/*
-	 * See if we can use the atomic maps, by prefaulting in the
-	 * pages and doing an atomic copy
-	 */
-	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
-		src = buf->ops->map(pipe, buf, 1);
-		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
-							sd->len);
-		buf->ops->unmap(pipe, buf, src);
-		if (!ret) {
-			ret = sd->len;
-			goto out;
-		}
-	}
-
-	/*
-	 * No dice, use slow non-atomic map and copy
- 	 */
-	src = buf->ops->map(pipe, buf, 0);
-
-	ret = sd->len;
-	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
-		ret = -EFAULT;
-
-	buf->ops->unmap(pipe, buf, src);
-out:
-	if (ret > 0)
-		sd->u.userptr += ret;
-	return ret;
+	int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
+	return n == sd->len ? n : -EFAULT;
 }
 
 /*
  * For lack of a better implementation, implement vmsplice() to userspace
  * as a simple copy of the pipes pages to the user iov.
  */
-static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
+static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
 			     unsigned long nr_segs, unsigned int flags)
 {
 	struct pipe_inode_info *pipe;
 	struct splice_desc sd;
-	ssize_t size;
-	int error;
 	long ret;
+	struct iovec iovstack[UIO_FASTIOV];
+	struct iovec *iov = iovstack;
+	struct iov_iter iter;
+	ssize_t count = 0;
 
 	pipe = get_pipe_info(file);
 	if (!pipe)
 		return -EBADF;
 
+	ret = rw_copy_check_uvector(READ, uiov, nr_segs,
+				    ARRAY_SIZE(iovstack), iovstack, &iov);
+	if (ret <= 0)
+		return ret;
+
+	iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+	sd.len = 0;
+	sd.total_len = count;
+	sd.flags = flags;
+	sd.u.data = &iter;
+	sd.pos = 0;
+
 	pipe_lock(pipe);
-
-	error = ret = 0;
-	while (nr_segs) {
-		void __user *base;
-		size_t len;
-
-		/*
-		 * Get user address base and length for this iovec.
-		 */
-		error = get_user(base, &iov->iov_base);
-		if (unlikely(error))
-			break;
-		error = get_user(len, &iov->iov_len);
-		if (unlikely(error))
-			break;
-
-		/*
-		 * Sanity check this iovec. 0 read succeeds.
-		 */
-		if (unlikely(!len))
-			break;
-		if (unlikely(!base)) {
-			error = -EFAULT;
-			break;
-		}
-
-		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
-			error = -EFAULT;
-			break;
-		}
-
-		sd.len = 0;
-		sd.total_len = len;
-		sd.flags = flags;
-		sd.u.userptr = base;
-		sd.pos = 0;
-
-		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
-		if (size < 0) {
-			if (!ret)
-				ret = size;
-
-			break;
-		}
-
-		ret += size;
-
-		if (size < len)
-			break;
-
-		nr_segs--;
-		iov++;
-	}
-
+	ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
 	pipe_unlock(pipe);
 
-	if (!ret)
-		ret = error;
+	if (iov != iovstack)
+		kfree(iov);
 
 	return ret;
 }
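
Both pipe_read() (fs/pipe.c above) and pipe_to_user() here now funnel page contents through copy_page_to_iter(), which handles the atomic-map fast path and the slow fallback internally; a short copy means the user pages faulted. A minimal sketch of that check (wrapper name is illustrative):

#include <linux/mm_types.h>
#include <linux/uio.h>

static ssize_t copy_buf_to_iter(struct page *page, unsigned int offset,
				size_t chars, struct iov_iter *iter)
{
	size_t copied = copy_page_to_iter(page, offset, chars, iter);

	/* short copy => user buffer faulted; the callers above turn that into -EFAULT */
	return copied == chars ? copied : -EFAULT;
}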
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1037637..d2c170f 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -171,7 +171,7 @@
 	} else
 		up_write(&iinfo->i_data_sem);
 
-	retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+	retval = __generic_file_aio_write(iocb, iov, nr_segs);
 	mutex_unlock(&inode->i_mutex);
 
 	if (retval > 0) {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 003c005..79e96ce 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -699,7 +699,7 @@
 
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
 	ret = generic_file_direct_write(iocb, iovp,
-			&nr_segs, pos, &iocb->ki_pos, count, ocount);
+			&nr_segs, pos, count, ocount);
 
 out:
 	xfs_rw_iunlock(ip, iolock);
@@ -715,7 +715,7 @@
 	const struct iovec	*iovp,
 	unsigned long		nr_segs,
 	loff_t			pos,
-	size_t			ocount)
+	size_t			count)
 {
 	struct file		*file = iocb->ki_filp;
 	struct address_space	*mapping = file->f_mapping;
@@ -724,7 +724,7 @@
 	ssize_t			ret;
 	int			enospc = 0;
 	int			iolock = XFS_IOLOCK_EXCL;
-	size_t			count = ocount;
+	struct iov_iter		from;
 
 	xfs_rw_ilock(ip, iolock);
 
@@ -732,14 +732,15 @@
 	if (ret)
 		goto out;
 
+	iov_iter_init(&from, iovp, nr_segs, count, 0);
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 
 write_retry:
 	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
-			pos, &iocb->ki_pos, count, 0);
-
+	ret = generic_perform_write(file, &from, pos);
+	if (likely(ret >= 0))
+		iocb->ki_pos = pos + ret;
 	/*
 	 * If we just got an ENOSPC, try to write back all dirty inodes to
 	 * convert delalloc space to free up some of the excess reserved
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bcfe612..0b18776 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -271,32 +271,6 @@
 	return error;
 }
 
-/*
- * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's
- * unused first argument.
- */
-STATIC int
-do_readlink(
-	char __user		*buffer,
-	int			buflen,
-	const char		*link)
-{
-        int len;
-
-	len = PTR_ERR(link);
-	if (IS_ERR(link))
-		goto out;
-
-	len = strlen(link);
-	if (len > (unsigned) buflen)
-		len = buflen;
-	if (copy_to_user(buffer, link, len))
-		len = -EFAULT;
- out:
-	return len;
-}
-
-
 int
 xfs_readlink_by_handle(
 	struct file		*parfilp,
@@ -334,7 +308,7 @@
 	error = -xfs_readlink(XFS_I(dentry->d_inode), link);
 	if (error)
 		goto out_kfree;
-	error = do_readlink(hreq->ohandle, olen, link);
+	error = readlink_copy(hreq->ohandle, olen, link);
 	if (error)
 		goto out_kfree;
 
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5b09392..d401e54 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -144,8 +144,6 @@
 
 /**
  * syscall_get_arch - return the AUDIT_ARCH for the current system call
- * @task:	task of interest, must be in system call entry tracing
- * @regs:	task_pt_regs() of @task
  *
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
@@ -155,5 +153,5 @@
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
  */
-int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
+int syscall_get_arch(void);
 #endif	/* _ASM_SYSCALL_H */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 97d5497..595f85c 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -56,6 +56,12 @@
 
 #define I830_GMCH_CTRL			0x52
 
+#define I830_GMCH_GMS_MASK		0x70
+#define I830_GMCH_GMS_LOCAL		0x10
+#define I830_GMCH_GMS_STOLEN_512	0x20
+#define I830_GMCH_GMS_STOLEN_1024	0x30
+#define I830_GMCH_GMS_STOLEN_8192	0x40
+
 #define I855_GMCH_GMS_MASK		0xF0
 #define I855_GMCH_GMS_STOLEN_0M		0x0
 #define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
@@ -72,4 +78,18 @@
 #define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
+#define I830_DRB3		0x63
+#define I85X_DRB3		0x43
+#define I865_TOUD		0xc4
+
+#define I830_ESMRAMC		0x91
+#define I845_ESMRAMC		0x9e
+#define I85X_ESMRAMC		0x61
+#define    TSEG_ENABLE		(1 << 0)
+#define    I830_TSEG_SIZE_512K	(0 << 1)
+#define    I830_TSEG_SIZE_1M	(1 << 1)
+#define    I845_TSEG_SIZE_MASK	(3 << 1)
+#define    I845_TSEG_SIZE_512K	(2 << 1)
+#define    I845_TSEG_SIZE_1M	(3 << 1)
+
 #endif				/* _I915_DRM_H_ */
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index fb02980..329436d 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -16,6 +16,7 @@
 
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/dmaengine.h>
 
 /**
@@ -103,12 +104,12 @@
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
 		struct device *dev, size_t index)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
 		struct device *dev, const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 #define acpi_dma_simple_xlate	NULL
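
The acpi_dma.h stubs now return ERR_PTR(-ENODEV) instead of NULL, so callers can treat the compiled-out case exactly like any other failure. Caller-side pattern (the wrapper function and the "tx" name are illustrative):

#include <linux/acpi_dma.h>
#include <linux/err.h>

static int request_tx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = acpi_dma_request_slave_chan_by_name(dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV when ACPI DMA support is absent */
	*out = chan;
	return 0;
}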
diff --git a/include/linux/audit.h b/include/linux/audit.h
index ec1464d..22cfddb 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -79,6 +79,14 @@
 extern int __init audit_register_class(int class, unsigned *list);
 extern int audit_classify_syscall(int abi, unsigned syscall);
 extern int audit_classify_arch(int arch);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
+extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
 
 /* audit_names->type values */
 #define	AUDIT_TYPE_UNKNOWN	0	/* we don't know yet */
@@ -94,6 +102,12 @@
 
 extern void audit_log_session_info(struct audit_buffer *ab);
 
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+#define audit_is_compat(arch)  (!((arch) & __AUDIT_ARCH_64BIT))
+#else
+#define audit_is_compat(arch)  false
+#endif
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
 				/* Public API */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a4d39b..bba5508 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -216,9 +216,9 @@
 }
 
 #define for_each_bvec(bvl, bio_vec, iter, start)			\
-	for ((iter) = start;						\
-	     (bvl) = bvec_iter_bvec((bio_vec), (iter)),			\
-		(iter).bi_size;						\
+	for (iter = (start);						\
+	     (iter).bi_size &&						\
+		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);	\
 	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
 
 
@@ -388,7 +388,7 @@
 struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
 				    struct block_device *,
-				    struct sg_iovec *, int, int, gfp_t);
+				    const struct sg_iovec *, int, int, gfp_t);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				gfp_t);
@@ -414,7 +414,8 @@
 extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
 				 unsigned long, unsigned int, int, gfp_t);
 extern struct bio *bio_copy_user_iov(struct request_queue *,
-				     struct rq_map_data *, struct sg_iovec *,
+				     struct rq_map_data *,
+				     const struct sg_iovec *,
 				     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bbc3a6c..aa0eaa2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -189,6 +189,7 @@
 	__REQ_KERNEL, 		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
 	__REQ_END,		/* last of chain of requests */
+	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -241,5 +242,6 @@
 #define REQ_KERNEL		(1ULL << __REQ_KERNEL)
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_END			(1ULL << __REQ_END)
+#define REQ_HASHED		(1ULL << __REQ_HASHED)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1e1fa3f..0d84981 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,7 +118,18 @@
 	struct bio *bio;
 	struct bio *biotail;
 
-	struct hlist_node hash;	/* merge hash */
+	/*
+	 * The hash is used inside the scheduler, and killed once the
+	 * request reaches the dispatch list. The ipi_list is only used
+	 * to queue the request for softirq completion, which is long
+	 * after the request has been unhashed (and even removed from
+	 * the dispatch list).
+	 */
+	union {
+		struct hlist_node hash;	/* merge hash */
+		struct list_head ipi_list;
+	};
+
 	/*
 	 * The rb_node is only used inside the io scheduler, requests
 	 * are pruned when moved to the dispatch queue. So let the
@@ -824,8 +835,8 @@
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct rq_map_data *, struct sg_iovec *, int,
-			       unsigned int, gfp_t);
+			       struct rq_map_data *, const struct sg_iovec *,
+			       int, unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d77797a..c40302f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -210,8 +210,8 @@
 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 			struct writeback_control *wbc, bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
-				unsigned long from);
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+				unsigned long count);
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 		unsigned flags, struct page **pagep, get_block_t *get_block);
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c48e595..5ae5100 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -455,11 +455,14 @@
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
 
-#define CPUFREQ_ENTRY_INVALID ~0
-#define CPUFREQ_TABLE_END     ~1
-#define CPUFREQ_BOOST_FREQ    ~2
+/* Special Values of .frequency field */
+#define CPUFREQ_ENTRY_INVALID	~0
+#define CPUFREQ_TABLE_END	~1
+/* Special Values of .flags field */
+#define CPUFREQ_BOOST_FREQ	(1 << 0)
 
 struct cpufreq_frequency_table {
+	unsigned int	flags;
 	unsigned int	driver_data; /* driver specific data, not used by core */
 	unsigned int	frequency; /* kHz - doesn't need to be in ascending
 				    * order */
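
With CPUFREQ_BOOST_FREQ moved into the new .flags field, boost entries are no longer encoded as a magic .frequency value. A driver table would now look roughly like this (the frequencies are made up):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 800000 },				/* kHz */
	{ .frequency = 1200000 },
	{ .flags = CPUFREQ_BOOST_FREQ, .frequency = 1600000 },	/* boost-only entry */
	{ .frequency = CPUFREQ_TABLE_END },
};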
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c5c92d5..8300fb8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -341,15 +341,11 @@
  * and this struct will then be passed in as an argument to the
  * DMA engine device_control() function.
  *
- * The rationale for adding configuration information to this struct
- * is as follows: if it is likely that most DMA slave controllers in
- * the world will support the configuration option, then make it
- * generic. If not: if it is fixed so that it be sent in static from
- * the platform data, then prefer to do that. Else, if it is neither
- * fixed at runtime, nor generic enough (such as bus mastership on
- * some CPU family and whatnot) then create a custom slave config
- * struct and pass that, then make this config a member of that
- * struct, if applicable.
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controllers in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it be sent in static from the platform
+ * data, then prefer to do that.
  */
 struct dma_slave_config {
 	enum dma_transfer_direction direction;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 481ab23..68b4024 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -44,8 +43,6 @@
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
  *		(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
- * @sd: slave specific data. Used for configuring channels
- * @sd_count: count of slave data structures passed.
  */
 struct dw_dma_platform_data {
 	unsigned int	nr_channels;
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 70e8e21..230f87b 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -63,8 +63,6 @@
 struct vfsmount;
 struct dentry;
 
-extern void __init files_defer_init(void);
-
 #define rcu_dereference_check_fdtable(files, fdtfd) \
 	rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 81048f9..7a9c5bc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -48,6 +48,7 @@
 struct swap_info_struct;
 struct seq_file;
 struct workqueue_struct;
+struct iov_iter;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -125,6 +126,8 @@
 
 /* File needs atomic accesses to f_pos */
 #define FMODE_ATOMIC_POS	((__force fmode_t)0x8000)
+/* Write access to underlying fs */
+#define FMODE_WRITER		((__force fmode_t)0x10000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x1000000)
@@ -293,38 +296,6 @@
 struct address_space;
 struct writeback_control;
 
-struct iov_iter {
-	const struct iovec *iov;
-	unsigned long nr_segs;
-	size_t iov_offset;
-	size_t count;
-};
-
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
-
-static inline void iov_iter_init(struct iov_iter *i,
-			const struct iovec *iov, unsigned long nr_segs,
-			size_t count, size_t written)
-{
-	i->iov = iov;
-	i->nr_segs = nr_segs;
-	i->iov_offset = 0;
-	i->count = count + written;
-
-	iov_iter_advance(i, written);
-}
-
-static inline size_t iov_iter_count(struct iov_iter *i)
-{
-	return i->count;
-}
-
 /*
  * "descriptor" for what we're up to with a read.
  * This allows us to use the same read code yet
@@ -383,7 +354,7 @@
 	int (*migratepage) (struct address_space *,
 			struct page *, struct page *, enum migrate_mode);
 	int (*launder_page) (struct page *);
-	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+	int (*is_partially_uptodate) (struct page *, unsigned long,
 					unsigned long);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page)(struct address_space *, struct page *);
@@ -770,9 +741,6 @@
 		index <  ra->start + ra->size);
 }
 
-#define FILE_MNT_WRITE_TAKEN	1
-#define FILE_MNT_WRITE_RELEASED	2
-
 struct file {
 	union {
 		struct llist_node	fu_llist;
@@ -810,9 +778,6 @@
 	struct list_head	f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
 	struct address_space	*f_mapping;
-#ifdef CONFIG_DEBUG_WRITECOUNT
-	unsigned long f_mnt_write_state;
-#endif
 } __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
 
 struct file_handle {
@@ -830,49 +795,6 @@
 #define fput_atomic(x)	atomic_long_add_unless(&(x)->f_count, -1, 1)
 #define file_count(x)	atomic_long_read(&(x)->f_count)
 
-#ifdef CONFIG_DEBUG_WRITECOUNT
-static inline void file_take_write(struct file *f)
-{
-	WARN_ON(f->f_mnt_write_state != 0);
-	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
-}
-static inline void file_release_write(struct file *f)
-{
-	f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
-}
-static inline void file_reset_write(struct file *f)
-{
-	f->f_mnt_write_state = 0;
-}
-static inline void file_check_state(struct file *f)
-{
-	/*
-	 * At this point, either both or neither of these bits
-	 * should be set.
-	 */
-	WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
-	WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
-}
-static inline int file_check_writeable(struct file *f)
-{
-	if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
-		return 0;
-	printk(KERN_WARNING "writeable file with no "
-			    "mnt_want_write()\n");
-	WARN_ON(1);
-	return -EINVAL;
-}
-#else /* !CONFIG_DEBUG_WRITECOUNT */
-static inline void file_take_write(struct file *filp) {}
-static inline void file_release_write(struct file *filp) {}
-static inline void file_reset_write(struct file *filp) {}
-static inline void file_check_state(struct file *filp) {}
-static inline int file_check_writeable(struct file *filp)
-{
-	return 0;
-}
-#endif /* CONFIG_DEBUG_WRITECOUNT */
-
 #define	MAX_NON_LFS	((1UL<<31) - 1)
 
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
@@ -2481,16 +2403,13 @@
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
 		unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
-extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
-		loff_t *);
+extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long);
 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
-		unsigned long *, loff_t, loff_t *, size_t, size_t);
-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
-		unsigned long, loff_t, loff_t *, size_t, ssize_t);
+		unsigned long *, loff_t, size_t, size_t);
+extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 extern int generic_segment_checks(const struct iovec *iov,
@@ -2582,7 +2501,7 @@
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
 
-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *);
 extern int page_readlink(struct dentry *, char __user *, int);
 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
 extern void page_put_link(struct dentry *, struct nameidata *, void *);
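
struct iov_iter and its helpers leave fs.h here; in this series they move to <linux/uio.h>, so iterator users include that header instead. A tiny usage sketch of the helpers whose declarations were removed above (the function is illustrative):

#include <linux/printk.h>
#include <linux/uio.h>	/* struct iov_iter, iov_iter_init(), iov_iter_count(), ... */

static void note_progress(struct iov_iter *i, size_t done)
{
	iov_iter_advance(i, done);	/* consume 'done' bytes from the front */
	pr_debug("%zu bytes left in the iterator\n", iov_iter_count(i));
}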
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cdc3011..d16da3e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -7,6 +7,7 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/perf_event.h>
+#include <linux/tracepoint.h>
 
 struct trace_array;
 struct trace_buffer;
@@ -232,6 +233,7 @@
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 	TRACE_EVENT_FL_WAS_ENABLED_BIT,
 	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
+	TRACE_EVENT_FL_TRACEPOINT_BIT,
 };
 
 /*
@@ -244,6 +246,7 @@
  *                    (used for module unloading, if a module event is enabled,
  *                     it is best to clear the buffers that used it).
  *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ *  TRACEPOINT    - Event is a tracepoint
  */
 enum {
 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -252,12 +255,17 @@
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
 	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
+	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 };
 
 struct ftrace_event_call {
 	struct list_head	list;
 	struct ftrace_event_class *class;
-	char			*name;
+	union {
+		char			*name;
+		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
+		struct tracepoint	*tp;
+	};
 	struct trace_event	event;
 	const char		*print_fmt;
 	struct event_filter	*filter;
@@ -271,6 +279,7 @@
 	 *   bit 3:		ftrace internal event (do not enable)
 	 *   bit 4:		Event was enabled by module
 	 *   bit 5:		use call filter rather than file filter
+	 *   bit 6:		Event is a tracepoint
 	 */
 	int			flags; /* static flags of different events */
 
@@ -283,6 +292,15 @@
 #endif
 };
 
+static inline const char *
+ftrace_event_name(struct ftrace_event_call *call)
+{
+	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
+		return call->tp ? call->tp->name : NULL;
+	else
+		return call->name;
+}
+
 struct trace_array;
 struct ftrace_subsystem_dir;
 
@@ -353,7 +371,7 @@
 #define __TRACE_EVENT_FLAGS(name, value)				\
 	static int __init trace_init_flags_##name(void)			\
 	{								\
-		event_##name.flags = value;				\
+		event_##name.flags |= value;				\
 		return 0;						\
 	}								\
 	early_initcall(trace_init_flags_##name);
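
Because ->name now shares a union with ->tp, code outside the tracing core should no longer read call->name directly; the new ftrace_event_name() accessor picks the right member. A sketch of a caller (illustrative):

#include <linux/ftrace_event.h>
#include <linux/printk.h>

static void report_enabled_event(struct ftrace_event_call *call)
{
	/* resolves to call->tp->name for tracepoint-backed events, call->name otherwise */
	pr_info("enabled event %s\n", ftrace_event_name(call));
}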
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index ac39d91..a326c85 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -104,6 +104,9 @@
 	MC13892_LED_R,
 	MC13892_LED_G,
 	MC13892_LED_B,
+	/* MC34708 LED IDs */
+	MC34708_LED_R,
+	MC34708_LED_G,
 };
 
 struct mc13xxx_led_platform_data {
@@ -163,6 +166,9 @@
 #define MC13892_LED_C2_CURRENT_G(x)	(((x) & 0x7) << 21)
 /* MC13892 LED Control 3 */
 #define MC13892_LED_C3_CURRENT_B(x)	(((x) & 0x7) << 9)
+/* MC34708 LED Control 0 */
+#define MC34708_LED_C0_CURRENT_R(x)	(((x) & 0x3) << 9)
+#define MC34708_LED_C0_CURRENT_G(x)	(((x) & 0x3) << 21)
 	u32 led_control[MAX_LED_CONTROL_REGS];
 };
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abc8484..bf9811e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1204,6 +1204,7 @@
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
+int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
 /* Is the vma a continuation of the stack vma above it? */
 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 371d346..839bac2 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -44,6 +44,8 @@
 #define MNT_SHARED_MASK	(MNT_UNBINDABLE)
 #define MNT_PROPAGATION_MASK	(MNT_SHARED | MNT_UNBINDABLE)
 
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
 
 #define MNT_INTERNAL	0x4000
 
@@ -51,6 +53,7 @@
 #define MNT_LOCKED		0x800000
 #define MNT_DOOMED		0x1000000
 #define MNT_SYNC_UMOUNT		0x2000000
+#define MNT_MARKED		0x4000000
 
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index ae4981e..f62f78a 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -24,8 +24,7 @@
 struct nbd_device {
 	int flags;
 	int harderror;		/* Code of hard error			*/
-	struct socket * sock;
-	struct file * file; 	/* If == NULL, device is not ready, yet	*/
+	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
 	int magic;
 
 	spinlock_t queue_lock;
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index f6a1520..9ac1a62 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -50,8 +50,13 @@
 
 struct ntb_client {
 	struct device_driver driver;
-	int (*probe) (struct pci_dev *pdev);
-	void (*remove) (struct pci_dev *pdev);
+	int (*probe)(struct pci_dev *pdev);
+	void (*remove)(struct pci_dev *pdev);
+};
+
+enum {
+	NTB_LINK_DOWN = 0,
+	NTB_LINK_UP,
 };
 
 int ntb_register_client(struct ntb_client *drvr);
@@ -60,11 +65,11 @@
 void ntb_unregister_client_dev(char *device_name);
 
 struct ntb_queue_handlers {
-	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-			    void *data, int len);
-	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-			    void *data, int len);
-	void (*event_handler) (void *data, int status);
+	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
+	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
+	void (*event_handler)(void *data, int status);
 };
 
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 6b9aafe..a50173c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -66,20 +66,25 @@
 
 #define NVME_VS(major, minor)	(major << 16 | minor)
 
-#define NVME_IO_TIMEOUT	(5 * HZ)
+extern unsigned char io_timeout;
+#define NVME_IO_TIMEOUT	(io_timeout * HZ)
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
 	struct list_head node;
-	struct nvme_queue **queues;
+	struct nvme_queue __rcu **queues;
+	unsigned short __percpu *io_queue;
 	u32 __iomem *dbs;
 	struct pci_dev *pci_dev;
 	struct dma_pool *prp_page_pool;
 	struct dma_pool *prp_small_pool;
 	int instance;
-	int queue_count;
+	unsigned queue_count;
+	unsigned online_queues;
+	unsigned max_qid;
+	int q_depth;
 	u32 db_stride;
 	u32 ctrl_config;
 	struct msix_entry *entry;
@@ -89,6 +94,7 @@
 	struct miscdevice miscdev;
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
+	struct notifier_block nb;
 	char name[12];
 	char serial[20];
 	char model[40];
@@ -131,6 +137,7 @@
 	int length;		/* Of data, in bytes */
 	unsigned long start_time;
 	dma_addr_t first_dma;
+	struct list_head node;
 	struct scatterlist sg[0];
 };
 
@@ -146,16 +153,12 @@
  */
 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
 
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-			struct nvme_iod *iod, int total_len, gfp_t gfp);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
-void put_nvmeq(struct nvme_queue *nvmeq);
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-						u32 *result, unsigned timeout);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
 							u32 *result);
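
Making NVME_IO_TIMEOUT read a variable instead of a constant lets the driver expose the I/O timeout as a module parameter. The definition lives in the driver core rather than in this header; it is roughly of the following shape, with the default value shown only as an illustration:

	unsigned char io_timeout = 30;
	module_param(io_timeout, byte, 0644);
	MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
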
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4d9389c..eb8b8ac 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -83,23 +83,6 @@
 	int can_merge;
 
 	/*
-	 * ->map() returns a virtual address mapping of the pipe buffer.
-	 * The last integer flag reflects whether this should be an atomic
-	 * mapping or not. The atomic map is faster, however you can't take
-	 * page faults before calling ->unmap() again. So if you need to eg
-	 * access user data through copy_to/from_user(), then you must get
-	 * a non-atomic map. ->map() uses the kmap_atomic slot for
-	 * atomic maps, you have to be careful if mapping another page as
-	 * source or destination for a copy.
-	 */
-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
-
-	/*
-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
-	 */
-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
-
-	/*
 	 * ->confirm() verifies that the data in the pipe buffer is there
 	 * and that the contents are good. If the pages in the pipe belong
 	 * to a file system, we may need to wait for IO completion in this
@@ -150,8 +133,6 @@
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
new file mode 100644
index 0000000..471fffe
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-audmapp.h
@@ -0,0 +1,34 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is based on the include/linux/sh_dma.h
+ *
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_AUDMAPP_H
+#define SH_AUDMAPP_H
+
+#include <linux/dmaengine.h>
+
+struct audmapp_slave_config {
+	int		slave_id;
+	dma_addr_t	src;
+	dma_addr_t	dst;
+	u32		chcr;
+};
+
+struct audmapp_pdata {
+	struct audmapp_slave_config *slave;
+	int slave_num;
+};
+
+#endif /* SH_AUDMAPP_H */
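
Board code is expected to hand the Audio-DMAC-peri-peri driver one audmapp_slave_config entry per slave through audmapp_pdata. A minimal sketch, with made-up slave ID, FIFO addresses and CHCR value (none of these are taken from a real board file):

	static struct audmapp_slave_config audmapp_slaves[] = {
		{
			.slave_id	= 0x2d,		/* illustrative */
			.src		= 0xec304000,	/* illustrative FIFO addresses */
			.dst		= 0xec400000,
			.chcr		= 0x00002d00,	/* illustrative */
		},
	};

	static struct audmapp_pdata audmapp_pdata = {
		.slave		= audmapp_slaves,
		.slave_num	= ARRAY_SIZE(audmapp_slaves),
	};
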
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
index d8a7672..441a6f2 100644
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ b/include/linux/platform_data/leds-s3c24xx.h
@@ -1,5 +1,4 @@
-/* arch/arm/mach-s3c2410/include/mach/leds-gpio.h
- *
+/*
  * Copyright (c) 2006 Simtec Electronics
  *	http://armlinux.simtec.co.uk/
  *	Ben Dooks <ben@simtec.co.uk>
@@ -11,8 +10,8 @@
  * published by the Free Software Foundation.
 */
 
-#ifndef __ASM_ARCH_LEDSGPIO_H
-#define __ASM_ARCH_LEDSGPIO_H "leds-gpio.h"
+#ifndef __LEDS_S3C24XX_H
+#define __LEDS_S3C24XX_H
 
 #define S3C24XX_LEDF_ACTLOW	(1<<0)		/* LED is on when GPIO low */
 #define S3C24XX_LEDF_TRISTATE	(1<<1)		/* tristate to turn off */
@@ -25,4 +24,4 @@
 	char			*def_trigger;
 };
 
-#endif /* __ASM_ARCH_LEDSGPIO_H */
+#endif /* __LEDS_S3C24XX_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 075b305..25f54c7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1719,6 +1719,24 @@
 }
 
 
+static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+	pid_t pid = 0;
+
+	rcu_read_lock();
+	if (pid_alive(tsk))
+		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+	rcu_read_unlock();
+
+	return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+	return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
 					struct pid_namespace *ns)
 {
@@ -1758,7 +1776,7 @@
  *
  * Return: 1 if the process is alive. 0 otherwise.
  */
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
 {
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
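
task_ppid_nr_ns() takes rcu_read_lock() itself and returns 0 once the task has been unhashed, so callers (the audit changes below, for instance) can drop sys_getppid(). A minimal sketch, assuming kernel context:

	pid_t ppid;

	/* safe even while the task is exiting; 0 means no parent visible */
	ppid = task_ppid_nr(current);
	pr_debug("parent tgid in the init pid namespace: %d\n", ppid);
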
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 697ceb7..a4a0588 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -119,8 +119,10 @@
 	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
 	  event_enter_##sname = {					\
-		.name                   = "sys_enter"#sname,		\
 		.class			= &event_class_syscall_enter,	\
+		{							\
+			.name                   = "sys_enter"#sname,	\
+		},							\
 		.event.funcs            = &enter_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags                  = TRACE_EVENT_FL_CAP_ANY,	\
@@ -133,8 +135,10 @@
 	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
 	  event_exit_##sname = {					\
-		.name                   = "sys_exit"#sname,		\
 		.class			= &event_class_syscall_exit,	\
+		{							\
+			.name                   = "sys_exit"#sname,	\
+		},							\
 		.event.funcs		= &exit_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags                  = TRACE_EVENT_FL_CAP_ANY,	\
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 812b255..9d30ee4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -6,7 +6,7 @@
  *
  * See Documentation/trace/tracepoints.txt.
  *
- * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * Heavily inspired from the Linux Kernel Markers.
  *
@@ -21,6 +21,7 @@
 
 struct module;
 struct tracepoint;
+struct notifier_block;
 
 struct tracepoint_func {
 	void *func;
@@ -35,31 +36,38 @@
 	struct tracepoint_func __rcu *funcs;
 };
 
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe, void *data);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
 extern int
-tracepoint_probe_unregister(const char *name, void *probe, void *data);
+tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
+extern int
+tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+extern void
+for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+		void *priv);
 
 #ifdef CONFIG_MODULES
 struct tp_module {
 	struct list_head list;
-	unsigned int num_tracepoints;
-	struct tracepoint * const *tracepoints_ptrs;
+	struct module *mod;
 };
+
 bool trace_module_has_bad_taint(struct module *mod);
+extern int register_tracepoint_module_notifier(struct notifier_block *nb);
+extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
 #else
 static inline bool trace_module_has_bad_taint(struct module *mod)
 {
 	return false;
 }
+static inline
+int register_tracepoint_module_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline
+int unregister_tracepoint_module_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
 #endif /* CONFIG_MODULES */
 
 /*
@@ -72,6 +80,11 @@
 	synchronize_sched();
 }
 
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+extern void syscall_regfunc(void);
+extern void syscall_unregfunc(void);
+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+
 #define PARAMS(args...) args
 
 #endif /* _LINUX_TRACEPOINT_H */
@@ -160,14 +173,14 @@
 	static inline int						\
 	register_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_register(#name, (void *)probe,	\
-						 data);			\
+		return tracepoint_probe_register(&__tracepoint_##name,	\
+						(void *)probe, data);	\
 	}								\
 	static inline int						\
 	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_unregister(#name, (void *)probe, \
-						   data);		\
+		return tracepoint_probe_unregister(&__tracepoint_##name,\
+						(void *)probe, data);	\
 	}								\
 	static inline void						\
 	check_trace_callback_type_##name(void (*cb)(data_proto))	\
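
With probes now registered against the tracepoint structure itself rather than looked up by name, the per-tracepoint wrapper generated by the macro above expands as sketched below for a hypothetical tracepoint "foo" whose prototype is a single int (purely illustrative):

	/* hypothetical tracepoint:  TRACE_EVENT(foo, TP_PROTO(int val), ...) */
	static inline int
	register_trace_foo(void (*probe)(void *__data, int val), void *data)
	{
		return tracepoint_probe_register(&__tracepoint_foo,
						 (void *)probe, data);
	}
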
diff --git a/include/linux/uio.h b/include/linux/uio.h
index c55ce24..199bcc3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -9,14 +9,23 @@
 #ifndef __LINUX_UIO_H
 #define __LINUX_UIO_H
 
+#include <linux/kernel.h>
 #include <uapi/linux/uio.h>
 
+struct page;
 
 struct kvec {
 	void *iov_base; /* and that should *never* hold a userland pointer */
 	size_t iov_len;
 };
 
+struct iov_iter {
+	const struct iovec *iov;
+	unsigned long nr_segs;
+	size_t iov_offset;
+	size_t count;
+};
+
 /*
  * Total number of bytes covered by an iovec.
  *
@@ -34,8 +43,51 @@
 	return ret;
 }
 
+static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
+{
+	return (struct iovec) {
+		.iov_base = iter->iov->iov_base + iter->iov_offset,
+		.iov_len = min(iter->count,
+			       iter->iov->iov_len - iter->iov_offset),
+	};
+}
+
+#define iov_for_each(iov, iter, start)				\
+	for (iter = (start);					\
+	     (iter).count &&					\
+	     ((iov = iov_iter_iovec(&(iter))), 1);		\
+	     iov_iter_advance(&(iter), (iov).iov_len))
+
 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(const struct iov_iter *i);
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i);
+
+static inline void iov_iter_init(struct iov_iter *i,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t count, size_t written)
+{
+	i->iov = iov;
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count + written;
+
+	iov_iter_advance(i, written);
+}
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+	return i->count;
+}
+
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
+
 #endif
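
The iov_iter introduced here pairs an iovec array with a running offset, and iov_for_each() hands back one clamped struct iovec per remaining segment. A minimal sketch that walks an iterator and totals the remaining bytes (iov, nr_segs and count are assumed to come from the caller; the sum ends up agreeing with iov_iter_count()):

	struct iov_iter iter, it;
	struct iovec v;
	size_t total = 0;

	iov_iter_init(&iter, iov, nr_segs, count, 0);	/* nothing written yet */

	iov_for_each(v, it, iter)	/* 'it' is advanced, 'iter' is untouched */
		total += v.iov_len;	/* total == iov_iter_count(&iter) */
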
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 0b9f890..fde142e 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -60,6 +60,7 @@
 /**
  * struct rc_dev - represents a remote control device
  * @dev: driver model's view of this device
+ * @sysfs_groups: sysfs attribute groups
  * @input_name: name of the input child device
  * @input_phys: physical path to the input child device
  * @input_id: id of the input child device (struct input_id)
@@ -112,10 +113,12 @@
  *	device doesn't interrupt host until it sees IR pulses
  * @s_learning_mode: enable wide band receiver used for learning
  * @s_carrier_report: enable carrier reports
- * @s_filter: set the scancode filter of a given type
+ * @s_filter: set the scancode filter
+ * @s_wakeup_filter: set the wakeup scancode filter
  */
 struct rc_dev {
 	struct device			dev;
+	const struct attribute_group	*sysfs_groups[5];
 	const char			*input_name;
 	const char			*input_phys;
 	struct input_id			input_id;
@@ -159,8 +162,9 @@
 	int				(*s_learning_mode)(struct rc_dev *dev, int enable);
 	int				(*s_carrier_report) (struct rc_dev *dev, int enable);
 	int				(*s_filter)(struct rc_dev *dev,
-						    enum rc_filter_type type,
 						    struct rc_scancode_filter *filter);
+	int				(*s_wakeup_filter)(struct rc_dev *dev,
+							   struct rc_scancode_filter *filter);
 };
 
 #define to_rc_dev(d) container_of(d, struct rc_dev, dev)
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index c38a005..6fab66c 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -67,7 +67,6 @@
  * @REQ_STATUS_ALLOC: request has been allocated but not sent
  * @REQ_STATUS_UNSENT: request waiting to be sent
  * @REQ_STATUS_SENT: request sent to server
- * @REQ_STATUS_FLSH: a flush has been sent for this request
  * @REQ_STATUS_RCVD: response received from server
  * @REQ_STATUS_FLSHD: request has been flushed
  * @REQ_STATUS_ERROR: request encountered an error on the client side
@@ -83,7 +82,6 @@
 	REQ_STATUS_ALLOC,
 	REQ_STATUS_UNSENT,
 	REQ_STATUS_SENT,
-	REQ_STATUS_FLSH,
 	REQ_STATUS_RCVD,
 	REQ_STATUS_FLSHD,
 	REQ_STATUS_ERROR,
@@ -130,7 +128,6 @@
  * @proto_version: 9P protocol version to use
  * @trans_mod: module API instantiated with this client
  * @trans: tranport instance state and API
- * @conn: connection state information used by trans_fd
  * @fidpool: fid handle accounting for session
  * @fidlist: List of active fid handles
  * @tagpool - transaction id accounting for session
@@ -159,7 +156,6 @@
 	struct p9_trans_module *trans_mod;
 	enum p9_trans_status status;
 	void *trans;
-	struct p9_conn *conn;
 
 	struct p9_idpool *fidpool;
 	struct list_head fidlist;
@@ -261,7 +257,7 @@
 int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
 int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
 struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
-void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
 
 int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
 int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 9a36d92..d9fa68f 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -40,6 +40,8 @@
  * @close: member function to discard a connection on this transport
  * @request: member function to issue a request to the transport
  * @cancel: member function to cancel a request (if it hasn't been sent)
+ * @cancelled: member function to notify that a cancelled request will
+ *             not receive a reply
  *
  * This is the basic API for a transport module which is registered by the
  * transport module with the 9P core network module and used by the client
@@ -58,6 +60,7 @@
 	void (*close) (struct p9_client *);
 	int (*request) (struct p9_client *, struct p9_req_t *req);
 	int (*cancel) (struct p9_client *, struct p9_req_t *req);
+	int (*cancelled)(struct p9_client *, struct p9_req_t *req);
 	int (*zc_request)(struct p9_client *, struct p9_req_t *,
 			  char *, char *, int , int, int, int);
 };
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4e845b8..5853c91 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -423,11 +423,11 @@
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 			int data_direction, void *buffer, unsigned bufflen,
 			unsigned char *sense, int timeout, int retries,
-			int flag, int *resid);
+			u64 flags, int *resid);
 extern int scsi_execute_req_flags(struct scsi_device *sdev,
 	const unsigned char *cmd, int data_direction, void *buffer,
 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
-	int retries, int *resid, int flags);
+	int retries, int *resid, u64 flags);
 static inline int scsi_execute_req(struct scsi_device *sdev,
 	const unsigned char *cmd, int data_direction, void *buffer,
 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
diff --git a/include/sound/cs8427.h b/include/sound/cs8427.h
index f862cff..0b6a187 100644
--- a/include/sound/cs8427.h
+++ b/include/sound/cs8427.h
@@ -188,6 +188,7 @@
 
 struct snd_pcm_substream;
 
+int snd_cs8427_init(struct snd_i2c_bus *bus, struct snd_i2c_device *device);
 int snd_cs8427_create(struct snd_i2c_bus *bus, unsigned char addr,
 		      unsigned int reset_timeout, struct snd_i2c_device **r_cs8427);
 int snd_cs8427_reg_write(struct snd_i2c_device *device, unsigned char reg,
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 4483fad..33b487b 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -21,6 +21,8 @@
 	int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
 	int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
 	int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
+	void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+	enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
 };
 
 static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 7020e33..3a1c1ee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -73,10 +73,12 @@
 	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
 				      sector_t lba, sector_t nolb),
 	void *priv);
+void	sbc_dif_generate(struct se_cmd *);
 sense_reason_t	sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
 				     unsigned int, struct scatterlist *, int);
 sense_reason_t	sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
 				    unsigned int, struct scatterlist *, int);
+sense_reason_t	sbc_dif_read_strip(struct se_cmd *);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1772fad..9ec9864 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -162,7 +162,7 @@
 	SCF_SENT_CHECK_CONDITION	= 0x00000800,
 	SCF_OVERFLOW_BIT		= 0x00001000,
 	SCF_UNDERFLOW_BIT		= 0x00002000,
-	SCF_SENT_DELAYED_TAS		= 0x00004000,
+	SCF_SEND_DELAYED_TAS		= 0x00004000,
 	SCF_ALUA_NON_OPTIMIZED		= 0x00008000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
 	SCF_ACK_KREF			= 0x00040000,
@@ -442,19 +442,18 @@
 };
 
 enum target_prot_op {
-	TARGET_PROT_NORMAL = 0,
-	TARGET_PROT_DIN_INSERT,
-	TARGET_PROT_DOUT_INSERT,
-	TARGET_PROT_DIN_STRIP,
-	TARGET_PROT_DOUT_STRIP,
-	TARGET_PROT_DIN_PASS,
-	TARGET_PROT_DOUT_PASS,
+	TARGET_PROT_NORMAL	= 0,
+	TARGET_PROT_DIN_INSERT	= (1 << 0),
+	TARGET_PROT_DOUT_INSERT	= (1 << 1),
+	TARGET_PROT_DIN_STRIP	= (1 << 2),
+	TARGET_PROT_DOUT_STRIP	= (1 << 3),
+	TARGET_PROT_DIN_PASS	= (1 << 4),
+	TARGET_PROT_DOUT_PASS	= (1 << 5),
 };
 
-enum target_prot_ho {
-	PROT_SEPERATED,
-	PROT_INTERLEAVED,
-};
+#define TARGET_PROT_ALL	TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \
+			TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \
+			TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS
 
 enum target_prot_type {
 	TARGET_DIF_TYPE0_PROT,
@@ -463,6 +462,12 @@
 	TARGET_DIF_TYPE3_PROT,
 };
 
+enum target_core_dif_check {
+	TARGET_DIF_CHECK_GUARD  = 0x1 << 0,
+	TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
+	TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
+};
+
 struct se_dif_v1_tuple {
 	__be16			guard_tag;
 	__be16			app_tag;
@@ -556,13 +561,14 @@
 	/* DIF related members */
 	enum target_prot_op	prot_op;
 	enum target_prot_type	prot_type;
+	u8			prot_checks;
 	u32			prot_length;
 	u32			reftag_seed;
 	struct scatterlist	*t_prot_sg;
 	unsigned int		t_prot_nents;
-	enum target_prot_ho	prot_handover;
 	sense_reason_t		pi_err;
 	sector_t		bad_sector;
+	bool			prot_pto;
 };
 
 struct se_ua {
@@ -603,6 +609,7 @@
 struct se_session {
 	unsigned		sess_tearing_down:1;
 	u64			sess_bin_isid;
+	enum target_prot_op	sup_prot_ops;
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group *se_tpg;
 	void			*fabric_sess_ptr;
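
Turning target_prot_op into single-bit values lets a fabric advertise every protection operation it supports when the session is created, and lets the core test support with a mask instead of comparing enum values. A minimal sketch of both sides, inside a hypothetical fabric login path (variable names are illustrative):

	struct se_session *se_sess;

	/* fabric side: advertise DIN/DOUT PASS support at session setup */
	se_sess = transport_init_session(TARGET_PROT_DIN_PASS |
					 TARGET_PROT_DOUT_PASS);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	/* core side: simple mask test against the advertised operations */
	if (se_sess->sup_prot_ops & TARGET_PROT_DIN_PASS)
		pr_debug("fabric can pass protection information on reads\n");
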
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 0218d68..22a4e98 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -62,6 +62,7 @@
 	int (*queue_data_in)(struct se_cmd *);
 	int (*queue_status)(struct se_cmd *);
 	void (*queue_tm_rsp)(struct se_cmd *);
+	void (*aborted_task)(struct se_cmd *);
 	/*
 	 * fabric module calls for target_core_fabric_configfs.c
 	 */
@@ -83,10 +84,11 @@
 	void (*fabric_drop_nodeacl)(struct se_node_acl *);
 };
 
-struct se_session *transport_init_session(void);
+struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int);
+struct se_session *transport_init_session_tags(unsigned int, unsigned int,
+		enum target_prot_op);
 void	__transport_register_session(struct se_portal_group *,
 		struct se_node_acl *, struct se_session *, void *);
 void	transport_register_session(struct se_portal_group *,
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 5a4c04a..14e49c7 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -13,9 +13,6 @@
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 
-extern void syscall_regfunc(void);
-extern void syscall_unregfunc(void);
-
 TRACE_EVENT_FN(sys_enter,
 
 	TP_PROTO(struct pt_regs *regs, long id),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 8765126..0a1a4f7 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -470,10 +470,13 @@
  * };
  *
  * static struct ftrace_event_call event_<call> = {
- *	.name			= "<call>",
  *	.class			= event_class_<template>,
+ *	{
+ *		.tp			= &__tracepoint_<call>,
+ *	},
  *	.event			= &ftrace_event_type_<call>,
  *	.print_fmt		= print_fmt_<call>,
+ *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
  * };
  * // its only safe to use pointers when doing linker tricks to
  * // create an array.
@@ -605,10 +608,13 @@
 #define DEFINE_EVENT(template, call, proto, args)			\
 									\
 static struct ftrace_event_call __used event_##call = {			\
-	.name			= #call,				\
 	.class			= &event_class_##template,		\
+	{								\
+		.tp			= &__tracepoint_##call,		\
+	},								\
 	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
+	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
 };									\
 static struct ftrace_event_call __used					\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
@@ -619,10 +625,13 @@
 static const char print_fmt_##call[] = print;				\
 									\
 static struct ftrace_event_call __used event_##call = {			\
-	.name			= #call,				\
 	.class			= &event_class_##template,		\
+	{								\
+		.tp			= &__tracepoint_##call,		\
+	},								\
 	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
+	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
 };									\
 static struct ftrace_event_call __used					\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
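
The designated initializers above populate an anonymous union in struct ftrace_event_call: ftrace-internal events keep a .name string, while tracepoint-backed events store a .tp pointer and set TRACE_EVENT_FL_TRACEPOINT. Call sites later in this series therefore switch to ftrace_event_name(), whose definition (in include/linux/ftrace_event.h, not part of this hunk) is roughly:

	static inline const char *
	ftrace_event_name(struct ftrace_event_call *call)
	{
		if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
			return call->tp ? call->tp->name : NULL;

		return call->name;
	}
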
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 2d48fe1..11917f7 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -70,7 +70,6 @@
 #define AUDIT_TTY_SET		1017	/* Set TTY auditing status */
 #define AUDIT_SET_FEATURE	1018	/* Turn an audit feature on or off */
 #define AUDIT_GET_FEATURE	1019	/* Get which features are enabled */
-#define AUDIT_FEATURE_CHANGE	1020	/* audit log listing feature changes */
 
 #define AUDIT_FIRST_USER_MSG	1100	/* Userspace messages mostly uninteresting to kernel */
 #define AUDIT_USER_AVC		1107	/* We filter this differently */
@@ -109,6 +108,8 @@
 #define AUDIT_NETFILTER_PKT	1324	/* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG	1325	/* Netfilter chain modifications */
 #define AUDIT_SECCOMP		1326	/* Secure Computing event */
+#define AUDIT_PROCTITLE		1327	/* Proctitle emit event */
+#define AUDIT_FEATURE_CHANGE	1328	/* audit log listing feature changes */
 
 #define AUDIT_AVC		1400	/* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR	1401	/* Internal SE Linux Errors */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index ba478fa..154dd6d 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -308,8 +308,12 @@
 
 #define CAP_LEASE            28
 
+/* Allow writing the audit log via unicast netlink socket */
+
 #define CAP_AUDIT_WRITE      29
 
+/* Allow configuration of audit via unicast netlink socket */
+
 #define CAP_AUDIT_CONTROL    30
 
 #define CAP_SETFCAP	     31
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index e5ab622..096fe1c 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -434,6 +434,7 @@
 	NVME_SC_REFTAG_CHECK		= 0x284,
 	NVME_SC_COMPARE_FAILED		= 0x285,
 	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_DNR			= 0x4000,
 };
 
 struct nvme_completion {
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 270db89..9bf508a 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -29,6 +29,8 @@
 #ifndef __V4L2_COMMON__
 #define __V4L2_COMMON__
 
+#include <linux/types.h>
+
 /*
  *
  * Selection interface definitions
diff --git a/init/Kconfig b/init/Kconfig
index 427ba60..765018c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -292,9 +292,12 @@
 	  logging of avc messages output).  Does not do system-call
 	  auditing without CONFIG_AUDITSYSCALL.
 
+config HAVE_ARCH_AUDITSYSCALL
+	bool
+
 config AUDITSYSCALL
 	bool "Enable system-call auditing support"
-	depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT) || ALPHA)
+	depends on AUDIT && HAVE_ARCH_AUDITSYSCALL
 	default y if SECURITY_SELINUX
 	help
 	  Enable low-overhead system-call auditing infrastructure that
diff --git a/kernel/audit.c b/kernel/audit.c
index 95a20f3..7c28936 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@
 
 struct audit_reply {
 	__u32 portid;
-	struct net *net;	
+	struct net *net;
 	struct sk_buff *skb;
 };
 
@@ -396,7 +396,7 @@
 		if (printk_ratelimit())
 			pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
 		else
-			audit_log_lost("printk limit exceeded\n");
+			audit_log_lost("printk limit exceeded");
 	}
 
 	audit_hold_skb(skb);
@@ -412,7 +412,7 @@
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		if (audit_pid) {
 			pr_err("*NO* daemon at audit_pid=%d\n", audit_pid);
-			audit_log_lost("auditd disappeared\n");
+			audit_log_lost("auditd disappeared");
 			audit_pid = 0;
 			audit_sock = NULL;
 		}
@@ -607,7 +607,7 @@
 {
 	int err = 0;
 
-	/* Only support the initial namespaces for now. */
+	/* Only support initial user namespace for now. */
 	/*
 	 * We return ECONNREFUSED because it tricks userspace into thinking
 	 * that audit was not configured into the kernel.  Lots of users
@@ -618,8 +618,7 @@
 	 * userspace will reject all logins.  This should be removed when we
 	 * support non init namespaces!!
 	 */
-	if ((current_user_ns() != &init_user_ns) ||
-	    (task_active_pid_ns(current) != &init_pid_ns))
+	if (current_user_ns() != &init_user_ns)
 		return -ECONNREFUSED;
 
 	switch (msg_type) {
@@ -639,6 +638,11 @@
 	case AUDIT_TTY_SET:
 	case AUDIT_TRIM:
 	case AUDIT_MAKE_EQUIV:
+		/* Only support auditd and auditctl in initial pid namespace
+		 * for now. */
+		if ((task_active_pid_ns(current) != &init_pid_ns))
+			return -EPERM;
+
 		if (!capable(CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
@@ -659,6 +663,7 @@
 {
 	int rc = 0;
 	uid_t uid = from_kuid(&init_user_ns, current_uid());
+	pid_t pid = task_tgid_nr(current);
 
 	if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
 		*ab = NULL;
@@ -668,7 +673,7 @@
 	*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
 	if (unlikely(!*ab))
 		return rc;
-	audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+	audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
 	audit_log_session_info(*ab);
 	audit_log_task_context(*ab);
 
@@ -1097,7 +1102,7 @@
 		audit_sock = NULL;
 	}
 
-	rcu_assign_pointer(aunet->nlsk, NULL);
+	RCU_INIT_POINTER(aunet->nlsk, NULL);
 	synchronize_net();
 	netlink_kernel_release(sock);
 }
@@ -1829,11 +1834,11 @@
 	spin_unlock_irq(&tsk->sighand->siglock);
 
 	audit_log_format(ab,
-			 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+			 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
 			 " euid=%u suid=%u fsuid=%u"
 			 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
-			 sys_getppid(),
-			 tsk->pid,
+			 task_ppid_nr(tsk),
+			 task_pid_nr(tsk),
 			 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
 			 from_kuid(&init_user_ns, cred->uid),
 			 from_kgid(&init_user_ns, cred->gid),
diff --git a/kernel/audit.h b/kernel/audit.h
index 8df13221..7bb6573 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -106,6 +106,11 @@
 	bool			should_free;
 };
 
+struct audit_proctitle {
+	int	len;	/* length of the cmdline field. */
+	char	*value;	/* the cmdline field */
+};
+
 /* The per-task audit context. */
 struct audit_context {
 	int		    dummy;	/* must be the first element */
@@ -202,6 +207,7 @@
 		} execve;
 	};
 	int fds[2];
+	struct audit_proctitle proctitle;
 
 #if AUDIT_DEBUG
 	int		    put_count;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 92062fd..8e9bc9c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -19,6 +19,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -226,7 +228,7 @@
 #endif
 
 /* Common user-space to kernel rule translation. */
-static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
+static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule)
 {
 	unsigned listnr;
 	struct audit_entry *entry;
@@ -249,7 +251,7 @@
 		;
 	}
 	if (unlikely(rule->action == AUDIT_POSSIBLE)) {
-		printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n");
+		pr_err("AUDIT_POSSIBLE is deprecated\n");
 		goto exit_err;
 	}
 	if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS)
@@ -403,7 +405,7 @@
 	int i;
 	char *str;
 
-	entry = audit_to_entry_common((struct audit_rule *)data);
+	entry = audit_to_entry_common(data);
 	if (IS_ERR(entry))
 		goto exit_nofree;
 
@@ -431,6 +433,19 @@
 			f->val = 0;
 		}
 
+		if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
+			struct pid *pid;
+			rcu_read_lock();
+			pid = find_vpid(f->val);
+			if (!pid) {
+				rcu_read_unlock();
+				err = -ESRCH;
+				goto exit_free;
+			}
+			f->val = pid_nr(pid);
+			rcu_read_unlock();
+		}
+
 		err = audit_field_valid(entry, f);
 		if (err)
 			goto exit_free;
@@ -479,8 +494,8 @@
 			/* Keep currently invalid fields around in case they
 			 * become valid after a policy reload. */
 			if (err == -EINVAL) {
-				printk(KERN_WARNING "audit rule for LSM "
-				       "\'%s\' is invalid\n",  str);
+				pr_warn("audit rule for LSM \'%s\' is invalid\n",
+					str);
 				err = 0;
 			}
 			if (err) {
@@ -709,8 +724,8 @@
 	/* Keep currently invalid fields around in case they
 	 * become valid after a policy reload. */
 	if (ret == -EINVAL) {
-		printk(KERN_WARNING "audit rule for LSM \'%s\' is "
-		       "invalid\n", df->lsm_str);
+		pr_warn("audit rule for LSM \'%s\' is invalid\n",
+			df->lsm_str);
 		ret = 0;
 	}
 
@@ -1240,12 +1255,14 @@
 
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &rule->fields[i];
+		pid_t pid;
 		int result = 0;
 		u32 sid;
 
 		switch (f->type) {
 		case AUDIT_PID:
-			result = audit_comparator(task_pid_vnr(current), f->op, f->val);
+			pid = task_pid_nr(current);
+			result = audit_comparator(pid, f->op, f->val);
 			break;
 		case AUDIT_UID:
 			result = audit_uid_comparator(current_uid(), f->op, f->uid);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7aef2f4..f251a5e 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -42,6 +42,8 @@
  * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <asm/types.h>
 #include <linux/atomic.h>
@@ -68,6 +70,7 @@
 #include <linux/capability.h>
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
+#include <linux/ctype.h>
 
 #include "audit.h"
 
@@ -79,6 +82,9 @@
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
+/* max length to print of cmdline/proctitle value during audit */
+#define MAX_PROCTITLE_AUDIT_LEN 128
+
 /* number of audit rules */
 int audit_n_rules;
 
@@ -451,15 +457,17 @@
 		struct audit_field *f = &rule->fields[i];
 		struct audit_names *n;
 		int result = 0;
+		pid_t pid;
 
 		switch (f->type) {
 		case AUDIT_PID:
-			result = audit_comparator(tsk->pid, f->op, f->val);
+			pid = task_pid_nr(tsk);
+			result = audit_comparator(pid, f->op, f->val);
 			break;
 		case AUDIT_PPID:
 			if (ctx) {
 				if (!ctx->ppid)
-					ctx->ppid = sys_getppid();
+					ctx->ppid = task_ppid_nr(tsk);
 				result = audit_comparator(ctx->ppid, f->op, f->val);
 			}
 			break;
@@ -805,7 +813,8 @@
 	rcu_read_unlock();
 }
 
-static inline struct audit_context *audit_get_context(struct task_struct *tsk,
+/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
+static inline struct audit_context *audit_take_context(struct task_struct *tsk,
 						      int return_valid,
 						      long return_code)
 {
@@ -842,6 +851,13 @@
 	return context;
 }
 
+static inline void audit_proctitle_free(struct audit_context *context)
+{
+	kfree(context->proctitle.value);
+	context->proctitle.value = NULL;
+	context->proctitle.len = 0;
+}
+
 static inline void audit_free_names(struct audit_context *context)
 {
 	struct audit_names *n, *next;
@@ -850,16 +866,15 @@
 	if (context->put_count + context->ino_count != context->name_count) {
 		int i = 0;
 
-		printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
-		       " name_count=%d put_count=%d"
-		       " ino_count=%d [NOT freeing]\n",
-		       __FILE__, __LINE__,
+		pr_err("%s:%d(:%d): major=%d in_syscall=%d"
+		       " name_count=%d put_count=%d ino_count=%d"
+		       " [NOT freeing]\n", __FILE__, __LINE__,
 		       context->serial, context->major, context->in_syscall,
 		       context->name_count, context->put_count,
 		       context->ino_count);
 		list_for_each_entry(n, &context->names_list, list) {
-			printk(KERN_ERR "names[%d] = %p = %s\n", i++,
-			       n->name, n->name->name ?: "(null)");
+			pr_err("names[%d] = %p = %s\n", i++, n->name,
+			       n->name->name ?: "(null)");
 		}
 		dump_stack();
 		return;
@@ -955,6 +970,7 @@
 	audit_free_aux(context);
 	kfree(context->filterkey);
 	kfree(context->sockaddr);
+	audit_proctitle_free(context);
 	kfree(context);
 }
 
@@ -1157,7 +1173,7 @@
 	 */
 	buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
 	if (!buf) {
-		audit_panic("out of memory for argv string\n");
+		audit_panic("out of memory for argv string");
 		return;
 	}
 
@@ -1271,6 +1287,59 @@
 	audit_log_end(ab);
 }
 
+static inline int audit_proctitle_rtrim(char *proctitle, int len)
+{
+	char *end = proctitle + len - 1;
+	while (end > proctitle && !isprint(*end))
+		end--;
+
+	/* catch the case where proctitle is only 1 non-print character */
+	len = end - proctitle + 1;
+	len -= isprint(proctitle[len-1]) == 0;
+	return len;
+}
+
+static void audit_log_proctitle(struct task_struct *tsk,
+			 struct audit_context *context)
+{
+	int res;
+	char *buf;
+	char *msg = "(null)";
+	int len = strlen(msg);
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
+	if (!ab)
+		return;	/* audit_panic or being filtered */
+
+	audit_log_format(ab, "proctitle=");
+
+	/* Not cached */
+	if (!context->proctitle.value) {
+		buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL);
+		if (!buf)
+			goto out;
+		/* Historically called this from procfs naming */
+		res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN);
+		if (res == 0) {
+			kfree(buf);
+			goto out;
+		}
+		res = audit_proctitle_rtrim(buf, res);
+		if (res == 0) {
+			kfree(buf);
+			goto out;
+		}
+		context->proctitle.value = buf;
+		context->proctitle.len = res;
+	}
+	msg = context->proctitle.value;
+	len = context->proctitle.len;
+out:
+	audit_log_n_untrustedstring(ab, msg, len);
+	audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
 	int i, call_panic = 0;
@@ -1388,6 +1457,8 @@
 		audit_log_name(context, n, NULL, i++, &call_panic);
 	}
 
+	audit_log_proctitle(tsk, context);
+
 	/* Send end of event record to help user space know we are finished */
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
 	if (ab)
@@ -1406,7 +1477,7 @@
 {
 	struct audit_context *context;
 
-	context = audit_get_context(tsk, 0, 0);
+	context = audit_take_context(tsk, 0, 0);
 	if (!context)
 		return;
 
@@ -1500,7 +1571,7 @@
 	else
 		success = AUDITSC_FAILURE;
 
-	context = audit_get_context(tsk, success, return_code);
+	context = audit_take_context(tsk, success, return_code);
 	if (!context)
 		return;
 
@@ -1550,7 +1621,7 @@
 	if (likely(put_tree_ref(context, chunk)))
 		return;
 	if (unlikely(!grow_tree_refs(context))) {
-		printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
+		pr_warn("out of memory, audit has lost a tree reference\n");
 		audit_set_auditable(context);
 		audit_put_chunk(chunk);
 		unroll_tree_refs(context, p, count);
@@ -1609,8 +1680,7 @@
 			goto retry;
 		}
 		/* too bad */
-		printk(KERN_WARNING
-			"out of memory, audit has lost a tree reference\n");
+		pr_warn("out of memory, audit has lost a tree reference\n");
 		unroll_tree_refs(context, p, count);
 		audit_set_auditable(context);
 		return;
@@ -1682,7 +1752,7 @@
 
 	if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-		printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n",
+		pr_err("%s:%d(:%d): ignoring getname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
 		dump_stack();
 #endif
@@ -1721,15 +1791,15 @@
 	BUG_ON(!context);
 	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
-		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
+		pr_err("%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
 		if (context->name_count) {
 			struct audit_names *n;
 			int i = 0;
 
 			list_for_each_entry(n, &context->names_list, list)
-				printk(KERN_ERR "name[%d] = %p = %s\n", i++,
-				       n->name, n->name->name ?: "(null)");
+				pr_err("name[%d] = %p = %s\n", i++, n->name,
+				       n->name->name ?: "(null)");
 			}
 #endif
 		final_putname(name);
@@ -1738,9 +1808,8 @@
 	else {
 		++context->put_count;
 		if (context->put_count > context->name_count) {
-			printk(KERN_ERR "%s:%d(:%d): major=%d"
-			       " in_syscall=%d putname(%p) name_count=%d"
-			       " put_count=%d\n",
+			pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)"
+			       " name_count=%d put_count=%d\n",
 			       __FILE__, __LINE__,
 			       context->serial, context->major,
 			       context->in_syscall, name->name,
@@ -1981,12 +2050,10 @@
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
 	if (!ab)
 		return;
-	audit_log_format(ab, "pid=%d uid=%u"
-			 " old-auid=%u new-auid=%u old-ses=%u new-ses=%u"
-			 " res=%d",
-			 current->pid, uid,
-			 oldloginuid, loginuid, oldsessionid, sessionid,
-			 !rc);
+	audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+	audit_log_task_context(ab);
+	audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
+			 oldloginuid, loginuid, oldsessionid, sessionid, !rc);
 	audit_log_end(ab);
 }
 
@@ -2208,7 +2275,7 @@
 {
 	struct audit_context *context = current->audit_context;
 
-	context->target_pid = t->pid;
+	context->target_pid = task_pid_nr(t);
 	context->target_auid = audit_get_loginuid(t);
 	context->target_uid = task_uid(t);
 	context->target_sessionid = audit_get_sessionid(t);
@@ -2233,7 +2300,7 @@
 
 	if (audit_pid && t->tgid == audit_pid) {
 		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-			audit_sig_pid = tsk->pid;
+			audit_sig_pid = task_pid_nr(tsk);
 			if (uid_valid(tsk->loginuid))
 				audit_sig_uid = tsk->loginuid;
 			else
@@ -2247,7 +2314,7 @@
 	/* optimize the common case by putting first signal recipient directly
 	 * in audit_context */
 	if (!ctx->target_pid) {
-		ctx->target_pid = t->tgid;
+		ctx->target_pid = task_tgid_nr(t);
 		ctx->target_auid = audit_get_loginuid(t);
 		ctx->target_uid = t_uid;
 		ctx->target_sessionid = audit_get_sessionid(t);
@@ -2268,7 +2335,7 @@
 	}
 	BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
 
-	axp->target_pid[axp->pid_count] = t->tgid;
+	axp->target_pid[axp->pid_count] = task_tgid_nr(t);
 	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
 	axp->target_uid[axp->pid_count] = t_uid;
 	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
@@ -2368,7 +2435,7 @@
 			 from_kgid(&init_user_ns, gid),
 			 sessionid);
 	audit_log_task_context(ab);
-	audit_log_format(ab, " pid=%d comm=", current->pid);
+	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
 	audit_log_untrustedstring(ab, current->comm);
 	if (mm) {
 		down_read(&mm->mmap_sem);
diff --git a/kernel/relay.c b/kernel/relay.c
index 52d6a6f..5a56d3c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1195,8 +1195,6 @@
 
 static const struct pipe_buf_operations relay_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = relay_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -1253,7 +1251,7 @@
 	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
 	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
 	poff = read_start & ~PAGE_MASK;
-	nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
+	nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
 
 	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
 		unsigned int this_len, this_end, private;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index fd609bd..d8d046c 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -71,7 +71,7 @@
 	struct pt_regs *regs = task_pt_regs(task);
 
 	sd->nr = syscall_get_nr(task, regs);
-	sd->arch = syscall_get_arch(task, regs);
+	sd->arch = syscall_get_arch();
 
 	/* Unroll syscall_get_args to help gcc on arm. */
 	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
@@ -348,7 +348,7 @@
 	info.si_code = SYS_SECCOMP;
 	info.si_call_addr = (void __user *)KSTK_EIP(current);
 	info.si_errno = reason;
-	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
+	info.si_arch = syscall_get_arch();
 	info.si_syscall = syscall;
 	force_sig_info(SIGSYS, &info, current);
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9be67c5..737b0ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3611,6 +3611,8 @@
 #ifdef CONFIG_TRACER_SNAPSHOT
 	"\t\t      snapshot\n"
 #endif
+	"\t\t      dump\n"
+	"\t\t      cpudump\n"
 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
 	"\t     The first one will disable tracing every time do_fault is hit\n"
@@ -4390,8 +4392,6 @@
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
-	.map			= generic_pipe_buf_map,
-	.unmap			= generic_pipe_buf_unmap,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= generic_pipe_buf_release,
 	.steal			= generic_pipe_buf_steal,
@@ -4486,7 +4486,7 @@
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 		spd.pages[i] = alloc_page(GFP_KERNEL);
 		if (!spd.pages[i])
 			break;
@@ -5279,8 +5279,6 @@
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
-	.map			= generic_pipe_buf_map,
-	.unmap			= generic_pipe_buf_unmap,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= buffer_pipe_buf_release,
 	.steal			= generic_pipe_buf_steal,
@@ -5356,7 +5354,7 @@
 	trace_access_lock(iter->cpu_file);
 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
 
-	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 83a4378..3ddfd8f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -223,24 +223,25 @@
 {
 	struct ftrace_event_file *file = data;
 
+	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->probe,
 						 file);
 	case TRACE_REG_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->probe,
 					    file);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->perf_probe,
 						 call);
 	case TRACE_REG_PERF_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->perf_probe,
 					    call);
 		return 0;
@@ -352,7 +353,7 @@
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
-					"%s\n", call->name);
+					"%s\n", ftrace_event_name(call));
 				break;
 			}
 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -481,27 +482,29 @@
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 	int ret = -EINVAL;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
 		if (match &&
-		    strcmp(match, call->name) != 0 &&
+		    strcmp(match, name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
 			continue;
 
 		if (sub && strcmp(sub, call->class->system) != 0)
 			continue;
 
-		if (event && strcmp(event, call->name) != 0)
+		if (event && strcmp(event, name) != 0)
 			continue;
 
 		ftrace_event_enable_disable(file, set);
@@ -699,7 +702,7 @@
 
 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
 		seq_printf(m, "%s:", call->class->system);
-	seq_printf(m, "%s\n", call->name);
+	seq_printf(m, "%s\n", ftrace_event_name(call));
 
 	return 0;
 }
@@ -792,7 +795,7 @@
 	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 		call = file->event_call;
-		if (!call->name || !call->class || !call->class->reg)
+		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
 			continue;
 
 		if (system && strcmp(call->class->system, system->name) != 0)
@@ -907,7 +910,7 @@
 
 	switch ((unsigned long)v) {
 	case FORMAT_HEADER:
-		seq_printf(m, "name: %s\n", call->name);
+		seq_printf(m, "name: %s\n", ftrace_event_name(call));
 		seq_printf(m, "ID: %d\n", call->event.type);
 		seq_printf(m, "format:\n");
 		return 0;
@@ -1527,6 +1530,7 @@
 	struct trace_array *tr = file->tr;
 	struct list_head *head;
 	struct dentry *d_events;
+	const char *name;
 	int ret;
 
 	/*
@@ -1540,10 +1544,11 @@
 	} else
 		d_events = parent;
 
-	file->dir = debugfs_create_dir(call->name, d_events);
+	name = ftrace_event_name(call);
+	file->dir = debugfs_create_dir(name, d_events);
 	if (!file->dir) {
 		pr_warning("Could not create debugfs '%s' directory\n",
-			   call->name);
+			   name);
 		return -1;
 	}
 
@@ -1567,7 +1572,7 @@
 		ret = call->class->define_fields(call);
 		if (ret < 0) {
 			pr_warning("Could not initialize trace point"
-				   " events/%s\n", call->name);
+				   " events/%s\n", name);
 			return -1;
 		}
 	}
@@ -1631,15 +1636,17 @@
 static int event_init(struct ftrace_event_call *call)
 {
 	int ret = 0;
+	const char *name;
 
-	if (WARN_ON(!call->name))
+	name = ftrace_event_name(call);
+	if (WARN_ON(!name))
 		return -EINVAL;
 
 	if (call->class->raw_init) {
 		ret = call->class->raw_init(call);
 		if (ret < 0 && ret != -ENOSYS)
 			pr_warn("Could not initialize trace events/%s\n",
-				call->name);
+				name);
 	}
 
 	return ret;
@@ -1885,7 +1892,7 @@
 		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -1894,18 +1901,20 @@
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
-		if (strcmp(event, call->name) == 0 &&
+		if (strcmp(event, name) == 0 &&
 		    strcmp(system, call->class->system) == 0)
 			return file;
 	}
@@ -1973,7 +1982,7 @@
 	seq_printf(m, "%s:%s:%s",
 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
 		   data->file->event_call->class->system,
-		   data->file->event_call->name);
+		   ftrace_event_name(data->file->event_call));
 
 	if (data->count == -1)
 		seq_printf(m, ":unlimited\n");
@@ -2193,7 +2202,7 @@
 		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   file->event_call->name);
+				   ftrace_event_name(file->event_call));
 	}
 }
 
@@ -2217,7 +2226,7 @@
 		ret = __trace_early_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create early event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -2549,7 +2558,7 @@
 			continue;
 #endif
 
-		pr_info("Testing event %s: ", call->name);
+		pr_info("Testing event %s: ", ftrace_event_name(call));
 
 		/*
 		 * If an event is already enabled, someone is using
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69..925f537 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1095,7 +1095,7 @@
 	seq_printf(m, "%s:%s:%s",
 		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
 		   enable_data->file->event_call->class->system,
-		   enable_data->file->event_call->name);
+		   ftrace_event_name(enable_data->file->event_call));
 
 	if (data->count == -1)
 		seq_puts(m, ":unlimited");
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ee0a509..d4ddde2 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -173,9 +173,11 @@
 };									\
 									\
 struct ftrace_event_call __used event_##call = {			\
-	.name			= #call,				\
-	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
+	{								\
+		.name			= #call,			\
+	},								\
+	.event.type		= etype,				\
 	.print_fmt		= print,				\
 	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };									\
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d021d21..903ae28 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -341,7 +341,7 @@
 	struct trace_kprobe *tk;
 
 	list_for_each_entry(tk, &probe_list, list)
-		if (strcmp(tk->tp.call.name, event) == 0 &&
+		if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
 		    strcmp(tk->tp.call.class->system, group) == 0)
 			return tk;
 	return NULL;
@@ -516,7 +516,8 @@
 	mutex_lock(&probe_lock);
 
 	/* Delete old (same name) event if exist */
-	old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
+	old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
+			tk->tp.call.class->system);
 	if (old_tk) {
 		ret = unregister_trace_kprobe(old_tk);
 		if (ret < 0)
@@ -564,7 +565,8 @@
 			if (ret)
 				pr_warning("Failed to re-register probe %s on"
 					   "%s: %d\n",
-					   tk->tp.call.name, mod->name, ret);
+					   ftrace_event_name(&tk->tp.call),
+					   mod->name, ret);
 		}
 	}
 	mutex_unlock(&probe_lock);
@@ -818,7 +820,8 @@
 	int i;
 
 	seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
-	seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);
+	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
+			ftrace_event_name(&tk->tp.call));
 
 	if (!tk->symbol)
 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -876,7 +879,8 @@
 {
 	struct trace_kprobe *tk = v;
 
-	seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
+	seq_printf(m, "  %-44s %15lu %15lu\n",
+		   ftrace_event_name(&tk->tp.call), tk->nhit,
 		   tk->rp.kp.nmissed);
 
 	return 0;
@@ -1011,7 +1015,7 @@
 	field = (struct kprobe_trace_entry_head *)iter->ent;
 	tp = container_of(event, struct trace_probe, call.event);
 
-	if (!trace_seq_printf(s, "%s: (", tp->call.name))
+	if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
 		goto partial;
 
 	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1047,7 +1051,7 @@
 	field = (struct kretprobe_trace_entry_head *)iter->ent;
 	tp = container_of(event, struct trace_probe, call.event);
 
-	if (!trace_seq_printf(s, "%s: (", tp->call.name))
+	if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
 		goto partial;
 
 	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1286,7 +1290,8 @@
 	call->data = tk;
 	ret = trace_add_event_call(call);
 	if (ret) {
-		pr_info("Failed to register kprobe event: %s\n", call->name);
+		pr_info("Failed to register kprobe event: %s\n",
+			ftrace_event_name(call));
 		kfree(call->print_fmt);
 		unregister_ftrace_event(&call->event);
 	}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ca0e79e2..a436de1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -431,7 +431,7 @@
 	}
 
 	trace_seq_init(p);
-	ret = trace_seq_printf(s, "%s: ", event->name);
+	ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e447336..930e514 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -294,7 +294,7 @@
 	struct trace_uprobe *tu;
 
 	list_for_each_entry(tu, &uprobe_list, list)
-		if (strcmp(tu->tp.call.name, event) == 0 &&
+		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
 		    strcmp(tu->tp.call.class->system, group) == 0)
 			return tu;
 
@@ -324,7 +324,8 @@
 	mutex_lock(&uprobe_lock);
 
 	/* register as an event */
-	old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system);
+	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
+			tu->tp.call.class->system);
 	if (old_tu) {
 		/* delete old event */
 		ret = unregister_trace_uprobe(old_tu);
@@ -599,7 +600,8 @@
 	char c = is_ret_probe(tu) ? 'r' : 'p';
 	int i;
 
-	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name);
+	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
+			ftrace_event_name(&tu->tp.call));
 	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
 
 	for (i = 0; i < tu->tp.nr_args; i++)
@@ -649,7 +651,8 @@
 {
 	struct trace_uprobe *tu = v;
 
-	seq_printf(m, "  %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit);
+	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
+			ftrace_event_name(&tu->tp.call), tu->nhit);
 	return 0;
 }
 
@@ -844,12 +847,14 @@
 	tu = container_of(event, struct trace_uprobe, tp.call.event);
 
 	if (is_ret_probe(tu)) {
-		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name,
+		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
+					ftrace_event_name(&tu->tp.call),
 					entry->vaddr[1], entry->vaddr[0]))
 			goto partial;
 		data = DATAOF_TRACE_ENTRY(entry, true);
 	} else {
-		if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name,
+		if (!trace_seq_printf(s, "%s: (0x%lx)",
+					ftrace_event_name(&tu->tp.call),
 					entry->vaddr[0]))
 			goto partial;
 		data = DATAOF_TRACE_ENTRY(entry, false);
@@ -1275,7 +1280,8 @@
 	ret = trace_add_event_call(call);
 
 	if (ret) {
-		pr_info("Failed to register uprobe event: %s\n", call->name);
+		pr_info("Failed to register uprobe event: %s\n",
+			ftrace_event_name(call));
 		kfree(call->print_fmt);
 		unregister_ftrace_event(&call->event);
 	}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index fb0a38a..ac5b23c 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Mathieu Desnoyers
+ * Copyright (C) 2008-2014 Mathieu Desnoyers
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,39 +33,27 @@
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
-/*
- * Tracepoints mutex protects the builtin and module tracepoints and the hash
- * table, as well as the local module list.
- */
-static DEFINE_MUTEX(tracepoints_mutex);
-
 #ifdef CONFIG_MODULES
-/* Local list of struct module */
+/*
+ * Tracepoint module list mutex protects the local module list.
+ */
+static DEFINE_MUTEX(tracepoint_module_list_mutex);
+
+/* Local list of struct tp_module */
 static LIST_HEAD(tracepoint_module_list);
 #endif /* CONFIG_MODULES */
 
 /*
- * Tracepoint hash table, containing the active tracepoints.
- * Protected by tracepoints_mutex.
+ * tracepoints_mutex protects the builtin and module tracepoints.
+ * tracepoints_mutex nests inside tracepoint_module_list_mutex.
  */
-#define TRACEPOINT_HASH_BITS 6
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+static DEFINE_MUTEX(tracepoints_mutex);
 
 /*
  * Note about RCU :
  * It is used to delay the free of multiple probes array until a quiescent
  * state is reached.
- * Tracepoint entries modifications are protected by the tracepoints_mutex.
  */
-struct tracepoint_entry {
-	struct hlist_node hlist;
-	struct tracepoint_func *funcs;
-	int refcount;	/* Number of times armed. 0 if disarmed. */
-	int enabled;	/* Tracepoint enabled */
-	char name[0];
-};
-
 struct tp_probes {
 	struct rcu_head rcu;
 	struct tracepoint_func probes[0];
@@ -92,34 +80,33 @@
 	}
 }
 
-static void debug_print_probes(struct tracepoint_entry *entry)
+static void debug_print_probes(struct tracepoint_func *funcs)
 {
 	int i;
 
-	if (!tracepoint_debug || !entry->funcs)
+	if (!tracepoint_debug || !funcs)
 		return;
 
-	for (i = 0; entry->funcs[i].func; i++)
-		printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
+	for (i = 0; funcs[i].func; i++)
+		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
 }
 
-static struct tracepoint_func *
-tracepoint_entry_add_probe(struct tracepoint_entry *entry,
-			   void *probe, void *data)
+static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
+		struct tracepoint_func *tp_func)
 {
 	int nr_probes = 0;
 	struct tracepoint_func *old, *new;
 
-	if (WARN_ON(!probe))
+	if (WARN_ON(!tp_func->func))
 		return ERR_PTR(-EINVAL);
 
-	debug_print_probes(entry);
-	old = entry->funcs;
+	debug_print_probes(*funcs);
+	old = *funcs;
 	if (old) {
 		/* (N -> N+1), (N != 0, 1) probes */
 		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
-			if (old[nr_probes].func == probe &&
-			    old[nr_probes].data == data)
+			if (old[nr_probes].func == tp_func->func &&
+			    old[nr_probes].data == tp_func->data)
 				return ERR_PTR(-EEXIST);
 	}
 	/* + 2 : one for new probe, one for NULL func */
@@ -128,33 +115,30 @@
 		return ERR_PTR(-ENOMEM);
 	if (old)
 		memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
-	new[nr_probes].func = probe;
-	new[nr_probes].data = data;
+	new[nr_probes] = *tp_func;
 	new[nr_probes + 1].func = NULL;
-	entry->refcount = nr_probes + 1;
-	entry->funcs = new;
-	debug_print_probes(entry);
+	*funcs = new;
+	debug_print_probes(*funcs);
 	return old;
 }
 
-static void *
-tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
-			      void *probe, void *data)
+static void *func_remove(struct tracepoint_func **funcs,
+		struct tracepoint_func *tp_func)
 {
 	int nr_probes = 0, nr_del = 0, i;
 	struct tracepoint_func *old, *new;
 
-	old = entry->funcs;
+	old = *funcs;
 
 	if (!old)
 		return ERR_PTR(-ENOENT);
 
-	debug_print_probes(entry);
+	debug_print_probes(*funcs);
 	/* (N -> M), (N > 1, M >= 0) probes */
-	if (probe) {
+	if (tp_func->func) {
 		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
-			if (old[nr_probes].func == probe &&
-			     old[nr_probes].data == data)
+			if (old[nr_probes].func == tp_func->func &&
+			     old[nr_probes].data == tp_func->data)
 				nr_del++;
 		}
 	}
@@ -165,9 +149,8 @@
 	 */
 	if (nr_probes - nr_del == 0) {
 		/* N -> 0, (N > 1) */
-		entry->funcs = NULL;
-		entry->refcount = 0;
-		debug_print_probes(entry);
+		*funcs = NULL;
+		debug_print_probes(*funcs);
 		return old;
 	} else {
 		int j = 0;
@@ -177,91 +160,35 @@
 		if (new == NULL)
 			return ERR_PTR(-ENOMEM);
 		for (i = 0; old[i].func; i++)
-			if (old[i].func != probe || old[i].data != data)
+			if (old[i].func != tp_func->func
+					|| old[i].data != tp_func->data)
 				new[j++] = old[i];
 		new[nr_probes - nr_del].func = NULL;
-		entry->refcount = nr_probes - nr_del;
-		entry->funcs = new;
+		*funcs = new;
 	}
-	debug_print_probes(entry);
+	debug_print_probes(*funcs);
 	return old;
 }
 
 /*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with tracepoints_mutex held.
- * Returns NULL if not present.
+ * Add the probe function to a tracepoint.
  */
-static struct tracepoint_entry *get_tracepoint(const char *name)
+static int tracepoint_add_func(struct tracepoint *tp,
+		struct tracepoint_func *func)
 {
-	struct hlist_head *head;
-	struct tracepoint_entry *e;
-	u32 hash = jhash(name, strlen(name), 0);
+	struct tracepoint_func *old, *tp_funcs;
 
-	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, head, hlist) {
-		if (!strcmp(name, e->name))
-			return e;
+	if (tp->regfunc && !static_key_enabled(&tp->key))
+		tp->regfunc();
+
+	tp_funcs = rcu_dereference_protected(tp->funcs,
+			lockdep_is_held(&tracepoints_mutex));
+	old = func_add(&tp_funcs, func);
+	if (IS_ERR(old)) {
+		WARN_ON_ONCE(1);
+		return PTR_ERR(old);
 	}
-	return NULL;
-}
-
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * tracepoints_mutex held.
- */
-static struct tracepoint_entry *add_tracepoint(const char *name)
-{
-	struct hlist_head *head;
-	struct tracepoint_entry *e;
-	size_t name_len = strlen(name) + 1;
-	u32 hash = jhash(name, name_len-1, 0);
-
-	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, head, hlist) {
-		if (!strcmp(name, e->name)) {
-			printk(KERN_NOTICE
-				"tracepoint %s busy\n", name);
-			return ERR_PTR(-EEXIST);	/* Already there */
-		}
-	}
-	/*
-	 * Using kmalloc here to allocate a variable length element. Could
-	 * cause some memory fragmentation if overused.
-	 */
-	e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
-	if (!e)
-		return ERR_PTR(-ENOMEM);
-	memcpy(&e->name[0], name, name_len);
-	e->funcs = NULL;
-	e->refcount = 0;
-	e->enabled = 0;
-	hlist_add_head(&e->hlist, head);
-	return e;
-}
-
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called with
- * mutex_lock held.
- */
-static inline void remove_tracepoint(struct tracepoint_entry *e)
-{
-	hlist_del(&e->hlist);
-	kfree(e);
-}
-
-/*
- * Sets the probe callback corresponding to one tracepoint.
- */
-static void set_tracepoint(struct tracepoint_entry **entry,
-	struct tracepoint *elem, int active)
-{
-	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
-
-	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
-		elem->regfunc();
-	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
-		elem->unregfunc();
+	release_probes(old);
 
 	/*
 	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -270,193 +197,90 @@
 	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
 	 * is used.
 	 */
-	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-	if (active && !static_key_enabled(&elem->key))
-		static_key_slow_inc(&elem->key);
-	else if (!active && static_key_enabled(&elem->key))
-		static_key_slow_dec(&elem->key);
+	rcu_assign_pointer(tp->funcs, tp_funcs);
+	if (!static_key_enabled(&tp->key))
+		static_key_slow_inc(&tp->key);
+	return 0;
 }
 
 /*
- * Disable a tracepoint and its probe callback.
+ * Remove a probe function from a tracepoint.
  * Note: only waiting an RCU period after setting elem->call to the empty
  * function insures that the original callback is not used anymore. This insured
  * by preempt_disable around the call site.
  */
-static void disable_tracepoint(struct tracepoint *elem)
+static int tracepoint_remove_func(struct tracepoint *tp,
+		struct tracepoint_func *func)
 {
-	if (elem->unregfunc && static_key_enabled(&elem->key))
-		elem->unregfunc();
+	struct tracepoint_func *old, *tp_funcs;
 
-	if (static_key_enabled(&elem->key))
-		static_key_slow_dec(&elem->key);
-	rcu_assign_pointer(elem->funcs, NULL);
-}
-
-/**
- * tracepoint_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of tracepoints.
- * Called with tracepoints_mutex held.
- */
-static void tracepoint_update_probe_range(struct tracepoint * const *begin,
-					  struct tracepoint * const *end)
-{
-	struct tracepoint * const *iter;
-	struct tracepoint_entry *mark_entry;
-
-	if (!begin)
-		return;
-
-	for (iter = begin; iter < end; iter++) {
-		mark_entry = get_tracepoint((*iter)->name);
-		if (mark_entry) {
-			set_tracepoint(&mark_entry, *iter,
-					!!mark_entry->refcount);
-			mark_entry->enabled = !!mark_entry->refcount;
-		} else {
-			disable_tracepoint(*iter);
-		}
+	tp_funcs = rcu_dereference_protected(tp->funcs,
+			lockdep_is_held(&tracepoints_mutex));
+	old = func_remove(&tp_funcs, func);
+	if (IS_ERR(old)) {
+		WARN_ON_ONCE(1);
+		return PTR_ERR(old);
 	}
-}
+	release_probes(old);
 
-#ifdef CONFIG_MODULES
-void module_update_tracepoints(void)
-{
-	struct tp_module *tp_mod;
+	if (!tp_funcs) {
+		/* Removed last function */
+		if (tp->unregfunc && static_key_enabled(&tp->key))
+			tp->unregfunc();
 
-	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
-		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
-			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
-}
-#else /* CONFIG_MODULES */
-void module_update_tracepoints(void)
-{
-}
-#endif /* CONFIG_MODULES */
-
-
-/*
- * Update probes, removing the faulty probes.
- * Called with tracepoints_mutex held.
- */
-static void tracepoint_update_probes(void)
-{
-	/* Core kernel tracepoints */
-	tracepoint_update_probe_range(__start___tracepoints_ptrs,
-		__stop___tracepoints_ptrs);
-	/* tracepoints in modules. */
-	module_update_tracepoints();
-}
-
-static struct tracepoint_func *
-tracepoint_add_probe(const char *name, void *probe, void *data)
-{
-	struct tracepoint_entry *entry;
-	struct tracepoint_func *old;
-
-	entry = get_tracepoint(name);
-	if (!entry) {
-		entry = add_tracepoint(name);
-		if (IS_ERR(entry))
-			return (struct tracepoint_func *)entry;
+		if (static_key_enabled(&tp->key))
+			static_key_slow_dec(&tp->key);
 	}
-	old = tracepoint_entry_add_probe(entry, probe, data);
-	if (IS_ERR(old) && !entry->refcount)
-		remove_tracepoint(entry);
-	return old;
+	rcu_assign_pointer(tp->funcs, tp_funcs);
+	return 0;
 }
 
 /**
  * tracepoint_probe_register -  Connect a probe to a tracepoint
- * @name: tracepoint name
+ * @tp: tracepoint
  * @probe: probe handler
- * @data: probe private data
  *
- * Returns:
- * - 0 if the probe was successfully registered, and tracepoint
- *   callsites are currently loaded for that probe,
- * - -ENODEV if the probe was successfully registered, but no tracepoint
- *   callsite is currently loaded for that probe,
- * - other negative error value on error.
- *
- * When tracepoint_probe_register() returns either 0 or -ENODEV,
- * parameters @name, @probe, and @data may be used by the tracepoint
- * infrastructure until the probe is unregistered.
- *
- * The probe address must at least be aligned on the architecture pointer size.
+ * Returns 0 if ok, error value on error.
+ * Note: if @tp is within a module, the caller is responsible for
+ * unregistering the probe before the module is gone. This can be
+ * performed either with a tracepoint module going notifier, or from
+ * within module exit functions.
  */
-int tracepoint_probe_register(const char *name, void *probe, void *data)
+int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
 {
-	struct tracepoint_func *old;
-	struct tracepoint_entry *entry;
-	int ret = 0;
+	struct tracepoint_func tp_func;
+	int ret;
 
 	mutex_lock(&tracepoints_mutex);
-	old = tracepoint_add_probe(name, probe, data);
-	if (IS_ERR(old)) {
-		mutex_unlock(&tracepoints_mutex);
-		return PTR_ERR(old);
-	}
-	tracepoint_update_probes();		/* may update entry */
-	entry = get_tracepoint(name);
-	/* Make sure the entry was enabled */
-	if (!entry || !entry->enabled)
-		ret = -ENODEV;
+	tp_func.func = probe;
+	tp_func.data = data;
+	ret = tracepoint_add_func(tp, &tp_func);
 	mutex_unlock(&tracepoints_mutex);
-	release_probes(old);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
-static struct tracepoint_func *
-tracepoint_remove_probe(const char *name, void *probe, void *data)
-{
-	struct tracepoint_entry *entry;
-	struct tracepoint_func *old;
-
-	entry = get_tracepoint(name);
-	if (!entry)
-		return ERR_PTR(-ENOENT);
-	old = tracepoint_entry_remove_probe(entry, probe, data);
-	if (IS_ERR(old))
-		return old;
-	if (!entry->refcount)
-		remove_tracepoint(entry);
-	return old;
-}
-
 /**
  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
- * @name: tracepoint name
+ * @tp: tracepoint
  * @probe: probe function pointer
- * @data: probe private data
  *
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which insures that every preempt disabled section
- * have finished.
+ * Returns 0 if ok, error value on error.
  */
-int tracepoint_probe_unregister(const char *name, void *probe, void *data)
+int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
 {
-	struct tracepoint_func *old;
+	struct tracepoint_func tp_func;
+	int ret;
 
 	mutex_lock(&tracepoints_mutex);
-	old = tracepoint_remove_probe(name, probe, data);
-	if (IS_ERR(old)) {
-		mutex_unlock(&tracepoints_mutex);
-		return PTR_ERR(old);
-	}
-	tracepoint_update_probes();		/* may update entry */
+	tp_func.func = probe;
+	tp_func.data = data;
+	ret = tracepoint_remove_func(tp, &tp_func);
 	mutex_unlock(&tracepoints_mutex);
-	release_probes(old);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
 
-
 #ifdef CONFIG_MODULES
 bool trace_module_has_bad_taint(struct module *mod)
 {
@@ -464,6 +288,74 @@
 			       (1 << TAINT_UNSIGNED_MODULE));
 }
 
+static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
+
+/**
+ * register_tracepoint_module_notifier - register tracepoint coming/going notifier
+ * @nb: notifier block
+ *
+ * Notifiers registered with this function are called on module
+ * coming/going with the tracepoint_module_list_mutex held.
+ * The notifier block callback should expect a "struct tp_module" data
+ * pointer.
+ */
+int register_tracepoint_module_notifier(struct notifier_block *nb)
+{
+	struct tp_module *tp_mod;
+	int ret;
+
+	mutex_lock(&tracepoint_module_list_mutex);
+	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
+	if (ret)
+		goto end;
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
+end:
+	mutex_unlock(&tracepoint_module_list_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
+
+/**
+ * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
+ * @nb: notifier block
+ *
+ * The notifier block callback should expect a "struct tp_module" data
+ * pointer.
+ */
+int unregister_tracepoint_module_notifier(struct notifier_block *nb)
+{
+	struct tp_module *tp_mod;
+	int ret;
+
+	mutex_lock(&tracepoint_module_list_mutex);
+	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
+	if (ret)
+		goto end;
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
+end:
+	mutex_unlock(&tracepoint_module_list_mutex);
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
+
+/*
+ * Ensure the tracer unregistered the module's probes before the module
+ * teardown is performed. Prevents leaks of probe and data pointers.
+ */
+static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
+		struct tracepoint * const *end)
+{
+	struct tracepoint * const *iter;
+
+	if (!begin)
+		return;
+	for (iter = begin; iter < end; iter++)
+		WARN_ON_ONCE((*iter)->funcs);
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
 	struct tp_module *tp_mod;
@@ -479,36 +371,41 @@
 	 */
 	if (trace_module_has_bad_taint(mod))
 		return 0;
-	mutex_lock(&tracepoints_mutex);
+	mutex_lock(&tracepoint_module_list_mutex);
 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
 	if (!tp_mod) {
 		ret = -ENOMEM;
 		goto end;
 	}
-	tp_mod->num_tracepoints = mod->num_tracepoints;
-	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
+	tp_mod->mod = mod;
 	list_add_tail(&tp_mod->list, &tracepoint_module_list);
-	tracepoint_update_probe_range(mod->tracepoints_ptrs,
-		mod->tracepoints_ptrs + mod->num_tracepoints);
+	blocking_notifier_call_chain(&tracepoint_notify_list,
+			MODULE_STATE_COMING, tp_mod);
 end:
-	mutex_unlock(&tracepoints_mutex);
+	mutex_unlock(&tracepoint_module_list_mutex);
 	return ret;
 }
 
-static int tracepoint_module_going(struct module *mod)
+static void tracepoint_module_going(struct module *mod)
 {
-	struct tp_module *pos;
+	struct tp_module *tp_mod;
 
 	if (!mod->num_tracepoints)
-		return 0;
+		return;
 
-	mutex_lock(&tracepoints_mutex);
-	tracepoint_update_probe_range(mod->tracepoints_ptrs,
-		mod->tracepoints_ptrs + mod->num_tracepoints);
-	list_for_each_entry(pos, &tracepoint_module_list, list) {
-		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
-			list_del(&pos->list);
-			kfree(pos);
+	mutex_lock(&tracepoint_module_list_mutex);
+	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
+		if (tp_mod->mod == mod) {
+			blocking_notifier_call_chain(&tracepoint_notify_list,
+					MODULE_STATE_GOING, tp_mod);
+			list_del(&tp_mod->list);
+			kfree(tp_mod);
+			/*
+			 * Called the going notifier before checking for
+			 * quiescence.
+			 */
+			tp_module_going_check_quiescent(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints);
 			break;
 		}
 	}
@@ -518,12 +415,11 @@
 	 * flag on "going", in case a module taints the kernel only after being
 	 * loaded.
 	 */
-	mutex_unlock(&tracepoints_mutex);
-	return 0;
+	mutex_unlock(&tracepoint_module_list_mutex);
 }
 
-int tracepoint_module_notify(struct notifier_block *self,
-			     unsigned long val, void *data)
+static int tracepoint_module_notify(struct notifier_block *self,
+		unsigned long val, void *data)
 {
 	struct module *mod = data;
 	int ret = 0;
@@ -535,24 +431,58 @@
 	case MODULE_STATE_LIVE:
 		break;
 	case MODULE_STATE_GOING:
-		ret = tracepoint_module_going(mod);
+		tracepoint_module_going(mod);
+		break;
+	case MODULE_STATE_UNFORMED:
 		break;
 	}
 	return ret;
 }
 
-struct notifier_block tracepoint_module_nb = {
+static struct notifier_block tracepoint_module_nb = {
 	.notifier_call = tracepoint_module_notify,
 	.priority = 0,
 };
 
-static int init_tracepoints(void)
+static __init int init_tracepoints(void)
 {
-	return register_module_notifier(&tracepoint_module_nb);
+	int ret;
+
+	ret = register_module_notifier(&tracepoint_module_nb);
+	if (ret)
+		pr_warning("Failed to register tracepoint module enter notifier\n");
+
+	return ret;
 }
 __initcall(init_tracepoints);
 #endif /* CONFIG_MODULES */
 
+static void for_each_tracepoint_range(struct tracepoint * const *begin,
+		struct tracepoint * const *end,
+		void (*fct)(struct tracepoint *tp, void *priv),
+		void *priv)
+{
+	struct tracepoint * const *iter;
+
+	if (!begin)
+		return;
+	for (iter = begin; iter < end; iter++)
+		fct(*iter, priv);
+}
+
+/**
+ * for_each_kernel_tracepoint - iteration on all kernel tracepoints
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+		void *priv)
+{
+	for_each_tracepoint_range(__start___tracepoints_ptrs,
+		__stop___tracepoints_ptrs, fct, priv);
+}
+EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
+
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
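The tracepoint.c rewrite removes the name-keyed hash table entirely: probes are now registered against the struct tracepoint itself, and module coming/going events are broadcast through a notifier chain instead of re-walking probe ranges. A tracer that only has a name must first resolve the struct, for example by walking the core-kernel tracepoint section. A sketch under that assumption (the lookup helper names are illustrative, not part of this patch):

	struct tp_lookup {
		const char *name;
		struct tracepoint *tp;
	};

	static void match_tp(struct tracepoint *tp, void *priv)
	{
		struct tp_lookup *l = priv;

		if (!strcmp(tp->name, l->name))
			l->tp = tp;
	}

	static int my_probe_register(const char *name, void *probe, void *data)
	{
		struct tp_lookup l = { .name = name };

		for_each_kernel_tracepoint(match_tp, &l);
		if (!l.tp)
			return -ENODEV;		/* not a built-in tracepoint */
		return tracepoint_probe_register(l.tp, probe, data);
	}

For tracepoints that live in modules, the caller is expected to hook register_tracepoint_module_notifier(), redo its lookup on MODULE_STATE_COMING, and unregister its probes before MODULE_STATE_GOING completes; tp_module_going_check_quiescent() warns if any funcs pointer is still populated at teardown.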
diff --git a/lib/Kconfig b/lib/Kconfig
index 5d4984c..4771fb3 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,15 @@
 	depends on AUDIT && !AUDIT_ARCH
 	default y
 
+config AUDIT_ARCH_COMPAT_GENERIC
+	bool
+	default n
+
+config AUDIT_COMPAT_GENERIC
+	bool
+	depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+	default y
+
 config RANDOM32_SELFTEST
 	bool "PRNG perform self test on init"
 	default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd7f885..140b66a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1045,16 +1045,6 @@
 	  of the BUG call as well as the EIP and oops trace.  This aids
 	  debugging but costs about 70-100K of memory.
 
-config DEBUG_WRITECOUNT
-	bool "Debug filesystem writers count"
-	depends on DEBUG_KERNEL
-	help
-	  Enable this to catch wrong use of the writers count in struct
-	  vfsmount.  This will increase the size of each file struct by
-	  32 bits.
-
-	  If unsure, say N.
-
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 48140e3..0cd7b68 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -96,6 +96,7 @@
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
diff --git a/lib/audit.c b/lib/audit.c
index 76bbed4..1d726a2 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -30,11 +30,17 @@
 
 int audit_classify_arch(int arch)
 {
-	return 0;
+	if (audit_is_compat(arch))
+		return 1;
+	else
+		return 0;
 }
 
 int audit_classify_syscall(int abi, unsigned syscall)
 {
+	if (audit_is_compat(abi))
+		return audit_classify_compat_syscall(abi, syscall);
+
 	switch(syscall) {
 #ifdef __NR_open
 	case __NR_open:
@@ -57,6 +63,13 @@
 
 static int __init audit_classes_init(void)
 {
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+	audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+	audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
 	audit_register_class(AUDIT_CLASS_WRITE, write_class);
 	audit_register_class(AUDIT_CLASS_READ, read_class);
 	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 0000000..873f75b
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,50 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+	switch (syscall) {
+#ifdef __NR_open
+	case __NR_open:
+		return 2;
+#endif
+#ifdef __NR_openat
+	case __NR_openat:
+		return 3;
+#endif
+#ifdef __NR_socketcall
+	case __NR_socketcall:
+		return 4;
+#endif
+	case __NR_execve:
+		return 5;
+	default:
+		return 1;
+	}
+}
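The audit pieces are plumbing for 32-bit (compat) syscall classification on biarch targets that use the generic audit code: an architecture selects AUDIT_ARCH_COMPAT_GENERIC, lib/compat_audit.c gets built, and audit_classify_arch()/audit_classify_syscall() dispatch on audit_is_compat(). The compat classifier returns the same codes the native one uses (2 open, 3 openat, 4 socketcall, 5 execve), with 1 as the compat catch-all. The audit_is_compat() helper itself is not part of this diff; roughly, and only as an assumption about the matching include/linux/audit.h change, it keys off the 64-bit bit of the audit arch value:

	/* Assumption: shape of the helper this code relies on,
	 * not the verbatim kernel definition. */
	#ifdef CONFIG_AUDIT_COMPAT_GENERIC
	#define audit_is_compat(arch)	(!((arch) & __AUDIT_ARCH_64BIT))
	#else
	#define audit_is_compat(arch)	false
	#endif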
diff --git a/mm/Makefile b/mm/Makefile
index 9e5aaf9..b484452 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -17,7 +17,8 @@
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o vmacache.o \
-			   interval_tree.o list_lru.o workingset.o $(mmu-y)
+			   interval_tree.o list_lru.o workingset.o \
+			   iov_iter.o $(mmu-y)
 
 obj-y += init-mm.o
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 27ebc0c..a82fbe4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -77,7 +77,7 @@
  *  ->mmap_sem
  *    ->lock_page		(access_process_vm)
  *
- *  ->i_mutex			(generic_file_buffered_write)
+ *  ->i_mutex			(generic_perform_write)
  *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -1428,7 +1428,8 @@
  * do_generic_file_read - generic file read routine
  * @filp:	the file to read
  * @ppos:	current file position
- * @desc:	read_descriptor
+ * @iter:	data destination
+ * @written:	already copied
  *
  * This is a generic file read routine, and uses the
  * mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1436,8 +1437,8 @@
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
  */
-static void do_generic_file_read(struct file *filp, loff_t *ppos,
-		read_descriptor_t *desc)
+static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+		struct iov_iter *iter, ssize_t written)
 {
 	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
@@ -1447,12 +1448,12 @@
 	pgoff_t prev_index;
 	unsigned long offset;      /* offset into pagecache page */
 	unsigned int prev_offset;
-	int error;
+	int error = 0;
 
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
 	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
 	for (;;) {
@@ -1487,7 +1488,7 @@
 			if (!page->mapping)
 				goto page_not_up_to_date_locked;
 			if (!mapping->a_ops->is_partially_uptodate(page,
-								desc, offset))
+							offset, iter->count))
 				goto page_not_up_to_date_locked;
 			unlock_page(page);
 		}
@@ -1537,24 +1538,23 @@
 		/*
 		 * Ok, we have the page, and it's up-to-date, so
 		 * now we can copy it to user space...
-		 *
-		 * The file_read_actor routine returns how many bytes were
-		 * actually used..
-		 * NOTE! This may not be the same as how much of a user buffer
-		 * we filled up (we may be padding etc), so we can only update
-		 * "pos" here (the actor routine has to update the user buffer
-		 * pointers and the remaining count).
 		 */
-		ret = file_read_actor(desc, page, offset, nr);
+
+		ret = copy_page_to_iter(page, offset, nr, iter);
 		offset += ret;
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
 		prev_offset = offset;
 
 		page_cache_release(page);
-		if (ret == nr && desc->count)
-			continue;
-		goto out;
+		written += ret;
+		if (!iov_iter_count(iter))
+			goto out;
+		if (ret < nr) {
+			error = -EFAULT;
+			goto out;
+		}
+		continue;
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
@@ -1589,6 +1589,7 @@
 		if (unlikely(error)) {
 			if (error == AOP_TRUNCATED_PAGE) {
 				page_cache_release(page);
+				error = 0;
 				goto find_page;
 			}
 			goto readpage_error;
@@ -1619,7 +1620,6 @@
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
-		desc->error = error;
 		page_cache_release(page);
 		goto out;
 
@@ -1630,16 +1630,17 @@
 		 */
 		page = page_cache_alloc_cold(mapping);
 		if (!page) {
-			desc->error = -ENOMEM;
+			error = -ENOMEM;
 			goto out;
 		}
 		error = add_to_page_cache_lru(page, mapping,
 						index, GFP_KERNEL);
 		if (error) {
 			page_cache_release(page);
-			if (error == -EEXIST)
+			if (error == -EEXIST) {
+				error = 0;
 				goto find_page;
-			desc->error = error;
+			}
 			goto out;
 		}
 		goto readpage;
@@ -1652,44 +1653,7 @@
 
 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	file_accessed(filp);
-}
-
-int file_read_actor(read_descriptor_t *desc, struct page *page,
-			unsigned long offset, unsigned long size)
-{
-	char *kaddr;
-	unsigned long left, count = desc->count;
-
-	if (size > count)
-		size = count;
-
-	/*
-	 * Faults on the destination of a read are common, so do it before
-	 * taking the kmap.
-	 */
-	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-		kaddr = kmap_atomic(page);
-		left = __copy_to_user_inatomic(desc->arg.buf,
-						kaddr + offset, size);
-		kunmap_atomic(kaddr);
-		if (left == 0)
-			goto success;
-	}
-
-	/* Do it the slow way */
-	kaddr = kmap(page);
-	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
-	kunmap(page);
-
-	if (left) {
-		size -= left;
-		desc->error = -EFAULT;
-	}
-success:
-	desc->count = count - size;
-	desc->written += size;
-	desc->arg.buf += size;
-	return size;
+	return written ? written : error;
 }
 
 /*
@@ -1747,14 +1711,15 @@
 {
 	struct file *filp = iocb->ki_filp;
 	ssize_t retval;
-	unsigned long seg = 0;
 	size_t count;
 	loff_t *ppos = &iocb->ki_pos;
+	struct iov_iter i;
 
 	count = 0;
 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 	if (retval)
 		return retval;
+	iov_iter_init(&i, iov, nr_segs, count, 0);
 
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (filp->f_flags & O_DIRECT) {
@@ -1776,6 +1741,11 @@
 		if (retval > 0) {
 			*ppos = pos + retval;
 			count -= retval;
+			/*
+			 * If we did a short DIO read we need to skip the
+			 * section of the iov that we've already read data into.
+			 */
+			iov_iter_advance(&i, retval);
 		}
 
 		/*
@@ -1792,39 +1762,7 @@
 		}
 	}
 
-	count = retval;
-	for (seg = 0; seg < nr_segs; seg++) {
-		read_descriptor_t desc;
-		loff_t offset = 0;
-
-		/*
-		 * If we did a short DIO read we need to skip the section of the
-		 * iov that we've already read data into.
-		 */
-		if (count) {
-			if (count > iov[seg].iov_len) {
-				count -= iov[seg].iov_len;
-				continue;
-			}
-			offset = count;
-			count = 0;
-		}
-
-		desc.written = 0;
-		desc.arg.buf = iov[seg].iov_base + offset;
-		desc.count = iov[seg].iov_len - offset;
-		if (desc.count == 0)
-			continue;
-		desc.error = 0;
-		do_generic_file_read(filp, ppos, &desc);
-		retval += desc.written;
-		if (desc.error) {
-			retval = retval ?: desc.error;
-			break;
-		}
-		if (desc.count > 0)
-			break;
-	}
+	retval = do_generic_file_read(filp, ppos, &i, retval);
 out:
 	return retval;
 }
@@ -2335,150 +2273,6 @@
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-			const struct iovec *iov, size_t base, size_t bytes)
-{
-	size_t copied = 0, left = 0;
-
-	while (bytes) {
-		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
-
-		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
-		copied += copy;
-		bytes -= copy;
-		vaddr += copy;
-		iov++;
-
-		if (unlikely(left))
-			break;
-	}
-	return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	BUG_ON(!in_atomic());
-	kaddr = kmap_atomic(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap_atomic(kaddr);
-
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-	BUG_ON(i->count < bytes);
-
-	if (likely(i->nr_segs == 1)) {
-		i->iov_offset += bytes;
-		i->count -= bytes;
-	} else {
-		const struct iovec *iov = i->iov;
-		size_t base = i->iov_offset;
-		unsigned long nr_segs = i->nr_segs;
-
-		/*
-		 * The !iov->iov_len check ensures we skip over unlikely
-		 * zero-length segments (without overruning the iovec).
-		 */
-		while (bytes || unlikely(i->count && !iov->iov_len)) {
-			int copy;
-
-			copy = min(bytes, iov->iov_len - base);
-			BUG_ON(!i->count || i->count < copy);
-			i->count -= copy;
-			bytes -= copy;
-			base += copy;
-			if (iov->iov_len == base) {
-				iov++;
-				nr_segs--;
-				base = 0;
-			}
-		}
-		i->iov = iov;
-		i->iov_offset = base;
-		i->nr_segs = nr_segs;
-	}
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-	char __user *buf = i->iov->iov_base + i->iov_offset;
-	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-	return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	if (i->nr_segs == 1)
-		return i->count;
-	else
-		return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
@@ -2585,7 +2379,7 @@
 
 ssize_t
 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+		unsigned long *nr_segs, loff_t pos,
 		size_t count, size_t ocount)
 {
 	struct file	*file = iocb->ki_filp;
@@ -2646,7 +2440,7 @@
 			i_size_write(inode, pos);
 			mark_inode_dirty(inode);
 		}
-		*ppos = pos;
+		iocb->ki_pos = pos;
 	}
 out:
 	return written;
@@ -2692,7 +2486,7 @@
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
 
-static ssize_t generic_perform_write(struct file *file,
+ssize_t generic_perform_write(struct file *file,
 				struct iov_iter *i, loff_t pos)
 {
 	struct address_space *mapping = file->f_mapping;
@@ -2742,9 +2536,7 @@
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-		pagefault_enable();
 		flush_dcache_page(page);
 
 		mark_page_accessed(page);
@@ -2782,27 +2574,7 @@
 
 	return written ? written : status;
 }
-
-ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t pos, loff_t *ppos,
-		size_t count, ssize_t written)
-{
-	struct file *file = iocb->ki_filp;
-	ssize_t status;
-	struct iov_iter i;
-
-	iov_iter_init(&i, iov, nr_segs, count, written);
-	status = generic_perform_write(file, &i, pos);
-
-	if (likely(status >= 0)) {
-		written += status;
-		*ppos = pos + status;
-  	}
-	
-	return written ? written : status;
-}
-EXPORT_SYMBOL(generic_file_buffered_write);
+EXPORT_SYMBOL(generic_perform_write);
 
 /**
  * __generic_file_aio_write - write data to a file
@@ -2824,16 +2596,18 @@
  * avoid syncing under i_mutex.
  */
 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-				 unsigned long nr_segs, loff_t *ppos)
+				 unsigned long nr_segs)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space * mapping = file->f_mapping;
 	size_t ocount;		/* original count */
 	size_t count;		/* after file limit checks */
 	struct inode 	*inode = mapping->host;
-	loff_t		pos;
-	ssize_t		written;
+	loff_t		pos = iocb->ki_pos;
+	ssize_t		written = 0;
 	ssize_t		err;
+	ssize_t		status;
+	struct iov_iter from;
 
 	ocount = 0;
 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
@@ -2841,12 +2615,9 @@
 		return err;
 
 	count = ocount;
-	pos = *ppos;
 
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
-	written = 0;
-
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
 		goto out;
@@ -2862,45 +2633,47 @@
 	if (err)
 		goto out;
 
+	iov_iter_init(&from, iov, nr_segs, count, 0);
+
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
 	if (unlikely(file->f_flags & O_DIRECT)) {
 		loff_t endbyte;
-		ssize_t written_buffered;
 
-		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
-							ppos, count, ocount);
+		written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
+							count, ocount);
 		if (written < 0 || written == count)
 			goto out;
+		iov_iter_advance(&from, written);
+
 		/*
 		 * direct-io write to a hole: fall through to buffered I/O
 		 * for completing the rest of the request.
 		 */
 		pos += written;
 		count -= written;
-		written_buffered = generic_file_buffered_write(iocb, iov,
-						nr_segs, pos, ppos, count,
-						written);
+
+		status = generic_perform_write(file, &from, pos);
 		/*
-		 * If generic_file_buffered_write() retuned a synchronous error
+		 * If generic_perform_write() returned a synchronous error
 		 * then we want to return the number of bytes which were
 		 * direct-written, or the error code if that was zero.  Note
 		 * that this differs from normal direct-io semantics, which
 		 * will return -EFOO even if some bytes were written.
 		 */
-		if (written_buffered < 0) {
-			err = written_buffered;
+		if (unlikely(status < 0) && !written) {
+			err = status;
 			goto out;
 		}
-
+		iocb->ki_pos = pos + status;
 		/*
 		 * We need to ensure that the page cache pages are written to
 		 * disk and invalidated to preserve the expected O_DIRECT
 		 * semantics.
 		 */
-		endbyte = pos + written_buffered - written - 1;
+		endbyte = pos + status - 1;
 		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
 		if (err == 0) {
-			written = written_buffered;
+			written += status;
 			invalidate_mapping_pages(mapping,
 						 pos >> PAGE_CACHE_SHIFT,
 						 endbyte >> PAGE_CACHE_SHIFT);
@@ -2911,8 +2684,9 @@
 			 */
 		}
 	} else {
-		written = generic_file_buffered_write(iocb, iov, nr_segs,
-				pos, ppos, count, written);
+		written = generic_perform_write(file, &from, pos);
+		if (likely(written >= 0))
+			iocb->ki_pos = pos + written;
 	}
 out:
 	current->backing_dev_info = NULL;
@@ -2941,7 +2715,7 @@
 	BUG_ON(iocb->ki_pos != pos);
 
 	mutex_lock(&inode->i_mutex);
-	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+	ret = __generic_file_aio_write(iocb, iov, nr_segs);
 	mutex_unlock(&inode->i_mutex);
 
 	if (ret > 0) {
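The filemap.c conversion replaces the per-segment read_descriptor_t/file_read_actor machinery with an iov_iter that is built once and threaded through both the read and write paths; the explicit pagefault_disable()/pagefault_enable() pair around iov_iter_copy_from_user_atomic() can go because kmap_atomic() already runs with pagefaults disabled. For filesystems, the visible change is that generic_file_buffered_write() disappears and generic_perform_write() is exported in its place: the caller builds the iterator and updates ki_pos itself, as the __generic_file_aio_write() hunks above now do. Caller-side pattern (variable names are illustrative, lifted from that conversion):

	struct iov_iter from;
	ssize_t status;

	iov_iter_init(&from, iov, nr_segs, count, 0);
	status = generic_perform_write(file, &from, pos);
	if (likely(status >= 0))
		iocb->ki_pos = pos + status;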
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
new file mode 100644
index 0000000..10e46cd
--- /dev/null
+++ b/mm/iov_iter.c
@@ -0,0 +1,224 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *from;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_writeable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		from = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_to_user_inatomic(buf, from, copy);
+		copy -= left;
+		skip += copy;
+		from += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_to_user_inatomic(buf, from, copy);
+			copy -= left;
+			skip = copy;
+			from += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = from - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	from = kaddr + offset;
+	left = __copy_to_user(buf, from, copy);
+	copy -= left;
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_to_user(buf, from, copy);
+		copy -= left;
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+			const struct iovec *iov, size_t base, size_t bytes)
+{
+	size_t copied = 0, left = 0;
+
+	while (bytes) {
+		char __user *buf = iov->iov_base + base;
+		int copy = min(bytes, iov->iov_len - base);
+
+		base = 0;
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		copied += copy;
+		bytes -= copy;
+		vaddr += copy;
+		iov++;
+
+		if (unlikely(left))
+			break;
+	}
+	return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap_atomic(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap_atomic(kaddr);
+
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same sideeffects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t copied;
+
+	kaddr = kmap(page);
+	if (likely(i->nr_segs == 1)) {
+		int left;
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		left = __copy_from_user(kaddr + offset, buf, bytes);
+		copied = bytes - left;
+	} else {
+		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+						i->iov, i->iov_offset, bytes);
+	}
+	kunmap(page);
+	return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+	BUG_ON(i->count < bytes);
+
+	if (likely(i->nr_segs == 1)) {
+		i->iov_offset += bytes;
+		i->count -= bytes;
+	} else {
+		const struct iovec *iov = i->iov;
+		size_t base = i->iov_offset;
+		unsigned long nr_segs = i->nr_segs;
+
+		/*
+		 * The !iov->iov_len check ensures we skip over unlikely
+		 * zero-length segments (without overruning the iovec).
+		 */
+		while (bytes || unlikely(i->count && !iov->iov_len)) {
+			int copy;
+
+			copy = min(bytes, iov->iov_len - base);
+			BUG_ON(!i->count || i->count < copy);
+			i->count -= copy;
+			bytes -= copy;
+			base += copy;
+			if (iov->iov_len == base) {
+				iov++;
+				nr_segs--;
+				base = 0;
+			}
+		}
+		i->iov = iov;
+		i->iov_offset = base;
+		i->nr_segs = nr_segs;
+	}
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+	char __user *buf = i->iov->iov_base + i->iov_offset;
+	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+	return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+	const struct iovec *iov = i->iov;
+	if (i->nr_segs == 1)
+		return i->count;
+	else
+		return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
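mm/iov_iter.c collects the iterator primitives that used to live in filemap.c and adds copy_page_to_iter(), the kernel-to-user counterpart of iov_iter_copy_from_user_atomic(). One behavioural detail worth calling out: copy_page_to_iter() advances the iterator itself (it rewrites i->iov, i->iov_offset and i->count before returning), whereas the copy-from-user helpers leave that to the caller via iov_iter_advance(), as the process_vm_access.c hunks below show. A read-side usage sketch, following the short-copy convention used in do_generic_file_read():

	/* Copy up to one page into the user iovecs; a short copy while the
	 * iterator still has space means we faulted on the user buffer. */
	size_t want = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(iter));
	size_t copied = copy_page_to_iter(page, offset, want, iter);

	if (copied < want && iov_iter_count(iter))
		return -EFAULT;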
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index cb79065..8505c92 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -23,129 +23,44 @@
 
 /**
  * process_vm_rw_pages - read/write pages from task specified
- * @task: task to read/write from
- * @mm: mm for task
- * @process_pages: struct pages area that can store at least
- *  nr_pages_to_copy struct page pointers
- * @pa: address of page in task to start copying from/to
+ * @pages: array of pointers to pages we want to copy
  * @start_offset: offset in page to start copying from/to
  * @len: number of bytes to copy
- * @lvec: iovec array specifying where to copy to/from
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @vm_write: 0 means copy from, 1 means copy to
- * @nr_pages_to_copy: number of pages to copy
- * @bytes_copied: returns number of bytes successfully copied
  * Returns 0 on success, error code otherwise
  */
-static int process_vm_rw_pages(struct task_struct *task,
-			       struct mm_struct *mm,
-			       struct page **process_pages,
-			       unsigned long pa,
-			       unsigned long start_offset,
-			       unsigned long len,
-			       const struct iovec *lvec,
-			       unsigned long lvec_cnt,
-			       unsigned long *lvec_current,
-			       size_t *lvec_offset,
-			       int vm_write,
-			       unsigned int nr_pages_to_copy,
-			       ssize_t *bytes_copied)
+static int process_vm_rw_pages(struct page **pages,
+			       unsigned offset,
+			       size_t len,
+			       struct iov_iter *iter,
+			       int vm_write)
 {
-	int pages_pinned;
-	void *target_kaddr;
-	int pgs_copied = 0;
-	int j;
-	int ret;
-	ssize_t bytes_to_copy;
-	ssize_t rc = 0;
-
-	*bytes_copied = 0;
-
-	/* Get the pages we're interested in */
-	down_read(&mm->mmap_sem);
-	pages_pinned = get_user_pages(task, mm, pa,
-				      nr_pages_to_copy,
-				      vm_write, 0, process_pages, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (pages_pinned != nr_pages_to_copy) {
-		rc = -EFAULT;
-		goto end;
-	}
-
 	/* Do the copy for each page */
-	for (pgs_copied = 0;
-	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
-	     pgs_copied++) {
-		/* Make sure we have a non zero length iovec */
-		while (*lvec_current < lvec_cnt
-		       && lvec[*lvec_current].iov_len == 0)
-			(*lvec_current)++;
-		if (*lvec_current == lvec_cnt)
-			break;
+	while (len && iov_iter_count(iter)) {
+		struct page *page = *pages++;
+		size_t copy = PAGE_SIZE - offset;
+		size_t copied;
 
-		/*
-		 * Will copy smallest of:
-		 * - bytes remaining in page
-		 * - bytes remaining in destination iovec
-		 */
-		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
-				      len - *bytes_copied);
-		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
-				      lvec[*lvec_current].iov_len
-				      - *lvec_offset);
+		if (copy > len)
+			copy = len;
 
-		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
-
-		if (vm_write)
-			ret = copy_from_user(target_kaddr,
-					     lvec[*lvec_current].iov_base
-					     + *lvec_offset,
-					     bytes_to_copy);
-		else
-			ret = copy_to_user(lvec[*lvec_current].iov_base
-					   + *lvec_offset,
-					   target_kaddr, bytes_to_copy);
-		kunmap(process_pages[pgs_copied]);
-		if (ret) {
-			*bytes_copied += bytes_to_copy - ret;
-			pgs_copied++;
-			rc = -EFAULT;
-			goto end;
-		}
-		*bytes_copied += bytes_to_copy;
-		*lvec_offset += bytes_to_copy;
-		if (*lvec_offset == lvec[*lvec_current].iov_len) {
-			/*
-			 * Need to copy remaining part of page into the
-			 * next iovec if there are any bytes left in page
-			 */
-			(*lvec_current)++;
-			*lvec_offset = 0;
-			start_offset = (start_offset + bytes_to_copy)
-				% PAGE_SIZE;
-			if (start_offset)
-				pgs_copied--;
+		if (vm_write) {
+			if (copy > iov_iter_count(iter))
+				copy = iov_iter_count(iter);
+			copied = iov_iter_copy_from_user(page, iter,
+					offset, copy);
+			iov_iter_advance(iter, copied);
+			set_page_dirty_lock(page);
 		} else {
-			start_offset = 0;
+			copied = copy_page_to_iter(page, offset, copy, iter);
 		}
+		len -= copied;
+		if (copied < copy && iov_iter_count(iter))
+			return -EFAULT;
+		offset = 0;
 	}
-
-end:
-	if (vm_write) {
-		for (j = 0; j < pages_pinned; j++) {
-			if (j < pgs_copied)
-				set_page_dirty_lock(process_pages[j]);
-			put_page(process_pages[j]);
-		}
-	} else {
-		for (j = 0; j < pages_pinned; j++)
-			put_page(process_pages[j]);
-	}
-
-	return rc;
+	return 0;
 }
 
 /* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -155,67 +70,60 @@
  * process_vm_rw_single_vec - read/write pages from task specified
  * @addr: start memory address of target process
  * @len: size of area to copy to/from
- * @lvec: iovec array specifying where to copy to/from locally
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @process_pages: struct pages area that can store at least
  *  nr_pages_to_copy struct page pointers
  * @mm: mm for task
  * @task: task to read/write from
  * @vm_write: 0 means copy from, 1 means copy to
- * @bytes_copied: returns number of bytes successfully copied
  * Returns 0 on success or on failure error code
  */
 static int process_vm_rw_single_vec(unsigned long addr,
 				    unsigned long len,
-				    const struct iovec *lvec,
-				    unsigned long lvec_cnt,
-				    unsigned long *lvec_current,
-				    size_t *lvec_offset,
+				    struct iov_iter *iter,
 				    struct page **process_pages,
 				    struct mm_struct *mm,
 				    struct task_struct *task,
-				    int vm_write,
-				    ssize_t *bytes_copied)
+				    int vm_write)
 {
 	unsigned long pa = addr & PAGE_MASK;
 	unsigned long start_offset = addr - pa;
 	unsigned long nr_pages;
-	ssize_t bytes_copied_loop;
 	ssize_t rc = 0;
-	unsigned long nr_pages_copied = 0;
-	unsigned long nr_pages_to_copy;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
 
-	*bytes_copied = 0;
-
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
-		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
-				       max_pages_per_loop);
+	while (!rc && nr_pages && iov_iter_count(iter)) {
+		int pages = min(nr_pages, max_pages_per_loop);
+		size_t bytes;
 
-		rc = process_vm_rw_pages(task, mm, process_pages, pa,
-					 start_offset, len,
-					 lvec, lvec_cnt,
-					 lvec_current, lvec_offset,
-					 vm_write, nr_pages_to_copy,
-					 &bytes_copied_loop);
+		/* Get the pages we're interested in */
+		down_read(&mm->mmap_sem);
+		pages = get_user_pages(task, mm, pa, pages,
+				      vm_write, 0, process_pages, NULL);
+		up_read(&mm->mmap_sem);
+
+		if (pages <= 0)
+			return -EFAULT;
+
+		bytes = pages * PAGE_SIZE - start_offset;
+		if (bytes > len)
+			bytes = len;
+
+		rc = process_vm_rw_pages(process_pages,
+					 start_offset, bytes, iter,
+					 vm_write);
+		len -= bytes;
 		start_offset = 0;
-		*bytes_copied += bytes_copied_loop;
-
-		if (rc < 0) {
-			return rc;
-		} else {
-			len -= bytes_copied_loop;
-			nr_pages_copied += nr_pages_to_copy;
-			pa += nr_pages_to_copy * PAGE_SIZE;
-		}
+		nr_pages -= pages;
+		pa += pages * PAGE_SIZE;
+		while (pages)
+			put_page(process_pages[--pages]);
 	}
 
 	return rc;
@@ -228,8 +136,7 @@
 /**
  * process_vm_rw_core - core of reading/writing pages from task specified
  * @pid: PID of process to read/write from/to
- * @lvec: iovec array specifying where to copy to/from locally
- * @liovcnt: size of lvec array
+ * @iter: where to copy to/from locally
  * @rvec: iovec array specifying where to copy to/from in the other process
  * @riovcnt: size of rvec array
  * @flags: currently unused
@@ -238,8 +145,7 @@
  *  return less bytes than expected if an error occurs during the copying
  *  process.
  */
-static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
-				  unsigned long liovcnt,
+static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 				  const struct iovec *rvec,
 				  unsigned long riovcnt,
 				  unsigned long flags, int vm_write)
@@ -250,13 +156,10 @@
 	struct mm_struct *mm;
 	unsigned long i;
 	ssize_t rc = 0;
-	ssize_t bytes_copied_loop;
-	ssize_t bytes_copied = 0;
 	unsigned long nr_pages = 0;
 	unsigned long nr_pages_iov;
-	unsigned long iov_l_curr_idx = 0;
-	size_t iov_l_curr_offset = 0;
 	ssize_t iov_len;
+	size_t total_len = iov_iter_count(iter);
 
 	/*
 	 * Work out how many pages of struct pages we're going to need
@@ -310,24 +213,20 @@
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
-			process_pages, mm, task, vm_write, &bytes_copied_loop);
-		bytes_copied += bytes_copied_loop;
-		if (rc != 0) {
-			/* If we have managed to copy any data at all then
-			   we return the number of bytes copied. Otherwise
-			   we return the error code */
-			if (bytes_copied)
-				rc = bytes_copied;
-			goto put_mm;
-		}
-	}
+			iter, process_pages, mm, task, vm_write);
 
-	rc = bytes_copied;
-put_mm:
+	/* copied = space before - space after */
+	total_len -= iov_iter_count(iter);
+
+	/* If we have managed to copy any data at all then
+	   we return the number of bytes copied. Otherwise
+	   we return the error code */
+	if (total_len)
+		rc = total_len;
+
 	mmput(mm);
 
 put_task_struct:
@@ -363,6 +262,7 @@
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc;
 
 	if (flags != 0)
@@ -378,13 +278,14 @@
 	if (rc <= 0)
 		goto free_iovecs;
 
+	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+
 	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
 				   iovstack_r, &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-				vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)
@@ -424,6 +325,7 @@
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc = -EFAULT;
 
 	if (flags != 0)
@@ -439,14 +341,14 @@
 						  &iov_l);
 	if (rc <= 0)
 		goto free_iovecs;
+	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
 	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
 					  UIO_FASTIOV, iovstack_r,
 					  &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-			   vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)
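
Aside (not part of the patch): the hunks above rework the kernel side of the process_vm_readv()/process_vm_writev() syscalls around struct iov_iter; the user-space interface is unchanged. A minimal, hypothetical user-space sketch of the reader (the pid and remote address are placeholders):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/uio.h>

	int main(void)
	{
		char buf[64];
		struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
		struct iovec remote = { .iov_base = (void *)0x400000,	/* placeholder address */
					.iov_len  = sizeof(buf) };

		/* Copy sizeof(buf) bytes of the target's memory into buf */
		ssize_t n = process_vm_readv(1234 /* placeholder pid */,
					     &local, 1, &remote, 1, 0);
		if (n < 0)
			perror("process_vm_readv");
		else
			printf("copied %zd bytes\n", n);
		return 0;
	}
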
diff --git a/mm/shmem.c b/mm/shmem.c
index 70273f8..8f1a954 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1402,13 +1402,25 @@
 	return copied;
 }
 
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_aio_read(struct kiocb *iocb,
+		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
 {
-	struct inode *inode = file_inode(filp);
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
 	pgoff_t index;
 	unsigned long offset;
 	enum sgp_type sgp = SGP_READ;
+	int error;
+	ssize_t retval;
+	size_t count;
+	loff_t *ppos = &iocb->ki_pos;
+	struct iov_iter iter;
+
+	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+	if (retval)
+		return retval;
+	iov_iter_init(&iter, iov, nr_segs, count, 0);
 
 	/*
 	 * Might this read be for a stacking filesystem?  Then when reading
@@ -1436,10 +1448,10 @@
 				break;
 		}
 
-		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
-		if (desc->error) {
-			if (desc->error == -EINVAL)
-				desc->error = 0;
+		error = shmem_getpage(inode, index, &page, sgp, NULL);
+		if (error) {
+			if (error == -EINVAL)
+				error = 0;
 			break;
 		}
 		if (page)
@@ -1483,61 +1495,26 @@
 		/*
 		 * Ok, we have the page, and it's up-to-date, so
 		 * now we can copy it to user space...
-		 *
-		 * The actor routine returns how many bytes were actually used..
-		 * NOTE! This may not be the same as how much of a user buffer
-		 * we filled up (we may be padding etc), so we can only update
-		 * "pos" here (the actor routine has to update the user buffer
-		 * pointers and the remaining count).
 		 */
-		ret = actor(desc, page, offset, nr);
+		ret = copy_page_to_iter(page, offset, nr, &iter);
+		retval += ret;
 		offset += ret;
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
 
 		page_cache_release(page);
-		if (ret != nr || !desc->count)
+		if (!iov_iter_count(&iter))
 			break;
-
+		if (ret < nr) {
+			error = -EFAULT;
+			break;
+		}
 		cond_resched();
 	}
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
-	file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
-	unsigned long seg;
-	size_t count;
-	loff_t *ppos = &iocb->ki_pos;
-
-	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-	if (retval)
-		return retval;
-
-	for (seg = 0; seg < nr_segs; seg++) {
-		read_descriptor_t desc;
-
-		desc.written = 0;
-		desc.arg.buf = iov[seg].iov_base;
-		desc.count = iov[seg].iov_len;
-		if (desc.count == 0)
-			continue;
-		desc.error = 0;
-		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-		retval += desc.written;
-		if (desc.error) {
-			retval = retval ?: desc.error;
-			break;
-		}
-		if (desc.count > 0)
-			break;
-	}
-	return retval;
+	file_accessed(file);
+	return retval ? retval : error;
 }
 
 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -1576,7 +1553,7 @@
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	loff = *ppos & ~PAGE_CACHE_MASK;
 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	nr_pages = min(req_pages, pipe->buffers);
+	nr_pages = min(req_pages, spd.nr_pages_max);
 
 	spd.nr_pages = find_get_pages_contig(mapping, index,
 						nr_pages, spd.pages);
diff --git a/mm/util.c b/mm/util.c
index d7813e6..f380af7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -446,6 +446,54 @@
 	return allowed;
 }
 
+/**
+ * get_cmdline() - copy the cmdline value to a buffer.
+ * @task:     the task whose cmdline value to copy.
+ * @buffer:   the buffer to copy to.
+ * @buflen:   the length of the buffer. Larger cmdline values are truncated
+ *            to this length.
+ * Returns the size of the cmdline field copied. Note that the copy does
+ * not guarantee an ending NULL byte.
+ */
+int get_cmdline(struct task_struct *task, char *buffer, int buflen)
+{
+	int res = 0;
+	unsigned int len;
+	struct mm_struct *mm = get_task_mm(task);
+	if (!mm)
+		goto out;
+	if (!mm->arg_end)
+		goto out_mm;	/* Shh! No looking before we're done */
+
+	len = mm->arg_end - mm->arg_start;
+
+	if (len > buflen)
+		len = buflen;
+
+	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+	/*
+	 * If the nul at the end of args has been overwritten, then
+	 * assume application is using setproctitle(3).
+	 */
+	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
+		len = strnlen(buffer, res);
+		if (len < res) {
+			res = len;
+		} else {
+			len = mm->env_end - mm->env_start;
+			if (len > buflen - res)
+				len = buflen - res;
+			res += access_process_vm(task, mm->env_start,
+						 buffer+res, len, 0);
+			res = strnlen(buffer, res);
+		}
+	}
+out_mm:
+	mmput(mm);
+out:
+	return res;
+}
 
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
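
Aside (not part of the patch): per its kernel-doc above, get_cmdline() does not guarantee NUL termination, so callers must terminate the buffer themselves. A hedged sketch of a possible kernel-side caller (the function name and buffer size are illustrative only):

	static void show_task_cmdline(struct task_struct *task)
	{
		char buf[128];
		int len = get_cmdline(task, buf, sizeof(buf) - 1);

		if (len <= 0)
			return;
		buf[len] = '\0';	/* get_cmdline() may not NUL-terminate */
		pr_info("cmdline: %s\n", buf);
	}
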
diff --git a/net/9p/client.c b/net/9p/client.c
index 9186550..0004cba 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -415,9 +415,17 @@
  * req: request received
  *
  */
-void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
 {
 	p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
+
+	/*
+	 * This barrier is needed to make sure any change made to req before
+	 * the other thread wakes up will indeed be seen by the waiting side.
+	 */
+	smp_wmb();
+	req->status = status;
+
 	wake_up(req->wq);
 	p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
@@ -655,16 +663,13 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-
 	/*
 	 * if we haven't received a response for oldreq,
 	 * remove it from the list
 	 */
-	if (oldreq->status == REQ_STATUS_FLSH) {
-		spin_lock(&c->lock);
-		list_del(&oldreq->req_list);
-		spin_unlock(&c->lock);
-	}
+	if (oldreq->status == REQ_STATUS_SENT)
+		if (c->trans_mod->cancelled)
+			c->trans_mod->cancelled(c, oldreq);
 
 	p9_free_req(c, req);
 	return 0;
@@ -751,6 +756,12 @@
 	err = wait_event_interruptible(*req->wq,
 				       req->status >= REQ_STATUS_RCVD);
 
+	/*
+	 * Make sure our req is coherent with regard to updates in other
+	 * threads - echoes to wmb() in the callback
+	 */
+	smp_rmb();
+
 	if ((err == -ERESTARTSYS) && (c->status == Connected)
 				  && (type == P9_TFLUSH)) {
 		sigpending = 1;
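
Aside (not part of the patch): the smp_wmb()/smp_rmb() pair added above implements a publish/consume ordering between the completion callback and the waiter. A simplified sketch of that ordering, with a hypothetical handle_reply() standing in for the real consumer:

	/* Completion side (p9_client_cb) */
	req->rc = reply;		/* fill in the result ...                */
	smp_wmb();			/* ... make it visible before status ... */
	req->status = REQ_STATUS_RCVD;	/* ... which the waiter checks           */
	wake_up(req->wq);

	/* Waiting side (p9_client_rpc) */
	wait_event_interruptible(*req->wq, req->status >= REQ_STATUS_RCVD);
	smp_rmb();			/* pairs with smp_wmb(): rc is now visible */
	handle_reply(req->rc);		/* hypothetical consumer */
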
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index b7bd7f2..80d08f6 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -66,20 +66,6 @@
 	int privport;
 };
 
-/**
- * struct p9_trans_fd - transport state
- * @rd: reference to file to read from
- * @wr: reference of file to write to
- * @conn: connection state reference
- *
- */
-
-struct p9_trans_fd {
-	struct file *rd;
-	struct file *wr;
-	struct p9_conn *conn;
-};
-
 /*
   * Option Parsing (code inspired by NFS code)
   *  - a little lazy - parse all fd-transport options
@@ -159,6 +145,20 @@
 	unsigned long wsched;
 };
 
+/**
+ * struct p9_trans_fd - transport state
+ * @rd: reference to file to read from
+ * @wr: reference of file to write to
+ * @conn: connection state reference
+ *
+ */
+
+struct p9_trans_fd {
+	struct file *rd;
+	struct file *wr;
+	struct p9_conn conn;
+};
+
 static void p9_poll_workfn(struct work_struct *work);
 
 static DEFINE_SPINLOCK(p9_poll_lock);
@@ -212,15 +212,9 @@
 	m->err = err;
 
 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
-		req->status = REQ_STATUS_ERROR;
-		if (!req->t_err)
-			req->t_err = err;
 		list_move(&req->req_list, &cancel_list);
 	}
 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
-		req->status = REQ_STATUS_ERROR;
-		if (!req->t_err)
-			req->t_err = err;
 		list_move(&req->req_list, &cancel_list);
 	}
 	spin_unlock_irqrestore(&m->client->lock, flags);
@@ -228,7 +222,9 @@
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
 		list_del(&req->req_list);
-		p9_client_cb(m->client, req);
+		if (!req->t_err)
+			req->t_err = err;
+		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 	}
 }
 
@@ -302,6 +298,7 @@
 {
 	int n, err;
 	struct p9_conn *m;
+	int status = REQ_STATUS_ERROR;
 
 	m = container_of(work, struct p9_conn, rq);
 
@@ -348,8 +345,7 @@
 			 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
 
 		m->req = p9_tag_lookup(m->client, tag);
-		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
-					m->req->status != REQ_STATUS_FLSH)) {
+		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
 			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 				 tag);
 			err = -EIO;
@@ -375,10 +371,10 @@
 		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
 		spin_lock(&m->client->lock);
 		if (m->req->status != REQ_STATUS_ERROR)
-			m->req->status = REQ_STATUS_RCVD;
+			status = REQ_STATUS_RCVD;
 		list_del(&m->req->req_list);
 		spin_unlock(&m->client->lock);
-		p9_client_cb(m->client, m->req);
+		p9_client_cb(m->client, m->req, status);
 		m->rbuf = NULL;
 		m->rpos = 0;
 		m->rsize = 0;
@@ -573,21 +569,19 @@
 }
 
 /**
- * p9_conn_create - allocate and initialize the per-session mux data
+ * p9_conn_create - initialize the per-session mux data
  * @client: client instance
  *
  * Note: Creates the polling task if this is the first session.
  */
 
-static struct p9_conn *p9_conn_create(struct p9_client *client)
+static void p9_conn_create(struct p9_client *client)
 {
 	int n;
-	struct p9_conn *m;
+	struct p9_trans_fd *ts = client->trans;
+	struct p9_conn *m = &ts->conn;
 
 	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
-	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
-	if (!m)
-		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&m->mux_list);
 	m->client = client;
@@ -609,8 +603,6 @@
 		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 		set_bit(Wpending, &m->wsched);
 	}
-
-	return m;
 }
 
 /**
@@ -669,7 +661,7 @@
 {
 	int n;
 	struct p9_trans_fd *ts = client->trans;
-	struct p9_conn *m = ts->conn;
+	struct p9_conn *m = &ts->conn;
 
 	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
 		 m, current, req->tc, req->tc->id);
@@ -704,14 +696,26 @@
 		list_del(&req->req_list);
 		req->status = REQ_STATUS_FLSHD;
 		ret = 0;
-	} else if (req->status == REQ_STATUS_SENT)
-		req->status = REQ_STATUS_FLSH;
-
+	}
 	spin_unlock(&client->lock);
 
 	return ret;
 }
 
+static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+
+	/* we haven't received a response for oldreq,
+	 * remove it from the list.
+	 */
+	spin_lock(&client->lock);
+	list_del(&req->req_list);
+	spin_unlock(&client->lock);
+
+	return 0;
+}
+
 /**
  * parse_opts - parse mount options into p9_fd_opts structure
  * @params: options string passed from mount
@@ -780,7 +784,7 @@
 
 static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 {
-	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
+	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
 					   GFP_KERNEL);
 	if (!ts)
 		return -ENOMEM;
@@ -806,9 +810,8 @@
 {
 	struct p9_trans_fd *p;
 	struct file *file;
-	int ret;
 
-	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
@@ -829,20 +832,12 @@
 
 	p->rd->f_flags |= O_NONBLOCK;
 
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		ret = PTR_ERR(p->conn);
-		p->conn = NULL;
-		kfree(p);
-		sockfd_put(csocket);
-		sockfd_put(csocket);
-		return ret;
-	}
+	p9_conn_create(client);
 	return 0;
 }
 
 /**
- * p9_mux_destroy - cancels all pending requests and frees mux resources
+ * p9_mux_destroy - cancels all pending requests of mux
  * @m: mux to destroy
  *
  */
@@ -859,7 +854,6 @@
 	p9_conn_cancel(m, -ECONNRESET);
 
 	m->client = NULL;
-	kfree(m);
 }
 
 /**
@@ -881,7 +875,7 @@
 
 	client->status = Disconnected;
 
-	p9_conn_destroy(ts->conn);
+	p9_conn_destroy(&ts->conn);
 
 	if (ts->rd)
 		fput(ts->rd);
@@ -1033,14 +1027,7 @@
 		return err;
 
 	p = (struct p9_trans_fd *) client->trans;
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		err = PTR_ERR(p->conn);
-		p->conn = NULL;
-		fput(p->rd);
-		fput(p->wr);
-		return err;
-	}
+	p9_conn_create(client);
 
 	return 0;
 }
@@ -1053,6 +1040,7 @@
 	.close = p9_fd_close,
 	.request = p9_fd_request,
 	.cancel = p9_fd_cancel,
+	.cancelled = p9_fd_cancelled,
 	.owner = THIS_MODULE,
 };
 
@@ -1064,6 +1052,7 @@
 	.close = p9_fd_close,
 	.request = p9_fd_request,
 	.cancel = p9_fd_cancel,
+	.cancelled = p9_fd_cancelled,
 	.owner = THIS_MODULE,
 };
 
@@ -1075,6 +1064,7 @@
 	.close = p9_fd_close,
 	.request = p9_fd_request,
 	.cancel = p9_fd_cancel,
+	.cancelled = p9_fd_cancelled,
 	.owner = THIS_MODULE,
 };
 
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 8f68df5..14ad43b 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -193,6 +193,8 @@
 		if (!*p)
 			continue;
 		token = match_token(p, tokens, args);
+		if (token == Opt_err)
+			continue;
 		r = match_int(&args[0], &option);
 		if (r < 0) {
 			p9_debug(P9_DEBUG_ERROR,
@@ -305,8 +307,7 @@
 	}
 
 	req->rc = c->rc;
-	req->status = REQ_STATUS_RCVD;
-	p9_client_cb(client, req);
+	p9_client_cb(client, req, REQ_STATUS_RCVD);
 
 	return;
 
@@ -511,6 +512,11 @@
 		goto send_error;
 	}
 
+	/* Mark request as `sent' *before* we actually send it,
+	 * because doing it after could erase the REQ_STATUS_RCVD

+	 * status in case of a very fast reply.
+	 */
+	req->status = REQ_STATUS_SENT;
 	err = ib_post_send(rdma->qp, &wr, &bad_wr);
 	if (err)
 		goto send_error;
@@ -520,6 +526,7 @@
 
  /* Handle errors that happened during or while preparing the send: */
  send_error:
+	req->status = REQ_STATUS_ERROR;
 	kfree(c);
 	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
 
@@ -582,12 +589,24 @@
 	return rdma;
 }
 
-/* its not clear to me we can do anything after send has been posted */
 static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
 {
+	/* Nothing to do here.
+	 * We will take care of it (if we have to) in rdma_cancelled()
+	 */
 	return 1;
 }
 
+/* A request has been fully flushed without a reply.
+ * That means we have posted one buffer in excess.
+ */
+static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+	struct p9_trans_rdma *rdma = client->trans;
+	atomic_inc(&rdma->excess_rc);
+	return 0;
+}
+
 /**
  * trans_create_rdma - Transport method for creating a transport instance
  * @client: client instance
@@ -721,6 +740,7 @@
 	.close = rdma_close,
 	.request = rdma_request,
 	.cancel = rdma_cancel,
+	.cancelled = rdma_cancelled,
 };
 
 /**
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ac2666c..6940d8f 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -164,8 +164,7 @@
 		p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
 		p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
 		req = p9_tag_lookup(chan->client, rc->tag);
-		req->status = REQ_STATUS_RCVD;
-		p9_client_cb(chan->client, req);
+		p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
 	}
 }
 
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index babd862..6b540f1 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -139,7 +139,7 @@
 	int error;
 	int size;
 
-	if (!inode->i_op || !inode->i_op->getxattr)
+	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 	desc = init_desc(type);
 	if (IS_ERR(desc))
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 996092f..6e0bd93 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -64,7 +64,7 @@
 	int error;
 	int count = 0;
 
-	if (!inode->i_op || !inode->i_op->getxattr)
+	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 
 	for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
index aab9fa5..90987d1 100644
--- a/security/integrity/integrity_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -40,7 +40,7 @@
 
 	ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
 	audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
-			 current->pid,
+			 task_pid_nr(current),
 			 from_kuid(&init_user_ns, current_cred()->uid),
 			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
 			 audit_get_sessionid(current));
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 9a62045..69fdf3b 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -220,7 +220,7 @@
 	 */
 	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
 
-	audit_log_format(ab, " pid=%d comm=", tsk->pid);
+	audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
 	audit_log_untrustedstring(ab, tsk->comm);
 
 	switch (a->type) {
@@ -278,9 +278,12 @@
 	}
 	case LSM_AUDIT_DATA_TASK:
 		tsk = a->u.tsk;
-		if (tsk && tsk->pid) {
-			audit_log_format(ab, " pid=%d comm=", tsk->pid);
-			audit_log_untrustedstring(ab, tsk->comm);
+		if (tsk) {
+			pid_t pid = task_pid_nr(tsk);
+			if (pid) {
+				audit_log_format(ab, " pid=%d comm=", pid);
+				audit_log_untrustedstring(ab, tsk->comm);
+			}
 		}
 		break;
 	case LSM_AUDIT_DATA_NET:
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 80a09c3..a3386d1 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -173,7 +173,7 @@
 		 * Use filesystem name if filesystem does not support rename()
 		 * operation.
 		 */
-		if (inode->i_op && !inode->i_op->rename)
+		if (!inode->i_op->rename)
 			goto prepend_filesystem_name;
 	}
 	/* Prepend device name. */
@@ -282,7 +282,7 @@
 		 * Get local name for filesystems without rename() operation
 		 * or dentry without vfsmount.
 		 */
-		if (!path->mnt || (inode->i_op && !inode->i_op->rename))
+		if (!path->mnt || !inode->i_op->rename)
 			pos = tomoyo_get_local_path(path->dentry, buf,
 						    buf_len - 1);
 		/* Get absolute name for the rest. */
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 6c2dc38..7e21621 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -150,10 +150,8 @@
 	kfree(device->private_data);
 }
 
-int snd_cs8427_create(struct snd_i2c_bus *bus,
-		      unsigned char addr,
-		      unsigned int reset_timeout,
-		      struct snd_i2c_device **r_cs8427)
+int snd_cs8427_init(struct snd_i2c_bus *bus,
+		    struct snd_i2c_device *device)
 {
 	static unsigned char initvals1[] = {
 	  CS8427_REG_CONTROL1 | CS8427_REG_AUTOINC,
@@ -200,22 +198,10 @@
 	     Inhibit E->F transfers. */
 	  CS8427_UD | CS8427_EFTUI | CS8427_DETUI,
 	};
+	struct cs8427 *chip = device->private_data;
 	int err;
-	struct cs8427 *chip;
-	struct snd_i2c_device *device;
 	unsigned char buf[24];
 
-	if ((err = snd_i2c_device_create(bus, "CS8427",
-					 CS8427_ADDR | (addr & 7),
-					 &device)) < 0)
-		return err;
-	chip = device->private_data = kzalloc(sizeof(*chip), GFP_KERNEL);
-	if (chip == NULL) {
-	      	snd_i2c_device_free(device);
-		return -ENOMEM;
-	}
-	device->private_free = snd_cs8427_free;
-	
 	snd_i2c_lock(bus);
 	err = snd_cs8427_reg_read(device, CS8427_REG_ID_AND_VER);
 	if (err != CS8427_VER8427A) {
@@ -264,10 +250,44 @@
 	snd_i2c_unlock(bus);
 
 	/* turn on run bit and rock'n'roll */
+	snd_cs8427_reset(device);
+
+	return 0;
+
+__fail:
+	snd_i2c_unlock(bus);
+
+	return err;
+}
+EXPORT_SYMBOL(snd_cs8427_init);
+
+int snd_cs8427_create(struct snd_i2c_bus *bus,
+		      unsigned char addr,
+		      unsigned int reset_timeout,
+		      struct snd_i2c_device **r_cs8427)
+{
+	int err;
+	struct cs8427 *chip;
+	struct snd_i2c_device *device;
+
+	err = snd_i2c_device_create(bus, "CS8427", CS8427_ADDR | (addr & 7),
+				    &device);
+	if (err < 0)
+		return err;
+	chip = device->private_data = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL) {
+		snd_i2c_device_free(device);
+		return -ENOMEM;
+	}
+	device->private_free = snd_cs8427_free;
+
 	if (reset_timeout < 1)
 		reset_timeout = 1;
 	chip->reset_timeout = reset_timeout;
-	snd_cs8427_reset(device);
+
+	err = snd_cs8427_init(bus, device);
+	if (err)
+		goto __fail;
 
 #if 0	// it's nice for read tests
 	{
@@ -286,7 +306,6 @@
 	return 0;
 
       __fail:
-      	snd_i2c_unlock(bus);
       	snd_i2c_device_free(device);
       	return err < 0 ? err : -EIO;
 }
diff --git a/sound/mips/au1x00.c b/sound/mips/au1x00.c
index a7cc49e..d10ef76 100644
--- a/sound/mips/au1x00.c
+++ b/sound/mips/au1x00.c
@@ -725,15 +725,4 @@
 	.remove		= au1000_ac97_remove,
 };
 
-static int __init au1000_ac97_load(void)
-{
-	return platform_driver_register(&au1000_ac97c_driver);
-}
-
-static void __exit au1000_ac97_unload(void)
-{
-	platform_driver_unregister(&au1000_ac97c_driver);
-}
-
-module_init(au1000_ac97_load);
-module_exit(au1000_ac97_unload);
+module_platform_driver(au1000_ac97c_driver);
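
Aside (not part of the patch): module_platform_driver() replaces the hand-rolled init/exit boilerplate removed above; roughly, the macro expands to the equivalent of (simplified sketch):

	static int __init au1000_ac97c_driver_init(void)
	{
		return platform_driver_register(&au1000_ac97c_driver);
	}
	module_init(au1000_ac97c_driver_init);

	static void __exit au1000_ac97c_driver_exit(void)
	{
		platform_driver_unregister(&au1000_ac97c_driver);
	}
	module_exit(au1000_ac97c_driver_exit);
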
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 4918b71..ec1ee07 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -50,8 +50,6 @@
 #include <linux/pnp.h>
 #include <linux/spinlock.h>
 
-#define DEB(x)
-#define DEB1(x)
 #include "sound_config.h"
 
 #include "ad1848.h"
@@ -1016,8 +1014,6 @@
 	ad1848_info    *devc = (ad1848_info *) audio_devs[dev]->devc;
 	ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
 
-	DEB(printk("ad1848_close(void)\n"));
-
 	devc->intr_active = 0;
 	ad1848_halt(dev);
 
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index 87910e9..c2d45a5 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -733,19 +733,7 @@
 	},
 };
 
-static int __init amiga_audio_init(void)
-{
-	return platform_driver_probe(&amiga_audio_driver, amiga_audio_probe);
-}
-
-module_init(amiga_audio_init);
-
-static void __exit amiga_audio_exit(void)
-{
-	platform_driver_unregister(&amiga_audio_driver);
-}
-
-module_exit(amiga_audio_exit);
+module_platform_driver_probe(amiga_audio_driver, amiga_audio_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:amiga-audio");
diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c
index c5c2440..4709e59 100644
--- a/sound/oss/opl3.c
+++ b/sound/oss/opl3.c
@@ -275,7 +275,6 @@
 	 devc->v_alloc->map[voice] = 0;
 
 	 map = &pv_map[devc->lv_map[voice]];
-	 DEB(printk("Kill note %d\n", voice));
 
 	 if (map->voice_mode == 0)
 		 return 0;
@@ -873,8 +872,6 @@
 
 	map = &pv_map[devc->lv_map[voice]];
 
-	DEB(printk("Aftertouch %d\n", voice));
-
 	if (map->voice_mode == 0)
 		return;
 
diff --git a/sound/oss/pas2_mixer.c b/sound/oss/pas2_mixer.c
index a0bcb85..50b5bd5 100644
--- a/sound/oss/pas2_mixer.c
+++ b/sound/oss/pas2_mixer.c
@@ -21,10 +21,6 @@
 
 #include "pas2.h"
 
-#ifndef DEB
-#define DEB(what)		/* (what) */
-#endif
-
 extern int      pas_translate_code;
 extern char     pas_model;
 extern int     *pas_osp;
@@ -120,8 +116,6 @@
 {
 	int             left, right, devmask, changed, i, mixer = 0;
 
-	DEB(printk("static int pas_mixer_set(int whichDev = %d, unsigned int level = %X)\n", whichDev, level));
-
 	left = level & 0x7f;
 	right = (level & 0x7f00) >> 8;
 
@@ -207,8 +201,6 @@
 {
 	int             foo;
 
-	DEB(printk("pas2_mixer.c: void pas_mixer_reset(void)\n"));
-
 	for (foo = 0; foo < SOUND_MIXER_NRDEVICES; foo++)
 		pas_mixer_set(foo, levels[foo]);
 
@@ -220,7 +212,6 @@
 	int level,v ;
 	int __user *p = (int __user *)arg;
 
-	DEB(printk("pas2_mixer.c: int pas_mixer_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));
 	if (cmd == SOUND_MIXER_PRIVATE1) { /* Set loudness bit */
 		if (get_user(level, p))
 			return -EFAULT;
diff --git a/sound/oss/pas2_pcm.c b/sound/oss/pas2_pcm.c
index 6f13ab4..474803b 100644
--- a/sound/oss/pas2_pcm.c
+++ b/sound/oss/pas2_pcm.c
@@ -22,10 +22,6 @@
 
 #include "pas2.h"
 
-#ifndef DEB
-#define DEB(WHAT)
-#endif
-
 #define PAS_PCM_INTRBITS (0x08)
 /*
  * Sample buffer timer interrupt enable
@@ -156,8 +152,6 @@
 	int val, ret;
 	int __user *p = arg;
 
-	DEB(printk("pas2_pcm.c: static int pas_audio_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));
-
 	switch (cmd) 
 	{
 	case SOUND_PCM_WRITE_RATE:
@@ -204,8 +198,6 @@
 
 static void pas_audio_reset(int dev)
 {
-	DEB(printk("pas2_pcm.c: static void pas_audio_reset(void)\n"));
-
 	pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);	/* Disable PCM */
 }
 
@@ -214,8 +206,6 @@
 	int             err;
 	unsigned long   flags;
 
-	DEB(printk("pas2_pcm.c: static int pas_audio_open(int mode = %X)\n", mode));
-
 	spin_lock_irqsave(&pas_lock, flags);
 	if (pcm_busy)
 	{
@@ -239,8 +229,6 @@
 {
 	unsigned long   flags;
 
-	DEB(printk("pas2_pcm.c: static void pas_audio_close(void)\n"));
-
 	spin_lock_irqsave(&pas_lock, flags);
 
 	pas_audio_reset(dev);
@@ -256,8 +244,6 @@
 {
 	unsigned long   flags, cnt;
 
-	DEB(printk("pas2_pcm.c: static void pas_audio_output_block(char *buf = %P, int count = %X)\n", buf, count));
-
 	cnt = count;
 	if (audio_devs[dev]->dmap_out->dma > 3)
 		cnt >>= 1;
@@ -303,8 +289,6 @@
 	unsigned long   flags;
 	int             cnt;
 
-	DEB(printk("pas2_pcm.c: static void pas_audio_start_input(char *buf = %P, int count = %X)\n", buf, count));
-
 	cnt = count;
 	if (audio_devs[dev]->dmap_out->dma > 3)
 		cnt >>= 1;
@@ -388,8 +372,6 @@
 
 void __init pas_pcm_init(struct address_info *hw_config)
 {
-	DEB(printk("pas2_pcm.c: long pas_pcm_init()\n"));
-
 	pcm_bitsok = 8;
 	if (pas_read(0xEF8B) & 0x08)
 		pcm_bitsok |= 16;
diff --git a/sound/oss/sb_common.c b/sound/oss/sb_common.c
index 851a1da..3d50fb4 100644
--- a/sound/oss/sb_common.c
+++ b/sound/oss/sb_common.c
@@ -226,8 +226,6 @@
 {
 	int loopc;
 
-	DEB(printk("Entered sb_dsp_reset()\n"));
-
 	if (devc->model == MDL_ESS) return ess_dsp_reset (devc);
 
 	/* This is only for non-ESS chips */
@@ -246,8 +244,6 @@
 		return 0;	/* Sorry */
 	}
 
-	DEB(printk("sb_dsp_reset() OK\n"));
-
 	return 1;
 }
 
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index 0e7254b..b47a690 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -865,8 +865,6 @@
 ess_show_mixerregs (devc);
 #endif
 
-	DEB(printk("Entered ess_dsp_reset()\n"));
-
 	outb(3, DSP_RESET); /* Reset FIFO too */
 
 	udelay(10);
@@ -881,8 +879,6 @@
 	}
 	ess_extended (devc);
 
-	DEB(printk("sb_dsp_reset() OK\n"));
-
 #ifdef FKS_LOGGING
 printk(KERN_INFO "FKS: dsp_reset 2\n");
 ess_show_mixerregs (devc);
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index 9b9f7d3..c0eea1d 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -216,8 +216,6 @@
 
 	dev = dev >> 4;
 
-	DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
-
 	if (mode == OPEN_READ)
 		return -EIO;
 
@@ -959,8 +957,6 @@
 	dev = dev >> 4;
 	mode = translate_mode(file);
 
-	DEB(printk("sequencer_open(dev=%d)\n", dev));
-
 	if (!sequencer_ok)
 	{
 /*		printk("Sound card: sequencer not initialized\n");*/
@@ -1133,8 +1129,6 @@
 
 	dev = dev >> 4;
 
-	DEB(printk("sequencer_release(dev=%d)\n", dev));
-
 	/*
 	 * Wait until the queue is empty (if we don't have nonblock)
 	 */
diff --git a/sound/oss/sound_config.h b/sound/oss/sound_config.h
index 9d35c4c..f2554ab 100644
--- a/sound/oss/sound_config.h
+++ b/sound/oss/sound_config.h
@@ -123,10 +123,6 @@
 #include "sound_calls.h"
 #include "dev_table.h"
 
-#ifndef DEB
-#define DEB(x)
-#endif
-
 #ifndef DDB
 #define DDB(x) do {} while (0)
 #endif
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index e778034..b70c7c8 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -154,7 +154,6 @@
 	 
 	mutex_lock(&soundcard_mutex);
 	
-	DEB(printk("sound_read(dev=%d, count=%d)\n", dev, count));
 	switch (dev & 0x0f) {
 	case SND_DEV_DSP:
 	case SND_DEV_DSP16:
@@ -180,7 +179,6 @@
 	int ret = -EINVAL;
 	
 	mutex_lock(&soundcard_mutex);
-	DEB(printk("sound_write(dev=%d, count=%d)\n", dev, count));
 	switch (dev & 0x0f) {
 	case SND_DEV_SEQ:
 	case SND_DEV_SEQ2:
@@ -206,7 +204,6 @@
 	int dev = iminor(inode);
 	int retval;
 
-	DEB(printk("sound_open(dev=%d)\n", dev));
 	if ((dev >= SND_NDEVS) || (dev < 0)) {
 		printk(KERN_ERR "Invalid minor device %d\n", dev);
 		return -ENXIO;
@@ -257,7 +254,6 @@
 	int dev = iminor(inode);
 
 	mutex_lock(&soundcard_mutex);
-	DEB(printk("sound_release(dev=%d)\n", dev));
 	switch (dev & 0x0f) {
 	case SND_DEV_CTL:
 		module_put(mixer_devs[dev >> 4]->owner);
@@ -351,7 +347,6 @@
 			if (!access_ok(VERIFY_WRITE, p, len))
 				return -EFAULT;
 	}
-	DEB(printk("sound_ioctl(dev=%d, cmd=0x%x, arg=0x%x)\n", dev, cmd, arg));
 	if (cmd == OSS_GETVERSION)
 		return __put_user(SOUND_VERSION, (int __user *)p);
 	
@@ -409,7 +404,6 @@
 	struct inode *inode = file_inode(file);
 	int dev = iminor(inode);
 
-	DEB(printk("sound_poll(dev=%d)\n", dev));
 	switch (dev & 0x0f) {
 	case SND_DEV_SEQ:
 	case SND_DEV_SEQ2:
diff --git a/sound/oss/uart401.c b/sound/oss/uart401.c
index 5433c6f..62b8869 100644
--- a/sound/oss/uart401.c
+++ b/sound/oss/uart401.c
@@ -274,19 +274,12 @@
 		}
 	}
 
-
+	/* Flush input before enabling interrupts */
 	if (ok)
-	{
-		DEB(printk("Reset UART401 OK\n"));
-	}
+		uart401_input_loop(devc);
 	else
 		DDB(printk("Reset UART401 failed - No hardware detected.\n"));
 
-	if (ok)
-		uart401_input_loop(devc);	/*
-						 * Flush input before enabling interrupts
-						 */
-
 	return ok;
 }
 
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 97993e1..248b90a 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -187,13 +187,14 @@
 		struct azx_dev *azx_dev = &chip->azx_dev[dev];
 		dsp_lock(azx_dev);
 		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
-			res = azx_dev;
-			if (res->assigned_key == key) {
-				res->opened = 1;
-				res->assigned_key = key;
+			if (azx_dev->assigned_key == key) {
+				azx_dev->opened = 1;
+				azx_dev->assigned_key = key;
 				dsp_unlock(azx_dev);
 				return azx_dev;
 			}
+			if (!res)
+				res = azx_dev;
 		}
 		dsp_unlock(azx_dev);
 	}
@@ -1604,7 +1605,7 @@
 }
 
 /* reset codec link */
-static int azx_reset(struct azx *chip, int full_reset)
+static int azx_reset(struct azx *chip, bool full_reset)
 {
 	if (!full_reset)
 		goto __skip;
@@ -1701,7 +1702,7 @@
 /*
  * reset and start the controller registers
  */
-void azx_init_chip(struct azx *chip, int full_reset)
+void azx_init_chip(struct azx *chip, bool full_reset)
 {
 	if (chip->initialized)
 		return;
@@ -1758,7 +1759,7 @@
 
 #ifdef CONFIG_PM_RUNTIME
 	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
-		if (chip->card->dev->power.runtime_status != RPM_ACTIVE)
+		if (!pm_runtime_active(chip->card->dev))
 			return IRQ_NONE;
 #endif
 
@@ -1841,7 +1842,7 @@
 
 	bus->in_reset = 1;
 	azx_stop_chip(chip);
-	azx_init_chip(chip, 1);
+	azx_init_chip(chip, true);
 #ifdef CONFIG_PM
 	if (chip->initialized) {
 		struct azx_pcm *p;
@@ -1948,7 +1949,7 @@
 				 * get back to the sanity state.
 				 */
 				azx_stop_chip(chip);
-				azx_init_chip(chip, 1);
+				azx_init_chip(chip, true);
 			}
 		}
 	}
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 1d2e3be..baf0e77 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -37,7 +37,7 @@
 void azx_free_stream_pages(struct azx *chip);
 
 /* Low level azx interface */
-void azx_init_chip(struct azx *chip, int full_reset);
+void azx_init_chip(struct azx *chip, bool full_reset);
 void azx_stop_chip(struct azx *chip);
 void azx_enter_link_reset(struct azx *chip);
 irqreturn_t azx_interrupt(int irq, void *dev_id);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 77ca894..d6bca62 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -636,7 +636,7 @@
 		return -EIO;
 	azx_init_pci(chip);
 
-	azx_init_chip(chip, 1);
+	azx_init_chip(chip, true);
 
 	snd_hda_resume(chip->bus);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
@@ -689,7 +689,7 @@
 	status = azx_readw(chip, STATESTS);
 
 	azx_init_pci(chip);
-	azx_init_chip(chip, 1);
+	azx_init_chip(chip, true);
 
 	bus = chip->bus;
 	if (status && bus) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ea2351d..14ae979 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3026,6 +3026,11 @@
 	bool hp_pin_sense;
 	int val;
 
+	if (!spec->gen.autocfg.hp_outs) {
+		if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+			hp_pin = spec->gen.autocfg.line_out_pins[0];
+	}
+
 	alc283_restore_default_value(codec);
 
 	if (!hp_pin)
@@ -3062,6 +3067,11 @@
 	bool hp_pin_sense;
 	int val;
 
+	if (!spec->gen.autocfg.hp_outs) {
+		if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+			hp_pin = spec->gen.autocfg.line_out_pins[0];
+	}
+
 	if (!hp_pin) {
 		alc269_shutup(codec);
 		return;
@@ -3085,6 +3095,7 @@
 
 	if (hp_pin_sense)
 		msleep(100);
+	alc_auto_setup_eapd(codec, false);
 	snd_hda_shutup_pins(codec);
 	alc_write_coef_idx(codec, 0x43, 0x9614);
 }
@@ -3361,8 +3372,9 @@
 
 	if (spec->mute_led_polarity)
 		enabled = !enabled;
-	pinval = AC_PINCTL_IN_EN |
-		(enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80);
+	pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
+	pinval &= ~AC_PINCTL_VREFEN;
+	pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
 	if (spec->mute_led_nid)
 		snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
 }
@@ -3994,6 +4006,10 @@
 		spec->gen.mixer_nid = 0;
 		break;
 	case HDA_FIXUP_ACT_INIT:
+		/* MIC2-VREF control */
+		/* Set to manual mode */
+		val = alc_read_coef_idx(codec, 0x06);
+		alc_write_coef_idx(codec, 0x06, val & ~0x000c);
 		/* Enable Line1 input control by verb */
 		val = alc_read_coef_idx(codec, 0x1a);
 		alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
@@ -4602,6 +4618,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4768,7 +4785,7 @@
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
-	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-chrome"},
+	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
 	{}
 };
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index ed2144e..496dbd0 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -579,12 +579,37 @@
 #ifdef CONFIG_PM_SLEEP
 static int snd_ice1712_delta_resume(struct snd_ice1712 *ice)
 {
-	unsigned char akm_backup[AK4XXX_IMAGE_SIZE];
+	unsigned char akm_img_bak[AK4XXX_IMAGE_SIZE];
+	unsigned char akm_vol_bak[AK4XXX_IMAGE_SIZE];
+
+	/* init spdif */
+	switch (ice->eeprom.subvendor) {
+	case ICE1712_SUBDEVICE_AUDIOPHILE:
+	case ICE1712_SUBDEVICE_DELTA410:
+	case ICE1712_SUBDEVICE_DELTA1010E:
+	case ICE1712_SUBDEVICE_DELTA1010LT:
+	case ICE1712_SUBDEVICE_VX442:
+	case ICE1712_SUBDEVICE_DELTA66E:
+		snd_cs8427_init(ice->i2c, ice->cs8427);
+		break;
+	case ICE1712_SUBDEVICE_DELTA1010:
+	case ICE1712_SUBDEVICE_MEDIASTATION:
+		/* nothing */
+		break;
+	case ICE1712_SUBDEVICE_DELTADIO2496:
+	case ICE1712_SUBDEVICE_DELTA66:
+		/* Set spdif defaults */
+		snd_ice1712_delta_cs8403_spdif_write(ice, ice->spdif.cs8403_bits);
+		break;
+	}
+
 	/* init codec and restore registers */
 	if (ice->akm_codecs) {
-		memcpy(akm_backup, ice->akm->images, sizeof(akm_backup));
+		memcpy(akm_img_bak, ice->akm->images, sizeof(akm_img_bak));
+		memcpy(akm_vol_bak, ice->akm->volumes, sizeof(akm_vol_bak));
 		snd_akm4xxx_init(ice->akm);
-		memcpy(ice->akm->images, akm_backup, sizeof(akm_backup));
+		memcpy(ice->akm->images, akm_img_bak, sizeof(akm_img_bak));
+		memcpy(ice->akm->volumes, akm_vol_bak, sizeof(akm_vol_bak));
 		snd_akm4xxx_reset(ice->akm, 0);
 	}
 
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 291672f..d9b9e45 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -685,9 +685,10 @@
 	if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
 		return 0;
 	ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
+	ptr = bytes_to_frames(substream->runtime, ptr);
 	if (ptr == runtime->buffer_size)
 		ptr = 0;
-	return bytes_to_frames(substream->runtime, ptr);
+	return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
@@ -704,9 +705,10 @@
 		addr = ICE1712_DSC_ADDR0;
 	ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
 		ice->playback_con_virt_addr[substream->number];
+	ptr = bytes_to_frames(substream->runtime, ptr);
 	if (ptr == substream->runtime->buffer_size)
 		ptr = 0;
-	return bytes_to_frames(substream->runtime, ptr);
+	return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
@@ -717,9 +719,10 @@
 	if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
 		return 0;
 	ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
+	ptr = bytes_to_frames(substream->runtime, ptr);
 	if (ptr == substream->runtime->buffer_size)
 		ptr = 0;
-	return bytes_to_frames(substream->runtime, ptr);
+	return ptr;
 }
 
 static const struct snd_pcm_hardware snd_ice1712_playback = {
@@ -1048,6 +1051,8 @@
 	old = inb(ICEMT(ice, RATE));
 	if (!force && old == val)
 		goto __out;
+
+	ice->cur_rate = rate;
 	outb(val, ICEMT(ice, RATE));
 	spin_unlock_irqrestore(&ice->reg_lock, flags);
 
@@ -1114,9 +1119,10 @@
 	if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
 		return 0;
 	ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
+	ptr = bytes_to_frames(substream->runtime, ptr);
 	if (ptr == substream->runtime->buffer_size)
 		ptr = 0;
-	return bytes_to_frames(substream->runtime, ptr);
+	return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
@@ -1127,9 +1133,10 @@
 	if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
 		return 0;
 	ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
+	ptr = bytes_to_frames(substream->runtime, ptr);
 	if (ptr == substream->runtime->buffer_size)
 		ptr = 0;
-	return bytes_to_frames(substream->runtime, ptr);
+	return ptr;
 }
 
 static const struct snd_pcm_hardware snd_ice1712_playback_pro = {
@@ -2832,6 +2839,12 @@
 	snd_pcm_suspend_all(ice->pcm_ds);
 	snd_ac97_suspend(ice->ac97);
 
+	spin_lock_irq(&ice->reg_lock);
+	ice->pm_saved_is_spdif_master = is_spdif_master(ice);
+	ice->pm_saved_spdif_ctrl = inw(ICEMT(ice, ROUTE_SPDOUT));
+	ice->pm_saved_route = inw(ICEMT(ice, ROUTE_PSDOUT03));
+	spin_unlock_irq(&ice->reg_lock);
+
 	if (ice->pm_suspend)
 		ice->pm_suspend(ice);
 
@@ -2846,6 +2859,7 @@
 	struct pci_dev *pci = to_pci_dev(dev);
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct snd_ice1712 *ice = card->private_data;
+	int rate;
 
 	if (!ice->pm_suspend_enabled)
 		return 0;
@@ -2860,14 +2874,37 @@
 
 	pci_set_master(pci);
 
+	if (ice->cur_rate)
+		rate = ice->cur_rate;
+	else
+		rate = PRO_RATE_DEFAULT;
+
 	if (snd_ice1712_chip_init(ice) < 0) {
 		snd_card_disconnect(card);
 		return -EIO;
 	}
 
+	ice->cur_rate = rate;
+
 	if (ice->pm_resume)
 		ice->pm_resume(ice);
 
+	if (ice->pm_saved_is_spdif_master) {
+		/* switching to external clock via SPDIF */
+		spin_lock_irq(&ice->reg_lock);
+		outb(inb(ICEMT(ice, RATE)) | ICE1712_SPDIF_MASTER,
+			ICEMT(ice, RATE));
+		spin_unlock_irq(&ice->reg_lock);
+		snd_ice1712_set_input_clock_source(ice, 1);
+	} else {
+		/* internal on-card clock */
+		snd_ice1712_set_pro_rate(ice, rate, 1);
+		snd_ice1712_set_input_clock_source(ice, 0);
+	}
+
+	outw(ice->pm_saved_spdif_ctrl, ICEMT(ice, ROUTE_SPDOUT));
+	outw(ice->pm_saved_route, ICEMT(ice, ROUTE_PSDOUT03));
+
 	if (ice->ac97)
 		snd_ac97_resume(ice->ac97);
 
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 09f7e77..f500905 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -902,7 +902,6 @@
 {
 	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
-	int ret;
 
 	alc5623_reset(codec);
 
@@ -961,7 +960,7 @@
 		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 }
 
 /* power down chip */
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index ec071a6..85942ca 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1061,7 +1061,6 @@
 static int alc5632_probe(struct snd_soc_codec *codec)
 {
 	struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
-	int ret;
 
 	/* power on device  */
 	alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1075,7 +1074,7 @@
 		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 }
 
 /* power down chip */
@@ -1191,11 +1190,18 @@
 };
 MODULE_DEVICE_TABLE(i2c, alc5632_i2c_table);
 
+static const struct of_device_id alc5632_of_match[] = {
+	{ .compatible = "realtek,alc5632", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, alc5632_of_match);
+
 /* i2c codec control layer */
 static struct i2c_driver alc5632_i2c_driver = {
 	.driver = {
 		.name = "alc5632",
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(alc5632_of_match),
 	},
 	.probe = alc5632_i2c_probe,
 	.remove =  alc5632_i2c_remove,
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index f0ca6be..460d355 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1259,7 +1259,7 @@
 	}
 
 	dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n",
-			reg & 0xFF);
+		 reg & CS42L52_CHIP_REV_MASK);
 
 	/* Set Platform Data */
 	if (cs42l52->pdata.mica_diff_cfg)
diff --git a/sound/soc/codecs/cs42l52.h b/sound/soc/codecs/cs42l52.h
index 6fb8f00..ac445993 100644
--- a/sound/soc/codecs/cs42l52.h
+++ b/sound/soc/codecs/cs42l52.h
@@ -37,7 +37,7 @@
 #define CS42L52_CHIP_REV_A0			0x00
 #define CS42L52_CHIP_REV_A1			0x01
 #define CS42L52_CHIP_REV_B0			0x02
-#define CS42L52_CHIP_REV_MASK			0x03
+#define CS42L52_CHIP_REV_MASK			0x07
 
 #define CS42L52_PWRCTL1				0x02
 #define CS42L52_PWRCTL1_PDN_ALL			0x9F
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 082299a..8502032 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -495,17 +495,16 @@
 	regcache_cache_bypass(cs42xx8->regmap, true);
 
 	/* Validate the chip ID */
-	regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
-	if (val < 0) {
-		dev_err(dev, "failed to get device ID: %x", val);
-		ret = -EINVAL;
+	ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
+	if (ret < 0) {
+		dev_err(dev, "failed to get device ID, ret = %d", ret);
 		goto err_enable;
 	}
 
 	/* The top four bits of the chip ID should be 0000 */
-	if ((val & CS42XX8_CHIPID_CHIP_ID_MASK) != 0x00) {
+	if (((val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4) != 0x00) {
 		dev_err(dev, "unmatched chip ID: %d\n",
-				val & CS42XX8_CHIPID_CHIP_ID_MASK);
+			(val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4);
 		ret = -EINVAL;
 		goto err_enable;
 	}
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 7d168ec..48f3fef 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1571,7 +1571,8 @@
 	}
 
 	dev_info(&i2c->dev, "Revision: %d.%d\n",
-		 (reg & DA732X_ID_MAJOR_MASK), (reg & DA732X_ID_MINOR_MASK));
+		 (reg & DA732X_ID_MAJOR_MASK) >> 4,
+		 (reg & DA732X_ID_MINOR_MASK));
 
 	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x,
 				     da732x_dai, ARRAY_SIZE(da732x_dai));
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 98c6e10..f7b0b37 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2399,11 +2399,18 @@
 };
 MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
 
+static const struct of_device_id max98090_of_match[] = {
+	{ .compatible = "maxim,max98090", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max98090_of_match);
+
 static struct i2c_driver max98090_i2c_driver = {
 	.driver = {
 		.name = "max98090",
 		.owner = THIS_MODULE,
 		.pm = &max98090_pm,
+		.of_match_table = of_match_ptr(max98090_of_match),
 	},
 	.probe  = max98090_i2c_probe,
 	.remove = max98090_i2c_remove,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 0061ae6..68b4dd6 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2074,6 +2074,14 @@
 };
 MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
 
+#if defined(CONFIG_OF)
+static const struct of_device_id rt5640_of_match[] = {
+	{ .compatible = "realtek,rt5640", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rt5640_of_match);
+#endif
+
 #ifdef CONFIG_ACPI
 static struct acpi_device_id rt5640_acpi_match[] = {
 	{ "INT33CA", 0 },
@@ -2203,6 +2211,7 @@
 		.name = "rt5640",
 		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5640_acpi_match),
+		.of_match_table = of_match_ptr(rt5640_of_match),
 	},
 	.probe = rt5640_i2c_probe,
 	.remove   = rt5640_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic23-i2c.c b/sound/soc/codecs/tlv320aic23-i2c.c
index 20fc460..b73c94e 100644
--- a/sound/soc/codecs/tlv320aic23-i2c.c
+++ b/sound/soc/codecs/tlv320aic23-i2c.c
@@ -43,9 +43,16 @@
 
 MODULE_DEVICE_TABLE(i2c, tlv320aic23_id);
 
+static const struct of_device_id tlv320aic23_of_match[] = {
+	{ .compatible = "ti,tlv320aic23", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tlv320aic23_of_match);
+
 static struct i2c_driver tlv320aic23_i2c_driver = {
 	.driver = {
 		   .name = "tlv320aic23-codec",
+		   .of_match_table = of_match_ptr(tlv320aic23_of_match),
 		   },
 	.probe = tlv320aic23_i2c_probe,
 	.remove = __exit_p(tlv320aic23_i2c_remove),
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index a01ae97..4f75cac 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -336,7 +336,7 @@
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-		mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+		mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
 		break;
 
@@ -344,7 +344,7 @@
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-		mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+		mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
 		break;
 
@@ -352,7 +352,7 @@
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-		mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+		mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
 		break;
 
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index c4a4231..56da8c8 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -23,6 +23,71 @@
 
 #include "fsl_sai.h"
 
+#define FSL_SAI_FLAGS (FSL_SAI_CSR_SEIE |\
+		       FSL_SAI_CSR_FEIE)
+
+static irqreturn_t fsl_sai_isr(int irq, void *devid)
+{
+	struct fsl_sai *sai = (struct fsl_sai *)devid;
+	struct device *dev = &sai->pdev->dev;
+	u32 xcsr, mask;
+
+	/* Only handle the interrupts we enabled */
+	mask = (FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT) << FSL_SAI_CSR_xF_SHIFT;
+
+	/* Tx IRQ */
+	regmap_read(sai->regmap, FSL_SAI_TCSR, &xcsr);
+	xcsr &= mask;
+
+	if (xcsr & FSL_SAI_CSR_WSF)
+		dev_dbg(dev, "isr: Start of Tx word detected\n");
+
+	if (xcsr & FSL_SAI_CSR_SEF)
+		dev_warn(dev, "isr: Tx Frame sync error detected\n");
+
+	if (xcsr & FSL_SAI_CSR_FEF) {
+		dev_warn(dev, "isr: Transmit underrun detected\n");
+		/* FIFO reset for safety */
+		xcsr |= FSL_SAI_CSR_FR;
+	}
+
+	if (xcsr & FSL_SAI_CSR_FWF)
+		dev_dbg(dev, "isr: Enabled transmit FIFO is empty\n");
+
+	if (xcsr & FSL_SAI_CSR_FRF)
+		dev_dbg(dev, "isr: Transmit FIFO watermark has been reached\n");
+
+	regmap_update_bits(sai->regmap, FSL_SAI_TCSR,
+			   FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
+
+	/* Rx IRQ */
+	regmap_read(sai->regmap, FSL_SAI_RCSR, &xcsr);
+	xcsr &= mask;
+
+	if (xcsr & FSL_SAI_CSR_WSF)
+		dev_dbg(dev, "isr: Start of Rx word detected\n");
+
+	if (xcsr & FSL_SAI_CSR_SEF)
+		dev_warn(dev, "isr: Rx Frame sync error detected\n");
+
+	if (xcsr & FSL_SAI_CSR_FEF) {
+		dev_warn(dev, "isr: Receive overflow detected\n");
+		/* FIFO reset for safety */
+		xcsr |= FSL_SAI_CSR_FR;
+	}
+
+	if (xcsr & FSL_SAI_CSR_FWF)
+		dev_dbg(dev, "isr: Enabled receive FIFO is full\n");
+
+	if (xcsr & FSL_SAI_CSR_FRF)
+		dev_dbg(dev, "isr: Receive FIFO watermark has been reached\n");
+
+	regmap_update_bits(sai->regmap, FSL_SAI_RCSR,
+			   FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
+
+	return IRQ_HANDLED;
+}
+
 static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai,
 		int clk_id, unsigned int freq, int fsl_dir)
 {
@@ -114,7 +179,7 @@
 		 * that is, together with the last bit of the previous
 		 * data word.
 		 */
-		val_cr2 &= ~FSL_SAI_CR2_BCP;
+		val_cr2 |= FSL_SAI_CR2_BCP;
 		val_cr4 |= FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP;
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
@@ -122,7 +187,7 @@
 		 * Frame high, one word length for frame sync,
 		 * frame sync asserts with the first bit of the frame.
 		 */
-		val_cr2 &= ~FSL_SAI_CR2_BCP;
+		val_cr2 |= FSL_SAI_CR2_BCP;
 		val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
@@ -132,7 +197,7 @@
 		 * that is, together with the last bit of the previous
 		 * data word.
 		 */
-		val_cr2 &= ~FSL_SAI_CR2_BCP;
+		val_cr2 |= FSL_SAI_CR2_BCP;
 		val_cr4 &= ~FSL_SAI_CR4_FSP;
 		val_cr4 |= FSL_SAI_CR4_FSE;
 		sai->is_dsp_mode = true;
@@ -142,7 +207,7 @@
 		 * Frame high, one bit for frame sync,
 		 * frame sync asserts with the first bit of the frame.
 		 */
-		val_cr2 &= ~FSL_SAI_CR2_BCP;
+		val_cr2 |= FSL_SAI_CR2_BCP;
 		val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
 		sai->is_dsp_mode = true;
 		break;
@@ -373,8 +438,8 @@
 {
 	struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev);
 
-	regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, 0x0);
-	regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, 0x0);
+	regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, FSL_SAI_FLAGS);
+	regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, FSL_SAI_FLAGS);
 	regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK,
 			   FSL_SAI_MAXBURST_TX * 2);
 	regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK,
@@ -490,12 +555,14 @@
 	struct fsl_sai *sai;
 	struct resource *res;
 	void __iomem *base;
-	int ret;
+	int irq, ret;
 
 	sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
 	if (!sai)
 		return -ENOMEM;
 
+	sai->pdev = pdev;
+
 	sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs");
 	if (sai->big_endian_regs)
 		fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
@@ -514,6 +581,18 @@
 		return PTR_ERR(sai->regmap);
 	}
 
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, fsl_sai_isr, 0, np->name, sai);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to claim irq %u\n", irq);
+		return ret;
+	}
+
 	sai->dma_params_rx.addr = res->start + FSL_SAI_RDR;
 	sai->dma_params_tx.addr = res->start + FSL_SAI_TDR;
 	sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index e432260..a264185 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -37,7 +37,21 @@
 
 /* SAI Transmit/Receive Control Register */
 #define FSL_SAI_CSR_TERE	BIT(31)
+#define FSL_SAI_CSR_FR		BIT(25)
+#define FSL_SAI_CSR_xF_SHIFT	16
+#define FSL_SAI_CSR_xF_W_SHIFT	18
+#define FSL_SAI_CSR_xF_MASK	(0x1f << FSL_SAI_CSR_xF_SHIFT)
+#define FSL_SAI_CSR_xF_W_MASK	(0x7 << FSL_SAI_CSR_xF_W_SHIFT)
+#define FSL_SAI_CSR_WSF		BIT(20)
+#define FSL_SAI_CSR_SEF		BIT(19)
+#define FSL_SAI_CSR_FEF		BIT(18)
 #define FSL_SAI_CSR_FWF		BIT(17)
+#define FSL_SAI_CSR_FRF		BIT(16)
+#define FSL_SAI_CSR_xIE_SHIFT	8
+#define FSL_SAI_CSR_WSIE	BIT(12)
+#define FSL_SAI_CSR_SEIE	BIT(11)
+#define FSL_SAI_CSR_FEIE	BIT(10)
+#define FSL_SAI_CSR_FWIE	BIT(9)
 #define FSL_SAI_CSR_FRIE	BIT(8)
 #define FSL_SAI_CSR_FRDE	BIT(0)
 
@@ -99,6 +113,7 @@
 #define FSL_SAI_MAXBURST_RX 6
 
 struct fsl_sai {
+	struct platform_device *pdev;
 	struct regmap *regmap;
 
 	bool big_endian_regs;
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 4a88e36..76b072b 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -39,15 +39,15 @@
 };
 static struct s3c_ac97_info s3c_ac97;
 
-static struct s3c2410_dma_client s3c_dma_client_out = {
+static struct s3c_dma_client s3c_dma_client_out = {
 	.name = "AC97 PCMOut"
 };
 
-static struct s3c2410_dma_client s3c_dma_client_in = {
+static struct s3c_dma_client s3c_dma_client_in = {
 	.name = "AC97 PCMIn"
 };
 
-static struct s3c2410_dma_client s3c_dma_client_micin = {
+static struct s3c_dma_client s3c_dma_client_micin = {
 	.name = "AC97 MicIn"
 };
 
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 225e537..ad7c0f0 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -14,8 +14,12 @@
 
 #include <sound/dmaengine_pcm.h>
 
+struct s3c_dma_client {
+	char *name;
+};
+
 struct s3c_dma_params {
-	struct s3c2410_dma_client *client;	/* stream identifier */
+	struct s3c_dma_client *client;	/* stream identifier */
 	int channel;				/* Channel ID */
 	dma_addr_t dma_addr;
 	int dma_size;			/* Size of the DMA transfer */
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 0a9b44c..048ead9 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1211,10 +1211,10 @@
 	pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
 	pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
 	pri_dai->dma_playback.client =
-		(struct s3c2410_dma_client *)&pri_dai->dma_playback;
+		(struct s3c_dma_client *)&pri_dai->dma_playback;
 	pri_dai->dma_playback.ch_name = "tx";
 	pri_dai->dma_capture.client =
-		(struct s3c2410_dma_client *)&pri_dai->dma_capture;
+		(struct s3c_dma_client *)&pri_dai->dma_capture;
 	pri_dai->dma_capture.ch_name = "rx";
 	pri_dai->dma_playback.dma_size = 4;
 	pri_dai->dma_capture.dma_size = 4;
@@ -1233,7 +1233,7 @@
 		}
 		sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
 		sec_dai->dma_playback.client =
-			(struct s3c2410_dma_client *)&sec_dai->dma_playback;
+			(struct s3c_dma_client *)&sec_dai->dma_playback;
 		sec_dai->dma_playback.ch_name = "tx-sec";
 
 		if (!np) {
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 6a5e4bf..ab54e29 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -20,7 +20,6 @@
 #include <sound/pcm_params.h>
 
 #include <linux/platform_data/asoc-s3c.h>
-#include <mach/dma.h>
 
 #include "dma.h"
 #include "pcm.h"
@@ -132,11 +131,11 @@
 	struct s3c_dma_params	*dma_capture;
 };
 
-static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
+static struct s3c_dma_client s3c_pcm_dma_client_out = {
 	.name		= "PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c_pcm_dma_client_in = {
+static struct s3c_dma_client s3c_pcm_dma_client_in = {
 	.name		= "PCM Stereo in"
 };
 
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index d079445..e9bb5d7 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -33,11 +33,11 @@
 #include "regs-i2s-v2.h"
 #include "s3c2412-i2s.h"
 
-static struct s3c2410_dma_client s3c2412_dma_client_out = {
+static struct s3c_dma_client s3c2412_dma_client_out = {
 	.name		= "I2S PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c2412_dma_client_in = {
+static struct s3c_dma_client s3c2412_dma_client_in = {
 	.name		= "I2S PCM Stereo in"
 };
 
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index f31e916..d7b8457 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -31,11 +31,11 @@
 #include "dma.h"
 #include "s3c24xx-i2s.h"
 
-static struct s3c2410_dma_client s3c24xx_dma_client_out = {
+static struct s3c_dma_client s3c24xx_dma_client_out = {
 	.name = "I2S PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c24xx_dma_client_in = {
+static struct s3c_dma_client s3c24xx_dma_client_in = {
 	.name = "I2S PCM Stereo in"
 };
 
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 28487dc..cfe63b7 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -18,7 +18,6 @@
 #include <sound/pcm_params.h>
 
 #include <linux/platform_data/asoc-s3c.h>
-#include <mach/dma.h>
 
 #include "dma.h"
 #include "spdif.h"
@@ -94,7 +93,7 @@
 	struct s3c_dma_params	*dma_playback;
 };
 
-static struct s3c2410_dma_client spdif_dma_client_out = {
+static struct s3c_dma_client spdif_dma_client_out = {
 	.name		= "S/PDIF Stereo out",
 };
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 49de5c1..131336d 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1501,7 +1501,8 @@
 	 * The error should be lower than 2ms since the estimate relies
 	 * on two reads of a counter updated every ms.
 	 */
-	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+	if (printk_ratelimit() &&
+	    abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
 		dev_dbg(&subs->dev->dev,
 			"delay: estimated %d, actual %d\n",
 			est_delay, subs->last_delay);
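
As a worked example of the check above, assuming a 48 kHz stream: the dev_dbg() fires only when abs(est_delay - subs->last_delay) exceeds runtime->rate * 2 / 1000 = 96 frames, i.e. the 2 ms error bound mentioned in the comment, and only when printk_ratelimit() permits another message.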
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index b4ddb74..56bfb52 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -47,21 +47,22 @@
 .PP
 .SH FIELD DESCRIPTIONS
 .nf
-\fBpk\fP processor package number.
-\fBcor\fP processor core number.
+\fBPackage\fP processor package number.
+\fBCore\fP processor core number.
 \fBCPU\fP Linux CPU (logical processor) number.
 Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
-\fB%c0\fP percent of the interval that the CPU retired instructions.
-\fBGHz\fP average clock rate while the CPU was in c0 state.
-\fBTSC\fP average GHz that the TSC ran during the entire interval.
-\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
-\fBCTMP\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
-\fBPTMP\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
-\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
-\fBPkg_W\fP Watts consumed by the whole package.
-\fBCor_W\fP Watts consumed by the core part of the package.
-\fBGFX_W\fP Watts consumed by the Graphics part of the package -- available only on client processors.
-\fBRAM_W\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
+\fBAvg_MHz\fP number of cycles executed divided by the time elapsed.
+\fB%Busy\fP percent of the interval that the CPU retired instructions, i.e. the percentage of time spent in the "C0" state.
+\fBBzy_MHz\fP average clock rate while the CPU was busy (in the "C0" state).
+\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
+\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
+\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
+\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.
+\fBPkgWatt\fP Watts consumed by the whole package.
+\fBCorWatt\fP Watts consumed by the core part of the package.
+\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
+\fBRAMWatt\fP Watts consumed by the DRAM DIMMs -- available only on server processors.
 \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.
 \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
 .fi
@@ -78,29 +79,17 @@
 Subsequent rows show per-CPU statistics.
 
 .nf
-[root@sandy]# ./turbostat
-cor CPU    %c0  GHz  TSC    %c1    %c3    %c6    %c7 CTMP PTMP   %pc2   %pc3   %pc6   %pc7  Pkg_W  Cor_W GFX_W
-          0.06 0.80 2.29   0.11   0.00   0.00  99.83   47   40   0.26   0.01   0.44  98.78   3.49   0.12  0.14
-  0   0   0.07 0.80 2.29   0.07   0.00   0.00  99.86   40   40   0.26   0.01   0.44  98.78   3.49   0.12  0.14
-  0   4   0.03 0.80 2.29   0.12
-  1   1   0.04 0.80 2.29   0.25   0.01   0.00  99.71   40
-  1   5   0.16 0.80 2.29   0.13
-  2   2   0.05 0.80 2.29   0.06   0.01   0.00  99.88   40
-  2   6   0.03 0.80 2.29   0.08
-  3   3   0.05 0.80 2.29   0.08   0.00   0.00  99.87   47
-  3   7   0.04 0.84 2.29   0.09
-.fi
-.SH SUMMARY EXAMPLE
-The "-s" option prints the column headers just once,
-and then the one line system summary for each sample interval.
-
-.nf
-[root@wsm]# turbostat -S
-   %c0  GHz  TSC    %c1    %c3    %c6 CTMP   %pc3   %pc6
-  1.40 2.81 3.38  10.78  43.47  44.35   42  13.67   2.09
-  1.34 2.90 3.38  11.48  58.96  28.23   41  19.89   0.15
-  1.55 2.72 3.38  26.73  37.66  34.07   42   2.53   2.80
-  1.37 2.83 3.38  16.95  60.05  21.63   42   5.76   0.20
+[root@ivy]# ./turbostat
+    Core     CPU Avg_MHz   %Busy Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 
+       -       -       6    0.36    1596    3492       0    0.59    0.01   99.04    0.00      23      24   23.82    0.01   72.47    0.00    6.40    1.01    0.00
+       0       0       9    0.58    1596    3492       0    0.28    0.01   99.13    0.00      23      24   23.82    0.01   72.47    0.00    6.40    1.01    0.00
+       0       4       1    0.07    1596    3492       0    0.79
+       1       1      10    0.65    1596    3492       0    0.59    0.00   98.76    0.00      23
+       1       5       5    0.28    1596    3492       0    0.95
+       2       2      10    0.66    1596    3492       0    0.41    0.01   98.92    0.00      23
+       2       6       2    0.10    1597    3492       0    0.97
+       3       3       3    0.20    1596    3492       0    0.44    0.00   99.37    0.00      23
+       3       7       5    0.31    1596    3492       0    0.33
 .fi
 .SH VERBOSE EXAMPLE
 The "-v" option adds verbosity to the output:
@@ -154,55 +143,35 @@
 until ^C while the other CPUs are mostly idle:
 
 .nf
-[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
+root@ivy: turbostat cat /dev/zero > /dev/null
 ^C
-cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
-          8.86 3.61 3.38  15.06  31.19  44.89   0.00   0.00
-  0   0   1.46 3.22 3.38  16.84  29.48  52.22   0.00   0.00
-  0   6   0.21 3.06 3.38  18.09
-  1   2   0.53 3.33 3.38   2.80  46.40  50.27
-  1   8   0.89 3.47 3.38   2.44
-  2   4   1.36 3.43 3.38   9.04  23.71  65.89
-  2  10   0.18 2.86 3.38  10.22
-  8   1   0.04 2.87 3.38  99.96   0.01   0.00
-  8   7  99.72 3.63 3.38   0.27
-  9   3   0.31 3.21 3.38   7.64  56.55  35.50
-  9   9   0.08 2.95 3.38   7.88
- 10   5   1.42 3.43 3.38   2.14  30.99  65.44
- 10  11   0.16 2.88 3.38   3.40
+    Core     CPU Avg_MHz   %Busy Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 
+       -       -     496   12.75    3886    3492       0   13.16    0.04   74.04    0.00      36      36    0.00    0.00    0.00    0.00   23.15   17.65    0.00
+       0       0      22    0.57    3830    3492       0    0.83    0.02   98.59    0.00      27      36    0.00    0.00    0.00    0.00   23.15   17.65    0.00
+       0       4       9    0.24    3829    3492       0    1.15
+       1       1       4    0.09    3783    3492       0   99.91    0.00    0.00    0.00      36
+       1       5    3880   99.82    3888    3492       0    0.18
+       2       2      17    0.44    3813    3492       0    0.77    0.04   98.75    0.00      28
+       2       6      12    0.32    3823    3492       0    0.89
+       3       3      16    0.43    3844    3492       0    0.63    0.11   98.84    0.00      30
+       3       7       4    0.11    3827    3492       0    0.94
+30.372243 sec
+
 .fi
-Above the cycle soaker drives cpu7 up its 3.6 GHz turbo limit
+Above, the cycle soaker drives cpu5 up to its 3.8 GHz turbo limit
 while the other processors are generally in various states of idle.
 
-Note that cpu1 and cpu7 are HT siblings within core8.
-As cpu7 is very busy, it prevents its sibling, cpu1,
+Note that cpu1 and cpu5 are HT siblings within core1.
+As cpu5 is very busy, it prevents its sibling, cpu1,
 from entering a c-state deeper than c1.
 
-Note that turbostat reports average GHz of 3.63, while
-the arithmetic average of the GHz column above is lower.
-This is a weighted average, where the weight is %c0.  ie. it is the total number of
-un-halted cycles elapsed per time divided by the number of CPUs.
-.SH SMI COUNTING EXAMPLE
-On Intel Nehalem and newer processors, MSR 0x34 is a System Management Mode Interrupt (SMI) counter.
-This counter is shown by default under the "SMI" column.
-.nf
-[root@x980 ~]# turbostat
-cor CPU    %c0  GHz  TSC SMI    %c1    %c3    %c6 CTMP   %pc3   %pc6
-          0.11 1.91 3.38   0   1.84   0.26  97.79   29   0.82  83.87
-  0   0   0.40 1.63 3.38   0  10.27   0.12  89.20   20   0.82  83.88
-  0   6   0.06 1.63 3.38   0  10.61
-  1   2   0.37 2.63 3.38   0   0.02   0.10  99.51   22
-  1   8   0.01 1.62 3.38   0   0.39
-  2   4   0.07 1.62 3.38   0   0.04   0.07  99.82   23
-  2  10   0.02 1.62 3.38   0   0.09
-  8   1   0.23 1.64 3.38   0   0.10   1.07  98.60   24
-  8   7   0.02 1.64 3.38   0   0.31
-  9   3   0.03 1.62 3.38   0   0.03   0.05  99.89   29
-  9   9   0.02 1.62 3.38   0   0.05
- 10   5   0.07 1.62 3.38   0   0.08   0.12  99.73   27
- 10  11   0.03 1.62 3.38   0   0.13
-^C
-.fi
+Note that the Avg_MHz column reflects the total number of cycles executed
+divided by the measurement interval.  If the %Busy column is 100%,
+then the processor was running at that speed the entire interval.
+Dividing the Avg_MHz by the %Busy fraction yields the Bzy_MHz --
+the average frequency while the processor was actually executing --
+which does not include any non-busy idle time.
+
 .SH NOTES
 
 .B "turbostat "
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 77eb130..7c9d8e7 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -56,7 +56,7 @@
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
-unsigned int units = 1000000000;	/* Ghz etc */
+unsigned int units = 1000000;	/* MHz etc */
 unsigned int genuine_intel;
 unsigned int has_invariant_tsc;
 unsigned int do_nehalem_platform_info;
@@ -264,88 +264,93 @@
 	return 0;
 }
 
+/*
+ * Example Format w/ field column widths:
+ *
+ * Package    Core     CPU Avg_MHz Bzy_MHz TSC_MHz     SMI   %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
+ * 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567
+ */
+
 void print_header(void)
 {
 	if (show_pkg)
-		outp += sprintf(outp, "pk");
-	if (show_pkg)
-		outp += sprintf(outp, " ");
+		outp += sprintf(outp, "Package ");
 	if (show_core)
-		outp += sprintf(outp, "cor");
+		outp += sprintf(outp, "    Core ");
 	if (show_cpu)
-		outp += sprintf(outp, " CPU");
-	if (show_pkg || show_core || show_cpu)
-		outp += sprintf(outp, " ");
-	if (do_nhm_cstates)
-		outp += sprintf(outp, "   %%c0");
+		outp += sprintf(outp, "    CPU ");
 	if (has_aperf)
-		outp += sprintf(outp, "  GHz");
-	outp += sprintf(outp, "  TSC");
+		outp += sprintf(outp, "Avg_MHz ");
+	if (do_nhm_cstates)
+		outp += sprintf(outp, "  %%Busy ");
+	if (has_aperf)
+		outp += sprintf(outp, "Bzy_MHz ");
+	outp += sprintf(outp, "TSC_MHz ");
 	if (do_smi)
-		outp += sprintf(outp, " SMI");
+		outp += sprintf(outp, "    SMI ");
 	if (extra_delta_offset32)
-		outp += sprintf(outp, "  count 0x%03X", extra_delta_offset32);
+		outp += sprintf(outp, " count 0x%03X ", extra_delta_offset32);
 	if (extra_delta_offset64)
-		outp += sprintf(outp, "  COUNT 0x%03X", extra_delta_offset64);
+		outp += sprintf(outp, " COUNT 0x%03X ", extra_delta_offset64);
 	if (extra_msr_offset32)
-		outp += sprintf(outp, "   MSR 0x%03X", extra_msr_offset32);
+		outp += sprintf(outp, "  MSR 0x%03X ", extra_msr_offset32);
 	if (extra_msr_offset64)
-		outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
+		outp += sprintf(outp, "          MSR 0x%03X ", extra_msr_offset64);
 	if (do_nhm_cstates)
-		outp += sprintf(outp, "    %%c1");
+		outp += sprintf(outp, " CPU%%c1 ");
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, "    %%c3");
+		outp += sprintf(outp, " CPU%%c3 ");
 	if (do_nhm_cstates)
-		outp += sprintf(outp, "    %%c6");
+		outp += sprintf(outp, " CPU%%c6 ");
 	if (do_snb_cstates)
-		outp += sprintf(outp, "    %%c7");
+		outp += sprintf(outp, " CPU%%c7 ");
 
 	if (do_dts)
-		outp += sprintf(outp, " CTMP");
+		outp += sprintf(outp, "CoreTmp ");
 	if (do_ptm)
-		outp += sprintf(outp, " PTMP");
+		outp += sprintf(outp, " PkgTmp ");
 
 	if (do_snb_cstates)
-		outp += sprintf(outp, "   %%pc2");
+		outp += sprintf(outp, "Pkg%%pc2 ");
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, "   %%pc3");
+		outp += sprintf(outp, "Pkg%%pc3 ");
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, "   %%pc6");
+		outp += sprintf(outp, "Pkg%%pc6 ");
 	if (do_snb_cstates)
-		outp += sprintf(outp, "   %%pc7");
+		outp += sprintf(outp, "Pkg%%pc7 ");
 	if (do_c8_c9_c10) {
-		outp += sprintf(outp, "   %%pc8");
-		outp += sprintf(outp, "   %%pc9");
-		outp += sprintf(outp, "  %%pc10");
+		outp += sprintf(outp, "Pkg%%pc8 ");
+		outp += sprintf(outp, "Pkg%%pc9 ");
+		outp += sprintf(outp, "Pk%%pc10 ");
 	}
 
 	if (do_rapl && !rapl_joules) {
 		if (do_rapl & RAPL_PKG)
-			outp += sprintf(outp, "  Pkg_W");
+			outp += sprintf(outp, "PkgWatt ");
 		if (do_rapl & RAPL_CORES)
-			outp += sprintf(outp, "  Cor_W");
+			outp += sprintf(outp, "CorWatt ");
 		if (do_rapl & RAPL_GFX)
-			outp += sprintf(outp, " GFX_W");
+			outp += sprintf(outp, "GFXWatt ");
 		if (do_rapl & RAPL_DRAM)
-			outp += sprintf(outp, " RAM_W");
+			outp += sprintf(outp, "RAMWatt ");
 		if (do_rapl & RAPL_PKG_PERF_STATUS)
-			outp += sprintf(outp, " PKG_%%");
+			outp += sprintf(outp, "  PKG_%% ");
 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
-			outp += sprintf(outp, " RAM_%%");
+			outp += sprintf(outp, "  RAM_%% ");
 	} else {
 		if (do_rapl & RAPL_PKG)
-			outp += sprintf(outp, "  Pkg_J");
+			outp += sprintf(outp, "  Pkg_J ");
 		if (do_rapl & RAPL_CORES)
-			outp += sprintf(outp, "  Cor_J");
+			outp += sprintf(outp, "  Cor_J ");
 		if (do_rapl & RAPL_GFX)
-			outp += sprintf(outp, " GFX_J");
+			outp += sprintf(outp, "  GFX_J ");
 		if (do_rapl & RAPL_DRAM)
-			outp += sprintf(outp, " RAM_W");
+			outp += sprintf(outp, "  RAM_W ");
 		if (do_rapl & RAPL_PKG_PERF_STATUS)
-			outp += sprintf(outp, " PKG_%%");
+			outp += sprintf(outp, "  PKG_%% ");
 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
-			outp += sprintf(outp, " RAM_%%");
-		outp += sprintf(outp, " time");
+			outp += sprintf(outp, "  RAM_%% ");
+		outp += sprintf(outp, "  time ");
 
 	}
 	outp += sprintf(outp, "\n");
@@ -410,25 +415,12 @@
 
 /*
  * column formatting convention & formats
- * package: "pk" 2 columns %2d
- * core: "cor" 3 columns %3d
- * CPU: "CPU" 3 columns %3d
- * Pkg_W: %6.2
- * Cor_W: %6.2
- * GFX_W: %5.2
- * RAM_W: %5.2
- * GHz: "GHz" 3 columns %3.2
- * TSC: "TSC" 3 columns %3.2
- * SMI: "SMI" 4 columns %4d
- * percentage " %pc3" %6.2
- * Perf Status percentage: %5.2
- * "CTMP" 4 columns %4d
  */
 int format_counters(struct thread_data *t, struct core_data *c,
 	struct pkg_data *p)
 {
 	double interval_float;
-	char *fmt5, *fmt6;
+	char *fmt8;
 
 	 /* if showing only 1st thread in core and this isn't one, bail out */
 	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
@@ -443,65 +435,52 @@
 	/* topo columns, print blanks on 1st (average) line */
 	if (t == &average.threads) {
 		if (show_pkg)
-			outp += sprintf(outp, "  ");
-		if (show_pkg && show_core)
-			outp += sprintf(outp, " ");
+			outp += sprintf(outp, "       -");
 		if (show_core)
-			outp += sprintf(outp, "   ");
+			outp += sprintf(outp, "       -");
 		if (show_cpu)
-			outp += sprintf(outp, " " "   ");
+			outp += sprintf(outp, "       -");
 	} else {
 		if (show_pkg) {
 			if (p)
-				outp += sprintf(outp, "%2d", p->package_id);
+				outp += sprintf(outp, "%8d", p->package_id);
 			else
-				outp += sprintf(outp, "  ");
+				outp += sprintf(outp, "       -");
 		}
-		if (show_pkg && show_core)
-			outp += sprintf(outp, " ");
 		if (show_core) {
 			if (c)
-				outp += sprintf(outp, "%3d", c->core_id);
+				outp += sprintf(outp, "%8d", c->core_id);
 			else
-				outp += sprintf(outp, "   ");
+				outp += sprintf(outp, "       -");
 		}
 		if (show_cpu)
-			outp += sprintf(outp, " %3d", t->cpu_id);
+			outp += sprintf(outp, "%8d", t->cpu_id);
 	}
+
+	/* AvgMHz */
+	if (has_aperf)
+		outp += sprintf(outp, "%8.0f",
+			1.0 / units * t->aperf / interval_float);
+
 	/* %c0 */
 	if (do_nhm_cstates) {
-		if (show_pkg || show_core || show_cpu)
-			outp += sprintf(outp, " ");
 		if (!skip_c0)
-			outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
+			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
 		else
-			outp += sprintf(outp, "  ****");
+			outp += sprintf(outp, "********");
 	}
 
-	/* GHz */
-	if (has_aperf) {
-		if (!aperf_mperf_unstable) {
-			outp += sprintf(outp, " %3.2f",
-				1.0 * t->tsc / units * t->aperf /
-				t->mperf / interval_float);
-		} else {
-			if (t->aperf > t->tsc || t->mperf > t->tsc) {
-				outp += sprintf(outp, " ***");
-			} else {
-				outp += sprintf(outp, "%3.1f*",
-					1.0 * t->tsc /
-					units * t->aperf /
-					t->mperf / interval_float);
-			}
-		}
-	}
+	/* BzyMHz */
+	if (has_aperf)
+		outp += sprintf(outp, "%8.0f",
+			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
 
 	/* TSC */
-	outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
+	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
 
 	/* SMI */
 	if (do_smi)
-		outp += sprintf(outp, "%4d", t->smi_count);
+		outp += sprintf(outp, "%8d", t->smi_count);
 
 	/* delta */
 	if (extra_delta_offset32)
@@ -520,9 +499,9 @@
 
 	if (do_nhm_cstates) {
 		if (!skip_c1)
-			outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
+			outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
 		else
-			outp += sprintf(outp, "  ****");
+			outp += sprintf(outp, "********");
 	}
 
 	/* print per-core data only for 1st thread in core */
@@ -530,79 +509,76 @@
 		goto done;
 
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
 	if (do_nhm_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
 	if (do_snb_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
 
 	if (do_dts)
-		outp += sprintf(outp, " %4d", c->core_temp_c);
+		outp += sprintf(outp, "%8d", c->core_temp_c);
 
 	/* print per-package data only for 1st core in package */
 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
 		goto done;
 
 	if (do_ptm)
-		outp += sprintf(outp, " %4d", p->pkg_temp_c);
+		outp += sprintf(outp, "%8d", p->pkg_temp_c);
 
 	if (do_snb_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
 	if (do_nhm_cstates && !do_slm_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
 	if (do_snb_cstates)
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
 	if (do_c8_c9_c10) {
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
-		outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
+		outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
 	}
 
 	/*
  	 * If measurement interval exceeds minimum RAPL Joule Counter range,
  	 * indicate that results are suspect by printing "**" in fraction place.
  	 */
-	if (interval_float < rapl_joule_counter_range) {
-		fmt5 = " %5.2f";
-		fmt6 = " %6.2f";
-	} else {
-		fmt5 = " %3.0f**";
-		fmt6 = " %4.0f**";
-	}
+	if (interval_float < rapl_joule_counter_range)
+		fmt8 = "%8.2f";
+	else
+		fmt8 = " %6.0f**";
 
 	if (do_rapl && !rapl_joules) {
 		if (do_rapl & RAPL_PKG)
-			outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
+			outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
 		if (do_rapl & RAPL_CORES)
-			outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
+			outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
 		if (do_rapl & RAPL_GFX)
-			outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
+			outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
 		if (do_rapl & RAPL_DRAM)
-			outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
+			outp += sprintf(outp, fmt8, p->energy_dram * rapl_energy_units / interval_float);
 		if (do_rapl & RAPL_PKG_PERF_STATUS)
-			outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
-			outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
 	} else {
 		if (do_rapl & RAPL_PKG)
-			outp += sprintf(outp, fmt6,
+			outp += sprintf(outp, fmt8,
 					p->energy_pkg * rapl_energy_units);
 		if (do_rapl & RAPL_CORES)
-			outp += sprintf(outp, fmt6,
+			outp += sprintf(outp, fmt8,
 					p->energy_cores * rapl_energy_units);
 		if (do_rapl & RAPL_GFX)
-			outp += sprintf(outp, fmt5,
+			outp += sprintf(outp, fmt8,
 					p->energy_gfx * rapl_energy_units);
 		if (do_rapl & RAPL_DRAM)
-			outp += sprintf(outp, fmt5,
+			outp += sprintf(outp, fmt8,
 					p->energy_dram * rapl_energy_units);
 		if (do_rapl & RAPL_PKG_PERF_STATUS)
-			outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
-			outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
-	outp += sprintf(outp, fmt5, interval_float);
+			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+	outp += sprintf(outp, fmt8, interval_float);
 
 	}
 done:
@@ -1516,6 +1492,9 @@
 	case 0x46:	/* HSW */
 	case 0x37:	/* BYT */
 	case 0x4D:	/* AVN */
+	case 0x3D:	/* BDW */
+	case 0x4F:	/* BDX */
+	case 0x56:	/* BDX-DE */
 		return 1;
 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
@@ -1629,9 +1608,12 @@
 	case 0x3C:	/* HSW */
 	case 0x45:	/* HSW */
 	case 0x46:	/* HSW */
+	case 0x3D:	/* BDW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
 		break;
 	case 0x3F:	/* HSX */
+	case 0x4F:	/* BDX */
+	case 0x56:	/* BDX-DE */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
 		break;
 	case 0x2D:
@@ -1875,6 +1857,9 @@
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
 	case 0x46:	/* HSW */
+	case 0x3D:	/* BDW */
+	case 0x4F:	/* BDX */
+	case 0x56:	/* BDX-DE */
 		return 1;
 	}
 	return 0;
@@ -1886,7 +1871,8 @@
 		return 0;
 
 	switch (model) {
-	case 0x45:
+	case 0x45:	/* HSW */
+	case 0x3D:	/* BDW */
 		return 1;
 	}
 	return 0;
@@ -2455,7 +2441,7 @@
 	cmdline(argc, argv);
 
 	if (verbose)
-		fprintf(stderr, "turbostat v3.6 Dec 2, 2013"
+		fprintf(stderr, "turbostat v3.7 Feb 6, 2014"
 			" - Len Brown <lenb@kernel.org>\n");
 
 	turbostat_init();
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 5081e80..22fa819 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -277,7 +277,7 @@
 
 	host_vtimer_irq = ppi;
 
-	err = register_cpu_notifier(&kvm_timer_cpu_nb);
+	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
 	if (err) {
 		kvm_err("Cannot register timer CPU notifier\n");
 		goto out_free;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8ca405c..47b2983 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1496,7 +1496,7 @@
 		goto out;
 	}
 
-	ret = register_cpu_notifier(&vgic_cpu_nb);
+	ret = __register_cpu_notifier(&vgic_cpu_nb);
 	if (ret) {
 		kvm_err("Cannot register vgic CPU notifier\n");
 		goto out_free_irq;