METALinux 3.2 release

METALinux 3.2 was the final major release of METALinux, based on
mainline Linux v3.10.

Features include:
- Full SoC support for TZ1090.
- Full board support for the TZ1090 based Minimorph & MetaMorph boards.
- Partial board support for Polaris (Pure Sensia 200D Connect).
- Various smaller arch features such as KGDB.

Particular thanks go to Will Newton, Graham Whaley, Neil Jones, George
Shore, Matt Fleming, Markos Chandras, Paul Burton, and Qais Yousef who
have all made significant contributions to the Meta port over the years.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 45b3df9..92211a6 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -401,6 +401,8 @@
 	- description of the SGI IOC4 PCI (multi function) device.
 sgi-visws.txt
 	- short blurb on the SGI Visual Workstations.
+sgx2d.txt
+	- description of the SGX 2D char device ioctl interface.
 sh/
 	- directory with info on porting Linux to a new architecture.
 smsc_ece1099.txt
@@ -433,6 +435,8 @@
 	- directory with information on managing thermal issues (CPU/temp)
 trace/
 	- directory with info on tracing technologies within linux
+uccp.txt
+	- description of the UCCP char device ioctl interface.
 unaligned-memory-access.txt
 	- info on how to avoid arch breaking unaligned memory access in code.
 unicode.txt
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index b9911c2..3110ba4 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -70,6 +70,10 @@
 						unsigned long parent_rate);
 		long		(*round_rate)(struct clk_hw *hw, unsigned long,
 						unsigned long *);
+		long		(*determine_rate)(struct clk_hw *hw,
+						unsigned long rate,
+						unsigned long *best_parent_rate,
+						struct clk **best_parent_clk);
 		int		(*set_parent)(struct clk_hw *hw, u8 index);
 		u8		(*get_parent)(struct clk_hw *hw);
 		int		(*set_rate)(struct clk_hw *hw, unsigned long);
diff --git a/Documentation/devicetree/bindings/clock/divider-clock.txt b/Documentation/devicetree/bindings/clock/divider-clock.txt
new file mode 100644
index 0000000..c9243aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/divider-clock.txt
@@ -0,0 +1,37 @@
+Binding for generic integer clock divider.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "divider-clock".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of divider register.
+- shift              : Shift of field in divider register.
+- width              : Width of field in divider register.
+- clocks             : From common clock binding.
+
+Required source clocks:
+- 0                  : Reference clock which is divided to produce the output
+                       clock (doesn't have to be named).
+
+Optional properties:
+- one-based          : Indicates that register field stores the divisor directly
+                       rather than the default of divisor-1.
+- power-of-two       : Indicates that register field stores log2(divisor) rather
+                       than the default of divisor-1.
+- clock-output-names : From common clock binding.
+- linux,clk-read-only: Flag to indicate that the divide shouldn't be altered.
+- default-divide     : Default divider value.
+
+Example:
+	clock {
+		compatible = "divider-clock";
+		#clock-cells = <0>;
+		clocks = <&sys_pll>;
+		reg = <0x02005914 0x4>;	/* CR_TOP_SYSCLK_DIV */
+		shift = <0>;		/* CR_TOP_SYSDIV */
+		width = <8>;
+		clock-output-names = "sysclk_div";
+	};
diff --git a/Documentation/devicetree/bindings/clock/img,meta-gate-clock.txt b/Documentation/devicetree/bindings/clock/img,meta-gate-clock.txt
new file mode 100644
index 0000000..483097c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,meta-gate-clock.txt
@@ -0,0 +1,28 @@
+Binding for clock gate requiring global Meta locking.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "img,meta-gate-clock".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of configuration register.
+- bit                : Bit number of gate switch in configuration register.
+- clocks             : From common clock binding.
+
+Required source clocks:
+- 0                  : Input clock that can be gated (doesn't have to be named).
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+	clock {
+		compatible = "img,meta-gate-clock";
+		#clock-cells = <0>;
+		clocks = <&sys_clk>;
+		reg = <0x02004010 0x4>;
+		bit = <0>;
+		clock-output-names = "scb0";
+	};
diff --git a/Documentation/devicetree/bindings/clock/img,meta-mux-clock.txt b/Documentation/devicetree/bindings/clock/img,meta-mux-clock.txt
new file mode 100644
index 0000000..550876a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,meta-mux-clock.txt
@@ -0,0 +1,35 @@
+Binding for clock multiplexer requiring global Meta locking.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "img,meta-mux-clock".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of configuration register.
+- shift              : Shift of mux value field in configuration register.
+- width              : Width of mux value field in configuration register.
+- clocks             : From common clock binding.
+
+Required source clocks:
+- 0..(1<<width)-1    : Input clocks to multiplex (don't have to be named).
+
+Optional properties:
+- clock-output-names : From common clock binding.
+- default-clock      : Mux value to set initially.
+- linux,clk-set-rate-parent : Allow modification of parent clock.
+- linux,clk-set-rate-remux : Allow remux in response to rate change.
+
+Example:
+	clock {
+		compatible = "img,meta-mux-clock";
+		#clock-cells = <0>;
+		clocks = <&xtal1>,
+			 <&xtal2>;
+		reg = <0x02005908 0x4>;
+		shift = <0>;
+		width = <1>;
+		clock-output-names = "sysclk0_sw";
+		default-clock = <1>; /* default to xtal2 */
+	};
diff --git a/Documentation/devicetree/bindings/clock/img,tz1090-deleter.txt b/Documentation/devicetree/bindings/clock/img,tz1090-deleter.txt
new file mode 100644
index 0000000..935c38c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,tz1090-deleter.txt
@@ -0,0 +1,37 @@
+Binding for TZ1090 clock deleter.
+
+This binding uses the common clock binding[1]. The TZ1090 clock deleters can be
+configured to delete a certain fraction of the input clock pulses. For a
+configuration field of "width" bits with the value "delete_cycles",
+delete_cycles cycles are deleted out of every 1 << width cycles, so the output
+frequency is:
+
+    f_out = f_in * ((1 << width) - delete_cycles) / (1 << width)
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "img,tz1090-deleter".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of configuration register.
+- shift              : Shift of config value field in configuration register.
+- width              : Width of config value field in configuration register.
+- clocks             : From common clock binding.
+
+Required source clocks:
+- 0                  : Reference clock which is deleted to produce the output
+                       clock (doesn't have to be named).
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+	meta_clkdelete {
+		compatible = "img,tz1090-deleter";
+		#clock-cells = <0>;
+		clocks = <&sys_clk_x2_undeleted>;
+		reg = <0x0200591c 0x4>;	/* CR_TOP_META_CLKDELETE */
+		shift = <0>;		/* CR_TOP_META_CLKDELETE */
+		width = <12>;
+		clock-output-names = "meta";
+	};
diff --git a/Documentation/devicetree/bindings/clock/img,tz1090-pll.txt b/Documentation/devicetree/bindings/clock/img,tz1090-pll.txt
new file mode 100644
index 0000000..20aa622
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/img,tz1090-pll.txt
@@ -0,0 +1,33 @@
+Binding for TZ1090 Phase-Locked Loop (PLL) clocks.
+
+This binding uses the common clock binding[1]. These PLLs are configured with 2
+registers specified with the reg property. These contain various fields which
+among other things specify the reference divider value (r), the frequency
+divider value (f), and the output divider value (od). When enabled, the output
+clock rate is:
+
+    f_out = f_ref / r * (f / 2) / od
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "img,tz1090-pll".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of configuration register pair.
+- clocks             : From common clock binding.
+
+Required source clocks:
+- 0                  : Reference clock used to generate the output clock
+                       (doesn't have to be named).
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+	sys_pll {
+		compatible = "img,tz1090-pll";
+		#clock-cells = <0>;
+		clocks = <&sysclk0_sw>;
+		reg = <0x02005950 0x8>;	/* CR_TOP_SYSPLL_CTL{0,1} */
+		clock-output-names = "sys_pll";
+	};
diff --git a/Documentation/devicetree/bindings/clock/specified-clock.txt b/Documentation/devicetree/bindings/clock/specified-clock.txt
new file mode 100644
index 0000000..b36ccf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/specified-clock.txt
@@ -0,0 +1,39 @@
+Binding for fixed-rate clock sources with readable configuration.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible         : Shall be "specified-clock".
+- #clock-cells       : From common clock binding; shall be set to 0.
+- reg                : Address of configuration register.
+- shift              : Shift of config value field in configuration register.
+- width              : Width of config value field in configuration register.
+- clock-frequency    : Frequency mapping of clock. Consecutive pairs of cells
+                       represent the config value to match and the clock
+                       frequency in Hz for that config value.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+	clock {
+		compatible = "specified-clock";
+		#clock-cells = <0>;
+		reg = <0x02004004 0x4>;	/* CR_PERIP_RESET_CFG */
+		shift = <8>;		/* FXTAL */
+		width = <4>;
+		clock-frequency =
+		/*	 FXTAL	Frequency */
+			<0	16384000>,
+			<1	19200000>,
+			<2	24000000>,
+			<3	24576000>,
+			<4	26000000>,
+			<5	36000000>,
+			<6	36864000>,
+			<7	38400000>,
+			<8	40000000>;
+		clock-output-names = "xtal1";
+	};
diff --git a/Documentation/devicetree/bindings/dma/img-mdc-dma.txt b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
new file mode 100644
index 0000000..214f267
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
@@ -0,0 +1,40 @@
+* Imagination Technologies Meta DMA Controller (MDC)
+
+Required properties:
+- #dma-cells : Must be <2>. Used by slave devices to request a DMA channel.
+- compatible : Should be "img,mdc-dma".
+- reg        : Should contain DMA registers location and length. This should include
+  all of the per-channel registers.
+- interrupts : Should contain all of the per-channel DMA interrupts.
+
+Examples:
+
+dma: dma-controller@0200c000 {
+	#dma-cells = <2>;
+	compatible = "img,mdc-dma";
+	reg = <0x0200c000 0x1000>;
+	interrupts = < 21 4
+		       22 4
+		       23 4
+		       24 4
+		       25 4
+		       26 4
+		       27 4
+		       28 4 >;
+};
+
+DMA clients connected to the Meta DMA Controller (MDC) must comply to the format
+described in the dma.txt file, using a two-cell specifier for each channel. The
+cells are:
+
+1. The number of the DMA peripheral.
+2. The DMA request line number.
+
+Example:
+
+spi1: spi@02004e00 {
+	...
+	dmas = <&dma 7 0xffffffff
+		&dma 8 0xffffffff>; /* -1 for any channel */
+	dma-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/dma/tz1090-mdc-dma.txt b/Documentation/devicetree/bindings/dma/tz1090-mdc-dma.txt
new file mode 100644
index 0000000..d491824
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/tz1090-mdc-dma.txt
@@ -0,0 +1,23 @@
+* Toumaz Xenif TZ1090 (Comet) specific callbacks for the Imagination Technologies Meta DMA Controller (MDC)
+
+Required properties:
+- compatible   : Should be "img,tz1090-mdc-dma".
+- The remaining properties are described in the img-mdc-dma.txt file.
+
+Examples
+
+dma: dma-controller@0200c000 {
+	#dma-cells = <2>;
+	compatible = "img,tz1090-mdc-dma";
+	reg = <0x0200c000 0x1000>;
+	interrupts = < 21 4
+		       22 4
+		       23 4
+		       24 4
+		       25 4
+		       26 4
+		       27 4
+		       28 4 >;
+};
+
+DMA clients should use the properties described in the img-mdc-dma.txt file.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt b/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt
new file mode 100644
index 0000000..66a4440
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt
@@ -0,0 +1,41 @@
+ImgTec TZ1090 PDC GPIO Controller
+
+Required properties:
+- compatible: Compatible property value should be "img,tz1090-pdc-gpio".
+
+- reg: Physical base address of the controller and length of memory mapped
+  region. This starts at and covers the SOC_GPIO_CONTROL registers.
+
+- gpio-controller: Specifies that the node is a gpio controller.
+
+- #gpio-cells: Should be 2. The syntax of the gpio specifier used by client
+  nodes should have the following values.
+     <[phandle of the gpio controller node]
+      [PDC gpio number]
+      [gpio flags]>
+
+  Values for gpio specifier:
+  - GPIO number: a value in the range 0 to 6.
+  - GPIO flags: bit field of flags:
+      1: active low
+
+Optional properties:
+- gpio-ranges: Mapping to pin controller pins
+
+- interrupts: Individual syswake interrupts (other GPIOs cannot interrupt)
+
+
+Example:
+
+	pdc_gpios: gpio-controller@02006500 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		compatible = "img,tz1090-pdc-gpio";
+		reg = <0x02006500 0x100>;
+
+		interrupt-parent = <&pdc>;
+		interrupts =	<1 0 0>,	/* Syswake 0 */
+				<1 1 0>,	/* Syswake 1 */
+				<1 2 0>;	/* Syswake 2 */
+	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt b/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt
new file mode 100644
index 0000000..fc16da1
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt
@@ -0,0 +1,83 @@
+ImgTec TZ1090 GPIO Controller
+
+Required properties:
+- compatible: Compatible property value should be "img,tz1090-gpio".
+
+- reg: Physical base address of the controller and length of memory mapped
+  region.
+
+- #address-cells: Should be 1 (for bank subnodes)
+
+- #size-cells: Should be 0 (for bank subnodes)
+
+- Each bank of GPIOs should have a subnode to represent it.
+
+  Bank subnode required properties:
+  - reg: Index of bank in the range 0 to 2.
+
+  - gpio-controller: Specifies that the node is a gpio controller.
+
+  - #gpio-cells: Should be 2. The syntax of the gpio specifier used by client
+    nodes should have the following values.
+       <[phandle of the gpio controller node]
+        [gpio number within the gpio bank]
+        [gpio flags]>
+
+    Values for gpio specifier:
+    - GPIO number: a value in the range 0 to 29.
+    - GPIO flags: bit field of flags:
+        1: active low
+
+  Bank subnode optional properties:
+  - gpio-ranges: Mapping to pin controller pins
+
+  - interrupts: Interrupt for the entire bank
+
+  - interrupt-controller: Specifies that the node is an interrupt controller
+
+  - #interrupt-cells: Should be 2. The syntax of the interrupt specifier used by
+    client nodes should have the following values.
+       <[phandle of the interrupt controller]
+        [gpio number within the gpio bank]
+        [irq flags]>
+
+    Values for irq specifier:
+    - GPIO number: a value in the range 0 to 29
+    - IRQ flags: value to describe edge and level triggering
+        1: trigger on rising edge
+        2: trigger on falling edge
+        3: trigger on both rising and falling edges
+        4: trigger when high
+        8: trigger when low
+
+
+
+Example:
+
+	gpios: gpio-controller@02005800 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "img,tz1090-gpio";
+		reg = <0x02005800 0x90>;
+
+		/* bank 0 with an interrupt */
+		gpios0: bank@0 {
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			reg = <0>;
+			interrupts = <13 4 /* level */>;
+			gpio-controller;
+			gpio-ranges = <&pinctrl 0 30>;
+			interrupt-controller;
+		};
+
+		/* bank 2 without interrupt */
+		gpios2: bank@2 {
+			#gpio-cells = <2>;
+			reg = <2>;
+			gpio-controller;
+			gpio-ranges = <&pinctrl 60 30>;
+		};
+	};
+
+
diff --git a/Documentation/devicetree/bindings/i2c/i2c-chorus2.txt b/Documentation/devicetree/bindings/i2c/i2c-chorus2.txt
new file mode 100644
index 0000000..54911b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-chorus2.txt
@@ -0,0 +1,13 @@
+ImgTec Serial Control Protocol (SCB) I2C Controller for Chorus2 compatible
+boards
+
+Required properties:
+- compatible: Must be "img,chorus2-i2c"
+- reg
+- interrupts
+- id: The bus id number
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imgscb.txt b/Documentation/devicetree/bindings/i2c/i2c-imgscb.txt
new file mode 100644
index 0000000..00f143b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-imgscb.txt
@@ -0,0 +1,27 @@
+ImgTec Serial Control Protocol (SCB) I2C Controller:
+
+Required properties:
+- compatible : Must be "img,scb";
+- reg
+- interrupts
+- clocks : A reference to the SCB clock node;
+- bit-rate : Speed of bus in Hz;
+- bus-delay : Bus delay in ms;
+- quirks : Quirks present on the hardware:
+  - 0x00000001 : write read fence.
+                 2 writes required to ensure subsequent read reflects the
+                 effects of a prior write, due to clock domain crossing problem.
+  - 0x00000002 : atomic mode only.
+                 Automatic mode unreliable when using different clock domain for
+                 data clock. This can be worked around by forcing atomic mode
+                 only with this quirk (slower), or using the same clock domain.
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- pinctrl-names : Can contain "default"
+- pinctrl-0 : Pin control maps for when enabled
+- linux,i2c-index: Can be used to override the I2C bus number. This is useful
+  when some of the I2C adapters are used by other non-Linux cores so would get
+  numbered incorrectly by default.
+- Child nodes conforming to i2c bus binding
diff --git a/Documentation/devicetree/bindings/input/img-ir.txt b/Documentation/devicetree/bindings/input/img-ir.txt
new file mode 100644
index 0000000..1f783c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/img-ir.txt
@@ -0,0 +1,17 @@
+* ImgTec Infrared (IR) decoder
+
+Required properties:
+- compatible:		Should be "img,ir"
+- reg:			physical base address of the controller and length of
+			memory mapped region.
+- interrupts:		The interrupt number to the cpu should be specified. The
+			number of cells representing a interrupt depends on the
+			parent interrupt controller.
+
+Example:
+
+	ir@02006200 {
+		compatible = "img,ir";
+		reg = <0x02006200 0x100>;
+		interrupts = <29 4>;
+	};
diff --git a/Documentation/devicetree/bindings/metag/meta-intc.txt b/Documentation/devicetree/bindings/metag/meta-intc.txt
index 80994ad..16e6ae8 100644
--- a/Documentation/devicetree/bindings/metag/meta-intc.txt
+++ b/Documentation/devicetree/bindings/metag/meta-intc.txt
@@ -25,6 +25,13 @@
 
     - no-mask: The controller doesn't have any mask registers.
 
+    - default-level: List of default level bitfields, one for each bank, where
+      each set bit indicates that the interrupt should be level sensitive by
+      default rather than edge. The purpose of this is mainly to ease the
+      transition to device tree from platform data. It should not be necessary
+      if all IRQ users are configured with devicetree as each one should set the
+      flow type in the interrupt specifier.
+
 * Interrupt Specifier Definition
 
   Interrupt specifiers consists of 2 cells encoded as follows:
@@ -58,6 +65,9 @@
 		// Number of interrupt banks
 		num-banks = <2>;
 
+		// IRQs 0-35 are level sensitive, except 16 and 34
+		default-level = <0xfffeffff 0xb>;
+
 		// No HWMASKEXT is available (specify on Chorus2 and Comet ES1)
 		no-mask;
 
diff --git a/Documentation/devicetree/bindings/metag/pdc-intc.txt b/Documentation/devicetree/bindings/metag/pdc-intc.txt
new file mode 100644
index 0000000..9e3f968
--- /dev/null
+++ b/Documentation/devicetree/bindings/metag/pdc-intc.txt
@@ -0,0 +1,112 @@
+* ImgTec Powerdown Controller (PDC) Interrupt Controller Binding
+
+This binding specifies what properties must be available in the device tree
+representation of a PDC IRQ controller. This has a number of input interrupt
+lines which can wake the system, and are passed on through output interrupt
+lines.
+
+Required properties:
+
+    - compatible: Specifies the compatibility list for the interrupt controller.
+      The type shall be <string> and the value shall include "img,pdc-intc".
+
+    - reg: Specifies the base PDC physical address(s) and size(s) of the
+      addressable register space. The type shall be <prop-encoded-array>.
+
+    - interrupt-controller: The presence of this property identifies the node
+      as an interrupt controller. No property value shall be defined.
+
+    - #interrupt-cells: Specifies the number of cells needed to encode an
+      interrupt source. The type shall be a <u32> and the value shall be 3.
+
+    - num-perips: Number of waking peripherals.
+
+    - num-syswakes: Number of SysWake inputs.
+
+    - interrupts: List of interrupt specifiers. The first specifier shall be the
+      shared SysWake interrupt, and remaining specifiers shall be PDC peripheral
+      interrupts in order.
+
+* Interrupt Specifier Definition
+
+  Interrupt specifiers consists of 3 cells encoded as follows:
+
+    - <1st-cell>: The type of interrupt:
+                    0 = peripheral interrupt
+                    1 = SysWake interrupt
+
+    - <2nd-cell>: The interrupt-number that identifies the interrupt source.
+
+    - <3rd-cell>: The level-sense information, encoded using the Linux interrupt
+                  flags as follows (only 4 valid for peripheral interrupts):
+                    1 = low-to-high edge triggered
+                    2 = high-to-low edge triggered
+                    3 = both edge triggered
+                    4 = active-high level-sensitive (required for perip irqs)
+                    8 = active-low level-sensitive
+
+* Examples
+
+Example 1:
+
+	/*
+	 * TZ1090 PDC block
+	 */
+	pdc: pdc@0x02006000 {
+		// This is an interrupt controller node.
+		interrupt-controller;
+
+		// Three cells to encode interrupt sources.
+		#interrupt-cells = <3>;
+
+		// Offset address of 0x02006000 and size of 0x1000.
+		reg = <0x02006000 0x1000>;
+
+		// Compatible with Meta hardware trigger block.
+		compatible = "img,pdc-intc";
+
+		// Three peripherals are connected.
+		num-perips = <3>;
+
+		// Four SysWakes are connected.
+		num-syswakes = <4>;
+
+		interrupts = <18 4 /* level */>, /* Syswakes */
+			     <30 4 /* level */>, /* Peripheral 0 (RTC) */
+			     <29 4 /* level */>, /* Peripheral 1 (IR) */
+			     <31 4 /* level */>; /* Peripheral 2 (WDT) */
+	};
+
+Example 2:
+
+	/*
+	 * An SoC peripheral that is wired through the PDC.
+	 */
+	rtc0 {
+		// The interrupt controller that this device is wired to.
+		interrupt-parent = <&pdc>;
+
+		// Interrupt source Peripheral 0
+		// Note that there are three cells as specified in the interrupt
+		// parent's '#interrupt-cells' property.
+		interrupts = <1   /* Peripheral */
+		              0   /* Peripheral 0 (RTC) */
+		              4>  /* IRQ_TYPE_LEVEL_HIGH */
+	};
+
+Example 3:
+
+	/*
+	 * An interrupt generating device that is wired to a SysWake pin.
+	 */
+	touchscreen0 {
+		// The interrupt controller that this device is wired to.
+		interrupt-parent = <&pdc>;
+
+		// Interrupt source SysWake 0 that is active-low level-sensitive
+		// Note that there are three cells as specified in the interrupt
+		// parent's '#interrupt-cells' property.
+		interrupts = <1 /* Syswake */
+			      0 /* SysWake0 */
+			      8 /* IRQ_TYPE_LEVEL_LOW */>;
+	};
diff --git a/Documentation/devicetree/bindings/misc/img,tz1090-2d.txt b/Documentation/devicetree/bindings/misc/img,tz1090-2d.txt
new file mode 100644
index 0000000..9667cd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/img,tz1090-2d.txt
@@ -0,0 +1,18 @@
+Binding for 2D block in TZ1090.
+
+Required properties:
+- compatible         : Shall be "img,tz1090-2d".
+- reg                : Register address ranges.
+                       0: The 2D block slave port.
+                       1: The HEP register region.
+- interrupts         : Interrupt specifier for 2D block interrupt.
+- clocks             : 2D block clock.
+
+Example:
+	twod {
+		compatible = "img,tz1090-2d";
+		reg =	<0x02008900 0x100>,	/* slave port */
+			<0x02008c00 0x400>;	/* HEP registers */
+		interrupts = <34 0>;
+		clocks = <&twod_sysclk>;
+	};
diff --git a/Documentation/devicetree/bindings/misc/img,tz1090-socif.txt b/Documentation/devicetree/bindings/misc/img,tz1090-socif.txt
new file mode 100644
index 0000000..7257a0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/img,tz1090-socif.txt
@@ -0,0 +1,14 @@
+Binding for TZ1090 SOCIF error controller.
+
+Required properties:
+- compatible         : Shall be "img,tz1090-socif".
+- reg                : Address of CR_SOCIF_TIMEOUT with size 0x8 to include
+                       CR_SOCIF_STATUS too.
+- interrupts         : Interrupt specifier for SOCIF error interrupt.
+
+Example:
+	socif {
+		compatible = "img,tz1090-socif";
+		reg = <0x02008c2c 0x8>;
+		interrupts = <16 0>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
new file mode 100644
index 0000000..0cfc339
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
@@ -0,0 +1,128 @@
+ImgTec TZ1090 PDC pin controller
+
+Required properties:
+- compatible: "img,tz1090-pdc-pinctrl"
+- reg: Should contain the register physical address and length of the
+  SOC_GPIO_CONTROL registers in the PDC register region.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number
+of subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function. For this reason, even seemingly boolean
+values are actually tristates in this binding: unspecified, off, or on.
+Unspecified is represented as an absent property, and off/on are represented as
+integer values 0 and 1.
+
+Required subnode-properties:
+- pins : An array of strings. Each string contains the name of a pin or group.
+    Valid values for these names are listed below.
+
+Optional subnode-properties:
+- function: A string containing the name of the function to mux to the pin or
+  group. Valid values for function names are listed below, including which
+  pingroups can be muxed to them.
+- tristate: Flag, put pin into high impedance state.
+- pull-up: Flag, pull pin high.
+- pull-down: Flag, pull pin low.
+- bus-hold: Flag, weak latch last value on tristate bus.
+- schmitt: Integer, enable or disable Schmitt trigger mode for the pins.
+    0: no hysteresis
+    1: schmitt trigger
+- slew-rate: Integer, control slew rate of pins.
+    0: slow (half frequency)
+    1: fast
+- drive-strength: Integer, control drive strength of pins in mA.
+    2:  2mA
+    4:  4mA
+    8:  8mA
+    12: 12mA
+- power-on-start: Integer, power on start value of pins
+    0: weak pull-down disabled
+    1: weak pull-down for invalid power
+
+Note that many of these properties are only valid for certain specific pins
+or groups. See the TZ1090 TRM for complete details regarding which groups
+support which functionality. The Linux pinctrl driver may also be a useful
+reference.
+
+Valid values for pin and group names are:
+
+  pins:
+
+    These all support tristate, pull-up, pull-down, and bus-hold (which can also
+    be provided to any of the groups below to set it for all gpio pins in that
+    group).
+
+    gpio0, gpio1, sys_wake0, sys_wake1, sys_wake2, ir_data, ext_power.
+
+  mux groups:
+
+    These all support function.
+
+    gpio0
+        pins:       gpio0.
+        function:   ir_mod_stable_out.
+    gpio1
+        pins:       gpio1.
+        function:   ir_mod_power_out.
+
+  drive groups:
+
+    These support schmitt, slew-rate, drive-strength, and power-on-start.
+
+    pdc
+        pins:   gpio0, gpio1, sys_wake0, sys_wake1, sys_wake2, ir_data,
+                ext_power.
+
+Example:
+
+	pinctrl_pdc: pinctrl@02006500 {
+		#gpio-range-cells = <2>;
+		compatible = "img,tz1090-pdc-pinctrl";
+		reg = <0x02006500 0x100>;
+	};
+
+Example board file extracts:
+
+	&pinctrl_pdc {
+		pinctrl-names = "default";
+		pinctrl-0 = <&syswake_default>;
+
+		syswake_default: syswakes {
+			syswake_cfg {
+				pins =	"sys_wake0",
+					"sys_wake1",
+					"sys_wake2";
+				pull-up;
+			};
+		};
+		irmod_default: irmod {
+			gpio0_cfg {
+				pins =	"gpio0";
+				function = "ir_mod_stable_out";
+			};
+			gpio1_cfg {
+				pins =	"gpio1";
+				function = "ir_mod_power_out";
+			};
+		};
+	};
+
+	ir: ir@02006200 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&irmod_default>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
new file mode 100644
index 0000000..6a6fc79
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
@@ -0,0 +1,223 @@
+ImgTec TZ1090 pin controller
+
+Required properties:
+- compatible: "img,tz1090-pinctrl"
+- reg: Should contain the register physical address and length of the pad
+  configuration registers (CR_PADS_* and CR_IF_CTL0).
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+TZ1090's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pull-up parameter implies no
+information about e.g. the mux function. For this reason, even seemingly boolean
+values are actually tristates in this binding: unspecified, off, or on.
+Unspecified is represented as an absent property, and off/on are represented as
+integer values 0 and 1.
+
+Required subnode-properties:
+- pins : An array of strings. Each string contains the name of a pin or group.
+    Valid values for these names are listed below.
+
+Optional subnode-properties:
+- function: A string containing the name of the function to mux to the pin or
+  group. Valid values for function names are listed below, including which
+  pingroups can be muxed to them.
+- tristate: Flag, put pin into high impedance state.
+- pull-up: Flag, pull pin high.
+- pull-down: Flag, pull pin low.
+- bus-hold: Flag, weak latch last value on tristate bus.
+- schmitt: Integer, enable or disable Schmitt trigger mode for the pins.
+    0: no hysteresis
+    1: schmitt trigger
+- slew-rate: Integer, control slew rate of pins.
+    0: slow (half frequency)
+    1: fast
+- drive-strength: Integer, control drive strength of pins in mA.
+    2: 2mA
+    4: 4mA
+    8: 8mA
+    12: 12mA
+
+Note that many of these properties are only valid for certain specific pins
+or groups. See the TZ1090 TRM for complete details regarding which groups
+support which functionality. The Linux pinctrl driver may also be a useful
+reference.
+
+Valid values for pin and group names are:
+
+  gpio pins:
+
+    These all support tristate, pull-up, pull-down, and bus-hold (which can also
+    be provided to any of the groups below to set it for all pins in that
+    group).
+
+    They also all support some form of muxing. Any pins which are contained
+    in one of the mux groups (see below) can be muxed only to the functions
+    supported by the mux group. All other pins can be muxed to the "perip"
+    function which enables them with their intended peripheral.
+
+    Different pins in the same mux group cannot be muxed to different functions,
+    however it is possible to mux only a subset of the pins in a mux group to a
+    particular function and leave the remaining pins unmuxed. This is useful if
+    the board connects certain pins in a group to other devices to be controlled
+    by GPIO, and you don't want the usual peripheral to have any control of the
+    pin.
+
+    ant_sel0, ant_sel1, gain0, gain1, gain2, gain3, gain4, gain5, gain6, gain7,
+    i2s_bclk_out, i2s_din, i2s_dout0, i2s_dout1, i2s_dout2, i2s_lrclk_out,
+    i2s_mclk, pa_on, pdm_a, pdm_b, pdm_c, pdm_d, pll_on, rx_hp, rx_on,
+    scb0_sclk, scb0_sdat, scb1_sclk, scb1_sdat, scb2_sclk, scb2_sdat, sdh_cd,
+    sdh_clk_in, sdh_wp, sdio_clk, sdio_cmd, sdio_d0, sdio_d1, sdio_d2, sdio_d3,
+    spi0_cs0, spi0_cs1, spi0_cs2, spi0_din, spi0_dout, spi0_mclk, spi1_cs0,
+    spi1_cs1, spi1_cs2, spi1_din, spi1_dout, spi1_mclk, tft_blank_ls, tft_blue0,
+    tft_blue1, tft_blue2, tft_blue3, tft_blue4, tft_blue5, tft_blue6, tft_blue7,
+    tft_green0, tft_green1, tft_green2, tft_green3, tft_green4, tft_green5,
+    tft_green6, tft_green7, tft_hsync_nr, tft_panelclk, tft_pwrsave, tft_red0,
+    tft_red1, tft_red2, tft_red3, tft_red4, tft_red5, tft_red6, tft_red7,
+    tft_vd12acb, tft_vdden_gd, tft_vsync_ns, tx_on, uart0_cts, uart0_rts,
+    uart0_rxd, uart0_txd, uart1_rxd, uart1_txd.
+
+        tristate:       supported.
+        pull-up:        supported.
+        pull-down:      supported.
+        bus-hold:       supported.
+        function:       perip or those supported by pin's mux group.
+
+  other pins:
+
+    These other pins are part of various pin groups below, but can't be
+    controlled as GPIOs. They do however support tristate, pull-up, pull-down,
+    and bus-hold (which can also be provided to any of the groups below to set
+    it for all pins in that group).
+
+    clk_out0, clk_out1, tck, tdi, tdo, tms, trst.
+
+        tristate:       supported.
+        pull-up:        supported.
+        pull-down:      supported.
+        bus-hold:       supported.
+
+  mux groups:
+
+    These all support function, and some support drive configs.
+
+    afe
+        pins:           tx_on, rx_on, pll_on, pa_on, rx_hp, ant_sel0, ant_sel1,
+                        gain0, gain1, gain2, gain3, gain4, gain5, gain6, gain7.
+        function:       afe, ts_out_0.
+        schmitt:        supported.
+        slew-rate:      supported.
+        drive-strength: supported.
+    pdm_d
+        pins:           pdm_d.
+        function:       pdm_dac, usb_vbus.
+    sdh
+        pins:           sdh_cd, sdh_wp, sdh_clk_in.
+        function:       sdh, sdio.
+    sdio
+        pins:           sdio_clk, sdio_cmd, sdio_d0, sdio_d1, sdio_d2, sdio_d3.
+        function:       sdio, sdh.
+    spi1_cs2
+        pins:           spi1_cs2.
+        function:       spi1_cs2, usb_vbus.
+    tft
+        pins:           tft_red0, tft_red1, tft_red2, tft_red3,
+                        tft_red4, tft_red5, tft_red6, tft_red7,
+                        tft_green0, tft_green1, tft_green2, tft_green3,
+                        tft_green4, tft_green5, tft_green6, tft_green7,
+                        tft_blue0, tft_blue1, tft_blue2, tft_blue3,
+                        tft_blue4, tft_blue5, tft_blue6, tft_blue7,
+                        tft_vdden_gd, tft_panelclk, tft_blank_ls, tft_vsync_ns,
+                        tft_hsync_nr, tft_vd12acb, tft_pwrsave.
+        function:       tft, ext_dac, not_iqadc_stb, iqdac_stb, ts_out_1,
+                        lcd_trace, phy_ringosc.
+        schmitt:        supported.
+        slew-rate:      supported.
+        drive-strength: supported.
+
+  drive groups:
+
+    These all support schmitt, slew-rate, and drive-strength.
+
+    jtag
+        pins:   tck, trst, tdi, tdo, tms.
+    scb1
+        pins:   scb1_sdat, scb1_sclk.
+    scb2
+        pins:   scb2_sdat, scb2_sclk.
+    spi0
+        pins:   spi0_mclk, spi0_cs0, spi0_cs1, spi0_cs2, spi0_dout, spi0_din.
+    spi1
+        pins:   spi1_mclk, spi1_cs0, spi1_cs1, spi1_cs2, spi1_dout, spi1_din.
+    uart
+        pins:   uart0_txd, uart0_rxd, uart0_rts, uart0_cts,
+                uart1_txd, uart1_rxd.
+    drive_i2s
+        pins:   clk_out1, i2s_din, i2s_dout0, i2s_dout1, i2s_dout2,
+                i2s_lrclk_out, i2s_bclk_out, i2s_mclk.
+    drive_pdm
+        pins:   clk_out0, pdm_b, pdm_a.
+    drive_scb0
+        pins:   scb0_sclk, scb0_sdat, pdm_d, pdm_c.
+    drive_sdio
+        pins:   sdio_clk, sdio_cmd, sdio_d0, sdio_d1, sdio_d2, sdio_d3,
+                sdh_wp, sdh_cd, sdh_clk_in.
+
+  convenience groups:
+
+    These are just convenient groupings of pins and don't support any drive
+    configs.
+
+    uart0
+        pins:   uart0_cts, uart0_rts, uart0_rxd, uart0_txd.
+    uart1
+        pins:   uart1_rxd, uart1_txd.
+    scb0
+        pins:   scb0_sclk, scb0_sdat.
+    i2s
+        pins:   i2s_bclk_out, i2s_din, i2s_dout0, i2s_dout1, i2s_dout2,
+                i2s_lrclk_out, i2s_mclk.
+
+Example:
+
+	pinctrl: pinctrl@02005800 {
+		#gpio-range-cells = <2>;
+		compatible = "img,tz1090-pinctrl";
+		reg = <0x02005800 0xe4>;
+	};
+
+Example board file extract:
+
+	&pinctrl {
+		uart0_default: uart0 {
+			uart0_cfg {
+				pins =	"uart0_rxd",
+					"uart0_txd";
+				function = "perip";
+			};
+		};
+		tft_default: tft {
+			tft_cfg {
+				pins =	"tft";
+				function = "tft";
+			};
+		};
+	};
+
+	uart@02004b00 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart0_default>;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/imgpdc-rtc.txt b/Documentation/devicetree/bindings/rtc/imgpdc-rtc.txt
new file mode 100644
index 0000000..be5f263
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/imgpdc-rtc.txt
@@ -0,0 +1,28 @@
+* ImgTec PowerDown Controller (PDC) Real Time Clock (RTC)
+
+Required properties:
+- compatible:		Should be "img,pdc-rtc"
+- reg:			physical base address of the controller and length of
+			memory mapped region. May also contain another region
+			with 1 or 2 non-volatile registers to aid Comet clock
+			rate during poweroff workarounds.
+- reg-names:		Should contain at least "regs".
+			May also contain "nonvolatile".
+- interrupts:		The interrupt number to the cpu should be specified. The
+			number of cells representing an interrupt depends on the
+			parent interrupt controller.
+
+Optional properties:
+- time-set-delay:	Number of seconds it takes to set the time.
+- alarm-irq-delay:	Number of seconds that alarm interrupts are delayed.
+			Must not exceed 59 seconds.
+
+Example:
+
+	rtc@02006100 {
+		compatible = "img,pdc-rtc";
+		reg = <0x02006100 0x100>,	/* regs */
+		      <0x02006038 0x8>;		/* nonvolatile */
+		reg-names = "regs", "nonvolatile";
+		interrupts = <30 4>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/img,tz1090-audio.txt b/Documentation/devicetree/bindings/sound/img,tz1090-audio.txt
new file mode 100644
index 0000000..59e917a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/img,tz1090-audio.txt
@@ -0,0 +1,19 @@
+* Imagination Technologies audio complex for the TZ1090 SoC.
+
+Required properties:
+- compatible         : Must be "img,tz1090-audio".
+- reg                : Register list containing the ADC control register,
+the HP control register and the GTI port for the tansen codec.
+- img,i2s-controller : phandle for the i2s controller.
+- img,audio-codec    : phandle for the tansen codec.
+
+Example:
+
+sound {
+	compatible = "img,tz1090-audio";
+	reg = < 0x020059E0 0x4
+		0x020059E4 0x4
+		0x020059B8 0x4>;
+	img,i2s-controller = <&i2s>;
+	img,audio-codec = <&tansen>;
+};
diff --git a/Documentation/devicetree/bindings/sound/img-i2s.txt b/Documentation/devicetree/bindings/sound/img-i2s.txt
new file mode 100644
index 0000000..b98eac2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/img-i2s.txt
@@ -0,0 +1,29 @@
+* Imagination Technologies I2S controller
+
+Required properties:
+- compatible : Must be "img,i2s".
+- reg        : A list containing the I2S audio-out and audio-in registers
+and length respectively.
+- interrupts : Should contain all the interrupts for audio out/in lines. The
+interrupts for the output channels must be first followed by the input one.
+- interrupt-names: The interrupt names "output0", "output1", "output2", "input".
+- clocks     : A reference to the I2S clock node.
+- dmas       : DMA client properties as described in img-mdc-dma.txt. The dmas
+property should contain the DMA phandle along with the DMA peripheral and channel
+number for each channel starting with the output channels first.
+
+Example:
+
+i2s: tz1090-i2s@02004f00 {
+	compatible = "img,i2s";
+	reg = < 0x02004f00 0x100 /* out */
+		0x02005000 0x100 /* in */>;
+	interrupts = < 9 4 /* level */
+		       10 4 /* level */
+		       11 4 /* level */
+		       12 4 /* level */>;
+	interrupt-names = "output0", "output1", "output2", "input";
+	clocks = <&i2sout_sysclk>;
+	dmas = <&dma 9 0xffffffff /* out */
+		&dma 10 0xffffffff /* in */>;
+};
diff --git a/Documentation/devicetree/bindings/sound/tansen.txt b/Documentation/devicetree/bindings/sound/tansen.txt
new file mode 100644
index 0000000..5087d96
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tansen.txt
@@ -0,0 +1,12 @@
+CosmicCircuits Tansen audio codec.
+
+Required properties:
+- compatible : Must be "cosmic,tansen".
+- reg        : The GTI port register.
+
+Example:
+
+tansen: tansen@020059B8 {
+	compatible = "cosmic,tansen";
+	reg = <0x020059B8 0x4>; /* GTI CTRL register */
+};
diff --git a/Documentation/devicetree/bindings/spi/img-spi.txt b/Documentation/devicetree/bindings/spi/img-spi.txt
new file mode 100644
index 0000000..6851e93
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/img-spi.txt
@@ -0,0 +1,48 @@
+* Imagination Technologies SPI master controller.
+
+Required properties:
+- compatible     : Must be "img,spi"
+- reg            : The register base for the controller.
+- interrupts     : Interrupt line.
+- clocks         : A reference to the SPI clock node.
+- #address-cells : <1>, as required by the generic SPI binding.
+- #size-cells    : <0>, as required by the generic SPI binding.
+
+Optional properties:
+- num-cs         : Number of chip-select pins.
+- clock-frequency: Desired clock frequency. If not specified, it will use the default
+one (40MHz).
+- dmas           : As described in dma.txt, dmaengine bindings for channel allocation.
+- dma-names      : Similarly, required dmaengine bindings for the previously
+mentioned 'dmas' property. For more information see the dma.txt text file. If
+used, the dma channels should be named "rx" and "tx".
+
+Child nodes follow the generic SPI binding described in spi-bus.txt
+
+Example:
+
+SoC configuration:
+
+spi1: spi@02004e00 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "img,spi";
+	reg = <0x02004e00 0x100>;
+	interrupts = <8 4 /* level */>;
+	clocks = <&spim1_clk>;
+	num-cs = <3>;
+	clock-frequency = <40000000>;
+	dmas = <&dma 7 0xffffffff
+		&dma 8 0xffffffff>; /* -1 for any channel */
+	dma-names = "rx", "tx";
+};
+
+Board-specific configuration:
+
+&spi1 {
+	mtd@0 {
+		compatible = "atmel,at45", "atmel,dataflash";
+		spi-max-frequency = <20000000>;
+		reg = <0>; /* chip select */
+	};
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 6931c43..ef59ef3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -22,6 +22,7 @@
 emmicro	EM Microelectronic
 epson	Seiko Epson Corp.
 est	ESTeem Wireless Modems
+frontier Frontier Silicon
 fsl	Freescale Semiconductor
 GEFanuc	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 gef	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
@@ -57,6 +58,7 @@
 st	STMicroelectronics
 ste	ST-Ericsson
 stericsson	ST-Ericsson
+toumaz	Toumaz
 ti	Texas Instruments
 toshiba	Toshiba Corporation
 via	VIA Technologies, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt
new file mode 100644
index 0000000..cd7ede6
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt
@@ -0,0 +1,14 @@
+* ImgTec PowerDown Controller (PDC) Watchdog Timer (WDT)
+
+Required properties:
+- compatible : Should be "img,pdc-wdt"
+- reg : Should contain WDT registers location and length
+- interrupts : Should contain WDT interrupt
+
+Examples:
+
+wdt@02006000 {
+	compatible = "img,pdc-wdt";
+	reg = <0x02006000 0x20>;
+	interrupts = <31 4>;
+};
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a7054..d442f2d 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -39,6 +39,8 @@
 	- info on the driver for the Metronome display controller.
 modedb.txt
 	- info on the video mode database.
+pdpfb.txt
+	- info on the ImgTec PDP frame buffer driver.
 pvr2fb.txt
 	- info on the PowerVR 2 frame buffer driver.
 pxafb.txt
diff --git a/Documentation/fb/pdpfb.txt b/Documentation/fb/pdpfb.txt
new file mode 100644
index 0000000..bc5a561
--- /dev/null
+++ b/Documentation/fb/pdpfb.txt
@@ -0,0 +1,383 @@
+
+
+pdpfb - fbdev driver for PDP in ImgTec's system on chips
+========================================================
+
+
+Supported Hardware
+==================
+
+	System on Chips from Imagination Technologies
+	* Chorus 2 (PDP 1.0.0)
+	* Comet (PDP 1.0.1)
+
+
+Supported Features
+==================
+
+	Graphics plane framebuffer
+	* 8bit colour indexed mode
+	* 8bit grayscale mode
+	* 16bit RGB 565 mode
+	* 16bit ARGB 4444 mode
+	* 16bit ARGB 1555 mode
+	* 24bit RGB 888 mode (PDP 1.0.1)
+	* 32bit ARGB 8888 mode (PDP 1.0.1)
+	* panning in both directions (horizontally in steps)
+	* pixel and line doubling and halving (using PDP_SETGEOM ioctl)
+
+	Video plane framebuffer (optional)
+	* 12bit 4:2:0 planar mode (see PDPIO_GETPLANAR ioctl)
+	* 12bit 4:2:0 interleaved planar YUV (PDP 1.0.1)
+	* 16bit 4:2:2 UY0VY1 8888 mode
+	* 16bit 4:2:2 VY0UY1 8888 mode
+	* 16bit 4:2:2 Y0UY1V 8888 mode
+	* 16bit 4:2:2 Y0VY1U 8888 mode
+	* panning in both directions (in small steps)
+	* scaling as supported by hardware (using PDP_SETGEOM ioctl)
+	* adjustment of YUV plane offsets and strides (for planar mode)
+	* adjustment of colour space conversion coefficients
+
+	General
+	* background colour
+	* plane enabling and disabling
+	* plane rearranging and positioning
+	* plane blending
+	* plane colour keying
+
+Missing Features
+================
+
+	General
+	* Hardware gamma correction (PDP 1.0.1)
+	* Hardware acceleration using 2D unit (Comet)
+	* Hardware cursor (PDP 1.0.1)
+
+	Video plane framebuffer
+	* 16bit 4:2:0 T88CP tiled mode (PDP 1.0.0 only)
+	* 16bit 4:2:2 T88CP tiled mode (PDP 1.0.0 only)
+
+
+Known Problems
+==============
+
+	* Timing data provided by FBIOGET_VSCREENINFO not accurate when plane
+	  is partially off the screen.
+
+
+Video Framebuffer
+=================
+
+	To set the pixel format of the video framebuffer, set the nonstd field
+	in struct fb_var_screeninfo to one of the PDP_VID_PIXFMT_ constants
+	specified in <video/pdpfb.h>:
+	* PDP_VID_PIXFMT_420_PL8
+		12bit 4:2:0 planar mode
+	* PDP_VID_PIXFMT_420_PL8IVU
+		12bit 4:2:0 planar with byte interleaved chroma
+		V occupies the least significant bits
+	* PDP_VID_PIXFMT_420_PL8IUV
+		12bit 4:2:0 planar with byte interleaved chroma
+		U occupies the least significant bits
+	* PDP_VID_PIXFMT_422_UY0VY1_8888
+		16bit 4:2:2 UY0VY1 8888 mode
+		U occupies the least significant bits
+	* PDP_VID_PIXFMT_422_VY0UY1_8888
+		16bit 4:2:2 VY0UY1 8888 mode
+		V occupies the least significant bits
+	* PDP_VID_PIXFMT_422_Y0UY1V_8888
+		16bit 4:2:2 Y0UY1V 8888 mode
+		Y0 occupies the least significant bits
+	* PDP_VID_PIXFMT_422_Y0VY1U_8888
+		16bit 4:2:2 Y0VY1U 8888 mode
+		Y0 occupies the least significant bits
+
+	Planar pixel format offsets are found using the PDPIO_GETPLANAR ioctl.
+	See also PDPIO_SETCSC ioctl for configuring colour space conversion.
+
+
+Configuration
+=============
+
+	For the PDP driver to be used, the screen may need to be enabled
+	(usually found in Processor type and features -> Board support).
+
+	Kernel config options (found in Device Drivers -> Graphics support ->
+	Support for framebuffer devices).
+	* FB_PDP
+		Enables the PDP framebuffer driver.
+	* FB_PDP_GFX_VIDEOMEM
+		Size of graphics framebuffer in bytes.
+	* FB_PDP_GFX_FIX_NATIVE_RES
+		Forces the graphics framebuffer to always be at the native
+		screen resolution. This can be useful in preventing programs
+		from setting an inappropriate resolution and thinking that it
+		fills the screen.
+	* FB_PDP_VID
+		Enable the video plane framebuffer device. This framebuffer can
+		be positioned on top of the main framebuffer and supports YUV
+		pixel formats.
+	* FB_PDP_VID_VIDEOMEM
+		Size of video framebuffer in bytes
+	* FB_PDP_USERMEM
+		Allows userland code to provide framebuffer memory.
+
+
+Module Parameters
+=================
+
+	Physical memory regions can be provided to the driver using module
+	parameters. This allows video memory to be set aside by the bootloader
+	which might be either too large for linux to allocate, or in some fast
+	internal memory.
+
+	These parameters can be provided in the kernel command line:
+	* pdpfb.videomem_base
+	* pdpfb.videomem_len
+		Physical base address and length of a region of memory for use
+		by both graphics and video planes if they don't have their own
+		dedicated memory available. This overrides any memory
+		ioresources of type PDPFB_IORES_MEM (see below).
+	* pdpfb.gfx_videomem_base
+	* pdpfb.gfx_videomem_len
+		[Base and] length of graphics plane video memory. The length
+		defaults to the kernel config option FB_PDP_GFX_VIDEOMEM. The
+		base is ignored on SoCs which have a shared base pointer and
+		overrides any memory ioresources of type PDPFB_IORES_GFXMEM
+		(see below).
+	* pdpfb.vid_videomem_base
+	* pdpfb.vid_videomem_len
+		[Base and] length of video plane video memory. The length
+		defaults to the kernel config option FB_PDP_VID_VIDEOMEM. The
+		base is ignored on SoCs which have a shared base pointer and
+		overrides any memory ioresources of type PDPFB_IORES_VIDMEM
+		(see below).
+
+
+Platform Data
+=============
+
+	Physical memory regions can be provided to the driver using resources
+	in platform data. This allows video memory to be set aside by the
+	bootloader which might be either too large for linux to allocate, or in
+	some fast internal memory.
+
+	Use the flags in <video/pdpfb.h> to specify the memory types:
+	* IORESOURCE_MEM | PDPFB_IORES_MEM
+		A region of memory for use by both graphics and video planes if
+		they don't have their own dedicated memory available (see
+		above).
+	* IORESOURCE_MEM | PDPFB_IORES_GFXMEM
+		A region of memory for use by graphics planes only. Don't use
+		on SoCs which have a shared base pointer.
+	* IORESOURCE_MEM | PDPFB_IORES_VIDMEM
+		A region of memory for use by video planes only. Don't use on
+		SoCs which have a shared base pointer.
+	If dedicated memory for a plane cannot be found in the regions
+	specified by platform data, kernel memory is allocated instead.
+
+
+Ioctl Interface
+===============
+
+#include <video/pdpfb.h>
+
+The following ioctls can be used with all PDP framebuffer devices.
+
+* FBIOGET_VBLANK
+	Takes a pointer to a struct fb_vblank.
+
+	Gets some information about the current scan line position.
+
+	The following are supported (the flags field should be checked
+	programmatically before using these):
+	* Vertical blank status (FB_VBLANK_VBLANKING)
+	* Vertical sync status (FB_VBLANK_VSYNCING)
+	* Vertical sync count (count)
+	* Line number (vcount)
+
+* FBIO_WAITFORVSYNC
+	Takes a pointer to a display number (must be 0).
+
+	Waits until the start of the next vertical sync on the specified
+	display before returning.
+
+* PDPIO_GETBGND
+* PDPIO_SETBGND
+	Takes a pointer to a 32bit unsigned integer.
+
+	Gets or sets the background colour in RGB888 format.
+
+* PDPIO_GETSCRGEOM
+	Takes a pointer to a struct pdpfb_geom.
+
+	Gets the geometry of the screen in the coordinate space of the
+	PDPIO_GETGEOM and PDPIO_SETGEOM ioctls below.
+
+* PDPIO_SETUSERMEM
+	Takes a pointer to a struct pdpfb_usermem.
+
+	Only available when CONFIG_FB_PDP_USERMEM is enabled.
+
+	Attempts to allocate framebuffers using user-provided memory. The
+	phys & len fields of the struct describe the memory to use, and
+	the flags field consists of one or more of the following flags:
+
+	* PDP_USERMEM_ALLPLANES
+	  When set, the provided memory will be used to allocate framebuffers
+	  for all planes. Otherwise, only the plane corresponding to the
+	  opened framebuffer device will be affected. Note that on systems
+	  where planes share a base pointer it is only valid to affect all
+	  planes (ie. to set this flag).
+
+	If allocation from user memory fails, planes will be reallocated
+	from non-user-provided memory. Either way, note that any previous
+	mappings of the framebuffer will now be invalid.
+
+
+The following ioctls can be used with all PDP framebuffer devices but apply
+specifically to the one you use it with.
+
+* PDPIO_GETEN
+* PDPIO_SETEN
+	Takes a pointer to an integer.
+
+	Gets or sets whether the plane of the chosen framebuffer is enabled
+	or not.
+
+* PDPIO_GETPLANEPOS
+* PDPIO_SETPLANEPOS
+	Takes a pointer to an integer.
+
+	Gets or sets the plane's position in relation to other planes, where
+	higher values are on top of other planes and lower values are below
+	other planes. For example set the video framebuffer's PLANEPOS to 0
+	to move it below the graphics plane so that the graphics plane's new
+	PLANEPOS is changed to 1.
+
+* PDPIO_GETGEOM
+* PDPIO_SETGEOM
+	Takes a pointer to a struct pdpfb_geom.
+
+	Gets or sets the screen position and size of the plane.
+	A width and height of 0 indicates that no scaling takes place, and
+	the plane's size will be the framebuffer's xres and yres.
+	Some PDPs only support arbitrary scaling horizontally on the video
+	plane. Other scale values will snap to doubling or halving.
+
+* PDPIO_GETCKEYMODE
+* PDPIO_SETCKEYMODE
+	Takes a pointer to an unsigned integer.
+
+	Gets or sets the colour key mode:
+	* PDP_CKEYMODE_DISABLE
+		Do not perform colour keying on this plane.
+	* PDP_CKEYMODE_PREVIOUS
+		Compare the masked bits of the previous plane with the colour
+		key and display previous plane colour if they match.
+	* PDP_CKEYMODE_CURRENT
+		Compare the masked bits of the current plane with the colour
+		key and display previous plane colour if they match.
+
+* PDPIO_GETCKEY
+* PDPIO_SETCKEY
+	Takes a pointer to a struct pdpfb_ckey.
+
+	Gets or sets the colour key and the colour key mask of the plane in
+	RGB888 format.
+	When colour keying is in use, a colour specified by the colour key
+	mode is compared with the colour key, and if all the masked bits match
+	then the previous plane colour is used instead.
+
+* PDPIO_GETBLENDMODE
+* PDPIO_SETBLENDMODE
+	Takes a pointer to an unsigned integer.
+
+	Gets or sets the blend mode of the plane:
+	* PDP_BLENDMODE_NOALPHA
+		No blending takes place with the previous plane.
+	* PDP_BLENDMODE_INVERT
+		Blending takes place using each pixel's alpha.
+		This behaves the same as PDP_BLENDMODE_PIXEL except that an
+		alpha other than all 0's or all 1's inverts the previous colour.
+	* PDP_BLENDMODE_GLOBAL
+		Blending takes place using the global alpha value.
+	* PDP_BLENDMODE_PIXEL
+		Blending takes place using each pixel's alpha.
+		For single bit alpha pixel formats, pixels with alpha of 0 use
+		the global alpha value, and pixels with alpha of 1 are opaque.
+
+* PDPIO_GETGALPHA
+* PDPIO_SETGALPHA
+	Takes a pointer to an unsigned integer.
+
+	Gets or sets the global alpha value of a plane in the range 0-255.
+	The blend mode must be set to PDP_BLENDMODE_GLOBAL for this value to
+	take effect.
+
+
+The following ioctls are specific to PDP video plane framebuffers.
+
+* PDPIO_GETCSC
+* PDPIO_SETCSC
+	Takes a pointer to a struct pdpfb_vid_csc.
+
+	Gets or sets YUV to RGB colour space conversion settings.
+	The colour space conversion coefficients should be in the range
+	-1024 to 1023, and are used as follows:
+		R = (ry*(Y-16)/64 + rv*(V-128)/64 + ru*(U-128)/64) / 4
+		G = (gy*(Y-16)/64 + gv*(V-128)/64 + gu*(U-128)/64) / 4
+		B = (by*(Y-16)/64 + bv*(V-128)/64 + bu*(U-128)/64) / 4
+
+	The following preset coefficients are available. When setting,
+	the structure will be filled with the new coefficients.
+	* PDP_VID_CSCPRESET_HDTV
+		ITU-R BT709 (1990)
+		MPEG2 format 0,1
+	* PDP_VID_CSCPRESET_SDTV (default)
+		ITU-R BT601
+		ITU-R BT624-4 Sys BG
+		ITU-R BT470-2 Sys BG
+		SMPTE 170M
+		MPEG2 format 2,3,5,6
+	* PDP_VID_CSCPRESET_LEGACYHDTV
+		SMPTE 240M (1987)
+		MPEG2 format 7
+	* PDP_VID_CSCPRESET_LEGACYSDTV
+		FCC
+		ITU-R BT624-4 Sys M
+		ITU-R BT470-2 Sys M
+		MPEG2 format 4
+
+	You can also specify whether chroma samples are co-sited with luma as
+	opposed to horizontally offset, by setting the cosited field.
+
+	PDPIO_SETCSC errors:
+	EINVAL	Preset code was unrecognised, or was set to 0 (custom)
+		and one or more of the coefficients was out of range.
+
+* PDPIO_GETPLANAR
+* PDPIO_SETPLANAR
+	Takes a pointer to a struct pdpfb_vid_planar.
+
+	Gets or sets the YUV plane arrangement in the framebuffer memory.
+
+	If you use PDPIO_SETPLANAR, the overridden offsets and strides will
+	apply until the next change of pixel format or virtual resolution.
+
+	If the PDPIO_SETPLANAR ioctl fails, fields in the pdpfb_vid_planar
+	struct may have been altered to give an indication of what was invalid.
+	Be sure to rewrite them before trying again.
+
+	PDPIO_SETPLANAR errors:
+	ENOMEM	The planes do not fit in the video memory.
+	EINVAL	Pixel format isn't a planar pixel format.
+	EINVAL	Offsets or strides do not conform to hardware restrictions.
+		Y offset must be 16 byte aligned, U and V offsets must be 8 or
+		16 byte aligned on Comet or Chorus2 respectively, Y line length
+		must be 16 byte aligned, and U and V line lengths must be equal
+		to or half of the Y line length.
+
+
+--
+James Hogan <james.hogan@imgtec.com>
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 237acab..61804c5 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -316,6 +316,14 @@
 0xDB	00-0F	drivers/char/mwave/mwavepub.h
 0xDD	00-3F	ZFCP device driver	see drivers/s390/scsi/
 					<mailto:aherrman@de.ibm.com>
+0xF2	00-3F	video/pdpfb.h		pdpfb
+					<mailto:james.hogan@imgtec.com>
+0xF2	40-7F	linux/event_timer.h	event timer
+					<mailto:neil.jones@imgtec.com>
+0xF2	80-BF	linux/uccp.h		uccp
+					<mailto:james.hogan@imgtec.com>
+0xF2	C0-FF	linux/sgx2d.h		SGX 2D block
+					<mailto:james.hogan@imgtec.com>
 0xF3	00-3F	drivers/usb/misc/sisusbvga/sisusb.h	sisfb (in development)
 					<mailto:thomas@winischhofer.net>
 0xF4	00-1F	video/mbxfb.h		mbxfb
diff --git a/Documentation/metag/00-INDEX b/Documentation/metag/00-INDEX
index db11c51..85ed097 100644
--- a/Documentation/metag/00-INDEX
+++ b/Documentation/metag/00-INDEX
@@ -1,4 +1,10 @@
 00-INDEX
 	- this file
+cachepart.txt
+	- Documents the Meta cache partition manipulation interface
+coremem.txt
+	- Documents the core memory interface used by suspend code
 kernel-ABI.txt
 	- Documents metag ABI details
+tz1090/
+	- Documentation specific to the TZ1090 (Comet) SoC
diff --git a/Documentation/metag/cachepart.txt b/Documentation/metag/cachepart.txt
new file mode 100644
index 0000000..1f84173
--- /dev/null
+++ b/Documentation/metag/cachepart.txt
@@ -0,0 +1,40 @@
+
+Meta Cache Partitioning
+=======================
+
+The cachepart functions are for manipulating the local and global per hardware
+thread cache partitions. See arch/metag/include/asm/cachepart.h for the
+documentation of the functions.
+
+The main use of this at the moment is to make sure that the current thread has a
+global cache partition that is big enough to hold locked in core cache during a
+standby (see coremem.txt for more information).
+
+
+cachepart_min_iglobal
+---------------------
+
+This function rearranges the current thread's cache partitions so that global
+cache is available, specifically private global cache if CACHEPART_PRIV_GLOBAL
+is specified in the flags (e.g. for use by locked in core cache mode). If the
+rearrangement would violate the constraints of the architecture, for example by
+having a cache partition that is too small, or smaller than the requested size,
+an error code is returned and nothing is changed. For example, if the icache is
+mapped like this:
+
+0          1/4         1/2         3/4          1
++-----------+-----------+-----------------------+
+| T0 local  | T1 local  | T0/T1 global          |
++-----------+-----------+-----------------------+
+
+It would be rearranged to the following when called from T0 with the PRIV_GLOBAL
+flag:
+
+0    1/8   1/4         1/2         3/4          1
++-----+-----+-----------+-----------------------+
+| T0L | T0G | T1 local  | T1 global             |
++-----+-----+-----------+-----------------------+
+
+And would be left alone without the PRIV_GLOBAL flag (since T0's global
+partition is likely already big enough). Upon calling cachepart_restore_iglobal,
+the cache partitions would be restored to their previous layout.
diff --git a/Documentation/metag/coremem.txt b/Documentation/metag/coremem.txt
new file mode 100644
index 0000000..577023b
--- /dev/null
+++ b/Documentation/metag/coremem.txt
@@ -0,0 +1,33 @@
+
+Meta Core Memory
+================
+
+The files coremem.h and coremem.c implement the main interface which allows
+suspend code to copy a suspend routine into core memory so that DDR can be put
+into self refresh and the DDRC powered down.
+
+metag_coremem_alloc() reserves a section of core mem according to some flags.
+
+metag_coremem_free() indicates that the memory is finished with.
+
+metag_coremem_push() memcpy's data to the next space in the core memory and
+moves the free pointer onwards. This is intended for copying asm function code
+into instruction memory.
+
+metag_coremem_alloc uses a simple array of coremem regions to find an
+appropriate region to allocate. This array is defined for TZ1090 only at the
+moment (soc/tz1090/coremem.c).
+
+
+Locked in Core Cache Mode
+-------------------------
+
+Locked in core instruction cache memory can be asked for. After the code is
+prefetched or executed to lock the code into the icache it is possible to
+power down the DDR controller. Since this cache is thread specific it does not
+interfere with the running of other threads.
+
+The actual setup is done by metag_coremem_push (which can only be called once
+after a metag_coremem_alloc). See soc/tz1090/suspend.S for an example of
+prefetching code into the icache to get it locked in. It is not currently
+possible to use ICACHE and DCACHE coremem regions at the same time.
diff --git a/Documentation/metag/tz1090/00-INDEX b/Documentation/metag/tz1090/00-INDEX
new file mode 100644
index 0000000..903263f
--- /dev/null
+++ b/Documentation/metag/tz1090/00-INDEX
@@ -0,0 +1,6 @@
+00-INDEX
+	- this file
+pdc.txt
+	- Describes the PowerDown Controller (PDC) SYS_WAKEUP interface
+suspend.txt
+	- Documents how suspend code works on Comet
diff --git a/Documentation/metag/tz1090/pdc.txt b/Documentation/metag/tz1090/pdc.txt
new file mode 100644
index 0000000..86ce079
--- /dev/null
+++ b/Documentation/metag/tz1090/pdc.txt
@@ -0,0 +1,15 @@
+
+Powerdown Controller (PDC) and SYS_WAKE inputs
+==============================================
+
+A PDC driver is implemented, primarily to handle the sys_wake input pins.  It makes
+a simple sys_wake interface available so that boards can set up the sys_wake
+pins in the appropriate way. For example the bring up board has buttons wired up
+to 3 of the sys_wake inputs, and one is configured as a wake button. They can
+also be configured to call a callback function when they fire, or to cause the
+watchdog to reset the system if they are held for several seconds. Additionally
+each sys_wake's interrupt mode can be set.
+
+These capabilities would allow an input driver to be added for the buttons on
+the bring up board so that userland can react to the button presses and choose
+which ones should be able to wake the system.
diff --git a/Documentation/metag/tz1090/suspend.txt b/Documentation/metag/tz1090/suspend.txt
new file mode 100644
index 0000000..afa07ab
--- /dev/null
+++ b/Documentation/metag/tz1090/suspend.txt
@@ -0,0 +1,99 @@
+
+Comet Suspend Modes
+===================
+
+See Documentation/metag/tz1090/pdc.txt for PDC and SYS_WAKEUP information.
+
+
+Standby
+-------
+
+Comet power management code (soc/tz1090/pm.c) implements standby, and works
+using the metag coremem interface (see coremem.txt) to copy assembly code into
+core memory. It also disables peripheral clocks and the clock outputs.
+
+Note that on SMP in global space this process may not work correctly if another
+thread is locking code into the same global icache partition and there isn't
+enough of it for the suspend function to be locked in. The SoC will simply not
+wake up as it will have tried to fetch the suspend function from the DDR while
+the DDRC is powered down. This can be worked around if the core code memory is
+not in use by using that instead of locked in cache lines (see call to
+metag_coremem_alloc in comet_pm_standby).
+
+The actual assembly code is in soc/tz1090/suspend.S. The reason for hand coding
+the assembly is so that it can be easily and safely copied into core memory
+(copying C functions is trickier as it's hard to know the length of the code in
+them, and we need to be sure it's position independent after loading).  It
+powers down the DDR controller, first putting the DDR into self refresh mode.
+It bypasses the PLL by switching the system clock to XTAL1, and then powers down
+the PLL.  It enables clock deletion to further decrease the system clock.  It
+then waits for an interrupt on a given trigger (the txmask value is provided in
+the first argument to the function), and powers the PLL and DDR back up again.
+
+Due to a problem with the DDR clock floating high when the DDRC is powered down
+(which prevents self-refresh of the DDR and causes data loss), a DDR package
+specific value is written to the EMR register of the DDR to remove the
+termination voltage. This is done in a macro in asm/soc-tz1090/ddr.inc which is
+intended to abstract the suspend code from the DDR package specific code. Only
+code for the bring up board DDR package is currently in ddr.inc.
+
+Comet standby mode does not use the Powerdown Controller (PDC) in the Comet to
+do the power down or power up, because use of individual power domains is not
+implemented in the chip, and the Meta must remain powered for standby mode.
+
+
+Suspend to RAM
+--------------
+
+Comet power management code (soc/tz1090/pm.c) implements suspend to RAM for
+boards which support controlling of the SoC power supplies with the EXT_POWER
+pin, which like standby works using the metag coremem interface (see
+coremem.txt) to copy assembly code into core memory.
+
+The assembly code for suspend just puts the DDR into self refresh like standby,
+powers down the main power island, and waits for a while. This should cause the
+board to power down the SoC including the Meta (for boards that don't support
+this the wait will complete and it will return, and be treated as an error).
+
+Since the PDC is responsible for powering the SoC back up, only PDC interrupts
+can be used to wake (RTC, Infrared, Sys Wakes), and routing of interrupts from
+other peripherals on the SoC to Sys Wake 3 is not possible since there's only
+one power island so the whole SoC except the PDC power domain is powered down.
+
+When the SoC powers back up it boots using the normal Comet boot process,
+therefore prior to suspending the power management code does a setjmp (see
+arch/metag/kernel/suspend.S and arch/metag/include/asm/suspend.h) and stores the
+jump buffer address and the address of a resume function into the PDC soft reset
+protected registers (see arch/metag/soc/tz1090/bootprot.c). When the bootloader
+is loaded soon after wake and reset the DDR will be reconfigured which takes the
+DDR out of self refresh. The bootloader checks the PDC soft reset protected
+registers, and if it needs to resume it simply calls the provided function with
+the provided data in the first argument.
+
+Note: bad things can happen if the bootloader used to boot Linux is different to
+the bootloader used to resume from suspend.
+
+The kernel's resume entry function then sets a few things up and performs a
+longjmp back to the point where setjmp was called, at which point the resume
+process begins.
+
+In summary the core suspend to RAM process looks like this:
+ - setjmp (kernel/suspend.S)
+ - store jump buffer pointer and resume entry function in PDC soft reset
+   protected registers using a certain boot protocol (soc/tz1090/bootprot.c)
+ - jump into suspend function (soc/tz1090/suspend.S) in cache locked lines,
+   which puts DDR into self refresh and powers down the main power domain
+   (EXT_POWER pin)
+ - the board then powers down all of the SoC except the PDC power domain
+ - when the PDC receives a wake interrupt from one of its wake sources (RTC,
+   Infrared, Syswakes) it signals the board to power the SoC back up again
+   (EXT_POWER pin)
+ - the board powers back up the SoC power supplies and the PDC does a soft reset
+ - the Meta boots normally using the Comet bootrom
+ - secload will initialise the DDR as it loads the LDR of the bootloader. This
+   brings the DDR out of self refresh mode with its contents intact.
+ - one of the bootloaders reads the PDC soft reset protected registers before
+   loading Linux and jumps to Linux's Comet resume entry function
+ - the resume entry function performs a longjmp to get back to the same stack
+   position and PC as setjmp was called. The MMU is reconfigured and the resume
+   process begins.
diff --git a/Documentation/sgx2d.txt b/Documentation/sgx2d.txt
new file mode 100644
index 0000000..64de225
--- /dev/null
+++ b/Documentation/sgx2d.txt
@@ -0,0 +1,99 @@
+
+
+sgx2d - char driver for ImgTec's PowerVR SGX 2D block
+=====================================================
+
+This driver creates a device node (/dev/sgx2d) which allows a userland SGX 2D
+driver (e.g. in DirectFB) to access the SGX 2D slave port and associated
+registers.
+
+
+Configuration
+=============
+
+	Kernel config options (found in Device Drivers -> Character Devices).
+	* IMG_SGX2D
+		Enables the SGX2D character driver.
+
+	The SGX 2D block is set up with platform data, which you may need to
+	enable in Processor type and features.
+
+
+Ioctl Interface
+===============
+
+#include <linux/sgx2d.h>
+
+The following ioctls can be used with the sgx2d device.
+
+* SGX2DIO_WAITIDLE
+	Sleeps until the SGX 2D block is idle. This waits for a 2D event
+	interrupt if the block is busy.
+
+* SGX2DIO_SOFTRST
+	Soft reset the 2D block. This releases any processes waiting for
+	idleness.
+
+* SGX2DIO_GETVERS
+	Takes a pointer to a struct sgx2d_vers.
+
+	Gets information and version information about the SGX 2D block. The
+	caps field is a bitmask of capabilities, and can include the following
+	bitmasks, OR'd together:
+	* SGX2D_CAP_FIFOFREE
+		Indicates that a fifo free field is usable.
+
+* SGX2DIO_GETREG
+	Takes a pointer to a struct sgx2d_reg.
+
+	Gets information about where in the IO memory to access a particular
+	register field. Set the id field before the ioctl call. If the ioctl
+	succeeds the offset field will specify the offset of the register into
+	the IO memory, and the mask and shift fields specify where in the
+	register the specified field is.
+
+	Valid register field ids are:
+	* SGX2D_REG_SLAVEPORT (write only)
+		The 32 bit slave port to write the SGX command stream to. It
+		is safe to ignore the mask and offset fields.
+	* SGX2D_REG_FIFOFREE (read only)
+		The field specifying how much space is left in the slave port.
+	* SGX2D_REG_BLTCOUNT (read only)
+		The field specifying how many blits have been completed.
+	* SGX2D_REG_BUSY (read only)
+		The field specifying whether the 2D block is busy.
+	* SGX2D_REG_IDLE (read only)
+		The field specifying whether the 2D block is idle.
+	* SGX2D_REG_BASEADDR
+		The field to put the base physical memory address (divided by
+		4) that all addresses written to the slave port are relative
+		to. It is safe to ignore the mask and offset fields.
+
+	Note that not all of these registers might exist. For example the
+	SGX2D_REG_FIFOFREE register does not exist on Comet.
+
+	Important: It is NOT safe to read from a write only register or write
+	to a read only register.
+
+* SGX2DIO_GETMEM
+	Takes a pointer to a struct sgx2d_meminfo.
+
+	Gets information about the IO memory, and how to map it into userland
+	if necessary. After using this ioctl successfully, there are two cases
+	the userland program should handle:
+	* If the flags field is zero
+		The IO registers needed to access the SGX 2D block are already
+		accessible from userland. The memory area starts at the
+		address in the addr field, and the length in bytes is provided
+		in the len field.
+	* If the flags field is non-zero
+		The IO registers must be mapped in with the mmap syscall. In
+		this case the userland process should provide the value in the
+		addr field as the start parameter, the value in the len field
+		as the length parameter, and the value in the flags field as
+		the flags parameter. If the mmap is successful, the address
+		returned should be used to access the IO registers.
+
+	Important: It is NOT safe or portable to read or write any registers
+	except those accessed with a successful call to the SGX2DIO_GETREG
+	ioctl.
diff --git a/Documentation/sound/alsa/Chorus2-Audio.txt b/Documentation/sound/alsa/Chorus2-Audio.txt
new file mode 100644
index 0000000..948f737
--- /dev/null
+++ b/Documentation/sound/alsa/Chorus2-Audio.txt
@@ -0,0 +1,34 @@
+Chorus2 Audio Support Readme
+============================
+
+Audio support for Chorus2 uses the ALSA SoC Audio framework (ASOC).
+
+Supported Data Formats
+======================
+Signed 16bit Little Endian (left and right Data Packed to 32bit frame)
+Signed 24bit Little Endian (Data left justified to fit 32bit frame).
+
+Supported Sample Rates
+======================
+32000 (32KHz)
+48000 (48KHz)
+64000 (64KHz)
+96000 (96KHz)
+
+Development Status
+==================
+Only Playback has been implemented.
+Testing performed on ATP-DP (Metamorph) base layer only (Line Out).
+Playback of PCM Data (in .pcm and .wav formats) is working for all supported
+sample rates and formats.
+Playback via aplay and madplay / mplayer has been tested.
+Note: mpg123 or apps using libmpg do not play due to the use of floating point;
+use libmad (madplay) instead, as it uses fixed point maths.
+
+Further Work
+=============
+Add support for ATP-DP-Audio (audio Layer) 4 x stereo channels
+Add SPDIF Output support.
+Add SPDIF Input support.
+Add I2S Audio In Support.
+
diff --git a/Documentation/uccp.txt b/Documentation/uccp.txt
new file mode 100644
index 0000000..a8c39f7
--- /dev/null
+++ b/Documentation/uccp.txt
@@ -0,0 +1,71 @@
+
+
+uccp - char driver for ImgTec's Ensigma UCCPs
+=============================================
+
+
+Configuration
+=============
+
+	Kernel config options (found in Device Drivers -> Character Devices).
+	* IMG_UCCP
+		Enables the UCCP character driver.
+
+	The UCCPs are set up with platform data, which you may need to enable
+	in Processor type and features.
+
+
+Ioctl Interface
+===============
+
+#include <linux/uccp.h>
+
+The following ioctls can be used with each uccp device (/dev/uccp0 etc).
+
+* UCCPIO_GETREGION
+	Takes a pointer to a struct uccp_region.
+
+	Gets the offset and size of a specific type of region in the uccp
+	memory area. Where applicable the region returned will be specific to
+	the UCCP the ioctl is performed on. Set the type field to one of the
+	following constants before the ioctl, and the other fields will be
+	filled in if no error occurred:
+	* UCCP_REGION_ALL
+		The entire UCCP memory region.
+	* UCCP_REGION_SYS_INTERNAL
+		System internal memory.
+	* UCCP_REGION_MTX
+		MTX specific memory.
+	* UCCP_REGION_MCP_16_BIT
+		MCP 16bit memory.
+	* UCCP_REGION_MCP_24_BIT
+		MCP 24bit memory.
+
+* UCCPIO_WRREG
+* UCCPIO_RDREG
+	Takes a pointer to a struct uccp_reg.
+
+	Read or write to a register on the UCCP (on the system bus). Set the
+	op field to one of the following:
+	* UCCP_REG_DIRECT
+		Accesses the register directly.
+	* UCCP_REG_INDIRECT (write only)
+		Accesses the register indirectly.
+	* UCCP_REG_MCPPERIP
+		Accesses MCP peripheral memory (indirectly using
+		MCP_SYS_HACC_CMD).
+	* UCCP_REG_MCPPERIP_PACK
+		Accesses MCP peripheral memory (indirectly using
+		MCP_SYS_HACC_CMD with the PACK bit set).
+	And set the reg field to the register number you want to use.
+
+* UCCPIO_CLRMCREQ
+* UCCPIO_SETMCREQ
+* UCCPIO_GETMCREQ
+	Takes a pointer to a struct uccp_mcreq.
+
+	Clear, set, or get an mcreq field, defining a memory mapping from
+	physical memory into bulk memory.
+
+* UCCPIO_SRST
+	Soft reset the UCCP.
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index dcd9440..f8c9c79 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -6,6 +6,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_64BIT_ALIGNED_ACCESS
+	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
@@ -96,6 +97,12 @@
 
 source "arch/metag/Kconfig.soc"
 
+config METAG_ROM_WRAPPERS
+	bool
+	help
+	  This indicates that this core needs to use patched versions of the
+	  ROM for its interrupt entry points.
+
 config METAG_META12
 	bool
 	help
@@ -255,6 +262,16 @@
 
 endmenu
 
+menu "Board support"
+
+source "arch/metag/Kconfig.board"
+
+endmenu
+
+source "arch/metag/drivers/Kconfig"
+
+source "arch/metag/kernel/cpu/Kconfig"
+
 source "kernel/Kconfig.preempt"
 
 source kernel/Kconfig.hz
diff --git a/arch/metag/Kconfig.board b/arch/metag/Kconfig.board
new file mode 100644
index 0000000..3b53671
--- /dev/null
+++ b/arch/metag/Kconfig.board
@@ -0,0 +1,162 @@
+if SOC_TZ1090
+
+choice
+	prompt "Board support"
+
+config COMET_NOBOARD
+	bool "None"
+	help
+	  No specific board support.
+
+config COMET_BUB
+	bool "Comet bring-up-board on-board peripherals (flash)"
+	help
+	  Support for the bring-up-board on-board peripherals.
+
+config TZ1090_01XX
+	bool "01SP/01TT (Comet Metamorph/Minimorph) on-board peripherals"
+	help
+	  Support for on-board peripherals on the 01SP (Comet Metamorph) and
+	  01TT (Comet Minimorph) boards.
+
+config POLARIS
+	bool "01XK (Polaris) on-board peripherals"
+	select METAG_COREMEM
+	help
+	  Support for the Polaris on-board peripherals.
+
+endchoice
+
+endif
+
+config COMET_BUB_XTAL3
+	bool "Crystal in XTAL3 (32.768KHz)"
+	depends on COMET_BUB
+	help
+	  Select this option if the bring-up-board has a 32.768 KHz crystal in
+	  XTAL3.
+
+config COMET_BUB_DISPLAY
+	bool "Comet bring-up-board 640x480 TFT display"
+	depends on COMET_BUB
+	select FB
+	select FB_PDP
+	help
+	  Support for the bring-up-board TFT display (640x480).
+
+config COMET_BUB_LCD
+	bool "Comet Bring-up-board LCD display"
+	depends on COMET_BUB
+	help
+	  Support for the LCD Display.
+
+choice
+	prompt "Chorus2 display type"
+	depends on SOC_CHORUS2
+	default CHORUS2_NODISPLAY
+
+config CHORUS2_NODISPLAY
+	bool "None"
+	help
+	  No display support.
+
+config CHORUS2_LCD
+	bool "LCD"
+	help
+	  Support for LCD displays.
+
+config CHORUS2_TFT
+	bool "TFT"
+	help
+	  Support for TFT displays.
+
+endchoice
+
+config ATP_DP
+	bool "ATP-dp on-board peripherals (SD card, flash)"
+	depends on SOC_CHORUS2
+	help
+	  Support for ATP-dp on-board peripherals.
+
+config ATP_DP_DISPLAY1
+	bool "ATP-dp:display1 240x320 TFT board"
+	depends on ATP_DP && CHORUS2_TFT
+	select FB
+	select FB_PDP
+	select I2C
+	select I2C_CHORUS2
+	help
+	  Support for ATP-dp:display1 TFT daughterboard (240x320).
+
+config ATP_DP_DISPLAY2
+	bool "ATP-dp:display2 640x480 TFT board"
+	depends on ATP_DP && CHORUS2_TFT
+	select FB
+	select FB_PDP
+	select I2C
+	select I2C_CHORUS2
+	help
+	  Support for ATP-dp:display2 TFT daughterboard (640x480).
+
+config ATP_DP_LAN1
+	bool "ATP-dp:lan1 ethernet board"
+	depends on ATP_DP
+	help
+	  Support for ATP-dp:lan1 ethernet daughterboard.
+
+config VIVALDI
+	bool "Vivaldi on-board peripherals (flash, wifi)"
+	depends on SOC_CHORUS2
+	help
+	  Support for Vivaldi on-board peripherals.
+
+choice
+	prompt "01SP/01TT Video output"
+	depends on TZ1090_01XX
+	default TZ1090_01XX_HDMI_VID_OUT
+	help
+	  Select which component to drive as the display.
+
+config TZ1090_01XX_NO_VID_OUT
+	bool "None"
+	help
+	  Don't support any Video Out components.
+
+config TZ1090_01XX_HDMI_VID_OUT
+	bool "HDMI"
+	select FB
+	select FB_PDP
+	select FB_MODE_HELPERS
+	help
+	  Support for 01SP/01TT HDMI Video Out component.
+
+config TZ1090_01XX_DISPLAY
+	bool "640x480 TFT display"
+	select FB
+	select FB_PDP
+	help
+	  Support for the 01SP/01TT TFT display (640x480).
+
+endchoice
+
+config TZ1090_01XX_HDMI_AUDIO
+	bool "01SP/01TT HDMI audio out"
+	depends on TZ1090_01XX
+	depends on TZ1090_01XX_HDMI_VID_OUT
+	help
+	  Support for 01SP/01TT HDMI Audio out component.
+
+config TZ1090_01XX_HDMI_AUDIO_51
+	bool "01SP/01TT HDMI 5.1 audio out"
+	depends on TZ1090_01XX
+	depends on TZ1090_01XX_HDMI_VID_OUT
+	depends on TZ1090_01XX_HDMI_AUDIO
+	help
+	  Configure the HDMI to support 5.1 Audio.
+
+config SENSIA_TOUCHSCREEN
+	bool "Sensia touchscreen"
+	depends on COMET_BUB_DISPLAY || TZ1090_01XX_DISPLAY || POLARIS
+	default y if POLARIS
+	help
+	  Support for the Sensia touchscreen.
diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc
index ec079cf..c8ee0386 100644
--- a/arch/metag/Kconfig.soc
+++ b/arch/metag/Kconfig.soc
@@ -8,16 +8,222 @@
 	help
 	  This is a Meta 1.2 FPGA bitstream, just a bare CPU.
 
+config SOC_CHORUS2
+	bool "Chorus2 SoC"
+	select METAG_META12
+	select ARCH_REQUIRE_GPIOLIB
+	select HAVE_CLK
+	select IMG_DMAC
+	select METAG_ROM_WRAPPERS
+	help
+	  This is a Frontier Silicon Chorus2 SoC.
+
 config META21_FPGA
 	bool "Meta 2.1 FPGA"
 	select METAG_META21
 	help
 	  This is a Meta 2.1 FPGA bitstream, just a bare CPU.
 
+config SOC_TZ1090
+	bool "Toumaz Xenif TZ1090 SoC (Comet)"
+	select ARCH_REQUIRE_GPIOLIB
+	select COMMON_CLK
+	select DMADEVICES
+	select IMGPDC_IRQ
+	select MDC_DMA
+	select METAG_LNKGET_AROUND_CACHE
+	select METAG_META21
+	select METAG_SMP_WRITE_REORDERING
+	select PINCTRL
+	select PINCTRL_TZ1090
+	select PINCTRL_TZ1090_PDC
+	select TZ1090_MDC_DMA
+	help
+	  This is a Toumaz Technology Xenif TZ1090 (A.K.A. Comet) SoC containing
+	  a 2-threaded HTP.
+
 endchoice
 
 menu "SoC configuration"
 
+if SOC_TZ1090
+
+config SOC_COMET_ES1
+	bool "Prototype Silicon (ES1)"
+	select FB_PDP_QUEUE_CLUT
+	help
+	  Select this if using Prototype ES1 Silicon
+
+config COMET_CACHEFLUSH_BUG
+	depends on SMP && SOC_COMET_ES1
+	def_bool y
+
+config ARCH_SUSPEND_POSSIBLE
+	def_bool y
+
+config METAG_SUSPEND_MEM
+	bool "Suspend to RAM support"
+	depends on SUSPEND
+	help
+	  Say Y here to enable Suspend to RAM support for Comet.
+
+config COMET_SUSPEND_MEM_SAFE
+	bool "Fake Suspend to RAM in SAFE mode (development only)"
+	depends on METAG_SUSPEND_MEM
+	help
+	  Say Y here to fake Suspend to RAM using a watchdog reset if the SoC is
+	  in SAFE mode.
+
+	  This is intended for purely development purposes to emulate suspend to
+	  RAM on boards which have SAFE mode asserted. Instead of powering off
+	  the SoC and allowing the PDC to control wakeup, it blocks waiting for
+	  the wake interrupt and watchdog soft resets. This causes the PDC to be
+	  reset too, therefore wake interrupts will not be detected by Linux on
+	  resume.
+
+menu "DMA channel assignment"
+
+config SOC_COMET_DMA0
+	bool "Comet DMA channel 0"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA1
+	bool "Comet DMA channel 1"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA2
+	bool "Comet DMA Channel 2"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA3
+	bool "Comet DMA Channel 3"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA4
+	bool "Comet DMA Channel 4"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA5
+	bool "Comet DMA Channel 5"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA6
+	bool "Comet DMA Channel 6"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_COMET_DMA7
+	bool "Comet DMA Channel 7"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+
+endmenu
+
+config SOC_COMET_AUDIOCODEC
+	tristate "Expose audio codec setup"
+	depends on SND_SOC_TANSEN
+	help
+	  This will expose audio codec setup through sysfs.
+
+endif
+
+if SOC_CHORUS2
+
+# Chorus2 specific options
+
+menu "DMA channel assignment"
+
+config SOC_CHORUSX_DMA0
+	bool "Chorus2/3 DMA channel 0"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA1
+	bool "Chorus2/3 DMA channel 1"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA2
+	bool "Chorus2/3 DMA channel 2"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA3
+	bool "Chorus2/3 DMA channel 3"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA4
+	bool "Chorus2/3 DMA channel 4"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA5
+	bool "Chorus2/3 DMA channel 5"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA6
+	bool "Chorus2/3 DMA channel 6"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA7
+	bool "Chorus2/3 DMA channel 7"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA8
+	bool "Chorus2/3 DMA channel 8"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA9
+	bool "Chorus2/3 DMA channel 9"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA10
+	bool "Chorus2/3 DMA channel 10"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+config SOC_CHORUSX_DMA11
+	bool "Chorus2/3 DMA channel 11"
+	default y
+	help
+	  Make this DMA channel available to Linux.
+
+endmenu
+
+endif
+
 if METAG_META21
 
 # Meta 2.x specific options
diff --git a/arch/metag/Makefile b/arch/metag/Makefile
index b566116..f288714 100644
--- a/arch/metag/Makefile
+++ b/arch/metag/Makefile
@@ -20,7 +20,7 @@
 checkflags-$(CONFIG_METAG_META21)	+= -DMETAC_2_1
 CHECKFLAGS				+= -D__metag__ $(checkflags-y)
 
-KBUILD_DEFCONFIG			:= meta2_defconfig
+KBUILD_DEFCONFIG			:= tz1090_defconfig
 
 sflags-$(CONFIG_METAG_META12)		+= -mmetac=1.2
 ifeq ($(CONFIG_METAG_META12),y)
@@ -46,9 +46,33 @@
 core-y					+= arch/metag/kernel/
 core-y					+= arch/metag/mm/
 
+# SoCs
+socdir-$(CONFIG_SOC_CHORUS2)		+= chorus2
+socdir-$(CONFIG_SOC_TZ1090)		+= tz1090
+
+socdirs		:= $(filter-out ., $(patsubst %,%/,$(socdir-y)))
+core-y		+= $(addprefix arch/metag/soc/, $(socdirs))
+
+# Boards
+machdir-$(CONFIG_ATP_DP)		+= atp-dp
+machdir-$(CONFIG_ATP_DP_DISPLAY1)	+= atp-dp/display1
+machdir-$(CONFIG_ATP_DP_DISPLAY2)	+= atp-dp/display2
+machdir-$(CONFIG_ATP_DP_LAN1)		+= atp-dp/lan1
+machdir-$(CONFIG_COMET_BUB)		+= comet-bub
+machdir-$(CONFIG_COMET_BUB_DISPLAY)	+= comet-bub/display
+machdir-$(CONFIG_COMET_BUB_LCD)		+= comet-bub/lcd
+machdir-$(CONFIG_POLARIS)		+= polaris
+machdir-$(CONFIG_SENSIA_TOUCHSCREEN)	+= sensia-touchscreen
+machdir-$(CONFIG_TZ1090_01XX)		+= TZ1090-01XX
+machdir-$(CONFIG_VIVALDI)		+= vivaldi
+
+machdirs	:= $(filter-out ., $(patsubst %,%/,$(machdir-y)))
+core-y		+= $(addprefix arch/metag/boards/, $(machdirs))
+
 libs-y					+= arch/metag/lib/
 libs-y					+= arch/metag/tbx/
 
+drivers-y				+= arch/metag/drivers/
 drivers-$(CONFIG_OPROFILE)		+= arch/metag/oprofile/
 
 boot					:= arch/metag/boot
diff --git a/arch/metag/boards/TZ1090-01XX/Makefile b/arch/metag/boards/TZ1090-01XX/Makefile
new file mode 100644
index 0000000..5a3f500
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the comet 01SP/01TT board specific parts of the kernel
+#
+
+obj-y	:= setup.o
+
+# HDMI Device
+obj-$(CONFIG_TZ1090_01XX_HDMI_VID_OUT)	+= hdmi-out/
+obj-$(CONFIG_TZ1090_01XX_DISPLAY)	+= display/
diff --git a/arch/metag/boards/TZ1090-01XX/display/Makefile b/arch/metag/boards/TZ1090-01XX/display/Makefile
new file mode 100644
index 0000000..2a15adc
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/display/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the comet bring up board TFT screen specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/TZ1090-01XX/display/setup.c b/arch/metag/boards/TZ1090-01XX/display/setup.c
new file mode 100644
index 0000000..83ebc3f
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/display/setup.c
@@ -0,0 +1,74 @@
+/*
+ * boards/TZ1090-01XX/display/setup.c
+ *
+ * Copyright (C) 2010-2011 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <video/pdpfb.h>
+#include <asm/soc-tz1090/pdp.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/clock.h>
+
+static void comet_01xx_set_screen_power(int pwr)
+{
+	/*
+	 * This is a work around.
+	 * The board has the wrong polarity, so the TFT power can be
+	 * controlled instead using a GPIO signal.
+	 */
+	gpio_set_value(GPIO_TFT_PWRSAVE, !!pwr);
+}
+
+/* based on atp-dp:display2 */
+static struct fb_videomode comet01xx_lcd_cfg = {
+	.name = "01SP/01TT:display",
+	.refresh = 60,
+
+	.hsync_len = 30,
+	.left_margin = 114,
+	.xres = 640,
+	.right_margin = 16,
+
+	.vsync_len = 3,
+	.upper_margin = 29,
+	.yres = 480,
+	.lower_margin = 10,
+
+	/* hsync and vsync are active low */
+	.sync = 0,
+};
+
+static struct pdp_lcd_size_cfg comet01xx_lcd_sizecfg = {
+	.width = 115,	/* 115.2mm */
+	.height = 86,	/* 86.4mm */
+};
+
+static struct pdp_sync_cfg comet01xx_lcd_synccfg = {
+	.force_vsyncs = 0,
+	.hsync_dis = 0,
+	.vsync_dis = 0,
+	.blank_dis = 0,
+	.blank_pol = PDP_ACTIVE_LOW,
+	.clock_pol = PDP_CLOCK_INVERTED,
+};
+
+static struct pdp_hwops comet01xx_lcd_hwops = {
+	.set_screen_power = comet_01xx_set_screen_power,
+	.set_shared_base = comet_pdp_set_shared_base,
+};
+
+static int __init display_device_setup(void)
+{
+	pix_clk_set_limits(22660000,	/* 22.66MHz */
+	                   27690000);	/* 27.69MHz */
+
+	return comet_pdp_setup(&comet01xx_lcd_cfg, &comet01xx_lcd_sizecfg,
+			       NULL, &comet01xx_lcd_synccfg,
+			       &comet01xx_lcd_hwops);
+}
+device_initcall(display_device_setup);
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/Makefile b/arch/metag/boards/TZ1090-01XX/hdmi-out/Makefile
new file mode 100644
index 0000000..97b51fb
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/Makefile
@@ -0,0 +1,8 @@
+obj-${CONFIG_TZ1090_01XX_HDMI_VID_OUT}	+= setup.o sii9022a.o
+
+ifeq (${CONFIG_SND_TZ1090_SOC},y)
+# ALSA output
+snd-soc-sii9022a-objs			:= sii9022a-audio.o
+obj-${CONFIG_TZ1090_01XX_HDMI_AUDIO}	+= snd-soc-sii9022a.o
+endif
+
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/edid.h b/arch/metag/boards/TZ1090-01XX/hdmi-out/edid.h
new file mode 100644
index 0000000..b6765da
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/edid.h
@@ -0,0 +1,138 @@
+/*
+ * drivers/video/edid.h - EDID/DDC Header
+ *
+ * Based on:
+ *   1. XFree86 4.3.0, edid.h
+ *      Copyright 1998 by Egbert Eich <Egbert.Eich@Physik.TU-Darmstadt.DE>
+ *
+ *   2. John Fremlin <vii@users.sourceforge.net> and
+ *      Ani Joshi <ajoshi@unixbox.com>
+ *
+ * DDC is a Trademark of VESA (Video Electronics Standard Association).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+*/
+
+#ifndef __EDID_H__
+#define __EDID_H__
+
+#define EDID_LENGTH				0x80
+#define EDID_HEADER				0x00
+#define EDID_HEADER_END				0x07
+
+#define ID_MANUFACTURER_NAME			0x08
+#define ID_MANUFACTURER_NAME_END		0x09
+#define ID_MODEL				0x0a
+
+#define ID_SERIAL_NUMBER			0x0c
+
+#define MANUFACTURE_WEEK			0x10
+#define MANUFACTURE_YEAR			0x11
+
+#define EDID_STRUCT_VERSION			0x12
+#define EDID_STRUCT_REVISION			0x13
+
+#define EDID_STRUCT_DISPLAY                     0x14
+
+#define DPMS_FLAGS				0x18
+#define ESTABLISHED_TIMING_1			0x23
+#define ESTABLISHED_TIMING_2			0x24
+#define MANUFACTURERS_TIMINGS			0x25
+
+/* standard timings supported */
+#define STD_TIMING                              8
+#define STD_TIMING_DESCRIPTION_SIZE             2
+#define STD_TIMING_DESCRIPTIONS_START           0x26
+
+#define DETAILED_TIMING_DESCRIPTIONS_START	0x36
+#define DETAILED_TIMING_DESCRIPTION_SIZE	18
+#define NO_DETAILED_TIMING_DESCRIPTIONS		4
+
+#define DETAILED_TIMING_DESCRIPTION_1		0x36
+#define DETAILED_TIMING_DESCRIPTION_2		0x48
+#define DETAILED_TIMING_DESCRIPTION_3		0x5a
+#define DETAILED_TIMING_DESCRIPTION_4		0x6c
+
+#define DESCRIPTOR_DATA				5
+
+#define UPPER_NIBBLE( x ) \
+        (((128|64|32|16) & (x)) >> 4)
+
+#define LOWER_NIBBLE( x ) \
+        ((1|2|4|8) & (x))
+
+#define COMBINE_HI_8LO( hi, lo ) \
+        ( (((unsigned)hi) << 8) | (unsigned)lo )
+
+#define COMBINE_HI_4LO( hi, lo ) \
+        ( (((unsigned)hi) << 4) | (unsigned)lo )
+
+#define PIXEL_CLOCK_LO     (unsigned)block[ 0 ]
+#define PIXEL_CLOCK_HI     (unsigned)block[ 1 ]
+#define PIXEL_CLOCK	   (COMBINE_HI_8LO( PIXEL_CLOCK_HI,PIXEL_CLOCK_LO )*10000)
+#define H_ACTIVE_LO        (unsigned)block[ 2 ]
+#define H_BLANKING_LO      (unsigned)block[ 3 ]
+#define H_ACTIVE_HI        UPPER_NIBBLE( (unsigned)block[ 4 ] )
+#define H_ACTIVE           COMBINE_HI_8LO( H_ACTIVE_HI, H_ACTIVE_LO )
+#define H_BLANKING_HI      LOWER_NIBBLE( (unsigned)block[ 4 ] )
+#define H_BLANKING         COMBINE_HI_8LO( H_BLANKING_HI, H_BLANKING_LO )
+
+#define V_ACTIVE_LO        (unsigned)block[ 5 ]
+#define V_BLANKING_LO      (unsigned)block[ 6 ]
+#define V_ACTIVE_HI        UPPER_NIBBLE( (unsigned)block[ 7 ] )
+#define V_ACTIVE           COMBINE_HI_8LO( V_ACTIVE_HI, V_ACTIVE_LO )
+#define V_BLANKING_HI      LOWER_NIBBLE( (unsigned)block[ 7 ] )
+#define V_BLANKING         COMBINE_HI_8LO( V_BLANKING_HI, V_BLANKING_LO )
+
+#define H_SYNC_OFFSET_LO   (unsigned)block[ 8 ]
+#define H_SYNC_WIDTH_LO    (unsigned)block[ 9 ]
+
+#define V_SYNC_OFFSET_LO   UPPER_NIBBLE( (unsigned)block[ 10 ] )
+#define V_SYNC_WIDTH_LO    LOWER_NIBBLE( (unsigned)block[ 10 ] )
+
+#define V_SYNC_WIDTH_HI    ((unsigned)block[ 11 ] & (1|2))
+#define V_SYNC_OFFSET_HI   (((unsigned)block[ 11 ] & (4|8)) >> 2)
+
+#define H_SYNC_WIDTH_HI    (((unsigned)block[ 11 ] & (16|32)) >> 4)
+#define H_SYNC_OFFSET_HI   (((unsigned)block[ 11 ] & (64|128)) >> 6)
+
+#define V_SYNC_WIDTH       COMBINE_HI_4LO( V_SYNC_WIDTH_HI, V_SYNC_WIDTH_LO )
+#define V_SYNC_OFFSET      COMBINE_HI_4LO( V_SYNC_OFFSET_HI, V_SYNC_OFFSET_LO )
+
+#define H_SYNC_WIDTH       COMBINE_HI_4LO( H_SYNC_WIDTH_HI, H_SYNC_WIDTH_LO )
+#define H_SYNC_OFFSET      COMBINE_HI_4LO( H_SYNC_OFFSET_HI, H_SYNC_OFFSET_LO )
+
+#define H_SIZE_LO          (unsigned)block[ 12 ]
+#define V_SIZE_LO          (unsigned)block[ 13 ]
+
+#define H_SIZE_HI          UPPER_NIBBLE( (unsigned)block[ 14 ] )
+#define V_SIZE_HI          LOWER_NIBBLE( (unsigned)block[ 14 ] )
+
+#define H_SIZE             COMBINE_HI_8LO( H_SIZE_HI, H_SIZE_LO )
+#define V_SIZE             COMBINE_HI_8LO( V_SIZE_HI, V_SIZE_LO )
+
+#define H_BORDER           (unsigned)block[ 15 ]
+#define V_BORDER           (unsigned)block[ 16 ]
+
+#define FLAGS              (unsigned)block[ 17 ]
+
+#define INTERLACED         (FLAGS&128)
+#define SYNC_TYPE          (FLAGS&3<<3)	/* bits 4,3 */
+#define SYNC_SEPARATE      (3<<3)
+#define HSYNC_POSITIVE     (FLAGS & 4)
+#define VSYNC_POSITIVE     (FLAGS & 2)
+
+#define V_MIN_RATE              block[ 5 ]
+#define V_MAX_RATE              block[ 6 ]
+#define H_MIN_RATE              block[ 7 ]
+#define H_MAX_RATE              block[ 8 ]
+#define MAX_PIXEL_CLOCK         (((int)block[ 9 ]) * 10)
+#define GTF_SUPPORT		block[10]
+
+#define DPMS_ACTIVE_OFF		(1 << 5)
+#define DPMS_SUSPEND		(1 << 6)
+#define DPMS_STANDBY		(1 << 7)
+
+#endif /* __EDID_H__ */
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/setup.c b/arch/metag/boards/TZ1090-01XX/hdmi-out/setup.c
new file mode 100644
index 0000000..74d8e0c
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/setup.c
@@ -0,0 +1,98 @@
+/* HDMI Board support */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <video/pdpfb.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/pdp.h>
+#include <asm/soc-tz1090/hdmi-video.h>
+
+/*
+ * 01SP/01TT HDMI has been rated at a maximum of 82MHz, however it may be
+ * capable of higher unsupported frequencies over 100MHz. The HDMI chip
+ * supports up to 165MHz.
+ */
+#define HDMI_MAX_PIXFREQ	82000000
+
+/* HDMI */
+static struct pdp_lcd_size_cfg plsc = {
+	.dynamic_mode	= 1,
+	.width		= 476,
+	.height		= 268,
+};
+
+static struct pdp_sync_cfg psc = {
+	.force_vsyncs	= 0,
+	.hsync_dis	= 0,
+	.blank_dis	= 0,
+	.blank_pol	= PDP_ACTIVE_LOW,
+	.clock_pol	= PDP_CLOCK_INVERTED,
+};
+
+static struct pdp_hwops hwops = {
+	.set_shared_base	= comet_pdp_set_shared_base,
+};
+
+static struct hdmi_platform_data hdmi_pdata = {
+	.pix_clk	= "pixel",
+	.max_pixfreq	= HDMI_MAX_PIXFREQ,
+};
+
+static struct i2c_board_info __initdata board_info[] = {
+	{
+		I2C_BOARD_INFO("sii9022a-tpi", 0x39),
+		.platform_data = &hdmi_pdata,
+	},
+};
+
+static struct fb_videomode fbvm = {
+	.name		= "Generic HDMI Monitor (maybe)",
+	.refresh	= 60,
+
+	.hsync_len	= 96,
+	.left_margin	= 48,
+	.xres		= 640,
+	.right_margin	= 16,
+
+	.vsync_len	= 2,
+	.upper_margin	= 33,
+	.yres		= 480,
+	.lower_margin	= 10,
+
+	.sync		= 0,
+};
+
+static int __init comet_01xx_init_hdmi(void)
+{
+	int irq = gpio_to_irq(GPIO_SPI1_CS2);
+	int err = 0;
+
+	err = gpio_request(GPIO_SPI1_CS2, "HDMI irqn");
+	if (err) {
+		printk(KERN_WARNING "HDMI IRQ pin request failed: %d\n", err);
+		goto out;
+	}
+
+	err = irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+	if (err) {
+		printk(KERN_WARNING "Unable to setup HDMI IRQ\n");
+		goto out;
+	}
+
+	/* Set the pixel clock limits */
+	comet_pdp_set_limits(20000000, HDMI_MAX_PIXFREQ);
+
+	/* Setup the I2C device */
+	board_info[0].irq = irq;
+	i2c_register_board_info(2, board_info, ARRAY_SIZE(board_info));
+	comet_pdp_setup(&fbvm, &plsc, NULL, &psc, &hwops);
+
+out:
+	return err;
+}
+device_initcall(comet_01xx_init_hdmi);
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a-audio.c b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a-audio.c
new file mode 100644
index 0000000..f88a6b9
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a-audio.c
@@ -0,0 +1,112 @@
+/*
+ * SiI9022a HDMI Transmitter Audio codec component.
+ * Copyright (C) 2010 Imagination Technologies.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation, version 2 of the License.
+ *
+ *  This program is distributed in the hope that, in addition to its
+ *  original purpose to support Neuros hardware, it will be useful
+ *  otherwise, but WITHOUT ANY WARRANTY; without even the implied
+ *  warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *  See the GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <asm/soc-tz1090/hdmi-audio.h>
+#include "sii9022a.h"
+
+#define SII9022A_AUDIO_FORMATS	SNDRV_PCM_FMTBIT_S24_LE
+#define SII9022A_AUDIO_RATES	(/*SNDRV_PCM_RATE_32000 |*/\
+				 SNDRV_PCM_RATE_48000)
+
+
+/*
+ * Note the DAI ops below must be implemented here, even if they only return 0,
+ * otherwise soc-core returns -EINVAL if the call isn't implemented and
+ * assumes the requested format is unsupported.
+ */
+
+static int sii9022a_audio_hw_params(struct snd_pcm_substream *substream,
+				    struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai)
+{
+	return 0;
+}
+
+static int sii9022a_audio_set_dai_fmt(struct snd_soc_dai *codec_dai,
+				      unsigned int fmt)
+{
+	return 0;
+}
+
+static struct snd_soc_dai_ops sii9022a_audio_ops = {
+	.hw_params	= sii9022a_audio_hw_params,
+	.set_fmt	= sii9022a_audio_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver sii9022a_audio_dai = {
+	.name		= "sii9022a-audio",
+	.playback	= {
+		.stream_name	= "Playback",
+		.channels_min	= 2,
+		.channels_max	= 6,
+		.rates		= SII9022A_AUDIO_RATES,
+		.formats	= SII9022A_AUDIO_FORMATS,
+	},
+	.ops = &sii9022a_audio_ops,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_sii9022a;
+
+static int sii9022a_audio_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_sii9022a,
+				      &sii9022a_audio_dai, 1);
+}
+
+static int sii9022a_audio_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver sii9022a_codec_driver = {
+	.driver = {
+		.name	= "sii9022a-codec",
+		.owner	= THIS_MODULE,
+	},
+
+	.probe	= sii9022a_audio_probe,
+	.remove	= sii9022a_audio_remove,
+};
+
+static int __init sii9022a_audio_init(void)
+{
+	return platform_driver_register(&sii9022a_codec_driver);
+}
+module_init(sii9022a_audio_init);
+
+static void __exit sii9022a_audio_exit(void)
+{
+	platform_driver_unregister(&sii9022a_codec_driver);
+}
+module_exit(sii9022a_audio_exit);
+
+MODULE_DESCRIPTION("ALSA SoC codec layer for SiI9022a HDMI transmitter");
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_LICENSE("GPL");
+
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.c b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.c
new file mode 100644
index 0000000..7401625
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.c
@@ -0,0 +1,1185 @@
+/*
+ * SiI9022a HDMI Transmitter I2C interface driver.
+ * Copyright (C) 2010,2011 Imagination Technologies.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation, version 2 of the License.
+ *
+ *  This program is distributed in the hope that, in addition to its
+ *  original purpose to support Neuros hardware, it will be useful
+ *  otherwise, but WITHOUT ANY WARRANTY; without even the implied
+ *  warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *  See the GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/miscdevice.h>
+#include <video/edid.h>
+#include <asm/soc-tz1090/hdmi-video.h>
+#include <asm/soc-tz1090/hdmi-audio.h>
+
+#include "sii9022a.h"
+#include "edid.h"
+
+#define EDID_ADDRESS	0x50
+
+/* Private driver data */
+struct sii9022a_data {
+	struct i2c_client	*client;
+	struct clk		*pix_clk;
+
+	u32			revision;	/* The chip revision */
+	u8			hdmi_sink;	/* Is the connected device
+						 * HDMI or DVI ? */
+
+	u8			interlace;	/* Interlaced output mode */
+	u8			polarity;	/* VSYNC pol << 4 | HSYNC pol */
+
+	struct mutex		lock;		/* Protects fbinfo and specs */
+	struct fb_info		*fbinfo;	/* Framebuffer to attach to */
+	struct fb_monspecs	specs;		/* Monitor specs gleaned from
+						 * EDID data */
+	struct list_head	modedb;		/* Display mode db */
+	struct spinlock		mode_lock;	/* Protects current_mode */
+	struct fb_videomode	current_mode;	/* Current videomode */
+	bool			audio_enabled;	/* enable audio output? */
+
+	unsigned int		state;		/* Current state of xmitter */
+#define HDMI_STATE_RESET	0		/* Error state/Post reset */
+#define HDMI_STATE_UNPLUGGED	1		/* Initialised */
+#define HDMI_STATE_PLUGGED	2		/* Cable plugged, no rx sense */
+
+	unsigned int		deferred_edid;	/* EDID read deferred by hw quirk */
+
+	unsigned char		int_mask;	/* Interrupt mask */
+	unsigned char		int_stat;
+
+	struct notifier_block	fb_notify;	/* framebuffer notification */
+};
+
+/* EDID data for later processing */
+static unsigned char hdmi_edid[128];
+static unsigned char *hdmi_ext_edid;
+static unsigned int hdmi_ext_edid_len;
+
+/* Misc device node */
+static int hdmi_misc_open(struct inode *inode, struct file *file)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	return 0;
+}
+
+static ssize_t hdmi_misc_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	if (*ppos > 128)
+		*ppos = 0;
+
+	if (count + *ppos > 128)
+		count = 128 - *ppos;
+
+	copy_to_user(buffer, &hdmi_edid[*ppos], count);
+
+	*ppos += count;
+	return count;
+}
+
+const static struct file_operations hdmi_misc_fops = {
+	.open		= hdmi_misc_open,
+	.read		= hdmi_misc_read,
+};
+
+static struct miscdevice hdmi_misc_device = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "hdmi-i2c",
+	.fops		= &hdmi_misc_fops,
+};
+
+/* ----------------------- EDID ----------------------- */
+static int sii9022a_read_edid(struct i2c_client *client, int addr, u8 *buf)
+{
+	int old_addr = client->addr;
+	int ret = 0, scratch = 0, i;
+	unsigned char tmp = '\0';
+	client->addr = addr;
+
+	for (i = 0; i < 128; i += ret) {
+		/* i2c_smbus_read_block_data only returns the first 32 bytes */
+		ret = i2c_master_send(client, &tmp, 1);
+		if (ret != 1) {
+			ret = -EIO;
+			goto done;
+		}
+
+		ret = i2c_master_recv(client, &buf[i], 128);
+		if (ret <= 0) {
+			if (!ret)
+				ret = -EIO;
+			goto done;
+		}
+	}
+
+	/* See how many extension blocks there are  - none signifies DVI */
+	scratch = buf[126];
+	if (!scratch) {
+		ret = 0;
+		goto done;
+	}
+
+	hdmi_ext_edid = kzalloc(scratch * 128, GFP_KERNEL);
+	if (!hdmi_ext_edid) {
+		dev_err(&client->dev,
+				"Cannot allocate memory for Extended EDID "
+				"block(s) - number requested %d\n", scratch);
+		ret = -ENOMEM;
+	} else {
+		hdmi_ext_edid_len = scratch;
+		for (i = 0; i < scratch; i++) {
+			ret = i2c_master_recv(client,
+					hdmi_ext_edid + (i * 128), 128);
+		}
+		ret = scratch;
+	}
+
+done:
+	client->addr = old_addr;
+	return ret;
+}
+
+static void sii9022a_process_cea_timings(struct sii9022a_data *data,
+		unsigned char *ext_block)
+{
+	struct i2c_client *client = data->client;
+	struct fb_var_screeninfo var;
+	struct fb_videomode fbvm;
+	int offset, num_dtds;
+	unsigned char *block;
+
+	if ((*ext_block != 0x02) && (*(ext_block + 0x01) != 0x03))
+		return;
+
+	/* This is a CEA EDID Timing Extension block */
+	offset = *(ext_block + 0x02);
+	if (offset == 0x00)
+		return;	/* There are no DTD blocks and no non-DTD data */
+
+	num_dtds = *(ext_block + 0x03);
+	dev_dbg(&client->dev, "CEA EDID Timing block.\n"
+		"Display support:\nUnderscan is %s\nBasic audio is %s\n"
+		"YCbCr 4:4:4 is %s\nYCbCr 4:2:2 is %s\n"
+		"Number of native DTDs: %d\n",
+		(num_dtds & 0x80) ? "supported" : "not supported",
+		(num_dtds & 0x40) ? "supported" : "not supported",
+		(num_dtds & 0x20) ? "supported" : "not supported",
+		(num_dtds & 0x10) ? "supported" : "not supported",
+		(num_dtds & 0x0f));
+
+	do {
+		/* Parse DTD - taken from fb_parse_edid */
+		block = ext_block + offset;
+
+		memset(&var, 0, sizeof(struct fb_var_screeninfo));
+
+		var.xres = var.xres_virtual = H_ACTIVE;
+		var.yres = var.yres_virtual = V_ACTIVE;
+		var.height = var.width = 0;
+		var.right_margin = H_SYNC_OFFSET;
+		var.left_margin = (H_ACTIVE + H_BLANKING) -
+			(H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
+		var.upper_margin = V_BLANKING - V_SYNC_OFFSET -
+			V_SYNC_WIDTH;
+		var.lower_margin = V_SYNC_OFFSET;
+		var.hsync_len = H_SYNC_WIDTH;
+		var.vsync_len = V_SYNC_WIDTH;
+		var.pixclock = PIXEL_CLOCK;
+		var.pixclock /= 1000;
+		var.pixclock = KHZ2PICOS(var.pixclock);
+
+		if (HSYNC_POSITIVE)
+			var.sync |= FB_SYNC_HOR_HIGH_ACT;
+		if (VSYNC_POSITIVE)
+			var.sync |= FB_SYNC_VERT_HIGH_ACT;
+
+		memset(&fbvm, 0, sizeof(fbvm));
+		fb_var_to_videomode(&fbvm, &var);
+		fb_add_videomode(&fbvm, &data->modedb);
+
+		offset += DETAILED_TIMING_DESCRIPTION_SIZE;
+	} while (offset < 110);
+}
+
+/* remove modes that we cannot support even if the monitor can */
+static void sii902a_filter_modelist(struct sii9022a_data *data)
+{
+	struct list_head *pos, *n;
+	struct fb_modelist *modelist;
+	struct fb_videomode *m;
+	struct hdmi_platform_data *pdata;
+	u32 min_pixclock;
+
+	pdata = data->client->dev.platform_data;
+	if (!pdata->max_pixfreq)
+		return;
+
+	min_pixclock = KHZ2PICOS(pdata->max_pixfreq/1000);
+
+	list_for_each_safe(pos, n, &data->modedb) {
+		modelist = list_entry(pos, struct fb_modelist, list);
+		m = &modelist->mode;
+		if (m->pixclock < min_pixclock) {
+			list_del(pos);
+			kfree(pos);
+		}
+	}
+}
+
+/* ------------------ Hotplug Event ------------------- */
+
+/*
+ * Replace the specified framebuffer's mode list with a copy of our modedb
+ * based on the monitor's EDID.
+ * Console semaphore and data->lock must both be held (in order).
+ */
+static void sii9022a_store_modes(struct fb_info *fbinfo,
+				 struct sii9022a_data *data)
+{
+	struct fb_modelist *modelist;
+	struct list_head *pos;
+	LIST_HEAD(old_list);
+
+	list_splice(&fbinfo->modelist, &old_list);
+	INIT_LIST_HEAD(&fbinfo->modelist);
+	list_for_each(pos, &data->modedb) {
+		modelist = list_entry(pos, struct fb_modelist, list);
+		if (fb_add_videomode(&modelist->mode, &fbinfo->modelist))
+			break;
+	}
+	if (fb_new_modelist(fbinfo)) {
+		fb_destroy_modelist(&fbinfo->modelist);
+		list_splice(&old_list, &fbinfo->modelist);
+	} else {
+		fb_destroy_modelist(&old_list);
+	}
+
+	return;
+}
+
+static int sii9022a_handle_unplug(struct sii9022a_data *data)
+{
+	int ret = 0, status = data->int_stat, retries = 0;
+
+	if (data->state == HDMI_STATE_UNPLUGGED)
+		goto done;
+
+	/* Drop the power state to low power */
+	i2c_smbus_write_byte_data(data->client, SII9022A_POWER_STATE,
+			PWRSTATE_PWR_LOW);
+
+	while (status & (INT_STAT_HOTPLUG | INT_STAT_RX_SENSE)) {
+		if (++retries > 5) {
+			dev_err(&data->client->dev, "Unplug event timed out\n");
+			goto done;
+		}
+		status = i2c_smbus_read_byte_data(data->client,
+				SII9022A_INT_STATUS);
+	}
+
+	dev_dbg(&data->client->dev, "Cable detect dropped.\n");
+
+	/* Clear the edid data */
+	memset(hdmi_edid, 0, 128);
+	kfree(hdmi_ext_edid);
+	hdmi_ext_edid = NULL;
+
+	fb_destroy_modelist(&data->modedb);
+
+	data->state = HDMI_STATE_UNPLUGGED;
+
+	/* Setup Interrupt Enable - listen for just Hotplug interrupts. */
+	data->int_mask = INT_HOTPLUG;
+	i2c_smbus_write_byte_data(data->client, SII9022A_INT_ENABLE,
+		data->int_mask);
+done:
+	return ret;
+}
+
+static int sii9022a_handle_hotplug(struct sii9022a_data *data)
+{
+	int ret = 0, scratch = 0, i = 0;
+
+	/*
+	 * Read EDID data into the EDID buffer
+	 * First, request DDC bus access
+	 */
+	scratch = i2c_smbus_read_byte_data(data->client, SII9022A_TPI_SYS_CTL);
+	scratch |= SYS_CTL_DDC_REQ;
+	ret = i2c_smbus_write_byte_data(data->client,
+			SII9022A_TPI_SYS_CTL, scratch);
+
+	do {
+		scratch = i2c_smbus_read_byte_data(data->client,
+				SII9022A_TPI_SYS_CTL);
+	} while (!(scratch & SYS_CTL_DDC_GRANT));
+
+	/* We have pass-through access to the DDC bus after we ack it */
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			SYS_CTL_DDC_ACK);
+
+	/*
+	 * Do EDID stuffs!! EDID is at address 0x50. However, we need to use
+	 * raw i2c for the transfer...
+	 */
+	ret = sii9022a_read_edid(data->client, EDID_ADDRESS, hdmi_edid);
+
+	/* Release the passthrough so we can continue issuing commands */
+	do {
+		i2c_smbus_write_byte_data(data->client,
+				SII9022A_TPI_SYS_CTL, 0x00);
+
+		scratch = i2c_smbus_read_byte_data(data->client,
+				SII9022A_TPI_SYS_CTL);
+	} while (scratch & SYS_CTL_DDC_ACK);
+
+	if (ret < 0) {
+		/* We don't have a sink connected... */
+		goto done;
+	}
+
+	/* ret holds whether this is an HDMI or DVI sink */
+	if (ret)
+		data->hdmi_sink = SYS_CTL_OUTPUT_HDMI;
+	else
+		data->hdmi_sink = SYS_CTL_OUTPUT_DVI;
+
+	i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			SYS_CTL_TDMS_OUTN | data->hdmi_sink);
+
+	/* Build fb mode db */
+	console_lock();
+	mutex_lock(&data->lock);
+	fb_edid_to_monspecs(hdmi_edid, &data->specs);
+	fb_videomode_to_modelist(data->specs.modedb, data->specs.modedb_len,
+			&data->modedb);
+	if (hdmi_ext_edid) {
+		/* process the extended edid stuff */
+		for (i = 0; i < hdmi_ext_edid_len; i++) {
+			sii9022a_process_cea_timings(data,
+					hdmi_ext_edid + (i * 128));
+		}
+	}
+	sii902a_filter_modelist(data);
+	if (data->fbinfo)
+		sii9022a_store_modes(data->fbinfo, data);
+	mutex_unlock(&data->lock);
+	console_unlock();
+
+	data->state = HDMI_STATE_PLUGGED;
+
+	/* Start looking for Receiver Sense interrupts */
+	data->int_mask = INT_HOTPLUG;
+	i2c_smbus_write_byte_data(data->client, SII9022A_INT_ENABLE,
+		data->int_mask);
+
+	ret = 0;
+done:
+	return ret;
+}
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+/*
+ * Values taken from IEC60958-3 specification (Table 1 channel status)
+ * and the sil9022a spec.
+ *
+ *  byte 0: 0x02 - Consumer, LPCM, Non copyright, no pre-emp, mode 0
+ *  byte 1: 0x00 - Category Code
+ *  byte 2: 0x00 - source and channel number 0 (do not take into account)
+ *  byte 3: 0x02 - 48kHz, clock accuracy level II (normal accuracy)
+ *  byte 4: 0x0b - 24bit, original sampling frequency not indicated
+ */
+
+
+static unsigned char audio_header[5] = {0x02, 0x00, 0x00, 0x02, 0x0B};
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO_51
+/*
+ *  Audio Info Frame See Section 6.6 of IEC CEA-861-E (for bytes D0-D9)
+ *  and Table 16 of SIL-PR-1032 (The HDMI chips Programmer Reference)
+ *  byte 0: 0xC2 - Frame enabled , repeat enabled, Audio Info Frame
+ *  byte 1: 0x84 - InfoFrame, type audio
+ *  byte 2: 0x01 - version 1
+ *  byte 3: 0x0a - length 10 bytes
+ *  byte 4: 0x42 - data checksum = 0x100 - (Sum of bytes 1-3, 5-14 mod 256)
+ *  byte 5 (D0): 0x15 - 6 channel PCM (Data byte 0)
+ *  byte 6 (D1): 0x0f - 48kHz, 24bit
+ *  byte 7 (D2): 0x00 - extended data formats
+ *  byte 8 (D3): 0x0B - standard 5.1 speaker layout FL,FR,LFE,C,SR,SL
+ *  byte 9 (D4): 0x00 - 0db level shift
+ *  byte 10 (D5): 0x00 - downmix allowed
+ *  byte 11 (D6): 0x00 - reserved
+ *  byte 12 (D7): 0x00 - reserved
+ *  byte 13 (D8): 0x00 - reserved
+ *  byte 14 (D9): 0x00 - reserved
+ */
+static unsigned char audio_frame[15] = {0xc2, 0x84, 0x01, 0x0a, 0x42, 0x15,
+		0x0F, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+#else
+/*
+ *  Audio Info Frame See Section 6.6 of IEC CEA-861-E
+ *  and Table 16 of SIL-PR-1032 (The HDMI chips Programmer Reference)
+ *  byte 0: 0xc2 - Frame enabled , repeat enabled, Audio Info Frame
+ *  byte 1: 0x84 - InfoFrame, type audio
+ *  byte 2: 0x01 - version 1
+ *  byte 3: 0x0a - length 10 bytes
+ *  byte 4: 0x51 - data checksum 0x100 - (Sum of bytes 1-3, 5-14 mod 256)
+ *  byte 5: 0x11 - 2 channel PCM
+ *  byte 6: 0x0f - 48kHz, 24bit
+ *  byte 7 (D2): 0x00 - extended data formats
+ *  byte 8 (D3): 0x00 - reserved
+ *  byte 9 (D4): 0x00 - reserved
+ *  byte 10 (D5): 0x00 - reserved
+ *  byte 11 (D6): 0x00 - reserved
+ *  byte 12 (D7): 0x00 - reserved
+ *  byte 13 (D8): 0x00 - reserved
+ *  byte 14 (D9): 0x00 - reserved
+ */
+static unsigned char audio_frame[15] = {0xc2, 0x84, 0x01, 0x0a, 0x51, 0x11,
+		0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+#endif
+
+#endif
+
+static unsigned char avi_info[14] = {0x00, 0x12, 0x28, 0x00, 0x04, 0x00, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+#define ASPECT_RATIO_NONE	0x08
+#define ASPECT_RATIO_4_3	0x18
+#define ASPECT_RATIO_16_9	0x28
+
+struct video_mode_map {
+	u16 xres;
+	u16 yres;
+	u8	video_code;
+	u8	refresh;
+	u8	aspect_ratio;
+	u8	overscan;
+};
+
+#define MAX_XRES	2048
+#define MAX_YRES	2048
+#define DIFF(a, b) (a > b ? a - b : b - a)
+
+static struct video_mode_map vmode_map[] = {
+	{ 1, 1, 0, 60, ASPECT_RATIO_NONE, 0 },
+	{ 640, 480, 1, 60, ASPECT_RATIO_4_3, 1 },
+	{ 720, 480, 3, 60, ASPECT_RATIO_16_9, 0 },
+	{ 720, 576, 17, 50, ASPECT_RATIO_4_3, 0 },
+	{ 800, 600, 0, 60, ASPECT_RATIO_4_3, 0 },
+	{ 1024, 768, 0, 60, ASPECT_RATIO_4_3, 0 },
+	{ 1280, 720, 4, 60, ASPECT_RATIO_16_9, 0 },
+	{ 1280, 720, 19, 50, ASPECT_RATIO_16_9, 0 },
+	{ 1280, 768, 0, 60, ASPECT_RATIO_16_9, 0 },
+	{ 1360, 768, 0, 60, ASPECT_RATIO_16_9, 0 },
+	{ 1680, 1050, 0, 60, ASPECT_RATIO_16_9, 0 },
+	{ 1920, 1080, 33, 25, ASPECT_RATIO_16_9, 0 },
+	{ 1920, 1080, 34, 30, ASPECT_RATIO_16_9, 0 },
+	{ MAX_XRES+1, MAX_YRES+1, 0, 0, ASPECT_RATIO_NONE, 0 },
+};
+
+static inline u8 cksum(u8 *buf, int len, int init)
+{
+	int i, cksum = init;
+
+	for (i = 0; i < len; i++)
+		cksum += buf[i];
+
+	return 0x100 - cksum;
+}
+
+static const struct video_mode_map *fb_to_vmm(const struct fb_videomode * fbvm)
+{
+	int i = 0;
+
+	for (i = 0; (vmode_map[i].xres < MAX_XRES); i++) {
+		if (fbvm->xres > vmode_map[i].xres)
+			continue;
+		if (i && (fbvm->xres <= vmode_map[i].xres) &&
+				(fbvm->yres <= vmode_map[i].yres)) {
+			if (DIFF(fbvm->refresh, vmode_map[i].refresh) < 3)
+				/* refresh rate diff < 3Hz */
+				return &vmode_map[i];
+			else
+				continue;
+		}
+
+		if (fbvm->yres > vmode_map[i].yres) /* no similar found */
+			return &vmode_map[0];
+	}
+	return &vmode_map[0];
+}
+
+static void sii9022a_update_avi_infoframe(const struct video_mode_map *vmm)
+{
+	avi_info[1] = 0x10 | vmm->overscan;
+	avi_info[2] = vmm->aspect_ratio;
+	avi_info[4] = vmm->video_code;
+
+	avi_info[0] = cksum(avi_info, 14, 0x91);
+
+	return;
+}
+
+static void sii9022a_audio_set_enabled(struct sii9022a_data *data, bool en)
+{
+	int i;
+	uint8_t scratch;
+
+	data->audio_enabled = en;
+
+	scratch = i2c_smbus_read_byte_data(data->client, 0x40);
+	scratch |= 0x1;
+	i2c_smbus_write_byte_data(data->client, 0x40, scratch);
+	msleep(100);
+	scratch &= ~0x1;
+	i2c_smbus_write_byte_data(data->client, 0x40, scratch);
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	if (en) {
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO_51
+		/* mute, select i2s and PCM */
+		i2c_smbus_write_byte_data(data->client, 0x26, 0xB1);
+#else
+		/* mute, select i2s, multi-channel layout and PCM */
+		i2c_smbus_write_byte_data(data->client, 0x26, 0x91);
+#endif
+		/*
+		 * rising edge, 256fps, left low, right justified,
+		 * MSB first.
+		 * Sony mode (no delay(shift) to first bit)
+		 *
+		 * Note 1: as the Comet I2S out block expects 24 bit data left
+		 * aligned from the DMA and ALSA provides 24 bit data
+		 * right aligned we configure the I2S out block in 32 bit mode
+		 * we then get 24 bit data out of the I2S right aligned, which
+		 * conforms to the Sony right aligned I2S timings.
+		 *
+		 * Note 2: the HDMI chip expects 24bit audio in a 24 + 24 frame
+		 * the Comet I2S out block cannot supply this, we have to use a
+		 * 32 + 32 frame (data right aligned due to reasons above), this
+		 * generally works but we get slight crackling on full range
+		 * audio, this can be reduced by limiting the gain of the PCM
+		 * audio being played to be less than the full 24 bits.
+		 *
+		 * 16 bit audio in a 16 + 16 frame is better but I2S in
+		 * is broken in 16bit mode on Comet.
+		 */
+		i2c_smbus_write_byte_data(data->client, 0x20, 0x95);
+
+		/*
+		 * ALSA's channel order doesn't match the HDMI standard channel order
+		 * so swap channel 2-3 and 4-5 here at the fifo input selection.
+		 *
+		 * Also swap the L/R ordering of channels 1,2 and 5,6.
+		 */
+
+		i2c_smbus_write_byte_data(data->client, 0x1f, 0x84);
+		i2c_smbus_write_byte_data(data->client, 0x1f, 0xA1);
+		i2c_smbus_write_byte_data(data->client, 0x1f, 0x96);
+
+		/* sample size and frequency defined by stream header*/
+		i2c_smbus_write_byte_data(data->client, 0x27, 0xD8);
+
+		/* Input word length */
+		i2c_smbus_write_byte_data(data->client, SII9022A_INTERNAL_PAGE, 0x02);
+		i2c_smbus_write_byte_data(data->client, SII9022A_INDEXED_REG, 0x24);
+		scratch = i2c_smbus_read_byte_data(data->client, SII9022A_IND_REG_VAL);
+		scratch &= 0xF0;
+		scratch |= 0x0B;
+		i2c_smbus_write_byte_data(data->client, SII9022A_IND_REG_VAL, scratch);
+
+		/* Header Layout Settings */
+		i2c_smbus_write_byte_data(data->client, SII9022A_INTERNAL_PAGE, 0x02);
+
+		i2c_smbus_write_byte_data(data->client, SII9022A_INDEXED_REG, 0x2F);
+		scratch = i2c_smbus_read_byte_data(data->client, SII9022A_IND_REG_VAL);
+		scratch &= ~0x02;
+		scratch |= 0x02;
+		i2c_smbus_write_byte_data(data->client, SII9022A_IND_REG_VAL, scratch);
+		i2c_smbus_write_block_data(data->client, 0x21, 5, audio_header);
+
+		/*
+		 * write audio info frame.
+		 *
+		 * Note the Programmers reference says to program the info frame data
+		 * with a single block write, but when we do this the data reads back
+		 * incorrectly and we cannot get 5.1 to work. Programming a byte at a
+		 * time in a loop appears to work fine.
+		 *
+		 */
+		for(i=0; i<15; i++)
+			i2c_smbus_write_byte_data(data->client, 0xBF+i, audio_frame[i]);
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO_51
+		i2c_smbus_write_byte_data(data->client, 0x26, 0xA1);
+#else
+		i2c_smbus_write_byte_data(data->client, 0x26, 0x81);
+#endif
+	} else {
+#endif /* CONFIG_TZ1090_01XX_HDMI_AUDIO */
+
+		/* disable audio interface */
+		i2c_smbus_write_byte_data(data->client, 0x26, 0x11);
+
+		/* enable 1-1 I2Sn to FIFOn mapping, disabled */
+		for (i = 0; i < 4; i++)
+			i2c_smbus_write_byte_data(data->client, 0x1f,
+				0x00 | (i << 4) | i);
+
+		/* disable audio interface */
+		i2c_smbus_write_byte_data(data->client, 0x26, 0x01);
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	}
+#endif
+}
+
+static int sii9022a_change_res(struct sii9022a_data *data)
+{
+	const struct video_mode_map *vmm;
+	int ret = 0, scratch = 0, htotal, vtotal, refresh, pixclk;
+	unsigned char vid_res[8];
+
+	scratch = i2c_smbus_read_byte_data(data->client, SII9022A_TPI_SYS_CTL);
+	scratch |= SYS_CTL_AV_MUTE;
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			scratch);
+
+	msleep(128);	/* wait 128ms to allow info frames to be sent */
+
+	/* Turn off TDMS */
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_POWER_STATE,
+			PWRSTATE_PWR_LOW);
+	scratch = i2c_smbus_read_byte_data(data->client, SII9022A_TPI_SYS_CTL);
+	scratch |= SYS_CTL_TDMS_OUTN;
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			scratch);
+
+	/* Setup Input mode - 8bit RGB*/
+	i2c_smbus_write_byte_data(data->client,
+			SII9022A_INPUT_FORMAT, IN_FMT_RGB);
+
+	/* Set the output mode - HDMI RGB with no c/s conversion */
+	i2c_smbus_write_byte_data(data->client, 0x0a, 0x00);
+
+	pixclk = clk_get_rate(data->pix_clk) / 10000;
+
+	spin_lock_bh(&data->mode_lock);
+	htotal = data->current_mode.xres +
+		 data->current_mode.left_margin +
+		 data->current_mode.right_margin +
+		 data->current_mode.hsync_len;
+	vtotal = data->current_mode.yres +
+		 data->current_mode.upper_margin +
+		 data->current_mode.lower_margin +
+		 data->current_mode.vsync_len;
+	vmm = fb_to_vmm(&data->current_mode);
+	spin_unlock_bh(&data->mode_lock);
+	refresh = 1000000 / (htotal * vtotal / pixclk);
+
+	vid_res[0] = pixclk & 0xff;
+	vid_res[1] = (pixclk & 0xff00) >> 8;
+	vid_res[2] = refresh & 0xff;
+	vid_res[3] = (refresh & 0xff00) >> 8;
+	vid_res[4] = htotal & 0xff;
+	vid_res[5] = (htotal & 0xff00) >> 8;
+	vid_res[6] = vtotal & 0xff;
+	vid_res[7] = (vtotal & 0xff00) >> 8;
+
+	i2c_smbus_write_i2c_block_data(data->client, SII9022A_PXLCLK_LSB,
+		8, vid_res);
+
+	/* Set the AVI Info frame - HDMI only */
+	if (data->hdmi_sink) {
+		/* AVI Info Frame stuff */
+		sii9022a_update_avi_infoframe(vmm);
+		i2c_smbus_write_i2c_block_data(data->client, SII9022A_AVIINFO,
+				0x0e, avi_info);
+	}
+
+	/* Audio mode */
+	sii9022a_audio_set_enabled(data, data->audio_enabled);
+
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_POWER_STATE,
+			PWRSTATE_PWR_ON);
+
+	/* Enable video output */
+	scratch = i2c_smbus_read_byte_data(data->client, SII9022A_TPI_SYS_CTL);
+	scratch &= ~(SYS_CTL_TDMS_OUTN);
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			scratch);
+
+	scratch = i2c_smbus_read_byte_data(data->client, SII9022A_TPI_SYS_CTL);
+	scratch &= ~(SYS_CTL_AV_MUTE);
+	ret = i2c_smbus_write_byte_data(data->client, SII9022A_TPI_SYS_CTL,
+			scratch);
+	/* Pixel repetition - same as default */
+	return ret;
+}
+
+static int sii9022a_set_res(struct fb_info *info, struct sii9022a_data *data)
+{
+	const struct fb_videomode *new_mode;
+
+	new_mode = fb_match_mode(&info->var, &info->modelist);
+	if (!new_mode)
+		return 0; /* no matching mode */
+	dev_dbg(info->dev, "%s - %dx%d@%d\n", __func__, new_mode->xres, new_mode->yres, new_mode->refresh);
+	if (!fb_mode_is_equal(&data->current_mode, new_mode)) {
+		/* this is the only place to write to current_mode */
+		spin_lock_bh(&data->mode_lock);
+		data->current_mode = *new_mode;
+		spin_unlock_bh(&data->mode_lock);
+		sii9022a_change_res(data);
+	}
+
+	return 0;
+}
+
+/* -------------------- Work Queue -------------------- */
+static irqreturn_t sii9022a_irq_thread(int irq, void *priv)
+{
+	struct sii9022a_data *data = priv;
+	int ret =0;
+	u8 status = 0;
+
+	if (data->state == HDMI_STATE_RESET)
+		return IRQ_HANDLED;
+
+	/* Do we need locking here ? */
+	status = i2c_smbus_read_byte_data(data->client, SII9022A_INT_STATUS);
+
+	if (data->deferred_edid) {
+		data->deferred_edid = 0;
+		msleep(500);
+		ret = sii9022a_handle_hotplug(data);
+		if (ret) {
+			dev_err(&data->client->dev,
+				"EDID read error or sink not connected.\n");
+			return IRQ_HANDLED;
+		}
+		if (data->current_mode.xres)
+			sii9022a_change_res(data);
+	}
+
+
+	/* Check the state of the interrupt against our mask */
+	if (!(status & data->int_mask))
+		return IRQ_HANDLED;
+
+	/* Is this a hotplug based event? */
+	if (!(status & INT_HOTPLUG))
+		return IRQ_HANDLED;
+
+	if (data->state == HDMI_STATE_PLUGGED) {
+		if (!(status & INT_STAT_HOTPLUG))
+			sii9022a_handle_unplug(data);
+	}
+	else if (data->state == HDMI_STATE_UNPLUGGED) {
+		/* Wait for the 500ms that is needed for the
+		 * connection debounce */
+		if (status & INT_STAT_HOTPLUG) {
+			msleep(500);
+			data->deferred_edid = 1;
+		}
+	}
+
+	/* Clear down the interrupt pending states */
+	i2c_smbus_write_byte_data(data->client, SII9022A_INT_STATUS, status);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sii9022a_thread_wake(int irq, void *data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/* ------------- Framebuffer Notification ------------- */
+static int sii9022a_fb_notification(struct notifier_block *self,
+		unsigned long event, void *evdata)
+{
+	struct fb_event *event_data = evdata;
+	struct fb_info *info = event_data->info;
+	struct sii9022a_data *data;
+
+	data = container_of(self, struct sii9022a_data, fb_notify);
+
+	if (event == FB_EVENT_FB_REGISTERED) {
+		mutex_lock(&data->lock);
+		/* Only attach to the first registered fb */
+		if (data->fbinfo) {
+			mutex_unlock(&data->lock);
+			return 0;
+		}
+		data->fbinfo = info;
+		sii9022a_store_modes(info, data);
+		mutex_unlock(&data->lock);
+
+		/* also set the video mode */
+		event = FB_EVENT_MODE_CHANGE;
+	}
+	if (data->fbinfo != info)
+		return 0;
+
+	switch (event) {
+	case FB_EVENT_FB_UNREGISTERED:
+		mutex_lock(&data->lock);
+		data->fbinfo = NULL;
+		mutex_unlock(&data->lock);
+		break;
+
+	case FB_EVENT_MODE_CHANGE:
+		if (sii9022a_set_res(info, data))
+			dev_err(info->dev, "Unable to change resolution.\n");
+		break;
+
+	case FB_EVENT_SUSPEND:
+		dev_dbg(info->dev, "FB Suspend\n");
+		break;
+
+	case FB_EVENT_RESUME:
+		dev_dbg(info->dev, "FB Resume\n");
+		break;
+
+	case FB_EVENT_BLANK:
+		dev_dbg(info->dev, "FB Blank\n");
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+
+static struct sii9022a_data *zero1sp_hdmi;
+
+bool zero1sp_hdmi_audio_get_enabled(void)
+{
+	if (!zero1sp_hdmi)
+		return false;
+	return zero1sp_hdmi->audio_enabled;
+}
+EXPORT_SYMBOL_GPL(zero1sp_hdmi_audio_get_enabled);
+
+void zero1sp_hdmi_audio_set_enabled(bool en)
+{
+	if (!zero1sp_hdmi)
+		return;
+	sii9022a_audio_set_enabled(zero1sp_hdmi, en);
+}
+EXPORT_SYMBOL_GPL(zero1sp_hdmi_audio_set_enabled);
+#endif
+
+/* --------------- Chip initialisation ---------------- */
+static int sii9022a_chip_init(struct i2c_client *client,
+		struct sii9022a_data *data)
+{
+	int ret = 0, rev = 0, scratch = 0;
+	int retries = 0;
+
+	/* Enable TPI mode */
+	do {
+		ret = i2c_smbus_write_byte_data(client,
+				SII9022A_RESET_AND_INIT, 0x00);
+		if (++retries > 5) {
+			dev_err(&client->dev,
+				"I2C Bus timed out on initialisation.\n");
+			goto out;
+		}
+	} while (ret);
+
+	/*
+	 * When we can read a value from the revision register, the device has
+	 * initialised.
+	 */
+	do {
+		ret = i2c_smbus_read_byte_data(client, SII9022A_REV_DEV_ID);
+		if (ret < 0) {
+			dev_err(&client->dev, "HDMI transmitter not connected."
+				" Error %d\n", ret);
+			goto out;
+		}
+	} while (ret != 0xb0);	/* Maybe include 0xb4 for SiI9136/9334 */
+
+	/* Get revision data */
+	rev = ret << 16;	/* Device ID, normally 0xb0 */
+	/* Production revision */
+	rev |= i2c_smbus_read_byte_data(client, SII9022A_REV_PROD_ID) << 8;
+	/* TPI revision */
+	rev |= i2c_smbus_read_byte_data(client, SII9022A_REV_TPI_ID);
+
+	if (rev == 0xb00000)
+		dev_info(&client->dev, "SiI9022/9024 HDMI Transmitter.\n");
+	else if (rev == 0xb00203)
+		dev_info(&client->dev, "SiI9022a/9024a HDMI Transmitter.\n");
+
+	data->revision = rev;
+
+	/* Power up */
+	i2c_smbus_write_byte_data(client, SII9022A_POWER_STATE,
+			PWRSTATE_PWR_ON);
+
+	/* Enable source termination */
+	if (rev == 0xb00203) {
+		i2c_smbus_write_byte_data(client, SII9022A_INTERNAL_PAGE, 0x01);
+		i2c_smbus_write_byte_data(client, SII9022A_INDEXED_REG, 0x82);
+		scratch = i2c_smbus_read_byte_data(client,
+				SII9022A_IND_REG_VAL);
+		scratch |= 0x01;
+		i2c_smbus_write_byte_data(client,
+				SII9022A_IND_REG_VAL, scratch);
+	}
+
+	/* Input bus defaults to: 1x clock, full pixel wide bus, falling edge
+	 * latching and no pixel repetition */
+
+	/* YC input mode defaults to: don't swap LSB/MSB, lower 12 bits select
+	 * DDR, non gap mode disabled, normal YC input mode */
+
+	/* Sync register recommends DE_ADJ to be cleared */
+	i2c_smbus_write_byte_data(client, SII9022A_SYNC_METHOD, 0x00);
+
+	/* Get interlace and polarity */
+	ret = i2c_smbus_read_byte_data(client, SII9022A_SYNC_POLARITY);
+	data->interlace = (ret & 0x04) >> 2;
+	data->polarity = ((ret & 0x02) << 3) | (ret & 0x01);
+
+	ret = i2c_smbus_read_byte_data(client, SII9022A_INT_STATUS);
+	dev_dbg(&client->dev, "Initial Status: 0x%02x\n", ret);
+
+	/*
+	 * Detect if a monitor is already connected (e.g. it may have been set
+	 * up by the bootloader).
+	 */
+	if (ret & INT_STAT_HOTPLUG) {
+		/* assume it's been plugged in for a while, so no delay */
+		ret = sii9022a_handle_hotplug(data);
+		if (ret)
+			dev_err(&client->dev,
+				"EDID read error or sink not connected.\n");
+		else if (data->current_mode.xres)
+			sii9022a_change_res(data);
+	}
+
+	/* Setup Interrupt Enable - listen for just Hotplug interrupts. */
+	data->int_mask = INT_HOTPLUG;
+	if (data->state == HDMI_STATE_RESET)
+		data->state = HDMI_STATE_UNPLUGGED;
+	i2c_smbus_write_byte_data(client, SII9022A_INT_ENABLE,
+		data->int_mask);
+
+	ret = 0;
+out:
+	return ret;
+}
+
+/* --------- Probe, Remove, Suspend & Resume ---------- */
+static int sii9022a_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret = 0;
+	struct sii9022a_data *sii9022a;
+	struct hdmi_platform_data *pdata = NULL;
+
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		ret = -ENODEV;
+		dev_err(&client->dev, "No platform data defined!\n");
+		goto out;
+	}
+
+	/* Get some private driver data space */
+	sii9022a = kzalloc(sizeof(struct sii9022a_data), GFP_KERNEL);
+	if (!sii9022a) {
+		ret = -ENOMEM;
+		dev_err(&client->dev,
+			"No memory available for HDMI I2C interface.\n");
+		goto out;
+	}
+	mutex_init(&sii9022a->lock);
+	spin_lock_init(&sii9022a->mode_lock);
+	INIT_LIST_HEAD(&sii9022a->modedb);
+
+	/* Keep a reference to the I2C client we are in */
+	sii9022a->client = client;
+	sii9022a->state = HDMI_STATE_RESET;
+	sii9022a->audio_enabled = true;
+
+	ret = sii9022a_chip_init(client, sii9022a);
+	if (ret) {
+		dev_err(&client->dev,
+			"HDMI Transmitter initialisation failed; "
+			"Error: %d\n", ret);
+		goto free_hdmi_dev;
+	}
+
+	sii9022a->pix_clk = clk_get(&client->dev, pdata->pix_clk);
+	if (IS_ERR(sii9022a->pix_clk)) {
+		ret = PTR_ERR(sii9022a->pix_clk);
+		dev_err(&client->dev,
+			"Could not get pixel clock named \"%s\"\n", pdata->pix_clk);
+		goto free_hdmi_dev;
+	}
+
+	i2c_set_clientdata(client, sii9022a);
+
+	/* Setup the IRQ - use default thread wakey routine */
+	ret = request_threaded_irq(client->irq, sii9022a_thread_wake,
+			sii9022a_irq_thread, IRQF_ONESHOT,
+			"hdmi-event-irq", sii9022a);
+	if (ret) {
+		dev_err(&client->dev, "Unable to setup HDMI IRQ.\n");
+		goto free_pix_clk;
+	}
+
+	sii9022a->fb_notify.notifier_call = sii9022a_fb_notification;
+	ret = fb_register_client(&sii9022a->fb_notify);
+	if (ret) {
+		dev_err(&client->dev,
+			"Unable to register framebuffer notifier (%d)\n", ret);
+		/* carry on anyway */
+	}
+
+	/*
+	 * If a framebuffer is already registered, we'll have missed it, so
+	 * fake a framebuffer register event.
+	 */
+	console_lock();
+	if (num_registered_fb) {
+		struct fb_event ev;
+		ev.info = registered_fb[0];
+		sii9022a_fb_notification(&sii9022a->fb_notify,
+			FB_EVENT_FB_REGISTERED, &ev);
+	}
+	console_unlock();
+
+	/* Register a misc device node */
+	ret = misc_register(&hdmi_misc_device);
+	if (ret) {
+		dev_err(&client->dev,
+			"Unable to register device node - Error %d\n", ret);
+		goto free_hdmi_irq;
+	}
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	zero1sp_hdmi = sii9022a;
+#endif
+
+	dev_info(&client->dev,
+			"SiI922x HDMI Transmitter probed successfully.\n");
+	goto out;
+
+free_hdmi_irq:
+	free_irq(client->irq, sii9022a);
+free_pix_clk:
+	clk_put(sii9022a->pix_clk);
+free_hdmi_dev:
+	kfree(sii9022a);
+out:
+	return ret;
+}
+
+static int sii9022a_remove(struct i2c_client *client)
+{
+	struct sii9022a_data *sii9022a;
+
+	sii9022a = i2c_get_clientdata(client);
+
+	fb_unregister_client(&sii9022a->fb_notify);
+	misc_deregister(&hdmi_misc_device);
+	free_irq(client->irq, sii9022a);
+	clk_put(sii9022a->pix_clk);
+	kfree(sii9022a);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sii9022a_suspend(struct device *dev)
+{
+	/* Set transmitter power mode to D2 */
+	return 0;
+}
+
+static int sii9022a_resume(struct device *dev)
+{
+	/* Set transmitter power mode to D0 */
+	return 0;
+}
+#else
+#define sii9022a_suspend	NULL
+#define sii9022a_resume		NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(sii9022a_pmops, sii9022a_suspend, sii9022a_resume);
+
+
+/* ----------- Compulsory I2C declarations ------------ */
+static struct i2c_device_id sii9022a_ids[] = {
+	{ "sii9022a-tpi", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, sii9022a_ids);
+
+/* Driver structure */
+static struct i2c_driver sii9022a_driver = {
+	.driver	= {
+		.name	= "sii9022a-tpi",
+		.owner	= THIS_MODULE,
+		.pm	= &sii9022a_pmops,
+	},
+
+	.id_table	= sii9022a_ids,
+	.probe		= sii9022a_probe,
+	.remove		= sii9022a_remove,
+};
+
+/* ------------ Compulsory module routines ------------ */
+static int __init sii9022a_init(void)
+{
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	zero1sp_hdmi = NULL;
+#endif
+	return i2c_add_driver(&sii9022a_driver);
+}
+module_init(sii9022a_init);
+
+static void __exit sii9022a_exit(void)
+{
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	zero1sp_hdmi = NULL;
+#endif
+	i2c_del_driver(&sii9022a_driver);
+}
+module_exit(sii9022a_exit);
+
+MODULE_DESCRIPTION("SiI9022a HDMI Transmitter I2C Command interface driver");
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_LICENSE("GPL");
diff --git a/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.h b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.h
new file mode 100644
index 0000000..88fb18a
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/hdmi-out/sii9022a.h
@@ -0,0 +1,84 @@
+/*
+ * SiI9022a HDMI Transmitter driver
+ * Register Defs
+ */
+
+/* Reset Register */
+#define SII9022A_RESET_AND_INIT	0xc7
+
+/* Revision registers */
+#define SII9022A_REV_DEV_ID	0x1b
+#define SII9022A_REV_PROD_ID	0x1c
+#define SII9022A_REV_TPI_ID	0x1d
+#define SII9022A_REV_HDCP	0x30
+
+/* Power state register */
+#define SII9022A_POWER_STATE	0x1e
+#define PWRSTATE_PWR_ON		0x00
+#define PWRSTATE_PWR_LOW	0x02
+#define PWRSTATE_PWR_V_LOW	0x03
+#define PWRSTATE_PWR_VV_LOW	0x04
+
+/* Internal registers */
+#define SII9022A_INTERNAL_PAGE	0xbc
+#define SII9022A_INDEXED_REG	0xbd
+#define SII9022A_IND_REG_VAL	0xbe
+
+/* Sync register */
+#define SII9022A_SYNC_METHOD	0x60
+#define SYNC_EMBEDDED		0x80
+#define SYNC_YC_MUX_ENABLE	0x20
+#define SYNC_INVERT_POLARITY	0x10
+#define SYNC_ADJUST_VSYNC	0x02
+#define SYNC_VSYNC_PLUS		0x01
+#define SYNC_VSYNC_MINUS	0x00
+
+/* Sync Polarity (RO) */
+#define SII9022A_SYNC_POLARITY	0x61
+
+/* Interrupts */
+#define SII9022A_INT_ENABLE	0x3c
+#define SII9022A_INT_STATUS	0x3d
+#define INT_AUDIO_ERR		0x10
+#define INT_STAT_RX_SENSE	0x08
+#define INT_STAT_HOTPLUG	0x04
+#define INT_RX_SENSE		0x02
+#define INT_HOTPLUG		0x01
+
+/* TPI System Control */
+#define SII9022A_TPI_SYS_CTL	0x1a
+#define SYS_CTL_DYNAMIC_LI	0x40
+#define SYS_CTL_TDMS_OUTN	0x10
+#define SYS_CTL_AV_MUTE		0x08
+#define SYS_CTL_DDC_REQ		0x04
+#define SYS_CTL_DDC_GRANT	0x02
+#define SYS_CTL_OUTPUT_HDMI	0x01
+#define SYS_CTL_OUTPUT_DVI	0x00
+#define SYS_CTL_DDC_ACK		(SYS_CTL_DDC_REQ | SYS_CTL_DDC_GRANT)
+
+/* Input mode */
+#define SII9022A_INPUT_FORMAT	0x09
+#define IN_FMT_12BIT		0xC0
+#define IN_FMT_10BIT		0x80
+#define IN_FMT_16BIT		0x40
+#define IN_FMT_VREXP_OFF	0x08
+#define IN_FMT_VREXP_ON		0x04
+#define IN_FMT_BLACK		0x03
+#define IN_FMT_YC422		0x02
+#define IN_FMT_YC444		0x01
+#define IN_FMT_RGB		0x00
+
+/* Output resolution options */
+#define SII9022A_PXLCLK_LSB	0x00
+#define SII9022A_PXLCLK_MSB	0x01
+#define SII9022A_VFREQ_LSB	0x02
+#define SII9022A_VFREQ_MSB	0x03
+#define SII9022A_PIXELS_LSB	0x04
+#define SII9022A_PIXELS_MSB	0x05
+#define SII9022A_LINES_LSB	0x06
+#define SII9022A_LINES_MSB	0x07
+
+/* AVI Info Frame */
+#define SII9022A_AVIINFO	0x0C
+
+
diff --git a/arch/metag/boards/TZ1090-01XX/setup.c b/arch/metag/boards/TZ1090-01XX/setup.c
new file mode 100644
index 0000000..4f0a635
--- /dev/null
+++ b/arch/metag/boards/TZ1090-01XX/setup.c
@@ -0,0 +1,202 @@
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <video/pdpfb.h>
+#include <asm/global_lock.h>
+#include <asm/mach/arch.h>
+#include <asm/soc-tz1090/sdhost.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/pdp.h>
+#include <asm/soc-tz1090/pdc.h>
+#include <asm/soc-tz1090/setup.h>
+#include <asm/soc-tz1090/hdmi-video.h>
+#include <asm/soc-tz1090/hdmi-audio.h>
+#include <asm/soc-tz1090/usb.h>
+
+#ifdef CONFIG_TZ1090_01XX_GPIO_KEYS
+extern int __init comet_01xx_init_gpio_buttons(void);
+#endif
+
+static int __init comet_01xx_init_tft(void)
+{
+	int err;
+
+	/*
+	 * The board has the wrong polarity, so the TFT power can be
+	 * controlled instead using a GPIO signal.
+	 */
+
+	err = gpio_request(GPIO_TFT_PWRSAVE, "TFT pwrsave");
+	if (err) {
+		printk(KERN_WARNING "TFT_PWRSAVE GPIO request failed: %d",
+					err);
+		return -EINVAL;
+	}
+	err = gpio_direction_output(GPIO_TFT_PWRSAVE, 0);
+	if (err) {
+		printk(KERN_WARNING "TFT_PWRSAVE GPIO set direction failed: %d",
+					err);
+		gpio_free(GPIO_TFT_PWRSAVE);
+		return -EINVAL;
+	}
+
+	/*
+	 * Switch the screen off until it wants to be used.
+	 */
+
+	gpio_set_value(GPIO_TFT_PWRSAVE, 0);
+
+	return 0;
+}
+
+/*
+ * USB setup and VBUS control
+ */
+
+static void comet_01xx_enable_vbus(void)
+{
+	gpio_set_value(GPIO_PDM_D, 1);
+}
+
+static void comet_01xx_disable_vbus(void)
+{
+	gpio_set_value(GPIO_PDM_D, 0);
+}
+
+static struct dwc_otg_board comet_01xx_usb_board = {
+	.enable_vbus = comet_01xx_enable_vbus,
+	.disable_vbus = comet_01xx_disable_vbus,
+};
+
+static int __init comet_01xx_init_usb(void)
+{
+	int err;
+
+	err = gpio_request(GPIO_PDM_D, "USB VBus");
+	if (err) {
+		pr_err("Failed to request PDM_D GPIO\n");
+		return err;
+	}
+
+	gpio_direction_output(GPIO_PDM_D, 1);
+	gpio_set_value(GPIO_PDM_D, 0);
+
+	comet_usb_setup(&comet_01xx_usb_board);
+
+	return 0;
+}
+
+/* Allocate all SDIO GPIOs and drive them low. */
+static struct gpio sd_temp_gpios[] = {
+	{ GPIO_SDIO_CMD,	GPIOF_OUT_INIT_LOW, "SDIO_CMD"},
+	{ GPIO_SDIO_CLK,	GPIOF_OUT_INIT_LOW, "SDIO_CLK"},
+	{ GPIO_SDIO_D0,		GPIOF_OUT_INIT_LOW, "SDIO_D<0>"},
+	{ GPIO_SDIO_D1,		GPIOF_OUT_INIT_LOW, "SDIO_D<1>"},
+	{ GPIO_SDIO_D2,		GPIOF_OUT_INIT_LOW, "SDIO_D<2>"},
+	{ GPIO_SDIO_D3,		GPIOF_OUT_INIT_LOW, "SDIO_D<3>"},
+};
+
+/*
+ * Toggle the power by switching off the power line, and driving the SD pins
+ * low to ensure it switches off. NB switch only on rev2 onwards.
+ */
+static void mci_setpower(u32 slot_id, u32 volt)
+{
+	int err;
+	if (volt) {
+		gpio_free_array(sd_temp_gpios, ARRAY_SIZE(sd_temp_gpios));
+
+		gpio_set_value(GPIO_PDC_GPIO0, 1);
+	} else {
+		gpio_set_value(GPIO_PDC_GPIO0, 0);
+
+		err = gpio_request_array(sd_temp_gpios,
+			ARRAY_SIZE(sd_temp_gpios));
+		if (err)
+			pr_warn("SDIO pins already allocated. Can not pull low.\n");
+	}
+}
+
+/*
+ * Initialise power control and initialise the SD host. The SD host configures
+ * the slot power and does not rely on its initial state, so don't change it
+ * here.
+ */
+static void __init comet_01xx_init_sdhost(void)
+{
+	int err;
+
+	err = gpio_request_one(GPIO_PDC_GPIO0, GPIOF_OUT_INIT_LOW, "PDC_GPIO0");
+	if (err)
+		pr_warn("Cannot request PDC_GPIO0 GPIO for SD Card power.\n");
+
+	comet_mci_platform_data.setpower = mci_setpower;
+
+	comet_sdhost_init();
+}
+
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+static void __init comet_01xx_init_audio(void)
+{
+	u32 flags;
+
+	comet_gpio_disable_block(GPIO_I2S_FIRST, GPIO_I2S_LAST);
+	__global_lock2(flags);
+	writel(0x01000000, CR_AUDIO_HP_CTRL);
+	__global_unlock2(flags);
+}
+#endif
+
+static void __init comet_01xx_init(void)
+{
+	comet_init_machine();
+
+	/* SPI0 header */
+	comet_01xx_init_sdhost();
+
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	comet_01xx_init_audio();
+#endif
+
+	comet_01xx_init_tft();
+	comet_01xx_init_usb();
+
+#ifdef CONFIG_TZ1090_01XX_GPIO_KEYS
+	comet_01xx_init_gpio_buttons();
+#endif
+}
+
+/* Comet MetaMorph */
+
+static const char *comet_01sp_boards_compat[] __initdata = {
+	"img,tz1090-01sp",
+	NULL,
+};
+
+MACHINE_START(COMET_01SP, "01SP Comet METAmorph")
+	.dt_compat	= comet_01sp_boards_compat,
+	TZ1090_MACHINE_DEFAULTS,
+	.init_machine	= comet_01xx_init,
+MACHINE_END
+
+/* Comet MiniMorph */
+
+static const char *comet_01tt_boards_compat[] __initdata = {
+	"img,tz1090-01tt",
+	NULL,
+};
+
+MACHINE_START(COMET_01TT, "01TT Comet MiniMorph")
+	.dt_compat	= comet_01tt_boards_compat,
+	TZ1090_MACHINE_DEFAULTS,
+	.init_machine	= comet_01xx_init,
+MACHINE_END
diff --git a/arch/metag/boards/atp-dp/Makefile b/arch/metag/boards/atp-dp/Makefile
new file mode 100644
index 0000000..6976eaf
--- /dev/null
+++ b/arch/metag/boards/atp-dp/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the ATP-dp board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/atp-dp/display1/Makefile b/arch/metag/boards/atp-dp/display1/Makefile
new file mode 100644
index 0000000..4a7eb0c
--- /dev/null
+++ b/arch/metag/boards/atp-dp/display1/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the ATP-dp:display1 TFT board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/atp-dp/display1/setup.c b/arch/metag/boards/atp-dp/display1/setup.c
new file mode 100644
index 0000000..83da313
--- /dev/null
+++ b/arch/metag/boards/atp-dp/display1/setup.c
@@ -0,0 +1,156 @@
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <video/imgpdi_lcd.h>
+#include <video/pdpfb.h>
+#include <asm/soc-chorus2/pdp.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+#include <asm/soc-chorus2/clock.h>
+
+static struct resource pdp_resources[] = {
+	{
+		.start = PDP_IRQ_NUM,
+		/* mapped in display1_device_setup() */
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start	= PDP_BASE_ADDR,
+		.end	= PDP_BASE_ADDR + PDP_SIZE - 1,
+		.flags	= IORESOURCE_MEM | PDPFB_IORES_PDP,
+	},
+};
+
+static struct pdp_info pdp_platform_data = {
+	.bpp = 16,
+	.lcd_cfg = {
+		.name = "ATP-dp:display1",
+		.refresh = 60,
+
+		.hsync_len = 8,
+		.left_margin = 9,
+		.xres = 240,
+		.right_margin = 11,
+
+		.vsync_len = 4,
+		.upper_margin = 4,
+		.yres = 320,
+		.lower_margin = 5,
+
+		/* hsync is active high, vsync is active low */
+		.sync = FB_SYNC_HOR_HIGH_ACT,
+	},
+	.lcd_size_cfg = {
+		.width = 54,	/* 53.64mm */
+		.height = 72,	/* 71.52mm */
+	},
+	.sync_cfg = {
+		.force_vsyncs = 1,
+		.hsync_dis = 0,
+		.vsync_dis = 0,
+		.blank_dis = 1,
+		.blank_pol = PDP_ACTIVE_LOW,
+		.clock_pol = PDP_CLOCK_INVERTED,
+		.sync_slave = 1,
+	},
+};
+
+static struct platform_device pdp_device = {
+	.name           = "pdpfb",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(pdp_resources),
+	.resource 	= pdp_resources,
+	.dev            = {
+		.platform_data = &pdp_platform_data,
+	},
+};
+
+static struct resource pdi_resources[] = {
+	{
+		.start	= PDI_BASE_ADDR,
+		.end	= PDI_BASE_ADDR + PDI_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static int display1_pdi_match_fb(struct imgpdi_lcd_pdata *pdata,
+			      struct fb_info *info)
+{
+	return !strncmp(info->fix.id, "pdp", 16);
+}
+
+struct imgpdi_lcd_timings pdi_timings = {
+	.pwrsvgd	= 16,
+	.ls		= 16,
+	.pwrsvgd2	= 246,
+	.nl		= 7,
+	.acb		= 256,
+
+	.newframe_en	= 1,
+	.gatedriver_en	= 1,
+};
+
+static struct imgpdi_lcd_pdata pdi_pdata = {
+	.match_fb	= display1_pdi_match_fb,
+	.active		= &pdi_timings,
+};
+
+static struct platform_device pdi_device = {
+	.name		= "imgpdi-lcd",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(pdi_resources),
+	.resource	= pdi_resources,
+	.dev		= {
+		.platform_data	= &pdi_pdata,
+	},
+};
+
+static struct platform_device *display1_devices[] __initdata = {
+	&pdi_device,
+	&pdp_device,
+};
+
+/* display1 has a PCA9533 on I2C bus, at address 99 */
+
+static int display1_pca9533_probe(struct i2c_adapter *adap)
+{
+	if (i2c_smbus_xfer(adap, 99, 0, 0, 0, I2C_SMBUS_QUICK, NULL) >= 0) {
+		printk(KERN_INFO "Detected ATP120-dp:display1\n");
+		pix_clk_set_limits(4500000, 6800000);	/* 4.5-6.8MHz */
+		platform_add_devices(display1_devices,
+				     ARRAY_SIZE(display1_devices));
+		return 0;
+	}
+	return -EIO;
+}
+
+static const struct i2c_device_id display1_pca9533_id[] = {
+	{ "display1_pca9533", 0 },
+	{ },
+};
+
+static struct i2c_driver display1_pca9533_driver = {
+	.driver.name	= "display1_pca9533",
+	.attach_adapter = display1_pca9533_probe,
+	.id_table	= display1_pca9533_id,
+};
+
+static int __init display1_device_setup(void)
+{
+	int irq;
+
+	/* Map the IRQ */
+	irq = external_irq_map(pdp_resources[0].start);
+	if (irq < 0) {
+		pr_err("%s: irq map failed (%d)\n",
+		       __func__, irq);
+		return irq;
+	}
+	pdp_resources[0].start = irq;
+	pdp_resources[0].end = irq;
+
+	i2c_add_driver(&display1_pca9533_driver);
+	return 0;
+}
+device_initcall(display1_device_setup);
diff --git a/arch/metag/boards/atp-dp/display2/Makefile b/arch/metag/boards/atp-dp/display2/Makefile
new file mode 100644
index 0000000..11b9032
--- /dev/null
+++ b/arch/metag/boards/atp-dp/display2/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the ATP-dp:display2 TFT board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/atp-dp/display2/setup.c b/arch/metag/boards/atp-dp/display2/setup.c
new file mode 100644
index 0000000..549ad99
--- /dev/null
+++ b/arch/metag/boards/atp-dp/display2/setup.c
@@ -0,0 +1,143 @@
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <video/imgpdi_lcd.h>
+#include <video/pdpfb.h>
+#include <asm/soc-chorus2/pdp.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+#include <asm/soc-chorus2/clock.h>
+
+static struct resource pdp_resources[] = {
+	{
+		.start = PDP_IRQ_NUM,
+		/* mapped in display2_device_setup() */
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start	= PDP_BASE_ADDR,
+		.end	= PDP_BASE_ADDR + PDP_SIZE - 1,
+		.flags	= IORESOURCE_MEM | PDPFB_IORES_PDP,
+	},
+};
+
+static struct pdp_info pdp_platform_data = {
+	.bpp = 16,
+	.lcd_cfg = {
+		.name = "ATP-dp:display2",
+		.refresh = 60,
+
+		.hsync_len = 16,
+		.left_margin = 138,
+		.xres = 640,
+		.right_margin = 6,
+
+		.vsync_len = 3,
+		.upper_margin = 36,
+		.yres = 480,
+		.lower_margin = 6,
+
+		/* hsync and vsync are active low */
+		.sync = 0,
+	},
+	.lcd_size_cfg = {
+		.width = 115,	/* 115.2mm */
+		.height = 86,	/* 86.4mm */
+	},
+	.sync_cfg = {
+		.force_vsyncs = 1,
+		.hsync_dis = 0,
+		.vsync_dis = 0,
+		.blank_dis = 0,
+		.blank_pol = PDP_ACTIVE_LOW,
+		.clock_pol = PDP_CLOCK_INVERTED,
+	},
+};
+
+static struct platform_device pdp_device = {
+	.name           = "pdpfb",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(pdp_resources),
+	.resource 	= pdp_resources,
+	.dev            = {
+		.platform_data = &pdp_platform_data,
+	},
+};
+
+static struct resource pdi_resources[] = {
+	{
+		.start	= PDI_BASE_ADDR,
+		.end	= PDI_BASE_ADDR + PDI_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static int display2_pdi_match_fb(struct imgpdi_lcd_pdata *pdata,
+			      struct fb_info *info)
+{
+	return !strncmp(info->fix.id, "pdp", 16);
+}
+
+static struct imgpdi_lcd_pdata pdi_pdata = {
+	.match_fb	= display2_pdi_match_fb,
+};
+
+static struct platform_device pdi_device = {
+	.name		= "imgpdi-lcd",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(pdi_resources),
+	.resource	= pdi_resources,
+	.dev		= {
+		.platform_data	= &pdi_pdata,
+	},
+};
+
+static struct platform_device *display2_devices[] __initdata = {
+	&pdi_device,
+	&pdp_device,
+};
+
+/* display2 has a CH7012 on I2C bus, at address 117 */
+
+static int display2_ch7012_probe(struct i2c_adapter *adap)
+{
+	if (i2c_smbus_xfer(adap, 117, 0, 0, 0, I2C_SMBUS_QUICK, NULL) >= 0) {
+		printk(KERN_INFO "Detected ATP120-dp:display2\n");
+		pix_clk_set_limits(22660000, 27690000);	/* 22.66-27.69MHz */
+		platform_add_devices(display2_devices,
+				     ARRAY_SIZE(display2_devices));
+		return 0;
+	}
+	return -EIO;
+}
+
+static const struct i2c_device_id display2_ch7012_id[] = {
+	{ "display2_ch7012", 0 },
+	{ },
+};
+
+static struct i2c_driver display2_ch7012_driver = {
+	.driver.name	= "display2_ch7012",
+	.attach_adapter = display2_ch7012_probe,
+	.id_table	= display2_ch7012_id,
+};
+
+static int __init display2_device_setup(void)
+{
+	int irq;
+
+	/* Map the IRQ */
+	irq = external_irq_map(pdp_resources[0].start);
+	if (irq < 0) {
+		pr_err("%s: irq map failed (%d)\n",
+		       __func__, irq);
+		return irq;
+	}
+	pdp_resources[0].start = irq;
+	pdp_resources[0].end = irq;
+
+	i2c_add_driver(&display2_ch7012_driver);
+	return 0;
+}
+device_initcall(display2_device_setup);
diff --git a/arch/metag/boards/atp-dp/lan1/Makefile b/arch/metag/boards/atp-dp/lan1/Makefile
new file mode 100644
index 0000000..c24f181
--- /dev/null
+++ b/arch/metag/boards/atp-dp/lan1/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the ATP-dp:lan1 ethernet board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/atp-dp/lan1/setup.c b/arch/metag/boards/atp-dp/lan1/setup.c
new file mode 100644
index 0000000..000b4a4
--- /dev/null
+++ b/arch/metag/boards/atp-dp/lan1/setup.c
@@ -0,0 +1,98 @@
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/smsc911x.h>
+#include <asm/io.h>
+#include <asm/soc-chorus2/gpio.h>
+
+#define ATP_DP_LAN1_IRQ_PIN	GPIO_D_PIN(10)
+
+#define ATP_DP_LAN1_BASE 0xc2f80000
+#define ATP_DP_LAN1_LEN  0x10000
+
+#define LAN1_FLASH_BASE  0xC1C00000
+#define LAN1_FLASH_SIZE  0x800000
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.start  = ATP_DP_LAN1_BASE,
+		.end    = (ATP_DP_LAN1_BASE +
+			   ATP_DP_LAN1_LEN - 1),
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		/* start and end filled in by atp_dp_lan1_init */
+		.flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+	}
+};
+
+static struct smsc911x_platform_config smsc911x_pdata = {
+	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags = SMSC911X_USE_32BIT,
+	.phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name           = "smsc911x",
+	.id             = 0,
+	.num_resources  = ARRAY_SIZE(smsc911x_resources),
+	.resource       = smsc911x_resources,
+	.dev            = {
+		.platform_data = &smsc911x_pdata,
+	},
+};
+
+static struct physmap_flash_data lan1_flash_data = {
+	.width          = 2,
+};
+
+static struct resource lan1_flash_resources[] = {
+	{
+		.start  = LAN1_FLASH_BASE,
+		.end    = LAN1_FLASH_BASE + LAN1_FLASH_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device lan1_flash = {
+	.name           = "physmap-flash",
+	.id             = 0,
+	.dev            = {
+		.platform_data = &lan1_flash_data,
+	},
+	.resource       = lan1_flash_resources,
+	.num_resources  = ARRAY_SIZE(lan1_flash_resources),
+};
+
+
+static struct platform_device *platform_devices[] __initdata = {
+	&smsc911x_device,
+	&lan1_flash,
+};
+
+static int __init atp_dp_lan1_init(void)
+{
+	int irq;
+
+	/*
+	 * Setup the RDI pin to take the ethernet interrupt.
+	 * This requires the correct jumper settings on the ATP-dp board.
+	 */
+
+	gpio_request(ATP_DP_LAN1_IRQ_PIN, "ethernet irq");
+	gpio_direction_input(ATP_DP_LAN1_IRQ_PIN);
+
+	irq = gpio_to_irq(ATP_DP_LAN1_IRQ_PIN);
+	smsc911x_resources[1].start = irq;
+	smsc911x_resources[1].end = irq;
+
+	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+	return 0;
+}
+
+__initcall(atp_dp_lan1_init);
diff --git a/arch/metag/boards/atp-dp/setup.c b/arch/metag/boards/atp-dp/setup.c
new file mode 100644
index 0000000..e810bf0
--- /dev/null
+++ b/arch/metag/boards/atp-dp/setup.c
@@ -0,0 +1,135 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <asm/soc-chorus2/gpio.h>
+
+#define WRITE_PROTECT_PIN  GPIO_EXP_PIN(12)
+#define CARD_DETECT_PIN    GPIO_EXP_PIN(13)
+/* LED used to signal card detect */
+#define LED_PIN            GPIO_EXP_PIN(14)
+/* Interrupt mask pin needs to be driven high to enable interrupt */
+#define INT_MASK_PIN       GPIO_EXP_PIN(15)
+
+static irqreturn_t (*mmc_spi_handler)(int, void *);
+static int mmc_init;
+
+static irqreturn_t mmc_detect_check(int irq, void *mmc)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mmc_detect_irq(int irq, void *mmc)
+{
+	/* Reading the value acks the interrupt */
+	if (gpio_get_value_cansleep(CARD_DETECT_PIN) == 0)
+		gpio_set_value_cansleep(LED_PIN, 0);
+	else
+		gpio_set_value_cansleep(LED_PIN, 1);
+
+	return mmc_spi_handler(irq, mmc);
+}
+
+static int mmc_spi_init(struct device *dev,
+			irqreturn_t (*handler)(int, void *),
+			void *mmc)
+{
+	int err = 0;
+	int irq;
+	err |= gpio_request(WRITE_PROTECT_PIN, "SD write protect");
+	err |= gpio_request(CARD_DETECT_PIN, "SD card detect");
+	err |= gpio_request(INT_MASK_PIN, "SD card detect (int mask)");
+	err |= gpio_request(LED_PIN, "LED");
+	err |= gpio_request(GPIO_H_PIN(5), "mmc card detect irq");
+
+	if (err) {
+		printk(KERN_WARNING "request for mmc card detect gpios failed\n");
+		return 0;
+	}
+
+	gpio_direction_input(WRITE_PROTECT_PIN);
+	gpio_direction_input(CARD_DETECT_PIN);
+
+	gpio_direction_output(INT_MASK_PIN, 1);
+	gpio_direction_output(LED_PIN, 1);
+
+	if (gpio_get_value_cansleep(CARD_DETECT_PIN) == 0)
+		gpio_set_value_cansleep(LED_PIN, 0);
+
+	irq = gpio_to_irq(GPIO_H_PIN(5));
+	gpio_direction_input(GPIO_H_PIN(5));
+
+	mmc_spi_handler = handler;
+
+	if (request_threaded_irq(irq, mmc_detect_check, mmc_detect_irq,
+				 IRQF_TRIGGER_FALLING, "mmc_card_detect",
+				 mmc)) {
+		printk(KERN_WARNING "failed to get mmc card detect irq\n");
+	}
+
+	mmc_init = 1;
+
+	return 0;
+}
+
+static int mmc_spi_get_ro(struct device *dev)
+{
+	if (mmc_init)
+		return gpio_get_value_cansleep(WRITE_PROTECT_PIN);
+	return 1;
+}
+
+struct mmc_spi_platform_data mmc_spi_data = {
+	.init = &mmc_spi_init,
+	.get_ro = &mmc_spi_get_ro,
+};
+
+static struct spi_board_info spi_device_info[] __initdata = {
+#if 0
+	{
+		.modalias       = "mtd_dataflash",
+		.max_speed_hz   = 12500000,
+		.chip_select    = 0,
+	},
+#endif
+	{
+		.modalias       = "mmc_spi",
+		.max_speed_hz   = 12500000,
+		.chip_select    = 2,
+		.platform_data  = &mmc_spi_data,
+	},
+};
+
+static struct pca953x_platform_data pca9555_data = {
+	.gpio_base = GPIO_EXP_BASE,
+};
+
+static struct i2c_board_info __initdata atp_dp_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("pca953x", 0x20),
+		.type = "pca9555",
+		.platform_data = &pca9555_data,
+	},
+};
+
+static void __init atp_dp_i2c_init(void)
+{
+	i2c_register_board_info(1, atp_dp_i2c_devices,
+				ARRAY_SIZE(atp_dp_i2c_devices));
+}
+
+static int __init atp_dp_init(void)
+{
+	spi_register_board_info(spi_device_info, ARRAY_SIZE(spi_device_info));
+	atp_dp_i2c_init();
+	return 0;
+}
+
+device_initcall(atp_dp_init);
diff --git a/arch/metag/boards/comet-bub/Makefile b/arch/metag/boards/comet-bub/Makefile
new file mode 100644
index 0000000..e44fe3a
--- /dev/null
+++ b/arch/metag/boards/comet-bub/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the comet bring-up-board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/comet-bub/display/Makefile b/arch/metag/boards/comet-bub/display/Makefile
new file mode 100644
index 0000000..2a15adc
--- /dev/null
+++ b/arch/metag/boards/comet-bub/display/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the comet bring up board TFT screen specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/comet-bub/display/setup.c b/arch/metag/boards/comet-bub/display/setup.c
new file mode 100644
index 0000000..916e851
--- /dev/null
+++ b/arch/metag/boards/comet-bub/display/setup.c
@@ -0,0 +1,70 @@
+/*
+ * boards/comet-bub/display/setup.c - board specific initialisation
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <video/pdpfb.h>
+#include <asm/soc-tz1090/pdp.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/clock.h>
+
+static void comet_bub_set_screen_power(int pwr)
+{
+	/*
+	 * This is a work around.
+	 * The bringup board has the wrong polarity, so the TFT power can be
+	 * controlled instead using a GPIO signal.
+	 */
+	gpio_set_value(GPIO_TFT_PWRSAVE, !!pwr);
+}
+
+/* based on atp-dp:display2 */
+static struct fb_videomode fbvm = {
+	.name = "Comet-bub:display",
+	.refresh = 60,
+
+	.hsync_len = 30,
+	.left_margin = 114,
+	.xres = 640,
+	.right_margin = 16,
+
+	.vsync_len = 3,
+	.upper_margin = 29,
+	.yres = 480,
+	.lower_margin = 10,
+
+	/* hsync and vsync are active low */
+	.sync = 0,
+};
+
+static struct pdp_lcd_size_cfg plsc = {
+	.width = 115,	/* 115.2mm */
+	.height = 86,	/* 86.4mm */
+};
+
+static struct pdp_sync_cfg psc = {
+	.force_vsyncs = 0,
+	.hsync_dis = 0,
+	.vsync_dis = 0,
+	.blank_dis = 0,
+	.blank_pol = PDP_ACTIVE_LOW,
+	.clock_pol = PDP_CLOCK_INVERTED,
+};
+
+static struct pdp_hwops hwops = {
+	.set_screen_power = comet_bub_set_screen_power,
+	.set_shared_base = comet_pdp_set_shared_base,
+};
+
+static int __init display_device_setup(void)
+{
+	comet_pdp_set_limits(22660000, 27690000);
+	return comet_pdp_setup(&fbvm, &plsc, NULL, &psc, &hwops);
+}
+device_initcall(display_device_setup);
diff --git a/arch/metag/boards/comet-bub/lcd/Makefile b/arch/metag/boards/comet-bub/lcd/Makefile
new file mode 100644
index 0000000..52754ac
--- /dev/null
+++ b/arch/metag/boards/comet-bub/lcd/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the comet bring up board LCD screen.
+#
+
+obj-$(CONFIG_COMET_BUB_LCD)	:= setup.o
+
diff --git a/arch/metag/boards/comet-bub/lcd/setup.c b/arch/metag/boards/comet-bub/lcd/setup.c
new file mode 100644
index 0000000..c611248
--- /dev/null
+++ b/arch/metag/boards/comet-bub/lcd/setup.c
@@ -0,0 +1,112 @@
+/*
+ * boards/comet-bub/lcd/setup.c
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/clock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+
+
+
+static struct resource lcd_resources[] = {
+	[0] = {
+		.start = LCD_BASE_ADDR,
+		.end = LCD_BASE_ADDR + LCD_SIZE - 1, /* resource end is inclusive */
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = LCD_IRQ_NUM,
+		.end = LCD_IRQ_NUM,
+		.flags = IORESOURCE_IRQ,
+	},
+	[2] = {
+		.name = "dma_periph",
+		.start = DMA_MUX_LCD_WR,
+		.end = DMA_MUX_LCD_WR,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static u64 lcd_dmamask = DMA_BIT_MASK(64);
+
+static struct platform_device lcd_device = {
+	.name           = "img-lcd",
+	.id             = -1,
+	.num_resources = ARRAY_SIZE(lcd_resources),
+	.resource = lcd_resources,
+	.dev = {
+		.dma_mask = &lcd_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(64),
+	},
+};
+
+
+static void lcd_init(void)
+{
+	u32 temp;
+	int lstat;
+
+
+	/* Setup Clock */
+	struct clk *pixel_clock = clk_get(NULL, "pixel");
+	pix_clk_set_limits(22660000,	/* 22.66MHz */
+			   27690000);	/* 27.69MHz */
+	if (clk_set_rate(pixel_clock, 24576000))
+		printk(KERN_WARNING"Failed to set Pixel clock to "
+				 "24.576MHZ for LCD");
+	clk_prepare_enable(pixel_clock);
+
+	/*set TFT Pin mux to LCD Mode*/
+	__global_lock2(lstat);
+	temp = readl(CR_IF_CTL0);
+	temp &= ~0x7;
+	temp |= 0x3; /*LCD is on same mode as trace*/
+	writel(temp, CR_IF_CTL0);
+	__global_unlock2(lstat);
+
+	/*Set Pins to non gpio mode*/
+	comet_gpio_disable_block(GPIO_TFT_RED0, GPIO_TFT_RED5);
+	comet_gpio_disable_block(GPIO_TFT_GREEN0, GPIO_TFT_GREEN1);
+	comet_gpio_disable_block(GPIO_TFT_PANELCLK, GPIO_TFT_HSYNC_NR);
+
+	/*Enable pixel clock (used by lcd)*/
+	__global_lock2(lstat);
+	temp = readl(CR_TOP_CLKENAB2);
+	temp |= (1 << CR_TOP_PIXEL_CLK_2_EN_BIT);
+	writel(temp, CR_TOP_CLKENAB2);
+	__global_unlock2(lstat);
+
+	/*Enable LCD clock*/
+	__global_lock2(lstat);
+	temp = readl(CR_PERIP_CLK_EN);
+	temp |= (1 << CR_PERIP_LCD_CLK_EN_BIT);
+	writel(temp, CR_PERIP_CLK_EN);
+	__global_unlock2(lstat);
+
+#ifdef CONFIG_SOC_TZ1090
+	/* map IRQ on Comet */
+	lstat = lcd_resources[1].start;
+	lstat = external_irq_map(lstat);
+	/* store mapped irq */
+	lcd_resources[1].start = lstat;
+	lcd_resources[1].end = lstat;
+#endif
+
+}
+
+
+static int __init lcd_setup(void)
+{
+	lcd_init();
+
+	return platform_device_register(&lcd_device);
+
+} device_initcall(lcd_setup);
diff --git a/arch/metag/boards/comet-bub/setup.c b/arch/metag/boards/comet-bub/setup.c
new file mode 100644
index 0000000..afdfbd6
--- /dev/null
+++ b/arch/metag/boards/comet-bub/setup.c
@@ -0,0 +1,103 @@
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <asm/mach/arch.h>
+#include <asm/soc-tz1090/sdhost.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/setup.h>
+#include <asm/soc-tz1090/usb.h>
+
+/* If XTAL3 isn't used, override get_xtal3 */
+#ifndef CONFIG_COMET_BUB_XTAL3
+unsigned long get_xtal3(void)
+{
+	return 0;
+}
+#endif
+
+static int __init comet_bub_init_tft(void)
+{
+	int err;
+
+	/*
+	 * The bringup board has the wrong polarity, so the TFT power can be
+	 * controlled instead using a GPIO signal.
+	 */
+
+	comet_gpio_disable_block(GPIO_TFT_FIRST, GPIO_TFT_LAST);
+	err = gpio_request(GPIO_TFT_PWRSAVE, "TFT pwrsave");
+	if (err) {
+		printk(KERN_WARNING "TFT_PWRSAVE GPIO request failed: %d",
+					err);
+		return -EINVAL;
+	}
+	err = gpio_direction_output(GPIO_TFT_PWRSAVE, 0);
+	if (err) {
+		printk(KERN_WARNING "TFT_PWRSAVE GPIO set direction failed: %d",
+					err);
+		return -EINVAL;
+	}
+
+	/*
+	 * Switch the screen off until it wants to be used.
+	 */
+
+	gpio_set_value(GPIO_TFT_PWRSAVE, 0);
+
+	return 0;
+}
+
+/*
+ * USB setup and VBUS control
+ */
+
+static void comet_bub_enable_vbus(void)
+{
+	gpio_set_value(GPIO_PDM_D, 1);
+}
+
+static void comet_bub_disable_vbus(void)
+{
+	gpio_set_value(GPIO_PDM_D, 0);
+}
+
+static struct dwc_otg_board comet_bub_usb_board = {
+	.enable_vbus = comet_bub_enable_vbus,
+	.disable_vbus = comet_bub_disable_vbus,
+};
+
+static int __init comet_bub_init_usb(void)
+{
+	if (gpio_request(GPIO_PDM_D, "USB VBus"))
+		pr_err("Failed to request PDM_D GPIO\n");
+
+	gpio_direction_output(GPIO_PDM_D, 1);
+	gpio_set_value(GPIO_PDM_D, 0);
+
+	comet_usb_setup(&comet_bub_usb_board);
+
+	return 0;
+}
+
+static void __init comet_bub_init(void)
+{
+	comet_init_machine();
+
+	comet_bub_init_tft();
+	comet_bub_init_usb();
+	comet_sdhost_init();
+}
+
+/* Comet Bring-Up-Board */
+
+static const char *comet_bub_boards_compat[] __initdata = {
+	"img,tz1090-01ry",
+	NULL,
+};
+
+MACHINE_START(PURE_01XK, "01RY Comet Bring-Up-Board")
+	.dt_compat	= comet_bub_boards_compat,
+	TZ1090_MACHINE_DEFAULTS,
+	.init_machine	= comet_bub_init,
+MACHINE_END
diff --git a/arch/metag/boards/polaris/Makefile b/arch/metag/boards/polaris/Makefile
new file mode 100644
index 0000000..4d13e5e
--- /dev/null
+++ b/arch/metag/boards/polaris/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Polaris board specific parts of the kernel
+#
+
+obj-y	+= poweroff.o
+obj-y	+= setup.o
+obj-y	+= tft/
+
diff --git a/arch/metag/boards/polaris/gpio.h b/arch/metag/boards/polaris/gpio.h
new file mode 100644
index 0000000..c613ae9
--- /dev/null
+++ b/arch/metag/boards/polaris/gpio.h
@@ -0,0 +1,21 @@
+/*
+ * board/polaris/gpio.h
+ * Polaris GPIO aliases
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _POLARIS_GPIO_H_
+#define _POLARIS_GPIO_H_
+
+#include <asm/soc-tz1090/gpio.h>
+
+/* EN_TFT_BL controls power to EN_VBUS and power to the backlight booster */
+#define EN_TFT_BL	GPIO_TFT_VD12ACB
+#define EN_VBUS		GPIO_SDH_CD
+#define KILL_1V8	GPIO_I2S_DOUT2
+#define KILL_3V3_MAIN	GPIO_UART0_CTS
+#define LATCH_3V3_MAIN	GPIO_PDC_GPIO1
+
+#endif /* _POLARIS_GPIO_H_ */
diff --git a/arch/metag/boards/polaris/poweroff.S b/arch/metag/boards/polaris/poweroff.S
new file mode 100644
index 0000000..e83b3bd
--- /dev/null
+++ b/arch/metag/boards/polaris/poweroff.S
@@ -0,0 +1,130 @@
+! Copyright 2012 Imagination Technologies Ltd.
+!
+! Functions for powering down the Comet SoC on Polaris board.
+
+#include <linux/linkage.h>
+#include <asm/metag_isa.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdc.h>
+
+	.text
+
+! Pause for approximately 100*time instructions
+! This is modelled on ldlk's MPAUSE command
+.macro MPAUSE time
+	MOVT	D1Ar5, #HI((\time)*25)
+	ADD	D1Ar5, D1Ar5, #LO((\time)*25)
+1:
+	NOP
+	SUB	D1Ar5, D1Ar5, #1
+	CMP	D1Ar5, #0
+	BNZ	1b
+.endm
+
+
+! Instruction cache prefetch instruction
+.macro INSTR_ICACHE offset pfcount
+	.long (0xae000001	| (((\offset) & 0xffff) << 9) \
+				| (((\pfcount) & 0xf) << 1))
+.endm
+
+! Prefetch between two markers
+.macro INSTR_ICACHE_BETWEEN start end
+	INSTR_ICACHE ((\start) - .), \
+		      (1 + (((\end) - (\start)) >> ICACHE_LINE_S))
+.endm
+
+!================ POWER OFF ===================================================!
+
+! Cut the following power supplies in order:
+!  +1v8 (powering DDR etc)
+!  3v3_main (powering pads etc)
+!  +1v2 (powering most of the SoC peripherals including the Meta)
+
+ENTRY(_polaris_raw_power_off)
+	! Prefetch enough cache lines to cover the power off sequence
+	INSTR_ICACHE_BETWEEN ., $Licache_extent
+	! Wait for prefetch to complete
+	MPAUSE 1000
+
+	!---------------- KILL +1V8 SUPPLY (BYE BYE RAM!) ------------!
+	! KILL_1V8 is controlled by the I2S_DOUT2 GPIO (GPIO1[7])
+#define KILL_1V8_MASK	(1 << 7)
+
+	! DOUT1[7] = 1 (high)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_DOUT1)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_DOUT1)
+	GETD	D0Re0, [A0.2]
+	OR	D0Re0, D0Re0, #LO(KILL_1V8_MASK)
+	SETD	[A0.2], D0Re0
+
+	! DIR1[7] = 0 (output)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_DIR1)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_DIR1)
+	GETD	D0Re0, [A0.2]
+	ANDMB	D0Re0, D0Re0, #LO(~KILL_1V8_MASK)
+	SETD	[A0.2], D0Re0
+
+	! SELECT1[7] = 1 (gpio)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_SELECT1)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_SELECT1)
+	GETD	D0Re0, [A0.2]
+	OR	D0Re0, D0Re0, #LO(KILL_1V8_MASK)
+	SETD	[A0.2], D0Re0
+
+	! Allow some time for it to take effect before turning off pads
+	MPAUSE 4000 ! ~1ms
+
+	!---------------- KILL 3V3_MAIN SUPPLY -----------------------!
+	! KILL_3V3_MAIN is controlled by the UART0_CTS GPIO (GPIO0[22])
+#define KILL_3V3_MAIN_MASK	(1 << 22)
+
+	! DOUT0[22] = 1 (high)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_DOUT0)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_DOUT0)
+	GETD	D0Re0, [A0.2]
+	ORT	D0Re0, D0Re0, #HI(KILL_3V3_MAIN_MASK)
+	SETD	[A0.2], D0Re0
+
+	! DIR0[22] = 0 (output)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_DIR0)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_DIR0)
+	GETD	D0Re0, [A0.2]
+	ANDMT	D0Re0, D0Re0, #HI(~KILL_3V3_MAIN_MASK)
+	SETD	[A0.2], D0Re0
+
+	! SELECT0[22] = 1 (gpio)
+	MOVT	A0.2,#HI(CR_PADS_GPIO_SELECT0)
+	ADD	A0.2,A0.2,#LO(CR_PADS_GPIO_SELECT0)
+	GETD	D0Re0, [A0.2]
+	ORT	D0Re0, D0Re0, #HI(KILL_3V3_MAIN_MASK)
+	SETD	[A0.2], D0Re0
+
+	! Allow some time for it to take effect before turning off SoC
+	MPAUSE 4000 ! ~1ms
+
+	!---------------- POWER OFF MAIN POWER ISLAND ----------------!
+
+	! PDC_SOC_POWER = 0
+	MOVT	A0.2,#HI(PDC_BASE_ADDR + PDC_SOC_POWER)
+	ADD	A0.2,A0.2,#LO(PDC_BASE_ADDR + PDC_SOC_POWER)
+	MOV	D0Re0, #0
+	SETD	[A0.2], D0Re0
+
+	!---------------- STOP THREAD TO ALLOW TIME TO DIE -----------!
+
+#define OP3_EXIT	0
+#define HALT_OK		0
+	MOV	D1Ar1, #HALT_OK
+	MOV	D0Ar6, #OP3_EXIT
+	MSETL	[A0StP],D0Ar6,D0Ar4,D0Ar2
+1:	SWITCH	#0xC30006
+	! Never return
+	B	1b
+
+$Licache_extent:
+ENDPROC(_polaris_raw_power_off)
+
+! Make the length available so the code can be copied into core memory.
+ENTRY(_polaris_raw_power_off_sz)
+        .long   . - _polaris_raw_power_off
diff --git a/arch/metag/boards/polaris/poweroff.h b/arch/metag/boards/polaris/poweroff.h
new file mode 100644
index 0000000..ee3050e
--- /dev/null
+++ b/arch/metag/boards/polaris/poweroff.h
@@ -0,0 +1,15 @@
+#ifndef _POLARIS_POWEROFF_H_
+#define _POLARIS_POWEROFF_H_
+
+/**
+ * polaris_raw_power_off() - Power down the board and SoC.
+ *
+ * This powers down +1v8 (DDR etc), 3v3_main (SoC pads etc), and 1v2 (SoC
+ * peripherals).
+ */
+extern void polaris_raw_power_off(void);
+
+/* Size in bytes of the polaris_raw_power_off() code. */
+extern unsigned int polaris_raw_power_off_sz;
+
+#endif /* _POLARIS_POWEROFF_H_ */
diff --git a/arch/metag/boards/polaris/setup.c b/arch/metag/boards/polaris/setup.c
new file mode 100644
index 0000000..c002202
--- /dev/null
+++ b/arch/metag/boards/polaris/setup.c
@@ -0,0 +1,323 @@
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <asm/coremem.h>
+#include <asm/mach/arch.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdc.h>
+#include <asm/soc-tz1090/pm.h>
+#include <asm/soc-tz1090/setup.h>
+#include <asm/soc-tz1090/usb.h>
+#include "gpio.h"
+#include "poweroff.h"
+#include "setup.h"
+
+#define GPIO_KEY_DEBOUNCE_MSEC		5	/* ms */
+
+/* No XTAL3 (32.768KHz) oscillator is fitted */
+unsigned long get_xtal3(void)
+{
+	return 0;
+}
+
+/*
+ * Backlight is powered by boost converter on USB power switch chip. and must be
+ * turned on for VBUS to be powered.
+ */
+static DEFINE_SPINLOCK(bl_boost_lock);
+#define POLARIS_BL_BOOST_SETUP_M	(1 << 31)
+static unsigned int bl_boost_users;
+
+static int polaris_bl_boost_setup(void)
+{
+	unsigned long flags;
+	int err = 0;
+	spin_lock_irqsave(&bl_boost_lock, flags);
+
+	/* is it already set up? */
+	if (bl_boost_users & POLARIS_BL_BOOST_SETUP_M)
+		goto out;
+
+	/* get control of EN_TFT_BL */
+	err = gpio_request(EN_TFT_BL, "EN_TFT_BL");
+	if (err) {
+		pr_err("Couldn't get EN_TFT_BL gpio!\n");
+		goto out;
+	}
+
+	err = gpio_direction_output(EN_TFT_BL, 1);
+	if (err) {
+		pr_err("Couldn't make EN_TFT_BL an output!\n");
+		gpio_free(EN_TFT_BL);
+		goto out;
+	}
+
+	/* record that the GPIO is configured */
+	bl_boost_users |= POLARIS_BL_BOOST_SETUP_M;
+
+out:
+	spin_unlock_irqrestore(&bl_boost_lock, flags);
+	return err;
+}
+
+int polaris_bl_boost_set(unsigned int user, int en)
+{
+	unsigned long flags;
+	int ret;
+
+	if (user >= 31)
+		return -EINVAL;
+
+	/* ensure backlight is setup */
+	ret = polaris_bl_boost_setup();
+
+	spin_lock_irqsave(&bl_boost_lock, flags);
+	if (en)
+		bl_boost_users |= (1 << user);
+	else
+		bl_boost_users &= ~(1 << user);
+	/* update backlight boost power */
+	if (!ret)
+		gpio_set_value(EN_TFT_BL,
+			       bl_boost_users & ~POLARIS_BL_BOOST_SETUP_M);
+	spin_unlock_irqrestore(&bl_boost_lock, flags);
+	return ret;
+}
+
+/*------------------------------------ USB/VBUS ------------------------------*/
+static void polaris_enable_vbus(void)
+{
+	polaris_bl_boost_set(POLARIS_BL_BOOST_VBUS, 1);
+	gpio_set_value(EN_VBUS, 1);
+}
+
+static void polaris_disable_vbus(void)
+{
+	gpio_set_value(EN_VBUS, 0);
+	polaris_bl_boost_set(POLARIS_BL_BOOST_VBUS, 0);
+}
+
+static struct dwc_otg_board polaris_usb_board = {
+	.enable_vbus = polaris_enable_vbus,
+	.disable_vbus = polaris_disable_vbus,
+};
+
+static int __init polaris_init_usb(void)
+{
+	if (gpio_request(EN_VBUS, "EN_VBUS"))
+		pr_err("Failed to request EN_VBUS GPIO\n");
+
+	gpio_direction_output(EN_VBUS, 1);
+
+	comet_usb_setup(&polaris_usb_board);
+
+	return 0;
+}
+
+/*--------------------------- GPIO Buttons -----------------------------------*/
+static struct gpio_keys_button polaris_gpio_buttons[] = {
+	{
+		.code			= KEY_VOLUMEDOWN,
+		.gpio			= GPIO_PLL_ON,
+		.desc			= "Vol-",
+		.type			= EV_KEY,
+		.debounce_interval	= GPIO_KEY_DEBOUNCE_MSEC,
+		.can_disable		= true,
+	},
+	{
+		.code			= KEY_VOLUMEUP,
+		.gpio			= GPIO_PA_ON,
+		.desc			= "Vol+",
+		.type			= EV_KEY,
+		.debounce_interval	= GPIO_KEY_DEBOUNCE_MSEC,
+		.can_disable		= true,
+	},
+	{
+		.code			= KEY_POWER,
+		.gpio			= GPIO_SYS_WAKE0,
+		.desc			= "Power",
+		.type			= EV_KEY,
+		.wakeup			= true,
+		.debounce_interval	= GPIO_KEY_DEBOUNCE_MSEC,
+		.can_disable		= true,
+	},
+};
+
+static struct gpio_keys_platform_data polaris_gpio_buttons_data = {
+	.buttons	= polaris_gpio_buttons,
+	.nbuttons	= ARRAY_SIZE(polaris_gpio_buttons),
+	.rep		= false, /* auto repeat disabled */
+};
+
+static struct platform_device polaris_gpio_buttons_device = {
+	.name		= "gpio-keys",
+	.dev		= {
+		.platform_data	= &polaris_gpio_buttons_data,
+	}
+};
+
+static int __init polaris_init_gpio_buttons(void)
+{
+	int err;
+
+	err = platform_device_register(&polaris_gpio_buttons_device);
+	if (err) {
+		pr_err("gpio-keys: register failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*--------------------------- power management -------------------------------*/
+static void polaris_power_off(void)
+{
+	struct metag_coremem_region *reg;
+	void (*power_off_func)(void);
+	unsigned long flags;
+
+	/*
+	 * Ensure power button is set as wake.
+	 */
+	enable_irq_wake(gpio_to_irq(GPIO_SYS_WAKE0));
+
+	/*
+	 * Increase delay after reset and power on for the external regulators.
+	 * 0x7 = 128 clock cycles (32kHz clock domain).
+	 */
+	writel(0x07777777, PDC_BASE_ADDR + PDC_SOC_DELAY);
+
+	/*
+	 * Cutting +1v8 power supply kills RAM, so we need to ensure no RAM
+	 * accesses occur after this is done. This is guaranteed by executing a
+	 * stub of assembly code from locked in cache lines.
+	 */
+
+	/* Get the address of some core memory */
+	reg = metag_coremem_alloc(METAG_COREMEM_ICACHE,
+				  polaris_raw_power_off_sz);
+	if (!reg) {
+		pr_err("Couldn't allocate core memory for power off\n");
+		return;
+	}
+
+	/* Copy the power off code into core memory */
+	power_off_func = metag_coremem_push(reg, polaris_raw_power_off,
+					    polaris_raw_power_off_sz);
+	if (!power_off_func) {
+		pr_err("Couldn't push power off function to core memory\n");
+		metag_coremem_free(reg);
+		return;
+	}
+
+	/* Run the poweroff code, with other threads stopped */
+	__global_lock2(flags);
+	wmb();
+	power_off_func();
+
+	/* polaris_raw_power_off() should never return */
+	BUG();
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int polaris_suspend(suspend_state_t state)
+{
+	/*
+	 * Increase delay after reset and power on for the external regulators.
+	 * 0x7 = 128 clock cycles (32kHz clock domain).
+	 */
+	writel(0x07777777, PDC_BASE_ADDR + PDC_SOC_DELAY);
+
+	/*
+	 * Kill 3v3 supply.
+	 */
+	gpio_direction_output(KILL_3V3_MAIN, 1);
+	udelay(1000);
+	gpio_direction_output(KILL_3V3_MAIN, 0);
+
+	return 0;
+}
+
+static void polaris_resume(suspend_state_t state)
+{
+	/*
+	 * Standby won't restore 3v3 because EXT_POWER doesn't change, so use
+	 * LATCH_3V3_MAIN to turn it back on again.
+	 */
+	if (state == PM_SUSPEND_STANDBY) {
+		gpio_direction_output(LATCH_3V3_MAIN, 1);
+		udelay(5000);
+		gpio_direction_output(LATCH_3V3_MAIN, 0);
+	}
+}
+#endif	/* CONFIG_PM_SLEEP */
+
+static void polaris_init_pm(void)
+{
+	int kill_1v8_err, kill_3v3_err, latch_3v3_err;
+
+	/* Request power kill GPIOs */
+	kill_1v8_err = gpio_request(KILL_1V8, "KILL_1V8");
+	if (kill_1v8_err) {
+		pr_err("%s: Could not get KILL_1V8 GPIO", __func__);
+		goto err_kill_1v8;
+	}
+
+	kill_3v3_err = gpio_request(KILL_3V3_MAIN, "KILL_3V3_MAIN");
+	if (kill_3v3_err) {
+		pr_err("%s: Could not get KILL_3V3_MAIN GPIO", __func__);
+		goto err_kill_3v3;
+	}
+
+	latch_3v3_err = gpio_request(LATCH_3V3_MAIN, "LATCH_3V3_MAIN");
+	if (latch_3v3_err) {
+		pr_err("%s: Could not get LATCH_3V3_MAIN GPIO", __func__);
+		goto err_latch_3v3;
+	}
+
+	/* Register board callbacks */
+	board_power_off = polaris_power_off;
+#ifdef CONFIG_PM_SLEEP
+	board_suspend = polaris_suspend;
+	board_resume = polaris_resume;
+#endif
+
+	return;
+
+err_latch_3v3:
+	gpio_free(KILL_3V3_MAIN);
+err_kill_3v3:
+	gpio_free(KILL_1V8);
+err_kill_1v8:
+	return;
+}
+
+/*--------------------------- Init -------------------------------------------*/
+static void __init polaris_init(void)
+{
+	comet_init_machine();
+
+	polaris_init_gpio_buttons();
+	polaris_init_usb();
+	polaris_init_pm();
+}
+
+/* PURE Polaris */
+
+static const char *polaris_boards_compat[] __initdata = {
+	"img,tz1090-01xk",
+	NULL,
+};
+
+MACHINE_START(PURE_01XK, "01XK PURE Polaris")
+	.dt_compat	= polaris_boards_compat,
+	TZ1090_MACHINE_DEFAULTS,
+	.init_machine	= polaris_init,
+MACHINE_END
diff --git a/arch/metag/boards/polaris/setup.h b/arch/metag/boards/polaris/setup.h
new file mode 100644
index 0000000..88cd8d0
--- /dev/null
+++ b/arch/metag/boards/polaris/setup.h
@@ -0,0 +1,16 @@
+/*
+ * board/polaris/setup.h
+ * Polaris related things
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _POLARIS_SETUP_H_
+#define _POLARIS_SETUP_H_
+
+#define POLARIS_BL_BOOST_VBUS	0
+#define POLARIS_BL_BOOST_5V_BL	1
+int polaris_bl_boost_set(unsigned int user, int en);
+
+#endif /* _POLARIS_SETUP_H_ */
diff --git a/arch/metag/boards/polaris/tft/Makefile b/arch/metag/boards/polaris/tft/Makefile
new file mode 100644
index 0000000..caa8f86
--- /dev/null
+++ b/arch/metag/boards/polaris/tft/Makefile
@@ -0,0 +1,2 @@
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/polaris/tft/setup.c b/arch/metag/boards/polaris/tft/setup.c
new file mode 100644
index 0000000..e773d36
--- /dev/null
+++ b/arch/metag/boards/polaris/tft/setup.c
@@ -0,0 +1,107 @@
+/*
+ * boards/polaris/tft/setup.c
+ *
+ * Copyright (C) 2010-2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <video/tz1090_auxdac_bl.h>
+#include <video/pdpfb.h>
+#include <asm/soc-tz1090/clock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdp.h>
+#include "../gpio.h"
+#include "../setup.h"
+
+static struct fb_videomode polaris_lcd_cfg = {
+	.name = "polaris:display",
+	.refresh = 60,
+
+	.hsync_len = 20,
+	.left_margin = 124,
+	.xres = 640,
+	.right_margin = 64,
+
+	.vsync_len = 2,
+	.upper_margin = 11,
+	.yres = 480,
+	.lower_margin = 32,
+
+	/* hsync and vsync are active low */
+	.sync = 0,
+};
+
+static struct pdp_lcd_size_cfg polaris_lcd_sizecfg = {
+	.width = 115,	/* 115.2mm */
+	.height = 86,	/* 86.4mm */
+};
+
+static struct pdp_sync_cfg polaris_lcd_synccfg = {
+	.force_vsyncs = 0,
+	.hsync_dis = 0,
+	.vsync_dis = 0,
+	.blank_dis = 0,
+	.blank_pol = PDP_ACTIVE_LOW,
+	.clock_pol = PDP_CLOCK_INVERTED,
+};
+
+static struct pdp_hwops polaris_lcd_hwops = {
+	.set_shared_base = comet_pdp_set_shared_base,
+};
+
+/* backlight control */
+
+static int polaris_bl_match_fb(struct tz1090_auxdac_bl_pdata *pdata,
+			       struct fb_info *info)
+{
+	return !strncmp(info->fix.id, "pdp", 16);
+}
+
+static void polaris_bl_set_power(int power)
+{
+	polaris_bl_boost_set(POLARIS_BL_BOOST_5V_BL,
+			     power != FB_BLANK_POWERDOWN);
+}
+
+static struct tz1090_auxdac_bl_pdata polaris_bl_info = {
+	.name			= "polaris-bl",
+	.default_intensity	= 0xff,
+	.match_fb		= polaris_bl_match_fb,
+	.set_bl_power		= polaris_bl_set_power,
+};
+
+static struct platform_device polaris_bl_dev = {
+	.name			= "tz1090-auxdac-bl",
+	.dev			= {
+		.platform_data	= &polaris_bl_info,
+	},
+};
+
+static int __init display_device_setup(void)
+{
+	int err;
+
+	pix_clk_set_limits(6000000,	/* 6MHz */
+			   50000000);	/* 50MHz */
+
+	/* set up the PDP */
+	err = comet_pdp_setup(&polaris_lcd_cfg, &polaris_lcd_sizecfg,
+			      NULL, &polaris_lcd_synccfg,
+			      &polaris_lcd_hwops);
+	if (err) {
+		pr_err("Couldn't set up PDP!\n");
+		goto err_pdp_setup;
+	}
+
+	/* register the backlight device */
+	platform_device_register(&polaris_bl_dev);
+
+	return 0;
+
+err_pdp_setup:
+	return err;
+}
+device_initcall(display_device_setup);
diff --git a/arch/metag/boards/sensia-touchscreen/Makefile b/arch/metag/boards/sensia-touchscreen/Makefile
new file mode 100644
index 0000000..1bdee15
--- /dev/null
+++ b/arch/metag/boards/sensia-touchscreen/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Sensia (QT5480) touchscreen specific parts of the kernel.
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/sensia-touchscreen/setup.c b/arch/metag/boards/sensia-touchscreen/setup.c
new file mode 100644
index 0000000..33f84f4
--- /dev/null
+++ b/arch/metag/boards/sensia-touchscreen/setup.c
@@ -0,0 +1,100 @@
+/*
+ * boards/sensia-touchscreen/setup.c
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/input/ts_qt5480.h>
+
+#include <asm/soc-tz1090/gpio.h>
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#include "ts_qt5480_cfg.c"
+
+#if defined(CONFIG_TZ1090_01XX)
+#define TS_GPIO_PIN	GPIO_UART0_CTS
+#define TS_I2C_BUS	2
+#elif defined(CONFIG_COMET_BUB)
+#define TS_GPIO_PIN	GPIO_PDM_C
+#define TS_I2C_BUS	2
+#elif defined(CONFIG_POLARIS)
+#define TS_GPIO_PIN	GPIO_SYS_WAKE1
+#define TS_I2C_BUS	0
+#else
+#error Sensia touchscreen enabled for unsupported board?
+#endif
+
+static ts_qt5480_mapping_t phy_map = {
+	x_sensor_res: 255,
+	x_screen_res: 640,
+	x_flip: 0,
+	x_sensor_size: 117,
+	x_screen_size: 120,
+	x_sensor_offset: 3,
+	y_sensor_res: 255,
+	y_screen_res: 480,
+	y_flip: 0,
+	y_sensor_size: 89,
+	y_screen_size: 95,
+	y_sensor_offset: 3
+};
+
+int qt5480_poll_status(void)
+{
+	return gpio_get_value(TS_GPIO_PIN);
+}
+
+static struct qt5480_platform_data qt5480_data = {
+	.poll_status	= qt5480_poll_status,
+	.phy_map	= &phy_map,
+	.config		= config,
+};
+
+static struct i2c_board_info __initdata touchscreen_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("ts_qt5480", 0x30),
+		.type = "ts_qt5480",
+		.platform_data = &qt5480_data,
+	},
+};
+
+static int __init touchscreen_setup(void)
+{
+	int irq, ret;
+
+	ret = gpio_request(TS_GPIO_PIN, "ts_qt5480 irq gpio" );
+	if (ret) {
+		pr_err("ts_qt5480 gpio (%d) not available\n",
+		       TS_GPIO_PIN);
+		return 1;
+	}
+
+	irq = gpio_to_irq(TS_GPIO_PIN);
+	if (irq < 0) {
+		pr_err("ts_qt5480 gpio irq not available\n");
+		return 1;
+	}
+	gpio_direction_input(TS_GPIO_PIN);
+	comet_gpio_pullup_type(TS_GPIO_PIN, GPIO_PULLUP_TRISTATE);
+	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+	touchscreen_i2c_devices[0].irq = irq;
+
+	i2c_register_board_info(TS_I2C_BUS, touchscreen_i2c_devices,
+				ARRAY_SIZE(touchscreen_i2c_devices));
+
+	return 0;
+
+}
+device_initcall(touchscreen_setup);
diff --git a/arch/metag/boards/sensia-touchscreen/ts_qt5480_cfg.c b/arch/metag/boards/sensia-touchscreen/ts_qt5480_cfg.c
new file mode 100644
index 0000000..ba1b90f
--- /dev/null
+++ b/arch/metag/boards/sensia-touchscreen/ts_qt5480_cfg.c
@@ -0,0 +1,758 @@
+/*
+ *  AUTOGENERATED file. Don't edit manually!!!!
+ *
+ *  Configuration of the QT5480 Quantum TouchScreen Controller
+ */
+
+
+/* QT5480 Configuration table */
+ts_qt5480_conf_reg_t config[QT_MAX_REG] =	{
+												{FALSE, },			//    0
+												{FALSE, },			//    1
+												{FALSE, },			//    2
+												{FALSE, },			//    3
+												{FALSE, },			//    4
+												{FALSE, },			//    5
+												{FALSE, },			//    6
+												{FALSE, },			//    7
+												{FALSE, },			//    8
+												{FALSE, },			//    9
+												{FALSE, },			//   10
+												{FALSE, },			//   11
+												{FALSE, },			//   12
+												{FALSE, },			//   13
+												{FALSE, },			//   14
+												{FALSE, },			//   15
+												{FALSE, },			//   16
+												{FALSE, },			//   17
+												{FALSE, },			//   18
+												{FALSE, },			//   19
+												{FALSE, },			//   20
+												{FALSE, },			//   21
+												{FALSE, },			//   22
+												{FALSE, },			//   23
+												{FALSE, },			//   24
+												{FALSE, },			//   25
+												{FALSE, },			//   26
+												{FALSE, },			//   27
+												{FALSE, },			//   28
+												{FALSE, },			//   29
+												{FALSE, },			//   30
+												{FALSE, },			//   31
+												{FALSE, },			//   32
+												{FALSE, },			//   33
+												{FALSE, },			//   34
+												{FALSE, },			//   35
+												{FALSE, },			//   36
+												{FALSE, },			//   37
+												{FALSE, },			//   38
+												{FALSE, },			//   39
+												{FALSE, },			//   40
+												{FALSE, },			//   41
+												{FALSE, },			//   42
+												{FALSE, },			//   43
+												{FALSE, },			//   44
+												{FALSE, },			//   45
+												{FALSE, },			//   46
+												{FALSE, },			//   47
+												{FALSE, },			//   48
+												{FALSE, },			//   49
+												{FALSE, },			//   50
+												{FALSE, },			//   51
+												{FALSE, },			//   52
+												{FALSE, },			//   53
+												{FALSE, },			//   54
+												{FALSE, },			//   55
+												{FALSE, },			//   56
+												{FALSE, },			//   57
+												{FALSE, },			//   58
+												{FALSE, },			//   59
+												{FALSE, },			//   60
+												{FALSE, },			//   61
+												{FALSE, },			//   62
+												{FALSE, },			//   63
+												{FALSE, },			//   64
+												{FALSE, },			//   65
+												{FALSE, },			//   66
+												{FALSE, },			//   67
+												{FALSE, },			//   68
+												{FALSE, },			//   69
+												{FALSE, },			//   70
+												{FALSE, },			//   71
+												{FALSE, },			//   72
+												{FALSE, },			//   73
+												{FALSE, },			//   74
+												{FALSE, },			//   75
+												{FALSE, },			//   76
+												{FALSE, },			//   77
+												{FALSE, },			//   78
+												{FALSE, },			//   79
+												{FALSE, },			//   80
+												{FALSE, },			//   81
+												{FALSE, },			//   82
+												{FALSE, },			//   83
+												{FALSE, },			//   84
+												{FALSE, },			//   85
+												{FALSE, },			//   86
+												{FALSE, },			//   87
+												{FALSE, },			//   88
+												{FALSE, },			//   89
+												{FALSE, },			//   90
+												{FALSE, },			//   91
+												{FALSE, },			//   92
+												{FALSE, },			//   93
+												{FALSE, },			//   94
+												{FALSE, },			//   95
+												{FALSE, },			//   96
+												{FALSE, },			//   97
+												{FALSE, },			//   98
+												{FALSE, },			//   99
+												{FALSE, },			//  100
+												{FALSE, },			//  101
+												{FALSE, },			//  102
+												{FALSE, },			//  103
+												{FALSE, },			//  104
+												{FALSE, },			//  105
+												{FALSE, },			//  106
+												{FALSE, },			//  107
+												{FALSE, },			//  108
+												{FALSE, },			//  109
+												{FALSE, },			//  110
+												{FALSE, },			//  111
+												{FALSE, },			//  112
+												{FALSE, },			//  113
+												{FALSE, },			//  114
+												{FALSE, },			//  115
+												{FALSE, },			//  116
+												{FALSE, },			//  117
+												{FALSE, },			//  118
+												{FALSE, },			//  119
+												{FALSE, },			//  120
+												{FALSE, },			//  121
+												{FALSE, },			//  122
+												{FALSE, },			//  123
+												{FALSE, },			//  124
+												{FALSE, },			//  125
+												{FALSE, },			//  126
+												{FALSE, },			//  127
+												{FALSE, },			//  128
+												{FALSE, },			//  129
+												{FALSE, },			//  130
+												{FALSE, },			//  131
+												{FALSE, },			//  132
+												{FALSE, },			//  133
+												{FALSE, },			//  134
+												{FALSE, },			//  135
+												{FALSE, },			//  136
+												{FALSE, },			//  137
+												{FALSE, },			//  138
+												{FALSE, },			//  139
+												{FALSE, },			//  140
+												{FALSE, },			//  141
+												{FALSE, },			//  142
+												{FALSE, },			//  143
+												{FALSE, },			//  144
+												{FALSE, },			//  145
+												{FALSE, },			//  146
+												{FALSE, },			//  147
+												{FALSE, },			//  148
+												{FALSE, },			//  149
+												{FALSE, },			//  150
+												{FALSE, },			//  151
+												{FALSE, },			//  152
+												{FALSE, },			//  153
+												{FALSE, },			//  154
+												{FALSE, },			//  155
+												{FALSE, },			//  156
+												{FALSE, },			//  157
+												{FALSE, },			//  158
+												{FALSE, },			//  159
+												{FALSE, },			//  160
+												{FALSE, },			//  161
+												{FALSE, },			//  162
+												{FALSE, },			//  163
+												{FALSE, },			//  164
+												{FALSE, },			//  165
+												{FALSE, },			//  166
+												{FALSE, },			//  167
+												{FALSE, },			//  168
+												{FALSE, },			//  169
+												{FALSE, },			//  170
+												{FALSE, },			//  171
+												{FALSE, },			//  172
+												{FALSE, },			//  173
+												{FALSE, },			//  174
+												{FALSE, },			//  175
+												{FALSE, },			//  176
+												{FALSE, },			//  177
+												{FALSE, },			//  178
+												{FALSE, },			//  179
+												{FALSE, },			//  180
+												{FALSE, },			//  181
+												{FALSE, },			//  182
+												{FALSE, },			//  183
+												{FALSE, },			//  184
+												{FALSE, },			//  185
+												{FALSE, },			//  186
+												{FALSE, },			//  187
+												{FALSE, },			//  188
+												{FALSE, },			//  189
+												{FALSE, },			//  190
+												{FALSE, },			//  191
+												{FALSE, },			//  192
+												{FALSE, },			//  193
+												{FALSE, },			//  194
+												{FALSE, },			//  195
+												{FALSE, },			//  196
+												{FALSE, },			//  197
+												{FALSE, },			//  198
+												{FALSE, },			//  199
+												{FALSE, },			//  200
+												{FALSE, },			//  201
+												{FALSE, },			//  202
+												{FALSE, },			//  203
+												{FALSE, },			//  204
+												{FALSE, },			//  205
+												{FALSE, },			//  206
+												{FALSE, },			//  207
+												{FALSE, },			//  208
+												{FALSE, },			//  209
+												{FALSE, },			//  210
+												{FALSE, },			//  211
+												{FALSE, },			//  212
+												{FALSE, },			//  213
+												{FALSE, },			//  214
+												{FALSE, },			//  215
+												{FALSE, },			//  216
+												{FALSE, },			//  217
+												{FALSE, },			//  218
+												{FALSE, },			//  219
+												{FALSE, },			//  220
+												{FALSE, },			//  221
+												{FALSE, },			//  222
+												{FALSE, },			//  223
+												{FALSE, },			//  224
+												{FALSE, },			//  225
+												{FALSE, },			//  226
+												{FALSE, },			//  227
+												{FALSE, },			//  228
+												{FALSE, },			//  229
+												{FALSE, },			//  230
+												{FALSE, },			//  231
+												{FALSE, },			//  232
+												{FALSE, },			//  233
+												{FALSE, },			//  234
+												{FALSE, },			//  235
+												{FALSE, },			//  236
+												{FALSE, },			//  237
+												{FALSE, },			//  238
+												{FALSE, },			//  239
+												{FALSE, },			//  240
+												{FALSE, },			//  241
+												{FALSE, },			//  242
+												{FALSE, },			//  243
+												{FALSE, },			//  244
+												{FALSE, },			//  245
+												{FALSE, },			//  246
+												{FALSE, },			//  247
+												{FALSE, },			//  248
+												{FALSE, },			//  249
+												{FALSE, },			//  250
+												{FALSE, },			//  251
+												{FALSE, },			//  252
+												{FALSE, },			//  253
+												{FALSE, },			//  254
+												{FALSE, },			//  255
+												{FALSE, },			//  256
+												{FALSE, },			//  257
+												{FALSE, },			//  258
+												{FALSE, },			//  259
+												{FALSE, },			//  260
+												{FALSE, },			//  261
+												{FALSE, },			//  262
+												{FALSE, },			//  263
+												{FALSE, },			//  264
+												{FALSE, },			//  265
+												{FALSE, },			//  266
+												{FALSE, },			//  267
+												{FALSE, },			//  268
+												{FALSE, },			//  269
+												{FALSE, },			//  270
+												{FALSE, },			//  271
+												{FALSE, },			//  272
+												{FALSE, },			//  273
+												{FALSE, },			//  274
+												{FALSE, },			//  275
+												{FALSE, },			//  276
+												{FALSE, },			//  277
+												{FALSE, },			//  278
+												{FALSE, },			//  279
+												{FALSE, },			//  280
+												{FALSE, },			//  281
+												{FALSE, },			//  282
+												{FALSE, },			//  283
+												{FALSE, },			//  284
+												{FALSE, },			//  285
+												{FALSE, },			//  286
+												{FALSE, },			//  287
+												{FALSE, },			//  288
+												{FALSE, },			//  289
+												{FALSE, },			//  290
+												{FALSE, },			//  291
+												{FALSE, },			//  292
+												{FALSE, },			//  293
+												{FALSE, },			//  294
+												{FALSE, },			//  295
+												{FALSE, },			//  296
+												{FALSE, },			//  297
+												{FALSE, },			//  298
+												{FALSE, },			//  299
+												{FALSE, },			//  300
+												{FALSE, },			//  301
+												{FALSE, },			//  302
+												{FALSE, },			//  303
+												{FALSE, },			//  304
+												{FALSE, },			//  305
+												{FALSE, },			//  306
+												{FALSE, },			//  307
+												{FALSE, },			//  308
+												{FALSE, },			//  309
+												{FALSE, },			//  310
+												{FALSE, },			//  311
+												{FALSE, },			//  312
+												{FALSE, },			//  313
+												{FALSE, },			//  314
+												{FALSE, },			//  315
+												{FALSE, },			//  316
+												{FALSE, },			//  317
+												{FALSE, },			//  318
+												{FALSE, },			//  319
+												{FALSE, },			//  320
+												{FALSE, },			//  321
+												{FALSE, },			//  322
+												{FALSE, },			//  323
+												{FALSE, },			//  324
+												{FALSE, },			//  325
+												{FALSE, },			//  326
+												{FALSE, },			//  327
+												{FALSE, },			//  328
+												{FALSE, },			//  329
+												{FALSE, },			//  330
+												{FALSE, },			//  331
+												{FALSE, },			//  332
+												{FALSE, },			//  333
+												{FALSE, },			//  334
+												{FALSE, },			//  335
+												{FALSE, },			//  336
+												{FALSE, },			//  337
+												{FALSE, },			//  338
+												{FALSE, },			//  339
+												{FALSE, },			//  340
+												{FALSE, },			//  341
+												{FALSE, },			//  342
+												{FALSE, },			//  343
+												{FALSE, },			//  344
+												{FALSE, },			//  345
+												{FALSE, },			//  346
+												{FALSE, },			//  347
+												{FALSE, },			//  348
+												{FALSE, },			//  349
+												{FALSE, },			//  350
+												{FALSE, },			//  351
+												{FALSE, },			//  352
+												{FALSE, },			//  353
+												{FALSE, },			//  354
+												{FALSE, },			//  355
+												{FALSE, },			//  356
+												{FALSE, },			//  357
+												{FALSE, },			//  358
+												{FALSE, },			//  359
+												{FALSE, },			//  360
+												{FALSE, },			//  361
+												{FALSE, },			//  362
+												{FALSE, },			//  363
+												{FALSE, },			//  364
+												{FALSE, },			//  365
+												{FALSE, },			//  366
+												{FALSE, },			//  367
+												{FALSE, },			//  368
+												{FALSE, },			//  369
+												{FALSE, },			//  370
+												{FALSE, },			//  371
+												{FALSE, },			//  372
+												{FALSE, },			//  373
+												{FALSE, },			//  374
+												{FALSE, },			//  375
+												{FALSE, },			//  376
+												{FALSE, },			//  377
+												{FALSE, },			//  378
+												{FALSE, },			//  379
+												{FALSE, },			//  380
+												{FALSE, },			//  381
+												{FALSE, },			//  382
+												{FALSE, },			//  383
+												{FALSE, },			//  384
+												{FALSE, },			//  385
+												{FALSE, },			//  386
+												{FALSE, },			//  387
+												{FALSE, },			//  388
+												{FALSE, },			//  389
+												{FALSE, },			//  390
+												{FALSE, },			//  391
+												{FALSE, },			//  392
+												{FALSE, },			//  393
+												{FALSE, },			//  394
+												{FALSE, },			//  395
+												{FALSE, },			//  396
+												{FALSE, },			//  397
+												{FALSE, },			//  398
+												{FALSE, },			//  399
+												{FALSE, },			//  400
+												{FALSE, },			//  401
+												{FALSE, },			//  402
+												{FALSE, },			//  403
+												{FALSE, },			//  404
+												{FALSE, },			//  405
+												{FALSE, },			//  406
+												{FALSE, },			//  407
+												{FALSE, },			//  408
+												{FALSE, },			//  409
+												{FALSE, },			//  410
+												{FALSE, },			//  411
+												{FALSE, },			//  412
+												{FALSE, },			//  413
+												{FALSE, },			//  414
+												{FALSE, },			//  415
+												{FALSE, },			//  416
+												{FALSE, },			//  417
+												{FALSE, },			//  418
+												{FALSE, },			//  419
+												{FALSE, },			//  420
+												{FALSE, },			//  421
+												{FALSE, },			//  422
+												{FALSE, },			//  423
+												{FALSE, },			//  424
+												{FALSE, },			//  425
+												{FALSE, },			//  426
+												{FALSE, },			//  427
+												{FALSE, },			//  428
+												{FALSE, },			//  429
+												{FALSE, },			//  430
+												{FALSE, },			//  431
+												{FALSE, },			//  432
+												{FALSE, },			//  433
+												{FALSE, },			//  434
+												{FALSE, },			//  435
+												{FALSE, },			//  436
+												{FALSE, },			//  437
+												{FALSE, },			//  438
+												{FALSE, },			//  439
+												{FALSE, },			//  440
+												{FALSE, },			//  441
+												{FALSE, },			//  442
+												{FALSE, },			//  443
+												{FALSE, },			//  444
+												{FALSE, },			//  445
+												{FALSE, },			//  446
+												{FALSE, },			//  447
+												{FALSE, },			//  448
+												{FALSE, },			//  449
+												{FALSE, },			//  450
+												{FALSE, },			//  451
+												{FALSE, },			//  452
+												{FALSE, },			//  453
+												{FALSE, },			//  454
+												{FALSE, },			//  455
+												{FALSE, },			//  456
+												{FALSE, },			//  457
+												{FALSE, },			//  458
+												{FALSE, },			//  459
+												{FALSE, },			//  460
+												{FALSE, },			//  461
+												{FALSE, },			//  462
+												{FALSE, },			//  463
+												{FALSE, },			//  464
+												{FALSE, },			//  465
+												{FALSE, },			//  466
+												{FALSE, },			//  467
+												{FALSE, },			//  468
+												{FALSE, },			//  469
+												{FALSE, },			//  470
+												{FALSE, },			//  471
+												{FALSE, },			//  472
+												{FALSE, },			//  473
+												{FALSE, },			//  474
+												{FALSE, },			//  475
+												{FALSE, },			//  476
+												{FALSE, },			//  477
+												{FALSE, },			//  478
+												{FALSE, },			//  479
+												{FALSE, },			//  480
+												{FALSE, },			//  481
+												{FALSE, },			//  482
+												{FALSE, },			//  483
+												{FALSE, },			//  484
+												{FALSE, },			//  485
+												{FALSE, },			//  486
+												{FALSE, },			//  487
+												{FALSE, },			//  488
+												{FALSE, },			//  489
+												{FALSE, },			//  490
+												{FALSE, },			//  491
+												{FALSE, },			//  492
+												{FALSE, },			//  493
+												{FALSE, },			//  494
+												{FALSE, },			//  495
+												{FALSE, },			//  496
+												{FALSE, },			//  497
+												{FALSE, },			//  498
+												{FALSE, },			//  499
+												{FALSE, },			//  500
+												{FALSE, },			//  501
+												{FALSE, },			//  502
+												{FALSE, },			//  503
+												{FALSE, },			//  504
+												{FALSE, },			//  505
+												{FALSE, },			//  506
+												{FALSE, },			//  507
+												{FALSE, },			//  508
+												{FALSE, },			//  509
+												{FALSE, },			//  510
+												{FALSE, },			//  511
+												{TRUE,  60},		//  512
+												{TRUE,  60},		//  513
+												{TRUE,   0},		//  514
+												{TRUE,  60},		//  515
+												{TRUE,  60},		//  516
+												{TRUE,  60},		//  517
+												{TRUE,   0},		//  518
+												{TRUE,   0},		//  519
+												{TRUE,   0},		//  520
+												{TRUE,   0},		//  521
+												{TRUE,   0},		//  522
+												{TRUE,   0},		//  523
+												{TRUE,   0},		//  524
+												{TRUE,   0},		//  525
+												{TRUE,   0},		//  526
+												{TRUE,   0},		//  527
+												{TRUE,   0},		//  528
+												{TRUE,   0},		//  529
+												{TRUE,   0},		//  530
+												{TRUE,   0},		//  531
+												{TRUE,   0},		//  532
+												{TRUE,   0},		//  533
+												{TRUE,   0},		//  534
+												{TRUE,   0},		//  535
+												{TRUE,   0},		//  536
+												{TRUE,   0},		//  537
+												{TRUE,   0},		//  538
+												{TRUE,   0},		//  539
+												{TRUE,   0},		//  540
+												{TRUE,   0},		//  541
+												{TRUE,   0},		//  542
+												{TRUE,   0},		//  543
+												{TRUE,   0},		//  544
+												{TRUE,   0},		//  545
+												{TRUE,   0},		//  546
+												{TRUE,   0},		//  547
+												{TRUE,   0},		//  548
+												{TRUE,   0},		//  549
+												{TRUE,   0},		//  550
+												{TRUE,   0},		//  551
+												{TRUE,   0},		//  552
+												{TRUE,   0},		//  553
+												{TRUE,   0},		//  554
+												{TRUE,   0},		//  555
+												{TRUE,   0},		//  556
+												{TRUE,   0},		//  557
+												{TRUE,   0},		//  558
+												{TRUE,   0},		//  559
+												{TRUE,  20},		//  560
+												{TRUE,  20},		//  561
+												{TRUE,  20},		//  562
+												{TRUE,  20},		//  563
+												{TRUE,  20},		//  564
+												{TRUE,  20},		//  565
+												{TRUE,  20},		//  566
+												{TRUE,  20},		//  567
+												{TRUE,  20},		//  568
+												{TRUE,  20},		//  569
+												{TRUE,  20},		//  570
+												{TRUE,  20},		//  571
+												{TRUE,  20},		//  572
+												{TRUE,  20},		//  573
+												{TRUE,  20},		//  574
+												{TRUE,  20},		//  575
+												{TRUE,  20},		//  576
+												{TRUE,  20},		//  577
+												{TRUE,  20},		//  578
+												{TRUE,  20},		//  579
+												{TRUE,  20},		//  580
+												{TRUE,  20},		//  581
+												{TRUE,  20},		//  582
+												{TRUE,  20},		//  583
+												{TRUE,  20},		//  584
+												{TRUE,  20},		//  585
+												{TRUE,  20},		//  586
+												{TRUE,  20},		//  587
+												{TRUE,  20},		//  588
+												{TRUE,  20},		//  589
+												{TRUE,  20},		//  590
+												{TRUE,  20},		//  591
+												{TRUE,  20},		//  592
+												{TRUE,  20},		//  593
+												{TRUE,  20},		//  594
+												{TRUE,  20},		//  595
+												{TRUE,  20},		//  596
+												{TRUE,  20},		//  597
+												{TRUE,  20},		//  598
+												{TRUE,  20},		//  599
+												{TRUE,  20},		//  600
+												{TRUE,  20},		//  601
+												{TRUE,  20},		//  602
+												{TRUE,  20},		//  603
+												{TRUE,  20},		//  604
+												{TRUE,  20},		//  605
+												{TRUE,  20},		//  606
+												{TRUE,  20},		//  607
+												{TRUE,  64},		//  608
+												{TRUE,  46},		//  609
+												{TRUE,  46},		//  610
+												{TRUE,  46},		//  611
+												{TRUE,  64},		//  612
+												{TRUE,  64},		//  613
+												{TRUE,  64},		//  614
+												{TRUE,  64},		//  615
+												{TRUE,  64},		//  616
+												{TRUE,  64},		//  617
+												{TRUE,  64},		//  618
+												{TRUE,  64},		//  619
+												{TRUE,  64},		//  620
+												{TRUE,  64},		//  621
+												{TRUE,  64},		//  622
+												{TRUE,  64},		//  623
+												{TRUE,  64},		//  624
+												{TRUE,  64},		//  625
+												{TRUE,  64},		//  626
+												{TRUE,  64},		//  627
+												{TRUE,  64},		//  628
+												{TRUE,  64},		//  629
+												{TRUE,  64},		//  630
+												{TRUE,  64},		//  631
+												{TRUE,  64},		//  632
+												{TRUE,  64},		//  633
+												{TRUE,  64},		//  634
+												{TRUE,  64},		//  635
+												{TRUE,  64},		//  636
+												{TRUE,  64},		//  637
+												{TRUE,  64},		//  638
+												{TRUE,  64},		//  639
+												{TRUE,  64},		//  640
+												{TRUE,  64},		//  641
+												{TRUE,  64},		//  642
+												{TRUE,  64},		//  643
+												{TRUE,  64},		//  644
+												{TRUE,  64},		//  645
+												{TRUE,  64},		//  646
+												{TRUE,  64},		//  647
+												{TRUE,  64},		//  648
+												{TRUE,  64},		//  649
+												{TRUE,  64},		//  650
+												{TRUE,  64},		//  651
+												{TRUE,  64},		//  652
+												{TRUE,  64},		//  653
+												{TRUE,  64},		//  654
+												{TRUE,  64},		//  655
+												{TRUE,  20},		//  656
+												{TRUE, 255},		//  657
+												{TRUE,  50},		//  658
+												{TRUE,   0},		//  659
+												{TRUE,  48},		//  660
+												{TRUE,  22},		//  661
+												{TRUE,   8},		//  662
+												{TRUE,   0},		//  663
+												{TRUE,   0},		//  664
+												{TRUE,   0},		//  665
+												{TRUE,   0},		//  666
+												{TRUE,   0},		//  667
+												{TRUE,   5},		//  668
+												{TRUE,  50},		//  669
+												{TRUE,  10},		//  670
+												{TRUE,   0},		//  671
+												{TRUE,   0},		//  672
+												{TRUE,   0},		//  673
+												{TRUE,   0},		//  674
+												{TRUE,  20},		//  675
+												{TRUE,   5},		//  676
+												{TRUE,   3},		//  677
+												{TRUE,   2},		//  678
+												{TRUE,   0},		//  679
+												{TRUE,  20},		//  680
+												{TRUE, 255},		//  681
+												{TRUE,   0},		//  682
+												{TRUE,   0},		//  683
+												{TRUE,  32},		//  684
+												{TRUE,   0},		//  685
+												{TRUE, 100},		//  686
+												{TRUE,  59},		//  687
+												{TRUE,  64},		//  688
+												{TRUE,  64},		//  689
+												{TRUE,  53},		//  690
+												{TRUE,  64},		//  691
+												{TRUE,  64},		//  692
+												{TRUE,  64},		//  693
+												{TRUE,  58},		//  694
+												{TRUE,  61},		//  695
+												{TRUE,  64},		//  696
+												{TRUE,  62},		//  697
+												{TRUE,  81},		//  698
+												{TRUE,  73},		//  699
+												{TRUE,  83},		//  700
+												{TRUE, 181},		//  701
+												{TRUE,  40},		//  702
+												{TRUE,   0},		//  703
+												{TRUE, 141},		//  704
+												{TRUE,  21},		//  705
+												{TRUE,  91},		//  706
+												{TRUE,  37},		//  707
+												{TRUE,  65},		//  708
+												{TRUE,  65},		//  709
+												{TRUE,  65},		//  710
+												{TRUE,  58},		//  711
+												{TRUE,  59},		//  712
+												{TRUE,  62},		//  713
+												{TRUE,  59},		//  714
+												{TRUE,  54},		//  715
+												{TRUE,  86},		//  716
+												{TRUE,  76},		//  717
+												{TRUE,  83},		//  718
+												{TRUE, 181},		//  719
+												{TRUE,   4},		//  720
+												{TRUE, 255},		//  721
+												{TRUE,   8},		//  722
+												{TRUE,   3},		//  723
+												{TRUE,   5},		//  724
+												{TRUE,   0},		//  725
+												{TRUE,   0},		//  726
+												{TRUE,   3},		//  727
+												{TRUE,  10},		//  728
+												{TRUE,   0},		//  729
+												{TRUE,   0},		//  730
+												{TRUE,   0},		//  731
+												{TRUE,   0},		//  732
+												{TRUE,   0},		//  733
+												{TRUE,   0},		//  734
+												{TRUE,   0},		//  735
+												{TRUE,   0},		//  736
+												{TRUE,   0},		//  737
+												{TRUE,   0},		//  738
+												{TRUE,   0},		//  739
+												{TRUE,   0},		//  740
+												{TRUE,   0},		//  741
+												{TRUE,   0},		//  742
+												{TRUE,   0},		//  743
+												{TRUE, 255},		//  744
+												{TRUE,   0},		//  745
+												{TRUE,   0},		//  746
+												{TRUE, 255},		//  747
+									};
diff --git a/arch/metag/boards/vivaldi/Makefile b/arch/metag/boards/vivaldi/Makefile
new file mode 100644
index 0000000..bf9d3e9
--- /dev/null
+++ b/arch/metag/boards/vivaldi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Vivaldi board specific parts of the kernel
+#
+
+obj-y	:= setup.o
diff --git a/arch/metag/boards/vivaldi/setup.c b/arch/metag/boards/vivaldi/setup.c
new file mode 100644
index 0000000..c73bcf9
--- /dev/null
+++ b/arch/metag/boards/vivaldi/setup.c
@@ -0,0 +1,88 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/libertas_spi.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/soc-chorus2/gpio.h>
+
+#define WIFI_IRQ_PIN		GPIO_G_PIN(9)
+#define WIFI_POWERDOWN_PIN	GPIO_G_PIN(8)
+#define WIFI_RESET_PIN		GPIO_G_PIN(7)
+
+/* Claim the WiFi GPIOs, power-cycle and reset the module, then switch
+ * the SPI link to 16-bit words.  GPIOs claimed so far are released if a
+ * later request fails (the original leaked them). */
+static int vivaldi_libertas_setup(struct spi_device *spi)
+{
+	int err;
+
+	/* Assign the GPIO pin to get the WIFI interrupts */
+	err = gpio_request(WIFI_IRQ_PIN, "wifi_irq");
+	if (err)
+		goto out;
+	gpio_direction_input(WIFI_IRQ_PIN);
+	/* Drive the powerdown pin */
+	err = gpio_request(WIFI_POWERDOWN_PIN, "wifi_powerdown");
+	if (err)
+		goto out_free_irq;
+	gpio_direction_output(WIFI_POWERDOWN_PIN, 0);
+	udelay(500);
+	gpio_direction_output(WIFI_POWERDOWN_PIN, 1);
+	udelay(1500);
+	/* Drive the reset pin (give it a proper label, not NULL) */
+	err = gpio_request(WIFI_RESET_PIN, "wifi_reset");
+	if (err)
+		goto out_free_powerdown;
+	gpio_direction_output(WIFI_RESET_PIN, 0);
+	udelay(100);
+	gpio_direction_output(WIFI_RESET_PIN, 1);
+	spi->bits_per_word = 16;
+	spi_setup(spi);
+	return 0;
+out_free_powerdown:
+	gpio_free(WIFI_POWERDOWN_PIN);
+out_free_irq:
+	gpio_free(WIFI_IRQ_PIN);
+out:
+	return err;
+}
+
+static int vivaldi_libertas_teardown(struct spi_device *spi)
+{
+	gpio_free(WIFI_RESET_PIN);	/* release in reverse order of setup */
+	gpio_free(WIFI_POWERDOWN_PIN);
+	gpio_free(WIFI_IRQ_PIN);
+
+	return 0;	/* teardown cannot fail */
+}
+
+/* Libertas board data */
+static struct libertas_spi_platform_data vivaldi_libertas_pdata = {
+	.use_dummy_writes	= 0,
+	.setup			= vivaldi_libertas_setup,
+	.teardown		= vivaldi_libertas_teardown,
+};
+
+static struct spi_board_info spi_device_info[] __initdata = {
+	{
+		.modalias		= "mtd_dataflash",	/* SPI dataflash chip */
+		.max_speed_hz		= 12500000,		/* 12.5 MHz */
+		.chip_select		= 0,
+	},
+	{
+		.modalias		= "libertas_spi",	/* Libertas SPI WiFi module */
+		.max_speed_hz		= 12500000,
+		.chip_select		= 1,
+		.platform_data		= &vivaldi_libertas_pdata,
+	},
+};
+
+/* Register the Vivaldi SPI devices (dataflash + libertas wifi). */
+static int __init vivaldi_init(void)
+{
+	spi_device_info[1].irq = gpio_to_irq(WIFI_IRQ_PIN);	/* NOTE(review): <0 error not checked */
+	spi_register_board_info(spi_device_info, ARRAY_SIZE(spi_device_info));
+	return 0;
+}
diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore
index a021da2..2d6c0c1 100644
--- a/arch/metag/boot/.gitignore
+++ b/arch/metag/boot/.gitignore
@@ -1,4 +1,4 @@
 vmlinux*
 uImage*
 ramdisk.*
-*.dtb
+*.dtb*
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
index dbd95217..89c1e26 100644
--- a/arch/metag/boot/dts/Makefile
+++ b/arch/metag/boot/dts/Makefile
@@ -1,7 +1,18 @@
 dtb-y	+= skeleton.dtb
+dtb-y	+= chorus2_generic.dtb
+dtb-y	+= tz1090_generic.dtb
+dtb-y	+= tz1090_01ry.dtb
+dtb-y	+= tz1090_01sp.dtb
+dtb-y	+= tz1090_01tt.dtb
+dtb-y	+= tz1090_01xk.dtb
 
 # Built-in dtb
 builtindtb-y				:= skeleton
+builtindtb-$(CONFIG_SOC_CHORUS2)	:= chorus2_generic
+builtindtb-$(CONFIG_SOC_TZ1090)		:= tz1090_generic
+builtindtb-$(CONFIG_COMET_BUB)		:= tz1090_01ry
+builtindtb-$(CONFIG_TZ1090_01XX)	:= tz1090_01sp
+builtindtb-$(CONFIG_POLARIS)		:= tz1090_01xk
 
 ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"")
 	builtindtb-y			:= $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME))
diff --git a/arch/metag/boot/dts/chorus2.dtsi b/arch/metag/boot/dts/chorus2.dtsi
new file mode 100644
index 0000000..b9977ae
--- /dev/null
+++ b/arch/metag/boot/dts/chorus2.dtsi
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "frontier,chorus2", "img,meta";
+
+	interrupt-parent = <&intc>;
+
+	intc: interrupt-controller {
+		compatible = "img,meta-intc";
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		num-banks = <3>;
+		no-mask;
+		default-level = <0x0 0x00030fff 0xff0007e0>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		uart0: uart@02020000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x02020000 0x1000>;
+			interrupts = <48 4 /* level */>;
+			clock-frequency = <1843200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+		};
+		uart1: uart@02021000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x02021000 0x1000>;
+			interrupts = <49 4 /* level */>;
+			clock-frequency = <1843200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+		};
+		i2c0: i2c@0200A000 {
+			compatible = "img,chorus2-i2c";
+			reg = <0x0200A000 0x1000>;
+			interrupts = <77 1 /* edge */>;
+			id = <0>;
+		};
+		i2c1: i2c@0200B000 {
+			compatible = "img,chorus2-i2c";
+			reg = <0x0200B000 0x1000>;
+			interrupts = <78 1 /* edge */>;
+			id = <1>;
+		};
+	};
+};
diff --git a/arch/metag/boot/dts/chorus2_generic.dts b/arch/metag/boot/dts/chorus2_generic.dts
new file mode 100644
index 0000000..9fbae71
--- /dev/null
+++ b/arch/metag/boot/dts/chorus2_generic.dts
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+/include/ "chorus2.dtsi"
diff --git a/arch/metag/boot/dts/tz1090.dtsi b/arch/metag/boot/dts/tz1090.dtsi
new file mode 100644
index 0000000..2a34f12
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090.dtsi
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "skeleton.dtsi"
+/include/ "tz1090_clk.dtsi"
+
+/*
+ * default scb clock to sys_clk_undeleted to work around clock
+ * domain crossing problems
+ */
+&scb_sw {
+	default-clock = <1>;	/* sys_clk_undeleted */
+};
+
+/ {
+	compatible = "toumaz,tz1090", "img,meta";
+
+	interrupt-parent = <&intc>;
+
+	clocks = <&meta_core_clk>;
+	clock-names = "core";
+
+	aliases {
+		spi1 = &spi1;
+	};
+
+	intc: interrupt-controller {
+		compatible = "img,meta-intc";
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		num-banks = <2>;
+		/* only soc-bus and 2d are edge sensitive */
+		default-level = <0xfffeffff 0xb>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		socif: socif@02008c2c {
+			compatible = "img,tz1090-socif";
+			reg = <0x02008c2c 0x8>;
+			interrupts = <16 0 /* edge */>;
+		};
+
+		dma: dma-controller@0200c000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			#dma-cells = <2>;
+			compatible = "img,tz1090-mdc-dma";
+			reg = <0x0200c000 0x1000>;
+			interrupts = < 21 4 /* level */
+				       22 4 /* level */
+				       23 4 /* level */
+				       24 4 /* level */
+				       25 4 /* level */
+				       26 4 /* level */
+				       27 4 /* level */
+				       28 4 /* level */ >;
+
+		};
+
+		pinctrl: pinctrl@02005800 {
+			#gpio-range-cells = <3>;
+			compatible = "img,tz1090-pinctrl";
+			reg = <0x02005800 0xe4>;
+
+			/* common pin configurations */
+			pinctrl_uart0: uart0 {
+				uart0_cfg {
+					pins =	"uart0_rxd",
+						"uart0_txd";
+					function = "perip";
+				};
+			};
+			pinctrl_uart0_flow: uart0_flow {
+				uart0_cfg {
+					pins =	"uart0_cts",
+						"uart0_rts";
+					function = "perip";
+				};
+			};
+			pinctrl_uart1: uart1 {
+				uart1_cfg {
+					pins =	"uart1";
+					function = "perip";
+				};
+			};
+
+			pinctrl_spi1: spi1 {
+				spi1_cfg {
+					pins =	"spi1_mclk",
+						"spi1_dout",
+						"spi1_din";
+					function = "perip";
+				};
+			};
+			pinctrl_spi1_cs0: spi1_cs0 {
+				spi1_cfg {
+					pins =	"spi1_cs0";
+					function = "perip";
+				};
+			};
+
+			pinctrl_tft: tft {
+				tft_cfg {
+					pins =	"tft";
+					function = "tft";
+				};
+			};
+
+			pinctrl_sdh: sdh {
+				sdh_cfg {
+					pins =	"sdio",
+						"sdh";
+					function = "sdh";
+				};
+			};
+
+			pinctrl_scb0: scb0 {
+				scb0_cfg {
+					pins =	"scb0";
+					function = "perip";
+				};
+			};
+			pinctrl_scb1: scb1 {
+				scb1_cfg {
+					pins =	"scb1";
+					function = "perip";
+				};
+			};
+			pinctrl_scb2: scb2 {
+				scb2_cfg {
+					pins =	"scb2";
+					function = "perip";
+				};
+			};
+		};
+
+		pdc_pinctrl: pinctrl@02006500 {
+			#gpio-range-cells = <3>;
+			compatible = "img,tz1090-pdc-pinctrl";
+			reg = <0x02006500 0x100>;
+		};
+
+		gpios: gpios@02005800 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "img,tz1090-gpio";
+			reg = <0x02005800 0x90>;
+
+			gpios0: bank@0 {
+				gpio-controller;
+				interrupt-controller;
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				reg = <0>;
+				interrupts = <13 4 /* level */>;
+				gpio-ranges = <&pinctrl 0 0 30>;
+			};
+			gpios1: bank@1 {
+				gpio-controller;
+				interrupt-controller;
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				reg = <1>;
+				interrupts = <14 4 /* level */>;
+				gpio-ranges = <&pinctrl 0 30 30>;
+			};
+			gpios2: bank@2 {
+				gpio-controller;
+				interrupt-controller;
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				reg = <2>;
+				interrupts = <15 4 /* level */>;
+				gpio-ranges = <&pinctrl 0 60 30>;
+			};
+		};
+
+		i2s: tz1090-i2s@02004f00 {
+			compatible = "img,i2s";
+			reg = < 0x02004f00 0x100 /* out */
+				0x02005000 0x100 /* in */>;
+			interrupts = < 9 4 /* level */
+				       10 4 /* level */
+				       11 4 /* level */
+				       12 4 /* level */>;
+			interrupt-names = "output0", "output1",
+					  "output2", "input";
+
+			clocks = <&i2sout_sysclk>;
+			dmas = <&dma 9 0xffffffff /* out */
+				&dma 10 0xffffffff /* in */>;
+		};
+
+		uart0: uart@02004b00 {
+			compatible = "snps,dw-apb-uart";
+			pinctrl-names = "default";
+			pinctrl-0 = <	&pinctrl_uart0>;
+			reg = <0x02004b00 0x100>;
+			interrupts = <4 4 /* level */>;
+			clocks = <&uart_clk>;
+			clock-frequency = <1843200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			status = "disabled";
+		};
+		uart1: uart@02004c00 {
+			compatible = "snps,dw-apb-uart";
+			pinctrl-names = "default";
+			pinctrl-0 = <	&pinctrl_uart1>;
+			reg = <0x02004c00 0x100>;
+			interrupts = <5 4 /* level */>;
+			clocks = <&uart_clk>;
+			clock-frequency = <1843200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			status = "disabled";
+		};
+
+		pdc: pdc@0x02006000 {
+			interrupt-controller;
+			#interrupt-cells = <3>;
+
+			reg = <0x02006000 0x1000>;
+			compatible = "img,pdc-intc";
+
+			num-perips = <3>;
+			num-syswakes = <3>;
+
+			interrupts = <18 4 /* level */>, /* Syswakes */
+			             <30 4 /* level */>, /* Perip 0 (RTC) */
+			             <29 4 /* level */>, /* Perip 1 (IR) */
+			             <31 4 /* level */>; /* Perip 2 (WDT) */
+		};
+
+		pdc_gpios: gpios@02006500 {
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			compatible = "img,tz1090-pdc-gpio";
+			reg = <0x02006500 0x100>;
+
+			interrupt-parent = <&pdc>;
+			interrupts =	<1 0 0>,	/* Syswake 0 */
+					<1 1 0>,	/* Syswake 1 */
+					<1 2 0>;	/* Syswake 2 */
+			gpio-ranges = <&pdc_pinctrl 0 0 7>;
+		};
+
+		wdt: wdt@02006000 {
+			compatible = "img,pdc-wdt";
+			reg = <0x02006000 0x20>;
+			interrupt-parent = <&pdc>;
+			interrupts = <0 2 4>;
+		};
+
+		rtc: rtc@02006100 {
+			compatible = "img,pdc-rtc";
+			reg = <0x02006100 0x100>,
+			      <0x02006038 0x8>;	/* SW_PROT[6-7] */
+			reg-names = "regs", "nonvolatile";
+			interrupt-parent = <&pdc>;
+			interrupts = <0 0 4>;
+
+			/* setting the time takes 3 seconds */
+			time-set-delay	= <3>;
+			/* alarm interrupts arrive 2 seconds late */
+			alarm-irq-delay	= <2>;
+		};
+
+		ir: ir@02006200 {
+			compatible = "img,ir";
+			reg = <0x02006200 0x100>;
+			interrupt-parent = <&pdc>;
+			interrupts = <0 1 4>;
+			status = "disabled";
+		};
+
+		i2c0: i2c@02004400 {
+			compatible = "img,scb";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_scb0>;
+			reg = <0x02004400 0x200>;
+			interrupts = <0 4 /* level */>;
+			clocks = <&scb0_sysclk>;
+			linux,i2c-index = <0>;
+			quirks = <0x1>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@02004600 {
+			compatible = "img,scb";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_scb1>;
+			reg = <0x02004600 0x200>;
+			interrupts = <1 4 /* level */>;
+			clocks = <&scb1_sysclk>;
+			linux,i2c-index = <1>;
+			quirks = <0x1>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@02004800 {
+			compatible = "img,scb";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_scb2>;
+			reg = <0x02004800 0x200>;
+			interrupts = <2 4 /* level */>;
+			clocks = <&scb2_sysclk>;
+			linux,i2c-index = <2>;
+			quirks = <0x1>;
+			status = "disabled";
+		};
+
+		spi1: spi@02004e00 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "img,spi";
+			pinctrl-names = "default";
+			pinctrl-0 = <	&pinctrl_spi1>;
+			reg = <0x02004e00 0x100>;
+			interrupts = <8 4 /* level */>;
+			clocks = <&spim1_sysclk>;
+			num-cs = <3>;
+			clock-frequency = <40000000>;
+			dmas = <&dma 7 0xffffffff
+				&dma 8 0xffffffff>; /* -1 for any channel */
+			dma-names = "rx", "tx";
+			status = "disabled";
+
+		};
+
+		twod: twod@02008900 {
+			compatible = "img,tz1090-2d";
+			reg =	<0x02008900 0x100>,	/* slave port */
+				<0x02008c00 0x400>;	/* HEP registers */
+			interrupts = <34 0 /* edge */>;
+			clocks = <&twod_sysclk>;
+		};
+
+		tansen: tansen@020059B8 {
+			compatible = "cosmic,tansen";
+			reg = <0x020059B8 0x4>; /* GTI CTRL register */
+		};
+
+		sound {
+			compatible = "img,tz1090-audio";
+			reg = < 0x020059E0 0x4
+				0x020059E4 0x4
+				0x020059B8 0x4>;
+			img,i2s-controller = <&i2s>;
+			img,audio-codec = <&tansen>;
+		};
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_01ry.dts b/arch/metag/boot/dts/tz1090_01ry.dts
new file mode 100644
index 0000000..31b1d5e
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_01ry.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "tz1090.dtsi"
+
+&xtal2 {
+	clock-frequency = <12000000>;
+};
+
+&pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&pinctrl_uart0_flow
+			&pinctrl_spi1_cs0
+			&pinctrl_sdh>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&ir {
+	status = "okay";
+};
+
+&i2c2 {
+	status = "okay";
+};
+
+/ {
+	model = "ImgTec Comet Bring-Up-Board";
+	compatible =	"img,tz1090-01ry",
+			"toumaz,tz1090", "img,meta";
+};
+
+&spi1 {
+	status = "okay";
+
+	mtd@0 {
+		compatible = "atmel,at45db642d", "atmel,at45", "atmel,dataflash";
+		spi-max-frequency = <20000000>;
+		reg = <0>; /* chip select */
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_01sp.dts b/arch/metag/boot/dts/tz1090_01sp.dts
new file mode 100644
index 0000000..f26e553
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_01sp.dts
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "tz1090.dtsi"
+
+&xtal2 {
+	clock-frequency = <12000000>;
+};
+
+&xtal3 {
+	clock-frequency = <32768>;
+};
+
+&pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&pinctrl_spi1_cs0
+			&pinctrl_tft
+			&pinctrl_sdh>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&ir {
+	status = "okay";
+};
+
+&i2c2 {
+	bit-rate = <100000>;	/* limit for HDMI */
+	status = "okay";
+};
+
+/ {
+	model = "ImgTec Comet METAmorph";
+	compatible =	"img,tz1090-01sp",
+			"toumaz,tz1090", "img,meta";
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+		softled1 {
+			label = "softled1";
+			gpios = <&gpios0 23 0>;		/* uart0_rts */
+			linux,default-trigger = "mmc0";
+		};
+	};
+};
+
+&spi1 {
+	status = "okay";
+
+	mtd@0 {
+		compatible = "atmel,at45db642d", "atmel,at45", "atmel,dataflash";
+		spi-max-frequency = <20000000>;
+		reg = <0>; /* chip select */
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_01tt.dts b/arch/metag/boot/dts/tz1090_01tt.dts
new file mode 100644
index 0000000..be6aa53
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_01tt.dts
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "tz1090.dtsi"
+
+&xtal2 {
+	clock-frequency = <12000000>;
+};
+
+&xtal3 {
+	clock-frequency = <32768>;
+};
+
+&pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&pinctrl_spi1_cs0
+			&pinctrl_tft
+			&pinctrl_sdh>;
+};
+
+&pdc_pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&keys_pulldown>;
+
+	keys_pulldown: keys-pulldown {
+		keys {
+			pins =	"sys_wake0",
+				"sys_wake1",
+				"sys_wake2";
+			pull-down;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&ir {
+	status = "okay";
+};
+
+&i2c2 {
+	bit-rate = <100000>;	/* limit for HDMI */
+	status = "okay";
+};
+
+/ {
+	model = "ImgTec Comet MiniMorph";
+	compatible =	"img,tz1090-01tt", "img,tz1090-01sp",
+			"toumaz,tz1090", "img,meta";
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		button@0 {
+			label = "SW1 - Previous (Page Up)";
+			gpios = <&pdc_gpios 2 0>; /* syswake0 */
+			gpio-key,wakeup;
+			linux,code = <104>; /* Page Up */
+		};
+		button@1 {
+			label = "SW2 - Insert";
+			gpios = <&pdc_gpios 3 0>; /* syswake1 */
+			gpio-key,wakeup;
+			linux,code = <110>; /* Insert */
+		};
+		button@2 {
+			label = "SW3 - Next (Page Down)";
+			gpios = <&pdc_gpios 4 0>; /* syswake2 */
+			gpio-key,wakeup;
+			linux,code = <109>; /* Page Down */
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+		softled1 {
+			label = "softled1";
+			gpios = <&gpios0 23 0>;		/* uart0_rts */
+			linux,default-trigger = "mmc0";
+		};
+		softled2 {
+			label = "softled2";
+			gpios = <&gpios0 11 0>;		/* spi0_cs2 */
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&spi1 {
+	status = "okay";
+
+	mtd@0 {
+		compatible = "atmel,at45db642d", "atmel,at45", "atmel,dataflash";
+		spi-max-frequency = <20000000>;
+		reg = <0>; /* chip select */
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_01xk.dts b/arch/metag/boot/dts/tz1090_01xk.dts
new file mode 100644
index 0000000..737ed69
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_01xk.dts
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "tz1090.dtsi"
+
+&xtal2 {
+	clock-frequency = <12000000>;
+};
+
+&pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&pinctrl_spi1_cs0
+			&pinctrl_tft
+			&keys_pulldown>;
+
+	keys_pulldown: keys-pulldown {
+		keys {
+			pins =	"pll_on",
+				"pa_on";
+			pull-down;
+		};
+	};
+};
+
+&pdc_pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <	&syswake0_pulldown
+			&ts_irq_tristate>;
+
+	syswake0_pulldown: keys-pulldown {
+		keys {
+			pins =	"sys_wake0";
+			pull-down;
+		};
+	};
+	ts_irq_tristate: ts-irq-tristate {
+		syswake1 {
+			pins =	"sys_wake1";
+			tristate;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&ir {
+	status = "okay";
+};
+
+&i2c0 { /* i2c0 */
+	status = "okay";
+};
+
+&i2c2 { /* i2c2 */
+	bit-rate = <100000>;
+	status = "okay";
+};
+
+&spi1 {
+	status = "okay";
+};
+
+/ {
+	model = "PURE Polaris";
+	compatible =	"img,tz1090-01xk",
+			"toumaz,tz1090", "img,meta";
+};
diff --git a/arch/metag/boot/dts/tz1090_clk.dtsi b/arch/metag/boot/dts/tz1090_clk.dtsi
new file mode 100644
index 0000000..cda2090
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_clk.dtsi
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	soc {
+		/*
+		 * ============ OSCILLATORS ============
+		 */
+
+		/* XTAL1 frequency is specified in reset bootstrap config */
+		xtal1: xtal1 {
+			compatible = "specified-clock";
+			#clock-cells = <0>;
+			reg = <0x02004004 0x4>;	/* CR_PERIP_RESET_CFG */
+			shift = <8>;		/* FXTAL */
+			width = <4>;
+			clock-frequency =
+			/*	 FXTAL	Frequency */
+				<0	16384000>,
+				<1	19200000>,
+				<2	24000000>,
+				<3	24576000>,
+				<4	26000000>,
+				<5	36000000>,
+				<6	36864000>,
+				<7	38400000>,
+				<8	40000000>;
+			clock-output-names = "xtal1";
+		};
+
+		/* xtal2 oscillator (board specific, but 12MHz recommended) */
+		xtal2: xtal2 {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+			clock-output-names = "xtal2";
+		};
+
+		/* xtal3 oscillator (32.768KHz if fitted, assume not fitted) */
+		xtal3: xtal3 {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+			clock-output-names = "xtal3";
+		};
+
+
+		/*
+		 * ============ CORE/SYSTEM CLOCK SETUP ============
+		 */
+
+		/* ==== System Clock Undeleted Generation ==== */
+
+		/* xtal1 ---[sysclk0_sw]__ *
+		 * xtal2 --o[          ]   */
+		sysclk0_sw: sysclk0_sw {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&xtal2>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <0>;		/* CR_TOP_SYSCLK0_SW */
+			width = <1>;
+			clock-output-names = "sysclk0_sw";
+		};
+
+		/* sysclk0_sw ---[sys_pll]--- */
+		sys_pll: sys_pll {
+			compatible = "img,tz1090-pll";
+			#clock-cells = <0>;
+			clocks = <&sysclk0_sw>;
+			reg = <0x02005950 0x8>;	/* CR_TOP_SYSPLL_CTL{0,1} */
+			clock-output-names = "sys_pll";
+		};
+
+		/* sys_pll ---[sysclk_div]--- */
+		sysclk_div: sysclk_div {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_pll>;
+			reg = <0x02005914 0x4>;	/* CR_TOP_SYSCLK_DIV */
+			shift = <0>;		/* CR_TOP_SYSDIV */
+			width = <8>;
+			clock-output-names = "sysclk_div";
+		};
+
+		/* sysclk_div ---[sysclk1_sw]__ sys_clk_x2_undeleted	*
+		 * xtal1      --o[          ]				*/
+		sys_clk_x2_undeleted: sysclk1_sw {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&sysclk_div>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <1>;		/* CR_TOP_SYSCLK1_SW */
+			width = <1>;
+			clock-output-names = "sys_clk_x2_undeleted";
+		};
+
+		/* sys_clk_x2_undeleted ---[meta_clkdiv]--- sys_clk_undeleted */
+		sys_clk_undeleted: meta_clkdiv {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_x2_undeleted>;
+			reg = <0x02005918 0x4>;	/* CR_TOP_META_CLKDIV */
+			shift = <0>;		/* CR_TOP_META_X2_EN */
+			width = <2>;
+			clock-output-names = "sys_clk_undeleted";
+			linux,clk-read-only;
+		};
+
+
+		/* ==== Meta core clock Generation ==== */
+
+		/* sys_clk_x2_undeleted ---[meta_clkdelete]--- meta_core_clk */
+		meta_core_clk: meta_clkdelete {
+			compatible = "img,tz1090-deleter";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_x2_undeleted>;
+			reg = <0x0200591c 0x4>;	/* CR_TOP_META_CLKDELETE */
+			shift = <0>;		/* CR_TOP_META_CLKDELETE */
+			width = <10>;
+			clock-output-names = "meta";
+		};
+
+
+		/* ==== Peripheral System Clock Switches ==== */
+
+		scb0_sysclk: scb0_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			/* technically incorrect, compatible with old clk API */
+			clocks = <&scb_clk>;
+			reg = <0x02004010 0x4>;	/* CR_PERIP_CLKEN */
+			bit = <0>;		/* CR_PERIP_SCB0_SYS_CLK_EN */
+			clock-output-names = "scb0";
+		};
+
+		scb1_sysclk: scb1_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			/* technically incorrect, compatible with old clk API */
+			clocks = <&scb_clk>;
+			reg = <0x02004010 0x4>;	/* CR_PERIP_CLKEN */
+			bit = <1>;		/* CR_PERIP_SCB1_SYS_CLK_EN */
+			clock-output-names = "scb1";
+		};
+
+		scb2_sysclk: scb2_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			/* technically incorrect, compatible with old clk API */
+			clocks = <&scb_clk>;
+			reg = <0x02004010 0x4>;	/* CR_PERIP_CLKEN */
+			bit = <2>;		/* CR_PERIP_SCB2_SYS_CLK_EN */
+			clock-output-names = "scb2";
+		};
+
+		spim1_sysclk: spim1_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			/* technically incorrect, compatible with old clk API */
+			clocks = <&spim1_clk>;
+			reg = <0x02004010 0x4>;	/* CR_PERIP_CLKEN */
+			bit = <8>;		/* CR_PERIP_SPIM1_SYS_CLK_EN */
+			clock-output-names = "spi";
+		};
+
+		i2sout_sysclk: i2sout_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			/* technically incorrect, compatible with old clk API */
+			clocks = <&i2s_mclk>;
+			reg = <0x02004010 0x4>;	/* CR_PERIP_CLKEN */
+			bit = <9>;		/* CR_PERIP_I2S_OUT_SYS_CLK_EN */
+			clock-output-names = "i2s";
+		};
+
+
+		/* ==== HEP System Clock Switches ==== */
+		/* sys_clk ---[CR_HEP_CLK_EN.*_CLK_EN]--- *_sysclk */
+
+		twod_sysclk: twod_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_undeleted>;
+			reg = <0x02008c04 0x4>;	/* CR_HEP_CLK_EN */
+			bit = <0>;		/* CR_2D_CLK_EN */
+			clock-output-names = "sgx2d";
+		};
+
+		pdp_sysclk: pdp_sysclk {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_undeleted>;
+			reg = <0x02008c04 0x4>;	/* CR_HEP_CLK_EN */
+			bit = <2>;		/* CR_PDP_PDI_CLK_EN */
+			clock-output-names = "pdp";
+		};
+
+
+		/*
+		 * ============ ANALOG IP CLOCK SETUP ============
+		 */
+
+		/* ==== ADC PLL ==== */
+
+		/* xtal2 ---[adc_pll_sw0]___	*
+		 * xtal1 --o[           ]	*/
+		adc_pll_sw0: adc_pll_sw0 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&xtal2>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <22>;		/* CR_TOP_ADCPLL_CLK_0_SW */
+			width = <1>;
+			clock-output-names = "adc_pll_sw0";
+		};
+
+		/* adc_pll_sw0 ---[adc_pll]--- */
+		adc_pll_clk: adc_pll_clk {
+			compatible = "img,tz1090-pll";
+			#clock-cells = <0>;
+			clocks = <&adc_pll_sw0>;
+			reg = <0x02005958 0x8>;	/* CR_TOP_ADCPLL_CTL{0,1} */
+			clock-output-names = "adc_pll";
+		};
+
+
+		/* ==== AFE Progdiv3clk_to_soc ==== */
+
+		afe_progdiv3clk_to_soc: afe_progdiv3clk_to_soc {
+			#clock-cells = <0>;
+			/* Not yet implemented */
+		};
+
+
+		/*
+		 * ============ PERIPHERAL CLOCK SETUP ============
+		 */
+
+		/* ==== UART Clock Generation ==== */
+
+		/* sys_clk_undeleted ---[uart_sw]---	*
+		 * xtal1             --o[       ]	*/
+		uart_sw: uart_sw {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&sys_clk_undeleted>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <14>;		/* CR_TOP_UART_SW */
+			width = <1>;
+			clock-output-names = "uart_sw";
+			linux,clk-set-rate-remux;
+		};
+
+		/* uart_sw ---[uart_en]--- */
+		uart_en: uart_en {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&uart_sw>;
+			reg = <0x0200590c 0x4>;	/* CR_TOP_CLKENAB */
+			bit = <14>;		/* CR_TOP_UART_EN */
+			clock-output-names = "uart_en";
+		};
+
+		/* uart_en ---[uart_div]--- uart_clk */
+		uart_clk: uart_clk {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&uart_en>;
+			reg = <0x02005928 0x4>;	/* CR_TOP_UARTCLK_DIV */
+			shift = <0>;
+			width = <8>;
+			clock-output-names = "uart";
+		};
+
+
+		/* ==== SCB (I2C) Clock Generation ==== */
+
+		/* sys_clk_undeleted ---[scb_sw]---	*
+		 * xtal1             --o[      ]	*/
+		scb_sw: scb_sw {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&sys_clk_undeleted>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <13>;		/* CR_TOP_SCB_SW */
+			width = <1>;
+			clock-output-names = "scb_sw";
+		};
+
+		/* scb_sw ---[scb_en]--- scb_clk */
+		scb_clk: scb_en {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&scb_sw>;
+			reg = <0x0200590c 0x4>;	/* CR_TOP_CLKENAB */
+			bit = <13>;		/* CR_TOP_SCB_EN */
+			clock-output-names = "scb";
+		};
+
+
+		/* ==== SPI Master 1 Clock Generation ==== */
+
+		/* sys_clk_undeleted ---[spim1_div]--- spim1_clk */
+		spim1_clk: spim1_clk {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_undeleted>;
+			reg = <0x02005938 0x4>;	/* CR_TOP_SPI1CLK_DIV */
+			shift = <0>;
+			width = <8>;
+			clock-output-names = "spim1_clk";
+		};
+
+
+		/* ==== I2S Clock Generation ==== */
+
+		/* sys_clk_undeleted ---[i2s_sw2]---	*
+		 * xtal1             --o[       ]	*/
+		i2s_sw2: i2s_sw2 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&sys_clk_undeleted>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <10>;		/* CR_TOP_I2S_2_SW */
+			width = <1>;
+			clock-output-names = "i2s_sw2";
+			linux,clk-set-rate-remux;
+		};
+
+		/* adc_pll_clk ---[i2s_sw0]---	*
+		 * xtal2       --o[       ]	*/
+		i2s_sw0: i2s_sw0 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal2>,
+				 <&adc_pll_clk>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <11>;		/* CR_TOP_I2S_0_SW */
+			width = <1>;
+			clock-output-names = "i2s_sw0";
+			default-clock = <0>;
+		};
+
+		/* i2s_sw0 ---[i2s_sw1]---	*
+		 * i2s_sw2 --o[       ]	*/
+		i2s_sw1: i2s_sw1 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&i2s_sw2>,
+				 <&i2s_sw0>;
+			reg = <0x02005908 0x4>;	/* CR_TOP_CLKSWITCH */
+			shift = <12>;		/* CR_TOP_I2S_1_SW */
+			width = <1>;
+			clock-output-names = "i2s_sw1";
+			linux,clk-set-rate-parent;
+			linux,clk-set-rate-remux;
+		};
+
+		/* i2s_sw1 ---[i2s_en] */
+		i2s_en: i2s_en {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&i2s_sw1>;
+			reg = <0x0200590c 0x4>;	/* CR_TOP_CLKENAB */
+			bit = <12>;		/* CR_TOP_I2S_1_EN */
+			clock-output-names = "i2s_en";
+		};
+
+		/* i2s_en ---[i2s_div]--- i2s_mclk */
+		i2s_mclk: i2s_mclk {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&i2s_en>;
+			reg = <0x0200593c 0x4>;	/* CR_TOP_I2SCLK_DIV */
+			shift = <0>;
+			width = <8>;
+			clock-output-names = "i2s_mclk";
+		};
+
+
+		/* ==== USB Clock Generation ==== */
+
+		usb_phy_clk: usb_phy_clk {
+			#clock-cells = <0>;
+			/* Not yet implemented */
+		};
+
+
+		/* ==== Pixel Clock Generation ==== */
+
+		/* pixel_sw3 ---[pixel_sw0]---	*
+		 * xtal1     --o[         ]	*/
+		pixel_sw0: pixel_sw0 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&xtal1>,
+				 <&pixel_sw3>;
+			reg = <0x02005988 0x4>;	/* CR_TOP_CLKSWITCH2 */
+			shift = <0>;		/* CR_TOP_PIXEL_CLK_0_SW */
+			width = <1>;
+			clock-output-names = "pixel_sw0";
+			linux,clk-set-rate-parent;
+			linux,clk-set-rate-remux;
+		};
+
+		/* pixel_sw4         ---[pixel_sw1]---	*
+		 * sys_clk_undeleted --o[         ]	*/
+		pixel_sw1: pixel_sw1 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&sys_clk_undeleted>,
+				 <&pixel_sw4>;
+			reg = <0x02005988 0x4>;	/* CR_TOP_CLKSWITCH2 */
+			shift = <1>;		/* CR_TOP_PIXEL_CLK_1_SW */
+			width = <1>;
+			clock-output-names = "pixel_sw1";
+			linux,clk-set-rate-parent;
+			linux,clk-set-rate-remux;
+		};
+
+		/* pixel_sw1 ---[pixel_sw2]---	*
+		 * pixel_sw0 --o[         ]	*/
+		pixel_sw2: pixel_sw2 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&pixel_sw0>,
+				 <&pixel_sw1>;
+			reg = <0x02005988 0x4>;	/* CR_TOP_CLKSWITCH2 */
+			shift = <2>;		/* CR_TOP_PIXEL_CLK_2_SW */
+			width = <1>;
+			clock-output-names = "pixel_sw2";
+			linux,clk-set-rate-parent;
+			linux,clk-set-rate-remux;
+		};
+
+		/* afe_progdiv3clk_to_soc ---[pixel_sw3]---	*
+		 * adc_pll_clk            --o[         ]	*/
+		pixel_sw3: pixel_sw3 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&adc_pll_clk>,
+				 <&afe_progdiv3clk_to_soc>;
+			reg = <0x02005988 0x4>;	/* CR_TOP_CLKSWITCH2 */
+			shift = <3>;		/* CR_TOP_PIXEL_CLK_3_SW */
+			width = <1>;
+			clock-output-names = "pixel_sw3";
+			linux,clk-set-rate-parent;
+			linux,clk-set-rate-remux;
+		};
+
+		/* xtal2       ---[pixel_sw4]---	*
+		 * usb_phy_clk --o[         ]		*/
+		pixel_sw4: pixel_sw4 {
+			compatible = "img,meta-mux-clock";
+			#clock-cells = <0>;
+			clocks = <&usb_phy_clk>,
+				 <&xtal2>;
+			reg = <0x02005988 0x4>;	/* CR_TOP_CLKSWITCH2 */
+			shift = <4>;		/* CR_TOP_PIXEL_CLK_4_SW */
+			width = <1>;
+			clock-output-names = "pixel_sw4";
+			linux,clk-set-rate-remux;
+		};
+
+		/* pixel_sw2 ---[pixel_en]--- */
+		pixel_en: pixel_en {
+			compatible = "img,meta-gate-clock";
+			#clock-cells = <0>;
+			clocks = <&pixel_sw2>;
+			reg = <0x0200598c 0x4>;	/* CR_TOP_CLKENAB2 */
+			bit = <2>;		/* CR_TOP_PIXEL_CLK_2_EN */
+			clock-output-names = "pixel_en";
+		};
+
+		/* pixel_en ---[pixel_div]--- */
+		pixel_clk: pixel_div {
+			compatible = "divider-clock";
+			#clock-cells = <0>;
+			clocks = <&pixel_en>;
+			reg = <0x02005998 0x4>;	/* CR_TOP_PIXEL_CLK_DIV */
+			shift = <0>;		/* CR_TOP_PIXEL_CLK_DIV */
+			width = <8>;
+			clock-output-names = "pixel";
+		};
+	};
+};
diff --git a/arch/metag/boot/dts/tz1090_generic.dts b/arch/metag/boot/dts/tz1090_generic.dts
new file mode 100644
index 0000000..de65454
--- /dev/null
+++ b/arch/metag/boot/dts/tz1090_generic.dts
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "tz1090.dtsi"
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/metag/configs/chorus2_defconfig b/arch/metag/configs/chorus2_defconfig
new file mode 100644
index 0000000..534587a
--- /dev/null
+++ b/arch/metag/configs/chorus2_defconfig
@@ -0,0 +1,89 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SOC_CHORUS2=y
+CONFIG_METAG_DA=y
+CONFIG_CHORUS2_TFT=y
+CONFIG_ATP_DP=y
+CONFIG_ATP_DP_LAN1=y
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_CHORUS2=y
+CONFIG_SPI=y
+CONFIG_GPIO_PCA953X=y
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_AUXDISPLAY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/metag/configs/comet_minimorph_defconfig b/arch/metag/configs/comet_minimorph_defconfig
new file mode 100644
index 0000000..1a1add8
--- /dev/null
+++ b/arch/metag/configs/comet_minimorph_defconfig
@@ -0,0 +1,147 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SOC_TZ1090=y
+# CONFIG_METAG_FPU is not set
+CONFIG_METAG_DA=y
+CONFIG_METAG_BUILTIN_DTB_NAME="tz1090_01tt"
+CONFIG_TZ1090_01XX=y
+CONFIG_TZ1090_01XX_HDMI_AUDIO=y
+CONFIG_HZ_100=y
+CONFIG_PM_RUNTIME=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_UCCP310WLAN=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+# CONFIG_DEVKMEM is not set
+CONFIG_IMG_UCCP=y
+CONFIG_IMG_SGX2D=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IMG=y
+CONFIG_SPI=y
+CONFIG_SPI_IMG=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_IMGPDC_WDT=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+# CONFIG_RC_DECODERS is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_IMG=y
+CONFIG_IR_IMG_NEC=y
+CONFIG_IR_IMG_JVC=y
+CONFIG_IR_IMG_SONY=y
+CONFIG_IR_IMG_SHARP=y
+CONFIG_IR_IMG_SANYO=y
+CONFIG_FB_PDP_GFX_VIDEOMEM=3145728
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_IMGPDI=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_TZ1090_SOC_TZ1090_01XX=y
+CONFIG_USB=y
+CONFIG_USB_DWC_OTG=y
+CONFIG_USB_DWC_OTG_OTG=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DWC_OTG=y
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_MMC=y
+CONFIG_MMC_DW=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig
index 01cd67e..37762d5 100644
--- a/arch/metag/configs/meta1_defconfig
+++ b/arch/metag/configs/meta1_defconfig
@@ -2,6 +2,9 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
 CONFIG_SLAB=y
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig
index 643392b..8d6f5c6 100644
--- a/arch/metag/configs/meta2_defconfig
+++ b/arch/metag/configs/meta2_defconfig
@@ -3,6 +3,9 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
 CONFIG_SLAB=y
diff --git a/arch/metag/configs/meta2_gen_defconfig b/arch/metag/configs/meta2_gen_defconfig
new file mode 100644
index 0000000..643392b
--- /dev/null
+++ b/arch/metag/configs/meta2_gen_defconfig
@@ -0,0 +1,40 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_METAG_L2C=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_METAG_HALT_ON_PANIC=y
+CONFIG_METAG_DA=y
+CONFIG_HZ_100=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig
index f330673..c479b67 100644
--- a/arch/metag/configs/meta2_smp_defconfig
+++ b/arch/metag/configs/meta2_smp_defconfig
@@ -3,6 +3,9 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
 CONFIG_SLAB=y
diff --git a/arch/metag/configs/meta2_smp_gen_defconfig b/arch/metag/configs/meta2_smp_gen_defconfig
new file mode 100644
index 0000000..f330673
--- /dev/null
+++ b/arch/metag/configs/meta2_smp_gen_defconfig
@@ -0,0 +1,41 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_METAG_L2C=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_METAG_HALT_ON_PANIC=y
+CONFIG_SMP=y
+CONFIG_METAG_DA=y
+CONFIG_HZ_100=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/polaris_defconfig b/arch/metag/configs/polaris_defconfig
new file mode 100644
index 0000000..c7d1642
--- /dev/null
+++ b/arch/metag/configs/polaris_defconfig
@@ -0,0 +1,123 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SOC_TZ1090=y
+CONFIG_METAG_SUSPEND_MEM=y
+# CONFIG_METAG_FPU is not set
+CONFIG_METAG_DA=y
+CONFIG_POLARIS=y
+CONFIG_HZ_100=y
+CONFIG_PM_RUNTIME=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_UNIX=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_QT5480=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+CONFIG_IMG_UCCP=y
+CONFIG_IMG_SGX2D=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IMG=y
+CONFIG_SPI=y
+CONFIG_SPI_IMG=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_IMGPDC_WDT=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+# CONFIG_RC_DECODERS is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_IMG=y
+CONFIG_IR_IMG_NEC=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_PDP=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_IMGPDI=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_TZ1090_AUXDAC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_TZ1090_SOC=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_DWC_OTG=y
+CONFIG_USB_DWC_OTG_ISOCHRONOUS=y
+CONFIG_USB_GADGET=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
diff --git a/arch/metag/configs/tz1090_defconfig b/arch/metag/configs/tz1090_defconfig
new file mode 100644
index 0000000..3b93455
--- /dev/null
+++ b/arch/metag/configs/tz1090_defconfig
@@ -0,0 +1,122 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="arch/metag/boot/ramdisk.cpio"
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SOC_TZ1090=y
+# CONFIG_METAG_FPU is not set
+CONFIG_METAG_DA=y
+CONFIG_TZ1090_01XX=y
+CONFIG_TZ1090_01XX_HDMI_AUDIO=y
+CONFIG_HZ_100=y
+CONFIG_PM_RUNTIME=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_UNIX=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+CONFIG_IMG_UCCP=y
+CONFIG_IMG_SGX2D=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IMG=y
+CONFIG_SPI=y
+CONFIG_SPI_IMG=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_IMGPDC_WDT=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+# CONFIG_RC_DECODERS is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_IMG=y
+CONFIG_IR_IMG_NEC=y
+CONFIG_IR_IMG_JVC=y
+CONFIG_IR_IMG_SONY=y
+CONFIG_IR_IMG_SHARP=y
+CONFIG_IR_IMG_SANYO=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_IMGPDI=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_TZ1090_SOC_TZ1090_01XX=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_DWC_OTG=y
+CONFIG_USB_DWC_OTG_ISOCHRONOUS=y
+CONFIG_USB_GADGET=y
+CONFIG_MMC=y
+CONFIG_MMC_DW=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
diff --git a/arch/metag/drivers/Kconfig b/arch/metag/drivers/Kconfig
new file mode 100644
index 0000000..84d645e
--- /dev/null
+++ b/arch/metag/drivers/Kconfig
@@ -0,0 +1,9 @@
+menu "Additional Meta Device Drivers"
+
+config IMG_DMAC
+	depends on SOC_CHORUS2
+	bool "IMG DMAC Driver"
+	help
+	  This enables the IMG DMAC driver
+
+endmenu
diff --git a/arch/metag/drivers/Makefile b/arch/metag/drivers/Makefile
new file mode 100644
index 0000000..55ae4e6
--- /dev/null
+++ b/arch/metag/drivers/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the Linux Meta-specific device drivers.
+#
+obj-y					+= dma/
diff --git a/arch/metag/drivers/dma/Makefile b/arch/metag/drivers/dma/Makefile
new file mode 100644
index 0000000..d7930b4
--- /dev/null
+++ b/arch/metag/drivers/dma/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Chorus2 DMA Controller (DMAC) device driver and associated tests.
+#
+
+obj-$(CONFIG_IMG_DMAC)			+= img_dmac.o
diff --git a/arch/metag/drivers/dma/img_dmac.c b/arch/metag/drivers/dma/img_dmac.c
new file mode 100644
index 0000000..334cb98
--- /dev/null
+++ b/arch/metag/drivers/dma/img_dmac.c
@@ -0,0 +1,320 @@
+/*
+ * IMG DMA Controller (DMAC) specific DMA code.
+ *
+ * Copyright (C) 2010,2012 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include <asm/img_dma.h>
+#include <asm/img_dmac.h>
+
+#if defined(CONFIG_SOC_CHORUS2)
+#include <asm/soc-chorus2/dma.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+#else
+#error /*Add your SoC here*/
+#endif
+
+int img_dma_reset(int dmanr)
+{
+	unsigned int *dmac_base;
+
+	if (dmanr >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	dmac_base = (unsigned int *)(DMAC_HWBASE +
+				     (dmanr * DMAC_CHANNEL_STRIDE));
+
+	/* Set SRST bit in DMAC_COUNT_REG */
+	iowrite32(DMAC_CNTN_REG_SRST_BIT, dmac_base+DMAC_COUNT_REG);
+
+	/* Clear all other registers */
+	iowrite32(0, dmac_base+DMAC_SETUP_REG);
+	iowrite32(0, dmac_base+DMAC_PERIPH_REG);
+	iowrite32(0, dmac_base+DMAC_IRQSTAT_REG);
+	iowrite32(0, dmac_base+DMAC_2DMODE_REG);
+
+	/* Finally release the SRST bit */
+	iowrite32(0, dmac_base+DMAC_COUNT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_reset);
+
+int img_dma_get_irq(int dmanr)
+{
+	return external_irq_map(DMA0_IRQ_NUM + dmanr);
+}
+EXPORT_SYMBOL(img_dma_get_irq);
+
+int img_dma_set_data_address(int dmanr, u32 address)
+{
+	u32 *base;
+
+	base = get_dma_regs(dmanr);
+	iowrite32(address, base+DMAC_SETUP_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_data_address);
+
+int img_dma_set_length(int dmanr, unsigned long bytes, unsigned int width)
+{
+	u32 c;
+	u32 *base;
+	u16 count = (u16)bytes;
+
+	base = get_dma_regs(dmanr);
+	c = ioread32(base+DMAC_COUNT_REG);
+
+	c &= ~DMAC_CNTN_REG_PW_MASK;
+
+	switch (width) {
+	case IMG_DMA_WIDTH_32:
+		c |= DMAC_CNTN_REG_PW_32;
+		count /= 4;
+		break;
+
+	case IMG_DMA_WIDTH_16:
+		c |= DMAC_CNTN_REG_PW_16;
+		count /= 2;
+		break;
+
+	case IMG_DMA_WIDTH_8:
+		c |= DMAC_CNTN_REG_PW_8;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	c |= count;
+	iowrite32(c, base+DMAC_COUNT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_length);
+
+int img_dma_set_io_address(int dmanr, u32 address,
+				unsigned int burst_size)
+{
+	u32 *base;
+	u32 addr, periph;
+
+	base = get_dma_regs(dmanr);
+
+	periph = ioread32(base + DMAC_PERIPH_REG);
+	/* We set all the non-list mode bits here. */
+	addr = (address & DMAC_PHADDRN_REG_ADDR_BITS) >> 2;
+	addr |= ((burst_size << DMAC_PHADDRN_REG_BURST_S)
+		 & DMAC_PHADDRN_REG_BURST_BITS);
+
+	periph &= ~(DMAC_PHADDRN_REG_ADDR_BITS|DMAC_PHADDRN_REG_BURST_BITS);
+	periph |= addr;
+
+	iowrite32(periph, base + DMAC_PERIPH_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_io_address);
+
+int img_dma_set_direction(int dmanr, enum img_dma_direction direction)
+{
+	u32 *base;
+	u32 c;
+
+	base = get_dma_regs(dmanr);
+	c = ioread32(base+DMAC_COUNT_REG);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:
+		c |= DMAC_CNTN_REG_DIR_BIT;
+		break;
+
+	case DMA_TO_DEVICE:
+		c &= ~DMAC_CNTN_REG_DIR_BIT;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	iowrite32(c, base+DMAC_COUNT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_direction);
+
+int img_dma_start(int dmanr)
+{
+	u32 *base;
+	u32 c;
+	u32 s;
+
+	base = get_dma_regs(dmanr);
+
+	/* Clear down the done bit */
+	s = ioread32(base+DMAC_IRQSTAT_REG);
+	s &= ~DMAC_IRQSTATN_REG_FIN_BIT;
+	iowrite32(s, base+DMAC_IRQSTAT_REG);
+
+	c = ioread32(base+DMAC_COUNT_REG);
+
+	c &= ~DMAC_CNTN_REG_SRST_BIT;
+	iowrite32(c, base+DMAC_COUNT_REG);
+
+	c |= DMAC_CNTN_REG_EN_BIT;
+	iowrite32(c, base+DMAC_COUNT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_start);
+
+int img_dma_is_finished(int dmanr)
+{
+	u32 *base;
+	u32 s;
+
+	base = get_dma_regs(dmanr);
+
+	s = ioread32(base+DMAC_IRQSTAT_REG);
+	return s & DMAC_IRQSTATN_REG_FIN_BIT;
+}
+EXPORT_SYMBOL(img_dma_is_finished);
+
+int img_dma_set_list_addr(int dma_channel, u32 address)
+{
+	u32 temp;
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	iowrite32(address, base+DMAC_SETUP_REG);
+
+	temp = ioread32(base+DMAC_COUNT_REG);
+	temp |= DMAC_CNTN_REG_LIEN_BIT;
+	iowrite32(temp, base+DMAC_COUNT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_list_addr);
+
+int img_dma_start_list(int dma_channel)
+{
+	u32 temp;
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	temp = ioread32(base+DMAC_COUNT_REG);
+	temp |= DMAC_CNTN_REG_LEN_BIT;
+	iowrite32(temp, base+DMAC_COUNT_REG);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(img_dma_start_list);
+
+int img_dma_stop_list(int dma_channel)
+{
+	u32 temp;
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	temp = ioread32(base+DMAC_COUNT_REG);
+	temp &= ~DMAC_CNTN_REG_LEN_BIT;
+	iowrite32(temp, base+DMAC_COUNT_REG);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(img_dma_stop_list);
+
+int img_dma_get_int_status(int dma_channel, u32 *status)
+{
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	*status = ioread32(base + DMAC_IRQSTAT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_get_int_status);
+
+int img_dma_set_int_status(int dma_channel, u32 status)
+{
+
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	iowrite32(status, base + DMAC_IRQSTAT_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_int_status);
+
+int img_dma_has_request(int dma_channel)
+{
+	u32 *base;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	return !!(ioread32(base + DMAC_COUNT_REG) & DMAC_CNTN_REG_DREQ_BIT);
+}
+EXPORT_SYMBOL(img_dma_has_request);
+
+/**
+ * img_dma_set_access_delay() - sets the access delay
+ * @dma_channel:	DMA channel number
+ * @delay:		Delay in range 0-7, where actual delay = @delay * 256
+ *			cycles
+ *
+ */
+int img_dma_set_access_delay(int dma_channel, u8 delay)
+{
+	u32 *base;
+	u32 paddr;
+
+	if (dma_channel >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+	if (delay > 7)
+		return -EINVAL;
+
+	base = get_dma_regs(dma_channel);
+
+	paddr = ioread32(base + DMAC_PERIPH_REG);
+	paddr |= delay << DMAC_PHADDRN_REG_ACCDEL_S;
+	iowrite32(paddr, base + DMAC_PERIPH_REG);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_dma_set_access_delay);
diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h
index d04b48c..9f8967f 100644
--- a/arch/metag/include/asm/bug.h
+++ b/arch/metag/include/asm/bug.h
@@ -6,7 +6,7 @@
 struct pt_regs;
 
 extern const char *trap_name(int trapno);
-extern void die(const char *str, struct pt_regs *regs, long err,
-		unsigned long addr) __attribute__ ((noreturn));
+extern void __noreturn die(const char *str, struct pt_regs *regs, long err,
+		unsigned long addr);
 
 #endif
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h
index 7787ec5..268954b 100644
--- a/arch/metag/include/asm/cacheflush.h
+++ b/arch/metag/include/asm/cacheflush.h
@@ -215,19 +215,9 @@
 		l2c_fence(start + size - 1);
 	}
 }
-
-/* Invalidate (may also write back if necessary) */
-static inline void invalidate_dcache_region(void *start, unsigned long size)
-{
-	if (meta_l2c_is_enabled())
-		cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
-	else
-		metag_data_cache_flush(start, size);
-}
 #else
 #define flush_dcache_region(s, l)	metag_data_cache_flush((s), (l))
 #define writeback_dcache_region(s, l)	do {} while (0)
-#define invalidate_dcache_region(s, l)	flush_dcache_region((s), (l))
 #endif
 
 static inline void copy_to_user_page(struct vm_area_struct *vma,
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h
index cf6b44e..a2388cd 100644
--- a/arch/metag/include/asm/cachepart.h
+++ b/arch/metag/include/asm/cachepart.h
@@ -32,6 +32,25 @@
 unsigned int get_global_icache_size(void);
 
 /**
+ * cachepart_min_iglobal() - Ensure we have @min_size global icache.
+ * @min_size:	Minimum size of global icache for this thread.
+ * @old_val:	Pointer to area to store old cache information so it can be
+ *		restored to its previous setting.
+ *
+ * If possible, this will take cache from the thread local cache to satisfy the
+ * @min_size requirement.  Returns 0 on success, or an error code on failure.
+ * The operation can be reversed with cachepart_restore_iglobal().
+ */
+int cachepart_min_iglobal(unsigned int min_size, unsigned int *old_val);
+
+/**
+ * cachepart_restore_iglobal() - Restore global icache to previous setting.
+ * @old_val:	Pointer to area to find old cache information so it can be
+ *		restored, as given to cachepart_min_iglobal().
+ */
+void cachepart_restore_iglobal(unsigned int *old_val);
+
+/**
  * check_for_dache_aliasing() - Ensure that the bootloader has configured the
  * dache and icache properly to avoid aliasing
  * @thread_id: Hardware thread ID
diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h
index 999bf76..08dd1cc 100644
--- a/arch/metag/include/asm/checksum.h
+++ b/arch/metag/include/asm/checksum.h
@@ -64,7 +64,8 @@
 					__wsum sum)
 {
 	unsigned long len_proto = (proto + len) << 8;
-	asm ("ADD    %0, %0, %1\n"
+	asm ("ADDS   %0, %0, %1\n"
+	     "ADDCS  %0, %0, #1\n"
 	     "ADDS   %0, %0, %2\n"
 	     "ADDCS  %0, %0, #1\n"
 	     "ADDS   %0, %0, %3\n"
diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h
index 3e2915a..ded4ab2 100644
--- a/arch/metag/include/asm/clock.h
+++ b/arch/metag/include/asm/clock.h
@@ -19,6 +19,8 @@
  *			core frequency will be determined like this:
  *			Meta 1: based on loops_per_jiffy.
  *			Meta 2: (EXPAND_TIMER_DIV + 1) MHz.
+ *			If a "core" clock is provided by the device tree, it
+ *			will override this function.
  */
 struct meta_clock_desc {
 	unsigned long		(*get_core_freq)(void);
@@ -27,6 +29,12 @@
 extern struct meta_clock_desc _meta_clock;
 
 /*
+ * Perform platform clock initialisation, reading clocks from device tree etc.
+ * Only accessible during boot.
+ */
+void init_metag_clocks(void);
+
+/*
  * Set up the default clock, ensuring all callbacks are valid - only accessible
  * during boot.
  */
diff --git a/arch/metag/include/asm/core-sysfs.h b/arch/metag/include/asm/core-sysfs.h
new file mode 100644
index 0000000..b2168b1
--- /dev/null
+++ b/arch/metag/include/asm/core-sysfs.h
@@ -0,0 +1,9 @@
+#ifndef _METAG_ASM_CORE_SYSFS_H
+#define _METAG_ASM_CORE_SYSFS_H
+
+#include <linux/device.h>
+
+extern struct bus_type performance_subsys;
+extern struct bus_type cache_subsys;
+
+#endif /* _METAG_ASM_CORE_SYSFS_H */
diff --git a/arch/metag/include/asm/coremem.h b/arch/metag/include/asm/coremem.h
new file mode 100644
index 0000000..c4f12b9
--- /dev/null
+++ b/arch/metag/include/asm/coremem.h
@@ -0,0 +1,100 @@
+#ifndef _METAG_COREMEM_H_
+#define _METAG_COREMEM_H_
+
+#ifdef CONFIG_METAG_COREMEM
+
+/* Core memory region flags. */
+#define METAG_COREMEM_IMEM	0x0001	/* instruction memory */
+#define METAG_COREMEM_DMEM	0x0002	/* data memory */
+#define METAG_COREMEM_CACHE	0x0100	/* cache memory */
+#define METAG_COREMEM_ICACHE	(METAG_COREMEM_IMEM | METAG_COREMEM_CACHE)
+#define METAG_COREMEM_DCACHE	(METAG_COREMEM_DMEM | METAG_COREMEM_CACHE)
+
+/**
+ * struct metag_coremem_region - A region of core memory.
+ * @flags:	Bitwise OR of METAG_COREMEM_*.
+ * @start:	Start of region (if not cache).
+ * @size:	Size of region (if not cache).
+ * @pos:	Current position in region.
+ * @data:	Private data.
+ */
+struct metag_coremem_region {
+	unsigned long flags;
+	char *start;
+	unsigned int size;
+	unsigned int pos;
+	unsigned int data;
+};
+
+/**
+ * metag_coremem_alloc() - Initialise access to core memory.
+ * @flags:	Bitwise or of METAG_COREMEM_* flags.
+ * @size:	Minimum size required.
+ *
+ * This gets a coremem resource. Use metag_coremem_free() to indicate that
+ * the memory has been finished with. Returns NULL on failure.
+ */
+struct metag_coremem_region *metag_coremem_alloc(unsigned int flags,
+						 unsigned int size);
+
+/**
+ * metag_coremem_free() - Finish using some core memory.
+ * @region:	Coremem region returned from metag_coremem_alloc().
+ *
+ * Indicates that the core memory region has been finished with and can be
+ * reused again by the system (e.g. for cache).
+ */
+void metag_coremem_free(struct metag_coremem_region *region);
+
+/**
+ * metag_coremem_push() - Push code or data into a coremem section.
+ * @region:	Coremem region returned from metag_coremem_alloc().
+ * @start:  	Start address of memory to copy from.
+ * @size:   	Size of memory to copy.
+ *
+ * The specified memory is pushed onto the next free area in the coremem region.
+ * The address of the location in coremem where the memory has been copied to is
+ * returned, or NULL if there isn't enough space left in the region.
+ *
+ * If the METAG_COREMEM_ICACHE flag was specified, then the region won't be
+ * locked into the cache until the code is executed or icache prefetched.
+ */
+void *metag_coremem_push(struct metag_coremem_region *region,
+			 void *start, unsigned long size);
+
+
+/*
+ * SoC specific coremem regions.
+ * Each SoC that uses suspend should define this.
+ */
+extern struct metag_coremem_region metag_coremems[];
+
+/* ARRAY_SIZE(metag_coremems) */
+extern unsigned int metag_coremems_sz;
+
+/**
+ * metag_cache_lock() - Lock memory into core cache for this hw thread.
+ * @flags:	Which cache, METAG_COREMEM_IMEM or METAG_COREMEM_DMEM.
+ * @phys:	Physical start address.
+ * @size:	Amount of memory to lock into cache.
+ *
+ * Locks the specified physical memory into core cache mode so that it can be
+ * accessed and modified directly in the cache. The physical address returned
+ * points to where the cache can be found. Modifications here will not modify
+ * the original memory. When the memory has been finished with, call
+ * metag_cache_unlock().
+ */
+unsigned long metag_cache_lock(unsigned int flags, unsigned long phys,
+			       unsigned long size);
+
+/**
+ * metag_cache_unlock() - Unlock core cache locked memory for this hw thread.
+ * @flags:	Which cache, METAG_COREMEM_IMEM or METAG_COREMEM_DMEM.
+ *
+ * Unlocks either instruction or data locked into core cache.
+ */
+void metag_cache_unlock(unsigned int flags);
+
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/disas.h b/arch/metag/include/asm/disas.h
new file mode 100644
index 0000000..2472398
--- /dev/null
+++ b/arch/metag/include/asm/disas.h
@@ -0,0 +1,150 @@
+#ifndef __ASM_DISAS_H
+#define __ASM_DISAS_H
+
+#define INSN_SIZE		4
+
+/* General decoding */
+#define OP_CODE(op)		((op) >> 24)
+#define OP_IMM19(op)		(((int)((op) << 8) >> 13) * INSN_SIZE)
+#define OP_IMM16(op)		(((op) >> 3) & 0xffff)
+#define OP_REG19(op)		(((op) >> 19) & 0x1f)
+
+/* Instruction matching */
+#define OP_BCC(op)		(OP_CODE(op) == OPCODE_BCC)
+#define OP_CALLR(op)		(OP_CODE(op) == OPCODE_CALLR)
+#define OP_JUMP(op)		(OP_CODE(op) == OPCODE_JUMP)
+
+/* Instruction decoding; note OP_IMM19() already scales by INSN_SIZE */
+#define OP_BCC_CC(op)		(((op) >> 1) & 0xf)
+#define OP_BCC_R(op)		((op) & 0x1)
+#define OP_BCC_IMM(op)		OP_IMM19(op)
+#define OP_JUMP_IMM(op)		OP_IMM16(op)
+#define OP_JUMP_RS(op)		OP_REG19(op)
+#define OP_JUMP_BU(op)		((op) & 0x3)
+#define OP_JUMP_UB(op)		metag_bu_map[OP_JUMP_BU(op)]
+#define OP_CALLR_IMM(op)	OP_IMM19(op)
+
+/* op codes */
+enum metag_opcode {
+	OPCODE_BCC	= 0xa0,
+	OPCODE_CALLR	= 0xab,
+	OPCODE_JUMP	= 0xac,
+};
+
+/* unit codes */
+enum metag_unit {
+	UNIT_CT,       /* 0x0 */
+	UNIT_D0,
+	UNIT_D1,
+	UNIT_A0,
+	UNIT_A1,       /* 0x4 */
+	UNIT_PC,
+	UNIT_RA,
+	UNIT_TR,
+	UNIT_TT,       /* 0x8 */
+	UNIT_FX,
+	UNIT_MAX,
+};
+
+/* condition codes */
+enum metag_cc {
+	CC_A,  /* 0x0 */
+	CC_EQ,
+	CC_NE,
+	CC_CS,
+	CC_CC, /* 0x4 */
+	CC_N,
+	CC_PL,
+	CC_VS,
+	CC_VC, /* 0x8 */
+	CC_HI,
+	CC_LS,
+	CC_GE,
+	CC_LT, /* 0xC */
+	CC_GT,
+	CC_LE,
+	CC_NV,
+};
+
+/* condition flags */
+enum metag_cf {
+	CF_C = 0x1, /* Carry */
+	CF_V = 0x2, /* oVerflow */
+	CF_N = 0x4, /* Negative */
+	CF_Z = 0x8, /* Zero */
+};
+
+/* 2 bit base unit (BU) mapping */
+static enum metag_unit metag_bu_map[4] = {
+	UNIT_A1,
+	UNIT_D0,
+	UNIT_D1,
+	UNIT_A0,
+};
+
+/* decoding operand 2 replace (o2r) */
+static enum metag_unit metag_o2r_map[4] = {
+	UNIT_A1,
+	UNIT_D0,
+	UNIT_RA,
+	UNIT_A0,
+};
+static enum metag_unit metag_o2r_alternative = UNIT_D1;
+
+static inline enum metag_unit decode_o2r(enum metag_unit us1,
+					 int *rs2 /* in out */)
+{
+	enum metag_unit u;
+	int us = (*rs2 >> 3) & 0x3;
+
+	*rs2 &= 0x7;
+	u = metag_o2r_map[us];
+	if (u == us1)
+		u = metag_o2r_alternative;
+	return u;
+}
+
+
+static inline int test_cc(struct pt_regs *regs, int cc)
+{
+	uint32_t cflags = regs->ctx.Flags & 0xf;
+	switch (cc) {
+	case CC_A:  /* 1 */
+		return 1;
+	case CC_EQ: /* Z */
+		return cflags & CF_Z;
+	case CC_NE: /* !Z */
+		return !(cflags & CF_Z);
+	case CC_CS: /* C */
+		return cflags & CF_C;
+	case CC_CC: /* !C */
+		return !(cflags & CF_C);
+	case CC_N: /* N */
+		return cflags & CF_N;
+	case CC_PL: /* !N */
+		return !(cflags & CF_N);
+	case CC_VS: /* V */
+		return cflags & CF_V;
+	case CC_VC: /* !V */
+		return !(cflags & CF_V);
+	case CC_HI: /* !(C | Z) */
+		return !((cflags & CF_C) || (cflags & CF_Z));
+	case CC_LS: /* C | Z */
+		return (cflags & CF_C) || (cflags & CF_Z);
+	case CC_GE: /* !(N ^ V) */
+		return !(cflags & CF_N) == !(cflags & CF_V);
+	case CC_LT: /* N ^ V */
+		return !(cflags & CF_N) != !(cflags & CF_V);
+	case CC_GT: /* !(Z | (N ^ V)), booleanize N/V before comparing */
+		return !((cflags & CF_Z) ||
+			 (!(cflags & CF_N) != !(cflags & CF_V)));
+	case CC_LE: /* Z | (N ^ V), booleanize N/V before comparing */
+		return (cflags & CF_Z) ||
+			(!(cflags & CF_N) != !(cflags & CF_V));
+	case CC_NV: /* 0 */
+	default:
+		return 0;
+	}
+}
+
+#endif
diff --git a/arch/metag/include/asm/img_dma.h b/arch/metag/include/asm/img_dma.h
new file mode 100644
index 0000000..2e866b6
--- /dev/null
+++ b/arch/metag/include/asm/img_dma.h
@@ -0,0 +1,75 @@
+/*
+ * img_dma.h
+ *
+ * API for IMG's DMA controller (DMAC).
+ *
+ * Copyright (C) 2010,2012 Imagination Technologies Ltd.
+*/
+
+#ifndef IMG_DMA_H_
+#define IMG_DMA_H_
+
+enum img_dma_channel_state {
+	IMG_DMA_CHANNEL_RESERVED,
+	IMG_DMA_CHANNEL_AVAILABLE,
+	IMG_DMA_CHANNEL_INUSE,
+};
+
+enum img_dma_priority {
+	IMG_DMA_PRIO_BULK = 0,
+	IMG_DMA_PRIO_REALTIME,
+};
+
+enum img_dma_direction {
+	IMG_DMA_INVALID_DIR,
+	IMG_DMA_TO_PERIPHERAL,
+	IMG_DMA_FROM_PERIPHERAL,
+	IMG_DMA_MEM2MEM,
+};
+
+enum img_dma_width {
+	IMG_DMA_WIDTH_8 = 0,
+	IMG_DMA_WIDTH_16,
+	IMG_DMA_WIDTH_32,
+	IMG_DMA_WIDTH_64,
+	IMG_DMA_WIDTH_128,
+};
+
+int img_request_dma(int dma_channel, unsigned int periph);
+int img_free_dma(int dma_channel);
+int img_dma_reset(int dma_channel);
+int img_dma_get_irq(int dma_channel);
+int img_dma_set_direction(int dma_channel, enum img_dma_direction direction);
+int img_dma_set_data_address(int dma_channel, u32 address);
+int img_dma_set_length(int dma_channel, unsigned long bytes,
+		enum img_dma_width width);
+int img_dma_set_io_address(int dma_channel, u32 address,
+		unsigned int burst_size);
+int img_dma_start(int dma_channel);
+int img_dma_is_finished(int dma_channel);
+int img_dma_set_priority(int dma_channel, enum img_dma_priority prio);
+int img_dma_get_int_status(int dma_channel, u32 *status);
+int img_dma_set_int_status(int dma_channel, u32 status);
+int img_dma_has_request(int dma_channel);
+int img_dma_set_level_int(int dma_channel, int enable);
+int img_dma_set_access_delay(int dma_channel, u8 delay);
+
+/*List API*/
+int img_dma_set_list_addr(int dma_channel, u32 address);
+u32 img_dma_get_list_addr(int dma_channel);
+int img_dma_start_list(int dma_channel);
+int img_dma_stop_list(int dma_channel);
+int img_dma_translate_sglist(int dma_channel,
+			     struct scatterlist	*sg,
+			     void *list,
+			     dma_addr_t list_phys,
+			     unsigned long len,
+			     enum dma_data_direction dir,
+			     u32 perip_address,
+			     enum img_dma_width perip_width,
+			     int access_delay);
+
+
+#endif /* IMG_DMA_H_ */
+
+
diff --git a/arch/metag/include/asm/img_dmac.h b/arch/metag/include/asm/img_dmac.h
new file mode 100644
index 0000000..e4ae59e
--- /dev/null
+++ b/arch/metag/include/asm/img_dmac.h
@@ -0,0 +1,75 @@
+/*
+ * IMG DMA Controller (DMAC) specific DMA code.
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+*/
+
+#ifndef IMG_DMAC_H_
+#define IMG_DMAC_H_
+
+#define DMAC_SETUPN_REG      0x000
+#define DMAC_CNTN_REG        0x004
+#define DMAC_PHADDRN_REG     0x008
+#define DMAC_IRQSTATN_REG    0x00C
+#define DMAC_2DMODEN_REG     0x010
+
+#define DMAC_SETUP_REG       (DMAC_SETUPN_REG >> 2)
+#define DMAC_COUNT_REG       (DMAC_CNTN_REG >> 2)
+#define DMAC_PERIPH_REG      (DMAC_PHADDRN_REG >> 2)
+#define DMAC_IRQSTAT_REG     (DMAC_IRQSTATN_REG >> 2)
+#define DMAC_2DMODE_REG      (DMAC_2DMODEN_REG >> 2)
+
+#define DMAC_CNTN_REG_PW_S      27
+#define DMAC_CNTN_REG_PW_MASK   (0x3 << DMAC_CNTN_REG_PW_S)
+#define DMAC_CNTN_REG_PW_8      (2 << DMAC_CNTN_REG_PW_S)
+#define DMAC_CNTN_REG_PW_16     (1 << DMAC_CNTN_REG_PW_S)
+#define DMAC_CNTN_REG_PW_32              0
+#define DMAC_CNTN_REG_LIEN_BIT  0x80000000
+#define DMAC_CNTN_REG_IEN_BIT   0x20000000
+#define DMAC_CNTN_REG_DIR_BIT   0x04000000
+#define DMAC_CNTN_REG_DREQ_BIT	0x00100000
+#define DMAC_CNTN_REG_SRST_BIT  0x00080000
+#define DMAC_CNTN_REG_LEN_BIT	0x00040000
+#define DMAC_CNTN_REG_EN_BIT    0x00010000
+#define DMAC_CNTN_REG_CNT_BITS  0x0000FFFF
+
+#define DMAC_PHADDRN_REG_ADDR_BITS   0x007FFFFF
+#define DMAC_PHADDRN_REG_BURST_BITS  0x07000000
+#define DMAC_PHADDRN_REG_BURST_S     24
+#define DMAC_PHADDRN_REG_ACCDEL_BITS 0xE0000000
+#define DMAC_PHADDRN_REG_ACCDEL_S    29
+
+/* The rest of the DMAC internal states should be moved in here as other
+ * drivers (SPI for instance) stop accessing the registers directly and
+ * use the DMAC API.
+ */
+#define DMAC_IRQSTATN_REG_FIN_BIT 0x20000
+
+/*List Support*/
+
+struct img_dmac_desc {
+	volatile u32 perip_setup;
+	volatile u32 len_ints;
+	volatile u32 perip_address;
+	volatile u32 burst;
+	volatile u32 twod;
+	volatile u32 twod_addr;
+	volatile u32 data_addr;
+	volatile u32 next;
+};
+
+#define DMAC_LIST_PW_S	 28
+#define DMAC_LIST_PW_8	 (2 << DMAC_LIST_PW_S)
+#define DMAC_LIST_PW_16  (1 << DMAC_LIST_PW_S)
+#define DMAC_LIST_PW_32  (0 << DMAC_LIST_PW_S)
+#define DMAC_LIST_PERIP_TO_MEM	(1 << 30)
+
+#define DMAC_LIST_FIN_BIT 	(1 << 31)
+#define DMAC_LIST_INT_BIT	(1 << 30)
+#define DMAC_LIST_INCR_BIT	(1 << 16)
+#define DMAC_LIST_LEN_MASK	0xFFFF
+
+#define DMAC_LIST_ACC_DELAY_S	29
+#define DMAC_LIST_BURST_S	26
+
+#endif /* IMG_DMAC_H_ */
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e50..bf25380 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -111,6 +111,31 @@
 #define writeq	__raw_writeq
 
 /*
+ * A load of the architecture/SoC code uses readl/writel functions with raw
+ * physical address numbers rather than __iomem pointers. Until these are fixed
+ * to use metag_in32/metag_out32 or ioremap and __iomem pointers, do the cast
+ * here to hide the warnings.
+ */
+
+#undef readb
+#undef readw
+#undef readl
+#undef readq
+#define readb(addr)	__raw_readb((volatile void __iomem *)(addr))
+#define readw(addr)	__raw_readw((volatile void __iomem *)(addr))
+#define readl(addr)	__raw_readl((volatile void __iomem *)(addr))
+#define readq(addr)	__raw_readq((volatile void __iomem *)(addr))
+
+#undef writeb
+#undef writew
+#undef writel
+#undef writeq
+#define writeb(b, addr)	__raw_writeb(b, (volatile void __iomem *)(addr))
+#define writew(b, addr)	__raw_writew(b, (volatile void __iomem *)(addr))
+#define writel(b, addr)	__raw_writel(b, (volatile void __iomem *)(addr))
+#define writeq(b, addr)	__raw_writeq(b, (volatile void __iomem *)(addr))
+
+/*
  * Meta specific I/O for accessing non-MMU areas.
  *
  * These can be provided with a physical address rather than an __iomem pointer
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h
index be0c8f3..ad6bd0e 100644
--- a/arch/metag/include/asm/irq.h
+++ b/arch/metag/include/asm/irq.h
@@ -17,6 +17,7 @@
 
 int tbisig_map(unsigned int hw);
 extern void do_IRQ(int irq, struct pt_regs *regs);
+extern void init_IRQ(void);
 
 #ifdef CONFIG_METAG_SUSPEND_MEM
 int traps_save_context(void);
diff --git a/arch/metag/include/asm/kdebug.h b/arch/metag/include/asm/kdebug.h
new file mode 100644
index 0000000..66eff34
--- /dev/null
+++ b/arch/metag/include/asm/kdebug.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_METAG_KDEBUG_H
+#define _ASM_METAG_KDEBUG_H
+
+
+enum die_val {
+	DIE_UNUSED,
+	DIE_OOPS = 1,
+	DIE_TRAP,
+};
+
+#endif /* _ASM_METAG_KDEBUG_H */
diff --git a/arch/metag/include/asm/kgdb.h b/arch/metag/include/asm/kgdb.h
new file mode 100644
index 0000000..7690275
--- /dev/null
+++ b/arch/metag/include/asm/kgdb.h
@@ -0,0 +1,46 @@
+#ifndef __ASM_METAG_KGDB_H
+#define __ASM_METAG_KGDB_H
+
+#include <asm/cacheflush.h>
+#include <asm/ptrace.h>
+#include <asm/switch.h>
+
+enum regnames {
+	/* units */
+	GDB_CT		= 0,
+	GDB_D0		= GDB_CT + 32,
+	GDB_D1		= GDB_D0 + 32,
+	GDB_A0		= GDB_D1 + 32,
+	GDB_A1		= GDB_A0 + 16,
+	GDB_PC		= GDB_A1 + 16,
+	GDB_RA		= GDB_PC + 2,
+	GDB_TR		= GDB_RA + 0,
+	GDB_TT		= GDB_TR + 8,
+	GDB_FX		= GDB_TT + 5,
+	GDB_NUM_REGS	= GDB_FX + 16,
+
+	/* Control unit registers (CT.x) */
+	GDB_TXMODE	= GDB_CT + 1,
+	GDB_TXSTATUS	= GDB_CT + 2,
+	GDB_TXRPT	= GDB_CT + 3,
+	GDB_TXBPOBITS	= GDB_CT + 11,
+	GDB_TXDIVTIME	= GDB_CT + 28,
+
+	/* Address unit registers (AX.y) */
+	GDB_A0StP	= GDB_A0 + 0,
+
+};
+
+#define NUMREGBYTES    (GDB_NUM_REGS * 4)
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm volatile ("SWITCH #%c0" : : "i" (__METAG_SW_PERM_BREAK));
+}
+
+#define BUFMAX                 2048
+
+#define CACHE_FLUSH_IS_SAFE	1
+#define BREAK_INSTR_SIZE	4
+
+#endif /* __ASM_METAG_KGDB_H */
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h
index bffbeaa..aac094e 100644
--- a/arch/metag/include/asm/l2cache.h
+++ b/arch/metag/include/asm/l2cache.h
@@ -15,6 +15,15 @@
 /* defined in arch/metag/drivers/core-sysfs.c */
 extern struct sysdev_class cache_sysclass;
 
+/*
+ * Telling whether there is a usable invalidation operation.
+ * Be sure to include <linux/static_key.h> if you use this.
+ */
+struct static_key;
+extern struct static_key l2c_has_invalidate;
+
+#define meta_l2c_has_invalidate() static_key_false(&l2c_has_invalidate)
+
 static inline void wr_fence(void);
 
 /*
@@ -229,6 +238,7 @@
 
 #else /* CONFIG_METAG_L2C */
 
+#define meta_l2c_has_invalidate()	0
 #define meta_l2c_config()		0
 #define meta_l2c_is_present()		0
 #define meta_l2c_is_writeback()		0
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index 1cd13d5..cf682d7 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -147,12 +147,29 @@
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
 				 _PAGE_CACHEABLE)
 
+#ifdef CONFIG_SOC_CHORUS2
+/* Workaround older Meta 1.2 silicon bugs. The rules are as follows:
+   - code pages must always be cached to avoid cached and uncached code fetches
+     being mixed.
+   - data pages which are c-o-wed or marked read-only to implement page
+     dirtying must be uncached to avoid short dcache turnaround issue.
+   Luckily these two requirements can both be satisfied at the same time.
+ */
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+				 _PAGE_ACCESSED)
+#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHEABLE)
+#else
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
 #define PAGE_SHARED_C	PAGE_SHARED
 #define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
 				 _PAGE_CACHEABLE)
 #define PAGE_COPY_C	PAGE_COPY
+#endif
 
 #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
 				 _PAGE_CACHEABLE)
@@ -197,7 +214,19 @@
 
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 
+#ifdef CONFIG_SOC_CHORUS2
+extern unsigned long zero_pfn;
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long prot_bits = pgprot_val(prot);
+	if (pfn == zero_pfn)
+		prot_bits &= ~(_PAGE_CACHEABLE);
+	return __pte(((pfn) << PAGE_SHIFT) | prot_bits);
+}
+#else
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#endif
 
 #define pte_none(x)		(!pte_val(x))
 #define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
@@ -222,10 +251,23 @@
 static inline int pte_file(pte_t pte)    { return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_special(pte_t pte) { return 0; }
 
+#ifdef CONFIG_SOC_CHORUS2
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	if (pte_write(pte))
+		pte_val(pte) &= (~(_PAGE_WRITE | _PAGE_CACHEABLE));
+	return pte;
+}
+#else
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
+#endif
 static inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+#ifdef CONFIG_SOC_CHORUS2
+static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= (_PAGE_WRITE | _PAGE_CACHEABLE); return pte; }
+#else
 static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= _PAGE_WRITE; return pte; }
+#endif
 static inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 9b029a7..f16477d 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -199,4 +199,6 @@
 extern void show_trace(struct task_struct *tsk, unsigned long *sp,
 		       struct pt_regs *regs);
 
+extern const struct seq_operations cpuinfo_op;
+
 #endif
diff --git a/arch/metag/include/asm/resource.h b/arch/metag/include/asm/resource.h
new file mode 100644
index 0000000..ea8d2f0
--- /dev/null
+++ b/arch/metag/include/asm/resource.h
@@ -0,0 +1,12 @@
+#ifndef _METAG_RESOURCE_H
+#define _METAG_RESOURCE_H
+
+/* Chorus2 needs to lock a lot of pages, so set no locked page limit */
+#ifdef CONFIG_SOC_CHORUS2
+#undef MLOCK_LIMIT
+#define MLOCK_LIMIT	RLIM_INFINITY
+#endif
+
+#include <uapi/asm/resource.h>
+
+#endif
diff --git a/arch/metag/include/asm/soc-chorus2/c2_irqnums.h b/arch/metag/include/asm/soc-chorus2/c2_irqnums.h
new file mode 100644
index 0000000..a7683de
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/c2_irqnums.h
@@ -0,0 +1,63 @@
+#ifndef _C2_IRQNUMS_H
+#define _C2_IRQNUMS_H
+
+#include <linux/irqchip/metag-ext.h>
+
+#include <asm/irq.h>
+
+/*
+ * Hardware IRQ numbers.
+ * These are DEPRECATED, use device tree instead.
+ * These can be mapped to virtual IRQ numbers using external_irq_map().
+ */
+#define LOCAL_BUS_IRQ_NUM		 4
+#define DMA0_IRQ_NUM			32
+#define DMA1_IRQ_NUM			33
+#define DMA2_IRQ_NUM			34
+#define DMA3_IRQ_NUM			35
+#define DMA4_IRQ_NUM			36
+#define DMA5_IRQ_NUM			37
+#define DMA6_IRQ_NUM			38
+#define DMA7_IRQ_NUM			39
+#define DMA8_IRQ_NUM			40
+#define DMA9_IRQ_NUM			41
+#define DMA10_IRQ_NUM			42
+#define DMA11_IRQ_NUM			43
+#define UART1_IRQ_NUM			48
+#define UART2_IRQ_NUM			49
+#define RDI_IRQ_NUM			50
+#define NAND_IRQ_NUM			51
+#define PDP_IRQ_NUM			52
+#define RSD_IRQ_NUM			64
+#define MEM_STICK_IRQ_NUM		65
+#define SCP_DMA_IRQ_NUM			66
+#define SCP_AGC_IRQ_NUM			67
+#define ECP_IRQ_NUM			68
+#define ATAPI_IRQ_NUM			69
+#define SPI1_DMAW_IRQ_NUM		70
+#define SPI1_DMAR_IRQ_NUM		71
+#define SPI1_CMP_IRQ_NUM		72
+#define SPI2_DMAW_IRQ_NUM		73
+#define SPI2_DMAR_IRQ_NUM		74
+#define USB_IRQ_NUM			76
+#define SCB1_IRQ_NUM			77
+#define SCB2_IRQ_NUM			78
+#define LCD_IRQ_NUM			79
+#define LCD_DMA_IRQ_NUM			80
+#define I2S_O0_IRQ_NUM			81
+#define I2S_O1_IRQ_NUM			82
+#define I2S_O2_IRQ_NUM			83
+#define I2S_O3_IRQ_NUM			84
+#define I2S_IN_IRQ_NUM			85
+#define SPDIF_OUT_IRQ_NUM		86
+#define SPDIF_IN_IRQ_NUM		87
+#define GPIO_A_IRQ_NUM			88
+#define GPIO_B_IRQ_NUM			89
+#define GPIO_C_IRQ_NUM			90
+#define GPIO_D_IRQ_NUM			91
+#define GPIO_E_IRQ_NUM			92
+#define GPIO_F_IRQ_NUM			93
+#define GPIO_G_IRQ_NUM			94
+#define GPIO_H_IRQ_NUM			95
+
+#endif /* _C2_IRQNUMS_H */
diff --git a/arch/metag/include/asm/soc-chorus2/clock.h b/arch/metag/include/asm/soc-chorus2/clock.h
new file mode 100644
index 0000000..f658250
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/clock.h
@@ -0,0 +1,14 @@
+#ifndef _CHORUS2_CLOCK_H_
+#define _CHORUS2_CLOCK_H_
+
+struct meta_clock_desc;
+
+unsigned long chorus2_get_coreclock(void);
+/* passed through machine descriptor (init only) */
+extern struct meta_clock_desc chorus2_meta_clocks;
+
+unsigned long get_sysclock(void);
+
+void pix_clk_set_limits(unsigned long min, unsigned long max);
+
+#endif /* _CHORUS2_CLOCK_H_ */
diff --git a/arch/metag/include/asm/soc-chorus2/dma.h b/arch/metag/include/asm/soc-chorus2/dma.h
new file mode 100644
index 0000000..b84d6a7
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/dma.h
@@ -0,0 +1,28 @@
+/*
+ * dma.h
+ *
+ * Copyright (C) 2007,2008 Imagination Technologies Ltd.
+ */
+
+#ifndef _CHORUS2_DMA_H_
+#define _CHORUS2_DMA_H_
+
+
+
+#define SYSBUS_HWBASE           0x02000000
+#define DMAC_HWBASE             0x02001000
+
+#define DMA_CHAN_SEL_0_3        (SYSBUS_HWBASE+0x048)
+#define DMA_CHAN_SEL_4_7        (SYSBUS_HWBASE+0x04C)
+#define DMA_CHAN_SEL_8_11       (SYSBUS_HWBASE+0x050)
+
+#define MAX_DMA_CHANNELS        12
+#define DMAC_CHANNEL_STRIDE     0x20
+
+#define MAX_PERIPH_CHANNELS     63
+#define DMAC_PERIPH_MASK        0x3f
+
+unsigned int *get_dma_regs(int dmanr);
+
+
+#endif /* _CHORUS2_DMA_H_ */
diff --git a/arch/metag/include/asm/soc-chorus2/gpio.h b/arch/metag/include/asm/soc-chorus2/gpio.h
new file mode 100644
index 0000000..07ce0f6
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/gpio.h
@@ -0,0 +1,48 @@
+#ifndef _CHORUS2_GPIO_H_
+#define _CHORUS2_GPIO_H_
+
+/* Chorus 2 has 128 GPIOs, in 8 banks of 16. */
+#define NR_BUILTIN_GPIO 128
+
+#define GPIO_A_BASE     0
+#define GPIO_A_PIN(x)   (GPIO_A_BASE + (x))
+
+#define GPIO_B_BASE     16
+#define GPIO_B_PIN(x)   (GPIO_B_BASE + (x))
+
+#define GPIO_C_BASE     32
+#define GPIO_C_PIN(x)   (GPIO_C_BASE + (x))
+
+#define GPIO_D_BASE     48
+#define GPIO_D_PIN(x)   (GPIO_D_BASE + (x))
+
+#define GPIO_E_BASE     64
+#define GPIO_E_PIN(x)   (GPIO_E_BASE + (x))
+
+#define GPIO_F_BASE     80
+#define GPIO_F_PIN(x)   (GPIO_F_BASE + (x))
+
+#define GPIO_G_BASE     96
+#define GPIO_G_PIN(x)   (GPIO_G_BASE + (x))
+
+#define GPIO_H_BASE     112
+#define GPIO_H_PIN(x)   (GPIO_H_BASE + (x))
+
+#define GPIO_EXP_BASE   NR_BUILTIN_GPIO
+#define GPIO_EXP_PIN(x) (GPIO_EXP_BASE + (x))
+
+/* Forward declaration of struct irq_data, defined in linux/irq.h */
+struct irq_data;
+
+int chorus2_gpio_disable(unsigned int gpio);
+void chorus2_init_gpio(void);
+
+#define GPIO_POLARITY_LOW          0x0
+#define GPIO_POLARITY_HIGH         0x1
+
+#define GPIO_LEVEL_TRIGGERED       0x0
+#define GPIO_EDGE_TRIGGERED        0x1
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/metag/include/asm/soc-chorus2/pdp.h b/arch/metag/include/asm/soc-chorus2/pdp.h
new file mode 100644
index 0000000..b0a481e
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/pdp.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_SOC_CHORUS2_PDP_H
+#define __ASM_SOC_CHORUS2_PDP_H
+
+#define PDP_BASE_ADDR	0x02027000
+#define PDP_SIZE		0x800
+
+#define PDI_BASE_ADDR	0x0201B000
+#define PDI_SIZE		0x100
+
+#endif
diff --git a/arch/metag/include/asm/soc-chorus2/setup.h b/arch/metag/include/asm/soc-chorus2/setup.h
new file mode 100644
index 0000000..0a9e744
--- /dev/null
+++ b/arch/metag/include/asm/soc-chorus2/setup.h
@@ -0,0 +1,25 @@
+/*
+ * arch/metag/include/asm/soc-chorus2/setup.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_CHORUS2_SETUP_H__
+#define __SOC_CHORUS2_SETUP_H__
+
+/* for chorus2_meta_clocks */
+#include <asm/soc-chorus2/clock.h>
+
+void chorus2_init_irq(void);
+void chorus2_init_machine(void);
+
+#define CHORUS2_MACHINE_DEFAULTS		\
+	.clocks		= &chorus2_meta_clocks,	\
+	.init_irq	= chorus2_init_irq,	\
+	.init_machine	= chorus2_init_machine
+
+#endif /* __SOC_CHORUS2_SETUP_H__ */
diff --git a/arch/metag/include/asm/soc-tz1090/afe.h b/arch/metag/include/asm/soc-tz1090/afe.h
new file mode 100644
index 0000000..3e7ed2a
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/afe.h
@@ -0,0 +1,23 @@
+/*
+ * asm/soc-tz1090/afe.h
+ * A simple interface to the Comet AUX DAC.
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _ASM_METAG_SOC_TZ1090_AFE_H
+#define _ASM_METAG_SOC_TZ1090_AFE_H
+
+int comet_afe_auxdac_get(void);
+void comet_afe_auxdac_put(void);
+
+void comet_afe_auxdac_set_power(unsigned int power);
+unsigned int comet_afe_auxdac_get_power(void);
+void comet_afe_auxdac_set_standby(unsigned int standby);
+unsigned int comet_afe_auxdac_get_standby(void);
+int comet_afe_auxdac_set_source(unsigned int source);
+void comet_afe_auxdac_set_value(unsigned int value);
+int comet_afe_auxdac_get_value(void);
+
+#endif /* _ASM_METAG_SOC_TZ1090_AFE_H */
diff --git a/arch/metag/include/asm/soc-tz1090/audiocodec.h b/arch/metag/include/asm/soc-tz1090/audiocodec.h
new file mode 100644
index 0000000..390cc72
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/audiocodec.h
@@ -0,0 +1,123 @@
+#ifndef __SOC_TZ1090_AUDIOCODEC_H__
+#define __SOC_TZ1090_AUDIOCODEC_H__
+
+#define AUDIOCODEC_NUM_STEREOPAIRS 3
+
+/* CR_AUDIO_ADC_CTRL fields */
+#define AUDIO_PSCNTADC_L				(1 << 0)
+#define AUDIO_PSCNTADC_R				(1 << 1)
+#define AUDIO_GAINCTRL_MIC_P(x)			((x & 0x7) << 8)
+#define AUDIO_GAINCTRL_MIC_P_MASK		AUDIO_GAINCTRL_MIC_P(0x7)
+#define AUDIO_GAINCTRL_MIC_N(x)			((x & 0x7) << 12)
+#define AUDIO_GAINCTRL_MIC_N_MASK		AUDIO_GAINCTRL_MIC_N(0x7)
+#define AUDIO_GAINCTRL_LINE_L(x)		((x & 0x7) << 16)
+#define AUDIO_GAINCTRL_LINE_L_MASK		AUDIO_GAINCTRL_LINE_L(0x7)
+#define AUDIO_GAINCTRL_LINE_R(x)		((x & 0x7) << 20)
+#define AUDIO_GAINCTRL_LINE_R_MASK		AUDIO_GAINCTRL_LINE_R(0x7)
+#define AUDIO_GAINCTRL_ADC_L(x)			((x & 0x3) << 24)
+#define AUDIO_GAINCTRL_ADC_L_MASK		AUDIO_GAINCTRL_ADC_L(0x3)
+#define AUDIO_GAINCTRL_ADC_R(x)			((x & 0x3) << 28)
+#define AUDIO_GAINCTRL_ADC_R_MASK		AUDIO_GAINCTRL_ADC_R(0x3)
+
+/* CR_AUDIO_HP_CTRL fields */
+#define AUDIO_PSCNTHP_L			(1 << 0)
+#define AUDIO_PSCNTHP_R			(1 << 1)
+#define AUDIO_RSTB_DIG_IP		(1 << 5)
+#define AUDIO_RSTB_ANA_IP		(1 << 6)
+#define AUDIO_RSTB_DIG_OP		(1 << 7)
+#define AUDIO_RSTB_ANA_OP		(1 << 8)
+#define AUDIO_PSCNT_PWM_A		(1 << 9)
+#define AUDIO_PSCNT_PWM_B		(1 << 10)
+#define AUDIO_PSCNT_PWM_C		(1 << 11)
+#define AUDIO_PSCNT_PWM_D		(1 << 12)
+#define AUDIO_PSCNT_PWM_E		(1 << 13)
+#define AUDIO_PSCNT_PWM_F		(1 << 14)
+#define AUDIO_PGA_MODE_SHIFT            16
+#define AUDIO_PGA_MODE(x)		((x & 0x7) << AUDIO_PGA_MODE_SHIFT)
+#define AUDIO_PGA_MODE_MASK		AUDIO_PGA_MODE(0x7)
+#define AUDIO_RST_BG_IP			(1 << 20)
+#define AUDIO_PWDN_BG_IP		(1 << 21)
+#define AUDIO_RST_BG_OP			(1 << 22)
+#define AUDIO_PWDN_BG_OP		(1 << 23)
+#define AUDIO_I2S_EXT			(1 << 24)
+#define AUDIO_PWDN_PLL			(1 << 28)
+
+#define AUDIO_OUT_CONTROL_MAIN          (AUDIO_OUT_BASE_ADDR + 0x04)
+#define AUDIO_OUT_SOFT_RESET            (AUDIO_OUT_BASE_ADDR + 0x08)
+
+#define AUDIO_OUT_CM_ACTIVE_CHAN_SHIFT  13
+#define AUDIO_OUT_CM_USB_MODE           (1 << 12)
+#define AUDIO_OUT_CM_FRAME_SHIFT        7
+#define AUDIO_OUT_CM_FRAME_MASK         0x3
+#define AUDIO_OUT_CM_MASTER             (1 << 6)
+#define AUDIO_OUT_CM_ACLK_SHIFT         5
+#define AUDIO_OUT_CM_ACLK_MASK          0x1
+#define AUDIO_OUT_CM_BLRCLK_EN          (1 << 4)
+#define AUDIO_OUT_CM_LEFTPOL_0L1R       (0 << 3)
+#define AUDIO_OUT_CM_LEFTPOL_1L0R       (1 << 3)
+#define AUDIO_OUT_CM_BCLKPOL_RISING     (0 << 2)
+#define AUDIO_OUT_CM_BCLKPOL_FALLING    (1 << 2)
+#define AUDIO_OUT_CM_PACKED             (1 << 1)
+#define AUDIO_OUT_CM_ME                 (1 << 0)
+
+#define AUDIO_OUT_CC_FORMAT_SHIFT       4
+#define AUDIO_OUT_CC_FORMAT_MASK        (0xf << AUDIO_OUT_CC_FORMAT_SHIFT)
+#define AUDIO_OUT_CC_LEFT_JUST          (1 << 3)
+
+enum ac_lrchannel {
+	AUDIOCODEC_LEFT = 1,
+	AUDIOCODEC_RIGHT = 2,
+};
+
+enum ac_mute {
+	AUDIOCODEC_MUTE_NONE = 0x0,
+	AUDIOCODEC_MUTE_HARD = 0x1,
+	AUDIOCODEC_MUTE_90DB = 0x2,
+	AUDIOCODEC_MUTE_SQUAREWAVE = 0x4,
+};
+
+enum ac_samplewidth {
+	AUDIOCODEC_SAMPLEWIDTH_INVALID = -1,
+	AUDIOCODEC_SAMPLEWIDTH_16 = 3,
+	AUDIOCODEC_SAMPLEWIDTH_20 = 0,
+	AUDIOCODEC_SAMPLEWIDTH_24 = 1,
+	AUDIOCODEC_SAMPLEWIDTH_32 = 2,
+};
+
+enum ac_input {
+	AUDIOCODEC_INPUT_INVALID = -1,
+	AUDIOCODEC_INPUT_MIC = 0,
+	AUDIOCODEC_INPUT_LINE,
+	AUDIOCODEC_INPUT_IPOD,
+	AUDIOCODEC_INPUT_MIC_DIFFERENTIAL,
+};
+
+enum ac_frame {
+	AUDIOCODEC_FRAME_1616 = 0,
+	AUDIOCODEC_FRAME_3232 = 2,
+};
+
+enum ac_clock {
+	AUDIOCODEC_CLOCK_256FS = 0,
+	AUDIOCODEC_CLOCK_384FS = 1,
+};
+
+enum ac_i2sclock {
+	AUDIOCODEC_I2SCLOCK_INVALID = -1,
+	AUDIOCODEC_I2SCLOCK_DISABLED = 0,
+	AUDIOCODEC_I2SCLOCK_XTAL1,
+	AUDIOCODEC_I2SCLOCK_XTAL2,
+	AUDIOCODEC_I2SCLOCK_SYS_UNDELETED,
+	AUDIOCODEC_I2SCLOCK_ADC_PLL,
+};
+
+enum ac_preset {
+	AUDIOCODEC_PRESET_NONE = 0x0,
+	AUDIOCODEC_PRESET_PHONO = 0x1,
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	AUDIOCODEC_PRESET_HDMI = 0x2,
+#endif
+};
+
+#endif /* __SOC_TZ1090_AUDIOCODEC_H__ */
+
diff --git a/arch/metag/include/asm/soc-tz1090/bootprot.h b/arch/metag/include/asm/soc-tz1090/bootprot.h
new file mode 100644
index 0000000..cfce035
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/bootprot.h
@@ -0,0 +1,18 @@
+/*
+ * bootprot.h
+ *
+ * Boot protocol abstraction API (DFU, resume from suspend-to-RAM etc).
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ */
+
+#ifndef _TZ1090_BOOTPROT_H_
+#define _TZ1090_BOOTPROT_H_
+
+void bootprot_normal_boot(unsigned long swprot0);
+void bootprot_suspend_ram(unsigned long swprot0,
+			  int (*resume)(void *),
+			  void *data);
+void bootprot_resume_ram(unsigned long swprot0);
+
+#endif /* _TZ1090_BOOTPROT_H_ */
diff --git a/arch/metag/include/asm/soc-tz1090/clock.h b/arch/metag/include/asm/soc-tz1090/clock.h
new file mode 100644
index 0000000..50d3858
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/clock.h
@@ -0,0 +1,57 @@
+/*
+ * clock.h
+ *
+ * Copyright (C) 2009 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_CLOCK_H_
+#define _TZ1090_CLOCK_H_
+
+/*
+ * Secondary clock input - used if we can't synthesise all frequencies from 1
+ * clock
+ */
+#define COMET_XTAL2	12000000
+
+/* Tertiary clock input - used for real time clock */
+#define COMET_XTAL3	32768
+
+unsigned long get_xtal1(void);
+unsigned long get_xtal2(void);
+unsigned long get_xtal3(void);
+unsigned long get_32kclock(void);
+unsigned long set_32kclock_src(int xtal1, unsigned int xtal1_div);
+unsigned long get_sysclock_x2_undeleted(void);
+unsigned long get_sysclock_undeleted(void);
+unsigned long get_sdhostclock(void);
+unsigned long set_sdhostclock(unsigned long f);
+unsigned long get_uartclock(void);
+unsigned long set_uartclock(unsigned long f);
+unsigned long get_ddrclock(void);
+
+void pix_clk_set_limits(unsigned long min, unsigned long max);
+
+/*
+ * The 32k clock should be at 32.768KHz, and is used by the PDC (RTC, IRC, WDT).
+ * This can change unexpectedly when powering down due to a hardware quirk, so
+ * drivers that use it need to be able to handle frequency changes.
+ */
+
+#define CLK32K_DESIRED_FREQUENCY	32768
+
+extern unsigned long clk32k_bootfreq;
+
+#define CLK32K_CHANGE_FREQUENCY 0x0001
+
+struct clk32k_change_freq {
+	unsigned long old_freq;
+	unsigned long new_freq;
+};
+
+struct notifier_block;
+
+int clk32k_register_notify(struct notifier_block *nb);
+int clk32k_unregister_notify(struct notifier_block *nb);
+
+#endif /* _TZ1090_CLOCK_H_ */
diff --git a/arch/metag/include/asm/soc-tz1090/ddr-mt47h.inc b/arch/metag/include/asm/soc-tz1090/ddr-mt47h.inc
new file mode 100644
index 0000000..7b1bb5c
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/ddr-mt47h.inc
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ * DDR chip specific definitions for Micron DDR2 512Mbit SDRAMs:
+ * MT47H128M4	- 32Mbit*4*4
+ * MT47H64M8	- 16Mbit*8*4
+ * MT47H32M16	- 8Mbit*16*4
+ */
+
+#ifndef _TZ1090_DDR_MT47H_H_
+#define _TZ1090_DDR_MT47H_H_
+
+/* Extended mode register */
+
+/* On-Die Termination (ODT) Effective Resistance */
+#define DDR_EMR_ODT		0x0044
+#define DDR_EMR_ODT_DISABLED	0x0000
+#define DDR_EMR_ODT_75_OHM	0x0004
+#define DDR_EMR_ODT_150_OHM	0x0040
+#define DDR_EMR_ODT_50_OHM	0x0044
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/defs.h b/arch/metag/include/asm/soc-tz1090/defs.h
new file mode 100644
index 0000000..4241405
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/defs.h
@@ -0,0 +1,472 @@
+/*
+ * asm/soc-tz1090/defs.h
+ *
+ *  Useful defines for TZ1090 SoC Configuration
+ *
+ *  Copyright (C) 2009 Imagination Technologies Ltd.
+ */
+
+#ifndef _TZ1090_DEFS_H_
+#define _TZ1090_DEFS_H_
+
+#ifndef __ASSEMBLY__
+#include <asm/soc-tz1090/irqnums.h>
+#endif
+
+/* Peripheral Addresses */
+#define CR_PERIP_BASE_ADDR		0x02004000
+#define CR_PERIP_SIZE			0x400
+#define CR_PERIP_SRST			(CR_PERIP_BASE_ADDR + 0x00)
+#define CR_PERIP_USB_PHY_PON_RESET_BIT	(1 << 18)
+#define CR_PERIP_USB_PHY_PORTRESET_BIT	(1 << 17)
+#define CR_PERIP_RESET_CFG		(CR_PERIP_BASE_ADDR + 0x04)
+#define CR_PERIP_RESET_CFG_FXTAL_BITS	0x00000F00
+#define CR_PERIP_RESET_CFG_FXTAL_SHIFT	8
+#define CR_PERIP_DMA_ROUTE_SEL2_REG 	(CR_PERIP_BASE_ADDR + 0x88)
+#define CR_PERIP_USB_PHY_TUNE_CONTROL	(CR_PERIP_BASE_ADDR + 0x204)
+#define CR_PERIP_USB_VMT_VBUSVALID	0x10000000
+#define CR_PERIP_USB_PHY_STRAP_CONTROL	(CR_PERIP_BASE_ADDR + 0x208)
+#define CR_PERIP_USB_ISO_PHY_BIT	(1 << 12)
+#define CR_PERIP_USB_REFCLKSEL_BITS	0x00000030
+#define CR_PERIP_USB_REFCLKSEL_SHIFT	4
+#define CR_PERIP_USB_REFCLKDIV_BITS	0x00000003
+#define CR_PERIP_USB_REFCLKDIV_SHIFT	0
+#define CR_PERIP_SDHOST_DMA_RDATA	(CR_PERIP_BASE_ADDR + 0x220)
+#define CR_PERIP_SDHOST_DMA_WDATA	(CR_PERIP_BASE_ADDR + 0x224)
+#define CR_PERIP_RNG_CTRL		(CR_PERIP_BASE_ADDR + 0x280)
+#define CR_PERIP_RNG_SINGLE_STEP_BITS	0x00000040	/* not in ES1 */
+#define CR_PERIP_RNG_RO_ALT_BITS	0x00000020
+#define CR_PERIP_RNG_RNG_ALT_BITS	0x00000010
+#define CR_PERIP_RNG_PSEED_DIS_BITS	0x00000008
+#define CR_PERIP_RNG_RSEED_DIS_BITS	0x00000004
+#define CR_PERIP_RNG_RING_DIS_BITS	0x00000002
+#define CR_PERIP_RNG_START_BITS		0x00000001
+#define CR_PERIP_RNG_SEED		(CR_PERIP_BASE_ADDR + 0x284)
+#define CR_PERIP_RNG_NUM		(CR_PERIP_BASE_ADDR + 0x288)
+#define CR_COMET_CORE_REV		(CR_PERIP_BASE_ADDR + 0x2D0)
+#define CR_COMET_CORE_REV_MAJOR_BITS	0x00FF0000
+#define CR_COMET_CORE_REV_MAJOR_SHIFT	16
+#define CR_COMET_CORE_REV_MAJOR_ES1	(0x00 << CR_COMET_CORE_REV_MAJOR_SHIFT)
+#define CR_COMET_CORE_REV_MAJOR_PS1	(0x01 << CR_COMET_CORE_REV_MAJOR_SHIFT)
+
+
+#define SCB0_BASE_ADDR		0x02004400
+#define SCB0_SIZE		0x1ff
+
+#define SCB1_BASE_ADDR		0x02004600
+#define SCB1_SIZE		0x1ff
+
+#define SCB2_BASE_ADDR		0x02004800
+#define SCB2_SIZE		0x1ff
+
+#define SDIO_DEV_BASE_ADDR	0x02004A00
+#define SDIO_DEV_SIZE		0xff
+
+#define UART0_BASE_ADDR		0x02004B00
+#define UART0_SIZE		0xff
+
+#define UART1_BASE_ADDR		0x02004C00
+#define UART1_SIZE		0xff
+
+#define SPI_MS_BASE_ADDR	0x02004D00
+#define SPI_MS_SIZE		0xff
+
+#define SPI_MASTER_BASE_ADDR	0x02004E00
+#define SPI_MASTER_SIZE		0xff
+
+#define AUDIO_OUT_BASE_ADDR	0x02004F00
+#define AUDIO_OUT_SIZE		0xff
+
+#define AUDIO_IN_BASE_ADDR	0x02005000
+#define AUDIO_IN_SIZE		0xff
+
+#define LCD_BASE_ADDR		0x02005200
+#define LCD_SIZE		0xff
+
+#define EVENT_TS_BASE_ADDR	0x02005300
+#define EVENT_TS_SIZE		0xff
+
+#define SDIO_HOST_BASE_ADDR	0x02005400
+#define SDIO_HOST_SIZE		0x1ff
+
+#define GPIO_CRTOP_BASE_ADDR	0x02005800
+#define GPIO_CRTOP_SIZE		0x1ff
+#define CR_PADS_GPIO_DIR0	(GPIO_CRTOP_BASE_ADDR + 0x00)
+#define CR_PADS_GPIO_DIR1	(GPIO_CRTOP_BASE_ADDR + 0x04)
+#define CR_PADS_GPIO_DIR2	(GPIO_CRTOP_BASE_ADDR + 0x08)
+#define CR_PADS_GPIO_SELECT0	(GPIO_CRTOP_BASE_ADDR + 0x10)
+#define CR_PADS_GPIO_SELECT1	(GPIO_CRTOP_BASE_ADDR + 0x14)
+#define CR_PADS_GPIO_SELECT2	(GPIO_CRTOP_BASE_ADDR + 0x18)
+#define CR_PADS_DBGEN		(GPIO_CRTOP_BASE_ADDR + 0x1c)
+#define CR_PADS_IRQ_PLRT0	(GPIO_CRTOP_BASE_ADDR + 0x20)
+#define CR_PADS_IRQ_PLRT1	(GPIO_CRTOP_BASE_ADDR + 0x24)
+#define CR_PADS_IRQ_PLRT2	(GPIO_CRTOP_BASE_ADDR + 0x28)
+#define CR_PADS_IRQ_TYPE0	(GPIO_CRTOP_BASE_ADDR + 0x30)
+#define CR_PADS_IRQ_TYPE1	(GPIO_CRTOP_BASE_ADDR + 0x34)
+#define CR_PADS_IRQ_TYPE2	(GPIO_CRTOP_BASE_ADDR + 0x38)
+#define CR_PADS_IRQ_EN0		(GPIO_CRTOP_BASE_ADDR + 0x40)
+#define CR_PADS_IRQ_EN1		(GPIO_CRTOP_BASE_ADDR + 0x44)
+#define CR_PADS_IRQ_EN2		(GPIO_CRTOP_BASE_ADDR + 0x48)
+#define CR_PADS_IRQ_STS0	(GPIO_CRTOP_BASE_ADDR + 0x50)
+#define CR_PADS_IRQ_STS1	(GPIO_CRTOP_BASE_ADDR + 0x54)
+#define CR_PADS_IRQ_STS2	(GPIO_CRTOP_BASE_ADDR + 0x58)
+#define CR_PADS_GPIO_BIT_EN0	(GPIO_CRTOP_BASE_ADDR + 0x60)
+#define CR_PADS_GPIO_BIT_EN1	(GPIO_CRTOP_BASE_ADDR + 0x64)
+#define CR_PADS_GPIO_BIT_EN2	(GPIO_CRTOP_BASE_ADDR + 0x68)
+#define CR_PADS_GPIO_DIN0	(GPIO_CRTOP_BASE_ADDR + 0x70)
+#define CR_PADS_GPIO_DIN1	(GPIO_CRTOP_BASE_ADDR + 0x74)
+#define CR_PADS_GPIO_DIN2	(GPIO_CRTOP_BASE_ADDR + 0x78)
+#define CR_PADS_GPIO_DOUT0	(GPIO_CRTOP_BASE_ADDR + 0x80)
+#define CR_PADS_GPIO_DOUT1	(GPIO_CRTOP_BASE_ADDR + 0x84)
+#define CR_PADS_GPIO_DOUT2	(GPIO_CRTOP_BASE_ADDR + 0x88)
+#define CR_PADS_SCHMITT_EN0	(GPIO_CRTOP_BASE_ADDR + 0x90)
+#define CR_PADS_DBGSEL0		(GPIO_CRTOP_BASE_ADDR + 0x94)
+#define CR_PADS_DBGSEL1		(GPIO_CRTOP_BASE_ADDR + 0x98)
+#define CR_PADS_DBGSEL2		(GPIO_CRTOP_BASE_ADDR + 0x9c)
+#define CR_PADS_PU_PD_0		(GPIO_CRTOP_BASE_ADDR + 0xa0)
+#define CR_PADS_PU_PD_1		(GPIO_CRTOP_BASE_ADDR + 0xa4)
+#define CR_PADS_PU_PD_2		(GPIO_CRTOP_BASE_ADDR + 0xa8)
+#define CR_PADS_PU_PD_3		(GPIO_CRTOP_BASE_ADDR + 0xac)
+#define CR_PADS_PU_PD_4		(GPIO_CRTOP_BASE_ADDR + 0xb0)
+#define CR_PADS_PU_PD_5		(GPIO_CRTOP_BASE_ADDR + 0xb4)
+#define CR_PADS_PU_PD_6		(GPIO_CRTOP_BASE_ADDR + 0xb8)
+/* (duplicate CR_PADS_PU_PD_6 definition removed) */
+#define CR_PADS_SR_0		(GPIO_CRTOP_BASE_ADDR + 0xc0)
+#define CR_PADS_DBGSEL4		(GPIO_CRTOP_BASE_ADDR + 0xc4)
+#define CR_PADS_DBGSEL5		(GPIO_CRTOP_BASE_ADDR + 0xc8)
+#define CR_PADS_DBGSEL6		(GPIO_CRTOP_BASE_ADDR + 0xcc)
+#define CR_PADS_DR_0		(GPIO_CRTOP_BASE_ADDR + 0xd0)
+#define GPIO_SEL2_SDH_CLKIN	0x20000000 /*bit 29*/
+#define GPIO_SEL2_TFT		0x00003fff /*bits 13-0*/
+#define GPIO_SEL2_RF		0x3fffc000 /*bits 29-14; NOTE(review): bit 29 overlaps SDH_CLKIN - confirm intended mask*/
+#define CR_DDR_CTRL		(GPIO_CRTOP_BASE_ADDR + 0xe4)
+#define CR_DDR_POWERDOWN_BIT	2
+/* (duplicate CR_PADS_GPIO_DOUT0 definition removed; defined above at 0x80) */
+/* (duplicate CR_PADS_GPIO_DOUT1 definition removed; defined above at 0x84) */
+/* (duplicate CR_PADS_GPIO_DOUT2 definition removed; defined above at 0x88) */
+#define CR_IF_CTL0		(GPIO_CRTOP_BASE_ADDR + 0xe0)
+#define CR_PADS_SDIO_CTL_BIT	16
+
+#define PDC_BASE_ADDR		0x02006000
+#define PDC_SIZE		0xfff
+#define PDC_IR_BASE_ADDR	0x02006200
+#define PDC_IR_SIZE		0xff
+#define PDC_SOC_BOOTSTRAP_SYS_CLKSEL	0x00000002
+#define PDC_SOC_BOOTSTRAP_SAFE_MODE	0x00000001
+#define PDC_SOC_GPIO0_RTC_SW		0x40000000
+#define PDC_SOC_GPIO0_XTAL1_DIV		0x07ff0000
+#define PDC_SOC_GPIO0_XTAL1_DIV_SHIFT	16
+#define PDC_SOC_GPIO2_PU_PD_GPIO0	0x00030000
+#define PDC_SOC_GPIO2_PU_PD_GPIO0_SHIFT	16
+#define PDC_SOC_GPIO2_XTAL3_BYPASS	0x00000400
+#define PDC_SOC_GPIO2_XTAL3_EN		0x00000200
+#define PDC_SOC_GPIO2_XTAL2_BYPASS	0x00000100
+#define PDC_SOC_GPIO2_XTAL2_EN		0x00000080
+#define PDC_SOC_GPIO2_XTAL1_BYPASS	0x00000040
+#define PDC_SOC_GPIO2_XTAL1_EN		0x00000020
+
+#define PDP_BASE_ADDR		0x02008000
+#define PDP_SIZE		0x7ff
+
+#define PDI_BASE_ADDR		0x02008800
+#define PDI_SIZE		0xff
+
+#define TWOD_BASE_ADDR		0x02008900
+#define TWOD_SIZE		0xff
+#define TWOD_SLAVE_PORT_OFFSET	0x000
+#define TWOD_SLAVE_PORT		(TWOD_BASE_ADDR + TWOD_SLAVE_PORT_OFFSET)
+
+#define HEP_BASE_ADDR		0x02008c00
+#define HEP_SIZE		0x3ff
+#define CR_HEP_SRST_OFFSET	0x000
+#define CR_HEP_SRST		(HEP_BASE_ADDR + CR_HEP_SRST_OFFSET)
+#define CR_2D_SOFT_RESET			0x00000002
+#define CR_2D_SOFT_RESET_SHIFT			1
+#define CR_PDP_PDI_SOFT_RESET			0x00000001
+#define CR_PDP_PDI_SOFT_RESET_SHIFT		0
+#define CR_HEP_CLK_EN_OFFSET	0x004
+#define CR_HEP_CLK_EN		(HEP_BASE_ADDR + CR_HEP_CLK_EN_OFFSET)
+#define CR_PDP_PDI_CLK_EN			0x00000004
+#define CR_DDR_CLK_EN				0x00000002
+#define CR_2D_CLK_EN				0x00000001
+#define CR_2D_SETTINGS_OFFSET	0x008
+#define CR_2D_SETTINGS		(HEP_BASE_ADDR + CR_2D_SETTINGS_OFFSET)
+#define CR_2D_CLK_RATIO				0x00000006
+#define CR_2D_CLK_RATIO_SHIFT			1
+#define CR_2D_CLKGATESTATUS			0x00000001
+#define CR_2D_STATUS_OFFSET	0x00c
+#define CR_2D_BLIT_STATUS_COMPLETE		0x0ffffff0
+#define CR_2D_BLIT_STATUS_COMPLETE_SHIFT	4
+#define CR_2D_BLIT_STATUS_BUSY			0x00000004
+#define CR_2D_BLIT_STATUS_BUSY_SHIFT		2
+#define CR_2D_IDLE				0x00000002
+#define CR_2D_IDLE_SHIFT			1
+#define CR_PDP_MEM_BASE_ADDR_OFFSET	0x020
+#define CR_PDP_MEM_BASE_ADDR	(HEP_BASE_ADDR + CR_PDP_MEM_BASE_ADDR_OFFSET)
+#define CR_2D_MEM_BASE_ADDR_OFFSET	0x024
+#define CR_SOCIF_TIMEOUT_OFFSET	0x02c
+#define CR_SOCIF_TIMEOUT_ADDR	(HEP_BASE_ADDR + CR_SOCIF_TIMEOUT_OFFSET)
+#define CR_SOCIF_FLUSH			0x2000
+#define CR_SOCIF_TIMEOUT_ENABLE		0x1000
+#define CR_SOCIF_TIMEOUT_PERIOD		0x0fff
+#define CR_SOCIF_TIMEOUT_PERIOD_SHIFT	0
+#define CR_SOCIF_STATUS_OFFSET	0x030
+#define CR_SOCIF_STATUS_ADDR	(HEP_BASE_ADDR + CR_SOCIF_STATUS_OFFSET)
+#define CR_SOCIF_STATUS			0x1
+#define CR_DDRC_EMR_OFFSET		0x1bc
+#define CR_DDRC_EMR		(HEP_BASE_ADDR + CR_DDRC_EMR_OFFSET)
+#define CR_DDRC_SELFREF_EN_OFFSET	0x248
+#define CR_DDRC_SELFREF_EN	(HEP_BASE_ADDR + CR_DDRC_SELFREF_EN_OFFSET)
+#define CR_DDRC_OPERATING_MODE_OFFSET	0x26c
+#define CR_DDRC_OPERATING_MODE	(HEP_BASE_ADDR + CR_DDRC_OPERATING_MODE_OFFSET)
+#define CR_DDRC_OPERATING_MODE_SELFREF	0x3
+/* CR_DDRC_PADS not in ES1 */
+#define CR_DDRC_PADS_OFFSET		0x2ac
+#define CR_DDRC_PADS		(HEP_BASE_ADDR + CR_DDRC_PADS_OFFSET)
+#define CR_DDRC_PADS_MDDR_E2		0x10
+#define CR_DDRC_PADS_MDDR_E1		0x08
+#define CR_DDRC_PADS_ODTEN		0x04
+#define CR_DDRC_PADS_ODTSEL		0x02
+#define CR_DDRC_PADS_ODT		(CR_DDRC_PADS_ODTEN | CR_DDRC_PADS_ODTSEL)
+#define CR_DDRC_PDAS_ODT_DISABLED	0	/* sic: "PDAS" - probable typo for "PADS"; kept for source compatibility */
+#define CR_DDRC_PADS_ODT_75_OHM		CR_DDRC_PADS_ODTEN
+#define CR_DDRC_PADS_ODT_150_OHM	CR_DDRC_PADS_ODTSEL
+#define CR_DDRC_PDAS_ODT_50_OHM		(CR_DDRC_PADS_ODTEN | CR_DDRC_PADS_ODTSEL)	/* sic: "PDAS" typo, see above */
+#define CR_DDRC_PADS_DRIVESEL		0x01
+
+#define SYS_INF_BASE_ADDR	0x02009000
+#define SYS_INF_SIZE		0xff
+
+#define MDC_BASE_ADDR		0x0200c000
+#define MDC_SIZE		0xfff
+
+#define UCC0_HOST_BASE_ADDR	0x02010000
+#define UCC0_HOST_SIZE		0x2fff
+
+#define UCC0_MC_BASE_ADDR	0x02013000
+#define UCC0_MC_SIZE		0xfff
+#define UCC0_MC_REQ_MAX		8
+
+#define UCC1_HOST_BASE_ADDR	0x02014000
+#define UCC1_HOST_SIZE		0x2fff
+
+#define UCC1_MC_BASE_ADDR	0x02017000
+#define UCC1_MC_SIZE		0xfff
+#define UCC1_MC_REQ_MAX		8
+
+#define USB_BASE_ADDR		0x02020000
+#define USB_SIZE		0xefff
+
+/*TOP LEVEL Clock Control registers*/
+#define CR_TOP_CLKSWITCH	(GPIO_CRTOP_BASE_ADDR + 0x0108)
+#define CR_TOP_CLKENAB		(GPIO_CRTOP_BASE_ADDR + 0x010C)
+#define CR_TOP_CLKDELETE	(GPIO_CRTOP_BASE_ADDR + 0x0110)
+#define CR_TOP_SYSCLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0114)
+#define CR_TOP_META_CLKDIV	(GPIO_CRTOP_BASE_ADDR + 0x0118)
+#define CR_TOP_META_CLKDELETE	(GPIO_CRTOP_BASE_ADDR + 0x011c)
+#define CR_TOP_AFE_DIV		(GPIO_CRTOP_BASE_ADDR + 0x0120)
+#define CR_TOP_ADC_PLLDIV	(GPIO_CRTOP_BASE_ADDR + 0x0124)
+#define CR_TOP_UART_CLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0128)
+#define CR_TOP_PDMCK_CTL	(GPIO_CRTOP_BASE_ADDR + 0x0130)
+#define CR_TOP_SPICLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0134)
+#define CR_TOP_SPI1CLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0138)
+#define CR_TOP_I2SCLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x013C)
+#define CR_TOP_USB_PLLDIV	(GPIO_CRTOP_BASE_ADDR + 0x0140)
+#define CR_TOP_SDHOSTCLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0144)
+#define CR_TOP_SYSPLL_CTL0	(GPIO_CRTOP_BASE_ADDR + 0x0150)
+#define CR_TOP_SYSPLL_CTL1	(GPIO_CRTOP_BASE_ADDR + 0x0154)
+#define CR_TOP_ADCPLL_CTL0	(GPIO_CRTOP_BASE_ADDR + 0x0158)
+#define CR_TOP_ADCPLL_CTL1	(GPIO_CRTOP_BASE_ADDR + 0x015C)
+#define CR_TOP_CLKSWITCH2	(GPIO_CRTOP_BASE_ADDR + 0x0188)
+#define CR_TOP_CLKENAB2		(GPIO_CRTOP_BASE_ADDR + 0x018C)
+#define CR_TOP_I2S_DIV2		(GPIO_CRTOP_BASE_ADDR + 0x0190)
+#define CR_TOP_PIXEL_CLK_DIV	(GPIO_CRTOP_BASE_ADDR + 0x0198)
+#define CR_TOP_DDR_CLKDIV	(GPIO_CRTOP_BASE_ADDR + 0x01AC)
+#define CR_TOP_AUDGTI_CTRL	(GPIO_CRTOP_BASE_ADDR + 0x01B8)
+#define CR_TOP_AUDGTI_DOUT	(GPIO_CRTOP_BASE_ADDR + 0x01BC)
+
+/* AFE control registers */
+#define CR_AFE_CTRL		(GPIO_CRTOP_BASE_ADDR + 0x01C0)
+#define CR_AFE_AUXDACPD				(1 << 6)
+#define CR_AFE_AUXDACSTBY			(1 << 5)
+#define CR_AFE_AUXDAC		(GPIO_CRTOP_BASE_ADDR + 0x01C4)
+#define CR_AFE_AUXDACSEL			0x00030000
+#define CR_AFE_AUXDACSEL_SHIFT			16
+#define CR_AFE_AUXDACSEL_UCC0_EXT_CTL_1			2
+#define CR_AFE_AUXDACSEL_UCC0_EXT_GAIN_1		1
+#define CR_AFE_AUXDACSEL_CR_AFE_AUXDACIN		0
+#define CR_AFE_AUXDACIN				0x000000FF
+#define CR_AFE_AUXDACIN_SHIFT			0
+
+/* Audio control registers */
+#define CR_AUDIO_ADC_CTRL	(GPIO_CRTOP_BASE_ADDR + 0x01E0)
+#define CR_AUDIO_PSCNTADC_L_SHIFT		0
+#define CR_AUDIO_PSCNTADC_L_MASK		0x1
+#define CR_AUDIO_PSCNTADC_R_SHIFT		1
+#define CR_AUDIO_PSCNTADC_R_MASK		0x1
+/* (duplicate CR_AUDIO_PSCNTADC_L_SHIFT definition removed) */
+/* (duplicate CR_AUDIO_PSCNTADC_L_MASK definition removed) */
+#define CR_AUDIO_GAINCTRL_MIC_P_SHIFT		8
+#define CR_AUDIO_GAINCTRL_MIC_P_MASK		0x7
+#define CR_AUDIO_GAINCTRL_MIC_N_SHIFT		12
+#define CR_AUDIO_GAINCTRL_MIC_N_MASK		0x7
+#define CR_AUDIO_GAINCTRL_LINE_L_SHIFT		16
+#define CR_AUDIO_GAINCTRL_LINE_L_MASK		0x7
+#define CR_AUDIO_GAINCTRL_LINE_R_SHIFT		20
+#define CR_AUDIO_GAINCTRL_LINE_R_MASK		0x7
+#define CR_AUDIO_GAINCTRL_ADC_L_SHIFT		24
+#define CR_AUDIO_GAINCTRL_ADC_L_MASK		0x7
+#define CR_AUDIO_GAINCTRL_ADC_R_SHIFT		28
+#define CR_AUDIO_GAINCTRL_ADC_R_MASK		0x7
+
+#define CR_AUDIO_HP_CTRL	(GPIO_CRTOP_BASE_ADDR + 0x01E4)
+#define CR_AUDIO_PSCNTHP_L_SHIFT		0
+#define CR_AUDIO_PSCNTHP_L_MASK			0x1
+#define CR_AUDIO_PSCNTHP_R_SHIFT		1
+#define CR_AUDIO_PSCNTHP_R_MASK			0x1
+#define CR_AUDIO_RSTB_DIG_IP_SHIFT		5
+#define CR_AUDIO_RSTB_DIG_IP_MASK		0x1
+#define CR_AUDIO_RSTB_ANA_IP_SHIFT		6
+#define CR_AUDIO_RSTB_ANA_IP_MASK		0x1
+#define CR_AUDIO_RSTB_DIG_OP_SHIFT		7
+#define CR_AUDIO_RSTB_DIG_OP_MASK		0x1
+#define CR_AUDIO_RSTB_ANA_OP_SHIFT		8
+#define CR_AUDIO_RSTB_ANA_OP_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_A_SHIFT		9
+#define CR_AUDIO_PSCNT_PWM_A_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_B_SHIFT		10
+#define CR_AUDIO_PSCNT_PWM_B_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_C_SHIFT		11
+#define CR_AUDIO_PSCNT_PWM_C_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_D_SHIFT		12
+#define CR_AUDIO_PSCNT_PWM_D_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_E_SHIFT		13
+#define CR_AUDIO_PSCNT_PWM_E_MASK		0x1
+#define CR_AUDIO_PSCNT_PWM_F_SHIFT		14
+#define CR_AUDIO_PSCNT_PWM_F_MASK		0x1
+#define CR_AUDIO_PGA_MODE_SHIFT			16
+#define CR_AUDIO_PGA_MODE_MASK			0x7
+#define CR_AUDIO_RST_BG_IP_SHIFT		20
+#define CR_AUDIO_RST_BG_IP_MASK			0x1
+#define CR_AUDIO_PWDN_BG_IP_SHIFT		21
+#define CR_AUDIO_PWDN_BG_IP_MASK		0x1
+#define CR_AUDIO_RST_BG_OP_SHIFT		22
+#define CR_AUDIO_RST_BG_OP_MASK			0x1
+#define CR_AUDIO_PWDN_BG_OP_SHIFT		23
+#define CR_AUDIO_PWDN_BG_OP_MASK		0x1
+#define CR_AUDIO_I2S_EXT_SHIFT			24
+#define CR_AUDIO_I2S_EXT_MASK			0x1
+#define CR_AUDIO_PWDN_PLL_SHIFT			28
+#define CR_AUDIO_PWDN_PLL_MASK			0x1
+
+#define CR_AUDIO_GAIN0		(GPIO_CRTOP_BASE_ADDR + 0x01e8)
+#define CR_AUDIO_GAIN1		(GPIO_CRTOP_BASE_ADDR + 0x01ec)
+#define CR_AUDIO_MUTE		(GPIO_CRTOP_BASE_ADDR + 0x01f4)
+
+
+/* CR_TOP_CLKSWITCH */
+#define CR_TOP_USBPLL_CLK_3_SW_BIT	29
+#define CR_TOP_USBPLL_CLK_2_SW_BIT	28
+#define CR_TOP_USBPLL_CLK_1_SW_BIT	27
+#define CR_TOP_USBPLL_CLK_0_SW_BIT	26
+#define CR_TOP_ADCPLL_CLK_0_SW_BIT	22
+#define CR_TOP_UART_SW_BIT		14
+#define CR_TOP_SCB_SW_BIT		13
+#define CR_TOP_I2S_1_SW_BIT		12
+#define CR_TOP_I2S_0_SW_BIT		11
+#define CR_TOP_I2S_2_SW_BIT		10
+#define CR_TOP_SYSCLK1_SW_BIT		1
+/* CR_TOP_CLKENAB */
+#ifdef CONFIG_SOC_COMET_ES1
+#define CR_TOP_USBPLL_CLK_3_EN_BIT	29
+#endif
+#define CR_TOP_USB_CLK_1_EN_BIT		19
+#define CR_TOP_UART_EN_BIT		14
+#define CR_TOP_SCB_EN_BIT		13
+#define CR_TOP_I2S_1_EN_BIT		12
+#define CR_TOP_CLKOUT1_3_EN_BIT		9
+#define CR_TOP_CLKOUT0_3_EN_BIT		5
+/* CR_TOP_SYSPLL_CTL1 */
+#define CR_TOP_SYSPLL_PWRDN_BIT		24
+/* CR_TOP_ADCPLL_CTL0 */
+#define CR_TOP_ADCPLL_BWADJ_SHIFT	20
+#define CR_TOP_ADCPLL_BWADJ_MASK	0xfff
+#define CR_TOP_ADCPLL_CLKF_SHIFT	4
+#define CR_TOP_ADCPLL_CLKF_MASK		0x1fff
+#define CR_TOP_ADCPLL_CLKOD_SHIFT	0
+#define CR_TOP_ADCPLL_CLKOD_MASK	0x7
+/* CR_TOP_ADCPLL_CTL1 */
+#define CR_TOP_ADCPLL_RESET_BIT		28
+#define CR_TOP_ADCPLL_FASTEN_BIT	27
+#define CR_TOP_ADCPLL_ENSAT_BIT		26
+#define CR_TOP_ADCPLL_BYPASS_BIT	25
+#define CR_TOP_ADCPLL_PWRDN_BIT		24
+#define CR_TOP_ADCPLL_CLKR_SHIFT	0
+#define CR_TOP_ADCPLL_CLKR_MASK		0x3f
+/* CR_TOP_CLKSWITCH2 */
+#define CR_TOP_PIXEL_CLK_0_SW_BIT	0
+#define CR_TOP_PIXEL_CLK_1_SW_BIT	1
+#define CR_TOP_PIXEL_CLK_2_SW_BIT	2
+/* CR_TOP_CLKENAB2 */
+#define CR_TOP_PIXEL_CLK_2_EN_BIT	2
+
+/*Peripheral Clock control*/
+#define CR_PERIP_CLK_EN			(CR_PERIP_BASE_ADDR + 0x0010)
+#define CR_PERIP_USB_CLK_EN_BIT		13
+#define CR_PERIP_SDHOST_CLK_EN_BIT	12
+#define CR_PERIP_LCD_CLK_EN_BIT		11
+#define CR_PERIP_I2SIN_CLK_EN_BIT	10
+#define CR_PERIP_I2SOUT_CLK_EN_BIT	9
+#define CR_PERIP_SPIM1_CLK_EN_BIT	8
+#define CR_PERIP_SPIS0_CLK_EN_BIT	7
+#define CR_PERIP_SPIM0_CLK_EN_BIT	6
+#define CR_PERIP_UART1_CLK_EN_BIT	5
+#define CR_PERIP_UART0_CLK_EN_BIT	4
+#define CR_PERIP_SDIO_CLK_EN_BIT	3
+#define CR_PERIP_I2C2_CLK_EN_BIT	2
+#define CR_PERIP_I2C1_CLK_EN_BIT	1
+#define CR_PERIP_I2C0_CLK_EN_BIT	0
+
+
+/* Peripheral DMA Channel Mux values.
+ * Comet has a top-level mux which allows assigning any peripheral to
+ * any DMA channel.
+ */
+
+#define DMA_MUX_SDIO_DEV_WR	1
+#define DMA_MUX_SDIO_DEV_RD	2
+#define DMA_MUX_SPI_MASTER0_WR	3
+#define DMA_MUX_SPI_MASTER0_RD	4
+#define DMA_MUX_SPI_SLAVE_WR	5
+#define DMA_MUX_SPI_SLAVE_RD	6
+#define DMA_MUX_SPI_MASTER1_RD	7
+#define DMA_MUX_SPI_MASTER1_WR	8
+#define DMA_MUX_I2S_WR		9
+#define DMA_MUX_I2S_RD		10
+#define DMA_MUX_LCD_WR		11
+#define DMA_MUX_SDIO_HOST_RD	12
+#define DMA_MUX_SDIO_HOST_WR	13
+
+/* UCC address blocks */
+
+#define UCCP_BASE_ADDR			0xe0200000
+#define UCCP_SIZE			0x00150000
+
+#define UCCP_SYSINT_BASE_ADDR		0xe0200000
+#define UCCP_SYSINT_SIZE		0x00060000
+
+#define UCCP0_MTX_BASE_ADDR		0xe0260000
+#define UCCP0_MTX_SIZE			0x00050000
+
+#define UCCP0_MCP16BIT_BASE_ADDR	0xe02b0000
+#define UCCP0_MCP16BIT_SIZE		0x00008e00
+
+#define UCCP0_MCP24BIT_BASE_ADDR	0xe02c0000
+#define UCCP0_MCP24BIT_SIZE		0x0002b800
+
+#define UCCP1_MTX_BASE_ADDR		0xe0300000
+#define UCCP1_MTX_SIZE			0x00050000
+
+
+
+#endif /* _TZ1090_DEFS_H_ */
diff --git a/arch/metag/include/asm/soc-tz1090/gpio.h b/arch/metag/include/asm/soc-tz1090/gpio.h
new file mode 100644
index 0000000..f888fae
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/gpio.h
@@ -0,0 +1,172 @@
+#ifndef _TZ1090_GPIO_H_
+#define _TZ1090_GPIO_H_
+
+/* TZ1090 has 90 main GPIOs, in 3 banks of 30. */
+#define NR_BUILTIN_GPIO		90
+/* Plus 7 PDC GPIOs */
+#define NR_PDC_GPIO		7
+
+#define GPIO_0_BASE	0
+#define GPIO_0_PIN(x)	(GPIO_0_BASE + (x))
+
+#define GPIO_1_BASE	30
+#define GPIO_1_PIN(x)	(GPIO_1_BASE + (x))
+
+#define GPIO_2_BASE	60
+#define GPIO_2_PIN(x)	(GPIO_2_BASE + (x))
+
+#define GPIO_PDC_BASE	NR_BUILTIN_GPIO
+#define GPIO_PDC_PIN(x)	(GPIO_PDC_BASE + (x))
+
+#define GPIO_SDIO_CLK		GPIO_0_PIN(0)
+#define GPIO_SDIO_CMD		GPIO_0_PIN(1)
+#define GPIO_SDIO_D0		GPIO_0_PIN(2)
+#define GPIO_SDIO_D1		GPIO_0_PIN(3)
+#define GPIO_SDIO_D2		GPIO_0_PIN(4)
+#define GPIO_SDIO_D3		GPIO_0_PIN(5)
+#define GPIO_SDIO_FIRST		GPIO_SDIO_CLK
+#define GPIO_SDIO_LAST		GPIO_SDIO_D3
+#define GPIO_SDH_CD		GPIO_0_PIN(6)
+#define GPIO_SDH_WP		GPIO_0_PIN(7)
+#define GPIO_SDH_0_FIRST	GPIO_SDH_CD
+#define GPIO_SDH_0_LAST		GPIO_SDH_WP
+#define GPIO_SPI0_MCLK		GPIO_0_PIN(8)
+#define GPIO_SPI0_CS0		GPIO_0_PIN(9)
+#define GPIO_SPI0_CS1		GPIO_0_PIN(10)
+#define GPIO_SPI0_CS2		GPIO_0_PIN(11)
+#define GPIO_SPI0_DOUT		GPIO_0_PIN(12)
+#define GPIO_SPI0_DIN		GPIO_0_PIN(13)
+#define GPIO_SPI0_FIRST		GPIO_SPI0_MCLK
+#define GPIO_SPI0_LAST		GPIO_SPI0_DIN
+#define GPIO_SPI1_MCLK		GPIO_0_PIN(14)
+#define GPIO_SPI1_CS0		GPIO_0_PIN(15)
+#define GPIO_SPI1_CS1		GPIO_0_PIN(16)
+#define GPIO_SPI1_CS2		GPIO_0_PIN(17)
+#define GPIO_SPI1_DOUT		GPIO_0_PIN(18)
+#define GPIO_SPI1_DIN		GPIO_0_PIN(19)
+#define GPIO_SPI1_FIRST		GPIO_SPI1_MCLK
+#define GPIO_SPI1_LAST		GPIO_SPI1_DIN
+#define GPIO_UART0_RXD		GPIO_0_PIN(20)
+#define GPIO_UART0_TXD		GPIO_0_PIN(21)
+#define GPIO_UART0_CTS		GPIO_0_PIN(22)
+#define GPIO_UART0_RTS		GPIO_0_PIN(23)
+#define GPIO_UART0_FIRST	GPIO_UART0_RXD
+#define GPIO_UART0_LAST		GPIO_UART0_RTS
+#define GPIO_UART1_RXD		GPIO_0_PIN(24)
+#define GPIO_UART1_TXD		GPIO_0_PIN(25)
+#define GPIO_UART1_FIRST	GPIO_UART1_RXD
+#define GPIO_UART1_LAST		GPIO_UART1_TXD
+#define GPIO_SCB0_SDAT		GPIO_0_PIN(26)
+#define GPIO_SCB0_SCLK		GPIO_0_PIN(27)
+#define GPIO_SCB0_FIRST		GPIO_SCB0_SDAT
+#define GPIO_SCB0_LAST		GPIO_SCB0_SCLK
+#define GPIO_SCB1_SDAT		GPIO_0_PIN(28)
+#define GPIO_SCB1_SCLK		GPIO_0_PIN(29)
+#define GPIO_SCB1_FIRST		GPIO_SCB1_SDAT
+#define GPIO_SCB1_LAST		GPIO_SCB1_SCLK
+#define GPIO_SCB2_SDAT		GPIO_1_PIN(0)
+#define GPIO_SCB2_SCLK		GPIO_1_PIN(1)
+#define GPIO_SCB2_FIRST		GPIO_SCB2_SDAT
+#define GPIO_SCB2_LAST		GPIO_SCB2_SCLK
+#define GPIO_I2S_MCLK		GPIO_1_PIN(2)
+#define GPIO_I2S_BCLK_OUT	GPIO_1_PIN(3)
+#define GPIO_I2S_LRCLK_OUT	GPIO_1_PIN(4)
+#define GPIO_I2S_DOUT0		GPIO_1_PIN(5)
+#define GPIO_I2S_DOUT1		GPIO_1_PIN(6)
+#define GPIO_I2S_DOUT2		GPIO_1_PIN(7)
+#define GPIO_I2S_DIN		GPIO_1_PIN(8)
+#define GPIO_I2S_FIRST		GPIO_I2S_MCLK
+#define GPIO_I2S_LAST		GPIO_I2S_DIN
+#define GPIO_PDM_A		GPIO_1_PIN(9)
+#define GPIO_PDM_B		GPIO_1_PIN(10)
+#define GPIO_PDM_C		GPIO_1_PIN(11)
+#define GPIO_PDM_D		GPIO_1_PIN(12)
+#define GPIO_PDM_FIRST		GPIO_PDM_A
+#define GPIO_PDM_LAST		GPIO_PDM_D
+#define GPIO_TFT_RED0		GPIO_1_PIN(13)
+#define GPIO_TFT_RED1		GPIO_1_PIN(14)
+#define GPIO_TFT_RED2		GPIO_1_PIN(15)
+#define GPIO_TFT_RED3		GPIO_1_PIN(16)
+#define GPIO_TFT_RED4		GPIO_1_PIN(17)
+#define GPIO_TFT_RED5		GPIO_1_PIN(18)
+#define GPIO_TFT_RED6		GPIO_1_PIN(19)
+#define GPIO_TFT_RED7		GPIO_1_PIN(20)
+#define GPIO_TFT_GREEN0		GPIO_1_PIN(21)
+#define GPIO_TFT_GREEN1		GPIO_1_PIN(22)
+#define GPIO_TFT_GREEN2		GPIO_1_PIN(23)
+#define GPIO_TFT_GREEN3		GPIO_1_PIN(24)
+#define GPIO_TFT_GREEN4		GPIO_1_PIN(25)
+#define GPIO_TFT_GREEN5		GPIO_1_PIN(26)
+#define GPIO_TFT_GREEN6		GPIO_1_PIN(27)
+#define GPIO_TFT_GREEN7		GPIO_1_PIN(28)
+#define GPIO_TFT_BLUE0		GPIO_1_PIN(29)
+#define GPIO_TFT_BLUE1		GPIO_2_PIN(0)
+#define GPIO_TFT_BLUE2		GPIO_2_PIN(1)
+#define GPIO_TFT_BLUE3		GPIO_2_PIN(2)
+#define GPIO_TFT_BLUE4		GPIO_2_PIN(3)
+#define GPIO_TFT_BLUE5		GPIO_2_PIN(4)
+#define GPIO_TFT_BLUE6		GPIO_2_PIN(5)
+#define GPIO_TFT_BLUE7		GPIO_2_PIN(6)
+#define GPIO_TFT_VDDEN_GD	GPIO_2_PIN(7)
+#define GPIO_TFT_PANELCLK	GPIO_2_PIN(8)
+#define GPIO_TFT_BLANK_LS	GPIO_2_PIN(9)
+#define GPIO_TFT_VSYNC_NS	GPIO_2_PIN(10)
+#define GPIO_TFT_HSYNC_NR	GPIO_2_PIN(11)
+#define GPIO_TFT_VD12ACB	GPIO_2_PIN(12)
+#define GPIO_TFT_PWRSAVE	GPIO_2_PIN(13)
+#define GPIO_TFT_FIRST		GPIO_TFT_RED0
+#define GPIO_TFT_LAST		GPIO_TFT_PWRSAVE
+#define GPIO_TX_ON		GPIO_2_PIN(14)
+#define GPIO_RX_ON		GPIO_2_PIN(15)
+#define GPIO_PLL_ON		GPIO_2_PIN(16)
+#define GPIO_PA_ON		GPIO_2_PIN(17)
+#define GPIO_RX_HP		GPIO_2_PIN(18)
+#define GPIO_GAIN0		GPIO_2_PIN(19)
+#define GPIO_GAIN1		GPIO_2_PIN(20)
+#define GPIO_GAIN2		GPIO_2_PIN(21)
+#define GPIO_GAIN3		GPIO_2_PIN(22)
+#define GPIO_GAIN4		GPIO_2_PIN(23)
+#define GPIO_GAIN5		GPIO_2_PIN(24)
+#define GPIO_GAIN6		GPIO_2_PIN(25)
+#define GPIO_GAIN7		GPIO_2_PIN(26)
+#define GPIO_ANT_SEL0		GPIO_2_PIN(27)
+#define GPIO_ANT_SEL1		GPIO_2_PIN(28)
+#define GPIO_RF_FIRST		GPIO_TX_ON
+#define GPIO_RF_LAST		GPIO_ANT_SEL1
+#define GPIO_SDH_CLK_IN		GPIO_2_PIN(29)
+#define GPIO_SDH_1_FIRST	GPIO_SDH_CLK_IN
+#define GPIO_SDH_1_LAST		GPIO_SDH_CLK_IN
+
+#define GPIO_SPI_FIRST		GPIO_SPI0_FIRST
+#define GPIO_SPI_LAST		GPIO_SPI1_LAST
+#define GPIO_UART_FIRST		GPIO_UART0_FIRST
+#define GPIO_UART_LAST		GPIO_UART1_LAST
+#define GPIO_SCB_FIRST		GPIO_SCB0_FIRST
+#define GPIO_SCB_LAST		GPIO_SCB2_LAST
+
+/* PDC GPIOs */
+#define GPIO_PDC_GPIO0		GPIO_PDC_PIN(0)
+#define GPIO_PDC_GPIO1		GPIO_PDC_PIN(1)
+#define GPIO_SYS_WAKE0		GPIO_PDC_PIN(2)
+#define GPIO_SYS_WAKE1		GPIO_PDC_PIN(3)
+#define GPIO_SYS_WAKE2		GPIO_PDC_PIN(4)
+#define GPIO_IR_DATA		GPIO_PDC_PIN(5)
+#define GPIO_EXT_POWER		GPIO_PDC_PIN(6)
+
+struct irq_data;
+
+/* gpio.c */
+int comet_gpio_disable(unsigned int gpio);
+int comet_gpio_enable(unsigned int gpio);
+int comet_gpio_disable_block(unsigned int first, unsigned int last);
+int comet_gpio_enable_block(unsigned int first, unsigned int last);
+int comet_gpio_pullup_type(unsigned int gpio, unsigned int pullup);
+
+#define GPIO_PULLUP_TRISTATE	0x0
+#define GPIO_PULLUP_UP		0x1
+#define GPIO_PULLUP_DOWN	0x2
+#define GPIO_PULLUP_REPEATER	0x3
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/hdmi-audio.h b/arch/metag/include/asm/soc-tz1090/hdmi-audio.h
new file mode 100644
index 0000000..cd48b74
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/hdmi-audio.h
@@ -0,0 +1,14 @@
+/*
+ * HDMI Audio component interface for ASoC layer
+ */
+
+#ifndef _HDMI_AUDIO_H_
+#define _HDMI_AUDIO_H_
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+
+extern bool zero1sp_hdmi_audio_get_enabled(void);
+extern void zero1sp_hdmi_audio_set_enabled(bool en);
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/hdmi-video.h b/arch/metag/include/asm/soc-tz1090/hdmi-video.h
new file mode 100644
index 0000000..586f06b
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/hdmi-video.h
@@ -0,0 +1,26 @@
+/*
+ * HDMI public interface (platform data et al.)
+ */
+
+#ifndef _HDMI_VIDEO_H_
+#define _HDMI_VIDEO_H_
+
+/* Sync generation */
+#define HDMI_SYNC_MODE_EXTERN	0x00	/* All external signals */
+#define HDMI_SYNC_MODE_GEN_DE	0x01	/* [HV]SYNC external, DE internal */
+#define HDMI_SYNC_MODE_EMBEDDED	0x02	/* Embedded ITU.656 sync signals */
+
+struct hdmi_fb_ops {
+	int (*get_pixclk)(void);
+};
+
+/* Platform data container */
+struct hdmi_platform_data {
+	/* Name of pixel clock */
+	const char		*pix_clk;
+
+	/* max supported pixel clock */
+	unsigned long		max_pixfreq;
+};
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/irqnums.h b/arch/metag/include/asm/soc-tz1090/irqnums.h
new file mode 100644
index 0000000..1268983
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/irqnums.h
@@ -0,0 +1,58 @@
+#ifndef _TZ1090_IRQNUMS_H
+#define _TZ1090_IRQNUMS_H
+
+#include <linux/irqchip/metag-ext.h>
+
+#include <asm/irq.h>
+
+/*
+ * Hardware IRQ numbers.
+ * These are DEPRECATED, use device tree instead.
+ * These can be mapped to virtual IRQ numbers using external_irq_map().
+ */
+#define SCB0_IRQ_NUM			 0
+#define SCB1_IRQ_NUM			 1
+#define SCB2_IRQ_NUM			 2
+#define SDIO_DEV_IRQ_NUM		 3
+#define UART0_IRQ_NUM			 4
+#define UART1_IRQ_NUM			 5
+#define SPI_MASTER0_IRQ_NUM		 6
+#define SPI_SLAVE_IRQ_NUM		 7
+#define SPI_MASTER1_IRQ_NUM		 8
+#define I2S_OUT0_IRQ_NUM		 9
+#define I2S_OUT1_IRQ_NUM		10
+#define I2S_OUT2_IRQ_NUM		11
+#define I2S_IN_IRQ_NUM			12
+#define GPIO0_IRQ_NUM			13
+#define GPIO1_IRQ_NUM			14
+#define GPIO2_IRQ_NUM			15
+#define BUSERR_IRQ_NUM			16
+#define LCD_IRQ_NUM			17
+#define PDC_IRQ_NUM			18
+#define USB_IRQ_NUM			19
+#define SDIO_HOST_IRQ_NUM		20
+#define MDC0_IRQ_NUM			21
+#define MDC1_IRQ_NUM			22
+#define MDC2_IRQ_NUM			23
+#define MDC3_IRQ_NUM			24
+#define MDC4_IRQ_NUM			25
+#define MDC5_IRQ_NUM			26
+#define MDC6_IRQ_NUM			27
+#define MDC7_IRQ_NUM			28
+#define PDC_IR_IRQ_NUM			29
+#define PDC_RTC_IRQ_NUM			30
+#define PDC_WDOG_IRQ_NUM		31
+#define UCC0_IRQ_NUM			32
+#define UCC1_IRQ_NUM			33
+#define TWOD_IRQ_NUM			34
+#define PDP_IRQ_NUM			35
+#define SOFT0_IRQ_NUM			36
+#define SOFT1_IRQ_NUM			37
+#define SOFT2_IRQ_NUM			38
+#define SOFT3_IRQ_NUM			39
+#define SOFT4_IRQ_NUM			40
+#define SOFT5_IRQ_NUM			41
+#define SOFT6_IRQ_NUM			42
+#define SOFT7_IRQ_NUM			43
+
+#endif /* _TZ1090_IRQNUMS_H */
diff --git a/arch/metag/include/asm/soc-tz1090/pdc.h b/arch/metag/include/asm/soc-tz1090/pdc.h
new file mode 100644
index 0000000..2b7e152
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/pdc.h
@@ -0,0 +1,28 @@
+/*
+ * pdc.h
+ * TZ1090 Powerdown Controller (PDC)
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_PDC_H_
+#define _TZ1090_PDC_H_
+
+/* PDC registers */
+
+#define PDC_WD_SW_RESET			0x000
+
+#define PDC_SOC_SW_PROT			0x020
+
+#define PDC_SOC_POWER			0x300
+#define PDC_SOC_DELAY			0x30C
+
+#define PDC_SOC_BOOTSTRAP		0x400
+#define PDC_SOC_GPIO_CONTROL0		0x500
+#define PDC_SOC_GPIO_CONTROL1		0x504
+#define PDC_SOC_GPIO_CONTROL2		0x508
+#define PDC_SOC_GPIO_CONTROL3		0x50c
+#define PDC_SOC_GPIO_STATUS		0x580
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/pdp.h b/arch/metag/include/asm/soc-tz1090/pdp.h
new file mode 100644
index 0000000..61a094e
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/pdp.h
@@ -0,0 +1,26 @@
+/*
+ * pdp.h
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_PDP_H_
+#define _TZ1090_PDP_H_
+
+#include <linux/init.h>
+
+struct fb_videomode;
+struct pdp_lcd_size_cfg;
+struct imgpdi_lcd_pdata;
+struct pdp_sync_cfg;
+struct pdp_hwops;
+
+void comet_pdp_set_shared_base(unsigned long pa);
+
+extern void __init comet_pdp_set_limits(unsigned long min, unsigned long max);
+extern int __init comet_pdp_setup(const struct fb_videomode *fbvm,
+		struct pdp_lcd_size_cfg *pslc, struct imgpdi_lcd_pdata *pdic,
+		struct pdp_sync_cfg *psc, struct pdp_hwops *hwops);
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/pm.h b/arch/metag/include/asm/soc-tz1090/pm.h
new file mode 100644
index 0000000..1eae171
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/pm.h
@@ -0,0 +1,35 @@
+/*
+ * pm.h
+ * TZ1090 power management: board restart/halt/power-off and suspend hooks
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_PM_H_
+#define _TZ1090_PM_H_
+
+/* Set these hooks to call board specific code to restart/halt/power off. */
+extern void (*board_restart)(char *cmd);
+extern void (*board_halt)(void);
+extern void (*board_power_off)(void);
+
+#ifdef CONFIG_PM_SLEEP
+#include <linux/suspend.h>
+
+/*
+ * Set these hooks to call board specific code to suspend/resume, such as
+ * turning off power rails. These are called close to the actual suspend.
+ */
+extern int (*board_suspend)(suspend_state_t state);
+extern void (*board_resume)(suspend_state_t state);
+#endif
+
+/* soc/tz1090/setup.c */
+void comet_prepare_reset(void);
+
+/* soc/tz1090/pm.c */
+void comet_pdc_restart(void);
+int comet_pdc_power_off(void);
+
+#endif /* _TZ1090_PM_H_ */
diff --git a/arch/metag/include/asm/soc-tz1090/sdhost.h b/arch/metag/include/asm/soc-tz1090/sdhost.h
new file mode 100644
index 0000000..5ad3bcf
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/sdhost.h
@@ -0,0 +1,27 @@
+/*
+ * sdhost.h
+ *
+ * Copyright (C) 2011 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_SDHOST_H_
+#define _TZ1090_SDHOST_H_
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+extern int mci_get_ocr(u32 slot_id);
+extern int mci_get_bus_wd(u32 slot_id);
+extern int mci_init(u32 slot_id, irq_handler_t irqhdlr, void *data);
+extern int __init comet_sdhost_init(void);
+
+extern struct dw_mci_board comet_mci_platform_data;
+extern struct block_settings blk_settings;
+extern struct dma_pdata dma_pdata;
+
+#ifndef CONFIG_MMC_DW_IDMAC
+extern struct dw_mci_dma_ops comet_dma_ops;
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/setup.h b/arch/metag/include/asm/soc-tz1090/setup.h
new file mode 100644
index 0000000..bf73049
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/setup.h
@@ -0,0 +1,25 @@
+/*
+ *  arch/metag/include/asm/soc-tz1090/setup.h
+ *
+ *  Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_TZ1090_SETUP_H__
+#define __SOC_TZ1090_SETUP_H__
+
+bool comet_is_evaluation_silicon(void);
+
+void comet_init_early(void);
+void comet_init_irq(void);
+void comet_init_machine(void);
+
+#define TZ1090_MACHINE_DEFAULTS			\
+	.init_early	= comet_init_early,	\
+	.init_irq	= comet_init_irq,	\
+	.init_machine	= comet_init_machine
+
+#endif /* __SOC_TZ1090_SETUP_H__ */
diff --git a/arch/metag/include/asm/soc-tz1090/suspend.h b/arch/metag/include/asm/soc-tz1090/suspend.h
new file mode 100644
index 0000000..a0c87f8
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/suspend.h
@@ -0,0 +1,56 @@
+/*
+ * suspend.h
+ * Comet suspend functions defined in suspend.S
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _METAG_TZ1090_SUSPEND_H_
+#define _METAG_TZ1090_SUSPEND_H_
+
+#ifdef CONFIG_SUSPEND
+
+/**
+ * metag_standby() - Wait for a wake interrupt.
+ * @txmask:	TXMASK wake trigger bits.
+ *
+ * This is the basic version that doesn't make any effort to reduce power
+ * consumption while in standby.
+ */
+extern void metag_standby(unsigned int txmask);
+
+/* Size in bytes of the metag_standby() code. */
+extern unsigned int metag_standby_sz;
+
+/**
+ * metag_comet_standby() - Comet power saving version of metag_standby().
+ * @txmask:	TXMASK wake trigger bits.
+ *
+ * This adds comet specific power saving techniques on top of metag_standby()
+ * such as putting DDR into self refresh so the DDRC can be powered down, and
+ * clocking the Meta core down.
+ */
+extern void metag_comet_standby(unsigned int txmask);
+
+/* Size in bytes of the metag_comet_standby() code. */
+extern unsigned int metag_comet_standby_sz;
+
+/**
+ * metag_comet_suspend() - SoC power down suspend to RAM.
+ * @txmask:	TXMASK wake trigger bits used when faking suspend to RAM.
+ *
+ * This switches as much of the SoC off as possible, including the Meta core,
+ * and puts DDR RAM into self refresh.
+ *
+ * If SAFE mode is on then suspend will be faked by waiting for the wake trigger
+ * and performing a watchdog soft-reset.
+ */
+extern void metag_comet_suspend(unsigned int txmask);
+
+/* Size in bytes of the metag_comet_suspend() code. */
+extern unsigned int metag_comet_suspend_sz;
+
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/soc-tz1090/usb.h b/arch/metag/include/asm/soc-tz1090/usb.h
new file mode 100644
index 0000000..5b14bbf
--- /dev/null
+++ b/arch/metag/include/asm/soc-tz1090/usb.h
@@ -0,0 +1,16 @@
+/*
+ * usb.h
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _TZ1090_USB_H_
+#define _TZ1090_USB_H_
+
+#include <linux/init.h>
+#include <linux/usb/dwc_otg_platform.h>
+
+extern int __init comet_usb_setup(const struct dwc_otg_board *board);
+
+#endif
diff --git a/arch/metag/include/asm/suspend.h b/arch/metag/include/asm/suspend.h
new file mode 100644
index 0000000..f7ee0cf
--- /dev/null
+++ b/arch/metag/include/asm/suspend.h
@@ -0,0 +1,60 @@
+/*
+ * suspend.h
+ * Generic Metag suspend functions defined in kernel/suspend.S
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef _METAG_SUSPEND_H_
+#define _METAG_SUSPEND_H_
+
+#if defined(CONFIG_METAG_SUSPEND_MEM)
+
+#include <linux/kernel.h>
+
+/**
+ * struct metag_suspend_jmpbuf - Contains core processor state through suspend.
+ * @aregs:	Address unit registers A0StP,A1GbP, and A0FrP,A1LbP.
+ * @dregs:	Data unit registers D0.5,D1.5, D0.6,D1.6, and D0.7,D1.7.
+ * @d1rtp:	Data unit register D1RtP
+ * @pcx:	PC unit register PCX.
+ * @txtimer:	Control unit register TXTIMER.
+ * @txtimeri:	Control unit register TXTIMERI.
+ *
+ * This structure stores core processor state that needs to be restored after
+ * suspend, and is analogous to jmp_buf in the standard C library.
+ */
+struct metag_suspend_jmpbuf {
+	u64 aregs[2];
+	u64 dregs[3];
+	u32 d1rtp;
+	u32 pcx;
+	u32 txtimer;
+	u32 txtimeri;
+};
+
+/**
+ * metag_suspend_setjmp() - Store core processor state.
+ * @jmp:	jump buffer to store state to.
+ *
+ * This stores core processor state to a jump buffer, where it can later be
+ * restored with metag_resume_longjmp(), and is analogous to setjmp() in the
+ * standard C library.
+ */
+int metag_suspend_setjmp(struct metag_suspend_jmpbuf *jmp);
+
+/**
+ * metag_resume_longjmp() - Restore core processor state.
+ * @jmp:	jump buffer containing state.
+ * @ret:	return value to return from metag_suspend_setjmp().
+ *
+ * This restores core processor state from a jump buffer, where the jump buffer
+ * had originally been set by metag_suspend_setjmp(), and is analogous to
+ * longjmp() in the standard C library.
+ */
+void __noreturn metag_resume_longjmp(struct metag_suspend_jmpbuf *jmp, int ret);
+
+#endif /* CONFIG_METAG_SUSPEND_MEM */
+
+#endif /* _METAG_SUSPEND_H_ */
diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h
index a02b955..fa89811 100644
--- a/arch/metag/include/asm/syscalls.h
+++ b/arch/metag/include/asm/syscalls.h
@@ -12,6 +12,13 @@
 
 #include <asm-generic/syscalls.h>
 
+/* kernel/process.c */
+#ifdef CONFIG_SOC_CHORUS2
+asmlinkage long sys_execve_chorus2(const char __user *filename,
+				   const char __user *const __user *argv,
+				   const char __user *const __user *envp);
+#endif
+
 /* kernel/sys_metag.c */
 asmlinkage int sys_metag_setglobalbit(char __user *, int);
 asmlinkage void sys_metag_set_fpu_flags(unsigned int);
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 7c4a330..7db358a 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -37,6 +37,10 @@
 	mm_segment_t addr_limit;	/* thread address space */
 	struct restart_block restart_block;
 
+#ifdef CONFIG_SOC_CHORUS2
+	struct pt_regs *replay_regs;
+	unsigned long pad;
+#endif
 	u8 supervisor_stack[0];
 };
 
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
index ac80874..5e1da68 100644
--- a/arch/metag/include/asm/traps.h
+++ b/arch/metag/include/asm/traps.h
@@ -45,4 +45,19 @@
 
 #endif
 
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+
+#ifndef __ASSEMBLY__
+/* Entry type of ROM vector patch table */
+typedef TBIRES (*tbi_ptr)(TBIRES);
+extern tbi_ptr tbi_vectors[];
+extern int tbi_vector_base;
+#endif
+
+/* Entry slots for ROM vector patches */
+#define TBI_VEC_ASYNC_TRIGGER	0
+#define TBI_VEC_ASYNC_RESUME	1
+#define TBI_VEC_RESUME	2
+#endif
+
 #endif /* _METAG_TBIVECTORS_H */
diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile
index d7675f4..9f78870 100644
--- a/arch/metag/kernel/Makefile
+++ b/arch/metag/kernel/Makefile
@@ -5,6 +5,8 @@
 extra-y	+= head.o
 extra-y	+= vmlinux.lds
 
+obj-y	+= cpu/
+
 obj-y	+= cachepart.o
 obj-y	+= clock.o
 obj-y	+= core_reg.o
@@ -31,9 +33,11 @@
 obj-$(CONFIG_METAG_DA)			+= da.o
 obj-$(CONFIG_DYNAMIC_FTRACE)		+= ftrace.o
 obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace_stub.o
+obj-$(CONFIG_KGDB)			+= kgdb.o
 obj-$(CONFIG_MODULES)			+= metag_ksyms.o
 obj-$(CONFIG_MODULES)			+= module.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_callchain.o
+obj-$(CONFIG_METAG_ROM_WRAPPERS)	+= romwrappers.o
 obj-$(CONFIG_SMP)			+= smp.o
 obj-$(CONFIG_METAG_SUSPEND_MEM)		+= suspend.o
 obj-$(CONFIG_METAG_USER_TCM)		+= tcm.o
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 954548b..63bcf08 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -9,6 +9,7 @@
 #include <linux/errno.h>
 #include <asm/processor.h>
 #include <asm/cachepart.h>
+#include <asm/global_lock.h>
 #include <asm/metag_isa.h>
 #include <asm/metag_mem.h>
 
@@ -92,6 +93,111 @@
 	return -1;
 }
 
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+/*
+ * The global icache partition should be usable if big enough, but we can't
+ * trust the icache local partition to be valid.
+ */
+int cachepart_min_iglobal(unsigned int min_size, unsigned int *old_val)
+{
+	if (get_global_icache_size() < min_size) {
+		pr_err("cachepart_min_iglobal: not enough icache available\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void cachepart_restore_iglobal(unsigned int *old_val)
+{
+	/* cachepart_min_iglobal() hasn't changed anything */
+}
+
+#else
+int cachepart_min_iglobal(unsigned int min_size, unsigned int *old_val)
+{
+	unsigned int lflags;
+	unsigned int cpart;
+	unsigned long cpart_addr = SYSC_ICPART(hard_processor_id());
+	unsigned int ic_size = get_icache_size();
+	unsigned int lic_size;
+	unsigned int temp;
+	int ret = 0;
+
+	__global_lock2(lflags);
+	*old_val = cpart = metag_in32(cpart_addr);
+
+	/* Is the condition possible by halving this thread's local cache? */
+	lic_size = cpart & SYSC_xCPARTL_AND_BITS;
+	if (lic_size) {
+		temp = (ic_size * ((lic_size >> SYSC_xCPARTL_AND_S) + 1)) >> 5;
+		if (temp < min_size) {
+			pr_err("cachepart_min_iglobal: max global icache %u < "
+				"%u\n", temp, min_size);
+			ret = -ENOMEM;
+			goto out;
+		}
+		/*
+		 * It is invalid to attempt to operate the cache with fewer than
+		 * four cache lines allocated to a region (unless it is never
+		 * used).
+		 */
+		if (temp < 4*ICACHE_LINE_BYTES) {
+			pr_err("cachepart_min_iglobal: available global icache "
+				"%u < 4 cache lines\n", temp);
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		pr_err("cachepart_min_iglobal: no global icache available\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cpart &= ~(SYSC_xCPARTL_AND_BITS | SYSC_xCPARTG_AND_BITS |
+		   SYSC_xCPARTG_OR_BITS);
+	/* Halve local size, and write into local and global size */
+	lic_size >>= (SYSC_xCPARTL_AND_S + 1);
+	cpart |= lic_size << SYSC_xCPARTL_AND_S;
+	cpart |= lic_size << SYSC_xCPARTG_AND_S;
+	/* Adjust global offset = local offset + local size */
+	temp = (cpart & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
+	temp += lic_size + 1;
+	cpart |= temp << SYSC_xCPARTG_OR_S;
+
+	/* Disable icache */
+	metag_out32(0, MMCU_ICACHE_CTRL_ADDR);
+	/* Flush icache */
+	metag_out32(1, SYSC_ICACHE_FLUSH);
+	/* Re-partition icache */
+	metag_out32(cpart, cpart_addr);
+	/* Enable icache */
+	metag_out32(1, MMCU_ICACHE_CTRL_ADDR);
+
+out:
+	__global_unlock2(lflags);
+	return ret;
+}
+
+void cachepart_restore_iglobal(unsigned int *old_val)
+{
+	unsigned int flags;
+	unsigned long cpart_addr = SYSC_ICPART(hard_processor_id());
+
+	if (*old_val != metag_in32(cpart_addr)) {
+		__global_lock2(flags);
+		/* Disable icache */
+		metag_out32(0, MMCU_ICACHE_CTRL_ADDR);
+		/* Flush icache */
+		metag_out32(1, SYSC_ICACHE_FLUSH);
+		/* Re-partition icache */
+		metag_out32(*old_val, cpart_addr);
+		/* Enable icache */
+		metag_out32(1, MMCU_ICACHE_CTRL_ADDR);
+		__global_unlock2(flags);
+	}
+}
+#endif
+
 void check_for_cache_aliasing(int thread_id)
 {
 	unsigned int thread_cache_size;
@@ -100,22 +206,23 @@
 		thread_cache_size =
 				get_thread_cache_size(cache_type, thread_id);
 		if (thread_cache_size < 0)
-			pr_emerg("Can't read %s cache size", \
+			pr_emerg("Can't read %s cache size\n",
 				 cache_type ? "DCACHE" : "ICACHE");
 		else if (thread_cache_size == 0)
 			/* Cache is off. No need to check for aliasing */
 			continue;
 		if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
-			pr_emerg("Cache aliasing detected in %s on Thread %d",
+			pr_emerg("Potential cache aliasing detected in %s on Thread %d\n",
 				 cache_type ? "DCACHE" : "ICACHE", thread_id);
-			pr_warn("Total %s size: %u bytes",
-				cache_type ? "DCACHE" : "ICACHE ",
+			pr_warn("Total %s size: %u bytes\n",
+				cache_type ? "DCACHE" : "ICACHE",
 				cache_type ? get_dcache_size()
 				: get_icache_size());
-			pr_warn("Thread %s size: %d bytes",
+			pr_warn("Thread %s size: %d bytes\n",
 				cache_type ? "CACHE" : "ICACHE",
 				thread_cache_size);
-			pr_warn("Page Size: %lu bytes", PAGE_SIZE);
+			pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
+			panic("Potential cache aliasing detected");
 		}
 	}
 }
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c
index defc840..6339c9c 100644
--- a/arch/metag/kernel/clock.c
+++ b/arch/metag/kernel/clock.c
@@ -8,8 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/of.h>
 
 #include <asm/param.h>
 #include <asm/clock.h>
@@ -34,8 +36,63 @@
 #endif
 }
 
+static struct clk *clk_core;
+
+/* Clk based get_core_freq callback. */
+static unsigned long get_core_freq_clk(void)
+{
+	return clk_get_rate(clk_core);
+}
+
 /**
- * setup_meta_clocks() - Set up the Meta clock.
+ * init_metag_core_clock() - Set up core clock from devicetree.
+ *
+ * Checks to see if a "core" clock is provided in the device tree, and overrides
+ * the get_core_freq callback to use it.
+ */
+static void __init init_metag_core_clock(void)
+{
+	/*
+	 * See if a core clock is provided by the devicetree (and
+	 * registered by the init callback above).
+	 */
+	struct device_node *node;
+	node = of_find_compatible_node(NULL, NULL, "img,meta");
+	if (!node) {
+		pr_warn("%s: no compatible img,meta DT node found\n",
+			__func__);
+		return;
+	}
+
+	clk_core = of_clk_get_by_name(node, "core");
+	if (IS_ERR(clk_core)) {
+		pr_warn("%s: no core clock found in DT\n",
+			__func__);
+		return;
+	}
+
+	/*
+	 * Override the core frequency callback to use
+	 * this clk.
+	 */
+	_meta_clock.get_core_freq = get_core_freq_clk;
+}
+
+/**
+ * init_metag_clocks() - Set up clocks from devicetree.
+ *
+ * Set up important clocks from device tree. In particular any needed for clock
+ * sources.
+ */
+void __init init_metag_clocks(void)
+{
+	init_metag_core_clock();
+
+	pr_info("Core clock frequency: %lu Hz\n", get_coreclock());
+}
+
+/**
+ * setup_meta_clocks() - Early set up of the Meta clock.
  * @desc:	Clock descriptor usually provided by machine description
  *
  * Ensures all callbacks are valid.
diff --git a/arch/metag/kernel/coremem.c b/arch/metag/kernel/coremem.c
new file mode 100644
index 0000000..22b4d8e
--- /dev/null
+++ b/arch/metag/kernel/coremem.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2010 Imagination Technologies Ltd.
+ *
+ * These functions allow code to be pushed into core memory so that DDR can be
+ * put into self-refresh during standby.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <asm/cacheflush.h>
+#include <asm/coremem.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/cachepart.h>
+#include <asm/global_lock.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+
+/* private coremem region flags */
+#define METAG_COREMEM_MASK	0x0000ffff	/* mask of bits to compare */
+#define METAG_COREMEM_BUSY_BIT	31		/* busy bit */
+
+struct metag_coremem_region *metag_coremem_alloc(unsigned int flags,
+						 unsigned int size)
+{
+	/*
+	 * Look in metag_coremems for regions that we can use.
+	 * metag_coremems should be defined by SoCs.
+	 */
+	int i;
+	for (i = 0; i < metag_coremems_sz; ++i) {
+		/* METAG_COREMEM_MASK bits of flags must match */
+		if (METAG_COREMEM_MASK & (metag_coremems[i].flags ^ flags))
+			continue;
+
+		/* must be big enough */
+		if (!(flags & METAG_COREMEM_CACHE) &&
+		    size > metag_coremems[i].size)
+			continue;
+
+		/* one at a time */
+		if (test_and_set_bit_lock(METAG_COREMEM_BUSY_BIT,
+					  &metag_coremems[i].flags))
+			continue;
+
+		return &metag_coremems[i];
+	}
+	return NULL;
+}
+
+void metag_coremem_free(struct metag_coremem_region *region)
+{
+	/* finish with locked in cache */
+	if ((region->flags & METAG_COREMEM_CACHE) && region->start) {
+		metag_cache_unlock(region->flags);
+		cachepart_restore_iglobal(&region->data);
+		region->start = NULL;
+	}
+	/* reset region and unmark busy flag */
+	region->pos = 0;
+	clear_bit_unlock(METAG_COREMEM_BUSY_BIT, &region->flags);
+}
+
+void *metag_coremem_push(struct metag_coremem_region *region,
+			 void *start, unsigned long size)
+{
+	void *ret;
+	if (region->flags & METAG_COREMEM_CACHE) {
+		/* only do this once */
+		if (region->start)
+			return NULL;
+		/* try and lock the pushed memory into the cache */
+		if (cachepart_min_iglobal(size, &region->data))
+			return NULL;
+		ret = (void *)metag_cache_lock(region->flags, __pa(start),
+						size);
+		if (!ret) {
+			cachepart_restore_iglobal(&region->data);
+			return NULL;
+		}
+		region->start = ret;
+	} else {
+		/* don't overflow */
+		if (size > region->size - region->pos)
+			return NULL;
+		/* push at region->pos */
+		ret = region->start + region->pos;
+		memcpy(ret, start, size);
+		region->pos += size;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_METAG_META21
+
+#define MMCU_TnCCM_ICTRL(n)	\
+	(MMCU_T0CCM_ICCTRL + MMCU_TnCCM_xxCTRL_STRIDE*(n))
+#define MMCU_TnCCM_DCTRL(n)	\
+	(MMCU_T0CCM_DCCTRL + MMCU_TnCCM_xxCTRL_STRIDE*(n))
+#define MMCU_TnTLBINVALIDATE(n)	\
+	(LINSYSCFLUSH_TxMMCU_BASE + LINSYSCFLUSH_TxMMCU_STRIDE*(n))
+
+unsigned long metag_cache_lock(unsigned int flags, unsigned long phys,
+			       unsigned long size)
+{
+	unsigned int hwt = hard_processor_id();
+	unsigned long ccmctrl_addr;
+	unsigned int ccmctrl;
+	unsigned long ccr; /* core cached region in physical memory */
+	unsigned int offset;
+	unsigned long rounded_size;
+	int rounded_size_sh;
+	unsigned long i;
+
+	if (!size)
+		return 0;
+	/* Which cache are we referring to? instruction or data? */
+	if (flags & METAG_COREMEM_IMEM) {
+		ccmctrl_addr = MMCU_TnCCM_ICTRL(hwt);
+		ccr = LINCORE_ICACHE_BASE;
+	} else if (flags & METAG_COREMEM_DMEM) {
+		ccmctrl_addr = MMCU_TnCCM_DCTRL(hwt);
+		ccr = LINCORE_DCACHE_BASE;
+	} else {
+		return 0;
+	}
+
+	/* First take account of offset of memory in page */
+	offset = offset_in_page(phys);
+	rounded_size = size + offset;
+	phys -= offset;
+	ccr += offset;
+
+	/* Get rounded up log2 of size */
+	rounded_size_sh = ilog2((rounded_size<<1)-1) - MMCU_TnCCM_REGSZ0_POWER;
+	if (rounded_size_sh < 0)
+		rounded_size_sh = 0;
+	rounded_size = (1 << MMCU_TnCCM_REGSZ0_POWER) << rounded_size_sh;
+
+	/* There's a maximum amount of lockable cache */
+	if (rounded_size > MMCU_TnCCM_REGSZ_MAXBYTES)
+		return 0;
+	/* The size of the current global cache partition may be limited too */
+	if (rounded_size > ((flags & METAG_COREMEM_IMEM)
+				? get_global_icache_size()
+				: get_global_dcache_size()))
+		return 0;
+
+	ccmctrl = metag_in32(ccmctrl_addr);
+	if (ccmctrl & MMCU_TnCCM_ENABLE_BIT)
+		/* already enabled */
+		return 0;
+
+	/* set the physical mapping (to start of cache data region) */
+	ccmctrl ^= (ccmctrl ^ phys) & MMCU_TnCCM_ADDR_BITS;
+	/* set the size */
+	ccmctrl &= ~MMCU_TnCCM_REGSZ_BITS;
+	ccmctrl |= rounded_size_sh << MMCU_TnCCM_REGSZ_S;
+	/* WIN3 */
+	ccmctrl |= MMCU_TnCCM_WIN3_BIT;
+	/* Enable */
+	ccmctrl |= MMCU_TnCCM_ENABLE_BIT;
+	metag_out32(ccmctrl, ccmctrl_addr);
+
+	if (flags & METAG_COREMEM_DMEM) {
+		/*
+		 * Lock cache lines and TLB entries, up to size. This is done by
+		 * reading from the locked cache at DCACHE_LINE_BYTES intervals.
+		 */
+		unsigned int pf_start = ccr & -DCACHE_LINE_BYTES;
+		unsigned int pf_end = ccr + size;
+		for (i = pf_start; i < pf_end; i += DCACHE_LINE_BYTES)
+			metag_in32(i);
+	}
+
+	return ccr;
+}
+
+void metag_cache_unlock(unsigned int flags)
+{
+	unsigned int hwt = hard_processor_id();
+	unsigned int lstat;
+	unsigned long ccmctrl_addr;
+	unsigned int ccmctrl;
+	unsigned int size;
+
+	if (flags & METAG_COREMEM_IMEM)
+		ccmctrl_addr = MMCU_TnCCM_ICTRL(hwt);
+	else if (flags & METAG_COREMEM_DMEM)
+		ccmctrl_addr = MMCU_TnCCM_DCTRL(hwt);
+	else
+		return;
+
+	__global_lock2(lstat);
+
+	ccmctrl = metag_in32(ccmctrl_addr);
+	size = (ccmctrl & MMCU_TnCCM_REGSZ_BITS) >> MMCU_TnCCM_REGSZ_S;
+	size = (1 << MMCU_TnCCM_REGSZ0_POWER) << size;
+
+	/* Invalidate cache and TLB */
+	if (flags & METAG_COREMEM_IMEM)
+		metag_code_cache_flush((void *)LINCORE_ICACHE_BASE, size);
+	else if (flags & METAG_COREMEM_DMEM)
+		metag_data_cache_flush((void *)LINCORE_DCACHE_BASE, size);
+	flush_tlb_all();
+
+	/* Disable mem locked in cache */
+	if (ccmctrl & MMCU_TnCCM_ENABLE_BIT) {
+		ccmctrl &= ~MMCU_TnCCM_ENABLE_BIT;
+		metag_out32(ccmctrl, ccmctrl_addr);
+	}
+	__global_unlock2(lstat);
+}
+#endif
diff --git a/arch/metag/kernel/cpu/Kconfig b/arch/metag/kernel/cpu/Kconfig
new file mode 100644
index 0000000..77b8915
--- /dev/null
+++ b/arch/metag/kernel/cpu/Kconfig
@@ -0,0 +1,53 @@
+config METAG_CORE_SYSFS
+	bool "Meta core sysfs interface"
+	select SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  parameters such as cycle counters and configuration registers.
+
+config METAG_WRITE_COMBINER
+	bool "Meta write combiner sysfs interface"
+	depends on METAG_CORE_SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  write combiner hardware.
+
+config METAG_MEM_ARBITER
+	bool "Meta memory arbiter sysfs interface"
+	depends on METAG_CORE_SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  memory arbiter hardware.
+
+config METAG_CYCLE_COUNTER
+	bool "Meta cycle counters"
+	depends on METAG_CORE_SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  cycle counters.
+
+config METAG_PERFORMANCE_COUNTER
+	bool "Meta performance counters"
+	depends on METAG_CORE_SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  performance counters.
+
+config METAG_AMA_COUNTER
+	bool "Meta Automatic MIPS Allocation counters"
+	depends on METAG_CORE_SYSFS
+	help
+	  This enables a sysfs driver that gives access to the CPU core
+	  Automatic MIPS Allocation counters.
+
+config METAG_L2C_CONTROL
+	bool "Level 2 Cache control"
+	depends on METAG_CORE_SYSFS && METAG_L2C
+	help
+	  This enables a sysfs driver that provides access to the CPU core level
+	  2 cache configuration (information like line size, ways of
+	  associativity, total size, whether unified, and the hardware revision
+	  number). It also provides controls to enable and disable the L2 cache,
+	  to enable and disable prefetch, and to trigger write backs and
+	  flushes, mostly for debug purposes. They can be found in
+	  /sys/devices/system/cache/l2/.
diff --git a/arch/metag/kernel/cpu/Makefile b/arch/metag/kernel/cpu/Makefile
new file mode 100644
index 0000000..141546e
--- /dev/null
+++ b/arch/metag/kernel/cpu/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux Meta-specific sysfs interfaces.
+#
+
+obj-$(CONFIG_METAG_CORE_SYSFS)		+= core-sysfs.o counters/
+obj-$(CONFIG_METAG_WRITE_COMBINER)	+= write-combiner.o
+obj-$(CONFIG_METAG_MEM_ARBITER)		+= memory-arbiter.o
+obj-$(CONFIG_METAG_L2C_CONTROL)		+= l2cache-control.o
diff --git a/arch/metag/kernel/cpu/core-sysfs.c b/arch/metag/kernel/cpu/core-sysfs.c
new file mode 100644
index 0000000..ebb5d8b
--- /dev/null
+++ b/arch/metag/kernel/cpu/core-sysfs.c
@@ -0,0 +1,60 @@
+/*
+ * linux/arch/metag/drivers/core-sysfs.c
+ *
+ * Meta core sysfs interface, including cycle counters, perf counters and AMA
+ * configuration registers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/core-sysfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+
+struct bus_type performance_subsys = {
+	.name = "performance",
+	.dev_name = "counter",
+};
+
+struct bus_type cache_subsys = {
+	.name = "cache",
+	.dev_name = "cache",
+};
+
+static int __init meta_core_sysfs_init(void)
+{
+	int err, ret = 0;
+
+	err = subsys_system_register(&performance_subsys, NULL);
+	if (err) {
+		performance_subsys.name = NULL;
+		ret = err;
+	}
+
+	err = subsys_system_register(&cache_subsys, NULL);
+	if (err) {
+		cache_subsys.name = NULL;
+		ret = err;
+	}
+
+	return ret;
+}
+arch_initcall(meta_core_sysfs_init);
diff --git a/arch/metag/kernel/cpu/counters/Makefile b/arch/metag/kernel/cpu/counters/Makefile
new file mode 100644
index 0000000..521e2bf
--- /dev/null
+++ b/arch/metag/kernel/cpu/counters/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Meta performance counter specific device drivers.
+#
+
+obj-$(CONFIG_METAG_CYCLE_COUNTER)	+= cyclecount.o
+obj-$(CONFIG_METAG_PERFORMANCE_COUNTER)	+= perfcount.o
+obj-$(CONFIG_METAG_AMA_COUNTER)		+= amacount.o
diff --git a/arch/metag/kernel/cpu/counters/amacount.c b/arch/metag/kernel/cpu/counters/amacount.c
new file mode 100644
index 0000000..87ec437
--- /dev/null
+++ b/arch/metag/kernel/cpu/counters/amacount.c
@@ -0,0 +1,227 @@
+/*
+ * linux/arch/metag/drivers/amacount.c
+ *
+ * Meta core Automatic MIPS Allocation (AMA) sysfs interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/core_reg.h>
+#include <asm/core-sysfs.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* Control unit registers for AMA */
+static unsigned int cu_ama_regs[] = {
+	TXAMAREG0_REGNUM,
+	TXAMAREG1_REGNUM,
+	TXAMAREG2_REGNUM,
+	TXAMAREG3_REGNUM,
+};
+
+#define CU_AMA_REG(thread, reg)					\
+	(T0UCTREG0 + (TnUCTRX_STRIDE * thread) +		\
+	 (TXUCTREGn_STRIDE * cu_ama_regs[reg]))
+
+/* Memory mapped registers for AMA */
+static unsigned int mmap_ama_regs[] = {
+	T0AMAREG4,
+	T0AMAREG5,
+	T0AMAREG6,
+};
+
+#define MMAP_AMA_REG(thread, reg)				\
+	((TnAMAREGX_STRIDE * thread) + mmap_ama_regs[reg])
+
+enum cu_reg_num {reg0, reg1, reg2, reg3};
+enum mmap_reg_num {reg4, reg5, reg6};
+
+static ssize_t show_cu_amareg(unsigned int thread,
+	enum cu_reg_num reg, char *buf)
+{
+	ssize_t err;
+	u32 val;
+
+	val = metag_in32(CU_AMA_REG(thread, reg));
+	err = sprintf(buf, "%u\n", val);
+
+	return err;
+}
+
+static ssize_t store_cu_amareg(unsigned int thread,
+	enum mmap_reg_num reg, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	metag_out32(val, CU_AMA_REG(thread, reg));
+
+	return count;
+}
+
+#define SYSFS_CUREG_SETUP(REG) \
+static ssize_t show_##REG(struct device *dev, \
+			  struct device_attribute *attr, char *buf) \
+{ \
+	return show_cu_amareg(dev->id, REG, buf); \
+} \
+static ssize_t store_##REG(struct device *dev, \
+			   struct device_attribute *attr, const char *buf, \
+			   size_t count) \
+{ \
+	return store_cu_amareg(dev->id, REG, buf, count); \
+}
+
+static ssize_t show_mmap_amareg(unsigned int thread,
+	enum mmap_reg_num reg, char *buf)
+{
+	ssize_t err;
+	u32 val;
+
+	val = metag_in32(MMAP_AMA_REG(thread, reg));
+	err = sprintf(buf, "%u\n", val);
+
+	return err;
+}
+
+static ssize_t store_mmap_amareg(unsigned int thread,
+				 enum mmap_reg_num reg, const char *buf,
+				 size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	metag_out32(val, MMAP_AMA_REG(thread, reg));
+
+	return count;
+}
+
+#define SYSFS_MMAPREG_SETUP(REG) \
+static ssize_t show_##REG(struct device *dev, \
+			  struct device_attribute *attr, char *buf) \
+{ \
+	return show_mmap_amareg(dev->id, REG, buf); \
+} \
+static ssize_t store_##REG(struct device *dev, \
+			   struct device_attribute *attr, \
+			   const char *buf, size_t count) \
+{ \
+	return store_mmap_amareg(dev->id, REG, buf, count); \
+}
+
+SYSFS_CUREG_SETUP(reg0);
+SYSFS_CUREG_SETUP(reg1);
+SYSFS_CUREG_SETUP(reg2);
+SYSFS_CUREG_SETUP(reg3);
+
+SYSFS_MMAPREG_SETUP(reg4);
+SYSFS_MMAPREG_SETUP(reg5);
+SYSFS_MMAPREG_SETUP(reg6);
+
+static struct device_attribute cu_ama_attrs[] = {
+	__ATTR(amareg0, 0644, show_reg0, store_reg0),
+	__ATTR(amareg1, 0644, show_reg1, store_reg1),
+	__ATTR(amareg2, 0644, show_reg2, store_reg2),
+	__ATTR(amareg3, 0644, show_reg3, store_reg3),
+};
+
+static struct device_attribute mmap_ama_attrs[] = {
+	__ATTR(amareg4, 0644, show_reg4, store_reg4),
+	__ATTR(amareg5, 0644, show_reg5, store_reg5),
+	__ATTR(amareg6, 0644, show_reg6, store_reg6),
+};
+
+static struct device device_ama = {
+	.bus = &performance_subsys,
+	.init_name = "ama",
+};
+
+static struct device device_ama_threads[4] = {
+	{
+		.id = 0,
+		.bus = &performance_subsys,
+		.parent = &device_ama,
+		.init_name = "thread0",
+	},
+	{
+		.id = 1,
+		.bus = &performance_subsys,
+		.parent = &device_ama,
+		.init_name = "thread1",
+	},
+	{
+		.id = 2,
+		.bus = &performance_subsys,
+		.parent = &device_ama,
+		.init_name = "thread2",
+	},
+	{
+		.id = 3,
+		.bus = &performance_subsys,
+		.parent = &device_ama,
+		.init_name = "thread3",
+	},
+};
+
+static int __init meta_amacount_init(void)
+{
+	int i, thread, exists, ret;
+
+	if (!performance_subsys.name)
+		return -EINVAL;
+
+	ret = device_register(&device_ama);
+	if (ret)
+		return ret;
+
+	for (thread = 0; thread < 4; thread++) {
+		exists = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
+		if (!exists)
+			break;
+
+		ret = device_register(&device_ama_threads[thread]);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < ARRAY_SIZE(cu_ama_attrs); i++) {
+			ret = device_create_file(&device_ama_threads[thread],
+						 &cu_ama_attrs[i]);
+			if (ret)
+				return ret;
+		}
+		for (i = 0; i < ARRAY_SIZE(mmap_ama_attrs); i++) {
+			ret = device_create_file(&device_ama_threads[thread],
+						 &mmap_ama_attrs[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+device_initcall(meta_amacount_init);
diff --git a/arch/metag/kernel/cpu/counters/cyclecount.c b/arch/metag/kernel/cpu/counters/cyclecount.c
new file mode 100644
index 0000000..22062a5
--- /dev/null
+++ b/arch/metag/kernel/cpu/counters/cyclecount.c
@@ -0,0 +1,135 @@
+/*
+ * linux/arch/metag/drivers/cyclecount.c
+ *
+ * Meta core cycle counter sysfs interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Make sure we include these first with the TXUXXRX values available,
+ * or else we cannot get hold of them later on after somebody else has
+ * included them from the arch headers.
+ */
+
+#include <asm/core_reg.h>
+#include <asm/core-sysfs.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* Address of the global idle cycle counter register */
+#define IDLE_COUNTER()	(T0UCTREG0 + TXUCTREGn_STRIDE * TXIDLECYC_REGNUM)
+
+/* Address of the active cycle (TXTACTCYC) counter of a hardware thread;
+ * the argument is parenthesized so expression arguments expand safely. */
+#define CYCLE_COUNTER(thread)					\
+	(T0UCTREG0 + (TnUCTRX_STRIDE * (thread)) +		\
+	 (TXUCTREGn_STRIDE * TXTACTCYC_REGNUM))
+
+/* Counter selector: one value per hardware thread, plus the idle counter */
+enum thread_id {
+	thread0,
+	thread1,
+	thread2,
+	thread3,
+	idle
+};
+
+/*
+ * Format the selected cycle counter into @buf and clear it.
+ * Returns the formatted length, or -EINVAL for an invalid selector.
+ */
+static ssize_t show_cycles(enum thread_id thread, char *buf)
+{
+	unsigned long counter;
+	unsigned int cycles;
+
+	if (thread >= thread0 && thread <= thread3)
+		counter = CYCLE_COUNTER(thread);
+	else if (thread == idle)
+		counter = IDLE_COUNTER();
+	else
+		return -EINVAL;
+
+	/* read the current value, then reset the counter to zero */
+	cycles = metag_in32(counter);
+	metag_out32(0, counter);
+
+	return sprintf(buf, "%u\n", cycles);
+}
+
+/*
+ * Generate a sysfs show handler named show_<NAME> that reads (and
+ * resets) the cycle counter identified by the thread_id value <NAME>.
+ */
+#define SYSFS_CYCLE_SETUP(NAME) \
+static ssize_t show_##NAME(struct device *dev,  \
+			   struct device_attribute *attr, char *buf) \
+{ \
+	return show_cycles(NAME, buf); \
+}
+
+SYSFS_CYCLE_SETUP(thread0);
+SYSFS_CYCLE_SETUP(thread1);
+SYSFS_CYCLE_SETUP(thread2);
+SYSFS_CYCLE_SETUP(thread3);
+SYSFS_CYCLE_SETUP(idle);
+
+/* Read-only per-thread active cycle counts (reading resets the count) */
+static struct device_attribute thread_attrs[] = {
+	__ATTR(thread0, 0444, show_thread0, NULL),
+	__ATTR(thread1, 0444, show_thread1, NULL),
+	__ATTR(thread2, 0444, show_thread2, NULL),
+	__ATTR(thread3, 0444, show_thread3, NULL),
+};
+
+/* Read-only idle cycle count (reading resets the count) */
+static DEVICE_ATTR(idle, 0444, show_idle, NULL);
+
+/* "cycles" device registered on the performance subsystem */
+static struct device device_perf_cycles = {
+	.bus = &performance_subsys,
+	.init_name = "cycles",
+};
+
+/*
+ * Register the "cycles" sysfs device with an idle counter attribute and
+ * one attribute per existing hardware thread.  The device is
+ * unregistered again if attribute creation fails (previously a partial
+ * registration was leaked on the error paths).
+ */
+static int __init meta_cyclecount_init(void)
+{
+	int i, exists, ret;
+
+	/* the performance subsystem must have been registered first */
+	if (!performance_subsys.name)
+		return -EINVAL;
+
+	ret = device_register(&device_perf_cycles);
+	if (ret) {
+		/* a failed device_register() still needs a put */
+		put_device(&device_perf_cycles);
+		return ret;
+	}
+
+	/* We always have an idle counter */
+	ret = device_create_file(&device_perf_cycles, &dev_attr_idle);
+	if (ret)
+		goto err_unregister;
+
+	/* Check for up to four threads */
+	for (i = 0; i < ARRAY_SIZE(thread_attrs); i++) {
+		exists = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i);
+		if (!exists)
+			continue;
+
+		ret = device_create_file(&device_perf_cycles,
+					 &thread_attrs[i]);
+		if (ret)
+			goto err_unregister;
+	}
+
+	return 0;
+
+err_unregister:
+	/* unregistering also removes any files created so far */
+	device_unregister(&device_perf_cycles);
+	return ret;
+}
+device_initcall(meta_cyclecount_init);
diff --git a/arch/metag/kernel/cpu/counters/perfcount.c b/arch/metag/kernel/cpu/counters/perfcount.c
new file mode 100644
index 0000000..e90e45c
--- /dev/null
+++ b/arch/metag/kernel/cpu/counters/perfcount.c
@@ -0,0 +1,153 @@
+/*
+ * linux/arch/metag/drivers/perfcount.c
+ *
+ * Meta core performance counter sysfs interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/core-sysfs.h>
+#include <asm/metag_mem.h>
+
+/* MMIO addresses of the two global performance counter registers,
+ * indexed by dev->id; never written, so declared const. */
+static const int counter_map[] = {
+	PERF_COUNT0,
+	PERF_COUNT1
+};
+
+/*
+ * Show the current count of performance counter dev->id and reset it.
+ * Only the count field is cleared; the thread-mask and control fields
+ * of the register are preserved.
+ */
+static ssize_t show_counter(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	u32 perf, val;
+
+	val = metag_in32(counter_map[dev->id]);
+	perf = val & PERF_COUNT_BITS;
+	/* write back with count bits cleared, keeping mask/ctrl config */
+	metag_out32(val & ~PERF_COUNT_BITS, counter_map[dev->id]);
+
+	return sprintf(buf, "%u\n", perf);
+}
+
+/*
+ * Show the per-thread enable mask of performance counter dev->id.
+ * The field is shifted down so the reported value matches what
+ * store_mask() accepts (previously the unshifted field was reported,
+ * so a read-back never matched what had been written).
+ */
+static ssize_t show_mask(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	u32 mask;
+
+	mask = (metag_in32(counter_map[dev->id]) & PERF_THREAD_BITS)
+						 >> PERF_THREAD_S;
+	return sprintf(buf, "%u\n", mask);
+}
+
+/*
+ * Set the per-thread enable mask of performance counter dev->id.
+ * The shifted value is masked to the thread field so an oversized
+ * input cannot clobber the count or control bits of the register.
+ */
+static ssize_t store_mask(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	unsigned long val;
+	u32 read_val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	read_val = metag_in32(counter_map[dev->id]) & ~PERF_THREAD_BITS;
+	val = (val << PERF_THREAD_S) & PERF_THREAD_BITS;
+	metag_out32(read_val | val, counter_map[dev->id]);
+
+	return count;
+}
+
+/*
+ * Show the control field of performance counter dev->id, shifted down
+ * to match what store_ctrl() accepts (previously the unshifted field
+ * was reported, so a read-back never matched what had been written).
+ */
+static ssize_t show_ctrl(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	u32 ctrl;
+
+	ctrl = (metag_in32(counter_map[dev->id]) & PERF_CTRL_BITS)
+					         >> PERF_CTRL_S;
+	return sprintf(buf, "%u\n", ctrl);
+}
+
+/*
+ * Set the control field of performance counter dev->id.
+ * The shifted value is masked to the control field so an oversized
+ * input cannot clobber the count or thread-mask bits of the register.
+ */
+static ssize_t store_ctrl(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	unsigned long val;
+	u32 read_val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	read_val = metag_in32(counter_map[dev->id]) & ~PERF_CTRL_BITS;
+	val = (val << PERF_CTRL_S) & PERF_CTRL_BITS;
+	metag_out32(read_val | val, counter_map[dev->id]);
+
+	return count;
+}
+
+static struct device_attribute perf_attrs[] = {
+	/* counter has no store method, so it must not claim write permission */
+	__ATTR(counter,	0444, show_counter, NULL),
+	__ATTR(mask,	0644, show_mask, store_mask),
+	__ATTR(ctrl,	0644, show_ctrl, store_ctrl),
+};
+
+/* "perfcount" parent device registered on the performance subsystem */
+static struct device device_perfcount = {
+	.bus = &performance_subsys,
+	.init_name = "perfcount",
+};
+
+/* One child device per performance counter; .id indexes counter_map */
+static struct device device_perf_counters[] = {
+	{
+		.id = 0,
+		.parent = &device_perfcount,
+		.bus = &performance_subsys,
+	},
+	{
+		.id = 1,
+		.parent = &device_perfcount,
+		.bus = &performance_subsys,
+	},
+};
+
+/*
+ * Register the perfcount sysfs devices and their attribute files.
+ * On any failure everything registered so far is torn down again
+ * (previously partial registrations were leaked on the error paths).
+ */
+static int __init meta_perfcount_init(void)
+{
+	int i, j, ret;
+
+	/* the performance subsystem must have been registered first */
+	if (!performance_subsys.name)
+		return -EINVAL;
+
+	ret = device_register(&device_perfcount);
+	if (ret) {
+		/* a failed device_register() still needs a put */
+		put_device(&device_perfcount);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(device_perf_counters); ++i) {
+		ret = device_register(&device_perf_counters[i]);
+		if (ret) {
+			put_device(&device_perf_counters[i]);
+			goto err_unwind;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(perf_attrs); ++j) {
+			ret = device_create_file(&device_perf_counters[i],
+						 &perf_attrs[j]);
+			if (ret) {
+				device_unregister(&device_perf_counters[i]);
+				goto err_unwind;
+			}
+		}
+	}
+
+	return 0;
+
+err_unwind:
+	/* unregistering also removes the attribute files */
+	while (--i >= 0)
+		device_unregister(&device_perf_counters[i]);
+	device_unregister(&device_perfcount);
+	return ret;
+}
+device_initcall(meta_perfcount_init);
diff --git a/arch/metag/kernel/cpu/l2cache-control.c b/arch/metag/kernel/cpu/l2cache-control.c
new file mode 100644
index 0000000..5d17dbe
--- /dev/null
+++ b/arch/metag/kernel/cpu/l2cache-control.c
@@ -0,0 +1,289 @@
+/*
+ * l2cache-control.c
+ *
+ * Meta Level 2 cache sysfs interface
+ *
+ * Copyright (C) 2011-2012 Imagination Technologies Ltd.
+ * Written by James Hogan <james.hogan@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/core-sysfs.h>
+#include <asm/l2cache.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+/* Report whether the L2 cache is currently enabled (0 or 1). */
+static ssize_t show_l2c_enabled(struct device *sysdev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", meta_l2c_is_enabled() ? 1 : 0);
+}
+
+/* Enable (non-zero) or disable (zero) the L2 cache, logging the outcome. */
+static ssize_t store_l2c_enabled(struct device *sysdev,
+				 struct device_attribute *attr, const char *buf,
+				 size_t count)
+{
+	unsigned long enable;
+
+	if (kstrtoul(buf, 10, &enable))
+		return -EINVAL;
+
+	if (enable) {
+		pr_info("L2 Cache: Enabling... ");
+		pr_cont(meta_l2c_enable() ? "already enabled\n" : "done\n");
+	} else {
+		pr_info("L2 Cache: Disabling... ");
+		pr_cont(meta_l2c_disable() ? "already disabled\n" : "done\n");
+	}
+
+	return count;
+}
+
+/* Report whether L2 cache prefetch is currently enabled (0 or 1). */
+static ssize_t show_l2c_prefetch(struct device *sysdev,
+				 struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", meta_l2c_pf_is_enabled() ? 1 : 0);
+}
+
+/*
+ * Enable (non-zero) or disable (zero) L2 cache prefetch, logging the
+ * outcome.  meta_l2c_pf_enable() appears to return the previous
+ * prefetch state, hence the inverted test on the disable path.
+ */
+static ssize_t store_l2c_prefetch(struct device *sysdev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	unsigned long enable;
+
+	if (kstrtoul(buf, 10, &enable))
+		return -EINVAL;
+
+	if (enable) {
+		pr_info("L2 Cache: Enabling prefetch... ");
+		pr_cont(meta_l2c_pf_enable(1) ? "already enabled\n" : "done\n");
+	} else {
+		pr_info("L2 Cache: Disabling prefetch... ");
+		pr_cont(meta_l2c_pf_enable(0) ? "done\n" : "already disabled\n");
+	}
+
+	return count;
+}
+
+/* when read, we return whether the L2 is a writeback cache (0 or 1) */
+static ssize_t show_l2c_writeback(struct device *sysdev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", meta_l2c_is_writeback() ? 1 : 0);
+}
+
+/* Writing any non-zero value triggers a full L2 writeback. */
+static ssize_t store_l2c_writeback(struct device *sysdev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	unsigned long trigger;
+
+	if (kstrtoul(buf, 10, &trigger))
+		return -EINVAL;
+
+	if (trigger)
+		meta_l2c_writeback();
+
+	return count;
+}
+
+/* Reading the flush attribute always reports 0; it is write-to-trigger. */
+static ssize_t show_l2c_flush(struct device *sysdev,
+			      struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", 0);
+}
+
+/* Writing any non-zero value triggers a full L2 flush. */
+static ssize_t store_l2c_flush(struct device *sysdev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	unsigned long trigger;
+
+	if (kstrtoul(buf, 10, &trigger))
+		return -EINVAL;
+
+	if (trigger)
+		meta_l2c_flush();
+
+	return count;
+}
+
+/*
+ * Report the cache type.  "Separate" rather than "Instruction"/"Data"
+ * because this node represents the L2 cache as a whole.
+ */
+static ssize_t type_show(struct device *sysdev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%s\n",
+		       meta_l2c_is_unified() ? "Unified" : "Separate");
+}
+
+/* Cache level is always 2 for this node. */
+static ssize_t level_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", 2);
+}
+
+/* Report the total L2 cache size in kilobytes, e.g. "256K". */
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%uK\n", meta_l2c_size() >> 10);
+}
+
+/* Report the L2 cache line size in bytes. */
+static ssize_t coherency_line_size_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", meta_l2c_linesize());
+}
+
+/* Number of sets = total size / (associativity * line size). */
+static ssize_t number_of_sets_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned int nsets;
+
+	nsets = meta_l2c_size() / (meta_l2c_ways() * meta_l2c_linesize());
+	return sprintf(buf, "%u\n", nsets);
+}
+
+/* Report the L2 cache associativity (number of ways). */
+static ssize_t ways_of_associativity_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "%u\n", meta_l2c_ways());
+}
+
+/* Report the L2 cache hardware revision number. */
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	return sprintf(buf, "%u\n", meta_l2c_revision());
+}
+
+/* Report the raw L2 cache configuration word in hexadecimal. */
+static ssize_t config_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	return sprintf(buf, "0x%08x\n", meta_l2c_config());
+}
+
+static struct device_attribute l2c_attrs[] = {
+	/*
+	 * These are fairly standard attributes, used by other architectures in
+	 * /sys/devices/system/cpu/cpuX/cache/indexX/ (but on Meta they're
+	 * elsewhere).
+	 */
+	__ATTR_RO(type),
+	__ATTR_RO(level),
+	__ATTR_RO(size),
+	__ATTR_RO(coherency_line_size),
+	__ATTR_RO(number_of_sets),
+	__ATTR_RO(ways_of_associativity),
+
+	/*
+	 * Other read only attributes, specific to Meta.
+	 */
+	__ATTR_RO(revision),
+	__ATTR_RO(config),
+
+	/*
+	 * These can be used to perform operations on the cache, such as
+	 * enabling the cache and prefetch, and triggering a full writeback or
+	 * flush.  Reading enabled/prefetch reports the current state;
+	 * writeback/flush read back as 0 and act only when written non-zero.
+	 */
+	__ATTR(enabled,   0644, show_l2c_enabled, store_l2c_enabled),
+	__ATTR(prefetch,  0644, show_l2c_prefetch, store_l2c_prefetch),
+	__ATTR(writeback, 0644, show_l2c_writeback, store_l2c_writeback),
+	__ATTR(flush,     0644, show_l2c_flush, store_l2c_flush),
+};
+
+/* "l2" device registered on the cache subsystem */
+static struct device device_cache_l2 = {
+	.bus = &cache_subsys,
+	.init_name = "l2",
+};
+
+/*
+ * Register the L2 cache sysfs device and its attribute files (only if
+ * an L2 cache is present).  The device is unregistered again if
+ * attribute creation fails (previously a partial registration was
+ * leaked on the error paths).
+ */
+static int __init meta_l2c_sysfs_init(void)
+{
+	int i, ret;
+
+	/* the cache subsystem must have been registered first */
+	if (!cache_subsys.name)
+		return -EINVAL;
+
+	/* if there's no L2 cache, don't add the sysfs nodes */
+	if (!meta_l2c_is_present())
+		return 0;
+
+	ret = device_register(&device_cache_l2);
+	if (ret) {
+		/* a failed device_register() still needs a put */
+		put_device(&device_cache_l2);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(l2c_attrs); i++) {
+		ret = device_create_file(&device_cache_l2,
+					 &l2c_attrs[i]);
+		if (ret) {
+			/* unregistering also removes any files created */
+			device_unregister(&device_cache_l2);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+device_initcall(meta_l2c_sysfs_init);
diff --git a/arch/metag/kernel/cpu/memory-arbiter.c b/arch/metag/kernel/cpu/memory-arbiter.c
new file mode 100644
index 0000000..d862b8d
--- /dev/null
+++ b/arch/metag/kernel/cpu/memory-arbiter.c
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/metag/kernel/cpu/memory-arbiter.c
+ *
+ * Meta memory arbiter sysfs interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Make sure we include these first with the TXUXXRX values available,
+ * or else we cannot get hold of them later on after somebody else has
+ * included them from the arch headers.
+ */
+
+#include <asm/core_reg.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* Address of the memory arbiter register of a hardware thread; the
+ * argument is parenthesized so expression arguments expand safely. */
+#define MEMARBITER_REG(thread)						\
+	(EXPAND_T0ARBITER + (EXPAND_TnARBITER_STRIDE * (thread)))
+
+/* Selector for which hardware thread's arbiter register to access */
+enum thread_id {
+	thread0,
+	thread1,
+	thread2,
+	thread3
+};
+
+/* Format the current arbiter setting for @thread into @buf. */
+static ssize_t show_memarbiter(enum thread_id thread, char *buf)
+{
+	u32 regval = metag_in32(MEMARBITER_REG(thread));
+
+	return sprintf(buf, "%u\n", regval);
+}
+
+/*
+ * Parse a decimal value from @buf and write it to @thread's arbiter
+ * register.  Returns @count on success or -EINVAL on a malformed value
+ * (previously parse errors were silently swallowed and success was
+ * reported to userspace).  This matches the write-combiner interface.
+ */
+static ssize_t store_memarbiter(enum thread_id thread, const char *buf,
+				size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	metag_out32(val, MEMARBITER_REG(thread));
+
+	return count;
+}
+
+/*
+ * Generate the per-thread sysfs show/store handlers, forwarding to
+ * show_memarbiter()/store_memarbiter() with the matching thread_id.
+ */
+#define SYSFS_MEMARBITER_SETUP(NAME) \
+static ssize_t show_##NAME##_ma(struct device *dev,  \
+				struct device_attribute *attr, char *buf) \
+{ \
+	return show_memarbiter(NAME, buf); \
+} \
+static ssize_t store_##NAME##_ma(struct device *dev, \
+				struct device_attribute *attr, \
+				const char *buf, size_t count) \
+{ \
+	return store_memarbiter(NAME, buf, count); \
+}
+
+SYSFS_MEMARBITER_SETUP(thread0);
+SYSFS_MEMARBITER_SETUP(thread1);
+SYSFS_MEMARBITER_SETUP(thread2);
+SYSFS_MEMARBITER_SETUP(thread3);
+
+/* One read/write attribute per hardware thread */
+static DEVICE_ATTR(thread0, 0644, show_thread0_ma, store_thread0_ma);
+static DEVICE_ATTR(thread1, 0644, show_thread1_ma, store_thread1_ma);
+static DEVICE_ATTR(thread2, 0644, show_thread2_ma, store_thread2_ma);
+static DEVICE_ATTR(thread3, 0644, show_thread3_ma, store_thread3_ma);
+
+/* NULL terminated; trimmed at init time to the threads that exist */
+static struct attribute *memory_arbiter_root_attrs[] = {
+	&dev_attr_thread0.attr,
+	&dev_attr_thread1.attr,
+	&dev_attr_thread2.attr,
+	&dev_attr_thread3.attr,
+	NULL,
+};
+
+static struct attribute_group memory_arbiter_root_attr_group = {
+	.attrs = memory_arbiter_root_attrs,
+};
+
+static const struct attribute_group *memory_arbiter_root_attr_groups[] = {
+	&memory_arbiter_root_attr_group,
+	NULL,
+};
+
+/* NOTE(review): not static — confirm no other file references this,
+ * otherwise it could be made static like root_domain in irq.c */
+struct bus_type memory_arbiter_subsys = {
+	.name = "memory_arbiter",
+	.dev_name = "ma",
+};
+
+/*
+ * Register the memory_arbiter subsystem, first hiding the attributes of
+ * hardware threads which don't exist.
+ */
+static int __init meta_memarbiter_init(void)
+{
+	int i, exists;
+
+	/*
+	 * Modify number of threads displayed.  The last array entry is
+	 * the NULL terminator, so only probe ARRAY_SIZE()-1 threads
+	 * (previously one extra, nonexistent thread could be probed).
+	 */
+	for (i = 0; i < ARRAY_SIZE(memory_arbiter_root_attrs) - 1; i++) {
+		exists = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i);
+		if (!exists) {
+			memory_arbiter_root_attrs[i] = NULL;
+			break;
+		}
+	}
+
+	return subsys_system_register(&memory_arbiter_subsys,
+				      memory_arbiter_root_attr_groups);
+}
+device_initcall(meta_memarbiter_init);
diff --git a/arch/metag/kernel/cpu/write-combiner.c b/arch/metag/kernel/cpu/write-combiner.c
new file mode 100644
index 0000000..d5203b5
--- /dev/null
+++ b/arch/metag/kernel/cpu/write-combiner.c
@@ -0,0 +1,215 @@
+/*
+ * linux/arch/metag/drivers/write_combiner.c
+ *
+ * Meta write combiner sysfs interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Make sure we include these first with the TXUXXRX values available,
+ * or else we cannot get hold of them later on after somebody else has
+ * included them from the arch headers.
+ */
+
+#include <asm/core_reg.h>
+#include <asm/core-sysfs.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* Address of the write combiner register of a hardware thread; the
+ * argument is parenthesized so expression arguments expand safely. */
+#define WRCOMBINER_REG(thread)						\
+	(EXPAND_T0WRCOMBINE + (EXPAND_TnWRCOMBINE_STRIDE * (thread)))
+
+/* Selector for which hardware thread's write combiner register to access */
+enum thread_id {
+	thread0,
+	thread1,
+	thread2,
+	thread3
+};
+
+/* Format the current write combiner setting for @thread into @buf. */
+static ssize_t show_wrcombiner(enum thread_id thread, char *buf)
+{
+	u32 regval = metag_in32(WRCOMBINER_REG(thread));
+
+	return sprintf(buf, "%u\n", regval);
+}
+
+/*
+ * Parse a decimal value from @buf and write it to @thread's write
+ * combiner register.  Returns @count, or -EINVAL on a malformed value.
+ */
+static ssize_t store_wrcombiner(enum thread_id thread, const char *buf,
+				size_t count)
+{
+	unsigned long newval;
+
+	if (kstrtoul(buf, 10, &newval))
+		return -EINVAL;
+
+	metag_out32(newval, WRCOMBINER_REG(thread));
+	return count;
+}
+
+/*
+ * Generate the per-thread sysfs show/store handlers, forwarding to
+ * show_wrcombiner()/store_wrcombiner() with the matching thread_id.
+ */
+#define SYSFS_WRCOMBINER_SETUP(NAME) \
+static ssize_t show_##NAME##_wc(struct device *dev,  \
+				struct device_attribute *attr, char *buf) \
+{ \
+	return show_wrcombiner(NAME, buf); \
+} \
+static ssize_t store_##NAME##_wc(struct device *dev, \
+				 struct device_attribute *attr, \
+				 const char *buf, size_t count) \
+{ \
+	return store_wrcombiner(NAME, buf, count); \
+}
+
+/* Show the raw value of the EXPAND_PERFCHAN0 register. */
+static ssize_t show_perfchan0(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	u32 chanval = metag_in32(EXPAND_PERFCHAN0);
+
+	return sprintf(buf, "%u\n", chanval);
+}
+
+/*
+ * Set the performance channel 0 control field.  The parsed value is
+ * masked to the control field so an oversized input cannot clobber the
+ * neighbouring bits of the register.
+ */
+static ssize_t store_perfchan0(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	u32 read_val;
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	val &= EXPPERF_CTRL_BITS;
+	read_val = metag_in32(EXPAND_PERFCHAN0) & ~EXPPERF_CTRL_BITS;
+	metag_out32(read_val | val, EXPAND_PERFCHAN0);
+	return count;
+}
+
+/* Show the raw value of the EXPAND_PERFCHAN1 register. */
+static ssize_t show_perfchan1(struct device *sysdev,
+			      struct device_attribute *attr, char *buf)
+{
+	u32 chanval = metag_in32(EXPAND_PERFCHAN1);
+
+	return sprintf(buf, "%u\n", chanval);
+}
+
+/*
+ * Set the performance channel 1 control field.  The parsed value is
+ * masked to the control field so an oversized input cannot clobber the
+ * neighbouring bits of the register.
+ */
+static ssize_t store_perfchan1(struct device *sysdev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	u32 read_val;
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	val &= EXPPERF_CTRL_BITS;
+	read_val = metag_in32(EXPAND_PERFCHAN1) & ~EXPPERF_CTRL_BITS;
+	metag_out32(read_val | val, EXPAND_PERFCHAN1);
+	return count;
+}
+
+SYSFS_WRCOMBINER_SETUP(thread0);
+SYSFS_WRCOMBINER_SETUP(thread1);
+SYSFS_WRCOMBINER_SETUP(thread2);
+SYSFS_WRCOMBINER_SETUP(thread3);
+
+/* One read/write attribute per hardware thread */
+static DEVICE_ATTR(thread0, 0644, show_thread0_wc, store_thread0_wc);
+static DEVICE_ATTR(thread1, 0644, show_thread1_wc, store_thread1_wc);
+static DEVICE_ATTR(thread2, 0644, show_thread2_wc, store_thread2_wc);
+static DEVICE_ATTR(thread3, 0644, show_thread3_wc, store_thread3_wc);
+
+/* NULL terminated; trimmed at init time to the threads that exist */
+static struct attribute *write_combiner_root_attrs[] = {
+	&dev_attr_thread0.attr,
+	&dev_attr_thread1.attr,
+	&dev_attr_thread2.attr,
+	&dev_attr_thread3.attr,
+	NULL,
+};
+
+static struct attribute_group write_combiner_root_attr_group = {
+	.attrs = write_combiner_root_attrs,
+};
+
+static const struct attribute_group *write_combiner_root_attr_groups[] = {
+	&write_combiner_root_attr_group,
+	NULL,
+};
+
+/* Performance channel control attributes (on the performance subsystem) */
+static struct device_attribute perfchan_attrs[] = {
+	__ATTR(perfchan0, 0644, show_perfchan0, store_perfchan0),
+	__ATTR(perfchan1, 0644, show_perfchan1, store_perfchan1),
+};
+
+/* NOTE(review): not static — confirm no other file references this */
+struct bus_type write_combiner_subsys = {
+	.name = "write_combiner",
+	.dev_name = "wc",
+};
+
+/* "write_combiner" device registered on the performance subsystem */
+static struct device device_perf_write_combiner = {
+	.bus = &performance_subsys,
+	.init_name = "write_combiner",
+};
+
+/*
+ * Register the write_combiner subsystem (hiding attributes of threads
+ * which don't exist), then the perfchan files on the performance
+ * subsystem.  The perf device is unregistered again if attribute
+ * creation fails (previously a partial registration was leaked).
+ */
+static int __init meta_writecombiner_init(void)
+{
+	int i, exists, ret;
+
+	/*
+	 * Modify number of threads displayed.  The last array entry is
+	 * the NULL terminator, so only probe ARRAY_SIZE()-1 threads
+	 * (previously one extra, nonexistent thread could be probed).
+	 */
+	for (i = 0; i < ARRAY_SIZE(write_combiner_root_attrs) - 1; i++) {
+		exists = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i);
+		if (!exists) {
+			write_combiner_root_attrs[i] = NULL;
+			break;
+		}
+	}
+
+	ret = subsys_system_register(&write_combiner_subsys,
+				     write_combiner_root_attr_groups);
+	if (ret)
+		return ret;
+
+	/* the perfchan files hang off the performance subsystem */
+	if (!performance_subsys.name)
+		return -EINVAL;
+
+	ret = device_register(&device_perf_write_combiner);
+	if (ret) {
+		/* a failed device_register() still needs a put */
+		put_device(&device_perf_write_combiner);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(perfchan_attrs); i++) {
+		ret = device_create_file(&device_perf_write_combiner,
+					 &perfchan_attrs[i]);
+		if (ret) {
+			/* unregistering also removes any files created */
+			device_unregister(&device_perf_write_combiner);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+device_initcall(meta_writecombiner_init);
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 8c00ded..300feb5 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -34,6 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/static_key.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
@@ -427,6 +428,19 @@
 }
 early_initcall(dma_alloc_init);
 
+#ifdef CONFIG_METAG_L2C
+/*
+ * Invalidate (may also write back if necessary).
+ * With an L2 that supports invalidation, invalidate L1D and L2 directly;
+ * otherwise fall back to a full flush of the region.
+ */
+static inline void invalidate_dcache_region(void *start, unsigned long size)
+{
+	if (meta_l2c_has_invalidate())
+		cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
+	else
+		flush_dcache_region(start, size);
+}
+#else
+/* No L2 cache configured: a flush of the L1 data cache suffices */
+#define invalidate_dcache_region(s, l)	flush_dcache_region((s), (l))
+#endif
+
 /*
  * make an area consistent to devices.
  */
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 87707ef..2a2c9d5 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -25,7 +25,7 @@
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
-struct irq_domain *root_domain;
+static struct irq_domain *root_domain;
 
 static unsigned int startup_meta_irq(struct irq_data *data)
 {
@@ -279,11 +279,12 @@
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	unsigned long flags;
 
-	raw_spin_lock_irq(&desc->lock);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (chip->irq_set_affinity)
 		chip->irq_set_affinity(data, cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 /*
diff --git a/arch/metag/kernel/kgdb.c b/arch/metag/kernel/kgdb.c
new file mode 100644
index 0000000..8f650d5
--- /dev/null
+++ b/arch/metag/kernel/kgdb.c
@@ -0,0 +1,536 @@
+/*
+ * Meta KGDB support
+ *
+ * Copyright (C) 2008 - 2009  Paul Mundt
+ * Copyright (C) 2011 - 2012  Imagination Technologies Ltd
+ *
+ * Based on SH version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/disas.h>
+
+#define STEP_OPCODE		0xaf400001
+
+/**
+ * get_reg_by_unit() - Get the value of a register by unit.reg in regs.
+ * @regs:	Register state.
+ * @unit:	Meta unit number.
+ * @reg:	Meta register number.
+ * @val:	Output value.
+ *
+ * Puts the value of a register in *val and returns 0, or returns non-zero on
+ * failure.
+ */
+static int get_reg_by_unit(struct pt_regs *regs, int unit, int reg,
+			   unsigned long *val)
+{
+	switch (unit) {
+	case UNIT_CT:
+		switch (reg) {
+		case 1: /* TXMODE */
+			*val = regs->ctx.CurrMODE;
+			break;
+		case 2: /* TXSTATUS */
+			*val = regs->ctx.Flags;
+			break;
+		case 3: /* TXRPT */
+			*val = regs->ctx.CurrRPT;
+			break;
+		case 11: /* TXBPOBITS */
+			*val = regs->ctx.CurrBPOBITS;
+			break;
+		case 28: /* TXDIVTIME */
+			*val = regs->ctx.CurrDIVTIME;
+			break;
+		default:
+			return 1;
+		}
+		/* don't fall through into UNIT_D0 and clobber *val */
+		break;
+	case UNIT_D0:
+		if (reg < 8)
+			*val = regs->ctx.DX[reg].U0;
+		else
+			return 1;
+		break;
+	case UNIT_D1:
+		if (reg < 8)
+			*val = regs->ctx.DX[reg].U1;
+		else
+			return 1;
+		break;
+	case UNIT_A0:
+		if (reg < 2)
+			*val = regs->ctx.AX[reg].U0;
+		else if (reg == 2)
+			*val = regs->ctx.Ext.AX2.U0;
+		else if (reg == 3)
+			*val = regs->ctx.AX3[0].U0;
+		else
+			return 1;
+		break;
+	case UNIT_A1:
+		if (reg < 2)
+			*val = regs->ctx.AX[reg].U1;
+		else if (reg == 2)
+			*val = regs->ctx.Ext.AX2.U1;
+		else if (reg == 3)
+			*val = regs->ctx.AX3[0].U1;
+		else
+			return 1;
+		break;
+	case UNIT_PC:
+		if (reg == 0) /* PC */
+			*val = regs->ctx.CurrPC;
+		else
+			return 1;
+		break;
+	default:
+		/* unknown unit: fail rather than leave *val unset */
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * get_step_address() - Calculate the step address (next PC).
+ * @linux_regs:		Register state.
+ *
+ * Returns the next PC to break on when single stepping.
+ */
+static long get_step_address(struct pt_regs *linux_regs)
+{
+	long pc = linux_regs->ctx.CurrPC;
+	/* unsigned long to match get_reg_by_unit()'s out-parameter type */
+	unsigned long addr;
+	unsigned long op = __raw_readl((unsigned long *)pc);
+	unsigned long opcode, group;
+
+	/* Bcc */
+	if (OP_BCC(op)) {
+		if (!test_cc(linux_regs, OP_BCC_CC(op)))
+			goto no_jump;
+		/* branch repeat automatically uses TXRPT as counter */
+		if (OP_BCC_R(op) && !linux_regs->ctx.CurrRPT)
+			goto no_jump;
+		return pc + OP_BCC_IMM(op);
+	/* JUMP/CALL */
+	} else if (OP_JUMP(op)) {
+		if (get_reg_by_unit(linux_regs, OP_JUMP_UB(op), OP_JUMP_RS(op),
+				    &addr))
+			goto unknown;
+		return (addr + OP_JUMP_IMM(op)) & -INSN_SIZE;
+	/* CALLR */
+	} else if (OP_CALLR(op)) {
+		return pc + OP_CALLR_IMM(op);
+	}
+
+	/* MOV PC,... */
+	opcode = op >> 24;
+	group = opcode >> 4;
+	/* add,and,or,xor,shift, aadd, mul */
+	if (group < 6 || group == 8 || (opcode & 0xfc) == 0x84) {
+		/*
+		 * conditional form
+		 * unit (0xf << 4) == PC
+		 * reg (0x1f << 19) == 0
+		 */
+		if ((op & 0x06f801e0) == 0x040000a0) {
+			int cc = (op >> 1) & 0xf;
+			if (!test_cc(linux_regs, cc))
+				goto no_jump;
+			/* address unit add */
+			if (group == 8) {
+				int pm = op & (1 << 27);
+				int au = (op >> 24) & 0x1;
+				int pc1 = op & (1 << 18);
+				int rs1 = (op >> 14) & 0xf;
+				int pc2 = op & (1 << 13);
+				int rs2 = (op >> 9) & 0x1f;
+				int o2r = op & (1 << 0);
+				int us1, us2;
+				unsigned long src1, src2;
+
+				us1 = UNIT_A0 + au;
+				if (o2r) {
+					us2 = decode_o2r(us1, &rs2);
+					pc2 = 0;
+				} else {
+					us2 = us1;
+				}
+
+				if (!pc1 && get_reg_by_unit(linux_regs, us1,
+							    rs1, &src1))
+					goto unknown;
+				if (!pc2 && get_reg_by_unit(linux_regs, us2,
+							    rs2, &src2))
+					goto unknown;
+
+				if (pc1 && pc2) {
+					/* src1 = src2 = pc (silly) */
+					if (pm)
+						return 0;
+					else
+						return pc * 2;
+				} else if (pc1) {
+					/* src1 = pc */
+					if (pm)
+						return pc - src2;
+					else
+						return pc + src2;
+				} else if (pc2) {
+					/* src2 = pc */
+					if (pm)
+						return src1 - pc;
+					else
+						return src1 + pc;
+				} else {
+					if (pm)
+						return src1 - src2;
+					else
+						return src1 + src2;
+				}
+			}
+			pr_debug("KGDB: Confused, arithmetic conditional with PC destination @%08lx: %08lx\n",
+				 pc, op);
+			goto unknown;
+		}
+		/* conditional immediate form with condition always
+		 * ca (1 << 5) == 1
+		 * unit (0xf << 1) == PC
+		 * reg (0x1f << 19) == 0
+		 */
+		if ((op & 0x06f8003e) == 0x0600002a) {
+			pr_debug("KGDB: Confused, arithmetic immediate with PC destination @%08lx: %08lx\n",
+				 pc, op);
+			goto unknown;
+		}
+	}
+
+	/* RTH */
+	if (op == 0xa37fffff) {
+		/*
+		 * Well it probably will jump, but the point of this is to
+		 * disambiguate the conflict with TTMOV.
+		 */
+		goto no_jump;
+	}
+
+	/* SWAP/MOV/TTMOV */
+	if ((op & 0xfff83e01) == 0xa3001600 || /* SWAP ..., PC */
+	    (op & 0xff07c1e1) == 0xa30000a0 || /* SWAP/MOV PC, ... */
+	    (op & 0xff07c3e1) == 0xa30002a1) { /* TTMOV PC, ... */
+		int cc = (op >> 1) & 0xf;
+		int swap = 0;
+		int ud, rd;
+		if (!test_cc(linux_regs, cc))
+			goto no_jump;
+		/* if not TT, read swap bit */
+		if (!(op & 1))
+			swap = op & (1 << 9);
+		/* read source register */
+		ud = (op >> 10) & 0xf;
+		rd = (op >> 19) & 0x1f;
+		/* if swap and source is PC, read destination operand */
+		if (swap && (ud == UNIT_PC || !rd)) {
+			ud = (op >> 5) & 0xf;
+			rd = (op >> 14) & 0x1f;
+		}
+		/* read the value of the register */
+		if (get_reg_by_unit(linux_regs, ud, rd, &addr))
+			goto unknown;
+		return addr & -INSN_SIZE;
+	}
+
+	/* DEFRcc PC, ...*/
+	if ((op & 0xfff83fe1) == 0xa30020a1) {
+		int cc = (op >> 1) & 0xf;
+		if (!test_cc(linux_regs, cc))
+			goto no_jump;
+		pr_debug("KGDB: Confused, DEFRcc PC,... @%08lx: %08lx\n",
+			 pc, op);
+		goto unknown;
+	}
+	/* KICKcc PC, ... */
+	if ((op & 0xfff83fe1) == 0xa30000a1) {
+		int cc = (op >> 1) & 0xf;
+		if (!test_cc(linux_regs, cc))
+			goto no_jump;
+		pr_debug("KGDB: Confused, KICKcc PC,... @%08lx: %08lx\n",
+			 pc, op);
+		goto unknown;
+	}
+
+	/* GETD PC, [Rx.y] */
+	if ((op & 0xfff8001e) == 0xc600000a) {
+		int rb = (op >> 14) & 0x1f;
+		int imm = ((int)(op << 18) >> 26) << 2;
+		int bu = (op >> 5) & 0x3;
+		int pp = op & (1 << 0);
+		int ub;
+
+		ub = metag_bu_map[bu];
+		pr_debug("KGDB: GETD PC, [%d.%d+%d]\n", ub, rb, imm);
+		if (get_reg_by_unit(linux_regs, ub, rb, &addr))
+			goto unknown;
+
+		if (!pp)
+			addr += imm;
+
+		pr_debug("KGDB: GETD attempting to read [%08lx]\n",
+			 addr);
+		addr = __raw_readl((unsigned long *)addr);
+		pr_debug("KGDB: GETD PC, %08lx\n", addr);
+		return addr & -INSN_SIZE;
+	}
+
+no_jump:
+	return pc + INSN_SIZE;
+unknown:
+	/* 0 = could not decode the instruction; caller must not step */
+	return 0;
+}
+
+static unsigned long stepped_address;
+static unsigned long stepped_opcode;
+
+/**
+ * do_single_step() - Set up a single step.
+ * @linux_regs:		Register state.
+ *
+ * Replace the instruction immediately after the current instruction
+ * (i.e. next in the expected flow of control) with a trap instruction,
+ * so that returning will cause only a single instruction to be executed.
+ */
+static void do_single_step(struct pt_regs *linux_regs)
+{
+	/* Determine where the target instruction will send us to */
+	long addr = get_step_address(linux_regs);
+	unsigned long *paddr = (unsigned long *)addr;
+
+	/*
+	 * get_step_address() returns 0 when it cannot decode the
+	 * instruction; only bail out in that case (the original check
+	 * was inverted and returned for every valid step address).
+	 */
+	if (unlikely(!addr))
+		return;
+
+	stepped_address = addr;
+
+	/* Replace it with the step breakpoint opcode */
+	stepped_opcode = __raw_readl(paddr);
+	__raw_writel(STEP_OPCODE, paddr);
+
+	/* Flush and return */
+	flush_icache_range(addr, addr + INSN_SIZE);
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *linux_regs)
+{
+	/* If we have stepped, put back the old instruction */
+	/* Use stepped_address in case we stopped elsewhere */
+	if (stepped_opcode != 0) {
+		__raw_writel(stepped_opcode, (unsigned long *)stepped_address);
+		/* make the restored opcode visible to instruction fetch */
+		flush_icache_range(stepped_address,
+				   stepped_address + INSN_SIZE);
+	}
+
+	stepped_opcode = 0;
+}
+
+/* Flatten a hardware TBI context into gdb's register array layout */
+static void tbictx_to_gdb_regs(unsigned long *gdb_regs, TBICTX *regs)
+{
+	int i;
+
+	gdb_regs[GDB_TXSTATUS]	= regs->Flags;
+	gdb_regs[GDB_PC]	= regs->CurrPC;
+	/* data unit register pairs D0.i / D1.i */
+	for (i = 0; i < 8; ++i) {
+		gdb_regs[GDB_D0 + i] = regs->DX[i].U0;
+		gdb_regs[GDB_D1 + i] = regs->DX[i].U1;
+	}
+	gdb_regs[GDB_TXRPT]	= regs->CurrRPT;
+	gdb_regs[GDB_TXBPOBITS]	= regs->CurrBPOBITS;
+	gdb_regs[GDB_TXMODE]	= regs->CurrMODE;
+	gdb_regs[GDB_TXDIVTIME]	= regs->CurrDIVTIME;
+	/* A0.0/A0.1 live in AX[]; A0.2 and A0.3 are stored separately */
+	for (i = 0; i < 2; ++i) {
+		gdb_regs[GDB_A0 + i] = regs->AX[i].U0;
+		gdb_regs[GDB_A1 + i] = regs->AX[i].U1;
+	}
+	gdb_regs[GDB_A0 + 2]	= regs->Ext.AX2.U0;
+	gdb_regs[GDB_A1 + 2]	= regs->Ext.AX2.U1;
+	gdb_regs[GDB_A0 + 3]	= regs->AX3[0].U0;
+	gdb_regs[GDB_A1 + 3]	= regs->AX3[0].U1;
+}
+
+/* Inverse of tbictx_to_gdb_regs(): write gdb's array back to the context */
+static void gdb_regs_to_tbictx(unsigned long *gdb_regs, TBICTX *regs)
+{
+	int i;
+
+	regs->Flags		= gdb_regs[GDB_TXSTATUS];
+	regs->CurrPC		= gdb_regs[GDB_PC];
+	/* data unit register pairs D0.i / D1.i */
+	for (i = 0; i < 8; ++i) {
+		regs->DX[i].U0	= gdb_regs[GDB_D0 + i];
+		regs->DX[i].U1	= gdb_regs[GDB_D1 + i];
+	}
+	regs->CurrRPT		= gdb_regs[GDB_TXRPT];
+	regs->CurrBPOBITS	= gdb_regs[GDB_TXBPOBITS];
+	regs->CurrMODE		= gdb_regs[GDB_TXMODE];
+	regs->CurrDIVTIME	= gdb_regs[GDB_TXDIVTIME];
+	/* A0.0/A0.1 live in AX[]; A0.2 and A0.3 are stored separately */
+	for (i = 0; i < 2; ++i) {
+		regs->AX[i].U0	= gdb_regs[GDB_A0 + i];
+		regs->AX[i].U1	= gdb_regs[GDB_A1 + i];
+	}
+	regs->Ext.AX2.U0	= gdb_regs[GDB_A0 + 2];
+	regs->Ext.AX2.U1	= gdb_regs[GDB_A1 + 2];
+	regs->AX3[0].U0		= gdb_regs[GDB_A0 + 3];
+	regs->AX3[0].U1		= gdb_regs[GDB_A1 + 3];
+}
+
+/* Convert current exception register state to gdb's layout */
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	tbictx_to_gdb_regs(gdb_regs, &regs->ctx);
+}
+
+/* Write gdb-modified registers back into the exception state */
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	gdb_regs_to_tbictx(gdb_regs, &regs->ctx);
+}
+
+/* Report the saved kernel context of a non-running task to gdb */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+	tbictx_to_gdb_regs(gdb_regs, p->thread.kernel_context);
+}
+
+#ifdef CONFIG_SMP
+/* IPI callback: report this CPU's interrupt registers to the KGDB core */
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+/* Bring the other online CPUs into the debugger */
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	/* smp_call_function() requires interrupts enabled on the caller */
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+#endif
+
+/*
+ * Handle the gdb remote protocol commands the arch must implement:
+ * 'c'ontinue, 's'tep, 'D'etach and 'k'ill.  Returns 0 when the command
+ * was consumed, -1 to keep the KGDB core in its handler loop.
+ */
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+			       char *remcomInBuffer, char *remcomOutBuffer,
+			       struct pt_regs *linux_regs)
+{
+	unsigned long addr;
+	char *ptr;
+	unsigned long instr;
+
+	/* Undo any stepping we may have done */
+	undo_single_step(linux_regs);
+
+	switch (remcomInBuffer[0]) {
+	case 'c':
+	case 's':
+		/*
+		 * Try to read optional parameter, pc unchanged if no parm.
+		 * If this was a compiled breakpoint, we need to move to the
+		 * next instruction or we will just breakpoint over and over
+		 * again.
+		 */
+		ptr = &remcomInBuffer[1];
+		if (kgdb_hex2long(&ptr, &addr))
+			linux_regs->ctx.CurrPC = addr;
+		else if (!probe_kernel_read(&instr,
+					    (void *)linux_regs->ctx.CurrPC, 4)
+			 && instr ==  __METAG_SW_ENCODING(PERM_BREAK))
+			linux_regs->ctx.CurrPC += 4;
+		/* fall through - 'c'/'s' share the resume path below */
+	case 'D':
+	case 'k':
+		atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+		if (remcomInBuffer[0] == 's') {
+			do_single_step(linux_regs);
+			kgdb_single_step = 1;
+
+			atomic_set(&kgdb_cpu_doing_single_step,
+				   raw_smp_processor_id());
+		}
+
+		return 0;
+	}
+
+	/* this means that we do not want to exit from the handler: */
+	return -1;
+}
+
+/* PC value reported to gdb for the current exception */
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+/* Redirect execution by rewriting the saved PC */
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->ctx.CurrPC = ip;
+}
+
+/* Decide whether a die notification should enter the KGDB core */
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	switch (cmd) {
+	case DIE_TRAP:
+		/*
+		 * This means a user thread is single stepping
+		 * a system call which should be ignored
+		 */
+		if (test_thread_flag(TIF_SINGLESTEP))
+			return NOTIFY_DONE;
+
+		/* fall through */
+	default:
+		/* user-mode faults are not the debugger's business */
+		if (user_mode(regs))
+			return NOTIFY_DONE;
+	}
+
+	/* non-zero return means KGDB did not consume the exception */
+	if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
+				  regs))
+		return NOTIFY_DONE;
+
+	return NOTIFY_STOP;
+}
+
+/* die-chain callback: run __kgdb_notify() with interrupts disabled */
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __kgdb_notify(ptr, cmd);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+
+	/*
+	 * Lowest-prio notifier priority, we want to be notified last:
+	 */
+	.priority	= -INT_MAX,
+};
+
+/* Hook KGDB into the die notifier chain so traps enter the debugger */
+int kgdb_arch_init(void)
+{
+	return register_die_notifier(&kgdb_notifier);
+}
+
+/* Unhook KGDB from the die notifier chain */
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+	/* Breakpoint instruction: SWITCH #0x400001 */
+	.gdb_bpt_instr		= { 0x01, 0x00, 0x40, 0xaf },
+};
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
index 50fcbec..beb3776 100644
--- a/arch/metag/kernel/kick.c
+++ b/arch/metag/kernel/kick.c
@@ -26,6 +26,8 @@
  * pass it as an argument.
  */
 #include <linux/export.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/types.h>
@@ -66,6 +68,7 @@
 TBIRES
 kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
 {
+	struct pt_regs *old_regs;
 	struct kick_irq_handler *kh;
 	struct list_head *lh;
 	int handled = 0;
@@ -79,6 +82,9 @@
 
 	trace_hardirqs_off();
 
+	old_regs = set_irq_regs((struct pt_regs *)State.Sig.pCtx);
+	irq_enter();
+
 	/*
 	 * There is no need to disable interrupts here because we
 	 * can't nest KICK interrupts in a KICK interrupt handler.
@@ -97,5 +103,8 @@
 
 	WARN_ON(!handled);
 
+	irq_exit();
+	set_irq_regs(old_regs);
+
 	return tail_end(ret);
 }
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index ec872ef..f14e653 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -1,10 +1,14 @@
 #include <linux/export.h>
+#include <linux/types.h>
 
+#include <asm/checksum.h>
+#include <asm/delay.h>
 #include <asm/div64.h>
 #include <asm/ftrace.h>
 #include <asm/page.h>
 #include <asm/string.h>
 #include <asm/tbx.h>
+#include <asm/uaccess.h>
 
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
@@ -15,11 +19,39 @@
 EXPORT_SYMBOL(min_low_pfn);
 #endif
 
+/* Network checksum functions */
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(ip_compute_csum);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy);
+
+/* Delay functions */
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+
+/* Userspace copying functions */
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(__get_user_asm_b);
+EXPORT_SYMBOL(__get_user_asm_w);
+EXPORT_SYMBOL(__get_user_asm_d);
+EXPORT_SYMBOL(__put_user_asm_b);
+EXPORT_SYMBOL(__put_user_asm_w);
+EXPORT_SYMBOL(__put_user_asm_d);
+EXPORT_SYMBOL(__put_user_asm_l);
+EXPORT_SYMBOL(strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
 /* TBI symbols */
 EXPORT_SYMBOL(__TBI);
 EXPORT_SYMBOL(__TBIFindSeg);
 EXPORT_SYMBOL(__TBIPoll);
 EXPORT_SYMBOL(__TBITimeStamp);
+EXPORT_SYMBOL(__TBITransStr);
 
 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
 
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 3665694..5490765 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -628,6 +628,8 @@
 
 	/* Check for a core internal or performance channel event. */
 	if (tmp) {
+		/* PERF_ICORE/PERF_CHAN only exist since Meta2 */
+#ifdef METAC_2_1
 		void *perf_addr;
 
 		/*
@@ -650,6 +652,7 @@
 
 		if (perf_addr)
 			metag_out32((config & 0x0f), perf_addr);
+#endif
 
 		/*
 		 * Now we use the high nibble as the performance event to
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index 483dff9..477a66e 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -174,6 +174,41 @@
 	show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
 }
 
+#ifdef CONFIG_SOC_CHORUS2
+/*
+ * Fault in and lock every file-backed executable vma of @tsk so its
+ * code pages stay resident (Chorus2 code-fetch workaround).
+ * Returns the last get_user_pages() result: negative errno on failure.
+ *
+ * NOTE(review): vm_flags is written with mmap_sem held only for read -
+ * confirm no concurrent writer can race with this.
+ */
+static int lock_executable_vmas(struct task_struct *tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+	struct vm_area_struct *vma = mm->mmap;
+	unsigned long start, end;
+	int ret = 0, write, len;
+
+	down_read(&mm->mmap_sem);
+
+	while (vma) {
+		/* All code mappings will have EXEC|READ set. It's possible
+		 * to create a mapping with just EXEC, but get_user_pages()
+		 * will fail on such a mapping so we must skip it here.
+		 */
+		if ((vma->vm_flags & (VM_EXEC|VM_READ)) == (VM_EXEC|VM_READ)
+		    && vma->vm_file) {
+			vma->vm_flags |= VM_LOCKED;
+			start = vma->vm_start;
+			end = vma->vm_end;
+			write = (vma->vm_flags & VM_WRITE) != 0;
+			/* number of whole pages in [start, end) */
+			len = DIV_ROUND_UP(end, PAGE_SIZE) - start/PAGE_SIZE;
+			ret = get_user_pages(tsk, tsk->mm, start,
+					     len, write, 0, NULL, NULL);
+			if (ret < 0)
+				goto out;
+		}
+		vma = vma->vm_next;
+	}
+out:
+	up_read(&mm->mmap_sem);
+	return ret;
+}
+#endif
+
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *tsk)
 {
@@ -252,6 +287,10 @@
 	}
 #endif
 
+#ifdef CONFIG_SOC_CHORUS2
+	lock_executable_vmas(tsk);
+#endif
+
 	return 0;
 }
 
@@ -332,6 +371,24 @@
 	return (struct task_struct *) from.Switch.pPara;
 }
 
+#ifdef CONFIG_SOC_CHORUS2
+/*
+ * Chorus2 variant of execve: after a successful image load, lock all
+ * file-backed executable vmas of the new image.
+ */
+long sys_execve_chorus2(const char __user *filename,
+			const char __user *const __user *argv,
+			const char __user *const __user *envp)
+{
+	long error = sys_execve(filename, argv, envp);
+	if (error == 0) {
+		/*
+		 * To workaround a chip bug on Chorus2 we must make sure
+		 * we never take a code fetch page fault. We lock all
+		 * non-anonymous executable vmas here.
+		 */
+		error = lock_executable_vmas(current);
+	}
+	return error;
+}
+#endif
+
 void flush_thread(void)
 {
 	clear_fpu(&current->thread);
diff --git a/arch/metag/kernel/romwrappers.S b/arch/metag/kernel/romwrappers.S
new file mode 100644
index 0000000..b0449b9
--- /dev/null
+++ b/arch/metag/kernel/romwrappers.S
@@ -0,0 +1,68 @@
+/*
+ * romwrappers.S
+ *
+ * Copyright (C) 2001, 2002, 2005 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * ROM interrupt wrap routines that divert the return path from the default
+ * code to a replacement. Only install these handlers if the tbi_vectors[]
+ * entry is valid!
+ */
+
+        .file   "romwrappers.S"
+
+#include <asm/traps.h>
+
+/** Function - wrapper for fault handler */
+        .text
+        .type   _fault_wrapper,function
+        .global _fault_wrapper
+_fault_wrapper:
+	/* load the ROM's TBI resume vector so the handler returns via it */
+	MOVT	D0Re0,#HI(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	ADD	D0Re0,D0Re0,#LO(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	GETD	D1RtP,[D0Re0]
+	B	_fault_handler
+        .size   _fault_wrapper,.-_fault_wrapper
+
+/** Function - wrapper for switch 1 (syscall) handler */
+        .type   _switch1_wrapper,function
+        .global _switch1_wrapper
+_switch1_wrapper:
+	/* load the ROM's TBI resume vector so the handler returns via it */
+	MOVT	D0Re0,#HI(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	ADD	D0Re0,D0Re0,#LO(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	GETD	D1RtP,[D0Re0]
+	B	_switch1_handler
+        .size   _switch1_wrapper,.-_switch1_wrapper
+
+/** Function - wrapper for unhandled switch handler */
+        .type   _switchx_wrapper,function
+        .global _switchx_wrapper
+_switchx_wrapper:
+	/* load the ROM's TBI resume vector so the handler returns via it */
+	MOVT	D0Re0,#HI(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	ADD	D0Re0,D0Re0,#LO(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	GETD	D1RtP,[D0Re0]
+	B	_switchx_handler
+        .size   _switchx_wrapper,.-_switchx_wrapper
+
+/** Function - wrapper for generic interrupt handler */
+        .type   _trigger_wrapper,function
+        .global _trigger_wrapper
+_trigger_wrapper:
+	/* load the ROM's TBI resume vector so the handler returns via it */
+	MOVT	D0Re0,#HI(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	ADD	D0Re0,D0Re0,#LO(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	GETD	D1RtP,[D0Re0]
+	B	_trigger_handler
+        .size   _trigger_wrapper,.-_trigger_wrapper
+
+/** Function - wrapper for kick interrupt handler */
+        .type   _kick_wrapper,function
+        .global _kick_wrapper
+_kick_wrapper:
+	/* load the ROM's TBI resume vector so the handler returns via it */
+	MOVT	D0Re0,#HI(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	ADD	D0Re0,D0Re0,#LO(_tbi_vectors + (TBI_VEC_RESUME * 4))
+	GETD	D1RtP,[D0Re0]
+	B	_kick_handler
+        .size   _kick_wrapper,.-_kick_wrapper
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 4f5726f..895c8d0 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -20,6 +20,7 @@
 #include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/of_fdt.h>
+#include <linux/of_platform.h>
 #include <linux/pfn.h>
 #include <linux/root_dev.h>
 #include <linux/sched.h>
@@ -149,6 +150,23 @@
 
 DEFINE_PER_CPU(PTBI, pTBI);
 
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+/* If non-0, then points to a ROM patch table for TBI */
+int tbi_vector_base;
+
+/* Early "tbi_vector_base=" parameter: parse the ROM patch table address */
+static int __init parse_tbi_vector_base(char *p)
+{
+	char *cp = p;
+
+	if (get_option(&cp, &tbi_vector_base) != 1) {
+		pr_err("Bad tbi_vector_base parameter (%s)\n", p);
+		/* non-zero tells the early-param core the value was bad */
+		return 1;
+	}
+	return 0;
+}
+early_param("tbi_vector_base", parse_tbi_vector_base);
+#endif
+
 /*
  * Mapping are specified as "CPU_ID:HWTHREAD_ID", e.g.
  *
@@ -424,6 +442,9 @@
 	/* customizes platform devices, or adds new ones */
 	if (machine_desc->init_machine)
 		machine_desc->init_machine();
+	else
+		of_platform_populate(NULL, of_default_bus_match_table, NULL,
+				     NULL);
 	return 0;
 }
 arch_initcall(customize_machine);
@@ -587,20 +608,20 @@
 EXPORT_SYMBOL(pTBI_get);
 
 #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
-char capabilites[] = "dsp fpu";
+static char capabilities[] = "dsp fpu";
 #elif defined(CONFIG_METAG_DSP)
-char capabilites[] = "dsp";
+static char capabilities[] = "dsp";
 #elif defined(CONFIG_METAG_FPU)
-char capabilites[] = "fpu";
+static char capabilities[] = "fpu";
 #else
-char capabilites[] = "";
+static char capabilities[] = "";
 #endif
 
 static struct ctl_table caps_kern_table[] = {
 	{
 		.procname	= "capabilities",
-		.data		= capabilites,
-		.maxlen		= sizeof(capabilites),
+		.data		= capabilities,
+		.maxlen		= sizeof(capabilities),
 		.mode		= 0444,
 		.proc_handler	= proc_dostring,
 	},
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index f443ec9..c49a398 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/atomic.h>
+#include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -62,6 +63,8 @@
 
 static DEFINE_SPINLOCK(boot_lock);
 
+static DECLARE_COMPLETION(cpu_running);
+
 /*
  * "thread" is assumed to be a valid Meta hardware thread ID.
  */
@@ -235,20 +238,12 @@
 	 */
 	ret = boot_secondary(thread, idle);
 	if (ret == 0) {
-		unsigned long timeout;
-
 		/*
 		 * CPU was successfully started, wait for it
 		 * to come online or time out.
 		 */
-		timeout = jiffies + HZ;
-		while (time_before(jiffies, timeout)) {
-			if (cpu_online(cpu))
-				break;
-
-			udelay(10);
-			barrier();
-		}
+		wait_for_completion_timeout(&cpu_running,
+					    msecs_to_jiffies(1000));
 
 		if (!cpu_online(cpu))
 			ret = -EIO;
@@ -276,7 +271,6 @@
 int __cpuexit __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
-	struct task_struct *p;
 
 	/*
 	 * Take this CPU offline.  Once we clear this, we can't return,
@@ -296,12 +290,7 @@
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	read_lock(&tasklist_lock);
-	for_each_process(p) {
-		if (p->mm)
-			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-	}
-	read_unlock(&tasklist_lock);
+	clear_tasks_mm_cpumask(cpu);
 
 	return 0;
 }
@@ -327,7 +316,7 @@
 	local_irq_disable();
 	idle_task_exit();
 
-	complete(&cpu_killed);
+	RCU_NONIDLE(complete(&cpu_killed));
 
 	asm ("XOR	TXENABLE, D0Re0,D0Re0\n");
 }
@@ -385,12 +374,7 @@
 
 	setup_priv();
 
-	/*
-	 * Enable local interrupts.
-	 */
-	tbi_startup_interrupt(TBID_SIGNUM_TRT);
 	notify_cpu_starting(cpu);
-	local_irq_enable();
 
 	pr_info("CPU%u (thread %u): Booted secondary processor\n",
 		cpu, cpu_2_hwthread_id[cpu]);
@@ -402,12 +386,13 @@
 	 * OK, now it's safe to let the boot CPU continue
 	 */
 	set_cpu_online(cpu, true);
+	complete(&cpu_running);
 
 	/*
-	 * Check for cache aliasing.
-	 * Preemption is disabled
+	 * Enable local interrupts.
 	 */
-	check_for_cache_aliasing(cpu);
+	tbi_startup_interrupt(TBID_SIGNUM_TRT);
+	local_irq_enable();
 
 	/*
 	 * OK, it's off to the idle thread for us
diff --git a/arch/metag/kernel/suspend.S b/arch/metag/kernel/suspend.S
new file mode 100644
index 0000000..8189db7
--- /dev/null
+++ b/arch/metag/kernel/suspend.S
@@ -0,0 +1,39 @@
+! Copyright (C) 2012 Imagination Technologies Ltd.
+!
+! Generic Metag functions to help with suspend to RAM.
+
+#include <linux/linkage.h>
+
+!================ SUSPEND TO RAM ================!
+
+! int metag_suspend_setjmp(struct metag_suspend_jmpbuf *jmp)
+! Save resume state into *jmp: A0StP/A0FrP, D0.5-D0.7, D1RtP, PCX and the
+! TXTIMER/TXTIMERI counters.  Returns 0 on this initial call.
+ENTRY(_metag_suspend_setjmp)
+	! D1Ar1 is buffer
+	! A0.2 indexes past the 16 bytes filled by the first MSETL
+	ADD	A0.2, D1Ar1, #16
+	MSETL	[D1Ar1++], A0StP, A0FrP
+	MSETL	[A0.2++], D0.5, D0.6, D0.7
+	SETD	[A0.2++], D1RtP
+	SETD	[A0.2++], PCX
+	SETD	[A0.2++], TXTIMER
+	SETD	[A0.2++], TXTIMERI
+	MOV	D0Re0, #0
+	MOV	PC, D1RtP
+ENDPROC(_metag_suspend_setjmp)
+
+! void metag_resume_longjmp(struct metag_suspend_jmpbuf *jmp, int ret)
+! Restore the state saved by metag_suspend_setjmp() and return to its
+! caller with D0Re0 = ret (the setjmp call appears to return ret).
+ENTRY(_metag_resume_longjmp)
+	! D1Ar1 is buffer
+	! D0Ar2 is return value
+
+	ADD	A0.2, D1Ar1, #16
+	MGETL	A0StP, A0FrP, [D1Ar1++]
+	MGETL	D0.5, D0.6, D0.7, [A0.2++]
+	GETD	D1RtP, [A0.2++]
+	GETD	PCX, [A0.2++]
+	GETD	TXTIMER, [A0.2++]
+	GETD	TXTIMERI, [A0.2++]
+
+	MOV	D0Re0, D0Ar2
+	MOV	PC, D1RtP
+ENDPROC(_metag_resume_longjmp)
+
diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c
index efe833a..f50eb03 100644
--- a/arch/metag/kernel/sys_metag.c
+++ b/arch/metag/kernel/sys_metag.c
@@ -41,6 +41,16 @@
 			  unsigned long prot, unsigned long flags,
 			  unsigned long fd, unsigned long pgoff)
 {
+#ifdef CONFIG_SOC_CHORUS2
+	/*
+	 * To workaround a chip bug on Chorus2 we must make sure we never take a
+	 * code fetch page fault. We lock all non-anonymous executable vmas
+	 * here.
+	 */
+	if (!(flags & MAP_ANONYMOUS) && (prot & PROT_EXEC))
+		flags |= MAP_LOCKED;
+#endif
+
 	/* The shift for mmap2 is constant, regardless of PAGE_SIZE setting. */
 	if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
 		return -EINVAL;
@@ -171,6 +181,13 @@
 #define sys_sync_file_range	sys_sync_file_range_metag
 
 /*
+ * Chorus2 needs some workarounds
+ */
+#ifdef CONFIG_SOC_CHORUS2
+#define sys_execve		sys_execve_chorus2
+#endif
+
+/*
  * Note that we can't include <linux/unistd.h> here since the header
  * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
  */
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c
index 17dc107..f1c8c53 100644
--- a/arch/metag/kernel/time.c
+++ b/arch/metag/kernel/time.c
@@ -5,11 +5,21 @@
  *
  */
 
-#include <linux/init.h>
-
 #include <clocksource/metag_generic.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <asm/clock.h>
 
 void __init time_init(void)
 {
+#ifdef CONFIG_COMMON_CLK
+	/* Init clocks from device tree */
+	of_clk_init(NULL);
+#endif
+
+	/* Init meta clocks, particularly the core clock */
+	init_metag_clocks();
+
+	/* Set up the timer clock sources */
 	metag_generic_timer_init();
 }
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 2ceeaae..fab50e3 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <asm/siginfo.h>
 #include <asm/traps.h>
 #include <asm/hwthread.h>
+#include <asm/setup.h>
 #include <asm/switch.h>
 #include <asm/user_gateway.h>
 #include <asm/syscall.h>
@@ -65,6 +66,33 @@
 
 unsigned long per_cpu__stack_save[NR_CPUS];
 
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+/*
+ * ROM vector patch table. By default it points to our internal functions,
+ * but these can be overridden if a ROM patch is found to be present.
+ * NB - the TBI_VEC_RESUME vector is normally NULL, as it is implicitly
+ * called within our internal code, but it is filled out during a ROM patch
+ * installation, as we then require its address. This fact can be used to
+ * determine if a patch is installed or not.
+ */
+tbi_ptr tbi_vectors[] = {
+	(tbi_ptr) __TBIASyncTrigger,
+	(tbi_ptr) __TBIASyncResume,
+	(tbi_ptr) 0		/* Place holder for TBIResume in ROM */
+};
+
+TBIRES fault_wrapper(TBIRES State, int SigNum, int Triggers, int Inst,
+		     PTBI pTBI);
+TBIRES switch1_wrapper(TBIRES State, int SigNum, int Triggers, int Inst,
+		       PTBI pTBI);
+TBIRES switchx_wrapper(TBIRES State, int SigNum, int Triggers, int Inst,
+		       PTBI pTBI);
+TBIRES trigger_wrapper(TBIRES State, int SigNum, int Triggers, int Inst,
+		       PTBI pTBI);
+TBIRES kick_wrapper(TBIRES State, int SigNum, int Triggers, int Inst,
+		    PTBI pTBI);
+#endif
+
 static const char * const trap_names[] = {
 	[TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
 	[TBIXXF_SIGNUM_PGF] = "Privilege violation",
@@ -87,8 +115,8 @@
 
 static DEFINE_SPINLOCK(die_lock);
 
-void die(const char *str, struct pt_regs *regs, long err,
-	 unsigned long addr)
+void __noreturn die(const char *str, struct pt_regs *regs,
+		    long err, unsigned long addr)
 {
 	static int die_counter;
 
@@ -122,6 +150,349 @@
 	do_exit(SIGSEGV);
 }
 
+#ifdef CONFIG_SOC_CHORUS2
+static void replay_catchbuffer(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs)
+{
+	int reg = 0;
+	int unit = 0;
+	int mask = 0;
+	int raxxx = 0;
+	int load_size = 0;
+	int pp = 0;
+	int die = 0;
+	int datal = 0;
+	int datah = 0;
+
+	current_thread_info()->replay_regs = regs;
+
+	/********* READS or LOADS *************/
+	if (pcbuf->CBFlags & TXCATCH0_READ_BIT) {
+		reg =
+		    (pcbuf->CBFlags & TXCATCH0_LDRXX_BITS) >> TXCATCH0_LDRXX_S;
+		unit =
+		    (pcbuf->CBFlags & TXCATCH0_LDDST_BITS) >> TXCATCH0_LDDST_S;
+
+		if (unit) {
+			mask = 0;
+			load_size =
+			    (pcbuf->
+			     CBFlags & TXCATCH0_LDL2L1_BITS) >>
+			    TXCATCH0_LDL2L1_S;
+			pp = (pcbuf->CBFlags & TXCATCH0_LDM16_BIT) != 0;
+		} else {
+			raxxx =
+			    (pcbuf->
+			     CBFlags & TXCATCH0_RAXX_BITS) >> TXCATCH0_RAXX_S;
+			pr_debug(" Don't yet do RD READs: raxxx %#x\n", raxxx);
+			die = 1;
+			goto out;
+		}
+
+		switch (load_size) {
+		case 0:	/* 8 bit */
+			datal = *(char *)(pcbuf->CBAddr);
+			break;
+
+		case 1:	/* 16 bit */
+			datal = *(short *)(pcbuf->CBAddr);
+			break;
+
+		case 2:	/* 32 bit */
+			datal = *(int *)(pcbuf->CBAddr);
+			break;
+
+		case 3:	/* 64 bit */
+			datal = *(int *)(pcbuf->CBAddr);
+			datah = *(int *)(pcbuf->CBAddr + 4);
+
+			/* pp bit means swap the units we are writing to!
+			 * So, if we swap here, we don't have to swap later on.
+			 */
+			if (pp) {
+				int tmp = datal;
+				datal = datah;
+				datah = tmp;
+			}
+			break;
+
+		default:
+			pr_debug("  unknown read load_size %d\n", load_size);
+			die = 1;
+			goto out;
+			break;
+		}
+
+		switch (unit) {
+		case 0:
+			pr_debug(" Do not do RD Reads yet\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_D1DSP:
+			pr_debug(" Do not handle D1DSP\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_D0DSP:
+			pr_debug(" Do not handle D0DSP\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_TMPLT:
+			pr_debug(" Do not handle TMPLT\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_TR:
+			pr_debug(" Do not handle TR\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_PC:
+			pr_debug(" Do not handle PC\n");
+			die = 1;
+			goto out;
+			break;
+
+		case TXCATCH0_LDDST_A1:
+			/* FIXME I think this can go away if we can be
+			 * sure that 64 bit loads set both units in
+			 * the LDDST field.
+			 */
+			if (load_size == 3) {
+				pr_warn("single dest unit (a1) but 64 bit size, pc %#x\n",
+					regs->ctx.CurrPC);
+				/* 64bit - both units */
+				regs->ctx.AX[reg].U1 = datal;
+				regs->ctx.AX[reg].U0 = datah;
+			} else {
+				regs->ctx.AX[reg].U1 = datal;
+			}
+			break;
+
+		case TXCATCH0_LDDST_A0:
+			/* FIXME I think this can go away if we can be
+			 * sure that 64 bit loads set both units in
+			 * the LDDST field.
+			 */
+			if (load_size == 3) {
+				pr_warn("single dest unit (a0) but 64 bit size, pc %#x\n",
+					regs->ctx.CurrPC);
+				/* 64bit - both units */
+				regs->ctx.AX[reg].U0 = datal;
+				regs->ctx.AX[reg].U1 = datah;
+			} else {
+				regs->ctx.AX[reg].U0 = datal;
+			}
+			break;
+
+		case TXCATCH0_LDDST_D1:
+			/* FIXME - XXX - we can sanity check that we have
+			 * not over-run the context both here and in other
+			 * cases !!! - eg. movs to/from globregs in user
+			 * space.
+			 * Graham
+			 */
+			/* FIXME I think this can go away if we can be
+			 * sure that 64 bit loads set both units in
+			 * the LDDST field.
+			 */
+			if (load_size == 3) {
+				pr_warn("single dest unit (d1) but 64 bit size, pc %#x\n",
+					regs->ctx.CurrPC);
+				/* 64bit - both units */
+				regs->ctx.DX[reg].U1 = datal;
+				regs->ctx.DX[reg].U0 = datah;
+			} else {
+				regs->ctx.DX[reg].U1 = datal;
+			}
+			break;
+
+		case TXCATCH0_LDDST_D0:
+			/* FIXME I think this can go away if we can be
+			 * sure that 64 bit loads set both units in
+			 * the LDDST field.
+			 */
+			if (load_size == 3) {
+				pr_warn("single dest unit (d0) but 64 bit size, pc %#x\n",
+					regs->ctx.CurrPC);
+				/* 64bit - both units */
+				regs->ctx.DX[reg].U0 = datal;
+				regs->ctx.DX[reg].U1 = datah;
+			} else {
+				regs->ctx.DX[reg].U0 = datal;
+			}
+			break;
+
+			/* 64bit load into a pair of units */
+		case TXCATCH0_LDDST_D0 | TXCATCH0_LDDST_D1:
+			if (load_size != 3) {
+				pr_warn("Dual data unit read with non-64bit value?\n");
+				die = 1;
+				goto out;
+			}
+
+			regs->ctx.DX[reg].U0 = datal;
+			regs->ctx.DX[reg].U1 = datah;
+			break;
+
+		case TXCATCH0_LDDST_A0 | TXCATCH0_LDDST_A1:
+			if (load_size != 3) {
+				pr_warn("Dual addr unit read with non-64bit value?\n");
+				die = 1;
+				goto out;
+			}
+
+			regs->ctx.AX[reg].U0 = datal;
+			regs->ctx.AX[reg].U1 = datah;
+			break;
+
+		case TXCATCH0_LDDST_CT:
+			pr_debug(" Do not handle CT reads\n");
+			die = 1;
+			goto out;
+			break;
+
+		default:
+			pr_debug("Unhandled unit %d\n", unit);
+			die = 1;
+			goto out;
+			break;
+		}
+	} else
+	/********************* WRITES **************/
+	{
+		int base_addr = pcbuf->CBAddr & ~0x7;
+
+		mask =
+		    (pcbuf->CBFlags & TXCATCH0_WMASK_BITS) >> TXCATCH0_WMASK_S;
+
+		/* The mask is an active low byte lane mask. Use it to
+		 * figure out how large a transfer we want, and also to
+		 * which address!
+		 */
+		switch (mask) {
+		case 0:
+			/* Full 64bit write */
+			*(int *)(base_addr) = pcbuf->CBData.U0;
+			*(int *)(base_addr + 4) = pcbuf->CBData.U1;
+			break;
+
+		case 0xF0:	/* Bottom 32bits */
+			*(int *)(base_addr) = pcbuf->CBData.U0;
+			break;
+
+		case 0x0F:	/* Top 32bits */
+			*(int *)(base_addr + 4) = pcbuf->CBData.U1;
+			break;
+
+		case 0xFC:	/* Bottom 16bits */
+			*(short *)(base_addr) = pcbuf->CBData.U0;
+			break;
+
+		case 0xF3:	/* Second 16bits */
+			*(short *)(base_addr + 2) = pcbuf->CBData.U0 >> 16;
+			break;
+
+		case 0xCF:	/* Third 16bits */
+			*(short *)(base_addr + 4) = pcbuf->CBData.U1;
+			break;
+
+		case 0x3F:	/* Top 16bits */
+			*(short *)(base_addr + 6) = pcbuf->CBData.U1 >> 16;
+			break;
+
+		case 0xFE:	/* Bottom byte */
+			*(char *)(base_addr) = pcbuf->CBData.U0;
+			break;
+
+		case 0xFD:	/* 2nd byte */
+			*(char *)(base_addr + 1) = pcbuf->CBData.U0 >> 8;
+			break;
+
+		case 0xFB:	/* 3rd byte */
+			*(char *)(base_addr + 2) = pcbuf->CBData.U0 >> 16;
+			break;
+
+		case 0xF7:	/* 4th byte */
+			*(char *)(base_addr + 3) = pcbuf->CBData.U0 >> 24;
+			break;
+
+		case 0xEF:	/* 5th byte */
+			*(char *)(base_addr + 4) = pcbuf->CBData.U1;
+			break;
+
+		case 0xDF:	/* 6th byte */
+			*(char *)(base_addr + 5) = pcbuf->CBData.U1 >> 8;
+			break;
+
+		case 0xBF:	/* 7th byte */
+			*(char *)(base_addr + 6) = pcbuf->CBData.U1 >> 16;
+			break;
+
+		case 0x7F:	/* Top byte */
+			*(char *)(base_addr + 7) = pcbuf->CBData.U1 >> 24;
+			break;
+
+		default:
+			pr_debug("Unknown catch write mask %#x\n", mask);
+			die = 1;
+			goto out;
+			break;
+		}
+	}
+
+out:
+	if (die) {
+		pr_debug("Failed to soft replay catch ...\n");
+
+		pr_debug(" pid %d, PC %#x\n", current->pid, regs->ctx.CurrPC);
+
+		pr_debug(" CBFlags %#x\n", (int)(pcbuf->CBFlags));
+		pr_debug(" CBAddr %#x\n", (int)(pcbuf->CBAddr));
+		pr_debug(" CBData.U0 %#x\n", pcbuf->CBData.U0);
+		pr_debug(" CBData.U1 %#x\n", pcbuf->CBData.U1);
+
+		if (pcbuf->CBFlags & TXCATCH0_READ_BIT) {
+			pr_debug(" Read\n");
+
+			pr_debug("  unit %d, reg %d\n", unit, reg);
+		} else {
+			pr_debug(" Write\n");
+		}
+
+		show_regs(regs);
+		/* Probably should signal user process here! */
+		hard_processor_halt(HALT_PANIC);
+	} else {
+		/* As we have just done the catch buffer action by hand,
+		 * we must now clear out the stored catch buffer state,
+		 * so the hardware does not try to replay it upon resume.
+		 */
+
+		/* What do we do if there is data in the RD pipe ? */
+		if (regs->ctx.SaveMask & TBICTX_CBRP_BIT) {
+			pr_debug("Cannot fix up the RD pipe yet\n");
+			die = 1;
+			goto out;
+		}
+
+		regs->ctx.SaveMask &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT);
+		pcbuf->CBFlags = 0;
+		pcbuf->CBAddr = 0;
+		pcbuf->CBData.U0 = 0;
+		pcbuf->CBData.U1 = 0;
+	}
+
+	return;
+}
+#endif
+
 #ifdef CONFIG_METAG_DSP
 /*
  * The ECH encoding specifies the size of a DSPRAM as,
@@ -338,6 +709,16 @@
 #endif
 	}
 
+#ifdef CONFIG_SOC_CHORUS2
+	if (State.Sig.pCtx->SaveMask & (TBICTX_CBUF_BIT | TBICTX_XCBF_BIT)) {
+		PTBICTXEXTCB0 cbuf = regs->extcb0;
+
+		if (cbuf->CBFlags | cbuf->CBAddr |
+		    cbuf->CBData.U0 | cbuf->CBData.U1)
+			replay_catchbuffer(cbuf, regs);
+	}
+#endif
+
 	/* TBI will turn interrupts back on at some point. */
 	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
 		trace_hardirqs_on();
@@ -527,6 +908,11 @@
 
 	ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);
 
+#ifdef CONFIG_SOC_CHORUS2
+	if (ret == 0)
+		replay_catchbuffer(pcbuf, regs);
+#endif
+
 	return ret;
 }
 
@@ -564,6 +950,10 @@
 	case TBIXXF_SIGNUM_IPF: {
 		/* 2nd-level entry invalid (instruction fetch) */
 		unsigned long addr = get_inst_fault_address(regs);
+#ifdef CONFIG_SOC_CHORUS2
+		pr_err("instruction fetch fault at %#lx pid %d comm %s\n",
+		       addr, current->pid, current->comm);
+#endif
 		do_page_fault(regs, addr, 0, SigNum);
 		break;
 	}
@@ -648,7 +1038,9 @@
 		 * safely ignore it, so treat all unknown switches
 		 * (including breakpoints) as traps.
 		 */
-		force_sig(SIGTRAP, current);
+		if (notify_die(DIE_TRAP, "debug trap", regs, 0, SigNum,
+			       SIGTRAP) != NOTIFY_STOP)
+			force_sig(SIGTRAP, current);
 		return tail_end(State);
 	}
 
@@ -828,7 +1220,11 @@
 	int_context.Sig.SaveMask = 0;
 
 	/* And call __TBIASyncTrigger() */
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+	tbi_vectors[TBI_VEC_ASYNC_TRIGGER] (int_context);
+#else
 	__TBIASyncTrigger(int_context);
+#endif
 }
 
 void __init trap_init(void)
@@ -836,12 +1232,37 @@
 	unsigned long cpu = smp_processor_id();
 	PTBI _pTBI = per_cpu(pTBI, cpu);
 
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+	unsigned int *paddr;
+	int i;
+
+	/* Check to see if there is a ROM based system that we should use
+	 * instead of our internal copy.
+	 */
+	if (!tbi_vector_base)
+		panic("This core requires a tbi_vector_base parameter.\n");
+
+	paddr = (unsigned int *)tbi_vector_base;
+
+	for (i = 0; i < ARRAY_SIZE(tbi_vectors); i++) {
+		if (paddr[i])
+			tbi_vectors[i] = (tbi_ptr) paddr[i];
+	}
+
+	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_wrapper;
+	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_wrapper;
+	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_wrapper;
+	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_wrapper;
+	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_wrapper;
+	_pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_wrapper;
+#else
 	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
 	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
 	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
 	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
 	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
 	_pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;
+#endif
 
 #ifdef CONFIG_METAG_META21
 	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
@@ -864,7 +1285,11 @@
 
 	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));
 
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+	_pTBI->fnSigs[irq] = trigger_wrapper;
+#else
 	_pTBI->fnSigs[irq] = trigger_handler;
+#endif
 }
 
 void tbi_shutdown_interrupt(int irq)
@@ -912,7 +1337,11 @@
 	/* And interrupts should come back on when we resume the real usermode
 	 * code. Call __TBIASyncResume()
 	 */
+#ifdef CONFIG_METAG_ROM_WRAPPERS
+	tbi_vectors[TBI_VEC_ASYNC_RESUME](tail_end(Next));
+#else
 	__TBIASyncResume(tail_end(Next));
+#endif
 	/* ASyncResume should NEVER return */
 	BUG();
 	return 0;
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S
index e12055e..eb9bb1c 100644
--- a/arch/metag/kernel/vmlinux.lds.S
+++ b/arch/metag/kernel/vmlinux.lds.S
@@ -19,6 +19,11 @@
   __stext = .;
   HEAD_TEXT_SECTION
   .text : {
+#ifdef CONFIG_SOC_CHORUS2
+	___replay_text_start = .;
+	*(.text.replay_catchbuffer)
+	___replay_text_end = .;
+#endif
 	TEXT_TEXT
 	SCHED_TEXT
 	LOCK_TEXT
diff --git a/arch/metag/lib/checksum.c b/arch/metag/lib/checksum.c
index 44d2e19..1793719 100644
--- a/arch/metag/lib/checksum.c
+++ b/arch/metag/lib/checksum.c
@@ -99,7 +99,6 @@
 out:
 	return result;
 }
-EXPORT_SYMBOL(ip_fast_csum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -124,7 +123,6 @@
 		result += 1;
 	return (__force __wsum)result;
 }
-EXPORT_SYMBOL(csum_partial);
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -134,7 +132,6 @@
 {
 	return (__force __sum16)~do_csum(buff, len);
 }
-EXPORT_SYMBOL(ip_compute_csum);
 
 /*
  * copy from fs while checksumming, otherwise like csum_partial
@@ -154,7 +151,6 @@
 
 	return csum_partial(dst, len, sum);
 }
-EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 /*
  * copy from ds while checksumming, otherwise like csum_partial
@@ -165,4 +161,3 @@
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
 }
-EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/metag/lib/delay.c b/arch/metag/lib/delay.c
index 0b308f4..e6df8af 100644
--- a/arch/metag/lib/delay.c
+++ b/arch/metag/lib/delay.c
@@ -34,23 +34,19 @@
 		rdtimer(now);
 	} while ((now-bclock) < loops);
 }
-EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
 	u64 loops = (u64)xloops * (u64)loops_per_jiffy * HZ;
 	__delay(loops >> 32);
 }
-EXPORT_SYMBOL(__const_udelay);
 
 void __udelay(unsigned long usecs)
 {
 	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
 }
-EXPORT_SYMBOL(__udelay);
 
 void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
 }
-EXPORT_SYMBOL(__ndelay);
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9..19e5f92 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -611,7 +611,6 @@
 
 	return retn;
 }
-EXPORT_SYMBOL(__copy_user);
 
 #define __asm_copy_from_user_1(to, from, ret) \
 	__asm_copy_user_cont(to, from, ret,	\
@@ -938,7 +937,6 @@
 
 	return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
 
 #define __asm_clear_8x64(to, ret) \
 	asm volatile (					\
@@ -1092,7 +1090,6 @@
 
 	return retn;
 }
-EXPORT_SYMBOL(__do_clear_user);
 
 unsigned char __get_user_asm_b(const void __user *addr, long *err)
 {
@@ -1116,7 +1113,6 @@
 		: "D0FrT");
 	return x;
 }
-EXPORT_SYMBOL(__get_user_asm_b);
 
 unsigned short __get_user_asm_w(const void __user *addr, long *err)
 {
@@ -1140,7 +1136,6 @@
 		: "D0FrT");
 	return x;
 }
-EXPORT_SYMBOL(__get_user_asm_w);
 
 unsigned int __get_user_asm_d(const void __user *addr, long *err)
 {
@@ -1164,7 +1159,6 @@
 		: "D0FrT");
 	return x;
 }
-EXPORT_SYMBOL(__get_user_asm_d);
 
 long __put_user_asm_b(unsigned int x, void __user *addr)
 {
@@ -1188,7 +1182,6 @@
 		: "D0FrT");
 	return err;
 }
-EXPORT_SYMBOL(__put_user_asm_b);
 
 long __put_user_asm_w(unsigned int x, void __user *addr)
 {
@@ -1212,7 +1205,6 @@
 		: "D0FrT");
 	return err;
 }
-EXPORT_SYMBOL(__put_user_asm_w);
 
 long __put_user_asm_d(unsigned int x, void __user *addr)
 {
@@ -1236,7 +1228,6 @@
 		: "D0FrT");
 	return err;
 }
-EXPORT_SYMBOL(__put_user_asm_d);
 
 long __put_user_asm_l(unsigned long long x, void __user *addr)
 {
@@ -1260,7 +1251,6 @@
 		: "D0FrT");
 	return err;
 }
-EXPORT_SYMBOL(__put_user_asm_l);
 
 long strnlen_user(const char __user *src, long count)
 {
@@ -1297,7 +1287,6 @@
 
 	return res;
 }
-EXPORT_SYMBOL(strnlen_user);
 
 long __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -1351,4 +1340,3 @@
 
 	return res;
 }
-EXPORT_SYMBOL(__strncpy_from_user);
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
index b5d3b2e..a622852 100644
--- a/arch/metag/mm/cache.c
+++ b/arch/metag/mm/cache.c
@@ -45,7 +45,7 @@
 
 #define LNKGET_CONSTANT 0xdeadbeef
 
-void __init metag_lnkget_probe(void)
+static void __init metag_lnkget_probe(void)
 {
 	int temp;
 	long flags;
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
index 2a21eae..1183646 100644
--- a/arch/metag/mm/extable.c
+++ b/arch/metag/mm/extable.c
@@ -2,11 +2,29 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 
+#ifdef CONFIG_SOC_CHORUS2
+extern unsigned long __replay_text_start;
+extern unsigned long __replay_text_end;
+#endif
+
 int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
 	unsigned long pc = instruction_pointer(regs);
 
+#ifdef CONFIG_SOC_CHORUS2
+	/*
+	 * If we hit an exception in the replay code then we have to find
+	 * the original PC to find out where the appropriate fixup is.
+	 */
+	if (current_thread_info()->replay_regs &&
+	    pc >= (unsigned long)&__replay_text_start &&
+	    pc < (unsigned long)&__replay_text_end) {
+		memcpy(regs, current_thread_info()->replay_regs,
+		       sizeof(*regs));
+		pc = instruction_pointer(regs);
+	}
+#endif
 	fixup = search_exception_tables(pc);
 	if (fixup)
 		regs->ctx.CurrPC = fixup->fixup;
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 2c75bf7..614499b6 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -43,6 +43,38 @@
 	}
 }
 
+#ifdef CONFIG_SOC_CHORUS2
+static void check_cached_pte(struct mm_struct *mm, unsigned long address,
+			     struct pt_regs *regs)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return;
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_present(*pte) &&
+	    !pte_write(*pte) &&
+	    pte_val(*pte) & _PAGE_CACHEABLE) {
+		pr_err("write to read-only cached page (addr %#x, pc %#x, pte %#lx) is invalid\n",
+		       (int) address, regs->ctx.CurrPC, pte_val(*pte));
+	}
+}
+#endif
+
+
 int show_unhandled_signals = 1;
 
 int do_page_fault(struct pt_regs *regs, unsigned long address,
@@ -121,6 +153,9 @@
 	if (write_access) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+#ifdef CONFIG_SOC_CHORUS2
+		check_cached_pte(mm, address, regs);
+#endif
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
index c64ee61..9d96286 100644
--- a/arch/metag/mm/l2cache.c
+++ b/arch/metag/mm/l2cache.c
@@ -1,6 +1,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/static_key.h>
 
 #include <asm/l2cache.h>
 #include <asm/metag_isa.h>
@@ -12,6 +13,7 @@
 
 int l2c_pfenable;
 
+struct static_key l2c_has_invalidate = STATIC_KEY_INIT_FALSE;
 static volatile u32 l2c_testdata[16] __initdata __aligned(64);
 
 static int __init parse_l2cache(char *p)
@@ -38,6 +40,29 @@
 }
 early_param("l2cache_pf", parse_l2cache_pf);
 
+static int __init meta_l2c_test_invalidate(void)
+{
+	if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
+		/* force 1's into memory */
+		l2c_testdata[0] = 0x11111111;
+		__builtin_meta2_cachewd((void *)l2c_testdata,
+					CACHEW_FLUSH_L1D_L2);
+		/* dirty the L2 with 2's */
+		l2c_testdata[0] = 0x22222222;
+		/* read it back to fill a line in L1 too */
+		(void)l2c_testdata[0];
+		/* attempt an invalidation of both caches */
+		__builtin_meta2_cachewd((void *)l2c_testdata,
+					CACHEW_INVALIDATE_L1D_L2);
+		/*
+		 * We should now read back the data from memory if the
+		 * invalidate worked
+		 */
+		return l2c_testdata[0] == 0x11111111;
+	}
+	return 0;
+}
+
 static int __init meta_l2c_setup(void)
 {
 	/*
@@ -89,6 +114,15 @@
 		pr_info("L2 Cache: Not enabling prefetch\n");
 	}
 
+	/*
+	 * Test whether invalidate (CACHEWD 0xC) is usable.
+	 */
+	if (meta_l2c_test_invalidate())
+		static_key_slow_inc(&l2c_has_invalidate);
+
+	if (!meta_l2c_has_invalidate())
+		pr_info("L2 Cache: Invalidation op disabled\n");
+
 	return 0;
 }
 core_initcall(meta_l2c_setup);
diff --git a/arch/metag/soc/chorus2/Makefile b/arch/metag/soc/chorus2/Makefile
new file mode 100644
index 0000000..9746d08
--- /dev/null
+++ b/arch/metag/soc/chorus2/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Chorus2-specific device drivers.
+#
+
+obj-y	+= clock.o
+obj-y	+= dma.o
+obj-y	+= gpio.o
+obj-y	+= irq.o
+obj-y	+= setup.o
+
+obj-$(CONFIG_CHORUS2_LCD)	+= lcd.o
diff --git a/arch/metag/soc/chorus2/clock.c b/arch/metag/soc/chorus2/clock.c
new file mode 100644
index 0000000..99bc3c8
--- /dev/null
+++ b/arch/metag/soc/chorus2/clock.c
@@ -0,0 +1,513 @@
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <asm/clock.h>
+#include <asm/global_lock.h>
+#include <asm/soc-chorus2/clock.h>
+
+/* report pixel clock settings */
+/*#define DEBUG_PIXEL_CLOCK*/
+
+/* Clock control */
+#define CCR_ADDR		0x02000064
+#define CCR_CLK_SEL		0x80000000
+#define CCR_CLK_DEL		0x7FE00000
+#define CCR_CLK_DEL_SHIFT	21
+#define CCR_CLK_DIV		0x001C0000
+#define CCR_CLK_DIV_SHIFT	18
+#define CCR_CLK_PD		0x00020000
+#define CCR_CLK_BP		0x00010000
+#define CCR_CLK_OEB		0x00008000
+#define CCR_CLK_OD		0x00006000
+#define CCR_CLK_OD_SHIFT	13
+#define CCR_CLK_NR		0x00001F00
+#define CCR_CLK_NR_SHIFT	8
+#define CCR_CLK_NF		0x000000FF
+#define CCR_CLK_NF_SHIFT	0
+
+/* Pixel clock control */
+#define PCC_ADDR		0x020000A8
+#define PCC_DIVR1		0x00000038
+#define PCC_DIVR1_SHIFT		3
+#define PCC_DIVR1_MAX		0x5
+#define PCC_ENABLE		0x00000004
+#define PCC_ENABLE_SHIFT	2
+#define PCC_DIV_SOURCE		0x00000003
+#define PCC_DIV_SOURCE_SHIFT	0
+#define PCC_DIV_SOURCE_REFCLK	0x0
+#define PCC_DIV_SOURCE_PLL	0x1
+#define PCC_DIV_SOURCE_SYS	0x2
+
+/* Misc clock control */
+#define MCC_ADDR		0x020000BC
+#define MCC_PIX_DIVR2		0x000000F8
+#define MCC_PIX_DIVR2_SHIFT	3
+#define MCC_PIX_DIVR2_MAX	(1 << 5)
+
+/*
+ * Return the system clock (xtal/oscillator). On Chorus2, this
+ * is (almost) always a 24.576MHz oscillator. This is defined as weak so
+ * can be overridden on a board by board basis if necessary.
+ */
+unsigned long __weak get_sysclock(void)
+{
+	return 24576000;
+}
+
+/* Determine what speed the PLL output is. */
+static unsigned long get_pllclock(void)
+{
+	unsigned long sclk = get_sysclock();
+	u32 ccr;
+	u32 od, nr, nf;
+	u32 no;
+	unsigned long fout;
+
+	ccr = ioread32((void *)CCR_ADDR);
+
+	/* Is the PLL bypassed */
+	if (ccr & CCR_CLK_BP)
+		return sclk;
+
+	od = (ccr & CCR_CLK_OD) >> CCR_CLK_OD_SHIFT;
+	nr = (ccr & CCR_CLK_NR) >> CCR_CLK_NR_SHIFT;
+	/* Note the 2*, hidden in the PLL manual */
+	nf = 2 * ((ccr & CCR_CLK_NF) >> CCR_CLK_NF_SHIFT);
+
+	/* 2bit decimal encoding of binary series */
+	if (od == 3)
+		no = 4;
+	else
+		no = od;
+
+	fout = (sclk * nf) / (nr * no);
+
+	return fout;
+}
+
+/* Determine what speed we are running the CPU at. Ask the board what speed
+ * the main oscillator is running at (normally 24.576MHz for a Chorus2), and
+ * then examine the PLL to see how that is being manipulated.
+ */
+unsigned long chorus2_get_coreclock(void)
+{
+	u32 ccr;
+	u32 del, div;
+	unsigned long fout;
+
+	ccr = ioread32((void *)CCR_ADDR);
+
+	del = (ccr & CCR_CLK_DEL) >> CCR_CLK_DEL_SHIFT;
+	div = (ccr & CCR_CLK_DIV) >> CCR_CLK_DIV_SHIFT;
+
+	WARN_ON_ONCE(del);
+
+	/* Is the PLL selected */
+	if (!(ccr & CCR_CLK_SEL))
+		return get_sysclock();
+
+	/* The PLL is in use, so it cannot be powered down, or we would not be
+	 * running. Thus, do not even check the PD bit.
+	 */
+
+	fout = get_pllclock();
+
+	switch (div) {
+		/* Output turned off - how are we running then ? */
+	case 0:
+	case 7:
+		WARN_ON_ONCE(1);
+		break;
+		/* no divide */
+	case 1:
+		break;
+		/* Must be one of the dividers then */
+	default:
+		fout = fout / (1 << (div - 1));
+		break;
+	}
+
+	return fout;
+}
+
+struct meta_clock_desc chorus2_meta_clocks __initdata = {
+	.get_core_freq = chorus2_get_coreclock,
+};
+
+/* Clock framework types */
+
+struct clk {
+	const char *id;
+	struct clk_ops *ops;
+	spinlock_t lock;
+	unsigned int refcount;
+};
+
+struct clk_ops {
+	/* combined enable/disable */
+	int (*mode)(struct clk *clk, int enabled);
+	unsigned long (*get_rate)(struct clk *clk);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+};
+
+#define INIT_CLK(NAME, OPS) {				\
+		.id = (NAME),				\
+		.ops = &(OPS),				\
+		.lock = __SPIN_LOCK_UNLOCKED(clk.lock),	\
+	}
+
+static struct clk_ops dummy_clk_ops = {};
+static struct clk dummy_clk = INIT_CLK("dummy", dummy_clk_ops);
+
+/* Pixel clock */
+
+struct pix_clk {
+	struct clk clk;
+	unsigned long min_rate;
+	unsigned long max_rate;
+};
+#define CLK_TO_PIX_CLK(CLK) container_of((CLK), struct pix_clk, clk)
+
+
+/*
+ * functions for finding the best clock and divider settings.
+ * divrs[0] is a power of 2, up to 5 (2^5 = 32).
+ * divrs[1] is in range [1,32].
+ * pixel_clock = (clock / divrs[1]) >> divrs[0]
+ */
+
+static unsigned long (*pixclock_sources[])(void) = {
+	[PCC_DIV_SOURCE_REFCLK]	= get_sysclock,
+	[PCC_DIV_SOURCE_PLL]	= get_pllclock,
+};
+
+static unsigned long pixclock_do_divrs(unsigned long clock_freq, u32 *divrs)
+{
+	return (clock_freq / divrs[1]) >> divrs[0];
+}
+
+static void pixclock_inc_divrs(u32 *divrs)
+{
+	if (unlikely(divrs[1] >= MCC_PIX_DIVR2_MAX)) {
+		if (likely(divrs[0] < PCC_DIVR1_MAX)) {
+			++divrs[0];
+			divrs[1] = divrs[1] / 2 + 1;
+		}
+	} else
+		++divrs[1];
+}
+
+static void pixclock_dec_divrs(u32 *divrs)
+{
+	if (divrs[0] && divrs[1] <= MCC_PIX_DIVR2_MAX/2) {
+		--divrs[0];
+		divrs[1] = divrs[1] * 2 - 1;
+	} else if (likely(divrs[1] > 1))
+		--divrs[1];
+}
+
+/* normalise so both values in range */
+static void pixclock_norm_divrs(u32 *divrs)
+{
+	while (divrs[1] > MCC_PIX_DIVR2_MAX) {
+		++divrs[0];
+		divrs[1] /= 2;
+	}
+
+	if (unlikely(divrs[0] > PCC_DIVR1_MAX)) {
+		divrs[0] = PCC_DIVR1_MAX;
+		divrs[1] = MCC_PIX_DIVR2_MAX;
+	} else if (unlikely(divrs[1] < 1)) {
+		divrs[0] = 0;
+		divrs[1] = 1;
+	}
+}
+
+/*
+ * returns the actual pixel frequency obtained
+ * assumes pixclock is in range [min, max]
+ */
+static unsigned long pixclock_calc_divrs(unsigned long pixclock,
+			unsigned long min, unsigned long max,
+			unsigned long clock_freq,
+			u32 *divrs)
+{
+	unsigned long result;
+
+	divrs[0] = 0;
+	divrs[1] = (pixclock/2 + clock_freq) / pixclock;
+	pixclock_norm_divrs(divrs);
+
+	/* don't allow exceeding of hardware limits */
+	result = pixclock_do_divrs(clock_freq, divrs);
+	if (max && result > max)
+		pixclock_inc_divrs(divrs);
+	else if (min && result < min)
+		pixclock_dec_divrs(divrs);
+	else
+		return result;
+	return pixclock_do_divrs(clock_freq, divrs);
+}
+
+static int pixclock_calc_best_src(unsigned long pixclock,
+				unsigned long min, unsigned long max,
+				u32 *divrs, u32 *src)
+{
+	int result = 1;
+	unsigned long err = ~0;
+	u32 i;
+
+#ifdef DEBUG_PIXEL_CLOCK
+	printk(KERN_DEBUG "Desired pixel clock: %lu [%lu,%lu]\n",
+		pixclock,
+		min,
+		max);
+#endif
+
+	/* try not to exceed frequency limits of screen */
+	if (min && pixclock < min)
+		pixclock = min;
+	else if (max && pixclock > max)
+		pixclock = max;
+
+	for (i = 0; i < ARRAY_SIZE(pixclock_sources); ++i) {
+		u32 tmp_divrs[2];
+		unsigned long tmp_pixclock;
+		unsigned long tmp_err;
+
+		if (!pixclock_sources[i])
+			continue;
+
+		tmp_pixclock = pixclock_calc_divrs(pixclock, min, max,
+					pixclock_sources[i](), tmp_divrs);
+
+#ifdef DEBUG_PIXEL_CLOCK
+		printk(KERN_DEBUG "Pixel clock src %d offers: "
+				  "%lu (%lu/%u/%u)\n",
+			i,
+			tmp_pixclock,
+			pixclock_sources[i](),
+			1 << tmp_divrs[0],
+			tmp_divrs[1]);
+#endif
+
+		if (unlikely(tmp_pixclock < min || tmp_pixclock > max))
+			continue;
+
+		tmp_err = abs(tmp_pixclock - pixclock);
+		if (tmp_err <= err) {
+			divrs[0] = tmp_divrs[0];
+			divrs[1] = tmp_divrs[1];
+			*src = i;
+			err = tmp_err;
+			result = 0;
+#ifdef DEBUG_PIXEL_CLOCK
+			printk(KERN_DEBUG "Pixel clock src %d chosen\n", i);
+#endif
+		}
+	}
+
+	return result;
+}
+
+/*
+ * Sets the pixel clock to be as close to pixclock as possible within the limits
+ * of min and max. Use get_pixclock to get the actual frequency.
+ * A zero min or max means no limit.
+ * Returns zero if successful.
+ */
+static int set_pixclock(struct clk *clk, unsigned long pixclock)
+{
+	struct pix_clk *pix_clk = CLK_TO_PIX_CLK(clk);
+	u32 src = 0;
+	u32 divrs[2] = {0, 1};
+	int state;
+	u32 pcc, mcc;
+
+	if (!pixclock)
+		return -EINVAL;
+
+	if (pixclock_calc_best_src(pixclock,
+				   pix_clk->min_rate, pix_clk->max_rate,
+				   divrs, &src))
+		return -EINVAL;
+
+	pcc = ioread32((void *)PCC_ADDR);
+	pcc &= ~(PCC_DIVR1 | PCC_DIV_SOURCE);
+	pcc |= divrs[0]		<< PCC_DIVR1_SHIFT;
+	pcc |= src		<< PCC_DIV_SOURCE_SHIFT;
+	iowrite32(pcc, (void *)PCC_ADDR);
+
+	/*
+	 * misc_clock_control has other uses
+	 * lock out interrupts and other HW threads
+	 */
+	__global_lock2(state);
+	mcc = ioread32((void *)MCC_ADDR);
+	mcc &= ~MCC_PIX_DIVR2;
+	mcc |= (divrs[1] - 1)	<< MCC_PIX_DIVR2_SHIFT;
+	iowrite32(mcc, (void *)MCC_ADDR);
+	__global_unlock2(state);
+
+	return 0;
+}
+
+static unsigned long get_pixclock(struct clk *clk)
+{
+	u32 pcc, mcc;
+	unsigned long source;
+	u32 divrs[2];
+
+	pcc = ioread32((void *)PCC_ADDR);
+
+	if (!(pcc & PCC_ENABLE))
+		return 0;
+
+	mcc = ioread32((void *)MCC_ADDR);
+	divrs[0] = (pcc & PCC_DIVR1) >> PCC_DIVR1_SHIFT;
+	divrs[1] = 1 + ((mcc & MCC_PIX_DIVR2) >> MCC_PIX_DIVR2_SHIFT);
+	source = (pcc & PCC_DIV_SOURCE) >> PCC_DIV_SOURCE_SHIFT;
+
+	if (source >= ARRAY_SIZE(pixclock_sources) || !pixclock_sources[source])
+		return 0;
+	return pixclock_do_divrs(pixclock_sources[source](), divrs);
+}
+
+static int pix_clk_mode(struct clk *clk, int enabled)
+{
+	unsigned int temp;
+	temp = ioread32((void *)PCC_ADDR);
+	if (enabled)
+		temp |= PCC_ENABLE;
+	else
+		temp &= ~PCC_ENABLE;
+	iowrite32(temp, (void *)PCC_ADDR);
+	return 0;
+}
+
+static struct clk_ops pix_clk_ops = {
+	.get_rate = get_pixclock,
+	.set_rate = set_pixclock,
+	.mode = pix_clk_mode,
+};
+
+/* SPI clock */
+
+static unsigned long get_spiclock(struct clk *clk)
+{
+	return get_sysclock();
+}
+
+static struct clk_ops spi_clk_ops = {
+	.get_rate = get_spiclock,
+	.mode = pix_clk_mode,
+};
+
+
+/* Clock data */
+
+static char *dummy_clk_ids[] = {
+	"pdp",
+	"pdi",
+};
+
+static struct clk simple_clks[] = {
+	INIT_CLK("spi", spi_clk_ops),
+};
+
+static struct pix_clk pix_clk = {
+	.clk = INIT_CLK("pixel", pix_clk_ops)
+};
+
+void pix_clk_set_limits(unsigned long min, unsigned long max)
+{
+	pix_clk.min_rate = min;
+	pix_clk.max_rate = max;
+}
+
+
+/* Clock framework */
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+	int i;
+	if (!strcmp(id, pix_clk.clk.id))
+		return &pix_clk.clk;
+	/* simple clocks */
+	for (i = 0; i < ARRAY_SIZE(simple_clks); ++i)
+		if (!strcmp(id, simple_clks[i].id))
+			return &simple_clks[i];
+	/* dummy clocks */
+	for (i = 0; i < ARRAY_SIZE(dummy_clk_ids); ++i)
+		if (!strcmp(id, dummy_clk_ids[i]))
+			return &dummy_clk;
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(clk_get);
+
+int clk_enable(struct clk *clk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	if (!clk->refcount) {
+		if (clk->ops->mode)
+			clk->ops->mode(clk, 1);
+	}
+	++clk->refcount;
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	--clk->refcount;
+	if (!clk->refcount) {
+		if (clk->ops->mode)
+			clk->ops->mode(clk, 0);
+	}
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	unsigned long result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	if (clk->ops->get_rate)
+		result = clk->ops->get_rate(clk);
+	else
+		result = 0;
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return result;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	if (clk->ops->set_rate)
+		result = clk->ops->set_rate(clk, rate);
+	else
+		result = -EINVAL;
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return result;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
diff --git a/arch/metag/soc/chorus2/dma.c b/arch/metag/soc/chorus2/dma.c
new file mode 100644
index 0000000..2586635
--- /dev/null
+++ b/arch/metag/soc/chorus2/dma.c
@@ -0,0 +1,286 @@
+/*
+ * Chorus2 specific DMA code.
+ *
+ * Copyright (C) 2007,2008 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include <asm/global_lock.h>
+#include <asm/img_dma.h>
+#include <asm/soc-chorus2/dma.h>
+
+
+
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+static unsigned int dma_channels[MAX_DMA_CHANNELS] = {
+#ifdef CONFIG_SOC_CHORUSX_DMA0
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA1
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA2
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA3
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA4
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA5
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA6
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA7
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA8
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA9
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA10
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_CHORUSX_DMA11
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+};
+
+/*
+ * Return the index of the first available DMA channel, or -1 if none.
+ * Caller must hold dma_spin_lock.
+ */
+static int get_free_channel(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		if (dma_channels[i] == IMG_DMA_CHANNEL_AVAILABLE)
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Route peripheral @periph to DMA channel @dmanr by programming the
+ * channel's 8-bit field in the appropriate DMA_CHAN_SEL_* register.
+ * The read-modify-write is performed under the global lock because
+ * each register holds the routing for four channels.
+ */
+static void setup_dma_channel(int dmanr, unsigned int periph)
+{
+	unsigned int shift, val, mask;
+	int flags;
+	unsigned int *reg = NULL;
+
+	if ((dmanr >= 0) && (dmanr < 4))
+		reg = (unsigned int *)DMA_CHAN_SEL_0_3;
+	else if ((dmanr >= 4) && (dmanr < 8))
+		reg = (unsigned int *)DMA_CHAN_SEL_4_7;
+	else if ((dmanr >= 8) && (dmanr < 12))
+		reg = (unsigned int *)DMA_CHAN_SEL_8_11;
+
+	BUG_ON(!reg);
+
+	/* Peripherals are specified using 6 bits but have an 8 bit stride
+	   in the register. */
+	shift = (dmanr & 0x3) * 8;
+
+	mask = ~(DMAC_PERIPH_MASK << shift);
+
+	periph = periph << shift;
+
+	__global_lock2(flags);
+
+	val = ioread32(reg);
+	val &= mask;
+	val |= periph;
+	iowrite32(val, reg);
+
+	__global_unlock2(flags);
+}
+
+/**
+ * img_request_dma() - claim a DMA channel and route a peripheral to it.
+ * @dmanr:	channel number to claim, or -1 to pick any free channel.
+ * @periph:	peripheral select value to program for the channel.
+ *
+ * Returns the channel number claimed on success, -EINVAL for a bad
+ * argument, or -EBUSY if the requested (or every) channel is taken.
+ */
+int img_request_dma(int dmanr, unsigned int periph)
+{
+	unsigned long flags;
+	int err;
+
+	/*
+	 * Reject out-of-range channel numbers; -1 is the only valid
+	 * negative value (meaning "pick one for me").  Previously any
+	 * negative value other than -1 slipped through and indexed
+	 * dma_channels[] out of bounds.
+	 */
+	if (dmanr < -1 || dmanr >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	/*
+	 * NOTE(review): confirm whether MAX_PERIPH_CHANNELS is a count
+	 * (then this should be >=) or the largest valid value.
+	 */
+	if (periph > MAX_PERIPH_CHANNELS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma_spin_lock, flags);
+
+	/* If dmanr is -1 we pick the DMA channel to use. */
+	if (dmanr == -1) {
+		dmanr = get_free_channel();
+		if (dmanr == -1) {
+			err = -EBUSY;
+			goto out;
+		}
+	}
+
+	if (dma_channels[dmanr] != IMG_DMA_CHANNEL_AVAILABLE) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	dma_channels[dmanr] = IMG_DMA_CHANNEL_INUSE;
+
+	setup_dma_channel(dmanr, periph);
+
+	err = dmanr;
+out:
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(img_request_dma);
+
+/**
+ * img_free_dma() - release a DMA channel claimed by img_request_dma().
+ * @dmanr:	channel number to release.
+ *
+ * Returns 0 on success or -EINVAL for an out-of-range channel.
+ */
+int img_free_dma(int dmanr)
+{
+	unsigned long flags;
+
+	/*
+	 * Also reject negative channel numbers, which would otherwise
+	 * index dma_channels[] out of bounds below.
+	 */
+	if (dmanr < 0 || dmanr >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma_spin_lock, flags);
+
+	if (dma_channels[dmanr] == IMG_DMA_CHANNEL_INUSE)
+		dma_channels[dmanr] = IMG_DMA_CHANNEL_AVAILABLE;
+
+	/* Route the channel back to peripheral 0 (unassigned). */
+	setup_dma_channel(dmanr, 0);
+
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_free_dma);
+
+/* Return the base of the register block for DMA channel @dmanr. */
+unsigned int *get_dma_regs(int dmanr)
+{
+	return (unsigned int *)(DMAC_HWBASE + (dmanr * DMAC_CHANNEL_STRIDE));
+}
+EXPORT_SYMBOL(get_dma_regs);
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Peripheral names indexed by channel-select value; unnamed entries are
+ * just their index as a string.
+ * NOTE(review): "I2S out" appears at both index 16 and index 20 —
+ * confirm against the SoC documentation whether index 20 should be a
+ * different peripheral.
+ */
+static const char *periph_names[] = {
+	"Local Bus",
+	"Reed Solomon Input",
+	"Reed Solomon Output",
+	"Memory Stick",
+	"4",
+	"SCP input",
+	"SCP output",
+	"7",
+	"ECP output",
+	"ATAPI",
+	"SPI 1 output",
+	"SPI 1 input",
+	"SPI 2 output",
+	"SPI 2 input",
+	"Noise Shaper",
+	"LCD",
+	"I2S out",
+	"I2S in",
+	"SPDIF out",
+	"SPDIF in",
+	"I2S out",
+	"Core/SCP",
+	"Core/Local",
+	"Core",
+	"RDI",
+	"NAND",
+};
+
+/*
+ * Human-readable names for the IMG_DMA_CHANNEL_* states.  The storage
+ * class specifier must precede the type qualifier ("static const", not
+ * "const static"); the old order is deprecated by the C standard and
+ * flagged by checkpatch.
+ */
+static const char *state_names[] = {
+	"Reserved",
+	"Available",
+	"In Use",
+};
+
+/*
+ * Print one channel-select register (four channels, 8 bits each) to
+ * the seq_file, one line per channel.  Returns the next channel number
+ * so successive calls number channels consecutively.
+ */
+static int proc_dma_show_reg(struct seq_file *m, unsigned int val, int chan)
+{
+	int i;
+
+	for (i = 0; i < 4; i++, chan++) {
+		int periph = (val >> (i * 8)) & DMAC_PERIPH_MASK;
+		/*
+		 * Bound by the element count, not sizeof(), which is the
+		 * table size in bytes and previously allowed reads past
+		 * the end of periph_names[] for large select values.
+		 */
+		if (periph < ARRAY_SIZE(periph_names))
+			seq_printf(m, "%2d: %s (%s)\n", chan,
+				   periph_names[periph],
+				   state_names[dma_channels[chan]]);
+		else
+			seq_printf(m, "%2d: %d (%s)\n", chan, periph,
+				   state_names[dma_channels[chan]]);
+	}
+	return chan;
+}
+
+/* seq_file show: dump the peripheral routing of all 12 DMA channels. */
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+	int chan = 0;
+	unsigned int *reg;
+
+	reg = (unsigned int *)DMA_CHAN_SEL_0_3;
+	chan = proc_dma_show_reg(m, *reg, chan);
+
+	reg = (unsigned int *)DMA_CHAN_SEL_4_7;
+	chan = proc_dma_show_reg(m, *reg, chan);
+
+	reg = (unsigned int *)DMA_CHAN_SEL_8_11;
+	chan = proc_dma_show_reg(m, *reg, chan);
+
+	return 0;
+}
+
+/* open callback for /proc/dma: single-shot seq_file. */
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+	.open = proc_dma_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Create /proc/dma listing DMA channel assignments. */
+static int __init proc_dma_init(void)
+{
+	proc_create("dma", 0, NULL, &proc_dma_operations);
+	return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
diff --git a/arch/metag/soc/chorus2/gpio.c b/arch/metag/soc/chorus2/gpio.c
new file mode 100644
index 0000000..7e8f6ab
--- /dev/null
+++ b/arch/metag/soc/chorus2/gpio.c
@@ -0,0 +1,686 @@
+/*
+ *  Generic Chorus2 GPIO handling.
+ *
+ *   Based on ARM PXA code and others.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include <asm/global_lock.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+#include <asm/soc-chorus2/gpio.h>
+
+#define GPIO_BASE_A 0x0200F000
+#define GPIO_BASE_B 0x02010000
+#define GPIO_BASE_C 0x02011000
+#define GPIO_BASE_D 0x02012000
+#define GPIO_BASE_E 0x02013000
+#define GPIO_BASE_F 0x02014000
+#define GPIO_BASE_G 0x02015000
+#define GPIO_BASE_H 0x02016000
+
+#define GPIO_REG_OE_OFFSET         0x00000000
+#define GPIO_REG_OUTPUT_OFFSET     0x00000004
+#define GPIO_REG_INPUT_OFFSET      0x00000008
+#define GPIO_REG_POLARITY_OFFSET   0x0000000C
+#define GPIO_REG_INT_TYPE_OFFSET   0x00000010
+#define GPIO_REG_INT_ENABLE_OFFSET 0x00000014
+#define GPIO_REG_INT_STAT_OFFSET   0x00000018
+#define GPIO_REG_MASTER_SEL_OFFSET 0x0000001C
+#define GPIO_REG_PULLUP_OFFSET     0x00000020
+
+struct chorus2_gpio_chip {
+	struct gpio_chip chip;
+	struct irq_domain *domain;
+	unsigned int index;
+	unsigned int irq;
+};
+
+/*
+ * Return the register base address for GPIO block @index (0..7 for
+ * banks A..H).  The default arm returns -EINVAL converted to unsigned;
+ * callers are expected to pass a valid chip index (the address getters
+ * below add offsets to this value without checking).
+ *
+ * The opening brace of a function definition goes on its own line per
+ * kernel coding style.
+ */
+static unsigned int get_gpio_base(unsigned int index)
+{
+	switch (index) {
+	case 0:
+		return GPIO_BASE_A;
+	case 1:
+		return GPIO_BASE_B;
+	case 2:
+		return GPIO_BASE_C;
+	case 3:
+		return GPIO_BASE_D;
+	case 4:
+		return GPIO_BASE_E;
+	case 5:
+		return GPIO_BASE_F;
+	case 6:
+		return GPIO_BASE_G;
+	case 7:
+		return GPIO_BASE_H;
+	default:
+		/* We should never be here */
+		return -EINVAL;
+	}
+}
+
+/* Address of the output-enable register for GPIO block @index. */
+static void __iomem *get_output_enable(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) + GPIO_REG_OE_OFFSET);
+}
+
+/* Address of the output-value register for GPIO block @index. */
+static void __iomem *get_output(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) + GPIO_REG_OUTPUT_OFFSET);
+}
+
+/* Address of the input-value register for GPIO block @index. */
+static void __iomem *get_input(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) + GPIO_REG_INPUT_OFFSET);
+}
+
+/* Address of the master-select (pin mux) register for block @index. */
+static void __iomem *get_master_select(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) +
+		GPIO_REG_MASTER_SEL_OFFSET);
+}
+
+/* Address of the interrupt-status register for GPIO block @index. */
+static void __iomem *get_irq_stat(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) +
+		GPIO_REG_INT_STAT_OFFSET);
+}
+
+/* Address of the interrupt-enable register for GPIO block @index. */
+static void __iomem *get_irq_enable(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) +
+		GPIO_REG_INT_ENABLE_OFFSET);
+}
+
+/* Address of the interrupt-type (level/edge) register for block @index. */
+static void __iomem *get_irq_type(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) +
+		GPIO_REG_INT_TYPE_OFFSET);
+}
+
+/* Address of the interrupt-polarity register for GPIO block @index. */
+static void __iomem *get_irq_polarity(unsigned int index)
+{
+	return (void __iomem *) (get_gpio_base(index) +
+		GPIO_REG_POLARITY_OFFSET);
+}
+
+/*
+ * gpiolib direction_input hook: configure pin @offset as an input.
+ * The 0x00010000 << offset pattern used throughout this file writes a
+ * per-pin enable bit in the top half of the register together with the
+ * value bit in the bottom half, so only the addressed pin is changed.
+ */
+static int chorus2_gpio_direction_input(struct gpio_chip *chip,
+					unsigned offset)
+{
+	u32 value;
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *output_enable;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	output_enable = get_output_enable(chorus2->index);
+
+	value = 0x00010000 << offset;
+	__raw_writel(value, output_enable);
+
+	return 0;
+}
+
+/*
+ * gpiolib direction_output hook: configure pin @offset as an output
+ * and drive it to @output_value.
+ */
+static int chorus2_gpio_direction_output(struct gpio_chip *chip,
+					 unsigned offset, int output_value)
+{
+	u32 value;
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *output_enable;
+	void __iomem *output;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	output_enable = get_output_enable(chorus2->index);
+	output = get_output(chorus2->index);
+
+	/* enable bit and output-enable bit for this pin */
+	value = 0x00010001 << offset;
+	__raw_writel(value, output_enable);
+
+	value = (0x00010000 | (output_value & 0x1)) << offset;
+	__raw_writel(value, output);
+
+	return 0;
+}
+
+/*
+ * Return GPIO level.
+ * Returns the pin's raw bit from the input register (non-zero when
+ * high, not normalised to 0/1 — gpiolib treats any non-zero as set).
+ */
+static int chorus2_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *input;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	input = get_input(chorus2->index);
+
+	return __raw_readl(input) & (1 << offset);
+}
+
+/*
+ * Set output GPIO level: drive pin @offset to @output_value using the
+ * per-pin write-enable scheme.
+ */
+static void chorus2_gpio_set(struct gpio_chip *chip, unsigned offset,
+			     int output_value)
+{
+	u32 value;
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *output;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	output = get_output(chorus2->index);
+
+	value = (0x00010000 | (output_value & 0x1)) << offset;
+	__raw_writel(value, output);
+}
+
+/*
+ * gpiolib request hook: switch pin @offset from peripheral ("master")
+ * control to GPIO mode via the master-select register, then default it
+ * to an input.
+ */
+static int chorus2_gpio_gpio(struct gpio_chip *chip, unsigned offset)
+{
+	u32 value;
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *master_select;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	master_select = get_master_select(chorus2->index);
+
+	/* write select bit and '1' for that pin */
+	value = 0x00010001 << offset;
+	__raw_writel(value, master_select);
+
+	return chorus2_gpio_direction_input(chip, offset);
+}
+
+/*
+ * gpiolib free hook: return pin @offset to peripheral ("master")
+ * control by clearing its master-select bit.
+ */
+void chorus2_gpio_mastermode(struct gpio_chip *chip, unsigned offset)
+{
+	u32 value;
+	struct chorus2_gpio_chip *chorus2;
+	void __iomem *master_select;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	master_select = get_master_select(chorus2->index);
+
+	/* clear select bit for that pin */
+	value = 0x00010000 << offset;
+	__raw_writel(value, master_select);
+}
+
+/*
+ * gpiolib to_irq hook: map pin @offset to its virtual irq, creating
+ * the mapping in the block's irq domain if necessary.
+ */
+static int chorus2_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct chorus2_gpio_chip *chorus2;
+
+	chorus2 = container_of(chip, struct chorus2_gpio_chip, chip);
+	return irq_create_mapping(chorus2->domain, offset);
+}
+
+static struct chorus2_gpio_chip chorus2_gpio_chip[] = {
+	[0] = {
+		.index = 0,
+		.irq = GPIO_A_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-A",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 0,
+			.ngpio            = 16,
+		},
+	},
+	[1] = {
+		.index = 1,
+		.irq = GPIO_B_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-B",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 16,
+			.ngpio            = 16,
+		},
+	},
+	[2] = {
+		.index = 2,
+		.irq = GPIO_C_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-C",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 32,
+			.ngpio            = 16,
+		},
+	},
+	[3] = {
+		.index = 3,
+		.irq = GPIO_D_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-D",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 48,
+			.ngpio            = 16,
+		},
+	},
+	[4] = {
+		.index = 4,
+		.irq = GPIO_E_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-E",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 64,
+			.ngpio            = 16,
+		},
+	},
+	[5] = {
+		.index = 5,
+		.irq = GPIO_F_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-F",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 80,
+			.ngpio            = 16,
+		},
+	},
+	[6] = {
+		.index = 6,
+		.irq = GPIO_G_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-G",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 96,
+			.ngpio            = 16,
+		},
+	},
+	[7] = {
+		.index = 7,
+		.irq = GPIO_H_IRQ_NUM,
+		.chip = {
+			.label            = "gpio-H",
+			.direction_input  = chorus2_gpio_direction_input,
+			.direction_output = chorus2_gpio_direction_output,
+			.get              = chorus2_gpio_get,
+			.set              = chorus2_gpio_set,
+			.free             = chorus2_gpio_mastermode,
+			.request          = chorus2_gpio_gpio,
+			.to_irq           = chorus2_gpio_to_irq,
+			.base             = 112,
+			.ngpio            = 11,
+		},
+	},
+};
+
+/*
+ * Return chip number for gpio.
+ * Finds the chorus2_gpio_chip[] index whose [base, base+ngpio) range
+ * contains the global @gpio number; -EINVAL if none does.
+ */
+static int chorus2_gpio_to_chip(unsigned int gpio)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chorus2_gpio_chip); i++) {
+		if (gpio >= chorus2_gpio_chip[i].chip.base &&
+		    gpio < (chorus2_gpio_chip[i].chip.base +
+			    chorus2_gpio_chip[i].chip.ngpio)) {
+				return i;
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * Hand pin @gpio back to peripheral ("master") control by clearing its
+ * bit in the owning block's master-select register.  Returns 0 on
+ * success, or -EINVAL if @gpio belongs to no registered block.
+ */
+int chorus2_gpio_disable(unsigned int gpio)
+{
+	struct chorus2_gpio_chip *chorus2;
+	unsigned int pin;
+	int chip_idx;
+
+	chip_idx = chorus2_gpio_to_chip(gpio);
+	if (chip_idx < 0)
+		return -EINVAL;
+
+	chorus2 = &chorus2_gpio_chip[chip_idx];
+	pin = gpio - chorus2->chip.base;
+
+	/* clear select bit for that pin */
+	__raw_writel(0x00010000 << pin, get_master_select(chorus2->index));
+
+	return 0;
+}
+EXPORT_SYMBOL(chorus2_gpio_disable);
+
+/*
+ * Get Chorus2 GPIO chip from irq data provided to generic IRQ
+ * callbacks; the chip was stored as the irq domain's host_data in
+ * chorus2_init_gpio().
+ */
+static struct chorus2_gpio_chip *irqd_to_chorus2_gpio_chip(
+						struct irq_data *data)
+{
+	return (struct chorus2_gpio_chip *)data->domain->host_data;
+}
+
+/*
+ * Clear interrupt status register for gpio module: acknowledge the
+ * latched interrupt for pin @offset only.
+ */
+static void chorus2_gpio_irq_clear(struct chorus2_gpio_chip *chorus2,
+				   unsigned int offset)
+{
+	unsigned int value;
+	void __iomem *int_stat;
+
+	int_stat = get_irq_stat(chorus2->index);
+	/*
+	 * Clear interrupt that fired without checking
+	 * the value of int_stat again as it may contain new
+	 * interrupts
+	 */
+	value = 0x00010000 << offset;
+	__raw_writel(value, int_stat);
+}
+
+/* Enable the interrupt for pin @offset in this GPIO block. */
+static void chorus2_gpio_irq_enable(struct chorus2_gpio_chip *chorus2,
+				    unsigned int offset)
+{
+	unsigned int value;
+	void __iomem *irq_enable;
+
+	irq_enable = get_irq_enable(chorus2->index);
+
+	/* set the enable bit for this pin (write-enable + value) */
+	value = 0x00010001 << offset;
+	__raw_writel(value, irq_enable);
+}
+
+/* Disable the interrupt for pin @offset in this GPIO block. */
+static void chorus2_gpio_irq_disable(struct chorus2_gpio_chip *chorus2,
+				     unsigned int offset)
+{
+	unsigned int value;
+	void __iomem *irq_enable;
+
+	irq_enable = get_irq_enable(chorus2->index);
+
+	/* clear the enable bit for this pin */
+	value = 0x00010000 << offset;
+	__raw_writel(value, irq_enable);
+}
+
+/* Program the interrupt polarity (GPIO_POLARITY_*) for pin @offset. */
+static void chorus2_gpio_irq_polarity(struct chorus2_gpio_chip *chorus2,
+				      unsigned int offset,
+				      unsigned int polarity)
+{
+	unsigned int value;
+	void __iomem *irq_polarity;
+
+	irq_polarity = get_irq_polarity(chorus2->index);
+
+	value = (0x00010000 | (polarity & 0x1)) << offset;
+	__raw_writel(value, irq_polarity);
+}
+
+/*
+ * True if the descriptor's flow handler has been set (level or edge),
+ * i.e. the irq type has been configured.
+ */
+static int chorus2_gpio_valid_handler(struct irq_desc *desc)
+{
+	return desc->handle_irq == handle_level_irq ||
+		desc->handle_irq == handle_edge_irq;
+}
+
+/* Program level/edge triggering (GPIO_*_TRIGGERED) for pin @offset. */
+static void chorus2_gpio_irq_type(struct chorus2_gpio_chip *chorus2,
+				  unsigned int offset, unsigned int type)
+{
+	unsigned int value;
+	void __iomem *irq_type;
+
+	irq_type = get_irq_type(chorus2->index);
+
+	value = (0x00010000 | (type & 0x1)) << offset;
+	__raw_writel(value, irq_type);
+}
+
+/*
+ * set polarity to trigger on next edge, whether rising or falling
+ * @chorus2: gpio chip
+ * @offset: offset of gpio within chip
+ *
+ * The polarity bit is written from the inverted current input level,
+ * so whichever transition happens next fires the interrupt.  The
+ * read-modify-write of the shared polarity register is done under the
+ * global lock.
+ */
+static void chorus2_gpio_irq_next_edge(struct chorus2_gpio_chip *chorus2,
+				       unsigned int offset)
+{
+	unsigned int value_p, value_i;
+	void __iomem *irq_polarity, *input;
+	int lstat;
+
+	/*
+	 * The old code also read the interrupt-type register into a
+	 * local that was never used; the dead read has been dropped.
+	 */
+	irq_polarity = get_irq_polarity(chorus2->index);
+	input = get_input(chorus2->index);
+
+	__global_lock2(lstat);
+	value_i = ~(__raw_readl(input));
+	value_p = __raw_readl(irq_polarity);
+	value_p &= ~(0x1 << offset);
+	value_p |= (0x00010000|((value_i >> offset) & 0x1)) << offset;
+	__raw_writel(value_p, irq_polarity);
+	__global_unlock2(lstat);
+}
+
+/* irq_chip ack: clear the latched status bit for this pin. */
+static void gpio_ack_irq(struct irq_data *data)
+{
+	struct chorus2_gpio_chip *chorus2 = irqd_to_chorus2_gpio_chip(data);
+
+	chorus2_gpio_irq_clear(chorus2, data->hwirq);
+}
+
+/* irq_chip mask: disable this pin's interrupt. */
+static void gpio_mask_irq(struct irq_data *data)
+{
+	struct chorus2_gpio_chip *chorus2 = irqd_to_chorus2_gpio_chip(data);
+
+	chorus2_gpio_irq_disable(chorus2, data->hwirq);
+}
+
+/* irq_chip unmask: enable this pin's interrupt. */
+static void gpio_unmask_irq(struct irq_data *data)
+{
+	struct chorus2_gpio_chip *chorus2 = irqd_to_chorus2_gpio_chip(data);
+
+	chorus2_gpio_irq_enable(chorus2, data->hwirq);
+}
+
+/*
+ * irq_chip startup: warn if the trigger type was never configured,
+ * then clear any stale status and enable the pin's interrupt.
+ */
+static unsigned int gpio_startup_irq(struct irq_data *data)
+{
+	struct chorus2_gpio_chip *chorus2 = irqd_to_chorus2_gpio_chip(data);
+	irq_hw_number_t hw = data->hwirq;
+	struct irq_desc *desc = irq_to_desc(data->irq);
+
+	/*
+	 * This warning indicates that the type of the irq hasn't been set
+	 * before enabling the irq. This would normally be done by passing some
+	 * trigger flags to request_irq().
+	 */
+	WARN(!chorus2_gpio_valid_handler(desc),
+		"irq type not set before enabling gpio irq %d", data->irq);
+
+	chorus2_gpio_irq_clear(chorus2, hw);
+	chorus2_gpio_irq_enable(chorus2, hw);
+	return 0;
+}
+
+/*
+ * irq_chip set_type: translate IRQ_TYPE_* flags into the block's
+ * level/edge and polarity settings, and install the matching flow
+ * handler.  EDGE_BOTH is emulated by re-arming the polarity to the
+ * opposite of the current input level (chorus2_gpio_irq_next_edge).
+ */
+static int gpio_set_irq_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct chorus2_gpio_chip *chorus2 = irqd_to_chorus2_gpio_chip(data);
+	unsigned int type;
+	unsigned int polarity;
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_BOTH:
+		/* polarity here is a placeholder; fixed up below */
+		type = GPIO_EDGE_TRIGGERED;
+		polarity = GPIO_POLARITY_LOW;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		type = GPIO_EDGE_TRIGGERED;
+		polarity = GPIO_POLARITY_HIGH;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type = GPIO_EDGE_TRIGGERED;
+		polarity = GPIO_POLARITY_LOW;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		type = GPIO_LEVEL_TRIGGERED;
+		polarity = GPIO_POLARITY_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		type = GPIO_LEVEL_TRIGGERED;
+		polarity = GPIO_POLARITY_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	chorus2_gpio_irq_type(chorus2, data->hwirq, type);
+	if (type == GPIO_LEVEL_TRIGGERED)
+		__irq_set_handler_locked(data->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(data->irq, handle_edge_irq);
+
+	if (flow_type == IRQ_TYPE_EDGE_BOTH)
+		chorus2_gpio_irq_next_edge(chorus2, data->hwirq);
+	else
+		chorus2_gpio_irq_polarity(chorus2, data->hwirq, polarity);
+
+	return 0;
+}
+
+/* gpio virtual interrupt functions */
+static struct irq_chip gpio_irq_chip = {
+	.irq_startup = gpio_startup_irq,
+	.irq_ack = gpio_ack_irq,
+	.irq_mask = gpio_mask_irq,
+	.irq_unmask = gpio_unmask_irq,
+	.irq_set_type = gpio_set_irq_type,
+};
+
+/*
+ * handler for real IRQ lines. Clears the gpio pin that triggered the irq and
+ * setup the handler for virtual irq line.
+ * @irq: real irq number
+ * @desc: irq descriptor
+ */
+static void chorus2_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	irq_hw_number_t hw;
+	unsigned int irq_stat, irq_no;
+	struct chorus2_gpio_chip *port;
+	struct irq_desc *child_desc;
+
+	port = (struct chorus2_gpio_chip *)irq_desc_get_handler_data(desc);
+	/*
+	 * for irq to be valid, the gpio pin has to be configured as input,
+	 * set as interrupt in interrupt enable register, and triggered in
+	 * interrupt status register.
+	 */
+	irq_stat = __raw_readl(get_irq_stat(port->index)) &
+			__raw_readl(get_irq_enable(port->index)) &
+			~__raw_readl(get_output_enable(port->index));
+
+	/* walk the pending bits lowest-first, dispatching each pin */
+	for (hw = 0; irq_stat; irq_stat >>= 1, ++hw) {
+		if (!(irq_stat & 1))
+			continue;
+
+		irq_no = irq_linear_revmap(port->domain, hw);
+		child_desc = irq_to_desc(irq_no);
+
+		/* Toggle edge for pin with both edge triggering enabled */
+		if (irqd_get_trigger_type(&child_desc->irq_data)
+				== IRQ_TYPE_EDGE_BOTH)
+			chorus2_gpio_irq_next_edge(port, hw);
+
+		BUG_ON(!chorus2_gpio_valid_handler(child_desc));
+		/* Call the device handler for virtual irq */
+		generic_handle_irq_desc(irq_no, child_desc);
+	}
+}
+
+/* irq_domain map: attach the GPIO irq_chip to each new virtual irq. */
+static int chorus2_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	irq_set_chip(irq, &gpio_irq_chip);
+	return 0;
+}
+
+static const struct irq_domain_ops chorus2_gpio_irq_domain_ops = {
+	.map = chorus2_gpio_irq_map,
+};
+
+/*
+ * Register the eight GPIO banks with gpiolib, map each bank's SoC irq,
+ * create a linear irq domain for its pins, and install the chained
+ * handler that demultiplexes the bank interrupt.
+ */
+void __init chorus2_init_gpio(void)
+{
+	int i, ret, irq;
+
+	for (i = 0; i < ARRAY_SIZE(chorus2_gpio_chip); i++) {
+		ret = gpiochip_add(&chorus2_gpio_chip[i].chip);
+		if (ret) {
+			/* pr_warning() is deprecated; use pr_warn() */
+			pr_warn("gpio: Unable to register gpio block for IRQ %d\n",
+				chorus2_gpio_chip[i].irq);
+			break;
+		}
+
+		irq = external_irq_map(chorus2_gpio_chip[i].irq);
+		if (irq < 0) {
+			pr_err("%s: unable to map GPIO block %d irq %u (%d)\n",
+			       __func__, i, chorus2_gpio_chip[i].irq, irq);
+			continue;
+		}
+		chorus2_gpio_chip[i].irq = irq;
+
+		pr_info("Setting up virtual IRQs for GPIO block %d\n", i);
+
+		/* Add virtual irqs for each gpio */
+		chorus2_gpio_chip[i].domain = irq_domain_add_linear(
+					NULL,
+					chorus2_gpio_chip[i].chip.ngpio,
+					&chorus2_gpio_irq_domain_ops,
+					&chorus2_gpio_chip[i]);
+		/*
+		 * Don't install the chained handler without a domain: the
+		 * handler dereferences port->domain on every interrupt.
+		 */
+		if (!chorus2_gpio_chip[i].domain) {
+			pr_err("%s: unable to add irq domain for GPIO block %d\n",
+			       __func__, i);
+			continue;
+		}
+		/* Setup Chained handler for this gpio block */
+		irq_set_handler_data(irq, &chorus2_gpio_chip[i]);
+		irq_set_chained_handler(irq, chorus2_gpio_irq_handler);
+	}
+}
+
diff --git a/arch/metag/soc/chorus2/irq.c b/arch/metag/soc/chorus2/irq.c
new file mode 100644
index 0000000..d16d3bd
--- /dev/null
+++ b/arch/metag/soc/chorus2/irq.c
@@ -0,0 +1,106 @@
+/*
+ * Chorus2 specific interrupt code.
+ *
+ * Copyright (C) 2005-2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip/metag-ext.h>
+
+#include <asm/global_lock.h>
+#include <asm/irq.h>
+#include <asm/soc-chorus2/gpio.h>
+
+/*
+ * The Chorus 2 SoC has three interrupt status and three interrupt
+ * enable registers,
+ *
+ * sys_interrupt_status1, sys_interrupt_status2, sys_interrupt_status3,
+ * sys_interrupt_enable1, sys_interrupt_enable2, sys_interrupt_enable3.
+ *
+ * Each register is 4 bytes in size and each type of register (status
+ * or enable) is at consecutive addresses.
+ */
+
+/*
+ * The Chorus2 SOC IRQ numbers start at zero, but when they were connected to
+ * the Meta Core Vector block they were wired at an offset of 4, irq numbering
+ * is based on the Meta Core numbers.
+ */
+#define SOC_IRQ_OFFSET 4
+
+/*
+ * The Chorus 2 SoC has a sys_interrupt_enable register that allows
+ * individual interrupt lines to be masked.
+ */
+static void __iomem *sys_interrupt_enable_addr = (void __iomem *)0x02000070;
+
+/**
+ *	chorus2_mask_irq - mask SoC irq @irq
+ *	@irq_data:	the data for the IRQ to mask
+ *
+ *	Mask the IRQ at the Chorus 2-specific sys_interrupt_enable
+ *	register.
+ */
+void chorus2_mask_irq(struct irq_data *irq_data)
+{
+	unsigned int reg, bit, enable;
+	unsigned int soc_irq;
+	void __iomem *addr;
+	int state;
+
+	/* First mask at the Meta external interrupt controller. */
+	meta_intc_mask_irq_simple(irq_data);
+
+	/* Scale the irq into the SoC range. */
+	soc_irq = irq_data->hwirq - SOC_IRQ_OFFSET;
+
+	/* 32 lines per register; three consecutive enable registers. */
+	reg = (soc_irq >> 5) & 0x3;
+	bit = (soc_irq & 0x1f);
+
+	addr = sys_interrupt_enable_addr + (reg << 2);
+
+	/* read-modify-write of a shared register: take the global lock */
+	__global_lock2(state);
+	enable = readl(addr);
+	writel(enable & ~(1 << bit), addr);
+	__global_unlock2(state);
+}
+
+/**
+ *	chorus2_unmask_irq - unmask a SoC irq
+ *	@irq_data:	the data for the IRQ to unmask
+ *
+ *	Set the IRQ's bit in the Chorus 2-specific sys_interrupt_enable
+ *	register (mirror of chorus2_mask_irq).
+ */
+void chorus2_unmask_irq(struct irq_data *irq_data)
+{
+	unsigned int reg, bit, enable;
+	unsigned int soc_irq;
+	void __iomem *addr;
+	int state;
+
+	meta_intc_unmask_irq_simple(irq_data);
+
+	/* Scale the irq into the SoC range. */
+	soc_irq = irq_data->hwirq - SOC_IRQ_OFFSET;
+
+	reg = (soc_irq >> 5) & 0x3;
+	bit = (soc_irq & 0x1f);
+
+	addr = sys_interrupt_enable_addr + (reg << 2);
+
+	__global_lock2(state);
+	enable = readl(addr);
+	writel(enable | (1 << bit), addr);
+	__global_unlock2(state);
+}
+
+/*
+ * SoC irq setup: hook the Chorus2 sys_interrupt_enable masking into
+ * the Meta external interrupt controller's chips, then bring up the
+ * GPIO interrupt infrastructure.
+ */
+void __init chorus2_init_irq(void)
+{
+	/* override default masking IRQ callbacks to make use of SoC masks */
+	meta_intc_edge_chip.irq_mask = chorus2_mask_irq;
+	meta_intc_level_chip.irq_mask = chorus2_mask_irq;
+	meta_intc_edge_chip.irq_unmask = chorus2_unmask_irq;
+	meta_intc_level_chip.irq_unmask = chorus2_unmask_irq;
+
+	chorus2_init_gpio();
+}
diff --git a/arch/metag/soc/chorus2/lcd.c b/arch/metag/soc/chorus2/lcd.c
new file mode 100644
index 0000000..85507342
--- /dev/null
+++ b/arch/metag/soc/chorus2/lcd.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies
+ */
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/global_lock.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+
+#define CHORUS2_LCD_BASE	0x0200E000
+#define CHORUS2_LCD_DMA		15
+
+#define C2_CLOCK_ENABLE         0x020000B8
+#define C2_CLOCK_LCD_BIT        0x00000010
+
+#define C2_PIN_CONTROLLER_BASE	0x02024000
+#define PIN_CONTROL_LCD_OFFSET	0x8
+#define PIN_CONTROL_LCD_ON	1
+
+static struct resource lcd_resources[] = {
+	[0] = {
+		.start = CHORUS2_LCD_BASE,
+		.end = CHORUS2_LCD_BASE + 0x30,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = LCD_IRQ_NUM,
+		/* mapped in chorus2_lcd_init() */
+		.flags = IORESOURCE_IRQ,
+	},
+	[2] = {
+		.name = "dma_periph",
+		.start = CHORUS2_LCD_DMA,
+		.end = CHORUS2_LCD_DMA,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+/*
+ * Prepare the LCD block: map its SoC irq into the platform resource,
+ * route the LCD pins via the pin controller, and gate on its clock.
+ * On irq mapping failure the resource is left unmapped and we bail
+ * out before touching the hardware.
+ */
+static void __init chorus2_lcd_init(void)
+{
+	unsigned int clock_enable;
+	int lstat, irq;
+
+	/* Map the IRQ */
+	irq = external_irq_map(lcd_resources[1].start);
+	if (irq < 0) {
+		pr_err("%s: irq map failed (%d)\n",
+		       __func__, irq);
+		return;
+	}
+	lcd_resources[1].start = irq;
+	lcd_resources[1].end = irq;
+
+	/* Set up the pin controller */
+	writel(PIN_CONTROL_LCD_ON, C2_PIN_CONTROLLER_BASE +
+	       PIN_CONTROL_LCD_OFFSET);
+
+	__global_lock2(lstat);
+
+	/* Turn on the clock */
+	clock_enable = readl(C2_CLOCK_ENABLE);
+	if (!(clock_enable & C2_CLOCK_LCD_BIT))
+		writel(clock_enable | C2_CLOCK_LCD_BIT, C2_CLOCK_ENABLE);
+
+	__global_unlock2(lstat);
+}
+
+static u64 lcd_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device lcd_device = {
+	.name           = "img-lcd",
+	.id             = -1,
+	.num_resources = ARRAY_SIZE(lcd_resources),
+	.resource = lcd_resources,
+	.dev = {
+		.dma_mask = &lcd_dmamask,
+		/*
+		 * Keep the coherent mask consistent with the 32-bit
+		 * streaming mask above; DMA_BIT_MASK(64) looked like a
+		 * typo for this 32-bit device.
+		 */
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+static struct platform_device *chorus2_lcd_devices[] __initdata = {
+	&lcd_device,
+};
+
+/* Initialise the LCD hardware then register its platform device. */
+static int __init chorus2_lcd_devices_setup(void)
+{
+	chorus2_lcd_init();
+	return platform_add_devices(chorus2_lcd_devices,
+				ARRAY_SIZE(chorus2_lcd_devices));
+}
diff --git a/arch/metag/soc/chorus2/setup.c b/arch/metag/soc/chorus2/setup.c
new file mode 100644
index 0000000..21021e9
--- /dev/null
+++ b/arch/metag/soc/chorus2/setup.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2007 Imagination Technologies
+ */
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/serial_8250.h>
+#include <linux/fsl_devices.h>
+#include <linux/spi/spi_img.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <asm/soc-chorus2/c2_irqnums.h>
+#include <asm/soc-chorus2/gpio.h>
+#include <asm/soc-chorus2/setup.h>
+
+#define CHORUS2_UART_REF_CLK 0x020000A4
+
+#define CHORUS2_UART_ENABLE 0x4
+
+/* USB starts at this address, EHCI regs are 0x100 above */
+#define CHORUS2_USB_BASE   0x0200C000
+#define CHORUS2_USB_LEN    0x00001000
+
+#define EHCI_OFFSET        0x00000100
+#define CHORUS2_EHCI_BASE  (CHORUS2_USB_BASE + EHCI_OFFSET)
+#define CHORUS2_EHCI_LEN   (CHORUS2_USB_LEN - EHCI_OFFSET)
+
+#define CHORUS2_SPI1_HWBASE    0x02008000
+
+#define CHORUS2_SCBA_BASE  0x0200A000
+
+#define CHORUS2_SCBB_BASE  0x0200B000
+
+#define MISC_CLOCK_CTRL_REG 0x020000bc
+
+#define DMATRIG_SPI1O_NUM       10	/* SPI 1 output DMA */
+#define DMATRIG_SPI1I_NUM       11      /* SPI 1 input DMA */
+
+static void __init chorus2_serial_init(void)
+{
+	chorus2_gpio_disable(GPIO_E_PIN(8));  /* S1 In */
+	chorus2_gpio_disable(GPIO_E_PIN(9));  /* S1 Out */
+
+	/* Enable the UART clock - set for the default 1.8432 MHz */
+	writel(CHORUS2_UART_ENABLE, CHORUS2_UART_REF_CLK);
+
+}
+
+#ifdef CONFIG_USB_EHCI_HCD
/* Host mode: expose only the EHCI register window (0x100 above the USB base). */
static struct resource usb_resources[] = {
	[0] = {
		.start          = CHORUS2_EHCI_BASE,
		.end            = CHORUS2_EHCI_BASE + CHORUS2_EHCI_LEN - 1,
		.flags          = IORESOURCE_MEM,
	},
	[1] = {
		/* hardware IRQ; rewritten to the Linux IRQ at init time */
		.start          = USB_IRQ_NUM,
		/* mapped in chorus2_usb_init() */
		.flags          = IORESOURCE_IRQ,
	},
};

static u64 ehci_dmamask = DMA_BIT_MASK(32);

/* EHCI host controller platform device. */
static struct platform_device usb_device = {
	.name           = "chorus2-ehci",
	.id             = 0,
	.dev = {
		.dma_mask               = &ehci_dmamask,
		.coherent_dma_mask      = 0xffffffff,
	},
	.num_resources  = ARRAY_SIZE(usb_resources),
	.resource       = usb_resources,
};
+
+#else
+
+static struct resource usb_resources[] = {
+	[0] = {
+		.start          = CHORUS2_USB_BASE,
+		.end            = CHORUS2_USB_BASE + CHORUS2_USB_LEN - 1,
+		.flags          = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start          = USB_IRQ_NUM,
+		/* mapped in chorus2_usb_init() */
+		.flags          = IORESOURCE_IRQ,
+	},
+};
+
+static struct fsl_usb2_platform_data fsl_usb_data = {
+        .operating_mode = FSL_USB2_DR_DEVICE,
+        .phy_mode = FSL_USB2_PHY_UTMI,
+};
+
+static u64 fsl_usb_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device usb_device = {
+	.name           = "fsl-usb2-udc",
+	.id             = 0,
+	.dev = {
+		.dma_mask               = &fsl_usb_dmamask,
+		.coherent_dma_mask      = 0xffffffff,
+		.platform_data          = &fsl_usb_data,
+	},
+	.num_resources  = ARRAY_SIZE(usb_resources),
+	.resource       = usb_resources,
+};
+#endif
+
/*
 * chorus2_usb_init() - map the USB IRQ, take the PHY out of IDDQ and
 * register whichever usb_device (EHCI host or FSL gadget) was built.
 */
static void __init chorus2_usb_init(void)
{
	int irq;
	u32 val;

	/* Map the IRQ */
	irq = external_irq_map(usb_resources[1].start);
	if (irq < 0) {
		pr_err("%s: irq map failed (%d)\n",
		       __func__, irq);
		return;
	}
	usb_resources[1].start = irq;
	usb_resources[1].end = irq;

	/*
	 * Rev C and later silicon require this bit to be set to disable
	 * IDDQ mode.
	 */
	val = readl(MISC_CLOCK_CTRL_REG);
	writel(val | 0x2, MISC_CLOCK_CTRL_REG);

	platform_device_register(&usb_device);
}
+
/* IRQ and MMIO resources for SPI master 1. */
static struct resource spi_resources[] = {
	[0] = {
		/* hardware IRQ; rewritten to the Linux IRQ at init time */
		.start          = SPI1_DMAR_IRQ_NUM,
		/* mapped in chorus2_spi_init() */
		.flags          = IORESOURCE_IRQ,
	},
	[1] = {
		.start          = CHORUS2_SPI1_HWBASE,
		.end            = CHORUS2_SPI1_HWBASE + 0xfff,
		.flags          = IORESOURCE_MEM,
	},
};

/* Driver configuration: 3 chip selects, DMA channels auto-allocated. */
static struct img_spi_master spi_platform_data = {
	.num_chipselect = 3,
	.tx_dma_channel_num = -1, /*auto allocate*/
	.tx_dma_peripheral_num = DMATRIG_SPI1O_NUM,
	.rx_dma_channel_num = -1,
	.rx_dma_peripheral_num = DMATRIG_SPI1I_NUM,
};
+
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device spi_master_device = {
+	.name = "img-spi",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(spi_resources),
+	.resource = spi_resources,
+	.dev = {
+		.dma_mask = &spi_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.platform_data = &spi_platform_data, /* Passed to driver */
+	},
+};
+
+static void __init chorus2_spi_init(void)
+{
+	int irq;
+
+	/* Map the IRQ */
+	irq = external_irq_map(spi_resources[0].start);
+	if (irq < 0) {
+		pr_err("%s: irq map failed (%d)\n",
+		       __func__, irq);
+		return;
+	}
+	spi_resources[0].start = irq;
+	spi_resources[0].end = irq;
+
+	platform_device_register(&spi_master_device);
+}
+
/* ASoC platform device for the Chorus2 PCM audio interface. */
static struct platform_device asoc_device = {
	.name = "chorus2-pcm-audio",
	.id = -1,
};


/* Devices registered unconditionally in chorus2_init_machine(). */
static struct platform_device *chorus2_devices[] __initdata = {
	&asoc_device,
};
+
/*
 * chorus2_init_machine() - machine-level init for Chorus2 boards.
 *
 * Sets up the serial, USB and SPI blocks, registers the static platform
 * devices, then populates any further devices from the device tree.
 */
void __init chorus2_init_machine(void)
{
	chorus2_serial_init();
	chorus2_usb_init();
	chorus2_spi_init();
	platform_add_devices(chorus2_devices, ARRAY_SIZE(chorus2_devices));

	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
+
/* Device-tree root "compatible" strings matched by this machine. */
static const char *chorus2_boards_compat[] __initdata = {
	"frontier,chorus2",
	NULL,
};

MACHINE_START(CHORUS2, "Generic Chorus2")
	.dt_compat	= chorus2_boards_compat,
	CHORUS2_MACHINE_DEFAULTS,
MACHINE_END
diff --git a/arch/metag/soc/tz1090/Makefile b/arch/metag/soc/tz1090/Makefile
new file mode 100644
index 0000000..0e5d6cb
--- /dev/null
+++ b/arch/metag/soc/tz1090/Makefile
@@ -0,0 +1,25 @@
#
# Makefile for TZ1090 platform setup.
#

# Core SoC support, always built in.
obj-y	+= afe.o
obj-y	+= clock.o
obj-y	+= irq.o
obj-y	+= pdp.o
obj-y	+= pm.o
obj-y	+= sdhost.o
obj-y	+= setup.o
obj-y	+= usb.o

# Optional features selected by Kconfig.
obj-$(CONFIG_SUSPEND)		+= suspend.o
obj-$(CONFIG_METAG_COREMEM)	+= coremem.o
obj-$(CONFIG_METAG_SUSPEND_MEM)	+= bootprot.o
obj-$(CONFIG_METAG_USER_TCM)	+= tcm.o
obj-$(CONFIG_NUMA)		+= numa.o

# Use the software SD host DMA helpers unless the MMC driver's
# internal DMA controller (IDMAC) support is enabled.
ifneq ($(CONFIG_MMC_DW_IDMAC),y)
obj-y	+= sdhost-dma.o
endif

obj-$(CONFIG_SOC_COMET_AUDIOCODEC)	+= audiocodec.o

diff --git a/arch/metag/soc/tz1090/afe.c b/arch/metag/soc/tz1090/afe.c
new file mode 100644
index 0000000..11208ae
--- /dev/null
+++ b/arch/metag/soc/tz1090/afe.c
@@ -0,0 +1,249 @@
+/*
+ * soc/tz1090/afe.c
+ * A simple interface to the Comet AFE.
+ * This includes the aux DAC.
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <asm/global_lock.h>
+#include <asm/io.h>
+#include <asm/soc-tz1090/afe.h>
+#include <asm/soc-tz1090/defs.h>
+#include <linux/syscore_ops.h>
+
/* bit 0 is set if auxdac is in use */
static unsigned long comet_afe_auxdac_busy;

/**
 * comet_afe_auxdac_get() - claim the auxiliary DAC.
 *
 * Use comet_afe_auxdac_put() to disclaim after use.
 *
 * Returns:	0 on success, -EBUSY if the DAC is already claimed.
 */
int comet_afe_auxdac_get(void)
{
	/* one at a time: atomic test-and-set doubles as the lock */
	if (test_and_set_bit_lock(0, &comet_afe_auxdac_busy))
		return -EBUSY;

	return 0;
}
+
/**
 * comet_afe_auxdac_put() - disclaim the auxiliary DAC.
 *
 * Must match a successful call to comet_afe_auxdac_get().
 * Releases the busy bit with release semantics so prior register
 * writes are visible before the DAC can be re-claimed.
 */
void comet_afe_auxdac_put(void)
{
	clear_bit_unlock(0, &comet_afe_auxdac_busy);
}
+
+/**
+ * comet_afe_auxdac_set_power() - power up/down the auxiliary DAC.
+ * @power:	1 to power up, 0 to power down.
+ *
+ * Powers up or down the aux DAC.
+ * The aux DAC should already be claimed with comet_afe_auxdac_get().
+ */
+void comet_afe_auxdac_set_power(unsigned int power)
+{
+	long flags;
+	u32 afe_ctrl;
+
+	__global_lock2(flags);
+	afe_ctrl = readl(CR_AFE_CTRL);
+	if (power)
+		afe_ctrl &= ~CR_AFE_AUXDACPD;
+	else
+		afe_ctrl |= CR_AFE_AUXDACPD;
+	writel(afe_ctrl, CR_AFE_CTRL);
+	__global_unlock2(flags);
+}
+
+/**
+ * comet_afe_auxdac_get_power() - get power up/down of auxiliary DAC.
+ *
+ * Gets the current power state of the aux DAC.
+ * The aux DAC should already be claimed with comet_afe_auxdac_get().
+ *
+ * Returns:	0 if powered down, otherwise powered up.
+ */
+unsigned int comet_afe_auxdac_get_power(void)
+{
+	u32 afe_ctrl;
+
+	afe_ctrl = readl(CR_AFE_CTRL);
+	return !(afe_ctrl & CR_AFE_AUXDACPD);
+}
+
/**
 * comet_afe_auxdac_set_standby() - put auxiliary DAC in/out of standby.
 * @standby:	1 to put in standby, 0 to take out of standby.
 *
 * Puts the aux DAC in standby, or takes it out of standby.
 * Read-modify-write of CR_AFE_CTRL under the global hardware lock.
 * The aux DAC should already be claimed with comet_afe_auxdac_get().
 */
void comet_afe_auxdac_set_standby(unsigned int standby)
{
	long flags;
	u32 afe_ctrl;

	__global_lock2(flags);
	afe_ctrl = readl(CR_AFE_CTRL);
	if (standby)
		afe_ctrl |= CR_AFE_AUXDACSTBY;
	else
		afe_ctrl &= ~CR_AFE_AUXDACSTBY;
	writel(afe_ctrl, CR_AFE_CTRL);
	__global_unlock2(flags);
}

/**
 * comet_afe_auxdac_get_standby() - get standby state of auxiliary DAC.
 *
 * Gets the current standby state of the aux DAC.
 * The aux DAC should already be claimed with comet_afe_auxdac_get().
 *
 * Returns:	0 if not in standby, otherwise in standby (the raw,
 *		unshifted CR_AFE_AUXDACSTBY bit - treat as boolean).
 */
unsigned int comet_afe_auxdac_get_standby(void)
{
	u32 afe_ctrl;

	afe_ctrl = readl(CR_AFE_CTRL);
	return afe_ctrl & CR_AFE_AUXDACSTBY;
}
+
/**
 * comet_afe_auxdac_set_source() - set the source of the auxiliary DAC output.
 * @source:	auxdac source (CR_AFE_AUXDACSEL_* in asm/soc-tz1090/defs.h)
 *
 * Sets the aux DAC source after range-checking it against the highest
 * valid selector value.
 * The aux DAC should already be claimed with comet_afe_auxdac_get().
 *
 * Returns:	0 on success, -EINVAL if @source is out of range.
 */
int comet_afe_auxdac_set_source(unsigned int source)
{
	long flags;
	u32 afe_auxdac;

	if (source > CR_AFE_AUXDACSEL_UCC0_EXT_CTL_1)
		return -EINVAL;
	__global_lock2(flags);
	afe_auxdac = readl(CR_AFE_AUXDAC);
	afe_auxdac &= ~CR_AFE_AUXDACSEL;
	afe_auxdac |= source << CR_AFE_AUXDACSEL_SHIFT;
	writel(afe_auxdac, CR_AFE_AUXDAC);
	__global_unlock2(flags);

	return 0;
}
+
/**
 * comet_afe_auxdac_set_value() - set fixed value of the auxiliary DAC output.
 * @value:	value to output from aux DAC in range 0-255.
 *
 * Sets a fixed value output from the aux DAC. Note this also switches
 * the DAC source to the fixed AUXDACIN input (the register is written
 * whole, not read-modify-write).
 * The aux DAC should already be claimed with comet_afe_auxdac_get().
 */
void comet_afe_auxdac_set_value(unsigned int value)
{
	u32 afe_auxdac;
	afe_auxdac = CR_AFE_AUXDACSEL_CR_AFE_AUXDACIN << CR_AFE_AUXDACSEL_SHIFT;
	afe_auxdac |= (value << CR_AFE_AUXDACIN_SHIFT) & CR_AFE_AUXDACIN;
	writel(afe_auxdac, CR_AFE_AUXDAC);
}

/**
 * comet_afe_auxdac_get_value() - get fixed value of the auxiliary DAC output.
 *
 * Gets the fixed value output from the aux DAC, or returns a negative value if
 * the output isn't fixed (i.e. the source is not AUXDACIN, so the
 * instantaneous output value cannot be read back).
 * The aux DAC should already be claimed with comet_afe_auxdac_get().
 *
 * Returns:	value of output from aux DAC in range 0-255, or -EIO if unfixed.
 */
int comet_afe_auxdac_get_value(void)
{
	u32 afe_auxdac;
	u32 src;

	afe_auxdac = readl(CR_AFE_AUXDAC);
	src = (afe_auxdac & CR_AFE_AUXDACSEL) >> CR_AFE_AUXDACSEL_SHIFT;

	if (src == CR_AFE_AUXDACSEL_CR_AFE_AUXDACIN)
		return (afe_auxdac & CR_AFE_AUXDACIN) >> CR_AFE_AUXDACIN_SHIFT;

	/* we can't work out what the value is */
	return -EIO;

}
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+
/* stores state across suspend */
static unsigned int comet_afe_auxdac_state;

/**
 * comet_afe_suspend() - stores hardware state so it can be restored.
 *
 * Stores aux DAC hardware state in comet_afe_auxdac_state, but only
 * when the DAC is claimed (busy bit set); unclaimed state is not
 * preserved. Called from syscore_ops, so it runs single-threaded with
 * interrupts disabled - no locking needed.
 *
 * NOTE(review): packing assumes CR_AFE_AUXDACPD/CR_AFE_AUXDACSTBY sit
 * in the low 8 bits of CR_AFE_CTRL so that the <<24 shift does not
 * truncate them - confirm against asm/soc-tz1090/defs.h.
 *
 * Returns:	0 (syscore suspend callbacks return int).
 */
int comet_afe_suspend(void)
{
	u32 afe_ctrl, afe_auxdac;

	if (comet_afe_auxdac_busy & 0x1) {
		afe_ctrl = readl(CR_AFE_CTRL);
		afe_auxdac = readl(CR_AFE_AUXDAC);

		comet_afe_auxdac_state = (afe_ctrl & (CR_AFE_AUXDACPD |
						      CR_AFE_AUXDACSTBY)) << 24
					| (afe_auxdac & 0xffffff);
	}

	return 0;
}
+
/**
 * comet_afe_resume() - restores hardware state.
 *
 * Restores aux DAC hardware state from comet_afe_auxdac_state, again
 * only if the DAC was claimed at suspend time. CR_AFE_CTRL is restored
 * read-modify-write under the global lock; CR_AFE_AUXDAC is rewritten
 * whole from the saved low 24 bits.
 */
void comet_afe_resume(void)
{
	long flags;
	u32 afe_ctrl, afe_auxdac;

	if (comet_afe_auxdac_busy & 0x1) {
		__global_lock2(flags);
		afe_ctrl = readl(CR_AFE_CTRL);
		afe_ctrl &= ~(CR_AFE_AUXDACPD | CR_AFE_AUXDACSTBY);
		afe_ctrl |= comet_afe_auxdac_state >> 24;
		writel(afe_ctrl, CR_AFE_CTRL);
		__global_unlock2(flags);

		afe_auxdac = comet_afe_auxdac_state & 0xffffff;
		writel(afe_auxdac, CR_AFE_AUXDAC);
	}
}
+
/* Save/restore AFE state over suspend via syscore (runs with IRQs off). */
static struct syscore_ops comet_afe_syscore_ops = {
	.suspend = comet_afe_suspend,
	.resume = comet_afe_resume,
};

/* Register the syscore hooks once at device-init time. */
static int comet_afe_init(void)
{
	register_syscore_ops(&comet_afe_syscore_ops);
	return 0;
}

device_initcall(comet_afe_init);
+
+#endif /* CONFIG_METAG_SUSPEND_MEM */
diff --git a/arch/metag/soc/tz1090/audiocodec.c b/arch/metag/soc/tz1090/audiocodec.c
new file mode 100644
index 0000000..e66c97c
--- /dev/null
+++ b/arch/metag/soc/tz1090/audiocodec.c
@@ -0,0 +1,1211 @@
+/*
+ *  audiocodec.c - setup comet audio codec, DAC & ADC.
+ *
+ *  Copyright (C) 2010 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <sound/tansen.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/audiocodec.h>
+#include <asm/soc-tz1090/hdmi-audio.h>
+
/* kobject under which the codec's sysfs attributes live (created elsewhere). */
static struct kobject *ac_kobj;

/*
 * TM_PWM*_S1 registers
 * Apparently not where indicated in the register list, but in 'reserved' regions?!?
 * One GTI register address per stereo output pair.
 */
static unsigned long pwms1_regs[AUDIOCODEC_NUM_STEREOPAIRS] = {
	0x826, /* TW_PWM1_S1 */
	0x8AA, /* TW_PWM2_S1 */
	0x926, /* TW_PWM3_S1 */
};
+
/*
 * Name <-> value mapping used to parse/format sysfs attribute strings.
 * Tables are terminated by a { NULL, -1 } sentinel, which the
 * map_lookup*/map_string* helpers iterate up to.
 */
struct str_int_map_t {
	char *name;
	int val;
};

static struct str_int_map_t map_bool[] = {
	{ "false", 0 },
	{ "true", 1 },
	{ NULL, -1 },
};

static struct str_int_map_t map_samplewidth[] = {
	{ "16b", AUDIOCODEC_SAMPLEWIDTH_16 },
	{ "20b", AUDIOCODEC_SAMPLEWIDTH_20 },
	{ "24b", AUDIOCODEC_SAMPLEWIDTH_24 },
	{ "32b", AUDIOCODEC_SAMPLEWIDTH_32 },
	{ NULL, -1 },
};

static struct str_int_map_t map_input[] = {
	{ "mic", AUDIOCODEC_INPUT_MIC },
	{ "line", AUDIOCODEC_INPUT_LINE },
	{ "ipod", AUDIOCODEC_INPUT_IPOD },
	{ "mic_differential", AUDIOCODEC_INPUT_MIC_DIFFERENTIAL },
	{ NULL, -1 },
};

static struct str_int_map_t map_mute[] = {
	{ "none", AUDIOCODEC_MUTE_NONE },
	{ "hard", AUDIOCODEC_MUTE_HARD },
	{ "90db", AUDIOCODEC_MUTE_90DB },
	{ "squarewave", AUDIOCODEC_MUTE_SQUAREWAVE },
	{ NULL, -1 },
};
+
+static struct str_int_map_t map_frame[] = {
+	{ "1616", AUDIOCODEC_FRAME_1616 },
+	{ "3232", AUDIOCODEC_FRAME_3232 },
+};
+
+static struct str_int_map_t map_clock[] = {
+	{ "256fs", AUDIOCODEC_CLOCK_256FS },
+	{ "384fs", AUDIOCODEC_CLOCK_384FS },
+};
+
static struct str_int_map_t map_i2sclock[] = {
	{ "disabled", AUDIOCODEC_I2SCLOCK_DISABLED },
	{ "xtal1", AUDIOCODEC_I2SCLOCK_XTAL1 },
	{ "xtal2", AUDIOCODEC_I2SCLOCK_XTAL2 },
	{ "sys_undeleted", AUDIOCODEC_I2SCLOCK_SYS_UNDELETED },
	{ "adc_pll", AUDIOCODEC_I2SCLOCK_ADC_PLL },
	{ NULL, -1 },
};

/* Preset values are bit flags and may be combined (see map_lookup_flags). */
static struct str_int_map_t map_preset[] = {
	{ "none", AUDIOCODEC_PRESET_NONE },
	{ "phono", AUDIOCODEC_PRESET_PHONO },
#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
	{ "hdmi", AUDIOCODEC_PRESET_HDMI },
	{ "default", AUDIOCODEC_PRESET_PHONO | AUDIOCODEC_PRESET_HDMI },
#else
	{ "default", AUDIOCODEC_PRESET_PHONO },
#endif
	{ NULL, -1 },
};
+
+static int map_lookup(struct str_int_map_t *map, const char *str)
+{
+	size_t len = strlen(str);
+	size_t cmplen = len;
+	int i;
+
+	if (len == 0)
+		return -1;
+
+	if (str[len-1] == '\n')
+		cmplen--;
+
+	for (i = 0; map[i].name; i++) {
+		if (!strncmp(map[i].name, str, cmplen))
+			return map[i].val;
+	}
+
+	return -1;
+}
+
/*
 * map_lookup_flags() - parse a comma-separated list of flag names.
 * @map:	NULL-terminated name/value table of OR-able flags.
 * @str:	input like "phono,hdmi\n".
 *
 * Repeatedly rescans the table from the start, consuming one
 * recognised token per pass, until a pass makes no progress. An
 * unrecognised leading token therefore aborts parsing.
 *
 * Returns the OR of all matched values (0 if nothing matched).
 */
static int map_lookup_flags(struct str_int_map_t *map, const char *str)
{
	size_t len;
	int i, val = 0;
	bool change;

	do {
		change = false;

		for (i = 0; map[i].name; i++) {
			len = strlen(map[i].name);

			if (strncmp(map[i].name, str, len))
				continue;

			/* token must be followed by ',' / '\n' / NUL */
			if (str[len] == ',' || str[len] == '\n' ||
			    str[len] == 0) {
				val |= map[i].val;
				change = true;
				/* skip the separator, if any */
				str += len + (str[len] == ',' ? 1 : 0);
			}
		}
	} while (change);

	return val;
}
+
+static ssize_t map_string(struct str_int_map_t *map, int val, char *buf,
+	size_t sz)
+{
+	int i;
+
+	for (i = 0; map[i].name; i++) {
+		if (map[i].val == val)
+			return snprintf(buf, sz, "%s\n", map[i].name);
+	}
+
+	return snprintf(buf, sz, "err\n");
+}
+
+static ssize_t map_string_flags(struct str_int_map_t *map, int val, char *buf,
+	size_t sz)
+{
+	int i;
+	char *str = buf;
+	size_t written, rem = sz - 1;
+
+	for (i = 0; map[i].name; i++) {
+		if (!map[i].val)
+			continue;
+
+		if ((map[i].val & val) == map[i].val) {
+			if (buf < str) {
+				*str++ = ',';
+				*str = 0;
+			}
+
+			written = snprintf(str, rem, "%s", map[i].name);
+			str += written;
+			rem -= written;
+
+			val &= ~map[i].val;
+		}
+	}
+
+	if (str == buf) {
+		/* outputted nothing */
+		/* look for a zero mapping */
+
+		for (i = 0; map[i].name; i++) {
+			if (!map[i].val)
+				return snprintf(buf, sz, "%s\n", map[i].name);
+		}
+	}
+
+	*str++ = '\n';
+	*str = 0;
+	return (ssize_t)(str - buf);
+}
+
/* Pulse the audio-out soft reset, allowing 1ms settle on each edge. */
static void ac_reset(void)
{
	writel(1, AUDIO_OUT_SOFT_RESET);
	udelay(1000);
	writel(0, AUDIO_OUT_SOFT_RESET);
	udelay(1000);
}

/* Report whether the DAC is powered (inverse of the output bandgap
 * power-down bit). Always returns 0 (success). */
static int ac_dac_power_get(bool *pwr)
{
	uint32_t hpctrl = readl(CR_AUDIO_HP_CTRL);
	*pwr = !(hpctrl & AUDIO_PWDN_BG_OP);
	return 0;
}
+
/*
 * ac_dac_power_set() - run the codec power-up/power-down sequence.
 * @pwr:	true to power up, false to power down.
 *
 * The ordering and delays follow the hardware bring-up sequence
 * (bandgap -> PLL -> digital -> outputs/ADC -> analogue filters);
 * do not reorder. The GTI port must be held in reset across the
 * sequence for the power-up to take effect.
 *
 * NOTE(review): the "out of reset" comments assume RSTB bits are
 * active-low resets; the code clears them after powering the PLL -
 * confirm polarity against the register spec.
 */
static int ac_dac_power_set(bool pwr)
{
	uint32_t hpctrl = readl(CR_AUDIO_HP_CTRL);
	uint32_t adcctrl = readl(CR_AUDIO_ADC_CTRL);

	/* GTI port must be in reset for power up to work */
	gti_reset((void __iomem *)CR_TOP_AUDGTI_CTRL, 1);

	if (pwr) {
		/* Power up bandgap */
		hpctrl &= ~(AUDIO_PWDN_BG_OP | AUDIO_PWDN_BG_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(100);

		/* Reset bandgap filter */
		hpctrl |= (AUDIO_RST_BG_OP | AUDIO_RST_BG_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(250);

		hpctrl &= ~(AUDIO_RST_BG_OP | AUDIO_RST_BG_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(100);

		/* Power up PLL */
		hpctrl &= ~AUDIO_PWDN_PLL;
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(100);

		/* Take digital blocks out of reset */
		hpctrl &= ~(AUDIO_RSTB_DIG_OP | AUDIO_RSTB_DIG_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(100);

		/* Power up outputs */
		hpctrl |= (AUDIO_PSCNT_PWM_F | AUDIO_PSCNT_PWM_E |
			AUDIO_PSCNT_PWM_D | AUDIO_PSCNT_PWM_C |
			AUDIO_PSCNT_PWM_B | AUDIO_PSCNT_PWM_A);
		hpctrl |= (AUDIO_PSCNTHP_R | AUDIO_PSCNTHP_L);
		writel(hpctrl, CR_AUDIO_HP_CTRL);

		adcctrl |= (AUDIO_PSCNTADC_R | AUDIO_PSCNTADC_L);
		writel(adcctrl, CR_AUDIO_ADC_CTRL);

		udelay(100);

		/* Reset analogue filters */
		hpctrl |= (AUDIO_RSTB_ANA_OP | AUDIO_RSTB_ANA_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(250);

		hpctrl &= ~(AUDIO_RSTB_ANA_OP | AUDIO_RSTB_ANA_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
		udelay(250);
	} else {
		/* Power down bandgaps and hold digital blocks in reset */
		hpctrl |= (AUDIO_PWDN_BG_OP | AUDIO_PWDN_BG_IP);
		hpctrl |= (AUDIO_RSTB_DIG_OP | AUDIO_RSTB_DIG_IP);
		writel(hpctrl, CR_AUDIO_HP_CTRL);
	}

	/* Take the GTI port out of reset */
	gti_reset((void __iomem *)CR_TOP_AUDGTI_CTRL, 0);

	return 0;
}
+
/*
 * ac_i2s_out_clock_get() - decode the current I2S-out clock source from
 * the clock-enable and three clock-switch bits. Returns 0 (success).
 */
static int ac_i2s_out_clock_get(int *clksrc)
{
	uint32_t val = readl(CR_TOP_CLKENAB);
	bool sw0, sw1, sw2;

	if (!(val & (1 << CR_TOP_I2S_1_EN_BIT))) {
		/* clock gated off entirely */
		*clksrc = AUDIOCODEC_I2SCLOCK_DISABLED;
	} else {
		val = readl(CR_TOP_CLKSWITCH);
		sw0 = !!(val & (1 << CR_TOP_I2S_0_SW_BIT));
		sw1 = !!(val & (1 << CR_TOP_I2S_1_SW_BIT));
		sw2 = !!(val & (1 << CR_TOP_I2S_2_SW_BIT));

		/* decode mirrors the encoding in ac_i2s_out_clock_set() */
		if (!sw1) {
			if (sw2)
				*clksrc = AUDIOCODEC_I2SCLOCK_SYS_UNDELETED;
			else
				*clksrc = AUDIOCODEC_I2SCLOCK_XTAL1;
		} else if (!sw0)
			*clksrc = AUDIOCODEC_I2SCLOCK_XTAL2;
		else
			*clksrc = AUDIOCODEC_I2SCLOCK_ADC_PLL;
	}

	return 0;
}

/*
 * ac_i2s_out_clock_set() - select (or disable) the I2S-out clock source.
 * Returns 0 on success, -EINVAL for an unknown source.
 */
static int ac_i2s_out_clock_set(int clksrc)
{
	uint32_t val = readl(CR_TOP_CLKENAB);

	if (clksrc == AUDIOCODEC_I2SCLOCK_DISABLED) {
		val &= ~(1 << CR_TOP_I2S_1_EN_BIT);
		writel(val, CR_TOP_CLKENAB);
		return 0;
	}

	/* enable the clock before switching its source */
	val |= (1 << CR_TOP_I2S_1_EN_BIT);
	writel(val, CR_TOP_CLKENAB);

	val = readl(CR_TOP_CLKSWITCH);
	val &= ~(1 << CR_TOP_I2S_0_SW_BIT);
	val &= ~(1 << CR_TOP_I2S_1_SW_BIT);
	val &= ~(1 << CR_TOP_I2S_2_SW_BIT);

	switch (clksrc) {
	case AUDIOCODEC_I2SCLOCK_XTAL1:
		break;
	case AUDIOCODEC_I2SCLOCK_XTAL2:
		val |= (1 << CR_TOP_I2S_1_SW_BIT);
		break;
	case AUDIOCODEC_I2SCLOCK_SYS_UNDELETED:
		val |= (1 << CR_TOP_I2S_2_SW_BIT);
		break;
	case AUDIOCODEC_I2SCLOCK_ADC_PLL:
		val |= (1 << CR_TOP_I2S_0_SW_BIT);
		val |= (1 << CR_TOP_I2S_1_SW_BIT);
		break;
	default:
		return -EINVAL;
	}

	writel(val, CR_TOP_CLKSWITCH);
	return 0;
}
+
/* Read the 8-bit primary I2S clock divider. */
static int ac_i2s_out_clockdiv_get(int *div)
{
	*div = readl(CR_TOP_I2SCLK_DIV) & 0xff;
	return 0;
}

/* Write the 8-bit primary I2S clock divider (excess bits masked off). */
static int ac_i2s_out_clockdiv_set(int div)
{
	writel(div & 0xff, CR_TOP_I2SCLK_DIV);
	return 0;
}

/* Read the 2-bit secondary I2S clock divider. */
static int ac_i2s_out_clockdiv2_get(int *div)
{
	*div = readl(CR_TOP_I2S_DIV2) & 0x3;
	return 0;
}

/* Write the 2-bit secondary I2S clock divider (excess bits masked off). */
static int ac_i2s_out_clockdiv2_set(int div)
{
	writel(div & 0x3, CR_TOP_I2S_DIV2);
	return 0;
}

/* Read the active channel count (register holds count - 1). */
static int ac_i2s_out_active_channels_get(int *chans)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*chans = (val >> AUDIO_OUT_CM_ACTIVE_CHAN_SHIFT) + 1;
	return 0;
}

/* Write the active channel count (stored as count - 1).
 * NOTE(review): the clear mask 0xffffc000 (bits 31:14) presumably covers
 * the ACTIVE_CHAN field - confirm against AUDIO_OUT_CM_ACTIVE_CHAN_SHIFT. */
static int ac_i2s_out_active_channels_set(int chans)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~0xffffc000;
	val |= (chans - 1) << AUDIO_OUT_CM_ACTIVE_CHAN_SHIFT;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}
+
/* Read the I2S frame format field. */
static int ac_i2s_out_frame_get(int *frame)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*frame = (val >> AUDIO_OUT_CM_FRAME_SHIFT) & AUDIO_OUT_CM_FRAME_MASK;
	return 0;
}

/* Write the I2S frame format field.
 * NOTE(review): the getter treats FRAME_MASK as unshifted, but this
 * clears ~FRAME_MASK while ORing a value shifted by FRAME_SHIFT - one
 * of the two looks wrong; confirm the MASK definition in defs.h. */
static int ac_i2s_out_frame_set(int frame)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~AUDIO_OUT_CM_FRAME_MASK;
	val |= frame << AUDIO_OUT_CM_FRAME_SHIFT;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}
+
/* Report whether the I2S output is bus master. */
static int ac_i2s_out_master_get(bool *en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*en = !!(val & AUDIO_OUT_CM_MASTER);
	return 0;
}

/* Select I2S master (true) or slave (false) mode. */
static int ac_i2s_out_master_set(bool en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~AUDIO_OUT_CM_MASTER;
	if (en)
		val |= AUDIO_OUT_CM_MASTER;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}

/* Read the audio clock (ACLK) rate selector field. */
static int ac_i2s_out_aclk_get(int *aclk)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*aclk = (val >> AUDIO_OUT_CM_ACLK_SHIFT) & AUDIO_OUT_CM_ACLK_MASK;
	return 0;
}

/* Write the ACLK rate selector field.
 * NOTE(review): same mask/shift mismatch as ac_i2s_out_frame_set() -
 * clears ~ACLK_MASK but ORs a value shifted by ACLK_SHIFT; confirm
 * whether ACLK_MASK is pre-shifted in defs.h. */
static int ac_i2s_out_aclk_set(int aclk)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~AUDIO_OUT_CM_ACLK_MASK;
	val |= aclk << AUDIO_OUT_CM_ACLK_SHIFT;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}
+
/* Report whether the bit/LR clock output is enabled. */
static int ac_i2s_out_blrclk_get(bool *en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*en = !!(val & AUDIO_OUT_CM_BLRCLK_EN);
	return 0;
}

/* Enable/disable the bit/LR clock output. */
static int ac_i2s_out_blrclk_set(bool en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~AUDIO_OUT_CM_BLRCLK_EN;
	if (en)
		val |= AUDIO_OUT_CM_BLRCLK_EN;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}

/* Report whether the I2S output block is enabled. */
static int ac_i2s_out_enable_get(bool *en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	*en = !!(val & AUDIO_OUT_CM_ME);
	return 0;
}

/* Enable/disable the I2S output block. */
static int ac_i2s_out_enable_set(bool en)
{
	uint32_t val = readl(AUDIO_OUT_CONTROL_MAIN);
	val &= ~AUDIO_OUT_CM_ME;
	if (en)
		val |= AUDIO_OUT_CM_ME;
	writel(val, AUDIO_OUT_CONTROL_MAIN);
	return 0;
}
+
/* Read the DAC I2S sample-width field (GTI register 0xe85, bits 1:0). */
static int ac_i2swidth_dac_get(int *sw)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe85);
	*sw = val & 0x3;
	return 0;
}

/*
 * Set the DAC I2S sample width, then reprogram each stereo pair's
 * output channel-control register to left-justified format.
 * NOTE(review): the per-channel address math (base + pairs*0x20 +
 * i*0x20 + 4) implies the channel-control registers start after the
 * first AUDIOCODEC_NUM_STEREOPAIRS register blocks - confirm layout.
 */
static int ac_i2swidth_dac_set(int sw)
{
	int i;
	uint32_t addr;
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe85);
	val &= ~0x3; /* Clear TX width field */
	val |= sw;
	gti_write((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe85, val);

	for (i = 0; i < AUDIOCODEC_NUM_STEREOPAIRS; i++) {
		addr = AUDIO_OUT_BASE_ADDR + (0x20 * AUDIOCODEC_NUM_STEREOPAIRS)
			+ (0x20 * i) + 0x04;
		val = readl(addr);
		val &= ~AUDIO_OUT_CC_FORMAT_MASK;
		val |= 4 << AUDIO_OUT_CC_FORMAT_SHIFT;
		val |= AUDIO_OUT_CC_LEFT_JUST;
		writel(val, addr);
	}

	return 0;
}

/* Read the ADC I2S sample-width field (GTI register 0xe86, bits 1:0). */
static int ac_i2swidth_adc_get(int *sw)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe86);
	*sw = val & 0x3;
	return 0;
}

/* Write the ADC I2S sample-width field. */
static int ac_i2swidth_adc_set(int sw)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe86);
	val &= ~0x3; /* Clear RX width field */
	val |= sw;
	gti_write((void __iomem *)CR_TOP_AUDGTI_CTRL, 0xe86, val);
	return 0;
}
+
/* ADC bypass state: bypass is the inverse of the external-I2S bit. */
static int ac_bypass_adc_get(bool *bypass)
{
	uint32_t val = readl(CR_AUDIO_HP_CTRL);
	*bypass = !(val & AUDIO_I2S_EXT);
	return 0;
}

/* Enable/disable ADC bypass (clears/sets the external-I2S bit). */
static int ac_bypass_adc_set(bool bypass)
{
	uint32_t val = readl(CR_AUDIO_HP_CTRL);
	val &= ~AUDIO_I2S_EXT;
	if (!bypass)
		val |= AUDIO_I2S_EXT;
	writel(val, CR_AUDIO_HP_CTRL);
	return 0;
}

/* Read the input PGA routing mode (mic/line/ipod/differential). */
static int ac_input_route_get(int *route)
{
	uint32_t val = readl(CR_AUDIO_HP_CTRL);
	*route = (val & AUDIO_PGA_MODE_MASK) >> AUDIO_PGA_MODE_SHIFT;
	return 0;
}

/* Write the input PGA routing mode. */
static int ac_input_route_set(int route)
{
	uint32_t val = readl(CR_AUDIO_HP_CTRL);
	val &= ~AUDIO_PGA_MODE_MASK;
	val |= AUDIO_PGA_MODE(route);
	writel(val, CR_AUDIO_HP_CTRL);
	return 0;
}
+
/* Read the I2S input routed to @chan (hardware stores route + 1). */
static int ac_chan_i2sinput_get(int chan, int *in)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL,
				pwms1_regs[chan]);
	*in = (val & 0x3) - 1;
	return 0;
}

/* Route I2S input @in to stereo pair @chan (stored as @in + 1). */
static int ac_chan_i2sinput_set(int chan, int in)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL,
				pwms1_regs[chan]);
	val &= ~0x3;  /* Clear routing */
	val |= (in + 1) & 0x3;
	gti_write((void __iomem *)CR_TOP_AUDGTI_CTRL, pwms1_regs[chan], val);
	return 0;
}

/* Read the per-channel sample width field (bits 7:6). */
static int ac_chan_i2swidth_get(int chan, int *sw)
{
	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL,
				pwms1_regs[chan]);
	*sw = (val >> 6) & 0x3;
	return 0;
}
+
+static int ac_chan_i2swidth_set(int chan, int sw)
+{
+	uint32_t val = gti_read((void __iomem *)CR_TOP_AUDGTI_CTRL,
+				pwms1_regs[chan]);
+	val &= ~0xc;  /* Clear width */
+	val |= sw << 6;
+	gti_write((void __iomem *)CR_TOP_AUDGTI_CTRL, pwms1_regs[chan], val);
+	return 0;
+}
+
+static int ac_chan_vol_get(int pair, int lr, uint8_t *db)
+{
+	uint32_t reg_addr, value, shift = 32;
+
+	if (lr & AUDIOCODEC_LEFT)
+		shift = ((pair % 2) * 16);
+	else if (lr & AUDIOCODEC_RIGHT)
+		shift = (((pair % 2) * 16) + 8);
+
+	/* Each 32 bit GAIN register represents 2 channel pairs */
+	reg_addr = CR_AUDIO_GAIN0 + ((pair / 2) * sizeof(uint32_t));
+
+	value = readl(reg_addr);
+	*db = (value >> shift) & 0xFF;
+	return 0;
+}
+
/*
 * ac_chan_vol_set() - write the 8-bit gain for one or both sides of a
 * stereo pair (@lr may OR LEFT and RIGHT together).
 */
static int ac_chan_vol_set(int pair, int lr, uint8_t db)
{
	uint32_t reg_addr, value, clear_mask = 0, set_mask = 0;

	if (lr & AUDIOCODEC_LEFT) {
		clear_mask += 0xFF << ((pair % 2) * 16);
		set_mask += db << ((pair % 2) * 16);
	}

	if (lr & AUDIOCODEC_RIGHT) {
		clear_mask += 0xFF << (((pair % 2) * 16) + 8);
		set_mask += db << (((pair % 2) * 16) + 8);
	}

	/* Each 32 bit GAIN register represents 2 channel pairs */
	reg_addr = CR_AUDIO_GAIN0 + ((pair / 2) * sizeof(uint32_t));
	value = readl(reg_addr);
	value &= ~clear_mask;
	value |= set_mask;
	writel(value, reg_addr);
	return 0;
}

/*
 * ac_chan_mute_get() - read the mute flags for @pair/@lr.
 * CR_AUDIO_MUTE packs three mute types in byte-wide groups:
 * hard mute at bit 16+, -90dB mute at bit 8+, squarewave at bit 0+,
 * two bits (L/R) per pair within each group.
 */
static int ac_chan_mute_get(int pair, int lr, int *mute)
{
	uint32_t val = readl(CR_AUDIO_MUTE);
	*mute = AUDIOCODEC_MUTE_NONE;

	if (val & (lr << (16 + (pair * 2))))
		*mute |= AUDIOCODEC_MUTE_HARD;

	if (val & (lr << (8 + (pair * 2))))
		*mute |= AUDIOCODEC_MUTE_90DB;

	if (val & (lr << (0 + (pair * 2))))
		*mute |= AUDIOCODEC_MUTE_SQUAREWAVE;

	return 0;
}

/* ac_chan_mute_set() - replace all three mute flags for @pair/@lr. */
static int ac_chan_mute_set(int pair, int lr, int mute)
{
	uint32_t val = readl(CR_AUDIO_MUTE);

	/* clear all three mute types for this pair/side first */
	val &= ~(lr << (16 + (pair * 2)));
	val &= ~(lr << (8 + (pair * 2)));
	val &= ~(lr << (0 + (pair * 2)));

	if (mute & AUDIOCODEC_MUTE_HARD)
		val |= (lr << (16 + (pair * 2)));

	if (mute & AUDIOCODEC_MUTE_90DB)
		val |= (lr << (8 + (pair * 2)));

	if (mute & AUDIOCODEC_MUTE_SQUAREWAVE)
		val |= (lr << (0 + (pair * 2)));

	writel(val, CR_AUDIO_MUTE);
	return 0;
}
+
/* Last preset written via sysfs; the hardware is not read back. */
static int curr_preset = AUDIOCODEC_PRESET_NONE;

/* sysfs show: format curr_preset as comma-separated flag names. */
static ssize_t ac_preset_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return map_string_flags(map_preset, curr_preset, buf, PAGE_SIZE);
}

/*
 * sysfs store: parse preset flags, reset the codec and reprogram the
 * whole output path to a known-good default configuration.
 */
static ssize_t ac_preset_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int preset = map_lookup_flags(map_preset, buf);

	/* 0 means nothing recognised ("none" maps to 0 too) */
	if (preset == 0)
		return -EINVAL;

	curr_preset = preset;
	ac_reset();

#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
	zero1sp_hdmi_audio_set_enabled(!!(preset & AUDIOCODEC_PRESET_HDMI));
#endif

	ac_dac_power_set(!!(preset & AUDIOCODEC_PRESET_PHONO));

	/* unity gain, unmuted, 256fs master clocking from XTAL1 */
	ac_chan_vol_set(0, AUDIOCODEC_LEFT | AUDIOCODEC_RIGHT, 128);
	ac_chan_mute_set(0, AUDIOCODEC_LEFT | AUDIOCODEC_RIGHT,
		AUDIOCODEC_MUTE_NONE);
	ac_i2s_out_clock_set(AUDIOCODEC_I2SCLOCK_XTAL1);
	ac_i2s_out_clockdiv_set(0);
	ac_i2s_out_clockdiv2_set(1);
	ac_i2s_out_active_channels_set(1);
	ac_i2s_out_master_set(true);
	ac_i2s_out_frame_set(AUDIOCODEC_FRAME_3232);
	ac_i2s_out_aclk_set(AUDIOCODEC_CLOCK_256FS);
	ac_i2s_out_blrclk_set(true);
	ac_i2s_out_enable_set(true);

	/* 24-bit samples everywhere; fixed input routing per pair */
	ac_i2swidth_dac_set(AUDIOCODEC_SAMPLEWIDTH_24);
	ac_i2swidth_adc_set(AUDIOCODEC_SAMPLEWIDTH_24);
	ac_chan_i2sinput_set(0, 1);
	ac_chan_i2swidth_set(0, AUDIOCODEC_SAMPLEWIDTH_24);
	ac_chan_i2sinput_set(1, 0);
	ac_chan_i2swidth_set(1, AUDIOCODEC_SAMPLEWIDTH_24);
	ac_chan_i2sinput_set(2, 2);
	ac_chan_i2swidth_set(2, AUDIOCODEC_SAMPLEWIDTH_24);

	ac_bypass_adc_set(false);
	ac_input_route_set(AUDIOCODEC_INPUT_LINE);

	return count;
}
+
/* sysfs show/store pairs: thin wrappers mapping text <-> the ac_* helpers. */

static ssize_t ac_dac_power_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	bool pwr;
	if (ac_dac_power_get(&pwr))
		return -EINVAL;
	return map_string(map_bool, pwr, buf, PAGE_SIZE);
}

static ssize_t ac_dac_power_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int pwr = map_lookup(map_bool, buf);
	if (pwr == -1)
		return -EINVAL;
	if (ac_dac_power_set(!!pwr))
		return -EINVAL;
	return count;
}

static ssize_t ac_i2s_out_active_channels_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	int chans;
	if (ac_i2s_out_active_channels_get(&chans))
		return -EINVAL;
	return snprintf(buf, PAGE_SIZE, "%d\n", chans);
}

static ssize_t ac_i2s_out_active_channels_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int chans;
	/* no range check here; the setter masks/offsets the raw value */
	if (sscanf(buf, "%d", &chans) != 1)
		return -EINVAL;
	if (ac_i2s_out_active_channels_set(chans))
		return -EINVAL;
	return count;
}

static ssize_t ac_i2s_out_clock_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	int clksrc;
	if (ac_i2s_out_clock_get(&clksrc))
		return -EINVAL;
	return map_string(map_i2sclock, clksrc, buf, PAGE_SIZE);
}

static ssize_t ac_i2s_out_clock_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int clksrc = map_lookup(map_i2sclock, buf);
	if (clksrc == -1)
		return -EINVAL;
	if (ac_i2s_out_clock_set(clksrc))
		return -EINVAL;
	return count;
}

static ssize_t ac_i2s_out_clockdiv_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	int chans;
	if (ac_i2s_out_clockdiv_get(&chans))
		return -EINVAL;
	return snprintf(buf, PAGE_SIZE, "%d\n", chans);
}

static ssize_t ac_i2s_out_clockdiv_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int chans;
	if (sscanf(buf, "%d", &chans) != 1)
		return -EINVAL;
	if (ac_i2s_out_clockdiv_set(chans))
		return -EINVAL;
	return count;
}

static ssize_t ac_i2s_out_clockdiv2_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	int chans;
	if (ac_i2s_out_clockdiv2_get(&chans))
		return -EINVAL;
	return snprintf(buf, PAGE_SIZE, "%d\n", chans);
}

static ssize_t ac_i2s_out_clockdiv2_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int chans;
	if (sscanf(buf, "%d", &chans) != 1)
		return -EINVAL;
	if (ac_i2s_out_clockdiv2_set(chans))
		return -EINVAL;
	return count;
}
+
+static ssize_t ac_i2s_out_frame_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int frame;
+	if (ac_i2s_out_frame_get(&frame))
+		return -EINVAL;
+	return map_string(map_frame, frame, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_out_frame_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int frame = map_lookup(map_frame, buf);
+	if (frame == -1)
+		return -EINVAL;
+	if (ac_i2s_out_frame_set(frame))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_out_master_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	bool master;
+	if (ac_i2s_out_master_get(&master))
+		return -EINVAL;
+	return map_string(map_bool, master, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_out_master_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int master = map_lookup(map_bool, buf);
+	if (master == -1)
+		return -EINVAL;
+	if (ac_i2s_out_master_set(!!master))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_out_aclk_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int aclk;
+	if (ac_i2s_out_aclk_get(&aclk))
+		return -EINVAL;
+	return map_string(map_clock, aclk, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_out_aclk_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int aclk = map_lookup(map_clock, buf);
+	if (aclk == -1)
+		return -EINVAL;
+	if (ac_i2s_out_aclk_set(aclk))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_out_blrclk_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	bool blrclk;
+	if (ac_i2s_out_blrclk_get(&blrclk))
+		return -EINVAL;
+	return map_string(map_bool, blrclk, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_out_blrclk_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int blrclk = map_lookup(map_bool, buf);
+	if (blrclk == -1)
+		return -EINVAL;
+	if (ac_i2s_out_blrclk_set(!!blrclk))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_out_enable_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	bool enable;
+	if (ac_i2s_out_enable_get(&enable))
+		return -EINVAL;
+	return map_string(map_bool, enable, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_out_enable_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int enable = map_lookup(map_bool, buf);
+	if (enable == -1)
+		return -EINVAL;
+	if (ac_i2s_out_enable_set(!!enable))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_bypass_adc_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	bool bypass;
+	if (ac_bypass_adc_get(&bypass))
+		return -EINVAL;
+	return map_string(map_bool, bypass, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_bypass_adc_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int bypass = map_lookup(map_bool, buf);
+	if (bypass == -1)
+		return -EINVAL;
+	if (ac_bypass_adc_set(!!bypass))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_input_route_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int in;
+	if (ac_input_route_get(&in))
+		return -EINVAL;
+	return map_string(map_input, in, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_input_route_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int in = map_lookup(map_input, buf);
+	if (in == -1)
+		return -EINVAL;
+	if (ac_input_route_set(in))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_samplewidth_dac_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int sw;
+	if (ac_i2swidth_dac_get(&sw))
+		return -EINVAL;
+	return map_string(map_samplewidth, sw, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_samplewidth_dac_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int sw = map_lookup(map_samplewidth, buf);
+	if (sw == -1)
+		return -EINVAL;
+	if (ac_i2swidth_dac_set(sw))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_samplewidth_adc_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int sw;
+	if (ac_i2swidth_adc_get(&sw))
+		return -EINVAL;
+	return map_string(map_samplewidth, sw, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_samplewidth_adc_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int sw = map_lookup(map_samplewidth, buf);
+	if (sw == -1)
+		return -EINVAL;
+	if (ac_i2swidth_adc_set(sw))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_input_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int pair, in;
+	if (sscanf(attr->attr.name, "i2s_input_%d", &pair) != 1)
+		return -EINVAL;
+	if (ac_chan_i2sinput_get(pair, &in))
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%d\n", in);
+}
+
+static ssize_t ac_i2s_input_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int pair, in;
+	if (sscanf(attr->attr.name, "i2s_input_%d", &pair) != 1)
+		return -EINVAL;
+	if (sscanf(buf, "%d", &in) != 1)
+		return -EINVAL;
+	if (ac_chan_i2sinput_set(pair, in))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_i2s_samplewidth_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int pair, sw;
+	if (sscanf(attr->attr.name, "i2s_samplewidth_%d", &pair) != 1)
+		return -EINVAL;
+	if (ac_chan_i2swidth_get(pair, &sw))
+		return -EINVAL;
+	return map_string(map_samplewidth, sw, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_i2s_samplewidth_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int pair, sw;
+	if (sscanf(attr->attr.name, "i2s_samplewidth_%d", &pair) != 1)
+		return -EINVAL;
+	sw = map_lookup(map_samplewidth, buf);
+	if (sw == -1)
+		return -EINVAL;
+	if (ac_chan_i2swidth_set(pair, sw))
+		return -EINVAL;
+	return count;
+}
+
+static int ac_lr_char(char lrchar)
+{
+	switch (lrchar) {
+	case 'l': return AUDIOCODEC_LEFT;
+	case 'r': return AUDIOCODEC_RIGHT;
+	case 'b': return AUDIOCODEC_LEFT | AUDIOCODEC_RIGHT;
+	default: return -EINVAL;
+	}
+}
+
+static ssize_t ac_vol_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int pair, lr;
+	uint8_t vol;
+	char lrchar;
+	if (sscanf(attr->attr.name, "vol_%d%c", &pair, &lrchar) != 2)
+		return -EINVAL;
+	lr = ac_lr_char(lrchar);
+	if (ac_chan_vol_get(pair, lr, &vol))
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%d\n", (int)vol);
+}
+
+static ssize_t ac_vol_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int pair, lr, vol;
+	char lrchar;
+	if (sscanf(attr->attr.name, "vol_%d%c", &pair, &lrchar) != 2)
+		return -EINVAL;
+	if (sscanf(buf, "%d", &vol) != 1)
+		return -EINVAL;
+	lr = ac_lr_char(lrchar);
+	if (ac_chan_vol_set(pair, lr, (uint8_t)vol))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t ac_mute_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	int pair, lr, mute;
+	char lrchar;
+	if (sscanf(attr->attr.name, "mute_%d%c", &pair, &lrchar) != 2)
+		return -EINVAL;
+	lr = ac_lr_char(lrchar);
+	if (ac_chan_mute_get(pair, lr, &mute))
+		return -EINVAL;
+	return map_string_flags(map_mute, mute, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_mute_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int pair, lr, mute;
+	char lrchar;
+	if (sscanf(attr->attr.name, "mute_%d%c", &pair, &lrchar) != 2)
+		return -EINVAL;
+	lr = ac_lr_char(lrchar);
+	mute = map_lookup_flags(map_mute, buf);
+	if (ac_chan_mute_set(pair, lr, mute))
+		return -EINVAL;
+	return count;
+}
+
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+static ssize_t ac_hdmi_audio_enable_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	bool en = zero1sp_hdmi_audio_get_enabled();
+	return map_string(map_bool, en, buf, PAGE_SIZE);
+}
+
+static ssize_t ac_hdmi_audio_enable_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int en = map_lookup(map_bool, buf);
+	if (en == -1)
+		return -EINVAL;
+	zero1sp_hdmi_audio_set_enabled(!!en);
+	return count;
+}
+#endif
+
+#define AC_ATTR(name, fnname) \
+	__ATTR(name, 0666, ac_##fnname##_show, ac_##fnname##_store)
+
+#define AC_ATTR_PERPAIR(name, fnname) \
+	AC_ATTR(name##_0, fnname), \
+	AC_ATTR(name##_1, fnname), \
+	AC_ATTR(name##_2, fnname)
+
+#define AC_ATTR_PERCHAN(name, fnname) \
+	AC_ATTR(name##_0l, fnname), \
+	AC_ATTR(name##_0r, fnname), \
+	AC_ATTR(name##_1l, fnname), \
+	AC_ATTR(name##_1r, fnname), \
+	AC_ATTR(name##_2l, fnname), \
+	AC_ATTR(name##_2r, fnname)
+
+static struct kobj_attribute ac_kobj_attributes[] = {
+	AC_ATTR(preset, preset),
+	AC_ATTR(dac_power, dac_power),
+	AC_ATTR(bypass_adc, bypass_adc),
+	AC_ATTR(input_route, input_route),
+	AC_ATTR(i2s_out_clock, i2s_out_clock),
+	AC_ATTR(i2s_out_clockdiv, i2s_out_clockdiv),
+	AC_ATTR(i2s_out_clockdiv2, i2s_out_clockdiv2),
+	AC_ATTR(i2s_out_active_channels, i2s_out_active_channels),
+	AC_ATTR(i2s_out_frame, i2s_out_frame),
+	AC_ATTR(i2s_out_master, i2s_out_master),
+	AC_ATTR(i2s_out_aclk, i2s_out_aclk),
+	AC_ATTR(i2s_out_blrclk, i2s_out_blrclk),
+	AC_ATTR(i2s_out_enable, i2s_out_enable),
+	AC_ATTR(i2s_samplewidth_dac, i2s_samplewidth_dac),
+	AC_ATTR(i2s_samplewidth_adc, i2s_samplewidth_adc),
+	AC_ATTR_PERPAIR(i2s_input, i2s_input),
+	AC_ATTR_PERPAIR(i2s_samplewidth, i2s_samplewidth),
+	AC_ATTR_PERCHAN(vol, vol),
+	AC_ATTR_PERCHAN(mute, mute),
+#ifdef CONFIG_TZ1090_01XX_HDMI_AUDIO
+	AC_ATTR(hdmi_audio_enable, hdmi_audio_enable),
+#endif
+};
+
+static struct attribute *ac_attributes[ARRAY_SIZE(ac_kobj_attributes)+1];
+
+static struct attribute_group ac_attr_group = {
+	.attrs = ac_attributes,
+};
+
+static int __init audiocodec_init(void)
+{
+	int i, ret;
+
+	ac_kobj = kobject_create_and_add("audiocodec", kernel_kobj);
+	if (!ac_kobj) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ac_kobj_attributes); i++)
+		ac_attributes[i] = &ac_kobj_attributes[i].attr;
+	ac_attributes[i] = NULL;
+
+	ret = sysfs_create_group(ac_kobj, &ac_attr_group);
+	if (ret)
+		goto err1;
+
+	return 0;
+
+err1:
+	kobject_put(ac_kobj);
+err0:
+	return ret;
+}
+
+static void audiocodec_exit(void)
+{
+	kobject_put(ac_kobj);
+}
+
+module_init(audiocodec_init);
+module_exit(audiocodec_exit);
+
diff --git a/arch/metag/soc/tz1090/bootprot.c b/arch/metag/soc/tz1090/bootprot.c
new file mode 100644
index 0000000..f03a7d6
--- /dev/null
+++ b/arch/metag/soc/tz1090/bootprot.c
@@ -0,0 +1,69 @@
+/*
+ * bootprot.c
+ *
+ * Boot protocol abstraction API (DFU, resume from suspend-to-RAM etc).
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/io.h>
+#include <asm/soc-tz1090/bootprot.h>
+
+#define BOOT_REG		0x0
+#define BOOT_ATTEMPTS		0x00000003
+#define BOOT_DFU		0x00000010
+/* if !BOOT_DFU { */
+#define BOOT_BOOTMODE		0x00000fe0
+#define BOOT_BOOTMODE_S	5
+#define BOOT_BOOTMODE_NORMAL   (0 << BOOT_BOOTMODE_S)
+#define BOOT_BOOTMODE_RESUME   (1 << BOOT_BOOTMODE_S)
+/*   if BOOT_BOOTMODE_RESUME { */
+#define RESUME_BOOT_CHECKSUM	0x00001000
+#define RESUME_BOOT_MASK	0x00001ff3
+#define RESUME_FUNCTION_REG	0x4
+#define RESUME_DATA_REG		0x8
+#define RESUME_CHECKSUM_REG	0xc
+/*   } */
+/* } */
+
+/**
+ * bootprot_normal_boot() - set up a normal boot.
+ * @swprot0:	Address of protected registers.
+ *
+ * Set up protected registers for a normal boot.
+ */
+void bootprot_normal_boot(unsigned long swprot0)
+{
+	writel(0, swprot0 + BOOT_REG);
+}
+
+/**
+ * bootprot_suspend_ram() - set up for resume from suspend-to-RAM.
+ * @swprot0:	Address of protected registers.
+ * @resume:	Resume function.
+ * @data:	Data to pass to resume function.
+ *
+ * Set up protected registers for a resume from suspend-to-RAM.
+ */
+void bootprot_suspend_ram(unsigned long swprot0,
+			  int (*resume)(void *),
+			  void *data)
+{
+	unsigned long boot = BOOT_BOOTMODE_RESUME;
+	writel(boot,			swprot0 + BOOT_REG);
+	writel((unsigned long)resume,	swprot0 + RESUME_FUNCTION_REG);
+	writel((unsigned long)data,	swprot0 + RESUME_DATA_REG);
+}
+
+/**
+ * bootprot_resume_ram() - unset the suspend state.
+ * @swprot0:	Address of protected registers.
+ *
+ * Undo the protected register setup for resume from suspend-to-RAM.
+ */
+void bootprot_resume_ram(unsigned long swprot0)
+{
+	bootprot_normal_boot(swprot0);
+	writel(0, swprot0 + RESUME_FUNCTION_REG);
+	writel(0, swprot0 + RESUME_DATA_REG);
+}
diff --git a/arch/metag/soc/tz1090/clock.c b/arch/metag/soc/tz1090/clock.c
new file mode 100644
index 0000000..eb56451
--- /dev/null
+++ b/arch/metag/soc/tz1090/clock.c
@@ -0,0 +1,456 @@
+/*
+ *  clock.c
+ *
+ *  Copyright (C) 2009 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/mutex.h>
+#include <asm/clock.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/clock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdc.h>
+
+static ATOMIC_NOTIFIER_HEAD(clk32k_notifier_list);
+
+unsigned long get_xtal1(void)
+{
+
+	u32 xtal_bits = (readl(CR_PERIP_RESET_CFG)
+			& CR_PERIP_RESET_CFG_FXTAL_BITS) >>
+			CR_PERIP_RESET_CFG_FXTAL_SHIFT;
+
+	switch (xtal_bits) {
+
+	case 0:		return 16384000;
+	case 1:		return 19200000;
+	case 2:		return 24000000;
+	case 3:		return 24576000;
+	case 4:		return 26000000;
+	case 5:		return 36000000;
+	case 6:		return 36864000;
+	case 7:		return 38400000;
+	case 8:		return 40000000;
+
+	default:
+			printk(KERN_ERR"Comet Clocks:"
+				"Invalid XTAL1 selected\n");
+			BUG();
+	}
+}
+
+/*
+ * Return the XTAL 2 clock. On Comet this is not detectable
+ * but the recommended value is 12MHz, the method is declared weak so it can
+ * be overridden by a board specific function if necessary.
+ */
+unsigned long __weak get_xtal2(void)
+{
+	return COMET_XTAL2;
+}
+
+/*
+ * Return the XTAL 3 clock. On Comet this is 32.768KHz for the RTC.
+ */
+unsigned long __weak get_xtal3(void)
+{
+	return COMET_XTAL3;
+}
+
+/**
+ * get_32kclock() - Get frequency of 32.768KHz clock.
+ *
+ * Returns:	The frequency of the 32KHz clock in Hz. This can be derived from
+ *		a 32.768KHz oscillator in XTAL3, or XTAL1 divided down.
+ */
+unsigned long get_32kclock(void)
+{
+	unsigned int soc_gpio0;
+	unsigned int divider;
+
+	soc_gpio0 = readl(PDC_BASE_ADDR + PDC_SOC_GPIO_CONTROL0);
+	if (soc_gpio0 & PDC_SOC_GPIO0_RTC_SW) {
+		return get_xtal3();
+	} else {
+		divider = 1 + ((soc_gpio0 & PDC_SOC_GPIO0_XTAL1_DIV)
+					>> PDC_SOC_GPIO0_XTAL1_DIV_SHIFT);
+		return get_xtal1() / divider;
+	}
+}
+EXPORT_SYMBOL_GPL(get_32kclock);
+
+/**
+ * clk32k_register_notify() - Register a 32.768KHz clock notifier callback.
+ * @nb:		pointer to the notifier block for the callback events.
+ *
+ * These occur when the frequency is changed.
+ *
+ * Returns:	0 on success.
+ */
+int clk32k_register_notify(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&clk32k_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(clk32k_register_notify);
+
+/**
+ * clk32k_unregister_notify() - Unregister a 32.768KHz clock notifier callback.
+ * @nb:		pointer to the notifier block for the callback events.
+ *
+ * clk32k_register_notify() must have been previously called for this function
+ * to work properly.
+ *
+ * Returns:	0 on success.
+ */
+int clk32k_unregister_notify(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&clk32k_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(clk32k_unregister_notify);
+
+/**
+ * set_32kclock_src() - Set source of 32.768KHz clock.
+ * @xtal1:	Whether to derive from XTAL1 rather than XTAL3.
+ * @xtal1_div:	Divider to use when deriving from XTAL1.
+ *
+ * Returns:	The resulting frequency in Hz.
+ */
+unsigned long set_32kclock_src(int xtal1, unsigned int xtal1_div)
+{
+	struct clk32k_change_freq change;
+	unsigned int soc_gpio0;
+	unsigned int divider;
+	unsigned int lstat;
+
+	__global_lock2(lstat);
+	change.old_freq = get_32kclock();
+
+	soc_gpio0 = readl(PDC_BASE_ADDR + PDC_SOC_GPIO_CONTROL0);
+	if (xtal1) {
+		soc_gpio0 &= ~PDC_SOC_GPIO0_RTC_SW;
+		divider = (xtal1_div - 1) << PDC_SOC_GPIO0_XTAL1_DIV_SHIFT;
+		soc_gpio0 &= ~PDC_SOC_GPIO0_XTAL1_DIV;
+		soc_gpio0 |= divider & PDC_SOC_GPIO0_XTAL1_DIV;
+	} else {
+		soc_gpio0 |= PDC_SOC_GPIO0_RTC_SW;
+	}
+	writel(soc_gpio0, PDC_BASE_ADDR + PDC_SOC_GPIO_CONTROL0);
+
+	change.new_freq = get_32kclock();
+	__global_unlock2(lstat);
+
+	/*
+	 * Inform users so they can make the necessary adjustments to their
+	 * timings.
+	 */
+	if (change.old_freq != change.new_freq)
+		atomic_notifier_call_chain(&clk32k_notifier_list,
+					   CLK32K_CHANGE_FREQUENCY, &change);
+
+	return change.new_freq;
+}
+
+unsigned long get_sysclock_x2_undeleted(void)
+{
+	u64 f_in;
+	u64 f_out;
+	u16 clk_f;    /* Feedback divide */
+	u8 clk_od;    /* Output Divider */
+	u8 clk_r;     /* Reference divider */
+	u32 pll_ctrl0 = readl(CR_TOP_SYSPLL_CTL0);
+	u32 pll_ctrl1 = readl(CR_TOP_SYSPLL_CTL1);
+	u32 sys_clk_div = readl(CR_TOP_SYSCLK_DIV) & 0xFF;
+
+	if ((readl(CR_TOP_CLKSWITCH) & 0x2) == 0)
+		return get_xtal1();
+
+	if (readl(CR_TOP_CLKSWITCH) & 0x1)
+		f_in = get_xtal2();
+	else
+		f_in = get_xtal1();
+
+	if (pll_ctrl1 & (1<<25)) { /* bypass bit set */
+		f_out = f_in;
+	} else {
+		unsigned long divisor;
+		/* Get Divider Values */
+		clk_f = (pll_ctrl0 >> 4) & 0x1FFF;
+		clk_od = pll_ctrl0 & 0x7;
+		clk_r = pll_ctrl1 & 0x3F;
+
+		/*
+		 *  formula:
+		 *  fout = (fin / (clkr + 1)) * (((clkf/2) + 0.5)/(clkod + 1))
+		 *
+		 *  note the equation has been re-arranged to avoid loss of
+		 *  precision due to integer maths.
+		 */
+		f_out = (u64)f_in * (clk_f + 1);
+		divisor = (2 * (clk_od + 1) * (clk_r + 1));
+		f_out = div_u64(f_out, divisor);
+	}
+
+	if (sys_clk_div)
+		return (unsigned long)f_out / (sys_clk_div + 1);
+	else
+		return (unsigned long)f_out;
+}
+
+unsigned long get_sysclock_undeleted(void)
+{
+#ifdef CONFIG_SOC_COMET_ES1
+	if (readl(CR_TOP_META_CLKDIV) & 0x1)
+		return get_sysclock_x2_undeleted() / 2;
+	else
+		return get_sysclock_x2_undeleted();
+#else
+	u32 div = readl(CR_TOP_META_CLKDIV);
+
+	return get_sysclock_x2_undeleted() / (div + 1);
+#endif
+}
+
+unsigned long get_sdhostclock(void)
+{
+	u8 clk_div = readl(CR_TOP_SDHOSTCLK_DIV);
+
+	return get_sysclock_undeleted() / (clk_div + 1);
+
+}
+
+unsigned long set_sdhostclock(unsigned long f)
+{
+	/*
+	 * Note we only modify the SDHost's own clock divider to try
+	 * and match the closest requested frequency - we do not
+	 * modify the main PLL, we return the value we achieved
+	 */
+	unsigned long f_in = get_sysclock_undeleted();
+	unsigned long f_out;
+	int lstat;
+	u32 temp;
+
+	if (f > f_in) {
+		writel(0, CR_TOP_SDHOSTCLK_DIV);
+		f_out = f_in;
+	} else {
+		u8 divider = min_t(u32, ((f_in+f-1)/f), 0xFFU);
+		divider = divider ? divider : 1;
+		writel(divider-1, CR_TOP_SDHOSTCLK_DIV);
+		f_out = f_in / divider;
+	}
+
+	/* Top level clk enable */
+	__global_lock2(lstat);
+	temp = readl(CR_PERIP_CLK_EN);
+	temp |= (1<<CR_PERIP_SDHOST_CLK_EN_BIT);
+	writel(temp, CR_PERIP_CLK_EN);
+	__global_unlock2(lstat);
+
+	return f_out;
+}
+
+
+void pix_clk_set_limits(unsigned long min, unsigned long max)
+{
+}
+
+unsigned long get_ddrclock(void)
+{
+	u8 clk_div = readl(CR_TOP_DDR_CLKDIV);
+
+	return get_sysclock_x2_undeleted() / (clk_div + 1);
+}
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+
+/* ======== UART clocks ======== */
+
+/* Global UART */
+#define CLKENAB_UART		(1 << CR_TOP_UART_EN_BIT)
+#define CLKSWITCH_UART		(1 << CR_TOP_UART_SW_BIT)
+#define UARTCLKDIV		(~0)
+
+/* UART0 */
+#define PERIPCLKEN_UART0	(1 << CR_PERIP_UART0_CLK_EN_BIT)
+
+/* UART1 */
+#define PERIPCLKEN_UART1	(1 << CR_PERIP_UART1_CLK_EN_BIT)
+
+/* ======== SCB (I2C) clocks ======== */
+
+/* Global SCB */
+#define CLKENAB_SCB		(1 << CR_TOP_SCB_EN_BIT)
+#define CLKSWITCH_SCB		(1 << CR_TOP_SCB_SW_BIT)
+
+/* SCB0 */
+#define PERIPCLKEN_SCB0		(1 << CR_PERIP_I2C0_CLK_EN_BIT)
+
+/* SCB1 */
+#define PERIPCLKEN_SCB1		(1 << CR_PERIP_I2C1_CLK_EN_BIT)
+
+/* SCB2 */
+#define PERIPCLKEN_SCB2		(1 << CR_PERIP_I2C2_CLK_EN_BIT)
+
+/* ======== SPI clocks ======== */
+#define PERIPCLKEN_SPIM1	(1 << CR_PERIP_SPIM1_CLK_EN_BIT)
+#define SPI1CLKDIV		(~0)
+
+/* ======== I2S clocks ======== */
+#define PERIPCLKEN_I2S		(1 << CR_PERIP_I2SOUT_CLK_EN_BIT)
+#define I2SCLKDIV		(~0)
+
+/* ======== PDP / PDI clock ======== */
+
+#define CLKENAB2_PIXEL		(1 << CR_TOP_PIXEL_CLK_2_EN_BIT)
+#define HEPCLKEN_PDP		CR_PDP_PDI_CLK_EN
+#define PIXELCLKDIV		(~0)
+
+/* ======== 2D clock ======== */
+#define HEPCLKEN_2D		CR_2D_CLK_EN
+
+
+/* ======== Accumulated clock bits to preserve ======== */
+#define CLKSWITCH_ALL		(CLKSWITCH_UART		| \
+				 CLKSWITCH_SCB)
+#define CLKENAB_ALL		(CLKENAB_UART		| \
+				 CLKENAB_SCB)
+#define CLKENAB2_ALL		(CLKENAB2_PIXEL)
+#define PERIPCLKEN_ALL		(PERIPCLKEN_UART0	| \
+				 PERIPCLKEN_UART1	| \
+				 PERIPCLKEN_SCB0	| \
+				 PERIPCLKEN_SCB1	| \
+				 PERIPCLKEN_SCB2	| \
+				 PERIPCLKEN_SPIM1	| \
+				 PERIPCLKEN_I2S)
+#define HEPCLKEN_ALL		(HEPCLKEN_PDP		| \
+				 HEPCLKEN_2D)
+
+/**
+ * enum comet_clk_op - Operations to perform on resume.
+ * @CLK_PRESERVE:	Preserve these bits across suspend.
+ * @CLK_CLEAR:		Clear these bits.
+ */
+enum comet_clk_op {
+	CLK_PRESERVE = 0,
+	CLK_CLEAR,
+};
+
+/**
+ * struct comet_clk_reg - Clock register field to preserve across suspend.
+ * @addr:	Address of 32bit register to preserve.
+ * @mask:	Mask of bits to preserve.
+ * @op:		Operation to perform on resume (CLK_*).
+ */
+struct comet_clk_reg {
+	u32 addr;
+	u32 mask;
+	enum comet_clk_op op;
+};
+
+static struct comet_clk_reg comet_clk_regs[] = {
+	/* Address		Mask				Operation */
+	/* turn clocks off first */
+	{ CR_TOP_CLKENAB,	CLKENAB_ALL,			CLK_CLEAR },
+	{ CR_TOP_CLKENAB2,	CLKENAB2_ALL,			CLK_CLEAR },
+	{ CR_PERIP_CLK_EN,	PERIPCLKEN_ALL,			CLK_CLEAR },
+	/* restore clock settings */
+	{ CR_TOP_CLKSWITCH,	CLKSWITCH_ALL,			CLK_PRESERVE },
+	{ CR_TOP_UART_CLK_DIV,	UARTCLKDIV,			CLK_PRESERVE },
+	{ CR_TOP_SPI1CLK_DIV,	SPI1CLKDIV,			CLK_PRESERVE },
+	{ CR_TOP_I2SCLK_DIV,	I2SCLKDIV,			CLK_PRESERVE },
+	{ CR_TOP_PIXEL_CLK_DIV,	PIXELCLKDIV,			CLK_PRESERVE },
+	/* finally restore clock switches */
+	{ CR_TOP_CLKENAB,	CLKENAB_ALL,			CLK_PRESERVE },
+	{ CR_TOP_CLKENAB2,	CLKENAB2_ALL,			CLK_PRESERVE },
+	{ CR_PERIP_CLK_EN,	PERIPCLKEN_ALL,			CLK_PRESERVE },
+	{ CR_HEP_CLK_EN,	HEPCLKEN_ALL,			CLK_PRESERVE },
+};
+
+static struct {
+	u32 values[ARRAY_SIZE(comet_clk_regs)];
+} *comet_clk_state;
+
+/**
+ * comet_clk_suspend() - stores hardware state so it can be restored.
+ *
+ * Stores clock settings into comet_clk_state using comet_clk_regs.
+ */
+int comet_clk_suspend(void)
+{
+	unsigned int i;
+	u32 val;
+
+	comet_clk_state = kzalloc(sizeof(*comet_clk_state), GFP_ATOMIC);
+	if (!comet_clk_state)
+		return -ENOMEM;
+
+	/* read the registers that need preserving */
+	for (i = 0; i < ARRAY_SIZE(comet_clk_regs); ++i) {
+		if (comet_clk_regs[i].op == CLK_PRESERVE) {
+			val = readl(comet_clk_regs[i].addr)
+				& comet_clk_regs[i].mask;
+			comet_clk_state->values[i] = val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * comet_clk_resume() - restores hardware state.
+ *
+ * Restores clock settings from comet_clk_state.
+ */
+void comet_clk_resume(void)
+{
+	unsigned int i, lstat;
+	u32 val;
+
+	/* restore the clocking registers */
+	__global_lock2(lstat);
+	for (i = 0; i < ARRAY_SIZE(comet_clk_regs); ++i) {
+		if (!comet_clk_regs[i].mask)
+			continue;
+		if (comet_clk_regs[i].mask != ~0u) {
+			val = readl(comet_clk_regs[i].addr);
+			val &= ~comet_clk_regs[i].mask;
+			val |= comet_clk_state->values[i];
+		} else {
+			val = comet_clk_state->values[i];
+		}
+		writel(val, comet_clk_regs[i].addr);
+	}
+	__global_unlock2(lstat);
+
+	kfree(comet_clk_state);
+	comet_clk_state = NULL;
+}
+
+struct syscore_ops comet_clk_syscore_ops = {
+	.suspend = comet_clk_suspend,
+	.resume = comet_clk_resume,
+};
+
+static int __init comet_clk_init(void)
+{
+	register_syscore_ops(&comet_clk_syscore_ops);
+	return 0;
+}
+
+device_initcall(comet_clk_init);
+
+#endif /* CONFIG_METAG_SUSPEND_MEM */
diff --git a/arch/metag/soc/tz1090/coremem.c b/arch/metag/soc/tz1090/coremem.c
new file mode 100644
index 0000000..632394b
--- /dev/null
+++ b/arch/metag/soc/tz1090/coremem.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2010 Imagination Technologies Ltd.
+ *
+ * Core memory regions.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <asm/coremem.h>
+
+struct metag_coremem_region metag_coremems[] = {
+	{
+		.flags	= METAG_COREMEM_IMEM,
+		.start	= (char *)0x80000000,
+		.size	= 0x10000,
+	},
+	{
+		.flags	= METAG_COREMEM_DMEM,
+		.start	= (char *)0x82000000,
+		.size	= 0x10000,
+	},
+	{
+		.flags	= METAG_COREMEM_ICACHE,
+	},
+	{
+		.flags	= METAG_COREMEM_DCACHE,
+	},
+};
+
+unsigned int metag_coremems_sz = ARRAY_SIZE(metag_coremems);
diff --git a/arch/metag/soc/tz1090/ddr.inc b/arch/metag/soc/tz1090/ddr.inc
new file mode 100644
index 0000000..6b31570
--- /dev/null
+++ b/arch/metag/soc/tz1090/ddr.inc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ * DDR chip specific definitions
+ */
+
+#ifndef _METAG_COMET_DDR_INC_
+#define _METAG_COMET_DDR_INC_
+
+#ifdef __ASSEMBLY__
+
+/* We may need register addresses */
+#include <asm/soc-tz1090/defs.h>
+
+/*
+ * Board specific macros used by comet suspend code to control the DDR.
+ *
+ * COMET_DDR_PREPARE_POWERDOWN()
+ * 	This is used before putting DDR into self refresh mode.
+ * COMET_DDR_FINISH_POWERDOWN()
+ * 	This is used after taking DDR out of self refresh mode.
+ *
+ * Macros may clobber the D0FrT and D0Ar6 registers.
+ */
+
+/*
+ * Some boards don't have pull down on the clock line, so the DDR will come out
+ * of self refresh when the controller is powered down. The termination voltage
+ * of the DDR pads needs to be disabled to keep the clock from floating high.
+ */
+#if defined(CONFIG_COMET_BUB) || defined(CONFIG_POLARIS)
+
+#ifdef CONFIG_SOC_COMET_ES1
+
+/*
+ * ES1 doesn't allow the terminations of the DDR pads to be set independently
+ * of the terminations of the DDR part. We can turn them off by using the
+ * extended mode register (EMR).
+ */
+
+/* 512Mbit MT47H32M16 */
+#include <asm/soc-tz1090/ddr-mt47h.inc>
+
+/* CR_DDR_EMR.DDR_EMR_ODT = DDR_EMR_ODT_DISABLED */
+#define COMET_DDR_PREPARE_POWERDOWN() \
+	MOVT	D0FrT, #HI(CR_DDRC_EMR); \
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_EMR); \
+	GETD	D0Ar6, [D0FrT]; \
+	AND	D0Ar6, D0Ar6, #~DDR_EMR_ODT; \
+	/* OR	D0Ar6, D0Ar6, #DDR_EMR_ODT_DISABLED; */ \
+	SETD	[D0FrT], D0Ar6;
+
+/* CR_DDR_EMR.DDR_EMR_ODT = DDR_EMR_ODT_75_OHM */
+#define COMET_DDR_FINISH_POWERDOWN() \
+	MOVT	D0FrT, #HI(CR_DDRC_EMR); \
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_EMR); \
+	GETD	D0Ar6, [D0FrT]; \
+	AND	D0Ar6, D0Ar6, #~DDR_EMR_ODT; \
+	OR	D0Ar6, D0Ar6, #DDR_EMR_ODT_75_OHM; \
+	SETD	[D0FrT], D0Ar6;
+
+#else	/* CONFIG_SOC_COMET_ES1 */
+
+/*
+ * PS1 allows the terminations of the DDR pads to be set independently of the
+ * DDR part, using the CR_DDRC_PADS register.
+ */
+
+/* CR_DDR_PADS.CR_DDRC_PADS_ODT = CR_DDRC_PADS_ODT_DISABLED */
+#define COMET_DDR_PREPARE_POWERDOWN() \
+	MOVT	D0FrT, #HI(CR_DDRC_PADS); \
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_PADS); \
+	GETD	D0Ar6, [D0FrT]; \
+	AND	D0Ar6, D0Ar6, #~CR_DDRC_PADS_ODT; \
+	/* OR	D0Ar6, D0Ar6, #CR_DDRC_PADS_ODT_DISABLED; */ \
+	SETD	[D0FrT], D0Ar6;
+
+/* CR_DDR_PADS.CR_DDRC_PADS_ODT = CR_DDRC_PADS_ODT_150_OHM */
+#define COMET_DDR_FINISH_POWERDOWN() \
+	MOVT	D0FrT, #HI(CR_DDRC_PADS); \
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_PADS); \
+	GETD	D0Ar6, [D0FrT]; \
+	AND	D0Ar6, D0Ar6, #~CR_DDRC_PADS_ODT; \
+	OR	D0Ar6, D0Ar6, #CR_DDRC_PADS_ODT_150_OHM; \
+	SETD	[D0FrT], D0Ar6;
+
+#endif	/* CONFIG_SOC_COMET_ES1 */
+
+#endif	/* CONFIG_COMET_BUB || CONFIG_POLARIS */
+
+/* Have some sensible defaults for other boards */
+
+#ifndef COMET_DDR_PREPARE_POWERDOWN
+#define COMET_DDR_PREPARE_POWERDOWN()
+#endif
+
+#ifndef COMET_DDR_FINISH_POWERDOWN
+#define COMET_DDR_FINISH_POWERDOWN()
+#endif
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _METAG_COMET_DDR_INC_ */
diff --git a/arch/metag/soc/tz1090/irq.c b/arch/metag/soc/tz1090/irq.c
new file mode 100644
index 0000000..37efb24
--- /dev/null
+++ b/arch/metag/soc/tz1090/irq.c
@@ -0,0 +1,18 @@
+/*
+ * Comet specific interrupt code.
+ *
+ * Copyright (C) 2009-2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/irqchip/metag-ext.h>
+
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/setup.h>
+
+void __init comet_init_irq(void)
+{
+	/* Evaluation silicon has no mask registers */
+	if (comet_is_evaluation_silicon())
+		meta_intc_no_mask();
+}
diff --git a/arch/metag/soc/tz1090/numa.c b/arch/metag/soc/tz1090/numa.c
new file mode 100644
index 0000000..d8f82d52
--- /dev/null
+++ b/arch/metag/soc/tz1090/numa.c
@@ -0,0 +1,18 @@
+/*
+ *  Multiple memory node support for Comet SoCs.
+ *
+ *  Copyright (C) 2010  Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mmzone.h>
+
+#include <asm/mmzone.h>
+
+void __init soc_mem_setup(void)
+{
+	setup_bootmem_node(1, 0xe0200200, 0xe0260000);
+}
diff --git a/arch/metag/soc/tz1090/pdp.c b/arch/metag/soc/tz1090/pdp.c
new file mode 100644
index 0000000..a031aaf
--- /dev/null
+++ b/arch/metag/soc/tz1090/pdp.c
@@ -0,0 +1,133 @@
+/*
+ * pdp.c - contains block specific initialisation routines
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <video/pdpfb.h>
+#include <video/imgpdi_lcd.h>
+#include <linux/io.h>
+#include <asm/soc-tz1090/pdp.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/clock.h>
+
+void comet_pdp_set_shared_base(unsigned long pa)
+{
+	writel(pa >> 2, CR_PDP_MEM_BASE_ADDR);
+}
+
+static struct resource pdp_resources[] = {
+	{
+		.start	= PDP_IRQ_NUM,
+		/* mapped in comet_pdp_setup() */
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= PDP_BASE_ADDR,
+		.end	= PDP_BASE_ADDR + PDP_SIZE,
+		.flags	= IORESOURCE_MEM | PDPFB_IORES_PDP,
+	},
+};
+
+static struct pdp_info pdp_pdata = {
+	.bpp		= 16,
+#ifdef CONFIG_SOC_COMET_ES1
+	.linestore_len = 768,
+#else
+	.linestore_len = 1024,
+#endif
+	.vpitch_bilinear_threshold = 3 << PDPFB_PDATA_FIX_SHIFT,
+};
+
+static struct platform_device pdp_device = {
+	.name		= "pdpfb",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(pdp_resources),
+	.resource	= pdp_resources,
+	.dev		= {
+		.platform_data	= &pdp_pdata,
+	},
+};
+
+static struct resource pdi_resources[] = {
+	{
+		.start	= PDI_BASE_ADDR,
+		.end	= PDI_BASE_ADDR + PDI_SIZE,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct imgpdi_lcd_timings pdi_timings;
+static struct imgpdi_lcd_pdata pdi_pdata;
+
+static int comet_pdi_match_fb(struct imgpdi_lcd_pdata *pdata,
+			      struct fb_info *info)
+{
+	return !strncmp(info->fix.id, "pdp", 16);
+}
+
+static struct platform_device pdi_device = {
+	.name		= "imgpdi-lcd",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(pdi_resources),
+	.resource	= pdi_resources,
+	.dev		= {
+		.platform_data	= &pdi_pdata,
+	},
+};
+
+static struct platform_device *display_devices[] = {
+	&pdi_device,
+	&pdp_device,
+};
+
+void __init comet_pdp_set_limits(unsigned long min, unsigned long max)
+{
+	pix_clk_set_limits(min, max);
+}
+
+int __init comet_pdp_setup(const struct fb_videomode *fbvm,
+		struct pdp_lcd_size_cfg *plsc, struct imgpdi_lcd_pdata *pdic,
+		struct pdp_sync_cfg *psc, struct pdp_hwops *hwops)
+{
+	int irq;
+
+	/* Make sure we're given the necessary info */
+	if (!fbvm || !plsc)
+		return -ENXIO;
+
+	/* Map the IRQ */
+	irq = external_irq_map(pdp_resources[0].start);
+	if (irq < 0) {
+		pr_err("%s: unable to map PDP irq %u (%d)\n",
+		       __func__, pdp_resources[0].start, irq);
+		return irq;
+	}
+	pdp_resources[0].start = irq;
+	pdp_resources[0].end = irq;
+
+	memcpy(&pdp_pdata.lcd_cfg, fbvm, sizeof(struct fb_videomode));
+	memcpy(&pdp_pdata.lcd_size_cfg, plsc, sizeof(struct pdp_lcd_size_cfg));
+	memcpy(&pdp_pdata.sync_cfg, psc, sizeof(struct pdp_sync_cfg));
+	memcpy(&pdp_pdata.hwops, hwops, sizeof(struct pdp_hwops));
+
+	/* copy PDI platform data if provided */
+	if (pdic)
+		memcpy(&pdi_pdata, pdic, sizeof(struct imgpdi_lcd_pdata));
+	else
+		memset(&pdi_pdata, 0, sizeof(struct imgpdi_lcd_pdata));
+	/* if active timings are provided, copy them too */
+	if (pdi_pdata.active) {
+		memcpy(&pdi_timings, pdi_pdata.active,
+		       sizeof(struct imgpdi_lcd_timings));
+		pdi_pdata.active = &pdi_timings;
+	}
+	/* use our own fb matcher */
+	pdi_pdata.match_fb = comet_pdi_match_fb;
+
+	return platform_add_devices(display_devices,
+				    ARRAY_SIZE(display_devices));
+}
diff --git a/arch/metag/soc/tz1090/pm.c b/arch/metag/soc/tz1090/pm.c
new file mode 100644
index 0000000..1a57021
--- /dev/null
+++ b/arch/metag/soc/tz1090/pm.c
@@ -0,0 +1,355 @@
+/*
+ * IMG Comet Power Management
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/core_reg.h>
+#include <asm/coremem.h>
+#include <asm/global_lock.h>
+#include <asm/mmu_context.h>
+#include <asm/soc-tz1090/bootprot.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdc.h>
+#include <asm/soc-tz1090/pm.h>
+#include <asm/soc-tz1090/suspend.h>
+#include <asm/suspend.h>
+#include <asm/tbx.h>
+
+/**
+ * comet_safe_mode() - Find whether the SoC is in SAFE mode.
+ *
+ * Returns:	non zero if SAFE mode is active, 0 otherwise.
+ */
+static inline int comet_safe_mode(void)
+{
+	/* the boot mode is latched into the PDC bootstrap register */
+	u32 soc_bootstrap = readl(PDC_BASE_ADDR + PDC_SOC_BOOTSTRAP);
+	return soc_bootstrap & PDC_SOC_BOOTSTRAP_SAFE_MODE;
+}
+
+/**
+ * comet_pdc_restart() - Restart SoC using watchdog reset.
+ *
+ * Watchdog reset the SoC.
+ */
+void comet_pdc_restart(void)
+{
+	/* writing 1 to the watchdog soft reset register resets the SoC */
+	writel(1, PDC_BASE_ADDR + PDC_WD_SW_RESET);
+}
+
+/**
+ * comet_pdc_power_off() - Power off the SoC if possible using EXT_POWER.
+ *
+ * If the SoC isn't in SAFE mode, power off the SoC using the PDC to control
+ * EXT_POWER.
+ *
+ * Returns:	0 on success, -errno on failure.
+ */
+int comet_pdc_power_off(void)
+{
+	if (comet_safe_mode()) {
+		pr_info("SoC SAFE mode active, cannot power off SoC\n");
+		return -ENODEV;
+	}
+
+	/* clear the SOC_POWER register to drop EXT_POWER */
+	writel(0, PDC_BASE_ADDR + PDC_SOC_POWER);
+	return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+
+/* Mask of CR_TOP_CLKENAB that is disabled in comet_pm_standby. */
+#define CLKENAB_CLKOUT_MASK ((1 << CR_TOP_CLKOUT1_3_EN_BIT) | \
+			     (1 << CR_TOP_CLKOUT0_3_EN_BIT))
+
+/* optional board level hooks called around the comet_pm_enter() transition */
+int (*board_suspend)(suspend_state_t state);
+void (*board_resume)(suspend_state_t state);
+
+/**
+ * comet_pm_standby() - Go into standby mode.
+ *
+ * This copies the standby code into core memory, does some final power saving
+ * tasks and jumps into core memory, where the DDR memory is put into self
+ * refresh mode and clocked off, and the Meta is clocked down.
+ * When a wake interrupt is detected, the Meta is powered back up to its
+ * previous state.
+ *
+ * Returns:	0 on success, -errno on failure.
+ */
+static int comet_pm_standby(void)
+{
+	struct metag_coremem_region *reg;
+	void (*standby_func)(unsigned int txmask);
+	unsigned int flags;
+	unsigned int perip_clk;
+	unsigned int top_clk;
+	int thread = hard_processor_id();
+	int ret = 0;
+
+	/* Get the address of some core memory */
+	reg = metag_coremem_alloc(METAG_COREMEM_ICACHE, metag_comet_standby_sz);
+	if (!reg) {
+		ret = -ENOMEM;
+		goto out_finish;
+	}
+
+	/* Copy the standby code into core memory */
+	standby_func = metag_coremem_push(reg, metag_comet_standby,
+					  metag_comet_standby_sz);
+	if (!standby_func) {
+		ret = -ENOMEM;
+		goto out_finish_coremem;
+	}
+
+	/* the clock gating registers are shared, so take the global lock */
+	__global_lock2(flags);
+	/* Disable all peripheral clocks */
+	perip_clk = readl(CR_PERIP_CLK_EN);
+	writel(0, CR_PERIP_CLK_EN);
+	/* Disable clk_out_0 and clk_out_1 */
+	/* keep value of top_clk so we can restore those bits */
+	top_clk = readl(CR_TOP_CLKENAB);
+	writel(top_clk & ~CLKENAB_CLKOUT_MASK, CR_TOP_CLKENAB);
+	__global_unlock2(flags);
+
+	/* Run the code from core memory, waking on trigger */
+	wmb();	/* ensure the code copy has landed before jumping to it */
+	standby_func(TBI_TRIG_BIT(TBID_SIGNUM_TR2(thread)));
+
+	/* back from standby: restore the clocks disabled above */
+	__global_lock2(flags);
+	/* Re-enable clk_out_0 and clk_out_1 */
+	writel(top_clk, CR_TOP_CLKENAB);
+	/* Re-enable the peripheral clocks */
+	writel(perip_clk, CR_PERIP_CLK_EN);
+	__global_unlock2(flags);
+
+	/* finish using the core memory */
+out_finish_coremem:
+	metag_coremem_free(reg);
+
+out_finish:
+	return ret;
+}
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+
+#ifdef CONFIG_COMET_SUSPEND_MEM_SAFE
+/* if safe suspend support is enabled then we always allow suspend */
+#define allow_suspend_mem()	1
+#else
+/* without safe suspend support we don't support suspend in SAFE mode */
+#define allow_suspend_mem()	(!comet_safe_mode())
+#endif
+
+/* jump buffer for use by suspend to RAM; restored by meta_pm_mem_resume() */
+static struct metag_suspend_jmpbuf s2r_jmpbuf;
+
+/**
+ * meta_pm_mem_resume() - Main resume entry point.
+ * @data:	Data pointer to jump buffer.
+ *
+ * This is the main entry point back into the kernel on resume.
+ *
+ * Returns:	1 on error, otherwise never returns.
+ */
+static int meta_pm_mem_resume(void *data)
+{
+	struct metag_suspend_jmpbuf *jmp = data;
+
+#ifdef CONFIG_METAG_DSP
+	/* NOTE(review): presumably clears stale DSP register state after the
+	 * power cycle - confirm against the Meta DSP documentation. */
+	__core_reg_set(D0.8, 0);
+#endif
+	setup_priv();
+
+	/* make sure the soft reset protected registers are cleared */
+	bootprot_resume_ram(PDC_BASE_ADDR + PDC_SOC_SW_PROT);
+
+	/* jump back to where we suspended from */
+	metag_resume_longjmp(jmp, 1);
+	/* should never get here, return to bootloader */
+	return 1;
+}
+
+/*
+ * Core suspend data that is needed during successful resume.
+ * used by comet_pm_do_suspend().
+ * used by comet_pm_resume().
+ */
+struct comet_pm_suspend_data {
+	struct metag_coremem_region *reg;	/* core memory holding suspend code */
+};
+
+/**
+ * comet_pm_do_suspend() - Power down the SoC from core memory.
+ * @d:		Suspend data; d->reg records the core memory region used.
+ *
+ * Copies the suspend code into core memory and runs it from there, which puts
+ * the DDR into self refresh and cuts the power. Only returns if the power
+ * down failed.
+ *
+ * Returns:	-errno on failure (does not return on success).
+ */
+static int comet_pm_do_suspend(volatile struct comet_pm_suspend_data *d)
+{
+	void (*suspend_func)(unsigned int);
+	int thread = hard_processor_id();
+	int ret = 0;
+
+	/* Get the address of some core memory */
+	d->reg = metag_coremem_alloc(METAG_COREMEM_ICACHE,
+				     metag_comet_suspend_sz);
+	if (!d->reg) {
+		ret = -ENOMEM;
+		goto out_finish;
+	}
+
+	/* Copy the suspend code into core memory */
+	suspend_func = metag_coremem_push(d->reg, metag_comet_suspend,
+					  metag_comet_suspend_sz);
+	if (!suspend_func) {
+		ret = -ENOMEM;
+		goto out_finish_coremem;
+	}
+
+	/* Run the code from core memory, waking on TR2 */
+	wmb();	/* ensure the code copy is complete before jumping to it */
+	suspend_func(TBI_TRIG_BIT(TBID_SIGNUM_TR2(thread)));
+
+	/*
+	 * If we've got here then something went wrong, the power down (or reset
+	 * in the case of SAFE mode) never happened.
+	 */
+	pr_err("Power down failed\n");
+	ret = -ENODEV;
+
+	/* finish using the core memory */
+out_finish_coremem:
+	metag_coremem_free(d->reg);
+
+out_finish:
+	return ret;
+}
+
+/* clean up after a successful suspend */
+static noinline void comet_pm_resume(volatile struct comet_pm_suspend_data *d)
+{
+	metag_coremem_free(d->reg);
+}
+
+/**
+ * comet_pm_suspend() - Suspend to RAM via the PDC protected registers.
+ * @resume:	Resume entry point invoked by the boot protocol on wake.
+ * @data:	Argument passed to @resume (the jump buffer).
+ * @d:		Suspend data passed through to comet_pm_do_suspend().
+ *
+ * Returns:	-errno on failure (does not return on success).
+ */
+static int comet_pm_suspend(int (*resume)(void *),
+			    void *data,
+			    volatile struct comet_pm_suspend_data *d)
+{
+	int err;
+
+	/*
+	 * Set up the PDC soft reset protected registers for resuming from
+	 * suspend-to-RAM.
+	 */
+	bootprot_suspend_ram(PDC_BASE_ADDR + PDC_SOC_SW_PROT, resume, data);
+
+	/* Put DDR RAM into self refresh and switch off the power. */
+	err = comet_pm_do_suspend(d);
+	if (err) {
+		/*
+		 * Something went wrong. we don't want to leave the magic values
+		 * lying around though.
+		 */
+		bootprot_resume_ram(PDC_BASE_ADDR + PDC_SOC_SW_PROT);
+	}
+	return err;
+}
+
+/**
+ * metag_pm_mem() - Suspend to RAM.
+ *
+ * Saves the trap and register context, powers the core down via
+ * comet_pm_suspend(), and after wakeup restores the MMU and trap context.
+ *
+ * Returns:	0 after a successful suspend/resume cycle, -errno on failure.
+ */
+static int metag_pm_mem(void)
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+	volatile struct comet_pm_suspend_data d;
+
+	comet_prepare_reset();
+
+	ret = traps_save_context();
+	if (unlikely(ret))
+		goto err_traps;
+
+	/* Store the core registers, so after wakeup we'll jump back to here. */
+	if (!metag_suspend_setjmp(&s2r_jmpbuf)) {
+		/*
+		 * Do SoC specifics to power switch core off safely.
+		 * This won't return unless something went wrong.
+		 */
+		ret = comet_pm_suspend(meta_pm_mem_resume, &s2r_jmpbuf, &d);
+		if (unlikely(ret))
+			goto err_suspend;
+	} else {
+		/* second return from setjmp: we have woken back up */
+		comet_pm_resume(&d);
+	}
+
+	/* We've suspended, so resume safely */
+	switch_mmu(&init_mm, mm);
+err_suspend:
+	traps_restore_context();
+err_traps:
+	return ret;
+}
+
+#else /* CONFIG_METAG_SUSPEND_MEM */
+
+/* no suspend to RAM support: never valid, and fails if somehow requested */
+#define allow_suspend_mem() 0
+#define metag_pm_mem() (-EINVAL)
+
+#endif
+
+/* Begin the suspend process. */
+static int comet_pm_begin(suspend_state_t state)
+{
+#ifdef CONFIG_COMET_SUSPEND_MEM_SAFE
+	/* in SAFE mode a true power down is impossible; warn the user early */
+	if (state == PM_SUSPEND_MEM && comet_safe_mode())
+		pr_warn("WARNING: SoC SAFE mode active, Suspend to RAM will be faked using watchdog reset\n");
+#endif
+	return 0;
+}
+
+/* Enter a suspend mode, bracketed by the optional board suspend/resume hooks. */
+static int comet_pm_enter(suspend_state_t state)
+{
+	int err = 0;
+
+	/* give the board support code first refusal */
+	if (board_suspend) {
+		err = board_suspend(state);
+		if (err)
+			return err;
+	}
+
+	if (state == PM_SUSPEND_STANDBY)
+		err = comet_pm_standby();
+	else if (state == PM_SUSPEND_MEM)
+		err = metag_pm_mem();
+	else
+		err = -EINVAL;
+
+	/* always undo the board hook, even if the SoC suspend failed */
+	if (board_resume)
+		board_resume(state);
+
+	return err;
+}
+
+/* Specify which suspend modes are valid. */
+static int comet_pm_valid(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		return 1;
+	case PM_SUSPEND_MEM:
+		return allow_suspend_mem();
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Suspend operations for the SoC. The table is never modified at runtime and
+ * suspend_set_ops() takes a const pointer, so declare it const.
+ */
+static const struct platform_suspend_ops comet_pm_ops = {
+	.begin		= comet_pm_begin,
+	.enter		= comet_pm_enter,
+	.valid		= comet_pm_valid,
+};
+
+/* Register the SoC suspend operations at device initcall time. */
+static int __init comet_pm_init(void)
+{
+	suspend_set_ops(&comet_pm_ops);
+	return 0;
+}
+device_initcall(comet_pm_init);
+
+#endif /* CONFIG_SUSPEND */
diff --git a/arch/metag/soc/tz1090/sdhost-dma.c b/arch/metag/soc/tz1090/sdhost-dma.c
new file mode 100644
index 0000000..dd24e61
--- /dev/null
+++ b/arch/metag/soc/tz1090/sdhost-dma.c
@@ -0,0 +1,219 @@
+/*
+ * Comet DMA functions for Synopsys SDHost driver
+ *
+ * Copyright (C) 2010 Imagination Technologies
+ */
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/img_mdc_dma.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/sdhost.h>
+
+/* Mark a dw_mci event as pending for the driver's tasklet to process. */
+#define dw_mci_set_pending(host, event)				\
+	set_bit(event, &host->pending_events)
+
+/* Per-host DMA state: a channel for each direction plus the active descriptor. */
+struct dw_mci_dma_data {
+	struct dma_chan	*txchan;
+	struct dma_chan *rxchan;
+	struct dma_async_tx_descriptor *desc;
+};
+
+/*
+ * DMA peripheral numbers supplied via dw_mci_board.data.
+ * NOTE(review): this layout duplicates the struct of the same name in
+ * sdhost.c and must stay in sync with it - consider a shared header.
+ */
+struct dma_pdata {
+	unsigned int tx_dma;
+	unsigned int rx_dma;
+};
+
+/* Unmap the scatterlist of the current data transfer, if there is one. */
+static void dw_mmc_dma_cleanup(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+	enum dma_data_direction dir;
+
+	if (!data)
+		return;
+
+	dir = (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	dma_unmap_sg(host->dev, data->sg, data->sg_len, dir);
+}
+
+/* DMA completion callback: release the mapping and kick the driver tasklet. */
+static void dw_mmc_dma_complete(void *arg)
+{
+	struct dw_mci *host = arg;
+	struct mmc_data *data = host->data;
+
+	host->dma_ops->cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point trying
+	 * to send the stop command or waiting for NBUSY in this case.
+	 */
+	if (!data)
+		return;
+
+	dw_mci_set_pending(host, EVENT_XFER_COMPLETE);
+	tasklet_schedule(&host->tasklet);
+}
+
+/*
+ * Allocate the per-host DMA state and request the tx and rx DMA channels.
+ * Returns 0 on success, error code otherwise.
+ */
+static int dw_mmc_dma_init(struct dw_mci *host)
+{
+	struct dw_mci_dma_data *dma_data;
+	struct mdc_dma_cookie *cookie;
+	dma_cap_mask_t mask;
+	int ret = 0;
+
+	/* If we are resuming, don't allocate new resources */
+	if (host->dma_data)
+		return 0;
+
+	dma_data = kzalloc(sizeof(struct dw_mci_dma_data), GFP_KERNEL);
+	if (!dma_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* temporary cookie used only while requesting the channels */
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie) {
+		ret = -ENOMEM;
+		goto free_data;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Set peripheral number to dma cookie and reset channel */
+	cookie->periph = host->pdata->data->tx_dma;
+	cookie->req_channel = -1;
+	dma_data->txchan = dma_request_channel(mask, &mdc_dma_filter_fn,
+					       cookie);
+	if (!dma_data->txchan) {
+		dev_err(host->dev,
+			"%s: could not find suitable tx DMA channel.\n",
+			__func__);
+		ret = -ENXIO;
+		goto free_cookie;
+	}
+
+	/* Set peripheral number to dma cookie and reset channel */
+	cookie->periph = host->pdata->data->rx_dma;
+	cookie->req_channel = -1;
+
+	dma_data->rxchan = dma_request_channel(mask, &mdc_dma_filter_fn,
+					       cookie);
+	if (!dma_data->rxchan) {
+		dev_err(host->dev,
+			"%s: could not find suitable rx DMA channel.\n",
+			__func__);
+		ret = -ENXIO;
+		/* drop the tx channel acquired above before bailing out */
+		dma_release_channel(dma_data->txchan);
+		goto free_cookie;
+	}
+
+	host->dma_data = dma_data;
+
+	/* both channels are held; the request cookie is no longer needed */
+	kfree(cookie);
+
+	return 0;
+
+free_cookie:
+	kfree(cookie);
+free_data:
+	kfree(dma_data);
+out:
+	return ret;
+}
+
+/* Release both DMA channels and free the per-host DMA state. */
+static void dw_mmc_dma_exit(struct dw_mci *host)
+{
+	struct dw_mci_dma_data *dma_data = host->dma_data;
+
+	dma_release_channel(dma_data->txchan);
+	dma_release_channel(dma_data->rxchan);
+	kfree(dma_data);
+}
+
+/*
+ * Configure the channel for the current transfer direction and kick off a
+ * slave scatter-gather DMA transfer of sg_len entries.
+ *
+ * NOTE(review): tx_control is a stack local whose address is stored in the
+ * channel's private pointer; this relies on the MDC driver consuming it
+ * during config/prep and never dereferencing it after this function
+ * returns - confirm against the img_mdc_dma driver.
+ */
+static void dw_mmc_dma_start(struct dw_mci *host, unsigned int sg_len)
+{
+	struct mmc_data *data = host->data;
+	int direction;
+	struct dma_chan *chan;
+	struct mdc_dma_tx_control tx_control;
+
+	direction = (data->flags & MMC_DATA_READ) ?
+			DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	if (direction == DMA_TO_DEVICE) {
+		struct dma_slave_config dma_tx_conf = {
+			.direction = DMA_MEM_TO_DEV,
+			.dst_addr = CR_PERIP_SDHOST_DMA_RDATA,
+			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+			.dst_maxburst = 0, /* minimum burst */
+		};
+		tx_control.flags = MDC_ACCESS_DELAY;
+		tx_control.access_delay = 1;
+		host->dma_data->txchan->private = (void *)&tx_control;
+
+		dmaengine_slave_config(host->dma_data->txchan,
+				       &dma_tx_conf);
+
+		/* Prepare the DMA channel for transfer */
+		host->dma_data->desc = dmaengine_prep_slave_sg(
+						host->dma_data->txchan,
+						data->sg,
+						sg_len,
+						DMA_MEM_TO_DEV,
+						DMA_PREP_INTERRUPT|
+						DMA_CTRL_ACK);
+		chan = host->dma_data->txchan;
+	} else {
+		struct dma_slave_config dma_rx_conf = {
+			.direction = DMA_DEV_TO_MEM,
+			.src_addr = CR_PERIP_SDHOST_DMA_WDATA,
+			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+			.src_maxburst = 0, /* minimum burst */
+		};
+		tx_control.flags = MDC_ACCESS_DELAY;
+		tx_control.access_delay = 0;
+		host->dma_data->rxchan->private = (void *)&tx_control;
+
+		dmaengine_slave_config(host->dma_data->rxchan,
+				       &dma_rx_conf);
+
+		/* Prepare the DMA channel for transfer */
+		host->dma_data->desc = dmaengine_prep_slave_sg(
+						host->dma_data->rxchan,
+						data->sg,
+						sg_len,
+						DMA_DEV_TO_MEM,
+						DMA_PREP_INTERRUPT|
+						DMA_CTRL_ACK);
+		chan = host->dma_data->rxchan;
+	}
+
+	if (!host->dma_data->desc) {
+		dev_err(host->dev,
+			"Failed to allocate transfer descriptor\n");
+		return;
+	}
+
+	/* set the callbacks */
+	host->dma_data->desc->callback = dw_mmc_dma_complete;
+	host->dma_data->desc->callback_param = host;
+	/* Submit the descriptor */
+	dmaengine_submit(host->dma_data->desc);
+	/* Make the transfer */
+	dma_async_issue_pending(chan);
+}
+
+/* Abort any in-flight transfers on both DMA channels. */
+static void dw_mmc_dma_stop(struct dw_mci *host)
+{
+	dmaengine_terminate_all(host->dma_data->txchan);
+	dmaengine_terminate_all(host->dma_data->rxchan);
+}
+
+/* DMA operations plugged into the dw_mmc driver via dw_mci_board.dma_ops. */
+struct dw_mci_dma_ops comet_dma_ops = {
+	.init = dw_mmc_dma_init,
+	.exit = dw_mmc_dma_exit,
+	.start = dw_mmc_dma_start,
+	.stop = dw_mmc_dma_stop,
+	.cleanup = dw_mmc_dma_cleanup,
+};
diff --git a/arch/metag/soc/tz1090/sdhost.c b/arch/metag/soc/tz1090/sdhost.c
new file mode 100644
index 0000000..b2b9d0c
--- /dev/null
+++ b/arch/metag/soc/tz1090/sdhost.c
@@ -0,0 +1,142 @@
+/*
+ * sdhost.c - contains block specific initialisation routines
+ *
+ * Copyright (C) 2011 Imagination Technologies Ltd.
+ *
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/dma-mapping.h>
+#include <linux/img_mdc_dma.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/sdhost.h>
+#include <asm/soc-tz1090/clock.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/pdc.h>
+#include <asm/soc-tz1090/defs.h>
+#include <linux/usb/dwc_otg_platform.h>
+
+
+#define COMET_SDHOST_CLK	100000000UL
+
+/* Slot init hook; card detect uses the controller's built in logic. */
+int mci_init(u32 slot_id, irq_handler_t irqhdlr, void *data)
+{
+	/*
+	 * Used to setup gpio based card detect interrupt handler
+	 * on a per slot basis, we are using the modules built in
+	 * card detect functionality, so do nothing (must be implemented).
+	 */
+
+	return 0;
+}
+
+/* Report the supported slot voltage range: 3.2V - 3.4V. */
+int mci_get_ocr(u32 slot_id)
+{
+	return MMC_VDD_32_33 | MMC_VDD_33_34;
+}
+
+/* Report the slot bus width: 4 data lines. */
+int mci_get_bus_wd(u32 slot_id)
+{
+	return 4;
+}
+
+
+/* SD host register block and interrupt. */
+static struct resource comet_mci_resources[] = {
+	[0] = {
+		.start  = SDIO_HOST_BASE_ADDR,
+		/*
+		 * struct resource ends are inclusive, so the last byte of the
+		 * block is at start + size - 1 (the original value overlapped
+		 * the first byte of the next region).
+		 */
+		.end	= SDIO_HOST_BASE_ADDR + SDIO_HOST_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= SDIO_HOST_IRQ_NUM,
+		/* mapped in comet_sdhost_init() */
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+/*
+ * DMA request line numbers handed to the SoC DMA ops via dw_mci_board.data.
+ * NOTE(review): this layout duplicates the struct of the same name in
+ * sdhost-dma.c and must stay in sync with it - consider a shared header.
+ */
+struct dma_pdata {
+	unsigned int tx_dma;
+	unsigned int rx_dma;
+} dma_pdata = {
+	.tx_dma			= DMA_MUX_SDIO_HOST_WR,
+	.rx_dma			= DMA_MUX_SDIO_HOST_RD,
+};
+
+
+/* Transfer size limits, bounded by one page of MDC descriptor list entries. */
+struct block_settings blk_settings = {
+	.max_segs	= PAGE_SIZE / sizeof(struct img_dma_mdc_list),
+	.max_blk_size	= 65536, /* BLKSIZ is 16 bits, dma could do 24 bits*/
+	.max_blk_count	= PAGE_SIZE / sizeof(struct img_dma_mdc_list),
+	.max_req_size	= 65536 * PAGE_SIZE / sizeof(struct img_dma_mdc_list),
+	.max_seg_size	= 65536 * PAGE_SIZE / sizeof(struct img_dma_mdc_list),
+};
+
+
+/* the SD host device is capable of 32 bit DMA addressing */
+static u64 mci_dmamask = DMA_BIT_MASK(32);
+
+/* Platform data handed to the dw_mmc driver; bus_hz is filled in at init. */
+struct dw_mci_board comet_mci_platform_data = {
+	.detect_delay_ms	= 250,
+#ifdef CONFIG_SOC_COMET_ES1
+	.quirks			= 0,
+#else
+	.quirks			= DW_MCI_QUIRK_RETRY_DELAY |
+				  DW_MCI_QUIRK_HIGHSPEED |
+				  DW_MCI_QUIRK_GPIO_UNLOCK |
+				  DW_MCI_QUIRK_BIT_BANG,
+#endif
+	.clk_pin		= GPIO_SDIO_CLK,
+	.cmd_pin		= GPIO_SDIO_CMD,
+	.fifo_depth		= 32,
+	.init			= mci_init,
+	.get_ocr		= mci_get_ocr,
+	.get_bus_wd		= mci_get_bus_wd,
+	.setpower		= NULL,	/* boards can override this */
+#ifndef CONFIG_MMC_DW_IDMAC
+	/* use the SoC MDC DMA ops when the internal IDMAC is not configured */
+	.dma_ops		= &comet_dma_ops,
+#endif
+	.data			= &dma_pdata,
+	.blk_settings		= &blk_settings,
+};
+
+/* SD host controller platform device. */
+static struct platform_device	comet_mci_device = {
+	.name		= "dw_mmc",
+	.num_resources	= ARRAY_SIZE(comet_mci_resources),
+	.dev		= {
+		.dma_mask		= &mci_dmamask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= &comet_mci_platform_data,
+	},
+	.resource	= comet_mci_resources,
+};
+
+/**
+ * comet_sdhost_init() - Configure the SD host clock and register the device.
+ *
+ * Returns:	0 on success, -errno on failure.
+ */
+int __init comet_sdhost_init(void)
+{
+	unsigned long sdhost_clk = set_sdhostclock(COMET_SDHOST_CLK);
+	int irq;
+
+	/*
+	 * Warn if the exact requested rate was unavailable. Use pr_warn for
+	 * consistency with the pr_err logging below.
+	 */
+	if (COMET_SDHOST_CLK != sdhost_clk) {
+		pr_warn("Comet SD-Host Init: Requested %lu HZ SD "
+			"Clock Actual SF CLk = %lu HZ\n",
+			COMET_SDHOST_CLK, sdhost_clk);
+	}
+
+	/* Set the bus speed to send to the driver */
+	comet_mci_platform_data.bus_hz = get_sdhostclock();
+
+	/* map IRQs */
+	irq = external_irq_map(comet_mci_resources[1].start);
+	if (irq < 0) {
+		pr_err("%s: unable to map SD-Host irq %u (%d)\n",
+		       __func__, comet_mci_resources[1].start, irq);
+		return irq;
+	}
+	comet_mci_resources[1].start = irq;
+	comet_mci_resources[1].end = irq;
+
+	return platform_device_register(&comet_mci_device);
+}
diff --git a/arch/metag/soc/tz1090/setup.c b/arch/metag/soc/tz1090/setup.c
new file mode 100644
index 0000000..119dc44
--- /dev/null
+++ b/arch/metag/soc/tz1090/setup.c
@@ -0,0 +1,369 @@
+/*
+ * setup.c
+ *
+ * Copyright (C) 2009-2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial_8250.h>
+#include <linux/timeriomem-rng.h>
+#include <linux/uccp.h>
+#include <linux/syscore_ops.h>
+#include <linux/of_platform.h>
+#include <asm/global_lock.h>
+#include <asm/mach/arch.h>
+#include <asm/soc-tz1090/clock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+#include <asm/soc-tz1090/pdc.h>
+#include <asm/soc-tz1090/pm.h>
+#include <asm/soc-tz1090/setup.h>
+
+/*---------------------------- RNG Setup -------------------------------------*/
+
+/* single 32-bit RNG data register */
+static struct resource rng_resource = {
+	.flags		= IORESOURCE_MEM,
+	.start		= CR_PERIP_RNG_NUM,
+	.end		= CR_PERIP_RNG_NUM + 4 - 1,
+};
+
+/* period 0: no wait required between reads of the data register */
+static struct timeriomem_rng_data rng_data = {
+	.period		= 0,
+};
+
+/* hardware RNG exposed via the generic timeriomem_rng driver */
+static struct platform_device rng_device = {
+	.name		= "timeriomem_rng",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &rng_data,
+	},
+	.resource	= &rng_resource,
+	.num_resources	= 1,
+};
+
+/* Start the hardware RNG; called at boot and again on resume. */
+static void comet_rng_init(void)
+{
+	/* start rng without programmable seed */
+	unsigned long rng_ctrl;
+	rng_ctrl = CR_PERIP_RNG_START_BITS | CR_PERIP_RNG_PSEED_DIS_BITS;
+	writel(rng_ctrl, CR_PERIP_RNG_CTRL);
+}
+
+
+/*---------------------------- UCCP Setup ------------------------------------*/
+
+/* regions accessible from all uccp device nodes */
+static struct uccp_region comet_uccp_global_regions[] = {
+	{
+		.type		= UCCP_REGION_ALL,
+		.physical	= UCCP_BASE_ADDR,
+		.size		= UCCP_SIZE,
+	},
+	{
+		.type		= UCCP_REGION_SYS_INTERNAL,
+		.physical	= UCCP_SYSINT_BASE_ADDR,
+		.size		= UCCP_SYSINT_SIZE,
+	},
+};
+
+/* regions specific to UCCP core 0: MTX plus the 16/24-bit MCP memories */
+static struct uccp_region comet_uccp0_regions[] = {
+	{
+		.type		= UCCP_REGION_MTX,
+		.physical	= UCCP0_MTX_BASE_ADDR,
+		.size		= UCCP0_MTX_SIZE,
+	},
+	{
+		.type		= UCCP_REGION_MCP_16_BIT,
+		.physical	= UCCP0_MCP16BIT_BASE_ADDR,
+		.size		= UCCP0_MCP16BIT_SIZE,
+	},
+	{
+		.type		= UCCP_REGION_MCP_24_BIT,
+		.physical	= UCCP0_MCP24BIT_BASE_ADDR,
+		.size		= UCCP0_MCP24BIT_SIZE,
+	},
+};
+
+/* regions specific to UCCP core 1: MTX only */
+static struct uccp_region comet_uccp1_regions[] = {
+	{
+		.type		= UCCP_REGION_MTX,
+		.physical	= UCCP1_MTX_BASE_ADDR,
+		.size		= UCCP1_MTX_SIZE,
+	},
+};
+
+/* per-core region lists and memory consistency request limits */
+static struct uccp_core comet_uccp_cores[] = {
+	[0] = {
+		.regions = comet_uccp0_regions,
+		.num_regions = ARRAY_SIZE(comet_uccp0_regions),
+		.num_mc_req = UCC0_MC_REQ_MAX,
+	},
+	[1] = {
+		.regions = comet_uccp1_regions,
+		.num_regions = ARRAY_SIZE(comet_uccp1_regions),
+		.num_mc_req = UCC1_MC_REQ_MAX,
+	},
+};
+
+/* top level UCCP platform data tying cores and global regions together */
+static struct uccp_pdata comet_uccp_pdata = {
+	.cores		= comet_uccp_cores,
+	.num_cores	= ARRAY_SIZE(comet_uccp_cores),
+	.regions	= comet_uccp_global_regions,
+	.num_regions	= ARRAY_SIZE(comet_uccp_global_regions),
+};
+
+/*
+ * register blocks
+ * struct resource ends are inclusive, so each region's last byte is at
+ * start + size - 1 (the original values overlapped the next byte).
+ */
+static struct resource comet_uccp_resources[] = {
+	{
+		.start          = UCC0_HOST_BASE_ADDR,
+		.end            = UCC0_HOST_BASE_ADDR + UCC0_HOST_SIZE - 1,
+		.flags          = IORESOURCE_MEM
+				| UCCP_RES(0, UCCP_RES_HOSTSYSBUS),
+	},
+	{
+		.start          = UCC0_MC_BASE_ADDR,
+		.end            = UCC0_MC_BASE_ADDR + UCC0_MC_SIZE - 1,
+		.flags          = IORESOURCE_MEM
+				| UCCP_RES(0, UCCP_RES_MCREQ),
+	},
+	{
+		.start          = UCC1_HOST_BASE_ADDR,
+		.end            = UCC1_HOST_BASE_ADDR + UCC1_HOST_SIZE - 1,
+		.flags          = IORESOURCE_MEM
+				| UCCP_RES(1, UCCP_RES_HOSTSYSBUS),
+	},
+	{
+		.start          = UCC1_MC_BASE_ADDR,
+		.end            = UCC1_MC_BASE_ADDR + UCC1_MC_SIZE - 1,
+		.flags          = IORESOURCE_MEM
+				| UCCP_RES(1, UCCP_RES_MCREQ),
+	},
+};
+
+/* UCCP character device exposing the UCC register regions to userspace */
+static struct platform_device uccp_device = {
+	.name		= "uccp",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &comet_uccp_pdata,
+	},
+	.resource	= comet_uccp_resources,
+	.num_resources	= ARRAY_SIZE(comet_uccp_resources),
+};
+
+
+/*---------------------------- PDC Setup -------------------------------------*/
+
+/* Stores the boot time 32khz clock rate for drivers to access */
+unsigned long clk32k_bootfreq;
+EXPORT_SYMBOL_GPL(clk32k_bootfreq);
+
+/* Select the PDC 32KHz clock source and bypass XTAL3. */
+static void comet_pdc_init(void)
+{
+	unsigned int soc_gpio2, div;
+	int lstat;
+
+	/*
+	 * The driver needs to know the boot time 32KHz clock frequency so it
+	 * can compensate for incorrect clock rates during power down.
+	 */
+	clk32k_bootfreq = get_32kclock();
+
+	/*
+	 * Set up PDC clock input to use XTAL1 with a divider. XTAL1 is
+	 * likely less accurate (due to physical oscillator accuracy)
+	 * than what might be available as XTAL3, but XTAL3 cannot be
+	 * relied upon to reset the system.
+	 */
+	/* (2*x/y + 1) / 2 divides with rounding to the nearest integer */
+	div = (2 * get_xtal1() / CLK32K_DESIRED_FREQUENCY + 1) / 2;
+	set_32kclock_src(1, div);
+
+	/* bypass XTAL3 */
+	__global_lock2(lstat);
+	soc_gpio2 = readl(PDC_BASE_ADDR + PDC_SOC_GPIO_CONTROL2);
+	soc_gpio2 |= PDC_SOC_GPIO2_XTAL3_BYPASS;
+	soc_gpio2 &= ~PDC_SOC_GPIO2_XTAL3_EN;
+	writel(soc_gpio2, PDC_BASE_ADDR + PDC_SOC_GPIO_CONTROL2);
+	__global_unlock2(lstat);
+}
+
+/*---------------------- Event/Timestamp counter Setup------------------------*/
+#ifdef CONFIG_IMG_EVT
+static struct resource comet_evt_resource = {
+		.start = EVENT_TS_BASE_ADDR,
+		/* resource ends are inclusive: the last byte is start + size - 1 */
+		.end = EVENT_TS_BASE_ADDR + EVENT_TS_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+};
+
+/* event/timestamp counter device */
+static struct platform_device comet_evt_device = {
+		.name = "img-eventtimer",
+		.id = -1,
+		.resource = &comet_evt_resource,
+		.num_resources = 1,
+};
+#endif
+
+/*-----------------------Platform Devices Setup-------------------------------*/
+
+/* devices registered for every TZ1090 based board */
+static struct platform_device *comet_devices[] __initdata = {
+	&rng_device,
+	&uccp_device,
+#ifdef CONFIG_IMG_EVT
+	&comet_evt_device,
+#endif
+};
+
+/* Gate off unused HEP block clocks (2D, and PDP/PDI when idle) at boot. */
+static void __init comet_clocks_init(void)
+{
+	unsigned int lstat, temp, clkenab2;
+	__global_lock2(lstat);
+
+	/* initialise HEP clocks to off */
+	clkenab2 = readl(CR_TOP_CLKENAB2);
+	temp = readl(CR_HEP_CLK_EN);
+	temp &= ~CR_2D_CLK_EN;
+	/* only stop PDP/PDI clock if pixel clock isn't already going */
+	if (!(clkenab2 & (1 << CR_TOP_PIXEL_CLK_2_EN_BIT)))
+		temp &= ~CR_PDP_PDI_CLK_EN;
+	writel(temp, CR_HEP_CLK_EN);
+
+	__global_unlock2(lstat);
+
+}
+
+/**
+ * comet_prepare_reset() - Prepare SoC for reset.
+ *
+ * This ensures workarounds can take place for reset/poweroff related quirks.
+ */
+void comet_prepare_reset(void)
+{
+	/*
+	 * Due to a hardware bug the 32.768KHz clock resets to being derived
+	 * from XTAL1 with a divider of 768 when the power is cut. Setting
+	 * explicitly beforehand triggers the notifier chain so that drivers of
+	 * devices that use it can compensate.
+	 */
+	set_32kclock_src(1, 768);
+}
+
+/* board reboot callbacks, optionally installed by board support code */
+void (*board_restart)(char *cmd) = NULL;
+void (*board_halt)(void) = NULL;
+void (*board_power_off)(void) = NULL;
+
+/* Restart handler: give the board hook a chance, then watchdog reset. */
+static void comet_restart(char *cmd)
+{
+	comet_prepare_reset();
+
+	if (board_restart)
+		board_restart(cmd);
+
+	comet_pdc_restart();
+}
+
+/* Halt handler: prepare for reset and defer to the board hook, if any. */
+static void comet_halt(void)
+{
+	comet_prepare_reset();
+
+	if (board_halt)
+		board_halt();
+}
+
+/* Power off handler: board hook first, then cut power via the PDC. */
+static void comet_power_off(void)
+{
+	comet_prepare_reset();
+
+	if (board_power_off)
+		board_power_off();
+
+	comet_pdc_power_off();
+}
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+/* nothing to save: the SoC state is re-initialised in comet_resume() */
+static int comet_suspend(void)
+{
+	return 0;
+}
+
+/* Reprogram PDC and RNG setup lost across suspend to RAM. */
+static void comet_resume(void)
+{
+	comet_pdc_init();
+	comet_rng_init();
+}
+#else
+#define comet_suspend NULL
+#define comet_resume NULL
+#endif	/* CONFIG_METAG_SUSPEND_MEM */
+
+/* syscore hooks for suspend/resume of SoC level state */
+static struct syscore_ops comet_syscore_ops = {
+	.suspend = comet_suspend,
+	.resume  = comet_resume,
+};
+
+/* Identify prototype (ES1) silicon from the core revision register. */
+bool comet_is_evaluation_silicon(void)
+{
+	unsigned int core_rev = readl(CR_COMET_CORE_REV);
+	return unlikely((core_rev & CR_COMET_CORE_REV_MAJOR_BITS)
+			== CR_COMET_CORE_REV_MAJOR_ES1);
+}
+
+/* Early init: sanity check that the kernel matches the silicon revision. */
+void __init comet_init_early(void)
+{
+	/*
+	 * It's an easy mistake to forget SOC_COMET_ES1.
+	 * It is better to panic now rather than mysteriously going wrong later.
+	 */
+#ifndef CONFIG_SOC_COMET_ES1
+	if (comet_is_evaluation_silicon())
+		panic("This kernel doesn't support prototype (ES1) silicon. "
+		      "Please enable SOC_COMET_ES1.");
+#endif
+}
+
+/* Machine init: report clock rates, register SoC devices and reboot hooks. */
+void __init comet_init_machine(void)
+{
+	printk(KERN_INFO"Comet Soc: XTAL1 is %ld HZ\n", get_xtal1());
+	printk(KERN_INFO"Comet Soc: XTAL2 is %ld HZ\n", get_xtal2());
+	printk(KERN_INFO"Comet Soc: XTAL3 is %ld HZ\n", get_xtal3());
+	printk(KERN_INFO"Comet Soc: Clocks:\n");
+	printk(KERN_INFO"Comet Soc: \tSys Clock (Pre deleter) %ld HZ\n",
+		get_sysclock_undeleted());
+	printk(KERN_INFO"Comet Soc: \tDDR Clock %ld HZ\n",
+			get_ddrclock());
+
+	/* Temporary backwards compatibility */
+	clk_add_alias("pdi",	NULL,	"pdp",	NULL);
+
+	comet_clocks_init();
+	comet_rng_init();
+
+	platform_add_devices(comet_devices, ARRAY_SIZE(comet_devices));
+
+	register_syscore_ops(&comet_syscore_ops);
+
+	/* hook the SoC level restart/halt/power off handlers */
+	soc_restart = comet_restart;
+	soc_halt = comet_halt;
+	pm_power_off = comet_power_off;
+
+	comet_pdc_init();
+
+	/* register any further devices described in the device tree */
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+/*
+ * Device tree compatible strings matched by this machine.
+ * NOTE(review): as const data this table could arguably be __initconst
+ * rather than __initdata - confirm before changing.
+ */
+static const char *tz1090_boards_compat[] __initdata = {
+	"toumaz,tz1090",
+	NULL,
+};
+
+MACHINE_START(TZ1090, "Generic TZ1090")
+	.dt_compat	= tz1090_boards_compat,
+	TZ1090_MACHINE_DEFAULTS,
+MACHINE_END
diff --git a/arch/metag/soc/tz1090/suspend.S b/arch/metag/soc/tz1090/suspend.S
new file mode 100644
index 0000000..6599f53
--- /dev/null
+++ b/arch/metag/soc/tz1090/suspend.S
@@ -0,0 +1,340 @@
+! Copyright 2010 Imagination Technologies Ltd.
+!
+! Functions for suspending the comet SoC.
+
+#include <linux/linkage.h>
+#include <asm/metag_isa.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/pdc.h>
+#include "ddr.inc"
+
+	.text
+
+! Pause for approximately 100*time instructions
+! This is modelled on ldlk's MPAUSE command
+.macro MPAUSE time
+	MOVT	D1Ar5, #HI((\time)*25)
+	ADD	D1Ar5, D1Ar5, #LO((\time)*25)
+1:
+	NOP
+	SUB	D1Ar5, D1Ar5, #1
+	CMP	D1Ar5, #0
+	BNZ	1b
+.endm
+
+
+! Wait for a wake interrupt to fire
+! The ret output is the trigger status
+! The trig argument is the trigger mask
+.macro wait_for_wakeup ret trig
+	! set TXMASK to wake trigger mask
+	MOV	TXMASK, \trig
+
+	! wait for a wakeup trigger to fire
+	MOV     \ret, TXSTAT
+
+	! we don't clear the trigger, otherwise it won't get handled on resume.
+
+	! reset TXMASK
+	XOR	TXMASK, D0Re0, D0Re0
+.endm
+
+! Instruction cache prefetch instruction
+.macro INSTR_ICACHE offset pfcount
+	.long (0xae000001	| (((\offset) & 0xffff) << 9) \
+				| (((\pfcount) & 0xf) << 1))
+.endm
+
+! Prefetch between two markers
+.macro INSTR_ICACHE_BETWEEN start end
+	INSTR_ICACHE ((\start) - .), \
+		      (1 + (((\end) - (\start)) >> ICACHE_LINE_S))
+.endm
+
+!================ SIMPLE STANDBY ================!
+
+! Wait for a wake interrupt (txmask in first argument).
+! This is pretty much SoC agnostic.
+ENTRY(_metag_standby)
+	wait_for_wakeup D0Re0, D1Ar1
+
+	MOV	PC, D1RtP
+ENDPROC(_metag_standby)
+
+! Make the length available so the code can be copied into core memory.
+ENTRY(_metag_standby_sz)
+        .long   . - _metag_standby
+
+
+!================ POWER SAVING STANDBY ================!
+
+! Wait for a wake interrupt (txmask in first argument), with a bit more
+! power saving techniques.
+
+#define DDR_POWERDOWN		! saves ~105mA on comet BUB
+#define META_PLL_BYPASS		! saves ~50mA at 360MHz on comet BUB
+#define META_PLL_PWRDN		! saves ~3mA on comet BUB
+#define META_CLKDELETE		! saves <1mA on comet BUB
+
+ENTRY(_metag_comet_standby)
+	! Prefetch enough cache lines to cover DDR powerdown
+	INSTR_ICACHE_BETWEEN ., $Lddr_powered_back_up
+	! Wait for prefetch to complete
+	MPAUSE 1000
+
+	!---------------- POWER VARIOUS THINGS DOWN ----------------!
+
+	! powerdown the DDR clock and pads
+#ifdef DDR_POWERDOWN
+	! put DDR into self refresh
+
+	! do board specific preparation
+	COMET_DDR_PREPARE_POWERDOWN()
+
+	! CR_DDRC_SELFREF_EN = 1
+	MOVT	D0FrT, #HI(CR_DDRC_SELFREF_EN)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_SELFREF_EN)
+	MOV	D0Ar6, #1
+	SETD	[D0FrT], D0Ar6
+
+	! wait until DDRC_OPERATING_MODE is set to self refresh
+	MOVT	D0FrT, #HI(CR_DDRC_OPERATING_MODE)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_OPERATING_MODE)
+1:
+	GETD	D0Ar6, [D0FrT]
+	CMP	D0Ar6, #CR_DDRC_OPERATING_MODE_SELFREF
+	BNZ	1b
+
+	! CR_DDR_CTRL |= 1 << CR_DDR_POWERDOWN
+	MOVT	D0FrT, #HI(CR_DDR_CTRL)
+	ADD	D0FrT, D0FrT, #LO(CR_DDR_CTRL)
+	GETD	D0Ar6, [D0FrT]
+	OR	D0Ar6, D0Ar6, #(1 << CR_DDR_POWERDOWN_BIT)
+	SETD	[D0FrT], D0Ar6
+#endif
+
+	! bypass PLL to clock the Meta down and power down the PLL
+#ifdef META_PLL_BYPASS
+	! Switch system clock to XTAL1
+	! CR_TOP_CLKSWITCH &= ~(1 << CR_TOP_SYSCLK1_SW_BIT)
+	MOVT	D0FrT, #HI(CR_TOP_CLKSWITCH)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_CLKSWITCH)
+	GETD	D0Ar6, [D0FrT]
+	AND	D0Ar6, D0Ar6, #(~(1 << CR_TOP_SYSCLK1_SW_BIT))
+	SETD	[D0FrT], D0Ar6
+	! wait for switch over
+	MPAUSE 1000
+
+#ifdef META_PLL_PWRDN
+	! CR_TOP_SYSPLL_CTL1 |= (1 << CR_TOP_SYSPLL_PWRDN_BIT)
+	MOVT	D0FrT, #HI(CR_TOP_SYSPLL_CTL1)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_SYSPLL_CTL1)
+	GETD	D0Ar6, [D0FrT]
+	MOVT	D0Ar4, #HI(1 << CR_TOP_SYSPLL_PWRDN_BIT)
+	OR	D0Ar6, D0Ar6, D0Ar4
+	SETD	[D0FrT], D0Ar6
+#endif
+#endif
+
+	! delete 1023/1024 system clock cycles
+#ifdef META_CLKDELETE
+	! CR_TOP_META_CLKDELETE = 1023
+	MOVT	D0FrT, #HI(CR_TOP_META_CLKDELETE)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_META_CLKDELETE)
+	MOV	D0Ar6, #1023
+	SETD	[D0FrT], D0Ar6
+#endif
+
+
+	!---------------- STANDBY UNTIL TRIGGER FIRES ----------------!
+
+	wait_for_wakeup D0Re0, D1Ar1
+
+	!---------------- POWER VARIOUS THINGS BACK UP AGAIN ----------------!
+
+	! delete 0/1024 system clock cycles
+#ifdef META_CLKDELETE
+	! CR_TOP_META_CLKDELETE = 0
+	MOVT	D0FrT, #HI(CR_TOP_META_CLKDELETE)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_META_CLKDELETE)
+	MOV	D0Ar6, #0
+	SETD	[D0FrT], D0Ar6
+#endif
+
+#ifdef META_PLL_BYPASS
+	! power up the PLL
+#ifdef META_PLL_PWRDN
+	! CR_TOP_SYSPLL_CTL1 ^= (1 << CR_TOP_SYSPLL_PWRDN_BIT)
+	MOVT	D0FrT, #HI(CR_TOP_SYSPLL_CTL1)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_SYSPLL_CTL1)
+	GETD	D0Ar6, [D0FrT]
+	MOVT	D0Ar4, #HI(1 << CR_TOP_SYSPLL_PWRDN_BIT)
+	XOR	D0Ar6, D0Ar6, D0Ar4
+	SETD	[D0FrT], D0Ar6
+	! wait for PLL to power back up and sort itself out
+	MPAUSE 2000
+#endif
+
+	! Switch system clock back to PLL
+	! CR_TOP_CLKSWITCH |= (1 << CR_TOP_SYSCLK1_SW_BIT)
+	MOVT	D0FrT, #HI(CR_TOP_CLKSWITCH)
+	ADD	D0FrT, D0FrT, #LO(CR_TOP_CLKSWITCH)
+	GETD	D0Ar6, [D0FrT]
+	OR	D0Ar6, D0Ar6, #(1 << CR_TOP_SYSCLK1_SW_BIT)
+	SETD	[D0FrT], D0Ar6
+	! wait for switch back
+	MPAUSE 5000
+#endif
+
+	! powerup the DDR clock and pads
+#ifdef DDR_POWERDOWN
+	! CR_DDR_CTRL &= ~(1 << CR_DDR_POWERDOWN_BIT)
+	MOVT	D0FrT, #HI(CR_DDR_CTRL)
+	ADD	D0FrT, D0FrT, #LO(CR_DDR_CTRL)
+	GETD	D0Ar6, [D0FrT]
+	AND	D0Ar6, D0Ar6, #(~(1 << CR_DDR_POWERDOWN_BIT))
+	SETD	[D0FrT], D0Ar6
+
+	! take DDR out of self refresh
+	! CR_DDRC_SELFREF_EN = 0
+	MOVT	D0FrT, #HI(CR_DDRC_SELFREF_EN)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_SELFREF_EN)
+	MOV	D0Ar6, #0
+	SETD	[D0FrT], D0Ar6
+
+	! undo board specific preparation
+	COMET_DDR_FINISH_POWERDOWN()
+#endif
+$Lddr_powered_back_up:
+
+
+	!---------------- FINISHED ----------------!
+
+	MOV	PC, D1RtP
+ENDPROC(_metag_comet_standby)
+
+! Make the length available so the code can be copied into core memory.
+ENTRY(_metag_comet_standby_sz)
+        .long   . - _metag_comet_standby
+
+!================ POWER SAVING SUSPEND TO RAM =================================!
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+
+! Power down the main power island and let the Powerdown Controller power the
+! system back up again when it receives a wake interrupt.
+
+ENTRY(_metag_comet_suspend)
+	! Prefetch enough cache lines to cover DDR powerdown
+	INSTR_ICACHE_BETWEEN ., $Licache_extent
+	! Wait for prefetch to complete
+	MPAUSE 1000
+
+	!---------------- POWER VARIOUS THINGS DOWN ----------------!
+
+	! powerdown the DDR clock and pads
+	! put DDR into self refresh
+
+	! do board specific preparation
+	COMET_DDR_PREPARE_POWERDOWN()
+
+	! CR_DDRC_SELFREF_EN = 1
+	MOVT	D0FrT, #HI(CR_DDRC_SELFREF_EN)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_SELFREF_EN)
+	MOV	D0Ar6, #1
+	SETD	[D0FrT], D0Ar6
+
+	! wait until DDRC_OPERATING_MODE is set to self refresh
+	MOVT	D0FrT, #HI(CR_DDRC_OPERATING_MODE)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_OPERATING_MODE)
+1:	GETD	D0Ar6, [D0FrT]
+	CMP	D0Ar6, #CR_DDRC_OPERATING_MODE_SELFREF
+	BNZ	1b
+
+	! CR_DDR_CTRL |= 1 << CR_DDR_POWERDOWN
+	MOVT	D0FrT, #HI(CR_DDR_CTRL)
+	ADD	D0FrT, D0FrT, #LO(CR_DDR_CTRL)
+	GETD	D0Ar6, [D0FrT]
+	OR	D0Ar6, D0Ar6, #(1 << CR_DDR_POWERDOWN_BIT)
+	SETD	[D0FrT], D0Ar6
+
+#ifdef CONFIG_COMET_SUSPEND_MEM_SAFE
+	! Read SAFE mode bit from SOC_Bootstrap register.
+	! If the SoC is in SAFE mode powering off the main power island won't
+	! work, so we can imprecisely emulate the standby with a watchdog reset
+	! instead.
+	MOVT	D0FrT, #HI(PDC_BASE_ADDR + PDC_SOC_BOOTSTRAP)
+	ADD	D0FrT, D0FrT, #LO(PDC_BASE_ADDR + PDC_SOC_BOOTSTRAP)
+	GETD	D0Ar6, [D0FrT]
+	TST	D0Ar6, #LO(PDC_SOC_BOOTSTRAP_SAFE_MODE)
+	BNZ	$Lsafemode_suspend
+#endif /* CONFIG_COMET_SUSPEND_MEM_SAFE */
+
+	!---------------- POWER OFF MAIN POWER ISLAND ----------------!
+
+	! PDC_SOC_POWER = 0
+	MOVT	A0.2,#HI(PDC_BASE_ADDR + PDC_SOC_POWER)
+	ADD	A0.2,A0.2,#LO(PDC_BASE_ADDR + PDC_SOC_POWER)
+	MOV	D0Re0, #0
+	GETD	D1Re0, [A0.2]
+	SETD	[A0.2], D0Re0
+
+	! allow some time for it to work
+	MPAUSE 100000
+
+	!--------------- IF IT DIDN'T WORK, CLEANUP AND RETURN ---------------!
+
+	! restore PDC_SOC_POWER
+	SETD	[A0.2], D1Re0
+
+#ifdef CONFIG_COMET_SUSPEND_MEM_SAFE
+	B	$Lsuspend_failed
+$Lsafemode_suspend:
+	!---------------- STANDBY UNTIL TRIGGER FIRES ----------------!
+
+	wait_for_wakeup D0Re0, D1Ar1
+
+	!---------------- SOFT RESET THE SYSTEM TO EMULATE RESUME  ------------!
+
+	! PDC_WD_SW_RESET = 1
+	MOVT	A0.2,#HI(PDC_BASE_ADDR + PDC_WD_SW_RESET)
+	ADD	A0.2,A0.2,#LO(PDC_BASE_ADDR + PDC_WD_SW_RESET)
+	MOV	D1Re0, #1
+	SETD	[A0.2], D1Re0
+
+	! allow some time for it to work
+	MPAUSE 100000
+
+	!--------------- IF IT DIDN'T WORK, RETURN ---------------!
+$Lsuspend_failed:
+#endif /* CONFIG_COMET_SUSPEND_MEM_SAFE */
+
+	! powerup the DDR clock and pads
+	! CR_DDR_CTRL &= ~(1 << CR_DDR_POWERDOWN_BIT)
+	MOVT	D0FrT, #HI(CR_DDR_CTRL)
+	ADD	D0FrT, D0FrT, #LO(CR_DDR_CTRL)
+	GETD	D0Ar6, [D0FrT]
+	AND	D0Ar6, D0Ar6, #(~(1 << CR_DDR_POWERDOWN_BIT))
+	SETD	[D0FrT], D0Ar6
+
+	! take DDR out of self refresh
+	! CR_DDRC_SELFREF_EN = 0
+	MOVT	D0FrT, #HI(CR_DDRC_SELFREF_EN)
+	ADD	D0FrT, D0FrT, #LO(CR_DDRC_SELFREF_EN)
+	MOV	D0Ar6, #0
+	SETD	[D0FrT], D0Ar6
+
+	! undo board specific preparation
+	COMET_DDR_FINISH_POWERDOWN()
+$Licache_extent:
+
+	!---------------- FINISHED ----------------!
+
+	MOV	PC, D1RtP
+ENDPROC(_metag_comet_suspend)
+
+! Make the length available so the code can be copied into core memory.
+ENTRY(_metag_comet_suspend_sz)
+        .long   . - _metag_comet_suspend
+
+#endif /* CONFIG_METAG_SUSPEND_MEM */
diff --git a/arch/metag/soc/tz1090/tcm.c b/arch/metag/soc/tz1090/tcm.c
new file mode 100644
index 0000000..f3f6d28
--- /dev/null
+++ b/arch/metag/soc/tz1090/tcm.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ */
+
+#include <linux/init.h>
+#include <asm/tcm.h>
+
+#define CORE_I_TAG	0x1
+#define CORE_D_TAG	0x2
+#define INTERNAL_TAG	0x3
+
+#define CORE_RAM_I_BASE		0x80000000
+#define CORE_RAM_I_SIZE		0x0000ffff
+#define CORE_RAM_D_BASE		0x82000000
+#define CORE_RAM_D_SIZE		0x0000ffff
+#define INTERNAL_RAM_BASE	0xE0200000
+#define INTERNAL_RAM_SIZE	0x0005ffff
+
+static struct tcm_region core_i_region = {
+	.tag = CORE_I_TAG,
+	.res = {
+		.name = "Core Code Memory",
+		.start = CORE_RAM_I_BASE,
+		.end = CORE_RAM_I_BASE + CORE_RAM_I_SIZE,
+		.flags = IORESOURCE_MEM,
+	}
+};
+
+static struct tcm_region core_d_region = {
+	.tag = CORE_D_TAG,
+	.res = {
+		.name = "Core Data Memory",
+		.start = CORE_RAM_D_BASE,
+		.end = CORE_RAM_D_BASE + CORE_RAM_D_SIZE,
+		.flags = IORESOURCE_MEM,
+	}
+};
+
+static struct tcm_region internal_region = {
+	.tag = INTERNAL_TAG,
+	.res = {
+		.name = "Internal Memory",
+		.start = INTERNAL_RAM_BASE,
+		.end = INTERNAL_RAM_BASE + INTERNAL_RAM_SIZE,
+		.flags = IORESOURCE_MEM,
+	}
+};
+
+static int __init add_tcm_regions(void)
+{
+	tcm_add_region(&core_i_region);
+	tcm_add_region(&core_d_region);
+	tcm_add_region(&internal_region);
+
+	return 0;
+}
+
+core_initcall(add_tcm_regions);
diff --git a/arch/metag/soc/tz1090/usb.c b/arch/metag/soc/tz1090/usb.c
new file mode 100644
index 0000000..1525507
--- /dev/null
+++ b/arch/metag/soc/tz1090/usb.c
@@ -0,0 +1,190 @@
+/*
+ * usb.c - contains block specific initialisation routines
+ *
+ * Copyright (C) 2009-2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/usb.h>
+
+#define COMET_XTAL5	12000000
+
+/*
+ * Note: To use USB, XTAL5 must be fitted with a 12 or 24MHz XTAL and the jumpers
+ * set appropriately to select this as the clock source for the USB PHY.
+ */
+
+static void vbus_valid(int normal)
+{
+	int lstat;
+	u32 temp;
+
+	/*
+	 * When this bit is clear, host communication is prevented, allowing the
+	 * device to complete power-up and initialisation without the host
+	 * seeing errors.
+	 */
+	__global_lock2(lstat);
+	temp = readl(CR_PERIP_USB_PHY_TUNE_CONTROL);
+	if (normal)
+		temp |= CR_PERIP_USB_VMT_VBUSVALID;
+	else
+		temp &= ~CR_PERIP_USB_VMT_VBUSVALID;
+	writel(temp, CR_PERIP_USB_PHY_TUNE_CONTROL);
+	__global_unlock2(lstat);
+}
+
+static void comet_usb_init(void)
+{
+	int lstat;
+	u8 xtal_val = 1;
+	u32 temp;
+
+	/*enable module clock*/
+	__global_lock2(lstat);
+	temp = readl(CR_TOP_CLKENAB);
+	temp |= (1<<CR_TOP_USB_CLK_1_EN_BIT);
+	writel(temp, CR_TOP_CLKENAB);
+	__global_unlock2(lstat);
+
+	/* Set up REFCLKS, Need to assert power on reset while we change this */
+	if (COMET_XTAL5 == 12000000)
+		xtal_val = 0;
+	else if (COMET_XTAL5 == 24000000)
+		xtal_val = 1;
+	else
+		printk(KERN_WARNING "Comet USB Init: "
+				"Unsupported value for XTAL 5\n");
+
+	/* Delay host comms until we're set up */
+	vbus_valid(0);
+
+	__global_lock2(lstat);
+	/* Reset USB Block*/
+	temp = readl(CR_PERIP_SRST);
+	temp |= CR_PERIP_USB_PHY_PON_RESET_BIT;
+	temp |= CR_PERIP_USB_PHY_PORTRESET_BIT;
+	writel(temp, CR_PERIP_SRST);
+
+	/*Set Clock input to XTAL5 and set frequency */
+	temp = readl(CR_PERIP_USB_PHY_STRAP_CONTROL);
+	temp &= ~CR_PERIP_USB_REFCLKSEL_BITS;
+	temp |= (0 << CR_PERIP_USB_REFCLKSEL_SHIFT);
+	temp &= ~CR_PERIP_USB_REFCLKDIV_BITS;
+	temp |= xtal_val << CR_PERIP_USB_REFCLKDIV_SHIFT;
+	writel(temp, CR_PERIP_USB_PHY_STRAP_CONTROL);
+
+	/* Release reset of USB block */
+	temp = readl(CR_PERIP_SRST);
+	temp &= ~CR_PERIP_USB_PHY_PON_RESET_BIT;
+	temp &= ~CR_PERIP_USB_PHY_PORTRESET_BIT;
+	writel(temp, CR_PERIP_SRST);
+
+	__global_unlock2(lstat);
+
+	/*Now configure the PHY*/
+
+	/* Turn isolation off (done in sample code, not sure why) */
+	__global_lock2(lstat);
+	temp = readl(CR_PERIP_USB_PHY_STRAP_CONTROL);
+	temp |= CR_PERIP_USB_ISO_PHY_BIT;
+	writel(temp, CR_PERIP_USB_PHY_STRAP_CONTROL);
+	__global_unlock2(lstat);
+
+	/*Wait until PHY and UTMI are ready */
+#if 0 /*THIS CODE WON'T WORK FOR ES1 DUE TO A HARDWARE PROBLEM*/
+	while (1) {
+		u32 temp2 = readl(CR_PERIP_USB_PHY_STATUS);
+		if ((temp2 & CR_PERIP_USB_RX_PHY_CLK) &&
+				(temp2 & CR_PERIP_USB_RX_UTMI_CLK))
+			break;
+	}
+#endif
+}
+
+static struct dwc_otg_board comet_usb_board_data = {
+	.vbus_valid = vbus_valid,
+};
+
+static struct resource comet_usb_resources[] = {
+	[0] = {
+		.start          = USB_IRQ_NUM,
+		/* mapped in comet_usb_setup() */
+		.flags          = IORESOURCE_IRQ,
+	},
+	[1] = {
+		.start          = USB_BASE_ADDR,
+		.end            = USB_BASE_ADDR + USB_SIZE,
+		.flags          = IORESOURCE_MEM,
+	},
+};
+
+
+static u64 usb_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device comet_usb_device = {
+	.name = "dwc_otg",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(comet_usb_resources),
+	.resource = comet_usb_resources,
+	.dev = {
+		.dma_mask      = &usb_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &comet_usb_board_data,
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+static int comet_usb_suspend(void)
+{
+	return 0;
+}
+
+static void comet_usb_resume(void)
+{
+	comet_usb_init();
+}
+#else
+#define comet_usb_suspend NULL
+#define comet_usb_resume NULL
+#endif	/* CONFIG_PM_SLEEP */
+
+static struct syscore_ops comet_usb_syscore_ops = {
+	.suspend = comet_usb_suspend,
+	.resume  = comet_usb_resume,
+};
+
+int __init comet_usb_setup(const struct dwc_otg_board *board)
+{
+	int ret;
+
+	/* use board specific vbus callbacks */
+	comet_usb_board_data.enable_vbus = board->enable_vbus;
+	comet_usb_board_data.disable_vbus = board->disable_vbus;
+
+	/* map irq */
+	ret = external_irq_map(comet_usb_resources[0].start);
+	if (ret < 0)
+		goto out;
+	comet_usb_resources[0].start = ret;
+	comet_usb_resources[0].end = ret;
+
+
+	comet_usb_init();
+
+	ret = platform_device_register(&comet_usb_device);
+	if (ret)
+		goto out;
+
+	register_syscore_ops(&comet_usb_syscore_ops);
+
+out:
+	return ret;
+}
diff --git a/arch/metag/tbx/tbistring.c b/arch/metag/tbx/tbistring.c
index f90cd08..ea537dd 100644
--- a/arch/metag/tbx/tbistring.c
+++ b/arch/metag/tbx/tbistring.c
@@ -111,4 +111,3 @@
 	/* Return base address of translation data or NULL */
 	return res;
 }
-EXPORT_SYMBOL(__TBITransStr);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725..03cbfa8 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,17 @@
 	  If you compile this as a module, you can still override this
 	  value using the module parameters.
 
+config IMG_LCD
+	tristate "IMG LCD Controller"
+	depends on SOC_TZ1090
+	default n
+	help
+	  If you have a LCD controlled by the Imagination Technologies LCD
+	  controller, say Y.
+
+	  To compile this as a module, choose M here:
+	  the module will be called img_lcd.
+
+	  If unsure, say N.
+
 endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a..2f1eee8 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_KS0108)		+= ks0108.o
 obj-$(CONFIG_CFAG12864B)	+= cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_LCD)		+= img_lcd.o
diff --git a/drivers/auxdisplay/img_lcd.c b/drivers/auxdisplay/img_lcd.c
new file mode 100644
index 0000000..d49d5f5
--- /dev/null
+++ b/drivers/auxdisplay/img_lcd.c
@@ -0,0 +1,628 @@
+/*
+ * IMG LCD controller driver.
+ *
+ * Copyright (C) 2006,2007,2008,2009,2010,2012 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/img_mdc_dma.h>
+
+#include <linux/img_lcd.h>
+
+#define LCD_REG0		0x0
+#define LCD_REG1		0x4
+#define LCD_REG2		0x8
+#define LCD_REG3		0xC
+#define LCD_ISTAT_REG		0x10
+#define LCD_IENABLE_REG		0x14
+#define LCD_ICLEAR_REG		0x18
+#define LCD_DMA_STATUS_REG	0x1C
+#define LCD_DMA_ENABLE_REG	0x20
+#define LCD_DMA_CLEAR_REG	0x24
+#define LCD_DMA_READ_REG	0x28
+#define LCD_DMA_SIZE_REG	0x2C
+
+#define REG0_L_H_DELAY_SHIFT	28
+#define REG0_P_H_WIDTH_SHIFT	24
+#define REG0_P_H_DELAY_SHIFT	20
+#define REG0_D_DELAY_SHIFT	16
+#define REG0_D_PERIOD_SHIFT	0
+
+#define REG1_T_DIV_SHIFT	28
+#define REG1_L_TRIGGER_SHIFT	16
+#define REG1_F_H_WIDTH_SHIFT	8
+#define REG1_F_H_DELAY_SHIFT	4
+#define REG1_L_H_WIDTH_SHIFT	0
+
+#define REG2_CMD		0x00000000
+#define REG2_DATA		0x80000000
+#define REG2_MODE_ALPH		0x00000000
+#define REG2_MODE_GRAPH		0x40000000
+#define REG2_MODE_BIT		0x20000000
+#define REG2_MODE_BYTE		0x10000000
+#define REG2_MODE_NIBBLE	0x00000000
+#define REG2_DB_WIDTH_SHIFT	28
+#define REG2_ORDER		0x08000000
+#define REG2_L_PER_F_SHIFT	16
+#define REG2_M_DELAY_SHIFT	12
+#define REG2_D_PER_L_SHIFT	0
+
+#define REG3_BUSY		4
+
+#define DMA_INT_EMPTY		1
+#define ALL_OP_FIN		0x4
+
+#define MAX_BLOCK_SIZE		PAGE_SIZE
+
+#define IDLE_TIMEOUT		2
+
+enum etimings {
+	data,
+	instruction,
+};
+
+static struct alpha_speed_struct cmd_speed = {
+	/* Default works for an 8bit KS0066U 2x16 display */
+	13517,			/* d_period */
+	3,			/* p_h_width */
+	0,			/* p_h_delay */
+	2			/* t_div */
+};
+
+static struct alpha_speed_struct data_speed = {
+	/* Default works for an 8bit KS0066U 2x16 display */
+	375,			/* d_period */
+	3,			/* p_h_width */
+	0,			/* p_h_delay */
+	2			/* t_div */
+};
+
+struct alpha_device {
+	struct mutex mutex;
+	int irq;
+	struct dma_chan *dma_channel;
+	void *buf;
+	dma_addr_t dmabuf;
+	struct completion dma_complete;
+	/* Default to sending LSB first */
+	unsigned int msb_choice;
+	/* Default to talking in bytes.  Can be modified by ioctl */
+	unsigned int width_bits;
+	void __iomem *regs_base;
+	void (*enable_cs)(void);
+	void (*disable_cs)(void);
+	struct device *dev;
+};
+
+static struct alpha_device alpha_device;
+
+static void program_timing(struct alpha_speed_struct *timing)
+{
+	unsigned int v;
+
+	v = timing->t_div << REG1_T_DIV_SHIFT;
+
+	writel(v, alpha_device.regs_base + LCD_REG1);
+
+	v = timing->p_h_delay << REG0_P_H_DELAY_SHIFT;
+	v |= timing->p_h_width << REG0_P_H_WIDTH_SHIFT;
+	v |= timing->d_period << REG0_D_PERIOD_SHIFT;
+
+	writel(v, alpha_device.regs_base + LCD_REG0);
+}
+
+static void instr_timing(void)
+{
+	program_timing(&cmd_speed);
+}
+
+static void data_timing(void)
+{
+	program_timing(&data_speed);
+}
+
+static int set_width(struct alpha_width_struct __user *addr)
+{
+	struct alpha_width_struct s;
+
+	if (copy_from_user(&s, addr, sizeof(s)))
+		return -EFAULT;
+
+	/* Set the mask here, which is then used in all 'chats' to reg2 */
+	switch (s.width) {
+	case 1:
+		alpha_device.width_bits = REG2_MODE_BIT;
+		break;
+	case 4:
+		alpha_device.width_bits = REG2_MODE_NIBBLE;
+		break;
+	case 8:
+		alpha_device.width_bits = REG2_MODE_BYTE;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (s.msb)
+		alpha_device.msb_choice = REG2_ORDER;
+	else
+		alpha_device.msb_choice = 0;
+
+	return 0;
+}
+
+static int validate_speed_struct(struct alpha_speed_struct __user *addr,
+				 struct alpha_speed_struct *s)
+{
+	if (copy_from_user(s, addr, sizeof(*s)))
+		return -EFAULT;
+
+	/* Do some verification according to the rules on the last page of
+	 * the LCD C2 module TRM.
+	 */
+	if (s->p_h_width + s->p_h_delay >= s->d_period) {
+		dev_warn(alpha_device.dev,
+		       "alpha data timing breaks d_period rule\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(alpha_device.dev, "d_period %d\n", s->d_period);
+	dev_dbg(alpha_device.dev, "p_h_width %d\n", s->p_h_width);
+	dev_dbg(alpha_device.dev, "p_h_delay %d\n", s->p_h_delay);
+	dev_dbg(alpha_device.dev, "t_div %d\n", s->t_div);
+
+	return 0;
+}
+
+static int set_data_speed(struct alpha_speed_struct __user *addr)
+{
+	int ret;
+	struct alpha_speed_struct s;
+
+	ret = validate_speed_struct(addr, &s);
+
+	if (!ret)
+		data_speed = s;
+
+	return ret;
+}
+
+
+static int set_cmd_speed(struct alpha_speed_struct __user *addr)
+{
+	int ret;
+	struct alpha_speed_struct s;
+
+	ret = validate_speed_struct(addr, &s);
+
+	if (!ret)
+		cmd_speed = s;
+
+	return ret;
+}
+
+static int wait_for_idle(void __iomem *regs_base)
+{
+	unsigned long start_time = jiffies;
+
+	while (!time_after_eq(jiffies, start_time + IDLE_TIMEOUT)) {
+		unsigned int status = readl(regs_base + LCD_REG3);
+		if (!(status & REG3_BUSY))
+			return 0;
+	}
+	return 1;
+}
+
+static int send_byte(unsigned char c, enum etimings type)
+{
+	void __iomem *regs_base = alpha_device.regs_base;
+	unsigned int v;
+
+	/* Check for busy and tell the user */
+	if (wait_for_idle(regs_base))
+		return -EIO;
+
+	v = alpha_device.msb_choice | alpha_device.width_bits |	REG2_MODE_ALPH;
+
+	init_completion(&alpha_device.dma_complete);
+
+	/* Put the byte in the FIFO */
+	writel(1, regs_base + LCD_DMA_SIZE_REG);
+	writel(c, regs_base + LCD_DMA_READ_REG);
+
+	/* Enable all operation complete interrupt*/
+	writel(ALL_OP_FIN, regs_base + LCD_ICLEAR_REG); /*clear first*/
+	writel(ALL_OP_FIN, regs_base + LCD_IENABLE_REG);
+
+	/* Setup the clocks and kick off the transfer */
+	if (type == data) {
+		data_timing();
+		v |= REG2_DATA;
+	} else {
+		instr_timing();
+		v |= REG2_CMD;
+	}
+	if (alpha_device.enable_cs)
+		alpha_device.enable_cs();
+
+	writel(v, regs_base + LCD_REG2);
+
+	if (wait_for_completion_interruptible(&alpha_device.dma_complete))
+		return -ERESTARTSYS;
+
+	return 0;
+}
+
+#define BURST_SIZE 4
+
+static int
+send_block(struct alpha_data_block __user *addr, enum etimings timings)
+{
+	void __iomem *regs_base = alpha_device.regs_base;
+	struct dma_chan *dma_channel = alpha_device.dma_channel;
+	struct alpha_data_block block;
+	unsigned int v;
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t txcookie;
+	struct dma_slave_config conf;
+
+	if (copy_from_user(&block, addr, sizeof(block)))
+		return -EFAULT;
+
+	if (!block.len)
+		return -EINVAL;
+
+	if (block.len > MAX_BLOCK_SIZE)
+		return -EINVAL;
+
+	/* Check for busy and tell the user */
+	if (wait_for_idle(regs_base))
+		return -EIO;
+
+	if (copy_from_user(alpha_device.buf, block.p, block.len))
+		return -EFAULT;
+
+	init_completion(&alpha_device.dma_complete);
+
+	conf.direction = DMA_MEM_TO_DEV;
+	conf.dst_addr = (unsigned int)regs_base + LCD_DMA_READ_REG;
+	conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	conf.dst_maxburst = BURST_SIZE;
+
+	dmaengine_slave_config(dma_channel, &conf);
+	desc = dmaengine_prep_slave_single(dma_channel, alpha_device.dmabuf,
+					   block.len, DMA_MEM_TO_DEV,
+					   DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
+
+	if (!desc)
+		return -ENOMEM;
+
+	if (timings == data)
+		data_timing();
+	else
+		instr_timing();
+
+	writel(block.len, regs_base + LCD_DMA_SIZE_REG);
+
+	/*Enable all operation complete interrupt*/
+	writel(ALL_OP_FIN, regs_base + LCD_ICLEAR_REG); /*clear first*/
+	writel(ALL_OP_FIN, regs_base + LCD_IENABLE_REG);
+
+	txcookie = dmaengine_submit(desc);
+	dma_async_issue_pending(dma_channel);
+
+	/* Kick off the transfer */
+	if (timings == data)
+		v = alpha_device.msb_choice | REG2_DATA | REG2_MODE_ALPH;
+	else
+		v = alpha_device.msb_choice | REG2_CMD | REG2_MODE_ALPH;
+
+	v |= alpha_device.width_bits;
+
+	if (alpha_device.enable_cs)
+		alpha_device.enable_cs();
+
+	writel(v, regs_base + LCD_REG2);
+
+	if (wait_for_completion_interruptible(&alpha_device.dma_complete))
+		return -ERESTARTSYS;
+
+	return 0;
+}
+
+static irqreturn_t alpha_dma_irq(int irq, void *dev_id)
+{
+	void __iomem *regs_base = dev_id;
+
+	writel(0, regs_base + LCD_IENABLE_REG);
+	writel(ALL_OP_FIN, regs_base + LCD_ICLEAR_REG);
+
+#ifdef CONFIG_SOC_TZ1090
+	{
+		u32 rem = readl(regs_base + LCD_DMA_SIZE_REG);
+
+		/*
+		 * Check that this is not a spurious interrupt - this can
+		 * occur due to a h/w problem whereby FIFO underrun causes
+		 * 'Alpha op finished' interrupt
+		 */
+		if (rem)
+			return IRQ_HANDLED;
+	}
+#endif
+
+	if (alpha_device.disable_cs)
+		alpha_device.disable_cs();
+
+	complete(&alpha_device.dma_complete);
+
+	return IRQ_HANDLED;
+}
+
+static long alpha_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long result;
+
+	/*
+	 * extract the type and number bitfields, and don't decode
+	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
+	 */
+	if (_IOC_TYPE(cmd) != ALPHA_IOCTL)
+		return -ENOTTY;
+
+	/* Single ioctl at a time please!
+	 */
+	if (mutex_lock_interruptible(&alpha_device.mutex))
+		return -ERESTARTSYS;
+
+	switch (cmd) {
+	case ALPHA_INSTR:
+		pr_debug(" instr %#x\n", (char)arg);
+		result = send_byte((char)arg, instruction);
+		pr_debug(" instr done\n");
+		break;
+
+	case ALPHA_DATA:
+		pr_debug(" data %#x\n", (char)arg);
+		result = send_byte((char)arg, data);
+		pr_debug(" data done\n");
+		break;
+
+	case ALPHA_DATA_BLOCK:
+		pr_debug(" data block %#lx\n", arg);
+		result = send_block((struct alpha_data_block __user *)arg, data);
+		pr_debug(" data block done\n");
+		break;
+
+	case ALPHA_WIDTH:
+		pr_debug(" width\n");
+		result = set_width((struct alpha_width_struct __user *)arg);
+		pr_debug(" width done\n");
+		break;
+
+	case ALPHA_DATA_SPEED:
+		pr_debug(" data speed\n");
+		result = set_data_speed((struct alpha_speed_struct __user *)arg);
+		pr_debug(" data speed done\n");
+		break;
+
+	case ALPHA_COMMAND_SPEED:
+		pr_debug(" cmd speed\n");
+		result = set_cmd_speed((struct alpha_speed_struct __user *)arg);
+		pr_debug(" cmd speed done\n");
+		break;
+
+	case ALPHA_COMMAND_BLOCK:
+		pr_debug(" cmd block");
+		result = send_block((struct alpha_data_block __user *)arg, instruction);
+		pr_debug(" cmd block done\n");
+		break;
+
+	default:
+		result = -EINVAL;
+	}
+
+	mutex_unlock(&alpha_device.mutex);
+
+	return result;
+}
+
+static const struct file_operations alpha_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= alpha_ioctl,
+};
+
+static struct miscdevice alpha_misc_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "alpha_lcd",
+	.fops           = &alpha_fops,
+};
+
+static int __init img_lcd_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *irq_resource, *mem_resource, *dma_resource;
+	struct img_lcd_board *pdata;
+	struct mdc_dma_cookie *cookie;
+	dma_cap_mask_t mask;
+	int result;
+
+	mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_resource) {
+		dev_err(dev, "mem resource not defined\n");
+		result = -ENODEV;
+		goto err_resource;
+	}
+
+	irq_resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_resource) {
+		dev_err(dev, "irq resource not defined\n");
+		result = -ENODEV;
+		goto err_resource;
+	}
+
+	dma_resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!dma_resource) {
+		dev_err(dev, "dma resource not defined\n");
+		result = -ENODEV;
+		goto err_resource;
+	}
+
+	mutex_init(&alpha_device.mutex);
+
+	alpha_device.buf = dma_alloc_coherent(dev, MAX_BLOCK_SIZE,
+					      &alpha_device.dmabuf, GFP_KERNEL);
+
+	if (alpha_device.buf == NULL) {
+		dev_err(&pdev->dev, "failed to allocate DMA buffer\n");
+		result = -ENOMEM;
+		goto err_resource;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie) {
+		result = -ENOMEM;
+		goto err_free_dma_buffer;
+	}
+
+	cookie->periph = dma_resource->start;
+	cookie->req_channel = -1;
+
+	alpha_device.dma_channel = dma_request_channel(mask,
+						       &mdc_dma_filter_fn,
+						       cookie);
+	if (!alpha_device.dma_channel) {
+		dev_err(&pdev->dev, "failed to get DMA channel\n");
+		result = -EBUSY;
+		goto err_free_cookie;
+	}
+
+	alpha_device.regs_base = (void __iomem *)mem_resource->start;
+
+	alpha_device.irq = irq_resource->start;
+
+	if (request_irq(alpha_device.irq, alpha_dma_irq, 0, "img-lcd",
+			alpha_device.regs_base)) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		result = -EBUSY;
+		goto err_free_channel;
+	}
+
+	alpha_device.width_bits = REG2_MODE_BYTE;
+	alpha_device.dev = dev;
+
+	pdata = (struct img_lcd_board *)pdev->dev.platform_data;
+	if (pdata) {
+		alpha_device.enable_cs = pdata->enable_cs;
+		alpha_device.disable_cs = pdata->disable_cs;
+	}
+
+	result = misc_register(&alpha_misc_device);
+
+	if (result) {
+		dev_err(dev, "misc_register failed\n");
+		goto err_free_irq;
+	}
+
+	dev_info(dev, "probed successfully\n");
+
+	kfree(cookie);
+
+	return 0;
+
+err_free_irq:
+	free_irq(alpha_device.irq, NULL);
+err_free_dma_buffer:
+	dma_free_coherent(dev, MAX_BLOCK_SIZE, alpha_device.buf,
+			  alpha_device.dmabuf);
+err_free_channel:
+	dma_release_channel(alpha_device.dma_channel);
+err_free_cookie:
+	kfree(cookie);
+err_resource:
+	return result;
+}
+
+static int __exit img_lcd_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	misc_deregister(&alpha_misc_device);
+	free_irq(alpha_device.irq, NULL);
+	dma_release_channel(alpha_device.dma_channel);
+	dma_free_coherent(dev, MAX_BLOCK_SIZE, alpha_device.buf,
+			  alpha_device.dmabuf);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int img_lcd_suspend(struct platform_device *pdev,
+			       pm_message_t state)
+{
+	/* FIXME Can we do anything here to power down? */
+	return 0;
+}
+
+static int img_lcd_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+#else
+#define img_lcd_suspend NULL
+#define img_lcd_resume NULL
+#endif				/* CONFIG_PM */
+
+MODULE_ALIAS("platform:img-lcd");	/* for platform bus hotplug */
+static struct platform_driver img_lcd_driver = {
+	.driver	= {
+		.name	= "img-lcd",
+		.owner	= THIS_MODULE,
+	},
+	.suspend	= img_lcd_suspend,
+	.resume		= img_lcd_resume,
+	.remove		= __exit_p(img_lcd_remove),
+};
+
+static int __init img_lcd_init(void)
+{
+	return platform_driver_probe(&img_lcd_driver, img_lcd_probe);
+}
+module_init(img_lcd_init);
+
+static void __exit img_lcd_exit(void)
+{
+	platform_driver_unregister(&img_lcd_driver);
+}
+module_exit(img_lcd_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("IMG LCD controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index b05ecab..3c5a09f 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -26,4 +26,13 @@
 
 	help
 	  Driver to enable OMAP interconnect error handling driver.
+
+config TZ1090_SOCIF
+	bool "TZ1090 SOCIF Driver"
+	depends on SOC_TZ1090
+	default y
+	help
+	  Driver to catch TZ1090 SOCIF errors such as when a memory access
+	  times out. Instead of taking the target offline the current state is
+	  logged in case it relates to the bad memory access.
 endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 3c7b53c..7f8fe02 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -7,3 +7,5 @@
 
 # Interconnect bus driver for OMAP SoCs.
 obj-$(CONFIG_OMAP_INTERCONNECT)	+= omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_TZ1090_SOCIF)	+= tz1090-socif.o
diff --git a/drivers/bus/tz1090-socif.c b/drivers/bus/tz1090-socif.c
new file mode 100644
index 0000000..4398e4c
--- /dev/null
+++ b/drivers/bus/tz1090-socif.c
@@ -0,0 +1,188 @@
+/*
+ * TZ1090 SOCIF Error Handling.
+ *
+ * Copyright (C) 2011-2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+/* Register offsets */
+#define REG_TIMEOUT	0x0
+#define REG_STATUS	0x4
+
+/* REG_TIMEOUT fields */
+#define REG_TIMEOUT_FLUSH_M	0x00002000
+#define REG_TIMEOUT_ENABLE_M	0x00001000
+#define REG_TIMEOUT_PERIOD_M	0x00000fff
+#define REG_TIMEOUT_PERIOD_S	0
+
+/* REG_STATUS fields */
+#define REG_STATUS_STATUS_M	0x00000001
+
+/**
+ * struct tz1090_socif_priv - Private driver data.
+ * @reg:		Base address of SOCIF registers.
+ * @err_count:		Count of SOCIF errors.
+ */
+struct tz1090_socif_priv {
+	void __iomem	*reg;
+	unsigned int	err_count;
+};
+
+/**
+ * tz1090_soci_isr() - SOCIF timeout interrupt handler.
+ * @irq:	IRQ number
+ * @dev_id:	Driver priv pointer
+ *
+ * It is useful to know where the timeout is coming from. We cannot find the
+ * memory address that caused the problem, but can print out the pid, PC, and
+ * registers which with some analysis would usually be enough to determine where
+ * the bad access is (assuming it's even emanating from the Meta).
+ */
+static irqreturn_t tz1090_socif_isr(int irq, void *dev_id)
+{
+	struct tz1090_socif_priv *priv = dev_id;
+	struct pt_regs *regs = get_irq_regs();
+	unsigned int err_num = priv->err_count++;
+
+	if (printk_ratelimit()) {
+		pr_info("%s[%d]: SOCIF error #%u at pc %08x sp %08x",
+			current->comm, task_pid_nr(current), err_num,
+			regs->ctx.CurrPC, regs->ctx.AX[0].U0);
+		print_vma_addr(" in ", regs->ctx.CurrPC);
+		print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
+		pr_cont("\n");
+		show_regs(regs);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tz1090_socif_setup() - Setup SOCIF to catch timeouts.
+ * @priv:	Driver priv pointer
+ * @enable:	true to enable timeout, false to disable timeout
+ *
+ * Timeouts can happen when memory is accessed in a way or at a time when the
+ * peripheral doesn't handle it correctly, for example reading from the 2d
+ * block register area. It doesn't catch every case, such as accessing the 2d
+ * block register area with the 2d clock gated.
+ */
+static void tz1090_socif_setup(struct tz1090_socif_priv *priv, bool enable)
+{
+	u32 tmp = 0;
+
+	/* Set up the maximum timeout possible */
+	if (enable)
+		tmp = REG_TIMEOUT_ENABLE_M | REG_TIMEOUT_PERIOD_M;
+	iowrite32(tmp, priv->reg + REG_TIMEOUT);
+}
+
+static int tz1090_socif_probe(struct platform_device *pdev)
+{
+	struct tz1090_socif_priv *priv;
+	struct resource *res_regs;
+	int irq, err;
+
+	/* allocate private driver data */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate driver data\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, priv);
+
+	/* ioremap the registers */
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->reg = devm_request_and_ioremap(&pdev->dev, res_regs);
+	if (!priv->reg) {
+		dev_err(&pdev->dev, "cannot request/ioremap registers\n");
+		return -EIO;
+	}
+
+	/* get IRQ number */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ resource (%d)\n",
+			irq);
+		return irq;
+	}
+
+	/* set up irq */
+	err = devm_request_irq(&pdev->dev, irq, tz1090_socif_isr,
+			       IRQF_TRIGGER_RISING, "tz1090-socif", priv);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register IRQ %u (%d)\n",
+			irq, err);
+		return err;
+	}
+
+	/* set up timeout to trigger interrupt */
+	tz1090_socif_setup(priv, true);
+
+	dev_info(&pdev->dev, "SOCIF timeout enabled (%u cycles)\n",
+		 REG_TIMEOUT_PERIOD_M);
+
+	return 0;
+}
+
+static int tz1090_socif_remove(struct platform_device *pdev)
+{
+	struct tz1090_socif_priv *priv = platform_get_drvdata(pdev);
+
+	/* disable timeout */
+	tz1090_socif_setup(priv, false);
+
+	return 0;
+}
+
+
+#if defined(CONFIG_PM_SLEEP)
+static int tz1090_socif_resume(struct device *dev)
+{
+	struct tz1090_socif_priv *priv = dev_get_drvdata(dev);
+
+	/* enable timeout */
+	tz1090_socif_setup(priv, true);
+	return 0;
+}
+#else
+#define tz1090_socif_resume NULL
+#endif	/* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(tz1090_socif_pmops, NULL, tz1090_socif_resume);
+
+static const struct of_device_id tz1090_socif_match[] = {
+	{ .compatible = "img,tz1090-socif" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, tz1090_socif_match);
+
+static struct platform_driver tz1090_socif_driver = {
+	.driver = {
+		.name		= "tz1090-socif",
+		.owner		= THIS_MODULE,
+		.of_match_table	= tz1090_socif_match,
+		.pm		= &tz1090_socif_pmops,
+	},
+	.probe = tz1090_socif_probe,
+	.remove = tz1090_socif_remove,
+};
+
+module_platform_driver(tz1090_socif_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("TZ1090 SOCIF Error Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3bb6fa3..f0ebb55 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -49,6 +49,28 @@
          If you have an SGI Altix with an attached SABrick
          say Y or M here, otherwise say N.
 
+config IMG_UCCP
+	bool "IMG Universal Communications Core Platform (UCCP) support"
+	depends on METAG
+	help
+	  This enables the UCCP driver, which allows userspace UCCP loaders
+	  such as uccpld to load and start binaries on the UCCPs.
+
+config IMG_SGX2D
+	bool "PowerVR SGX 2D block support"
+	depends on METAG && HAVE_CLK
+	help
+	  This enables the SGX 2D driver, which allows userland programs
+	  to access the 2D block slave port of the SGX.
+
+config IMG_EVT
+	bool "IMG Event Timestamp driver"
+	depends on METAG
+	help
+	  This enables the IMG timestamp driver which allows userland
+	  to setup the event counter/timestamp hardware and provides
+	  an atomic read of the timestamp along with the 1MHz Timer.
+
 source "drivers/tty/serial/Kconfig"
 
 config TTY_PRINTK
@@ -283,7 +305,8 @@
 config RTC
 	tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
 	depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
-			&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
+			&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN \
+			&& !UML && !METAG
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -331,7 +354,7 @@
 
 config GEN_RTC
 	tristate "Generic /dev/rtc emulation"
-	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML && !METAG
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d..33bf405 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -62,3 +62,7 @@
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM)		+= tile-srom.o
+
+obj-$(CONFIG_IMG_UCCP)		+= uccp.o
+obj-$(CONFIG_IMG_SGX2D)		+= sgx2d.o
+obj-$(CONFIG_IMG_EVT)		+= img_event_timer.o
diff --git a/drivers/char/img_event_timer.c b/drivers/char/img_event_timer.c
new file mode 100644
index 0000000..99fc0de
--- /dev/null
+++ b/drivers/char/img_event_timer.c
@@ -0,0 +1,238 @@
+/*
+ * IMG Event Timer Driver interface
+ *
+ * Copyright (C) 2011  Imagination Technologies
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/cdev.h>
+#include <linux/miscdevice.h>
+#include <linux/img_event_timer.h>
+
+#include <asm/core_reg.h>
+
+#define COUNT_CLK_REG		0x00
+#define CCR_CLKBITS_MASK	0x60000000
+#define CCR_CLKBITS_SHIFT	29
+#define CCR_ENABLE_MASK		0x80000000
+#define CCR_ENABLE_SHIFT	31
+
+#define EVENT_FLAGS_REG		0x04
+#define EVENT_FLAGSCLR_REG	0x08
+#define EVENT0_SELECT_REG	0x0C
+#define EVENT0_TIMESTAMP_REG	0x24
+
+
+static unsigned long base_addr;
+static struct miscdevice miscdev;
+static struct cdev cdev;
+
+int img_evt_setclock(u32 src)
+{
+	u32 temp;
+
+	switch (src) {
+
+	case 0:
+		temp = ioread32((void *)base_addr+COUNT_CLK_REG);
+		temp &= ~CCR_CLKBITS_MASK;
+		temp |= (0 <<  CCR_CLKBITS_SHIFT);
+		iowrite32(temp, (void *)base_addr+COUNT_CLK_REG);
+		break;
+
+	case 1:
+		temp = ioread32((void *)base_addr+COUNT_CLK_REG);
+		temp &= ~CCR_CLKBITS_MASK;
+		temp |= (2 <<  CCR_CLKBITS_SHIFT);
+		iowrite32(temp, (void *)base_addr+COUNT_CLK_REG);
+		break;
+
+	case 2:
+		temp = ioread32((void *)base_addr+COUNT_CLK_REG);
+		temp &= ~CCR_CLKBITS_MASK;
+		temp |= (3 <<  CCR_CLKBITS_SHIFT);
+		iowrite32(temp, (void *)base_addr+COUNT_CLK_REG);
+		break;
+
+	default:
+		return -ERANGE;
+
+	}
+
+	return 0;
+
+}
+
+int img_evt_setevent(struct evt_event event)
+{
+	if (event.counter < EVT_MAX_COUNTERS) {
+		iowrite32(event.source, (void *)base_addr + EVENT0_SELECT_REG +
+						event.counter * 4);
+		return 0;
+	} else {
+		return -ERANGE;
+	}
+
+}
+
+int img_evt_gettimestamp(struct evt_event *event)
+{
+	unsigned long flags;
+	struct timespec timeofday;
+
+	if (event->counter < EVT_MAX_COUNTERS) {
+		local_irq_save(flags);
+		/* TODO: is this order important? */
+		event->txtimer = __core_reg_get(TXTIMER);
+		event->timestamp = ioread32((void *)base_addr +
+						    EVENT0_TIMESTAMP_REG +
+						    event->counter * 4);
+		 ktime_get_ts(&timeofday);
+		 event->timeofday_sec = timeofday.tv_sec;
+		 event->timeofday_ns = timeofday.tv_nsec;
+
+		local_irq_restore(flags);
+		return 0;
+	} else {
+		return -ERANGE;
+	}
+}
+
+
+static long
+ioctl_img_evt(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct evt_clock clock;
+	struct evt_event event;
+	int ret;
+
+	switch (cmd) {
+	default:
+		return -ENOTTY;
+
+	case EVTIO_SETCLOCK:
+		if (copy_from_user(&clock, argp, sizeof(clock)))
+			return -EFAULT;
+
+		ret = img_evt_setclock(clock.src);
+		if (ret)
+			return ret;
+
+		break;
+
+	case EVTIO_SETEVENTSRC:
+		if (copy_from_user(&event, argp, sizeof(event)))
+			return -EFAULT;
+
+		ret = img_evt_setevent(event);
+		if (ret)
+			return ret;
+
+		break;
+
+	case EVTIO_GETEVTS:
+		if (copy_from_user(&event, argp, sizeof(event)))
+			return -EFAULT;
+
+		ret = img_evt_gettimestamp(&event);
+		if (ret)
+			return ret;
+
+		if (copy_to_user(argp, &event, sizeof(event)))
+			return -EFAULT;
+
+		break;
+	}
+
+	return 0;
+}
+
+static const struct file_operations img_evt_fops = {
+	.unlocked_ioctl		= ioctl_img_evt,
+};
+
+static int __init img_evt_probe(struct platform_device *pdev)
+{
+	struct resource *mem_resource;
+	int devno;
+	int error;
+
+	mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!mem_resource) {
+		dev_info(&pdev->dev, "failed to get resources\n");
+		return -ENODEV;
+	}
+
+	base_addr = mem_resource->start;
+
+	/*enable module*/
+	iowrite32(CCR_ENABLE_MASK, (void *)base_addr+COUNT_CLK_REG);
+
+	miscdev.minor = MISC_DYNAMIC_MINOR;
+	miscdev.fops = &img_evt_fops;
+	miscdev.name = "img-evt";
+
+	error = misc_register(&miscdev);
+	if (error) {
+		dev_err(&pdev->dev, "unable to register misc device\n");
+		goto err_misc_reg;
+	}
+
+	devno = MKDEV(MISC_MAJOR, miscdev.minor);
+	cdev_init(&cdev, &img_evt_fops);
+	error = cdev_add(&cdev, devno, 1);
+	if (error) {
+		dev_err(&pdev->dev, "unable to add cdev\n");
+		goto err_cdev;
+	}
+
+	dev_info(&pdev->dev, "Event Timer Registered\n");
+
+	return 0;
+
+err_cdev:
+	misc_deregister(&miscdev);
+err_misc_reg:
+	return error;
+}
+
+static int img_evt_remove(struct platform_device *pdev)
+{
+	cdev_del(&cdev);
+	misc_deregister(&miscdev);
+	return 0;
+}
+
+MODULE_ALIAS("img-eventtimer");	/* for platform bus hotplug */
+static struct platform_driver img_evt_driver = {
+	.driver	= {
+		.name	= "img-eventtimer",
+		.owner	= THIS_MODULE,
+	},
+	.remove		= img_evt_remove,
+};
+
+static int __init img_evt_init(void)
+{
+	return platform_driver_probe(&img_evt_driver, img_evt_probe);
+}
+module_init(img_evt_init);
+
+static void __exit img_evt_exit(void)
+{
+	platform_driver_unregister(&img_evt_driver);
+}
+module_exit(img_evt_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("IMG Event Timer");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/char/sgx2d.c b/drivers/char/sgx2d.c
new file mode 100644
index 0000000..140b822
--- /dev/null
+++ b/drivers/char/sgx2d.c
@@ -0,0 +1,572 @@
+/*
+ * PowerVR SGX 2D block driver.
+ * SGX2D driver to allow writing to slave port.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/mman.h>
+#include <linux/clk.h>
+#include <linux/sgx2d.h>
+#include <linux/slab.h>
+
+#define NUM_REGIONS 2
+
+#define SGX2D_DEV_BUSY 0
+
+struct sgx2d_dev {
+	struct sgx2d_pdata *pdata;
+	struct miscdevice misc;
+	struct cdev cdev;
+	unsigned int irq;
+	struct clk *twod_clk;
+	unsigned long flags;	/* SGX2D_DEV_* */
+	void __iomem *mem_io;
+	unsigned long mmap_phys;
+	unsigned int mmap_len;
+	/* 0: slave port, 1: reg base */
+	unsigned int mmap_offset[NUM_REGIONS];
+
+	struct sgx2d_pdata_reg *reg_idle;
+	struct sgx2d_pdata_reg *reg_srst;
+	struct sgx2d_pdata_reg *reg_base;
+
+	wait_queue_head_t idleq;
+
+	/* suspend state */
+	u32 suspend_base;
+};
+
+static u32 sgx2d_read_reg(struct sgx2d_dev *dev, struct sgx2d_pdata_reg *reg)
+{
+	return readl(dev->mem_io + dev->mmap_offset[reg->region] +
+			reg->reg.offset);
+}
+
+static void sgx2d_write_reg(struct sgx2d_dev *dev, struct sgx2d_pdata_reg *reg,
+			    u32 val)
+{
+	writel(val, dev->mem_io + dev->mmap_offset[reg->region] +
+			reg->reg.offset);
+}
+
+static u32 sgx2d_read_field(struct sgx2d_dev *dev, struct sgx2d_pdata_reg *reg)
+{
+	u32 val = sgx2d_read_reg(dev, reg);
+	return (val & reg->reg.mask) >> reg->reg.shift;
+}
+
+static int sgx2d_idle(struct sgx2d_dev *dev)
+{
+	return sgx2d_read_field(dev, dev->reg_idle);
+}
+
+static int sgx2d_soft_reset(struct sgx2d_dev *dev)
+{
+	int lstat;
+	u32 val;
+	struct sgx2d_pdata_reg *reg = dev->reg_srst;
+
+	TBI_LOCK(lstat);
+	val = sgx2d_read_reg(dev, reg);
+	sgx2d_write_reg(dev, reg, val | reg->reg.mask);
+	sgx2d_write_reg(dev, reg, val & ~reg->reg.mask);
+	TBI_UNLOCK(lstat);
+
+	wake_up_interruptible(&dev->idleq);
+
+	return 0;
+}
+
+static irqreturn_t sgx2d_isr(int irq, void *dev_id)
+{
+	struct sgx2d_dev *dev = dev_id;
+
+	/* Wake up any processes waiting for idleness */
+	if (sgx2d_idle(dev))
+		wake_up_interruptible(&dev->idleq);
+
+	return IRQ_HANDLED;
+}
+
+static int sgx2d_wait_idle(struct sgx2d_dev *dev, struct file *filp)
+{
+	while (!sgx2d_idle(dev)) {
+		DEFINE_WAIT(wait);
+
+		if (filp && (filp->f_flags & O_NONBLOCK))
+			return -EAGAIN;
+
+		prepare_to_wait(&dev->idleq, &wait, TASK_INTERRUPTIBLE);
+		if (!sgx2d_idle(dev))
+			schedule();
+		finish_wait(&dev->idleq, &wait);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+static int sgx2d_open(struct inode *inod, struct file *filp)
+{
+	struct sgx2d_dev *dev = container_of(inod->i_cdev, struct sgx2d_dev,
+					     cdev);
+	/* one at a time */
+	if (test_and_set_bit_lock(SGX2D_DEV_BUSY, &dev->flags))
+		return -EBUSY;
+
+	filp->private_data = dev;
+
+	/* enable the 2d clock */
+	clk_prepare_enable(dev->twod_clk);
+	return 0;
+}
+
+static int sgx2d_release(struct inode *inod, struct file *filp)
+{
+	struct sgx2d_dev *dev = container_of(inod->i_cdev, struct sgx2d_dev,
+					     cdev);
+	/* disable the 2d clock */
+	clk_disable_unprepare(dev->twod_clk);
+
+	clear_bit_unlock(SGX2D_DEV_BUSY, &dev->flags);
+	return 0;
+}
+
+static int sgx2d_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct sgx2d_dev *dev = filp->private_data;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = vma->vm_end - vma->vm_start;
+
+	/* if unmappable, don't even bother */
+	if (dev->pdata->unmappable)
+		return -ENODEV;
+
+	/* mustn't start after the end */
+	if (offset >= dev->mmap_len)
+		return -EINVAL;
+	/* or end after the end */
+	if (offset + len > dev->mmap_len)
+		return -EINVAL;
+
+	/* remap_pfn_range will mark the range VM_IO */
+	if (remap_pfn_range(vma, vma->vm_start,
+			    (dev->mmap_phys + offset) >> PAGE_SHIFT,
+			    len, PAGE_SHARED))
+		return -EAGAIN;
+	return 0;
+}
+
+static struct sgx2d_pdata_reg *sgx2d_find_reg(struct sgx2d_dev *dev,
+					      unsigned int id)
+{
+	struct sgx2d_pdata *pdata = dev->pdata;
+	struct sgx2d_pdata_reg *reg = pdata->regs;
+	int i;
+	for (i = 0; i < pdata->reg_count; ++i, ++reg)
+		if (reg->reg.id == id)
+			return reg;
+	return NULL;
+}
+
+static long sgx2d_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct sgx2d_dev *dev = filp->private_data;
+	struct sgx2d_pdata *pdata = dev->pdata;
+	struct sgx2d_reg reg;
+	struct sgx2d_meminfo meminfo;
+	struct sgx2d_pdata_reg *pdata_reg;
+
+	switch (cmd) {
+	default:
+		return -EINVAL;
+	case SGX2DIO_WAITIDLE:
+		return sgx2d_wait_idle(dev, filp);
+	case SGX2DIO_SOFTRST:
+		return sgx2d_soft_reset(dev);
+	case SGX2DIO_GETVERS:
+		if (copy_to_user(argp, &pdata->vers, sizeof(pdata->vers)))
+			return -EFAULT;
+		break;
+	case SGX2DIO_GETREG:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		if (reg.id == SGX2D_REG_INVALID)
+			return -EINVAL;
+		if (reg.id & SGX2D_REG_PRIVATE)
+			return -EINVAL;
+
+		/* find the actual register information */
+		pdata_reg = sgx2d_find_reg(dev, reg.id);
+		if (!pdata_reg)
+			return -EINVAL;
+		reg = pdata_reg->reg;
+		reg.offset += dev->mmap_offset[pdata_reg->region];
+
+		if (copy_to_user(argp, &reg, sizeof(reg)))
+			return -EFAULT;
+		break;
+	case SGX2DIO_GETMEM:
+		if (pdata->unmappable) {
+			meminfo.addr = (void *)dev->mmap_phys;
+			/* flags = 0 indicates that mmap shouldn't be used */
+			meminfo.flags = 0;
+		} else {
+			meminfo.addr = NULL;
+			meminfo.flags = MAP_SHARED;
+		}
+		meminfo.len = dev->mmap_len;
+		if (copy_to_user(argp, &meminfo, sizeof(meminfo)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct file_operations sgx2d_fops = {
+	.owner			= THIS_MODULE,
+	.open			= sgx2d_open,
+	.release		= sgx2d_release,
+	.mmap			= sgx2d_mmap,
+	.unlocked_ioctl		= sgx2d_ioctl,
+};
+
+static struct sgx2d_pdata_reg comet_sgx2d_pdata_regs[] = {
+	{
+		.reg		= {
+			.id	= SGX2D_REG_SLAVEPORT,
+			.offset	= 0x00,
+		},
+		.region		= 0,	/* slave port */
+	},
+	{
+		.reg		= {
+			.id	= SGX2D_REG_BLTCOUNT,
+			.offset	= 0x0c,		/* CR_2D_STATUS */
+			.mask	= 0x0ffffff0,	/* CR_2D_BLIT_STATUS_COMPLETE */
+			.shift	= 4,
+		},
+		.region		= 1,	/* HEP */
+	},
+	{
+		.reg		= {
+			.id	= SGX2D_REG_BUSY,
+			.offset	= 0x0c,		/* CR_2D_STATUS */
+			.mask	= 0x00000004,	/* CR_2D_BLIT_STATUS_BUSY */
+			.shift	= 2,
+		},
+		.region		= 1,	/* HEP */
+	},
+	{
+		.reg		= {
+			.id	= SGX2D_REG_IDLE,
+			.offset	= 0x0c,		/* CR_2D_STATUS */
+			.mask	= 0x00000002,	/* CR_2D_IDLE */
+			.shift	= 1,
+		},
+		.region		= 1,	/* HEP */
+	},
+	{
+		.reg		= {
+			.id	= SGX2D_REG_BASEADDR,
+			.offset	= 0x24,		/* CR_2D_MEM_BASE_ADDR */
+		},
+		.region		= 1,	/* HEP */
+	},
+	{
+		.reg		= {
+			.id	= SGX2D_REG_SRST,
+			.offset	= 0x00,		/* CR_HEP_SRST */
+			.mask	= 0x00000002,	/* CR_2D_SOFT_RESET */
+			.shift	= 1,
+		},
+		.region		= 1,	/* HEP */
+	},
+};
+
+static struct sgx2d_pdata comet_sgx2d_pdata = {
+	.vers		= {
+		.arch_fam	= SGX2D_FAM_COMET,
+	},
+	.regs		= comet_sgx2d_pdata_regs,
+	.reg_count	= ARRAY_SIZE(comet_sgx2d_pdata_regs),
+	.unmappable	= 1,
+};
+
+static const struct of_device_id sgx2d_match[] = {
+	{ .compatible = "img,tz1090-2d", .data = &comet_sgx2d_pdata, },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sgx2d_match);
+
+static int sgx2d_process_pdata(struct sgx2d_pdata *pdata)
+{
+	struct sgx2d_pdata_reg *reg = pdata->regs;
+	int i;
+	for (i = 0; i < pdata->reg_count; ++i, ++reg) {
+		if (!reg->reg.mask)
+			reg->reg.mask = -1;
+		if (reg->region >= NUM_REGIONS)
+			reg->reg.id = SGX2D_REG_INVALID;
+	}
+	return 0;
+}
+
+static int sgx2d_probe(struct platform_device *pdev)
+{
+	struct sgx2d_dev *dev;
+	struct sgx2d_pdata *pdata;
+	struct resource *res_irq;
+	struct resource *res_slaveport;
+	struct resource *res_regs;
+	int devno;
+	int error;
+	const struct of_device_id *of_id;
+	struct device_node *node = pdev->dev.of_node;
+
+	/* get platform data from match table or pdev */
+	if (node && (of_id = of_match_node(sgx2d_match, node)))
+		pdata = (struct sgx2d_pdata *)of_id->data;
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data defined\n");
+		error = -EINVAL;
+		goto err_pdata;
+	}
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res_irq == NULL) {
+		dev_err(&pdev->dev, "cannot find IRQ resource\n");
+		error = -ENOENT;
+		goto err_pdata;
+	}
+
+	res_slaveport = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_slaveport == NULL) {
+		dev_err(&pdev->dev, "cannot find slave port resource\n");
+		error = -ENOENT;
+		goto err_pdata;
+	}
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		error = -ENOENT;
+		goto err_pdata;
+	}
+
+	/* correct any pdata shortcuts */
+	error = sgx2d_process_pdata(pdata);
+	if (error)
+		goto err_pdata;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		error = -ENOMEM;
+		goto err_dev;
+	}
+	platform_set_drvdata(pdev, dev);
+	dev->pdata = pdata;
+	init_waitqueue_head(&dev->idleq);
+
+	dev->twod_clk = of_clk_get(node, 0);
+	if (IS_ERR(dev->twod_clk)) {
+		dev_warn(&pdev->dev, "could not get clock resource\n");
+		error = PTR_ERR(dev->twod_clk);
+		goto err_clk;
+	}
+
+	dev->reg_idle = sgx2d_find_reg(dev, SGX2D_REG_IDLE);
+	if (!dev->reg_idle) {
+		dev_err(&pdev->dev, "no idle field found in platform data\n");
+		error = -EINVAL;
+		goto err_find_regs;
+	}
+
+	dev->reg_srst = sgx2d_find_reg(dev, SGX2D_REG_SRST);
+	if (!dev->reg_srst) {
+		dev_err(&pdev->dev,
+			"no soft reset field found in platform data\n");
+		error = -EINVAL;
+		goto err_find_regs;
+	}
+
+	dev->reg_base = sgx2d_find_reg(dev, SGX2D_REG_BASEADDR);
+	/* it's not the end of the world if there's no base address register */
+
+	/*
+	 * lay out the memory resources in the mmapping
+	 * each region needs to be on separate pages
+	 * regions may start/end midpage
+	 */
+	dev->mmap_phys = min(res_slaveport->start,
+			     res_regs->start) & PAGE_MASK;
+	dev->mmap_len = (max(res_slaveport->end, res_regs->end)
+			 - dev->mmap_phys + PAGE_SIZE - 1) & PAGE_MASK;
+	dev->mmap_offset[0] = res_slaveport->start - dev->mmap_phys;
+	dev->mmap_offset[1] = res_regs->start - dev->mmap_phys;
+
+	/* ioremap the memory resources (combined) */
+	dev->mem_io = ioremap(dev->mmap_phys, dev->mmap_len);
+	if (!dev->mem_io) {
+		error = -EIO;
+		goto err_memio;
+	}
+
+	dev->irq = res_irq->start;
+	error = request_irq(dev->irq, sgx2d_isr, IRQF_TRIGGER_RISING, "sgx2d",
+			    dev);
+	if (error) {
+		dev_err(&pdev->dev, "cannot register IRQ %u\n",
+			dev->irq);
+		error = -EIO;
+		goto err_irq;
+	}
+
+	dev->misc.minor = MISC_DYNAMIC_MINOR;
+	dev->misc.fops = &sgx2d_fops;
+	dev->misc.name = "sgx2d";
+
+	error = misc_register(&dev->misc);
+	if (error) {
+		dev_err(&pdev->dev, "unable to register misc device\n");
+		goto err_misc_reg;
+	}
+
+	devno = MKDEV(MISC_MAJOR, dev->misc.minor);
+	cdev_init(&dev->cdev, &sgx2d_fops);
+	error = cdev_add(&dev->cdev, devno, 1);
+	if (error) {
+		dev_err(&pdev->dev, "unable to add cdev\n");
+		goto err_cdev;
+	}
+
+	/* initialise the hardware */
+	sgx2d_soft_reset(dev);
+
+	dev_info(&pdev->dev, "SGX 2D module loaded\n");
+
+	return 0;
+
+err_cdev:
+	misc_deregister(&dev->misc);
+err_misc_reg:
+	free_irq(dev->irq, dev);
+err_irq:
+	iounmap(dev->mem_io);
+err_memio:
+err_find_regs:
+	clk_put(dev->twod_clk);
+err_clk:
+	kfree(dev);
+err_dev:
+err_pdata:
+	return error;
+}
+
+static int sgx2d_remove(struct platform_device *pdev)
+{
+	struct sgx2d_dev *dev = platform_get_drvdata(pdev);
+
+	misc_deregister(&dev->misc);
+	free_irq(dev->irq, dev);
+	iounmap(dev->mem_io);
+	clk_put(dev->twod_clk);
+	kfree(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int sgx2d_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct sgx2d_dev *dev = platform_get_drvdata(pdev);
+
+	/* preserve the base address */
+	if (dev->reg_base)
+		dev->suspend_base = sgx2d_read_reg(dev, dev->reg_base);
+
+	/* disable the 2d clock if the device file is open */
+	if (test_and_set_bit_lock(SGX2D_DEV_BUSY, &dev->flags)) {
+		/*
+		 * Wait for the block to become idle
+		 * If a command is half written there's not a lot we can do
+		 * unfortunately.
+		 */
+		sgx2d_wait_idle(dev, NULL);
+
+		clk_disable_unprepare(dev->twod_clk);
+	} else {
+		clear_bit_unlock(SGX2D_DEV_BUSY, &dev->flags);
+	}
+	return 0;
+}
+
+static int sgx2d_resume(struct platform_device *pdev)
+{
+	struct sgx2d_dev *dev = platform_get_drvdata(pdev);
+
+	/* re-enable the 2d clock if the device file is open */
+	if (test_and_set_bit_lock(SGX2D_DEV_BUSY, &dev->flags))
+		clk_prepare_enable(dev->twod_clk);
+	else
+		clear_bit_unlock(SGX2D_DEV_BUSY, &dev->flags);
+
+	/* initialise the hardware */
+	sgx2d_soft_reset(dev);
+
+	/* restore the base address */
+	if (dev->reg_base)
+		sgx2d_write_reg(dev, dev->reg_base, dev->suspend_base);
+	return 0;
+}
+#else
+#define sgx2d_suspend NULL
+#define sgx2d_resume NULL
+#endif
+
+static struct platform_driver sgx2d_driver = {
+	.driver = {
+		.name		= "sgx2d",
+		.owner		= THIS_MODULE,
+		.of_match_table	= sgx2d_match,
+	},
+	.probe		= sgx2d_probe,
+	.remove		= sgx2d_remove,
+	.suspend	= sgx2d_suspend,
+	.resume		= sgx2d_resume,
+};
+
+static int __init sgx2d_init(void)
+{
+	return platform_driver_register(&sgx2d_driver);
+}
+
+static void __exit sgx2d_exit(void)
+{
+	platform_driver_unregister(&sgx2d_driver);
+}
+
+module_init(sgx2d_init);
+module_exit(sgx2d_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("PowerVR SGX 2D Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/uccp.c b/drivers/char/uccp.c
new file mode 100644
index 0000000..09d0f7b
--- /dev/null
+++ b/drivers/char/uccp.c
@@ -0,0 +1,566 @@
+/*
+ * IMG Universal Communications Core Platform (UCCP) driver.
+ * UCCP driver to allow setting up of memory, loading of binaries, and
+ * manipulation of MTX registers from userspace.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/uccp.h>
+#include <linux/delay.h>
+
+#define UCC_MCREQ_0			0x10
+#define UCC_MCREQ_STRIDE		0x10
+#define UCC_MCREQ_BULKADDR		0x0
+#define UCC_MCREQ_BULKLIMIT		0x4
+#define UCC_MCREQ_MEMBASE		0x8
+#define UCC_MCREQ_MEMPORT		0xc
+#define UCC_MCREQ_MEMPORT_THIS_MTX	0
+#define UCC_MCREQ_MEMPORT_OTHER_MTX	1
+#define UCC_MCREQ_MEMPORT_NOT_MTX	2
+
+#define MTX_TXUXXRXDT			0xf8
+#define MTX_TXUXXRXRQ			0xfc
+
+#define MCP_SYS_HACC_CMD		0x1040
+#define MCP_SYS_HACC_CMD_ADDR		0x0003ffff
+#define MCP_SYS_HACC_CMD_PACK		0x20000000
+#define MCP_SYS_HACC_CMD_RDNW		0x40000000
+#define MCP_SYS_HACC_CMD_VAL		0x80000000
+#define MCP_SYS_HACC_WDATA		0x1044
+#define MCP_SYS_HACC_RDATA		0x1048
+
+#define UCCP_SYSCONTROL			0x400
+#define UCCP_RESETCTRL			(UCCP_SYSCONTROL + 0x028)
+#define UCCP_RESETCTRL_ALL_EXCEPT_BUS	0xfffe
+#define UCCP_COREID			(UCCP_SYSCONTROL + 0x070)
+#define UCCP_COREID_GROUPID		0xff000000
+#define UCCP_COREID_GROUPID_SHIFT	24
+#define UCCP_COREID_COREID		0x00ff0000
+#define UCCP_COREID_COREID_SHIFT	16
+#define UCCP_COREID_CONFIG		0x0000ffff
+#define UCCP_COREID_CONFIG_SHIFT	0
+#define UCCP_COREID_GROUPID_UCCP	0xe
+#define UCCP_COREID_GROUPID_UCC		0xe
+#define UCCP_COREID_COREID_UCCP		0x1
+#define UCCP_COREID_COREID_UCC		0x2
+#define UCCP_COREREV			(UCCP_SYSCONTROL + 0x078)
+#define UCCP_COREREV_MAJOR		0x00ff0000
+#define UCCP_COREREV_MAJOR_SHIFT	16
+#define UCCP_COREREV_MINOR		0x0000ff00
+#define UCCP_COREREV_MINOR_SHIFT	8
+#define UCCP_COREREV_REV		0x000000ff
+#define UCCP_COREREV_REV_SHIFT		0
+
+
+/* Single, driver-wide instance of the UCCP driver state. */
+static struct uccp_priv {
+	struct uccp_pdata *pdata;	/* platform data from the pdev */
+	struct class *uccp_class;	/* "uccp" device class */
+	int uccp_major;			/* char device major number */
+
+	struct uccp_region *full_region; /* region of type UCCP_REGION_ALL */
+	void __iomem *region_map;	/* kernel mapping of full_region */
+} priv;
+
+/* Physical start of the full UCCP region, rounded up to a page boundary. */
+static inline unsigned long get_region_phys(void)
+{
+	return ((unsigned long)priv.full_region->physical + (PAGE_SIZE-1))
+		& PAGE_MASK;
+}
+
+/*
+ * Usable size of the full UCCP region: the bytes remaining after the
+ * start is page-aligned up, rounded down to whole pages.
+ */
+static inline unsigned int get_region_size(void)
+{
+	return ((priv.full_region->size -
+			(get_region_phys() - priv.full_region->physical)))
+		& PAGE_MASK;
+}
+
+/*
+ * mmap handler: map (part of) the page-aligned UCCP memory region into
+ * userspace.  The file offset selects where in the region the mapping
+ * starts.  Previously the offset was validated but then ignored by
+ * remap_pfn_range() (mapping always began at the region start) and the
+ * requested length was never bounded against the region size.
+ */
+static int mmap_uccp(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = vma->vm_end - vma->vm_start;
+
+	/* the requested window must lie entirely within the region */
+	if (offset >= get_region_size() ||
+	    len > get_region_size() - offset)
+		return -EINVAL;
+
+	/* remap_pfn_range will mark the range VM_IO */
+	if (remap_pfn_range(vma, vma->vm_start,
+			    (get_region_phys() + offset) >> PAGE_SHIFT,
+			    len,
+			    PAGE_SHARED))
+		return -EAGAIN;
+	return 0;
+}
+
+/*
+ * Find a memory region by uccp number and type.
+ *
+ * @uccp: core index, or negative to search only the global region list.
+ * The chosen core's regions are searched first; if no match is found the
+ * global (shared) region list is used as a fallback.
+ * Returns NULL when no region of @type exists.
+ */
+static struct uccp_region *find_uccp_region(int uccp, unsigned int type)
+{
+	struct uccp_core *core;
+	int i;
+	if (uccp >= 0) {
+		core = &priv.pdata->cores[uccp];
+		for (i = 0; i < core->num_regions; ++i)
+			if (type == core->regions[i].type)
+				return &core->regions[i];
+	}
+	/* fall back to the global regions shared by all cores */
+	for (i = 0; i < priv.pdata->num_regions; ++i)
+		if (type == priv.pdata->regions[i].type)
+			return &priv.pdata->regions[i];
+	return NULL;
+}
+
+/*
+ * Read a 32-bit host system-bus register of core @uccp.
+ * NOTE(review): readl() is given the raw resource start with no ioremap()
+ * — presumably valid on this architecture; confirm.
+ */
+static unsigned long uccp_host_read(unsigned int uccp, unsigned int regno)
+{
+	struct uccp_core *core = &priv.pdata->cores[uccp];
+	return readl((unsigned long)core->host_sys_bus->start + regno);
+}
+
+/*
+ * Write a 32-bit host system-bus register of core @uccp.
+ * NOTE(review): writel() is given the raw resource start with no ioremap()
+ * — presumably valid on this architecture; confirm.
+ */
+static void uccp_host_write(unsigned int uccp, unsigned int regno,
+			    unsigned int value)
+{
+	struct uccp_core *core = &priv.pdata->cores[uccp];
+	writel(value, (unsigned long)core->host_sys_bus->start + regno);
+}
+
+/*
+ * Find the MCREQ memory port for a memory area.
+ *
+ * Returns UCC_MCREQ_MEMPORT_THIS_MTX or _OTHER_MTX when the area lies in
+ * the internal MTX memory of @uccp or of another core respectively,
+ * UCC_MCREQ_MEMPORT_NOT_MTX when it is in no core's MTX memory, or
+ * -EINVAL when the area straddles an MTX memory boundary.
+ *
+ * Fix: the loop previously looked up find_uccp_region(uccp, ...) on every
+ * iteration, i.e. always the caller's own MTX region, so memory inside
+ * another core's MTX memory was never detected; use the loop index instead.
+ * The unused 'core' local is gone too.
+ */
+static int uccp_mcreq_port(unsigned long uccp, unsigned long physical,
+			    unsigned long size)
+{
+	int c;
+	struct uccp_region *mtx;
+	for (c = 0; c < priv.pdata->num_cores; ++c) {
+		mtx = find_uccp_region(c, UCCP_REGION_MTX);
+		if (!mtx)
+			continue;
+		if (physical >= mtx->physical &&
+		    physical < mtx->physical + mtx->size) {
+			/*
+			 * Check the specified range fits entirely within the
+			 * MTX internal memory - a given mapping cannot cross
+			 * port boundaries
+			 */
+			if (physical + size > mtx->physical + mtx->size)
+				return -EINVAL;
+
+			if (c == uccp)
+				return UCC_MCREQ_MEMPORT_THIS_MTX;
+			else
+				return UCC_MCREQ_MEMPORT_OTHER_MTX;
+		}
+	}
+	return UCC_MCREQ_MEMPORT_NOT_MTX;
+}
+
+/*
+ * Reset MCREQ mapping @index of core @uccp to its inert state: an all-ones
+ * bulk window and the not-MTX memory port.  Always returns 0.
+ */
+static int uccp_mcreq_clear(unsigned int uccp, unsigned int index)
+{
+	struct uccp_core *core = &priv.pdata->cores[uccp];
+	unsigned long base = core->mc_req->start +
+				UCC_MCREQ_0 + index*UCC_MCREQ_STRIDE;
+
+	writel(0xFFFFFFFF, base + UCC_MCREQ_BULKADDR);
+	writel(0xFFFFFFFF, base + UCC_MCREQ_BULKLIMIT);
+	writel(0xFFFFFFFF, base + UCC_MCREQ_MEMBASE);
+	/* named constant instead of magic '2' */
+	writel(UCC_MCREQ_MEMPORT_NOT_MTX, base + UCC_MCREQ_MEMPORT);
+	return 0;
+}
+
+/*
+ * Program MCREQ mapping @index of core @uccp from @mc_req.
+ * Rejects values whose bits would be silently truncated by the hardware
+ * registers, and areas that straddle an MTX memory-port boundary.
+ * Returns 0 on success or -EINVAL.
+ */
+static int uccp_mcreq_write(unsigned int uccp, unsigned int index,
+			     struct uccp_mcreq *mc_req)
+{
+	struct uccp_core *core = &priv.pdata->cores[uccp];
+	unsigned long base = core->mc_req->start +
+				UCC_MCREQ_0 + index*UCC_MCREQ_STRIDE;
+	unsigned long limit = mc_req->bulk + mc_req->size;
+	int port;
+
+	/*
+	 * Restrictions imposed by MCREQ registers. Make sure the user isn't
+	 * supplying values with bits that will be discarded when writing to
+	 * the registers.
+	 */
+	if ((mc_req->bulk & ~0xB03FF000) ||
+	    (limit & ~0xB03FF000) ||
+	    (mc_req->bulk > limit) ||
+	    (mc_req->physical & ~0xFFFFF000))
+		return -EINVAL;
+
+	port = uccp_mcreq_port(uccp, mc_req->physical, mc_req->size);
+	if (port < 0)
+		return -EINVAL;
+
+	/* the registers hold word addresses, hence the >> 2 */
+	writel(mc_req->bulk >> 2,	base + UCC_MCREQ_BULKADDR);
+	writel(limit >> 2,		base + UCC_MCREQ_BULKLIMIT);
+	writel(mc_req->physical >> 2,	base + UCC_MCREQ_MEMBASE);
+	writel(port,			base + UCC_MCREQ_MEMPORT);
+	return 0;
+}
+
+/*
+ * Read back MCREQ mapping @index of core @uccp into @mc_req, converting
+ * the word addresses held by the registers back to byte addresses.
+ */
+static void uccp_mcreq_read(unsigned int uccp, unsigned int index,
+			     struct uccp_mcreq *mc_req)
+{
+	struct uccp_core *core = &priv.pdata->cores[uccp];
+	unsigned long base = core->mc_req->start +
+				UCC_MCREQ_0 + index*UCC_MCREQ_STRIDE;
+	mc_req->bulk = readl(base + UCC_MCREQ_BULKADDR) << 2;
+	mc_req->size = (readl(base + UCC_MCREQ_BULKLIMIT) << 2) - mc_req->bulk;
+	mc_req->physical = readl(base + UCC_MCREQ_MEMBASE) << 2;
+}
+
+/*
+ * ioctl handler for the per-core uccp device nodes.  The minor number
+ * selects the core.  Supports region queries (GETREGION), direct,
+ * indirect (MTX) and MCP peripheral register access (WRREG/RDREG),
+ * MCREQ mapping manipulation (CLR/SET/GETMCREQ) and soft reset (SRST).
+ * Returns 0 on success, -EINVAL / -EFAULT on error.
+ *
+ * NOTE(review): TBI_LOCK/TBI_UNLOCK appear to serialise the
+ * multi-register access sequences below — confirm they provide the
+ * required mutual exclusion against concurrent ioctl callers.
+ */
+static long ioctl_uccp(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct inode *inode = file->f_mapping->host;
+	unsigned int minor = MINOR(inode->i_rdev);
+	struct uccp_core *core;
+	struct uccp_region region;
+	struct uccp_region *out_region;
+	struct uccp_reg reg;
+	struct uccp_mcreq mc_req;
+	struct resource *res;
+	unsigned int tmp = 0;
+	int lstat;
+
+	if (minor >= priv.pdata->num_cores)
+		return -EINVAL;
+
+	core = &priv.pdata->cores[minor];
+
+	switch (cmd) {
+	default:
+		return -EINVAL;
+	case UCCPIO_GETREGION:
+		if (copy_from_user(&region, argp, sizeof(region)))
+			return -EFAULT;
+
+		out_region = find_uccp_region(minor, region.type);
+		if (!out_region)
+			return -EINVAL;
+
+		if (copy_to_user(argp, out_region, sizeof(*out_region)))
+			return -EFAULT;
+		break;
+
+	case UCCPIO_WRREG:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+
+		res = core->host_sys_bus;
+		switch (reg.op) {
+		case UCCP_REG_DIRECT:
+			/* register offset must be word aligned and in range */
+			if (reg.reg % 4 ||
+			    reg.reg >= res->end - res->start)
+				return -EINVAL;
+
+			uccp_host_write(minor, reg.reg, reg.val);
+			break;
+		case UCCP_REG_INDIRECT:
+			TBI_LOCK(lstat);
+			uccp_host_write(minor, MTX_TXUXXRXDT, reg.val);
+			uccp_host_write(minor, MTX_TXUXXRXRQ, reg.reg);
+			TBI_UNLOCK(lstat);
+			break;
+		case UCCP_REG_MCPPERIP_PACK:
+			tmp |= MCP_SYS_HACC_CMD_PACK;
+			/* fall through: PACK also performs the access below */
+		case UCCP_REG_MCPPERIP:
+			tmp |= MCP_SYS_HACC_CMD_VAL;
+			tmp |= (reg.reg & MCP_SYS_HACC_CMD_ADDR);
+			TBI_LOCK(lstat);
+			uccp_host_write(minor, MCP_SYS_HACC_WDATA, reg.val);
+			uccp_host_write(minor, MCP_SYS_HACC_CMD, tmp);
+			uccp_host_write(minor, MCP_SYS_HACC_CMD, 0x0);
+			TBI_UNLOCK(lstat);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	case UCCPIO_RDREG:
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+
+		res = core->host_sys_bus;
+		switch (reg.op) {
+		case UCCP_REG_DIRECT:
+			/* register offset must be word aligned and in range */
+			if (reg.reg % 4 ||
+			    reg.reg >= res->end - res->start)
+				return -EINVAL;
+
+			reg.val = uccp_host_read(minor, reg.reg);
+			break;
+		case UCCP_REG_MCPPERIP_PACK:
+			tmp |= MCP_SYS_HACC_CMD_PACK;
+			/* fall through: PACK also performs the access below */
+		case UCCP_REG_MCPPERIP:
+			tmp |= MCP_SYS_HACC_CMD_VAL;
+			tmp |= MCP_SYS_HACC_CMD_RDNW;
+			tmp |= (reg.reg & MCP_SYS_HACC_CMD_ADDR);
+			TBI_LOCK(lstat);
+			uccp_host_write(minor, MCP_SYS_HACC_CMD, tmp);
+			reg.val = uccp_host_read(minor, MCP_SYS_HACC_RDATA);
+			uccp_host_write(minor, MCP_SYS_HACC_CMD, 0x0);
+			TBI_UNLOCK(lstat);
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (copy_to_user(argp, &reg, sizeof(reg)))
+			return -EFAULT;
+		break;
+
+	case UCCPIO_CLRMCREQ:
+		if (copy_from_user(&mc_req, argp, sizeof(mc_req)))
+			return -EFAULT;
+		if (mc_req.index < 0 || mc_req.index >= core->num_mc_req)
+			return -EINVAL;
+		return uccp_mcreq_clear(minor, mc_req.index);
+
+	case UCCPIO_SETMCREQ:
+		if (copy_from_user(&mc_req, argp, sizeof(mc_req)))
+			return -EFAULT;
+		if (mc_req.index < 0 || mc_req.index >= core->num_mc_req)
+			return -EINVAL;
+		return uccp_mcreq_write(minor, mc_req.index, &mc_req);
+
+	case UCCPIO_GETMCREQ:
+		if (copy_from_user(&mc_req, argp, sizeof(mc_req)))
+			return -EFAULT;
+		if (mc_req.index < 0 || mc_req.index >= core->num_mc_req)
+			return -EINVAL;
+		uccp_mcreq_read(minor, mc_req.index, &mc_req);
+
+		if (copy_to_user(argp, &mc_req, sizeof(mc_req)))
+			return -EFAULT;
+		break;
+
+	case UCCPIO_SRST:
+		uccp_host_write(minor, UCCP_RESETCTRL,
+					UCCP_RESETCTRL_ALL_EXCEPT_BUS);
+		uccp_host_write(minor, UCCP_RESETCTRL, 0x0);
+		/*
+		 * Allow the dash some time to perform its own reset so we
+		 * don't lose any later register writes.
+		 */
+		mdelay(20);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct file_operations uccp_fops = {
+	/* pin the module while a device node is open */
+	.owner			= THIS_MODULE,
+	.mmap			= mmap_uccp,
+	.unlocked_ioctl		= ioctl_uccp,
+};
+
+/*
+ * Update offsets from physical addresses.
+ * For every per-core and global region whose offset is still zero, compute
+ * it relative to the start of the full region; regions with a pre-set
+ * non-zero offset are left untouched.
+ */
+static void complete_uccp_regions(void)
+{
+	struct uccp_core *core;
+	struct uccp_region *full = priv.full_region;
+	int i, c;
+	for (c = 0; c < priv.pdata->num_cores; ++c) {
+		core = &priv.pdata->cores[c];
+		for (i = 0; i < core->num_regions; ++i) {
+			if (core->regions[i].offset)
+				continue;
+			core->regions[i].offset = core->regions[i].physical
+							- full->physical;
+		}
+	}
+	for (i = 0; i < priv.pdata->num_regions; ++i) {
+		if (priv.pdata->regions[i].offset)
+			continue;
+		priv.pdata->regions[i].offset = priv.pdata->regions[i].physical
+						- full->physical;
+	}
+}
+
+/*
+ * Probe an individual UCCP.
+ * Verifies the hardware group/core ID registers, reads the revision, and
+ * creates the /dev/uccp%d device node.  Returns 0 on success, -ENODEV for
+ * an unexpected core, or the device_create() error.
+ */
+static int uccp_probe_core(struct platform_device *pdev,
+			   unsigned int i)
+{
+	struct uccp_core *core = &priv.pdata->cores[i];
+	unsigned long core_id, core_revision;
+	unsigned int groupid, coreid, config;
+	unsigned int majrev, minrev, steprev;
+	dev_t dev = MKDEV(priv.uccp_major, i);
+
+	core_id = uccp_host_read(i, UCCP_COREID);
+	groupid = (core_id & UCCP_COREID_GROUPID) >> UCCP_COREID_GROUPID_SHIFT;
+	coreid  = (core_id & UCCP_COREID_COREID)  >> UCCP_COREID_COREID_SHIFT;
+	config  = (core_id & UCCP_COREID_CONFIG)  >> UCCP_COREID_CONFIG_SHIFT;
+
+	if (groupid != UCCP_COREID_GROUPID_UCCP ||
+	    coreid  != UCCP_COREID_COREID_UCCP) {
+		dev_err(&pdev->dev, "uccp%d: wrong groupid/coreid\n", i);
+		return -ENODEV;
+	}
+
+	core_revision = uccp_host_read(i, UCCP_COREREV);
+	majrev = (core_revision & UCCP_COREREV_MAJOR)
+			>> UCCP_COREREV_MAJOR_SHIFT;
+	minrev = (core_revision & UCCP_COREREV_MINOR)
+			>> UCCP_COREREV_MINOR_SHIFT;
+	steprev = (core_revision & UCCP_COREREV_REV)
+			>> UCCP_COREREV_REV_SHIFT;
+
+	core->device = device_create(priv.uccp_class, &pdev->dev, dev,
+					NULL, "uccp%d", i);
+	if (IS_ERR(core->device)) {
+		dev_err(&pdev->dev, "unable to create device uccp%d\n", i);
+		return PTR_ERR(core->device);
+	}
+
+	dev_info(&pdev->dev,
+		 "uccp%d detected with config: 0x%04x, rev: %u.%u.%u\n",
+		 i, config, majrev, minrev, steprev);
+	return 0;
+}
+
+/* Undo uccp_probe_core(): destroy the /dev/uccp%d device node. */
+static void uccp_remove_core(struct platform_device *pdev, unsigned int i)
+{
+	dev_t dev = MKDEV(priv.uccp_major, i);
+	device_destroy(priv.uccp_class, dev);
+}
+
+/*
+ * Probe all UCCPs using platform data.
+ * Collects the per-core register resources, maps the global memory region,
+ * creates the "uccp" class and char major, then probes each core.
+ *
+ * Fixes: the core-index bound check used '>', letting an index equal to
+ * num_cores write one element past pdata->cores[]; the ioremap() result
+ * was never checked; the split error-message literals were missing a
+ * space at the join.
+ */
+static int uccp_probe(struct platform_device *pdev)
+{
+	struct uccp_pdata *pdata = pdev->dev.platform_data;
+	struct resource *res;
+	int error;
+	int i;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data defined\n");
+		error = -EINVAL;
+		goto err_pdata;
+	}
+	priv.pdata = pdata;
+	/* Must have resources for register regions */
+	for (i = 0; ; ++i) {
+		int uccp;
+		int type;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			break;
+
+		uccp = UCCP_RES_UCCP(res->flags & IORESOURCE_BITS);
+		type = UCCP_RES_TYPE(res->flags & IORESOURCE_BITS);
+		/* must be >=: uccp == num_cores would overflow cores[] */
+		if (uccp >= pdata->num_cores)
+			continue;
+
+		if (type == UCCP_RES_HOSTSYSBUS)
+			pdata->cores[uccp].host_sys_bus = res;
+		else if (type == UCCP_RES_MCREQ)
+			pdata->cores[uccp].mc_req = res;
+	}
+	for (i = 0; i < pdata->num_cores; ++i) {
+		if (!pdata->cores[i].host_sys_bus) {
+			dev_err(&pdev->dev, "platform data does not specify a "
+				"HOSTSYSBUS memory resource for uccp%d\n", i);
+			error = -EINVAL;
+			goto err_pdata;
+		}
+		if (!pdata->cores[i].mc_req) {
+			dev_err(&pdev->dev, "platform data does not specify a "
+				"MCREQ memory resource for uccp%d\n", i);
+			error = -EINVAL;
+			goto err_pdata;
+		}
+	}
+	/* Must have global region of type UCCP_REGION_ALL */
+	priv.full_region = find_uccp_region(-1, UCCP_REGION_ALL);
+	if (!priv.full_region) {
+		dev_err(&pdev->dev, "platform data contains no region of type"
+			" UCCP_REGION_ALL\n");
+		error = -EINVAL;
+		goto err_pdata;
+	}
+	priv.full_region->offset = 0;
+	priv.region_map = ioremap(priv.full_region->physical,
+				  priv.full_region->size);
+	if (!priv.region_map) {
+		dev_err(&pdev->dev, "unable to map UCCP memory region\n");
+		error = -ENOMEM;
+		goto err_pdata;
+	}
+	complete_uccp_regions();
+
+	priv.uccp_class = class_create(THIS_MODULE, "uccp");
+	if (IS_ERR(priv.uccp_class)) {
+		dev_err(&pdev->dev, "unable to create device class\n");
+		error = PTR_ERR(priv.uccp_class);
+		goto err_class;
+	}
+
+	priv.uccp_major = register_chrdev(0, "uccp", &uccp_fops);
+	if (priv.uccp_major < 0) {
+		dev_err(&pdev->dev, "unable to get major\n");
+		error = priv.uccp_major;
+		goto err_major;
+	}
+
+	for (i = 0; i < pdata->num_cores; ++i) {
+		error = uccp_probe_core(pdev, i);
+		if (error) {
+			/* undo our hard work probing cores */
+			int max_remove = i;
+			for (i = 0; i < max_remove; ++i)
+				uccp_remove_core(pdev, i);
+			goto err_dev;
+		}
+	}
+
+	return 0;
+err_dev:
+	unregister_chrdev(priv.uccp_major, "uccp");
+err_major:
+	class_destroy(priv.uccp_class);
+err_class:
+	iounmap(priv.region_map);
+err_pdata:
+	return error;
+}
+
+/*
+ * Tear down everything set up by uccp_probe(): per-core device nodes, the
+ * char major, the device class, and the global region mapping.
+ */
+static int uccp_remove(struct platform_device *pdev)
+{
+	int i;
+	for (i = 0; i < priv.pdata->num_cores; ++i)
+		uccp_remove_core(pdev, i);
+	unregister_chrdev(priv.uccp_major, "uccp");
+	class_destroy(priv.uccp_class);
+	iounmap(priv.region_map);
+
+	return 0;
+}
+
+static struct platform_driver uccp_driver = {
+	.driver = {
+		.name		= "uccp",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= uccp_probe,
+	.remove		= uccp_remove,
+};
+
+/*
+ * uccp_init()/uccp_exit() did nothing beyond registering/unregistering
+ * the platform driver; module_platform_driver() generates exactly that
+ * boilerplate.
+ */
+module_platform_driver(uccp_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("IMG UCCP Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 137d3e7..a258466 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_COMMON_CLK)	+= clk-gate.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-mux.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-composite.o
+obj-$(CONFIG_COMMON_CLK)	+= clk-specified-rate.o
 
 # SoCs specific
 obj-$(CONFIG_ARCH_BCM2835)	+= clk-bcm2835.o
@@ -30,7 +31,9 @@
 obj-$(CONFIG_ARCH_ZYNQ)		+= clk-zynq.o
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
 obj-$(CONFIG_PLAT_SAMSUNG)	+= samsung/
+obj-$(CONFIG_SOC_TZ1090)	+= tz1090/
 
+obj-$(CONFIG_METAG)		+= metag/
 obj-$(CONFIG_X86)		+= x86/
 
 # Chip specific
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 6d96741..c0a835d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -11,7 +11,10 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/clkdev.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/err.h>
@@ -154,6 +157,14 @@
 	if (!rate)
 		rate = 1;
 
+	/* if read only, just return current value */
+	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+		bestdiv = readl(divider->reg) >> divider->shift;
+		bestdiv &= div_mask(divider);
+		bestdiv = _get_div(divider, bestdiv);
+		return bestdiv;
+	}
+
 	maxdiv = _get_maxdiv(divider);
 
 	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -238,8 +249,8 @@
 static struct clk *_register_divider(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, const struct clk_div_table *table,
-		spinlock_t *lock)
+		u8 clk_divider_flags, u32 default_divide,
+		const struct clk_div_table *table, spinlock_t *lock)
 {
 	struct clk_divider *div;
 	struct clk *clk;
@@ -267,6 +278,10 @@
 	div->hw.init = &init;
 	div->table = table;
 
+	/* set default value */
+	if (default_divide)
+		clk_divider_set_rate(&div->hw, 1, default_divide);
+
 	/* register the clock */
 	clk = clk_register(dev, &div->hw);
 
@@ -294,7 +309,7 @@
 		u8 clk_divider_flags, spinlock_t *lock)
 {
 	return _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, NULL, lock);
+			width, clk_divider_flags, 0, NULL, lock);
 }
 
 /**
@@ -318,5 +333,68 @@
 		spinlock_t *lock)
 {
 	return _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, table, lock);
+			width, clk_divider_flags, 0, table, lock);
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_divider_clk_setup() - Setup function for simple clock divider
+ *
+ * Fix: every failure after of_iomap() previously returned without
+ * unmapping the register, leaking the mapping; route all such paths
+ * through a common iounmap() exit.
+ */
+void __init of_divider_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	u32 shift, width, divide;
+	void __iomem *reg;
+	const char *parent_name;
+	u8 flags = 0;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	if (of_property_read_u32(node, "shift", &shift)) {
+		pr_err("%s(%s): could not read shift property\n",
+		       __func__, clk_name);
+		goto err_iounmap;
+	}
+
+	if (of_property_read_u32(node, "width", &width)) {
+		pr_err("%s(%s): could not read width property\n",
+		       __func__, clk_name);
+		goto err_iounmap;
+	}
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	if (!parent_name) {
+		pr_err("%s(%s): could not read parent clock\n",
+		       __func__, clk_name);
+		goto err_iounmap;
+	}
+
+	if (of_find_property(node, "one-based", NULL))
+		flags |= CLK_DIVIDER_ONE_BASED;
+	if (of_find_property(node, "power-of-two", NULL))
+		flags |= CLK_DIVIDER_POWER_OF_TWO;
+	if (of_find_property(node, "linux,clk-read-only", NULL))
+		flags |= CLK_DIVIDER_READ_ONLY;
+
+	/* default-divide is optional; 0 means leave the hardware value */
+	if (of_property_read_u32(node, "default-divide", &divide))
+		divide = 0;
+
+	clk = _register_divider(NULL, clk_name, parent_name,
+				CLK_SET_RATE_PARENT, reg, shift, width, flags,
+				divide, NULL, NULL);
+	if (IS_ERR(clk))
+		goto err_iounmap;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	clk_register_clkdev(clk, clk_name, NULL);
+	return;
+
+err_iounmap:
+	iounmap(reg);
+}
+EXPORT_SYMBOL_GPL(of_divider_clk_setup);
+CLK_OF_DECLARE(divider_clk, "divider-clock", of_divider_clk_setup);
+#endif /* CONFIG_OF */
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 25b1734..cecfa01 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -100,6 +100,7 @@
 const struct clk_ops clk_mux_ops = {
 	.get_parent = clk_mux_get_parent,
 	.set_parent = clk_mux_set_parent,
+	.determine_rate = __clk_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);
 
diff --git a/drivers/clk/clk-specified-rate.c b/drivers/clk/clk-specified-rate.c
new file mode 100644
index 0000000..8f78033
--- /dev/null
+++ b/drivers/clk/clk-specified-rate.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Fixed rate clock implementation with rate specified in a register field.
+ * Based on fixed rate clock implementation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/*
+ * DOC: basic specified-rate clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parents are prepared
+ * enable - clk_enable only ensures parents are enabled
+ * rate - rate is always a fixed value.  No clk_set_rate support
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_specified_rate(_hw) \
+	container_of(_hw, struct clk_specified_rate, hw)
+
+/*
+ * recalc_rate callback: read the configured register field and look the
+ * value up in the value->rate mapping table.  Returns 0 when the register
+ * value matches no table entry (unknown rate).
+ */
+static unsigned long clk_specified_rate_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_specified_rate *specified = to_clk_specified_rate(hw);
+	struct clk_specified_rate_entry *entry;
+	u32 val;
+	unsigned int i;
+
+	/* read configuration field */
+	val = readl(specified->reg);
+	val >>= specified->shift;
+	val &= (1 << specified->width) - 1;
+
+	/* match the value in the mapping */
+	for (i = 0; i < specified->num_rates; ++i) {
+		entry = &specified->rates[i];
+		if (val == entry->value)
+			return entry->rate;
+	}
+
+	/* unknown rate! */
+	return 0;
+}
+
+const struct clk_ops clk_specified_rate_ops = {
+	.recalc_rate = clk_specified_rate_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_specified_rate_ops);
+
+/**
+ * clk_register_specified_rate - register specified-rate clock
+ * @dev:		device that is registering this clock
+ * @name:		name of this clock
+ * @parent_name:	name of clock's parent
+ * @flags:		framework-specific flags
+ * @reg:		config register
+ * @shift:		shift into config register of frequency field
+ * @width:		width of frequency field in config register
+ * @rates:		value->rate mapping entries
+ * @num_rates:		number of rates in @rates
+ *
+ * @rates is referenced, not copied, so it must remain valid for the
+ * lifetime of the clock.  Returns the clk or an ERR_PTR on failure.
+ */
+struct clk *clk_register_specified_rate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		struct clk_specified_rate_entry *rates,
+		unsigned long num_rates)
+{
+	struct clk_specified_rate *specified;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate specified-rate clock */
+	specified = kzalloc(sizeof(struct clk_specified_rate), GFP_KERNEL);
+	if (!specified) {
+		pr_err("%s(%s): could not allocate specified clk\n",
+		       __func__, name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* init is stack-local: clk_register() copies what it needs */
+	init.name = name;
+	init.ops = &clk_specified_rate_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_specified_rate assignments */
+	specified->reg = reg;
+	specified->shift = shift;
+	specified->width = width;
+	specified->rates = rates;
+	specified->num_rates = num_rates;
+	specified->hw.init = &init;
+
+	/* register the clock */
+	clk = clk_register(dev, &specified->hw);
+
+	if (IS_ERR(clk))
+		kfree(specified);
+
+	return clk;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_specified_clk_setup() - Setup function for specified fixed rate clock
+ *
+ * The "clock-frequency" property is a list of (value, frequency) u32 cell
+ * pairs.  Fix: num_rates was computed as len / (sizeof(*rates) * 2), i.e.
+ * divided by the size of the in-memory entry struct, under-counting the
+ * mapping entries; the divisor must be the DT cell size, sizeof(u32) * 2,
+ * matching the alignment check.  The extraction loop is also explicitly
+ * bounded by num_rates.
+ */
+void __init of_specified_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	u32 shift, width, rate;
+	void __iomem *reg;
+	int len, num_rates, i;
+	struct property *prop;
+	struct clk_specified_rate_entry *rates;
+	const __be32 *p;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	if (of_property_read_u32(node, "shift", &shift)) {
+		pr_err("%s(%s): could not read shift property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	if (of_property_read_u32(node, "width", &width)) {
+		pr_err("%s(%s): could not read width property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	/* check clock-frequency exists */
+	prop = of_find_property(node, "clock-frequency", &len);
+	if (!prop) {
+		pr_err("%s(%s): could not find clock-frequency property\n",
+		       __func__, clk_name);
+		goto err_iounmap;
+	}
+
+	if (len & (sizeof(u32)*2 - 1)) {
+		pr_err("%s(%s): clock-frequency has invalid size of %d bytes\n",
+		       __func__, clk_name, len);
+		goto err_iounmap;
+	}
+	/* one mapping entry per pair of u32 cells */
+	num_rates = len / (sizeof(u32)*2);
+
+	rates = kzalloc(sizeof(*rates)*num_rates, GFP_KERNEL);
+	if (!rates) {
+		pr_err("%s(%s): could not allocate %d rate mapping entries\n",
+		       __func__, clk_name, num_rates);
+		goto err_iounmap;
+	}
+
+	/* extract rate mapping, never writing beyond rates[num_rates-1] */
+	p = NULL;
+	for (i = 0; i < num_rates; ++i) {
+		p = of_prop_next_u32(prop, p, &rates[i].value);
+		if (!p)
+			break;
+		p = of_prop_next_u32(prop, p, &rate);
+		if (!p)
+			break;
+		rates[i].rate = rate;
+		pr_debug("%s(%s): map %u -> %lu Hz\n",
+			 __func__, clk_name, rates[i].value, rates[i].rate);
+	}
+
+	clk = clk_register_specified_rate(NULL, clk_name, NULL, CLK_IS_ROOT,
+					  reg, shift, width, rates, num_rates);
+	if (IS_ERR(clk))
+		goto err_kfree;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+	return;
+
+err_kfree:
+	kfree(rates);
+err_iounmap:
+	iounmap(reg);
+}
+EXPORT_SYMBOL_GPL(of_specified_clk_setup);
+CLK_OF_DECLARE(specified_clk, "specified-clock", of_specified_clk_setup);
+#endif
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1144e8c..4f9f2b3 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -34,6 +34,9 @@
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+static int __clk_set_parent_no_recalc(struct clk *clk, struct clk *parent,
+				      u8 p_index);
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -559,6 +562,19 @@
 	return !clk ? NULL : clk->parent;
 }
 
+/*
+ * Return possible parent @index of @clk, or NULL if @clk is NULL, the
+ * index is out of range, or the lookup fails.  Caches the looked-up parent
+ * in clk->parents[] when that cache has been allocated.
+ * NOTE(review): uses __clk_lookup(), so presumably runs under the clk
+ * prepare lock — confirm with callers.
+ */
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+	if (!clk || index >= clk->num_parents)
+		return NULL;
+	else if (!clk->parents)
+		return __clk_lookup(clk->parent_names[index]);
+	else if (!clk->parents[index])
+		return clk->parents[index] =
+			__clk_lookup(clk->parent_names[index]);
+	else
+		return clk->parents[index];
+}
+
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
 	return !clk ? 0 : clk->enable_count;
@@ -679,6 +695,55 @@
 	return NULL;
 }
 
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *best_parent_rate,
+			      struct clk **best_parent_p)
+{
+	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	int i, num_parents;
+	unsigned long parent_rate, best = 0;
+
+	/* if remux flag not set, pass through to current parent */
+	if (!(clk->flags & CLK_SET_RATE_REMUX)) {
+		parent = clk->parent;
+		if (clk->flags & CLK_SET_RATE_PARENT)
+			best = __clk_round_rate(parent, rate);
+		else if (parent)
+			best = __clk_get_rate(parent);
+		else
+			best = __clk_get_rate(clk);
+		goto out;
+	}
+
+	/* find the parent that can provide the fastest rate <= rate */
+	num_parents = clk->num_parents;
+	for (i = 0; i < num_parents; i++) {
+		parent = clk_get_parent_by_index(clk, i);
+		if (!parent)
+			continue;
+		if (clk->flags & CLK_SET_RATE_PARENT)
+			parent_rate = __clk_round_rate(parent, rate);
+		else
+			parent_rate = __clk_get_rate(parent);
+		if (parent_rate <= rate && parent_rate > best) {
+			best_parent = parent;
+			best = parent_rate;
+		}
+	}
+
+out:
+	/*
+	 * best stays 0 (and *best_parent_p untouched) when no parent can
+	 * supply a rate at or below the request
+	 */
+	if (best_parent_p && best_parent)
+		*best_parent_p = best_parent;
+	*best_parent_rate = best;
+
+	return best;
+}
+
 /***        clk api        ***/
 
 void __clk_unprepare(struct clk *clk)
@@ -879,17 +944,20 @@
 	if (!clk)
 		return 0;
 
-	if (!clk->ops->round_rate) {
-		if (clk->flags & CLK_SET_RATE_PARENT)
-			return __clk_round_rate(clk->parent, rate);
-		else
-			return clk->rate;
-	}
 
 	if (clk->parent)
 		parent_rate = clk->parent->rate;
 
-	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+	if (clk->ops->determine_rate)
+		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
+						NULL);
+	else if (clk->ops->round_rate)
+		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+	else if (clk->flags & CLK_SET_RATE_PARENT)
+		return __clk_round_rate(clk->parent, rate);
+	else
+		return clk->rate;
+
 }
 
 /**
@@ -1014,6 +1082,32 @@
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
+/*
+ * Find the index of @parent among @clk's possible parents, lazily
+ * allocating and filling the clk->parents[] cache.  Returns
+ * clk->num_parents when @parent is not a possible parent (callers check
+ * for this sentinel).  A failed kzalloc is tolerated: the string
+ * comparison path is simply taken on every call.
+ */
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+	u8 i;
+
+	if (!clk->parents)
+		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+								GFP_KERNEL);
+
+	/*
+	 * find index of new parent clock using cached parent ptrs,
+	 * or if not yet cached, use string name comparison and cache
+	 * them now to avoid future calls to __clk_lookup.
+	 */
+	for (i = 0; i < clk->num_parents; i++) {
+		if (clk->parents && clk->parents[i] == parent)
+			break;
+		else if (!strcmp(clk->parent_names[i], parent->name)) {
+			if (clk->parents)
+				clk->parents[i] = __clk_lookup(parent->name);
+			break;
+		}
+	}
+
+	return i;
+}
+
 /**
  * __clk_speculate_rates
  * @clk: first clk in the subtree
@@ -1058,18 +1152,24 @@
 	return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
+			     struct clk *new_parent, u8 p_index)
 {
 	struct clk *child;
 
 	clk->new_rate = new_rate;
+	clk->new_parent = new_parent;
+	clk->new_parent_index = p_index;
+	clk->new_child = NULL;
+	if (new_parent && new_parent != clk->parent)
+		new_parent->new_child = clk;
 
 	hlist_for_each_entry(child, &clk->children, child_node) {
 		if (child->ops->recalc_rate)
 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
 		else
 			child->new_rate = new_rate;
-		clk_calc_subtree(child, child->new_rate);
+		clk_calc_subtree(child, child->new_rate, NULL, 0);
 	}
 }
 
@@ -1080,50 +1180,63 @@
 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
 	struct clk *top = clk;
+	struct clk *old_parent, *parent;
 	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
+	u8 p_index = 0;
 
 	/* sanity */
 	if (IS_ERR_OR_NULL(clk))
 		return NULL;
 
 	/* save parent rate, if it exists */
-	if (clk->parent)
-		best_parent_rate = clk->parent->rate;
+	parent = old_parent = clk->parent;
+	if (parent)
+		best_parent_rate = parent->rate;
 
-	/* never propagate up to the parent */
-	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
-		if (!clk->ops->round_rate) {
-			clk->new_rate = clk->rate;
-			return NULL;
-		}
-		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+	/* find the closest rate and parent clk/rate */
+	if (clk->ops->determine_rate) {
+		new_rate = clk->ops->determine_rate(clk->hw, rate,
+						    &best_parent_rate,
+						    &parent);
+	} else if (clk->ops->round_rate) {
+		new_rate = clk->ops->round_rate(clk->hw, rate,
+						&best_parent_rate);
+	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+		/* pass-through clock without adjustable parent */
+		clk->new_rate = clk->rate;
+		return NULL;
+	} else {
+		/* pass-through clock with adjustable parent */
+		top = clk_calc_new_rates(parent, rate);
+		new_rate = parent->new_rate;
 		goto out;
 	}
 
-	/* need clk->parent from here on out */
-	if (!clk->parent) {
-		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
+	/* some clocks must be gated to change parent */
+	if (parent != old_parent &&
+	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+		pr_debug("%s: %s not gated but wants to reparent\n",
+			 __func__, clk->name);
 		return NULL;
 	}
 
-	if (!clk->ops->round_rate) {
-		top = clk_calc_new_rates(clk->parent, rate);
-		new_rate = clk->parent->new_rate;
-
-		goto out;
+	/* try finding the new parent index */
+	if (parent) {
+		p_index = clk_fetch_parent_index(clk, parent);
+		if (p_index == clk->num_parents) {
+			pr_debug("%s: clk %s can not be parent of clk %s\n",
+				 __func__, parent->name, clk->name);
+			return NULL;
+		}
 	}
 
-	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-
-	if (best_parent_rate != clk->parent->rate) {
-		top = clk_calc_new_rates(clk->parent, best_parent_rate);
-
-		goto out;
-	}
+	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+	    best_parent_rate != parent->rate)
+		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
-	clk_calc_subtree(clk, new_rate);
+	clk_calc_subtree(clk, new_rate, parent, p_index);
 
 	return top;
 }
@@ -1135,7 +1248,7 @@
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-	struct clk *child, *fail_clk = NULL;
+	struct clk *child, *tmp_clk, *fail_clk = NULL;
 	int ret = NOTIFY_DONE;
 
 	if (clk->rate == clk->new_rate)
@@ -1148,9 +1261,19 @@
 	}
 
 	hlist_for_each_entry(child, &clk->children, child_node) {
-		clk = clk_propagate_rate_change(child, event);
-		if (clk)
-			fail_clk = clk;
+		/* Skip children who will be reparented to another clock */
+		if (child->new_parent && child->new_parent != clk)
+			continue;
+		tmp_clk = clk_propagate_rate_change(child, event);
+		if (tmp_clk)
+			fail_clk = tmp_clk;
+	}
+
+	/* handle the new child who might not be in clk->children yet */
+	if (clk->new_child) {
+		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+		if (tmp_clk)
+			fail_clk = tmp_clk;
 	}
 
 	return fail_clk;
@@ -1168,6 +1291,11 @@
 
 	old_rate = clk->rate;
 
+	/* set parent */
+	if (clk->new_parent && clk->new_parent != clk->parent)
+		__clk_set_parent_no_recalc(clk, clk->new_parent,
+					   clk->new_parent_index);
+
 	if (clk->parent)
 		best_parent_rate = clk->parent->rate;
 
@@ -1182,8 +1310,15 @@
 	if (clk->notifier_count && old_rate != clk->rate)
 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-	hlist_for_each_entry(child, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node) {
+		/* Skip children who will be reparented to another clock */
+		if (child->new_parent && child->new_parent != clk)
+			continue;
 		clk_change_rate(child);
+	}
+
+	if (clk->new_child)
+		clk_change_rate(clk->new_child);
 }
 
 /**
@@ -1315,13 +1450,7 @@
 			kzalloc((sizeof(struct clk*) * clk->num_parents),
 					GFP_KERNEL);
 
-	if (!clk->parents)
-		ret = __clk_lookup(clk->parent_names[index]);
-	else if (!clk->parents[index])
-		ret = clk->parents[index] =
-			__clk_lookup(clk->parent_names[index]);
-	else
-		ret = clk->parents[index];
+	ret = clk_get_parent_by_index(clk, index);
 
 out:
 	return ret;
@@ -1329,6 +1458,9 @@
 
 static void clk_reparent(struct clk *clk, struct clk *new_parent)
 {
+	if (new_parent->new_child == clk)
+		new_parent->new_child = NULL;
+
 	hlist_del(&clk->child_node);
 
 	if (new_parent)
@@ -1346,32 +1478,6 @@
 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
 
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
-	u8 i;
-
-	if (!clk->parents)
-		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
-								GFP_KERNEL);
-
-	/*
-	 * find index of new parent clock using cached parent ptrs,
-	 * or if not yet cached, use string name comparison and cache
-	 * them now to avoid future calls to __clk_lookup.
-	 */
-	for (i = 0; i < clk->num_parents; i++) {
-		if (clk->parents && clk->parents[i] == parent)
-			break;
-		else if (!strcmp(clk->parent_names[i], parent->name)) {
-			if (clk->parents)
-				clk->parents[i] = __clk_lookup(parent->name);
-			break;
-		}
-	}
-
-	return i;
-}
-
 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
 {
 	unsigned long flags;
@@ -1444,6 +1550,27 @@
 	return 0;
 }
 
+static int __clk_set_parent_no_recalc(struct clk *clk, struct clk *parent,
+				      u8 p_index)
+{
+	int ret = 0;
+
+	if (clk->parent == parent)
+		goto out;
+
+	/* only re-parent if the clock is not in use */
+	ret = __clk_set_parent(clk, parent, p_index);
+	if (ret)
+		goto out;
+
+	/* reparent, but don't propagate rate recalculation downstream */
+	clk_reparent(clk, parent);
+	clk_debug_reparent(clk, parent);
+
+out:
+	return ret;
+}
+
 /**
  * clk_set_parent - switch the parent of a mux clk
  * @clk: the mux clk whose input we are switching
@@ -1546,8 +1673,9 @@
 
 	/* check that clk_ops are sane.  See Documentation/clk.txt */
 	if (clk->ops->set_rate &&
-			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
-		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
+	      clk->ops->recalc_rate)) {
+		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
 				__func__, clk->name);
 		ret = -EINVAL;
 		goto out;
diff --git a/drivers/clk/metag/Makefile b/drivers/clk/metag/Makefile
new file mode 100644
index 0000000..c53ccb2
--- /dev/null
+++ b/drivers/clk/metag/Makefile
@@ -0,0 +1,3 @@
+# metag clock types
+obj-$(CONFIG_COMMON_CLK)	+= clk-gate.o
+obj-$(CONFIG_COMMON_CLK)	+= clk-mux.o
diff --git a/drivers/clk/metag/clk-gate.c b/drivers/clk/metag/clk-gate.c
new file mode 100644
index 0000000..301a3bd
--- /dev/null
+++ b/drivers/clk/metag/clk-gate.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Metag gated clock implementation
+ * Based on gated clock implementation, but does appropriate locking to protect
+ * registers shared between hardware threads.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/global_lock.h>
+
+/**
+ * struct clk_metag_gate - metag gating clock
+ *
+ * @gate:	the parent class
+ * @ops:	pointer to clk_ops of parent class
+ *
+ * Clock which can gate its output. Extends the basic gate by using a global
+ * exclusive lock when read-modify-writing the enable field so that multiple
+ * threads/cores can use different fields in the same register.
+ */
+struct clk_metag_gate {
+	struct clk_gate		gate;
+	const struct clk_ops	*ops;
+};
+
+static inline struct clk_metag_gate *to_clk_metag_gate(struct clk_hw *hw)
+{
+	struct clk_gate *gate = container_of(hw, struct clk_gate, hw);
+
+	return container_of(gate, struct clk_metag_gate, gate);
+}
+
+/* Acquire exclusive lock since other cores may access the same register */
+static int clk_metag_gate_enable(struct clk_hw *hw)
+{
+	struct clk_metag_gate *gate = to_clk_metag_gate(hw);
+	int ret;
+	unsigned long flags;
+
+	__global_lock2(flags);
+	ret = gate->ops->enable(&gate->gate.hw);
+	__global_unlock2(flags);
+
+	return ret;
+}
+
+/* Acquire exclusive lock since other cores may access the same register */
+static void clk_metag_gate_disable(struct clk_hw *hw)
+{
+	struct clk_metag_gate *gate = to_clk_metag_gate(hw);
+	unsigned long flags;
+
+	__global_lock2(flags);
+	gate->ops->disable(&gate->gate.hw);
+	__global_unlock2(flags);
+}
+
+static int clk_metag_gate_is_enabled(struct clk_hw *hw)
+{
+	struct clk_metag_gate *gate = to_clk_metag_gate(hw);
+
+	return gate->ops->is_enabled(&gate->gate.hw);
+}
+
+static const struct clk_ops clk_metag_gate_ops = {
+	.enable = clk_metag_gate_enable,
+	.disable = clk_metag_gate_disable,
+	.is_enabled = clk_metag_gate_is_enabled,
+};
+
+/**
+ * clk_register_metag_gate - register a Meta gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ */
+static struct clk *__init clk_register_metag_gate(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx, u8 clk_gate_flags)
+{
+	struct clk_metag_gate *gate;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate the gate */
+	gate = kzalloc(sizeof(struct clk_metag_gate), GFP_KERNEL);
+	if (!gate) {
+		pr_err("%s: could not allocate gated clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &clk_metag_gate_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_gate assignments */
+	gate->gate.reg = reg;
+	gate->gate.bit_idx = bit_idx;
+	gate->gate.flags = clk_gate_flags;
+	gate->gate.hw.init = &init;
+
+	/* struct clk_metag_gate assignments */
+	gate->ops = &clk_gate_ops;
+
+	clk = clk_register(dev, &gate->gate.hw);
+
+	if (IS_ERR(clk))
+		kfree(gate);
+
+	return clk;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_metag_gate_clk_setup() - Setup function for the Meta gate clock
+ */
+static void __init of_metag_gate_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	u32 bit_idx;
+	void __iomem *reg;
+	const char *parent_name;
+	u8 flags = 0;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	if (of_property_read_u32(node, "bit", &bit_idx)) {
+		pr_err("%s(%s): could not read bit property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	if (!parent_name) {
+		pr_err("%s(%s): could not read parent clock\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	clk = clk_register_metag_gate(NULL, clk_name, parent_name,
+				      CLK_SET_RATE_PARENT, reg, bit_idx, flags);
+	if (IS_ERR(clk))
+		goto err_iounmap;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	clk_register_clkdev(clk, clk_name, NULL);
+
+	return;
+
+err_iounmap:
+	iounmap(reg);
+}
+CLK_OF_DECLARE(metag_gate_clk, "img,meta-gate-clock", of_metag_gate_clk_setup);
+#endif /* CONFIG_OF */
diff --git a/drivers/clk/metag/clk-mux.c b/drivers/clk/metag/clk-mux.c
new file mode 100644
index 0000000..0d3096d
--- /dev/null
+++ b/drivers/clk/metag/clk-mux.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Metag simple multiplexer clock implementation
+ * Based on simple multiplexer clock implementation, but does appropriate
+ * locking to protect registers shared between hardware threads.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/global_lock.h>
+
+/**
+ * struct clk_metag_mux - metag multiplexer clock
+ *
+ * @mux:	the parent class
+ * @ops:	pointer to clk_ops of parent class
+ *
+ * Clock with multiple selectable parents. Extends basic mux by using a global
+ * exclusive lock when read-modify-writing the mux field so that multiple
+ * threads/cores can use different fields in the same register.
+ */
+struct clk_metag_mux {
+	struct clk_mux		mux;
+	const struct clk_ops	*ops;
+};
+
+static inline struct clk_metag_mux *to_clk_metag_mux(struct clk_hw *hw)
+{
+	struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+
+	return container_of(mux, struct clk_metag_mux, mux);
+}
+
+static u8 clk_metag_mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_metag_mux *mux = to_clk_metag_mux(hw);
+
+	return mux->ops->get_parent(&mux->mux.hw);
+}
+
+/* Acquire exclusive lock since other cores may access the same register */
+static int clk_metag_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_metag_mux *mux = to_clk_metag_mux(hw);
+	int ret;
+	unsigned long flags;
+
+	__global_lock2(flags);
+	ret = mux->ops->set_parent(&mux->mux.hw, index);
+	__global_unlock2(flags);
+
+	return ret;
+}
+
+static long clk_metag_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long *prate,
+				     struct clk **best_parent)
+{
+	struct clk_metag_mux *mux = to_clk_metag_mux(hw);
+
+	return mux->ops->determine_rate(&mux->mux.hw, rate, prate, best_parent);
+}
+
+static const struct clk_ops clk_metag_mux_ops = {
+	.get_parent = clk_metag_mux_get_parent,
+	.set_parent = clk_metag_mux_set_parent,
+	.determine_rate = clk_metag_mux_determine_rate,
+};
+
+static struct clk *__init clk_register_metag_mux(struct device *dev,
+		const char *name, const char **parent_names, u8 num_parents,
+		s32 default_parent, unsigned long flags, void __iomem *reg,
+		u8 shift, u8 width, u8 clk_mux_flags)
+{
+	struct clk_metag_mux *mux;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate the mux */
+	mux = kzalloc(sizeof(struct clk_metag_mux), GFP_KERNEL);
+	if (!mux) {
+		pr_err("%s: could not allocate metag mux clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &clk_metag_mux_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
+	/* struct clk_mux assignments */
+	mux->mux.reg = reg;
+	mux->mux.shift = shift;
+	mux->mux.mask = BIT(width) - 1;
+	mux->mux.flags = clk_mux_flags;
+	mux->mux.hw.init = &init;
+
+	/* struct clk_metag_mux assignments */
+	mux->ops = &clk_mux_ops;
+
+	/* set default value */
+	if (default_parent >= 0)
+		clk_metag_mux_set_parent(&mux->mux.hw, default_parent);
+
+	clk = clk_register(dev, &mux->mux.hw);
+
+	if (IS_ERR(clk))
+		kfree(mux);
+
+	return clk;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_metag_mux_clk_setup() - Setup function for the Meta mux clock
+ */
+static void __init of_metag_mux_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	u32 shift, width, default_clock;
+	void __iomem *reg;
+	int len, i;
+	struct property *prop;
+	const char **parent_names;
+	unsigned int num_parents;
+	u8 flags = 0;
+	unsigned long clk_flags = 0;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	if (of_property_read_u32(node, "shift", &shift)) {
+		pr_err("%s(%s): could not read shift property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	if (of_property_read_u32(node, "width", &width)) {
+		pr_err("%s(%s): could not read width property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	/* count maximum number of parent clocks */
+	prop = of_find_property(node, "clocks", &len);
+	if (!prop) {
+		pr_err("%s(%s): could not find clocks property\n",
+		       __func__, clk_name);
+		return;
+	}
+	/*
+	 * There cannot be more parents than entries in the "clocks" property
+	 * (which may include additional args too). It also needs to fit in a u8.
+	 */
+	num_parents = len / sizeof(u32);
+	num_parents = min(num_parents, 0xffu);
+
+	/* allocate an array of parent names */
+	parent_names = kzalloc(sizeof(const char *)*num_parents, GFP_KERNEL);
+	if (!parent_names) {
+		pr_err("%s(%s): could not allocate %u parent names\n",
+		       __func__, clk_name, num_parents);
+		goto err_kfree;
+	}
+
+	/* fill in the parent names */
+	for (i = 0; i < num_parents; ++i) {
+		parent_names[i] = of_clk_get_parent_name(node, i);
+		if (!parent_names[i]) {
+			/* truncate array length if we hit the end early */
+			num_parents = i;
+			break;
+		}
+	}
+
+	/* default parent clock (mux value) */
+	if (!of_property_read_u32(node, "default-clock", &default_clock)) {
+		if (default_clock >= num_parents) {
+			pr_err("%s(%s): default-clock %u out of range (%u bits)\n",
+			       __func__, clk_name, default_clock, width);
+			goto err_kfree;
+		}
+	} else {
+		default_clock = -1;
+	}
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		goto err_kfree;
+	}
+
+	if (of_find_property(node, "linux,clk-set-rate-parent", NULL))
+		clk_flags |= CLK_SET_RATE_PARENT;
+	if (of_find_property(node, "linux,clk-set-rate-remux", NULL))
+		clk_flags |= CLK_SET_RATE_REMUX;
+
+
+	clk = clk_register_metag_mux(NULL, clk_name, parent_names, num_parents,
+				     default_clock, clk_flags, reg, shift,
+				     width, flags);
+	if (IS_ERR(clk))
+		goto err_iounmap;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+	return;
+
+err_iounmap:
+	iounmap(reg);
+err_kfree:
+	kfree(parent_names);
+}
+CLK_OF_DECLARE(metag_mux_clk, "img,meta-mux-clock", of_metag_mux_clk_setup);
+#endif /* CONFIG_OF */
diff --git a/drivers/clk/tz1090/Makefile b/drivers/clk/tz1090/Makefile
new file mode 100644
index 0000000..6f92de3
--- /dev/null
+++ b/drivers/clk/tz1090/Makefile
@@ -0,0 +1,3 @@
+# Makefile for TZ1090-specific clocks
+obj-y		+= clk-tz1090-deleter.o
+obj-y		+= clk-tz1090-pll.o
diff --git a/drivers/clk/tz1090/clk-tz1090-deleter.c b/drivers/clk/tz1090/clk-tz1090-deleter.c
new file mode 100644
index 0000000..8ac9219
--- /dev/null
+++ b/drivers/clk/tz1090/clk-tz1090-deleter.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Clock deleter in TZ1090
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/**
+ * struct clk_tz1090_deleter - Clock deleter
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	delete register
+ * @shift:	start bit of delete field
+ * @width:	width of delete field
+ *
+ * Deleter in TZ1090.  Implements .recalc_rate only (rate is read-only)
+ */
+struct clk_tz1090_deleter {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	u8		shift;
+	u8		width;
+};
+
+/*
+ * DOC: TZ1090 adjustable deleter clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable in hardware but set_rate unimplemented.
+ *		clk->rate = (parent->rate * ((1 << width) - delete)) >> width
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_tz1090_deleter(_hw) container_of(_hw, struct clk_tz1090_deleter, hw)
+
+static unsigned long clk_tz1090_deleter_recalc_rate(struct clk_hw *hw,
+						    unsigned long parent_rate)
+{
+	struct clk_tz1090_deleter *deleter = to_clk_tz1090_deleter(hw);
+	u32 val;
+	u32 period = (1 << deleter->width);
+	u32 mask = period - 1;
+
+	val = (readl(deleter->reg) >> deleter->shift) & mask;
+	return ((u64)parent_rate * (period - val)) >> deleter->width;
+}
+
+static const struct clk_ops clk_tz1090_deleter_ops = {
+	.recalc_rate = clk_tz1090_deleter_recalc_rate,
+};
+
+/**
+ * clk_register_tz1090_deleter - register a clock deleter clock
+ * @dev:		device registering this clock
+ * @name:		name of this clock
+ * @parent_name:	name of clock's parent
+ * @flags:		framework-specific flags
+ * @reg:		register address to adjust deleter
+ * @shift:		start bit of delete field
+ * @width:		width of delete field
+ *
+ * Register a TZ1090 clock deleter with the clock framework.
+ */
+static struct clk *__init clk_register_tz1090_deleter(struct device *dev,
+						      const char *name,
+						      const char *parent_name,
+						      unsigned long flags,
+						      void __iomem *reg,
+						      u8 shift,
+						      u8 width)
+{
+	struct clk_tz1090_deleter *deleter;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate the divider */
+	deleter = kzalloc(sizeof(struct clk_tz1090_deleter), GFP_KERNEL);
+	if (!deleter) {
+		pr_err("%s: could not allocate deleter clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &clk_tz1090_deleter_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name: NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_tz1090_deleter assignments */
+	deleter->reg = reg;
+	deleter->shift = shift;
+	deleter->width = width;
+	deleter->hw.init = &init;
+
+	/* register the clock */
+	clk = clk_register(dev, &deleter->hw);
+
+	if (IS_ERR(clk))
+		kfree(deleter);
+
+	return clk;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_tz1090_deleter_setup() - Setup function for clock deleter
+ */
+static void __init of_tz1090_deleter_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	void __iomem *reg;
+	u32 shift, width;
+	const char *parent_name;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	if (of_property_read_u32(node, "shift", &shift)) {
+		pr_err("%s(%s): could not read shift property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	if (of_property_read_u32(node, "width", &width)) {
+		pr_err("%s(%s): could not read width property\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	if (!parent_name) {
+		pr_err("%s(%s): could not read parent clock\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	clk = clk_register_tz1090_deleter(NULL, clk_name, parent_name, 0, reg,
+					  shift, width);
+	if (IS_ERR(clk))
+		goto err_iounmap;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+	return;
+
+err_iounmap:
+	iounmap(reg);
+}
+CLK_OF_DECLARE(tz1090_deleter_clk, "img,tz1090-deleter",
+	       of_tz1090_deleter_setup);
+#endif /* CONFIG_OF */
diff --git a/drivers/clk/tz1090/clk-tz1090-pll.c b/drivers/clk/tz1090/clk-tz1090-pll.c
new file mode 100644
index 0000000..1447f31
--- /dev/null
+++ b/drivers/clk/tz1090/clk-tz1090-pll.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * True Circuits PLL in TZ1090 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+
+#define PLL_CTL0		0
+#define  PLL_CTL0_BWADJ_M		0xfff
+#define  PLL_CTL0_BWADJ_S		20
+#define  PLL_CTL0_CLKF_M		0x1fff
+#define  PLL_CTL0_CLKF_S		4
+#define  PLL_CTL0_CLKOD_M		0x7
+#define  PLL_CTL0_CLKOD_S		0
+#define PLL_CTL1		4
+#define  PLL_CTL1_RESET_B		BIT(28)
+#define  PLL_CTL1_FASTEN_B		BIT(27)
+#define  PLL_CTL1_ENSAT_B		BIT(26)
+#define  PLL_CTL1_BYPASS_B		BIT(25)
+#define  PLL_CTL1_PWRDN_B		BIT(24)
+#define  PLL_CTL1_CLKR_M		0x3f
+#define  PLL_CTL1_CLKR_S		0
+
+/**
+ * struct clk_tz1090_pll - PLL in TZ1090
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	first of two registers
+ *
+ * PLL in TZ1090.  Implements .recalc_rate, .set_rate and .round_rate
+ */
+struct clk_tz1090_pll {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+};
+
+/*
+ * DOC: TZ1090 adjustable PLL clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable.
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_tz1090_pll(_hw) container_of(_hw, struct clk_tz1090_pll, hw)
+
+static unsigned long clk_tz1090_pll_recalc_rate(struct clk_hw *hw,
+						unsigned long f_in)
+{
+	struct clk_tz1090_pll *pll = to_clk_tz1090_pll(hw);
+	u32 ctl0, ctl1;
+	unsigned int clk_f;	/* feedback divide */
+	unsigned int clk_od;	/* output divide */
+	unsigned int clk_r;	/* reference divide */
+	unsigned long f_out;
+
+	ctl0 = readl(pll->reg + PLL_CTL0);
+	ctl1 = readl(pll->reg + PLL_CTL1);
+
+	/* Bypass? */
+	if (ctl1 & PLL_CTL1_BYPASS_B)
+		return f_in;
+
+	/* Get divider values */
+	clk_f  = 1 + ((ctl0 >> PLL_CTL0_CLKF_S)  & PLL_CTL0_CLKF_M);
+	clk_od = 1 + ((ctl0 >> PLL_CTL0_CLKOD_S) & PLL_CTL0_CLKOD_M);
+	clk_r  = 1 + ((ctl1 >> PLL_CTL1_CLKR_S)  & PLL_CTL1_CLKR_M);
+
+	/*
+	 * formula:
+	 * f_out = (f_in / clk_r) * (clk_f / 2) / clk_od
+	 *       = (f_in * clk_f) / (2 * clk_r * clk_od)
+	 */
+	f_out = div_u64((u64)f_in * clk_f,
+			2 * clk_r * clk_od);
+	return f_out;
+}
+
+/* finds best pll parameters and returns rate on success (or 0) */
+static int clk_tz1090_pll_bestvals(struct clk_hw *hw, unsigned long parent_rate,
+				   unsigned long rate, unsigned long *clkf,
+				   unsigned long *clkr, unsigned long *clkod)
+{
+	unsigned long odmin, odmax;
+	unsigned long bestf = 1, bestr = 1, bestod = 1;
+	unsigned long rod2, cur, best = 0;
+	unsigned long f, r, od;
+
+	/* 120MHz/freq < od < 600MHz/freq */
+	odmin = 120000000/rate + 1;
+	odmax = 600000000/rate;
+
+	if (odmin < 1)
+		odmin = 1;
+	if (odmax > PLL_CTL0_CLKOD_M + 1)
+		odmax = PLL_CTL0_CLKOD_M + 1;
+
+	/*
+	 * Search through valid combinations of od and r, starting with lower
+	 * output divider values to get a lower intermediate frequency.
+	 */
+	for (od = odmin; od <= odmax; ++od) {
+		for (r = 1; r <= PLL_CTL1_CLKR_M + 1; ++r) {
+			/*
+			 * Calculate best f for given r and od, rounding down
+			 * So for f, freq <= rate
+			 * And for f+1, freq > rate
+			 * We have to do rate+1 because rate may have itself
+			 * been rounded down.
+			 */
+			rod2 = 2 * r * od;
+			f = div_u64((u64)(rate + 1) * rod2, parent_rate);
+			if (f < 1)
+				continue;
+			if (f > PLL_CTL0_CLKF_M + 1)
+				f = PLL_CTL0_CLKF_M + 1;
+
+			/* Calculate final rate and see if it's the best */
+			cur = div_u64((u64)parent_rate * f, rod2);
+			if (cur > best) {
+				bestf = f;
+				bestr = r;
+				bestod = od;
+				best = cur;
+				/* Can't improve on a perfect match */
+				if (cur == rate)
+					goto done;
+			}
+		}
+	}
+	if (!best)
+		return 0;
+done:
+	pr_debug("clk_tz1090_pll: final %lu/%lu * %lu/2/%lu=%lu (req=%lu, err=%ld)\n",
+		 parent_rate, bestr, bestf, bestod, best, rate, best - rate);
+
+	*clkf = bestf;
+	*clkr = bestr;
+	*clkod = bestod;
+	return best;
+}
+
+static long clk_tz1090_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				      unsigned long *prate)
+{
+	unsigned long clkf, clkr, clkod;
+	unsigned long parent_rate = *prate;
+
+	return clk_tz1090_pll_bestvals(hw, parent_rate, rate, &clkf, &clkr,
+				       &clkod);
+}
+
+static int clk_tz1090_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long parent_rate)
+{
+	struct clk_tz1090_pll *pll = to_clk_tz1090_pll(hw);
+	unsigned long clkf, clkr, clkod, bwadj;
+	u32 ctl0, ctl1;
+
+	if (!clk_tz1090_pll_bestvals(hw, parent_rate, rate,
+				     &clkf, &clkr, &clkod))
+		return -EINVAL;
+
+	/* offset the values ready to go in the PLL registers */
+	--clkr;
+	--clkf;
+	--clkod;
+	bwadj = clkf / 2;
+
+	/* bypass, reset and configure PLL */
+	ctl0 =	(bwadj << PLL_CTL0_BWADJ_S) |
+		(clkf  << PLL_CTL0_CLKF_S ) |
+		(clkod << PLL_CTL0_CLKOD_S);
+	ctl1 =	PLL_CTL1_RESET_B  |
+		PLL_CTL1_ENSAT_B  |
+		PLL_CTL1_BYPASS_B |
+		(clkr  << PLL_CTL1_CLKR_S);
+	writel(ctl1, pll->reg + PLL_CTL1);
+	writel(ctl0, pll->reg + PLL_CTL0);
+
+	/* allow 5us after clkf before deasserting reset */
+	udelay(5);
+
+	/* take PLL out of reset */
+	ctl1 &= ~PLL_CTL1_RESET_B;
+	writel(ctl1, pll->reg + PLL_CTL1);
+
+	/* count at least 500 divided ref clks to allow time to lock */
+	msleep(1 + 500*1000*(clkr+1)/parent_rate);
+
+	/* take PLL out of bypass */
+	ctl1 &= ~PLL_CTL1_BYPASS_B;
+	writel(ctl1, pll->reg + PLL_CTL1);
+
+	return 0;
+}
+
+static const struct clk_ops clk_tz1090_pll_ops = {
+	.recalc_rate = clk_tz1090_pll_recalc_rate,
+	.round_rate = clk_tz1090_pll_round_rate,
+	.set_rate = clk_tz1090_pll_set_rate,
+};
+
+/**
+ * clk_register_tz1090_pll - register a PLL with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ *
+ * Register a TZ1090 PLL clock to the clock framework.
+ */
+static struct clk *__init clk_register_tz1090_pll(struct device *dev,
+						  const char *name,
+						  const char *parent_name,
+						  unsigned long flags,
+						  void __iomem *reg)
+{
+	struct clk_tz1090_pll *div;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* allocate the divider */
+	div = kzalloc(sizeof(struct clk_tz1090_pll), GFP_KERNEL);
+	if (!div) {
+		pr_err("%s: could not allocate PLL clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &clk_tz1090_pll_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name: NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_tz1090_pll assignments */
+	div->reg = reg;
+	div->hw.init = &init;
+
+	/* register the clock */
+	clk = clk_register(dev, &div->hw);
+
+	if (IS_ERR(clk))
+		kfree(div);
+
+	return clk;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_tz1090_pll_setup() - Setup function for PLL in TZ1090
+ */
+static void __init of_tz1090_pll_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	void __iomem *reg;
+	const char *parent_name;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	if (!parent_name) {
+		pr_err("%s(%s): could not read parent clock\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("%s(%s): of_iomap failed\n",
+		       __func__, clk_name);
+		return;
+	}
+
+	clk = clk_register_tz1090_pll(NULL, clk_name, parent_name, 0, reg);
+	if (IS_ERR(clk))
+		goto err_iounmap;
+
+	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+	return;
+
+err_iounmap:
+	iounmap(reg);
+}
+CLK_OF_DECLARE(tz1090_pll_clk, "img,tz1090-pll", of_tz1090_pll_setup);
+#endif /* CONFIG_OF */
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index ade7513..6722f0e 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -184,6 +184,8 @@
 #ifdef CONFIG_METAG_META21
 	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
 #endif
+	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);
+
 	clocksource_register_hz(&clocksource_metag, hwtimer_freq);
 
 	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e992489..459ad94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -263,6 +263,28 @@
 	  Support the i.MX DMA engine. This engine is integrated into
 	  Freescale i.MX1/21/27 chips.
 
+config MDC_DMA
+	bool "MDC DMA support"
+	select DMA_ENGINE
+	help
+	  Support the Imagination Technologies Meta DMA Controller (MDC).
+
+config MDC_DMA_TESTS
+	depends on MDC_DMA
+	tristate "MDC DMA Test Client"
+	help
+	  Simple MDC DMA test client. Say N unless you're debugging
+	  the MDC DMA driver.
+
+config TZ1090_MDC_DMA
+	bool "TZ1090 specific callbacks for the MDC DMA driver"
+	depends on SOC_TZ1090
+	select MDC_DMA
+	default y
+	help
+	  This selects support for TZ1090 SoC specific callbacks for
+	  the MDC DMA controller.
+
 config MXS_DMA
 	bool "MXS DMA support"
 	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df5..e85281f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -24,6 +24,9 @@
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MDC_DMA) += img_mdc_dma.o
+obj-$(CONFIG_MDC_DMA_TESTS) += img_mdc_tests.o
+obj-$(CONFIG_TZ1090_MDC_DMA) += tz1090-mdc-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
diff --git a/drivers/dma/img_mdc_dma.c b/drivers/dma/img_mdc_dma.c
new file mode 100644
index 0000000..f49fbae
--- /dev/null
+++ b/drivers/dma/img_mdc_dma.c
@@ -0,0 +1,1578 @@
+/*
+ * IMG Meta DMA Controller (MDC) specific DMA code.
+ *
+ * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/img_mdc_dma.h>
+
+#include "dmaengine.h"
+
+#define MAX_MDC_DMA_CHANNELS 32
+#define MAX_MDC_DMA_BUSY_RETRY 5
+#define MDC_DMA_INT_ACTIVE (1<<8) /* INT_ACTIVE bit of Cmds Processed reg */
+
+DEFINE_SPINLOCK(mdc_dma_lock); /* guards wrapper_driver; NOTE(review): make static if unused externally */
+static struct device_driver *wrapper_driver; /* optional SoC wrapper driver */
+
+struct mdc_config_data {
+	int dma_threads; /* number of HW bus threads */
+	int dma_channels; /* number of DMA channels */
+	int bus_width; /* log2 of the system bus width in bits */
+};
+
+struct mdc_chan {
+	struct mdc_dmadev		*mdma; /* owning controller */
+	struct dma_chan			dchan; /* dmaengine channel */
+	spinlock_t			lock; /* protects lists and HW regs */
+	char				name[30]; /* IRQ name (request_irq) */
+	enum img_dma_channel_state	alloc_status; /* AVAILABLE/INUSE */
+	int				a_chan_nr; /* Channel NR */
+	int				irq; /* MDC IRQ */
+	int				periph; /* Peripheral NR */
+	int				thread; /* Thread for this channel */
+	/* virt/dma buffers for channel (MDC linked-list storage) */
+	void				*virt_addr;
+	dma_addr_t			dma_addr;
+	/* List of current DMA descriptors */
+	struct list_head		active_desc; /* Active descriptors */
+	struct list_head		free_desc; /* Used descriptors */
+	bool				sg; /* true for sg xfer */
+	bool				cyclic; /* true for cyclic xfer */
+	bool				is_list; /* list-based xfer */
+	bool				finished; /* xfer finished */
+	int				irq_en; /* MDC IRQ status */
+	/* Slave specific configuration */
+	struct dma_slave_config		dma_config; /* config for channel */
+	int				access_delay;
+	int				priority;
+	bool				skip_callback; /* one-shot: suppress next tasklet */
+	/* tasklet for channel */
+	struct tasklet_struct		tasklet; /* deferred work */
+};
+
+struct mdc_dmadev {
+	struct dma_device		dma_slave; /* dmaengine device */
+	void __iomem			*base_addr; /* MDC register base */
+	spinlock_t			lock; /* protects thread counters */
+	struct mdc_chan			slave_channel[MAX_MDC_DMA_CHANNELS];
+	struct mdc_config_data		config; /* probed HW configuration */
+	const struct img_mdc_soc_callbacks *callbacks; /* SoC alloc/free hooks */
+	int				last_fthread; /* Current fast thread */
+	int				last_sthread; /* Current slow thread */
+#ifdef CONFIG_PM_SLEEP
+	void				*pm_data; /* suspend/resume state */
+#endif
+};
+
+struct mdc_dma_desc {
+	struct dma_async_tx_descriptor	txd; /* dmaengine descriptor */
+	struct list_head		node; /* entry in active/free lists */
+	enum dma_status			status; /* DMA_IN_PROGRESS once submitted */
+	dma_addr_t			start_list; /* bus addr of first list item */
+	int				total_samples; /* completion units in xfer */
+	int				buffer_size; /* total bytes (cyclic) or sg entries */
+	int				sample_size; /* size of one completion unit */
+	int				sample_count; /* units completed so far */
+};
+
+/* Forward declaration for dma driver */
+static struct platform_driver img_mdc_dma_driver;
+static int mdc_terminate_all(struct dma_chan *chan);
+
+static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c) /* dchan -> mdc_chan */
+{
+	return container_of(c, struct mdc_chan, dchan);
+}
+
+static inline struct mdc_dma_desc *txd_to_mdc_desc(
+				struct dma_async_tx_descriptor *t) /* txd -> mdc_dma_desc */
+{
+	return container_of(t, struct mdc_dma_desc, txd);
+}
+
+static inline struct device *mchan2dev(struct mdc_chan *c) /* device for logging */
+{
+	return &c->dchan.dev->device;
+}
+
+/*
+ * Burst Size (expressed in bytes) must be equal to or greater than the
+ * system bus width for memory to memory accesses.
+ * So use a simple lookup to find the size in bytes based on the system bus
+ * width which is reported as log2 of the width in bits:
+ *              width (2^n)  0, 1, 2, 3, 4 , 5, 6, 7
+ *              width (bits) 1, 2, 4, 8, 16, 32,64,128
+ */
+static const unsigned burst_size_lookup[] = { 0, 0, 0, 1, 2, 4, 8, 16 };
+
+
+/*rate limit for warning message*/
+static DEFINE_RATELIMIT_STATE(rl_align_warn, 300 * HZ, 1); /* 1 per 5mins*/
+
+/*
+ * mdc_dma_filter_fn: Check if the DMA channel is free for allocation.
+ * @chan: DMA channel for allocation requested by the dmaengine.
+ * @param: struct mdc_dma_cookie holding the requested DMA channel (or -1
+ * for "any free channel") and the peripheral device number that is
+ * requesting the channel. On return, the req_channel member contains the
+ * channel that will be allocated by the MDC DMA device. This is useful
+ * when the caller passed -1 and wants to know which channel the DMA
+ * device will pick.
+ *
+ * This callback should be passed to dma_request_channel whenever it is used
+ * by a slave device.
+ */
+bool mdc_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct device_driver *driver;
+	spin_lock(&mdc_dma_lock);
+	driver	= (wrapper_driver) ? wrapper_driver :
+		&img_mdc_dma_driver.driver;
+	spin_unlock(&mdc_dma_lock);
+
+	if (chan->device->dev->driver == driver) {
+		struct mdc_chan *mchan = to_mdc_chan(chan);
+		struct mdc_dma_cookie *c = (struct mdc_dma_cookie *)param;
+		if (mchan->alloc_status == IMG_DMA_CHANNEL_AVAILABLE) {
+			/* Did the device request a specific channel? */
+			if ((c->req_channel > -1) &&
+			    (c->req_channel != mchan->a_chan_nr))
+				/* Wrong channel */
+				return false;
+			mchan->periph = c->periph;
+			c->req_channel = mchan->a_chan_nr;
+			return true;
+		}
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(mdc_dma_filter_fn);
+
+static struct dma_chan *of_dma_mdc_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct mdc_dma_cookie cookie;
+	dma_cap_mask_t cap;
+	int count = dma_spec->args_count;
+
+	/*
+	 * args[0] = peripheral id, args[1] = requested channel
+	 * (a two-cell DT DMA specifier)
+	 */
+	if (count != 2)
+		return NULL;
+
+	cookie.periph = dma_spec->args[0];
+	cookie.req_channel = dma_spec->args[1];
+
+	dma_cap_zero(cap);
+	dma_cap_set(DMA_SLAVE, cap);
+	dma_cap_set(DMA_CYCLIC, cap);
+
+	return dma_request_channel(cap, mdc_dma_filter_fn, &cookie); /* NULL if none free */
+}
+
+static int check_widths(struct mdc_dmadev *mdma, u32 address)
+{
+	/*
+	 *  check alignment, we can do accesses to/from unaligned address but
+	 *  we must set width_w and width_r appropriately and it will impact
+	 *  on performance
+	 */
+	int width = -1; /* -1 means "aligned, use full bus width" */
+	if (address & 0x1) { /*odd address: byte accesses*/
+		if (mdma->config.bus_width > 3) /*2^3 = 8bits = 1byte*/
+			width = 0; /*2^0 = 1 byte.*/
+	} else if (address & 0x2) { /*2-byte aligned: 16-bit accesses*/
+		if (mdma->config.bus_width > 4) /*2^4 = 16bits = 2 bytes*/
+			width = 1;
+	} else if (address & 0x4) { /*4-byte aligned: 32-bit accesses*/
+		if (mdma->config.bus_width > 5)
+			width = 2;
+	}
+
+	if (width < 0) { /*We are aligned*/
+
+		/*
+		 * system bus width is in log2(bits)
+		 * we need log2(bytes) so subtract 3
+		 */
+		width = mdma->config.bus_width - 3;
+	} else {
+		if (__ratelimit(&rl_align_warn))
+			dev_warn(mdma->dma_slave.dev,
+				 "Using address not aligned to system bus width, this will impact performance\n");
+	}
+
+	return width;
+}
+
+/**
+ * img_dma_reset() - resets a channel
+ * @mchan:	The channel to reset
+ *
+ * Resets a channel by clearing all of its context to zero
+ * Then sets up the default settings.
+ */
+static void img_dma_reset(struct mdc_chan *mchan)
+{
+	u32 genconf = 0;
+	u32 rpconf = 0;
+	int dma_channel = mchan->a_chan_nr;
+	int systembus_width = mchan->mdma->config.bus_width;
+
+	unsigned long mdc_base_address = (unsigned long)mchan->mdma->base_addr;
+
+	/*ensure probe has setup base address before we touch any registers*/
+	BUG_ON(!mdc_base_address);
+
+	MDC_REG_RESET_CONTEXT(mdc_base_address, dma_channel);
+
+	/*Setup General Config */
+
+	/*enable list interrupts*/
+	MDC_SET_FIELD(genconf, MDC_LIST_IEN, mchan->irq_en);
+	/*endian swap TODO make user configurable*/
+	MDC_SET_FIELD(genconf, MDC_BSWAP, 0);
+	/*enable interrupts*/
+	MDC_SET_FIELD(genconf, MDC_IEN, mchan->irq_en);
+	/*don't latch interrupts*/
+	MDC_SET_FIELD(genconf, MDC_LEVEL_INT, 1);
+	/* Physical channel.*/
+	MDC_SET_FIELD(genconf, MDC_CHANNEL, dma_channel);
+	/*256 cycle delay on burst accesses */
+	MDC_SET_FIELD(genconf, MDC_ACC_DEL, mchan->access_delay);
+	/* ?See manual? delays recognition of DREQ
+	 * until burst has reached the unpacker */
+	MDC_SET_FIELD(genconf, MDC_WAIT_UNPACK, 0);
+	/* Inc write address TODO make user configurable*/
+	MDC_SET_FIELD(genconf, MDC_INC_W, 1);
+	/* ?See manual? delays recognition of DREQ until burst
+	 * has reached the packer.*/
+	MDC_SET_FIELD(genconf, MDC_WAIT_PACK, 0);
+	/* Incr read address TODO make user configurable*/
+	MDC_SET_FIELD(genconf, MDC_INC_R, 1);
+	/* Should generally be set unless using a ram
+	 * narrower than the system bus*/
+	MDC_SET_FIELD(genconf, MDC_PHYSICAL_R, 1);
+	MDC_SET_FIELD(genconf, MDC_PHYSICAL_W, 1);
+	/* Note Read and Write widths get set when specifying direction. */
+
+	MDC_RSET_GENERAL_CONFIG(mdc_base_address, dma_channel, genconf);
+
+	/*Setup read port: */
+
+	/*
+	 * NJ:
+	 * We are going to split the channels equally across the number of
+	 * available threads in the DMA controller. Ideally we should assign
+	 * a different threads to peripherals with high latency than to those
+	 * without but we don't know what peripherals are attached, we could
+	 * give the user an interface to set this in a more advanced driver.
+	 *
+	 * Email from Paul Welton (did the hardware design) to NJ on 8/7/09:
+	 * " A different thread id should be used for peripherals with different
+	 *  latency characteristics. In the case of reads, the fabric guarantees
+	 *  that return data within a thread is returned in the same order as
+	 *  the read requests. Therefore, if one peripheral or memory is slow to
+	 *  return data, then return data from another peripheral or memory on
+	 *  the same thread will be blocked. If they are allocated different
+	 *  threads then the second one could continue independently.
+	 *
+	 *  There are also restrictions or bursts. Once a burst begins on one
+	 *  thread it must complete before any other burst can begin on the same
+	 *  thread. A burst can be blocked by the "READY" signal for that thread
+	 *  going low. Note that the READY, unlike the ENABLE, is provided on a
+	 *  per-thread basis. Therefore, even for writes, it is advantageous to
+	 *  place peripherals or memories which are likely to block on a
+	 *  different thread from other critical peripherals which should not
+	 *  be blocked."
+	 */
+
+	/*
+	 *  we split the available threads equally between channels
+	 *  so a 16 channel system with 2 threads, channels 0-7 will use
+	 *  thread 0 and channels 8-15 will use thread 1
+	 */
+
+	/*thread id used in tag for reads issued from list*/
+	MDC_SET_FIELD(rpconf, MDC_STHREAD, mchan->thread);
+	/*thread id used in tag for reads*/
+	MDC_SET_FIELD(rpconf, MDC_RTHREAD, mchan->thread);
+	/*thread id for writes (FIXME: sets MDC_STHREAD again - verify field)*/
+	MDC_SET_FIELD(rpconf, MDC_STHREAD, mchan->thread);
+
+	/*priority of transfers*/
+	MDC_SET_FIELD(rpconf, MDC_PRIORITY, mchan->priority);
+	/* no of clock cycles before recognising DREQ following end-of-burst(
+	 * at unpacker when WAIT_UNPACK=1)*/
+	MDC_SET_FIELD(rpconf, MDC_HOLD_OFF, 0);
+	/*burst size.*/
+	MDC_SET_FIELD(rpconf, MDC_BURST_SIZE,
+			(burst_size_lookup[systembus_width & 0x7] - 1));
+	/*DREQ signal use disabled by default*/
+	MDC_SET_FIELD(rpconf, MDC_DREQ_ENABLE, 0);
+	/*perform read back on last write of transaction.*/
+	MDC_SET_FIELD(rpconf, MDC_READBACK, 0);
+
+	MDC_RSET_READ_PORT_CONFIG(mdc_base_address, dma_channel, rpconf);
+
+	MDC_RSET_CMDS_PROCESSED(mdc_base_address, dma_channel, 0);
+
+	wmb();
+}
+
+/*
+ * mdc_handler_isr: channel IRQ handler - ack the MDC interrupt and,
+ * unless skip_callback was set for this transfer, schedule the tasklet
+ */
+static irqreturn_t mdc_handler_isr(int irq, void *chan_id)
+{
+
+	u32 irq_status;
+	struct mdc_chan *mchan = chan_id;
+
+	spin_lock(&mchan->lock);
+
+	irq_status = MDC_RGET_CMDS_PROCESSED((unsigned long)
+					     mchan->mdma->base_addr,
+					     mchan->a_chan_nr);
+
+	if (irq_status & MDC_DMA_INT_ACTIVE) {
+		/* reset irq */
+		MDC_RSET_CMDS_PROCESSED((unsigned long)
+					mchan->mdma->base_addr,
+					mchan->a_chan_nr, 0);
+		/* Skip tasklet? (one-shot flag set via MDC_NO_CALLBACK) */
+		if (mchan->skip_callback)
+			mchan->skip_callback = false;
+		else
+			/* Schedule the tasklet */
+			tasklet_schedule(&mchan->tasklet);
+	}
+
+	spin_unlock(&mchan->lock);
+
+	return IRQ_HANDLED; /* NOTE(review): handled even if INT_ACTIVE was clear */
+}
+
+/*
+ * mdc_dma_tasklet: deferred (post-IRQ) work for one DMA channel
+ * @data: struct mdc_chan pointer, cast to unsigned long
+ *
+ * Counts one completed period; when all are done, moves non-cyclic
+ * descriptors to the free list and runs the callback unlocked.
+ */
+static void mdc_dma_tasklet(unsigned long data)
+{
+	struct mdc_chan *mchan = (struct mdc_chan *)data;
+	struct mdc_dma_desc *desc;
+	unsigned long flags;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
+
+	spin_lock_irqsave(&mchan->lock, flags);
+	if (list_empty(&mchan->active_desc)) {
+		spin_unlock_irqrestore(&mchan->lock, flags);
+		return;
+	}
+
+	desc = list_first_entry(&mchan->active_desc, typeof(*desc), node);
+	if (++desc->sample_count == desc->total_samples) {
+		desc->sample_count = 0;
+		mchan->finished = true;
+		/* For cyclic, this descriptor will remain active */
+		if (!mchan->cyclic)
+			/* Move it back to the free list */
+			list_move_tail(&desc->node, &mchan->free_desc);
+	}
+	if (desc->txd.callback) {
+		callback = desc->txd.callback;
+		param = desc->txd.callback_param;
+	}
+	spin_unlock_irqrestore(&mchan->lock, flags);
+
+	/* We are safe to call the callback now */
+	if (callback)
+		callback(param);
+}
+
+/*
+ * mdc_dma_tx_submit: Start a DMA transfer for txd descriptor
+ * @txd: Descriptor for the DMA transfer
+ */
+static dma_cookie_t mdc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct mdc_dma_desc *dma_desc = txd_to_mdc_desc(txd);
+	struct mdc_chan *mchan = to_mdc_chan(txd->chan);
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	spin_lock_irqsave(&mchan->lock, flags);
+
+	cookie = dma_cookie_assign(&dma_desc->txd);
+	dma_desc->status = DMA_IN_PROGRESS;
+	/* Queue at the tail so descriptors complete in submission order */
+	list_add_tail(&dma_desc->node, &mchan->active_desc);
+
+	spin_unlock_irqrestore(&mchan->lock, flags);
+
+	dev_vdbg(txd->chan->device->dev,
+		"New DMA descriptor\n"
+		"Address           : 0x%p\n"
+		"Cookie            : 0x%08x\n"
+		"Channel           : %d\n"
+		"Callback function : 0x%p\n"
+		"Callback parameter: 0x%p\n",
+		dma_desc, cookie, mchan->a_chan_nr,
+		dma_desc->txd.callback,
+		dma_desc->txd.callback_param);
+
+	return cookie;
+}
+/*
+ * map_to_mdc_width: Convert a dma engine width to the MDC one
+ * @width: The dma_slave_buswidth value
+ */
+static enum img_dma_width map_to_mdc_width(enum dma_slave_buswidth width)
+{
+	/*
+	 * mchan->dma_config.dst_addr uses enum dma_slave_buswidth
+	 * Convert from dma_slave_buswidth to img_dma_width:
+	 * DMA_SLAVE_BUSWIDTH_1_BYTE,
+	 * DMA_SLAVE_BUSWIDTH_2_BYTES,
+	 * DMA_SLAVE_BUSWIDTH_4_BYTES,
+	 * DMA_SLAVE_BUSWIDTH_8_BYTES,
+	 * to
+	 * IMG_DMA_WIDTH_8,
+	 * IMG_DMA_WIDTH_16,
+	 * IMG_DMA_WIDTH_32,
+	 * IMG_DMA_WIDTH_64,
+	 * IMG_DMA_WIDTH_128,
+	 */
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return IMG_DMA_WIDTH_8;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return IMG_DMA_WIDTH_16;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return IMG_DMA_WIDTH_32;
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		return IMG_DMA_WIDTH_64;
+	default: /* wider or unrecognised widths fall back to 128-bit */
+		return IMG_DMA_WIDTH_128;
+	}
+}
+
+/*
+ * mdc_desc_init: Initialize an MDC transfer descriptor for a given channel
+ * @desc: DMA descriptor
+ * @mchan: DMA channel
+ * @flags: Transfer flags
+ *
+ * Hooks @desc up to @mchan and our tx_submit callback.
+ */
+static void mdc_desc_init(struct mdc_dma_desc *desc, struct mdc_chan *mchan,
+			  unsigned long flags)
+{
+	dma_async_tx_descriptor_init(&desc->txd, &mchan->dchan);
+	desc->txd.tx_submit = mdc_dma_tx_submit;
+	desc->txd.flags = flags; /* Ignore the MDC tx flags */
+
+	INIT_LIST_HEAD(&desc->node);
+}
+
+/*
+ * mdc_prep_irq_status: latch whether this transfer wants a completion IRQ
+ * @chan: The MDC channel
+ * @flags: Transfer flags (DMA_PREP_INTERRUPT)
+ */
+static void mdc_prep_irq_status(struct mdc_chan *chan, unsigned long flags)
+{
+	/* Disable/Enable (if needed) allocated IRQ for this channel */
+	if (!(flags & DMA_PREP_INTERRUPT))
+		chan->irq_en = 0;
+	else
+		chan->irq_en = 1;
+}
+
+/*
+ * mdc_get_desc: Prepare a used descriptor or allocate a new one
+ * @chan: The MDC channel
+ * @flags: Transfer flags
+ */
+static struct mdc_dma_desc *mdc_dma_get_desc(struct mdc_chan *chan,
+					     unsigned long flags)
+{
+	unsigned long irq_flags;
+	struct mdc_dma_desc *desc;
+
+	/* Find a suitable descriptor */
+	spin_lock_irqsave(&chan->lock, irq_flags);
+	list_for_each_entry(desc, &chan->free_desc, node) {
+		if (async_tx_test_ack(&desc->txd)) {
+			/* Found one. Delete it from the list */
+			list_del(&desc->node);
+			spin_unlock_irqrestore(&chan->lock, irq_flags);
+			desc->txd.flags = flags;
+			return desc;
+		}
+	}
+	/* We couldn't find a suitable descriptor */
+	spin_unlock_irqrestore(&chan->lock, irq_flags);
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT); /* prep may run in atomic context */
+	if (!desc) {
+		dev_err(mchan2dev(chan),
+			"Failed to allocate DMA descriptor\n");
+		return NULL;
+	}
+
+	mdc_desc_init(desc, chan, flags);
+
+	return desc;
+}
+/*
+ * alloc_thread_for_chan: Allocate a suitable thread for a given DMA channel
+ * @mchan: The MDC DMA channel
+ * @type: The type of thread to be allocated for this channel
+ */
+static void alloc_thread_for_chan(struct mdc_chan *mchan,
+				  enum mdc_dma_thread_type type)
+{
+	struct mdc_dmadev *mdma = mchan->mdma;
+	int total_threads = mdma->config.dma_threads;
+
+	dev_vdbg(mdma->dma_slave.dev,
+		 "Requested thread type %d for channel %d\n",
+		 type, mchan->a_chan_nr);
+
+	spin_lock(&mdma->lock);
+	if (type == MDC_THREAD_FAST) {
+		mdma->last_fthread = (mdma->last_fthread + 1) &
+			(total_threads / 2 - 1);
+		mchan->thread = total_threads / 2 + mdma->last_fthread;
+	} else if (type == MDC_THREAD_SLOW) {
+		mdma->last_sthread = (mdma->last_sthread + 1) &
+			(total_threads / 2 - 1);
+		mchan->thread = total_threads % 2 + mdma->last_sthread;
+	} else {
+		mdma->last_sthread = (mdma->last_sthread + 1) &
+			(total_threads / 2 - 1);
+		mchan->thread = total_threads % 2 + mdma->last_sthread;
+		dev_warn(mdma->dma_slave.dev,
+			 "Caller did not use a valid thread_type\n"
+			 "Defaulting to MDC_THREAD_SLOW\n");
+	}
+	spin_unlock(&mdma->lock);
+}
+
+/*
+ * parse_dma_chan_flags: configure @chan from MDC-specific xfer options
+ * passed by the client in chan->private (struct mdc_dma_tx_control)
+ */
+static void parse_dma_chan_flags(struct dma_chan *chan)
+{
+	struct mdc_dma_tx_control *tx_control;
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	if (chan->private) {
+		tx_control = (struct mdc_dma_tx_control *)chan->private;
+
+		if (tx_control->flags & MDC_PRIORITY)
+			mchan->priority = tx_control->prio;
+
+		if (tx_control->flags & MDC_ACCESS_DELAY)
+			mchan->access_delay = tx_control->access_delay;
+
+		if (tx_control->flags & MDC_NO_CALLBACK)
+			mchan->skip_callback = true;
+
+		if (tx_control->flags & MDC_NEED_THREAD)
+			alloc_thread_for_chan(mchan, tx_control->thread_type);
+	}
+}
+
+/*
+ * mdc_prep_memcpy: Prepare a descriptor for memory to memory transfers
+ * @chan: DMA channel
+ * @dst: Destination buffer
+ * @src: Source buffer
+ * @len: Total bytes to transfer
+ * @flags: DMA xfer flags
+ */
+static struct dma_async_tx_descriptor *mdc_prep_memcpy(struct dma_chan *chan,
+						       dma_addr_t dst,
+						       dma_addr_t src,
+						       size_t len,
+						       unsigned long flags)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma_desc *mdesc;
+	unsigned long irq_flags;
+	int width;
+	u32 genconf, rpconf;
+
+	mdc_prep_irq_status(mchan, flags);
+
+	mdesc = mdc_dma_get_desc(mchan, flags);
+
+	if (!mdesc)
+		return NULL;
+
+	mchan->is_list = false;
+	mchan->cyclic = false;
+	/* tx defaults for tx_status. single transfer */
+	mdesc->sample_count = 0;
+	mdesc->sample_size = 1;
+	mdesc->total_samples = mdesc->buffer_size = 1;
+
+	parse_dma_chan_flags(chan);
+
+	img_dma_reset(mchan);
+
+	spin_lock_irqsave(&mchan->lock, irq_flags);
+
+	genconf = MDC_RGET_GENERAL_CONFIG(
+			(unsigned long)mchan->mdma->base_addr,
+			mchan->a_chan_nr);
+
+	rpconf = MDC_RGET_READ_PORT_CONFIG(
+			(unsigned long)mchan->mdma->base_addr,
+			mchan->a_chan_nr);
+
+	/* Prepare src */
+	width = check_widths(mchan->mdma, src);
+	MDC_RSET_READ_ADDRESS((unsigned long)mchan->mdma->base_addr,
+			      mchan->a_chan_nr, src);
+	MDC_SET_FIELD(genconf, MDC_WIDTH_R, width);
+	MDC_SET_FIELD(genconf, MDC_INC_R, 1);
+	/* Prepare dst */
+	width = check_widths(mchan->mdma, dst);
+	MDC_RSET_WRITE_ADDRESS((unsigned long)mchan->mdma->base_addr,
+			       mchan->a_chan_nr, dst);
+	MDC_SET_FIELD(genconf, MDC_WIDTH_W, width);
+	MDC_SET_FIELD(genconf, MDC_INC_W, 1);
+	MDC_SET_FIELD(rpconf, MDC_DREQ_ENABLE, 0);
+
+	/* Set priority */
+	MDC_SET_FIELD(rpconf, MDC_PRIORITY, mchan->priority);
+
+	MDC_RSET_GENERAL_CONFIG((unsigned long)mchan->mdma->base_addr,
+				mchan->a_chan_nr, genconf);
+
+	MDC_RSET_READ_PORT_CONFIG((unsigned long)mchan->mdma->base_addr,
+				  mchan->a_chan_nr, rpconf);
+	MDC_RSET_TRANSFER_SIZE((unsigned long)mchan->mdma->base_addr,
+			       mchan->a_chan_nr, len - 1); /* HW takes size-1 */
+	wmb(); /* order register writes before releasing the lock */
+
+	spin_unlock_irqrestore(&mchan->lock, irq_flags);
+
+	return &mdesc->txd;
+}
+
+/*
+ * mdc_prep_dma_cyclic: Prepare @chan for a cyclic transfer
+ * @buf_addr: Source buffer
+ * @buf_len: Total bytes in the cyclic buffer
+ * @period_len: Bytes per period
+ * @direction: Transfer direction
+ * @flags: Transfer flags
+ * @context: context (unused)
+ */
+static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dma_desc *mdesc;
+	struct img_dma_mdc_list *dma_desc;
+	dma_addr_t list_base, next_list;
+	int width;
+
+	/* reject if EITHER length is zero (avoids endless do/while below) */
+	if (!buf_len || !period_len) {
+		dev_err(mchan2dev(mchan), "Invalid buffer/period len\n");
+		return NULL;
+	}
+
+
+	mdc_prep_irq_status(mchan, flags);
+
+	mdesc = mdc_dma_get_desc(mchan, flags);
+
+	if (!mdesc)
+		return NULL;
+
+	mdesc->sample_count = 0;
+	mdesc->total_samples = 0;
+	mdesc->sample_size = period_len;
+	mdesc->buffer_size = buf_len;
+	mchan->is_list = true;
+	mchan->cyclic = true;
+
+	parse_dma_chan_flags(chan);
+
+	dev_vdbg(mchan2dev(mchan), "DMA cyclic xfer setup:\n"
+		 "Peripheral dev  : %d\n"
+		 "DMA channel     : %d\n"
+		 "Buffer size     : %zu\n"
+		 "Period size     : %zu\n"
+		 "Direction       : %d\n"
+		 "Flags           : %lu\n"
+		 "Thread          : %d\n"
+		 "DMA Buffer(bus) : 0x%08llx\n",
+		 mchan->periph, mchan->a_chan_nr, buf_len,
+		 period_len, direction, flags, mchan->thread,
+		 (u64)buf_addr);
+
+	img_dma_reset(mchan);
+	/* This is for the MDC linked-list */
+	dma_desc = (struct img_dma_mdc_list *)mchan->virt_addr;
+	mdesc->start_list = list_base = next_list = mchan->dma_addr;
+
+	/* Hand back the DMA buffer to the CPU */
+	dma_sync_single_for_cpu(mchan->mdma->dma_slave.dev,
+				mchan->dma_addr,
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+	width = check_widths(mchan->mdma, buf_addr);
+
+	do {
+		next_list += sizeof(struct img_dma_mdc_list);
+
+		dma_desc->gen_conf = 0xB00000AA /* 2 byte width */
+			| ((mchan->a_chan_nr & 0x3f) << 20);
+
+		dma_desc->readport_conf = 0x00000002;
+		MDC_SET_FIELD(dma_desc->readport_conf, MDC_PRIORITY,
+			      mchan->priority);
+
+		if (direction == DMA_MEM_TO_DEV) {
+			dma_desc->gen_conf |= _MDC_INC_R_MASK;
+			dma_desc->read_addr = buf_addr;
+			dma_desc->write_addr = mchan->dma_config.dst_addr;
+			dma_desc->readport_conf |=
+				(mchan->dma_config.dst_maxburst
+				 & 0xFF) << 4;
+		} else {
+			dma_desc->gen_conf |= _MDC_INC_W_MASK;
+			dma_desc->read_addr = mchan->dma_config.src_addr;
+			dma_desc->write_addr = buf_addr;
+			dma_desc->readport_conf |=
+				(mchan->dma_config.src_maxburst
+				 & 0xFF) << 4;
+		}
+		dma_desc->xfer_size = period_len - 1;
+		dma_desc->node_addr = next_list;
+		dma_desc->cmds_done = 0;
+		dma_desc->ctrl_status = 0x11;
+
+		if (period_len > buf_len)
+			period_len = buf_len;
+
+		dma_desc++;
+		buf_addr += period_len;
+		mdesc->total_samples++;
+	} while (buf_len -= period_len);
+	/* Point back to the first item so we can get an infinite loop */
+	dma_desc[-1].node_addr = list_base;
+
+
+	/* we are done with the DMA buffer, give it back to the device */
+	dma_sync_single_for_device(mchan->mdma->dma_slave.dev,
+				   mchan->dma_addr,
+				   PAGE_SIZE,
+				   DMA_BIDIRECTIONAL);
+
+	return &mdesc->txd;
+}
+
+/*
+ * mdc_prep_slave_sg: Prepare a descriptor for sg transfer
+ * @chan: DMA channel
+ * @sgl: Scattergather list to transfer
+ * @sg_len: Size of the scattergather list
+ * @direction: Transfer direction
+ * @flags: Transfer flags
+ * @context: context
+ */
+static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dmadev *mdma = mchan->mdma;
+	struct mdc_dma_desc *mdesc = NULL;
+	struct device *dev = chan->device->dev;
+	struct scatterlist *sg;
+	struct img_dma_mdc_list *desc_list;
+	dma_addr_t list_base, next_list, addr;
+	int i, width, temp, burst_size_min, burst_size, req_width;
+	u32 len;
+
+	if (unlikely(!sg_len || !sgl || !mchan)) /* NOTE(review): mchan (container_of) can't be NULL */
+		return NULL;
+
+
+	mdc_prep_irq_status(mchan, flags);
+
+	mdesc = mdc_dma_get_desc(mchan, flags);
+
+	if (!mdesc)
+		return NULL;
+
+	mchan->is_list = true;
+	mchan->sg = true;
+	mdesc->sample_count = 0;
+	mdesc->sample_size = 1; /* single list item */
+	mdesc->total_samples = mdesc->buffer_size = sg_len;
+
+	parse_dma_chan_flags(chan);
+
+	dev_vdbg(dev, "DMA xfer setup:\n"
+		 "Peripheral dev  : %d\n"
+		 "DMA channel     : %d\n"
+		 "sg list         : 0x%p\n"
+		 "sg list length  : %d\n"
+		 "Direction       : %d\n"
+		 "Priority        : %d\n"
+		 "Thread          : %d\n"
+		 "Access Delay    : %d\n"
+		 "DMA Buffer      : 0x%p\n"
+		 "DMA Buffer(bus) : 0x%08llx\n",
+		 mchan->periph, mchan->a_chan_nr, sgl,
+		 sg_len, direction,
+		 mchan->priority, mchan->thread,
+		 mchan->access_delay,
+		 (u64 *)mchan->virt_addr,
+		 (u64)mchan->dma_addr);
+
+	img_dma_reset(mchan);
+
+	/* This is for the MDC linked-list */
+	desc_list = (struct img_dma_mdc_list *)mchan->virt_addr;
+	mdesc->start_list = list_base = next_list = mchan->dma_addr;
+
+	/* Hand back the DMA buffer to the CPU */
+	dma_sync_single_for_cpu(mchan->mdma->dma_slave.dev,
+				mchan->dma_addr,
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	burst_size_min = burst_size_lookup[mdma->config.bus_width & 0x7];
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		/*
+		 * Each list item is a 32-byte packet represented by the
+		 * img_dma_mdc_list struct. Every member of that struct
+		 * corresponds to the channel register
+		 */
+		next_list += sizeof(struct img_dma_mdc_list);
+		len = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		width = check_widths(mchan->mdma, addr);
+		desc_list->gen_conf = 0x30000088
+			| ((mchan->a_chan_nr & 0x3f) << 20)
+			| ((mchan->access_delay & 0x7) << 16);
+
+		temp = (mchan->thread & 0xf);
+		desc_list->readport_conf = 0x00000002 | temp << 2
+			| temp << 24 | temp << 16;
+
+		MDC_SET_FIELD(desc_list->readport_conf, MDC_PRIORITY,
+			      mchan->priority);
+
+		if (direction == DMA_MEM_TO_DEV) {
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_INC_R, 1);
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_R, width);
+			req_width = mchan->dma_config.dst_addr_width;
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_W,
+				      map_to_mdc_width(req_width));
+			desc_list->read_addr = addr;
+			desc_list->write_addr = mchan->dma_config.dst_addr;
+			burst_size = mchan->dma_config.dst_maxburst;
+			desc_list->readport_conf |=
+				(burst_size < burst_size_min)
+				? (burst_size_min - 1) << 4
+				: (burst_size - 1) << 4;
+		} else {
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_INC_W, 1);
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_W, width);
+			req_width = mchan->dma_config.src_addr_width;
+			MDC_SET_FIELD(desc_list->gen_conf, MDC_WIDTH_R,
+				      map_to_mdc_width(req_width));
+			desc_list->read_addr = mchan->dma_config.src_addr;
+			desc_list->write_addr = addr;
+			burst_size = mchan->dma_config.src_maxburst;
+			desc_list->readport_conf |=
+				(burst_size < burst_size_min)
+				? (burst_size_min - 1) << 4
+				: (burst_size - 1) << 4;
+		}
+
+		desc_list->xfer_size = len - 1;
+		desc_list->node_addr = next_list;
+		desc_list->cmds_done = 0;
+		desc_list->ctrl_status = 0x11;
+
+		desc_list++;
+	}
+
+	desc_list[-1].node_addr = 0; /* terminate the list */
+
+	/* we are done with the DMA buffer, give it back to the device */
+	dma_sync_single_for_device(mchan->mdma->dma_slave.dev,
+				   mchan->dma_addr,
+				   PAGE_SIZE,
+				   DMA_BIDIRECTIONAL);
+
+	return &mdesc->txd;
+}
+
+/*
+ * mdc_alloc_chan_resources: Allocate resources for an MDC channel
+ * @chan: The MDC channel
+ */
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct mdc_dmadev *mdma = mchan->mdma;
+	struct device *dev = chan->device->dev;
+	int total_threads = mdma->config.dma_threads;
+	int ret;
+
+	mchan->cyclic = false;
+	mchan->sg = false;
+	mchan->is_list = false;
+	mchan->finished = false;
+	mchan->irq_en = 1;
+	mchan->priority = 0; /* Assume bulk priority */
+	mchan->access_delay = 0; /* Assume fast peripheral */
+	mchan->skip_callback = false;
+	/* Clear private data from previous allocations */
+	mchan->dchan.private = NULL;
+	/* Defaults to slow threads (avoid UB of '++x % n' self-assignment) */
+	spin_lock(&mdma->lock);
+	mdma->last_sthread = (mdma->last_sthread + 1) % (total_threads / 2);
+	mchan->thread = (total_threads % 2) + mdma->last_sthread;
+	spin_unlock(&mdma->lock);
+
+	BUG_ON(!mdma->callbacks->allocate);
+
+	ret = mdma->callbacks->allocate(mchan->a_chan_nr, mchan->periph);
+
+	if (ret < 0) {
+		dev_err(dev,
+			"Failed to allocate channel %d for device %u with err=%d\n",
+			mchan->a_chan_nr, mchan->periph, ret);
+		return ret;
+	}
+
+	mchan->alloc_status = IMG_DMA_CHANNEL_INUSE;
+
+	dma_cookie_init(&mchan->dchan);
+
+	dev_dbg(dev, "DMA channel %d allocated for peripheral device %u\n",
+		 mchan->a_chan_nr, mchan->periph);
+
+	/* Reset channel before we request an IRQ for it */
+
+	img_dma_reset(mchan);
+
+	/* Allocate an IRQ for this channel */
+
+	mchan->irq = platform_get_irq(to_platform_device(dev),
+				      mchan->a_chan_nr);
+
+	ret = request_irq(mchan->irq, mdc_handler_isr, 0,
+			  mchan->name, mchan);
+
+	if (ret) {
+		dev_err(dev,
+			"Failed to allocate IRQ %d for channel %d\n",
+			mchan->irq, mchan->a_chan_nr);
+		return ret; /* propagate the request_irq() error */
+	}
+
+	dev_dbg(dev,
+		"IRQ %d (%s) allocated for channel %d\n",
+		mchan->irq, mchan->name, mchan->a_chan_nr);
+
+	/*
+	 * We need to allocate a DMA buffer for the MDC linked-list
+	 * operation
+	 */
+	mchan->virt_addr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!mchan->virt_addr) {
+		dev_err(mchan->mdma->dma_slave.dev,
+			"Failed to allocate memory for channel %d\n",
+			mchan->a_chan_nr);
+		ret = -ENOMEM;
+		goto free_irq;
+	}
+	/*
+	 * Since we don't know the direction yet, map using
+	 * DMA_BIDRECTIONAL to cover both cases
+	 */
+	mchan->dma_addr = dma_map_single(mchan->mdma->dma_slave.dev,
+					     mchan->virt_addr,
+					     PAGE_SIZE,
+					     DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(dev, mchan->dma_addr)) {
+		ret = -ENOMEM; /* was silently returning success here */
+		goto free_buf;
+	}
+	return 0;
+
+free_buf:
+	kfree(mchan->virt_addr);
+free_irq:
+	free_irq(mchan->irq, mchan); /* mapping failure must also release the IRQ */
+	return ret;
+}
+
+/*
+ * mdc_free_chan_resources: Free resources for an MDC channel
+ * @chan: The MDC DMA channel
+ */
+static void mdc_free_chan_resources(struct dma_chan *chan)
+{
+	struct mdc_chan *dchan = to_mdc_chan(chan);
+	struct mdc_dmadev *mdma = dchan->mdma;
+	struct device *dev = chan->device->dev;
+	int ret;
+
+	/* Stop transfers and remove descriptors */
+	mdc_terminate_all(chan);
+
+	BUG_ON(!mdma->callbacks->free);
+
+	ret = mdma->callbacks->free(dchan->a_chan_nr);
+	if (ret < 0) {
+		dev_err(dev,
+			"Failed to unregister channel %d for device %u\n",
+			dchan->a_chan_nr, dchan->periph);
+	} else {
+		dchan->alloc_status = IMG_DMA_CHANNEL_AVAILABLE;
+		free_irq(dchan->irq, dchan);
+
+		dma_unmap_single(dchan->mdma->dma_slave.dev,
+				 dchan->dma_addr, PAGE_SIZE,
+				 DMA_BIDIRECTIONAL);
+		kfree(dchan->virt_addr);
+
+		dchan->periph = 0;
+
+		dev_vdbg(dev,
+			 "DMA channel %d for device %u deallocated\n",
+			 dchan->a_chan_nr, dchan->periph);
+	}
+}
+
+/*
+ * slave_check_width: Check the slave bus width or default to a good one
+ * @chan: The MDC DMA channel
+ * @req_width: Requested width for transfer
+ */
+static int slave_check_width(struct mdc_chan *chan, int req_width)
+{
+	if (chan->mdma->config.bus_width < req_width) {
+		dev_err(chan->mdma->dma_slave.dev,
+			"Invalid transfer width\n"
+			"System    : %d\n"
+			"Requested : %d\n",
+			chan->mdma->config.bus_width,
+			req_width);
+		return chan->mdma->config.bus_width;
+	} else {
+		return req_width;
+	}
+}
+
/*
 * mdc_tx_status: Get DMA status for a given cookie
 * @chan: The MDC DMA channel
 * @cookie: Transfer cookie
 * @txstate: Struct containing the cookie status
 *
 * Reports the dmaengine status of @cookie. While a transfer is in
 * flight the residue is derived from the descriptor's sample counters.
 * When the channel runs with interrupts disabled, completed descriptors
 * are acked and moved to the free list here, since no ISR will do it.
 */
static enum dma_status mdc_tx_status(struct dma_chan *chan,
			 dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{

	struct mdc_chan *mchan = to_mdc_chan(chan);
	int dma_status, ret;
	int dma_retry = 0;
	int total_xfered, residue;
	unsigned long flags;
	struct mdc_dma_desc *desc, *safe;
	struct list_head *root = &mchan->active_desc;

	ret = dma_cookie_status(chan, cookie, txstate);

	/* Core already saw this cookie complete: nothing outstanding */
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		return ret;
	}

	/* Without an IRQ nothing else can flag completion, so poll below */
	if (!mchan->irq_en)
		mchan->finished = true;

	if (!mchan->cyclic && mchan->finished) {
		/* Bounded busy-wait for the hardware to go idle */
		do {
			dma_status = MDC_REG_IS_BUSY((unsigned long)
						     mchan->mdma->base_addr,
						     mchan->a_chan_nr);
			if (!dma_status)
				break;
			if (++dma_retry > MAX_MDC_DMA_BUSY_RETRY)
				return DMA_IN_PROGRESS;
		} while (1);
	}

	if (mchan->finished) {
		/*
		 * For cyclic or disabled irqs, we will
		 * look in the active list
		 */
		root = (mchan->irq_en && !mchan->cyclic) ?
			&mchan->free_desc : root;
		dma_status = DMA_SUCCESS;
	} else {
		dma_status = DMA_IN_PROGRESS;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(desc, safe, root, node) {
		if (desc->txd.cookie == cookie) {
			if (mchan->finished) {
				dma_set_residue(txstate, 0);
				if (!mchan->cyclic)
					dma_cookie_complete(&desc->txd);
				/*
				 * If IRQ is disabled, we need to move the
				 * descriptor to the free list and ack it now
				 */
				if (!mchan->irq_en) {
					async_tx_ack(&desc->txd);
					list_move_tail(&desc->node,
						       &mchan->free_desc);
				}
			} else {
				/*
				 * Still running: residue is the part of the
				 * buffer the sample counters haven't covered.
				 */
				total_xfered = desc->sample_count *
					desc->sample_size;
				residue = desc->buffer_size - total_xfered;
				dma_set_residue(txstate, residue);
			}
		}
	}

	/* Reset status */
	mchan->finished = false;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return dma_status;
}
+
+/*
+ * mdc_slave_config: Configure slave config for a DMA channel
+ * @mchan: The MDC DMA channel
+ * @config: The slave configuration passed by the caller
+ */
+static int mdc_slave_config(struct mdc_chan *mchan,
+			    struct dma_slave_config *config)
+{
+
+	if (config->direction == DMA_MEM_TO_DEV) {
+		config->dst_addr_width = slave_check_width(mchan,
+						   config->dst_addr_width);
+	} else if (config->direction == DMA_DEV_TO_MEM) {
+		config->src_addr_width = slave_check_width(mchan,
+						   config->src_addr_width);
+	} else {
+		dev_err(mchan->mdma->dma_slave.dev,
+			"Unsupported slave direction\n");
+		/*
+		 * The caller needs to be fixed
+		 */
+		BUG();
+		return -1;
+	}
+
+	/* Copy the rest of the slave config */
+	memcpy(&mchan->dma_config, config, sizeof(*config));
+
+	return 0;
+}
+
/*
 * mdc_terminate_all: Stop transfers and free lists
 * @chan: The MDC DMA channel
 *
 * Cancels the transfer in hardware, frees every descriptor on both the
 * free and active lists, and re-initializes the channel's cookie state.
 * All of this happens under the channel lock so it can't race with the
 * ISR/tasklet manipulating the same lists.
 */
static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *desc, *safe;
	unsigned long flags;

	/* Remove all descriptors */
	spin_lock_irqsave(&mchan->lock, flags);

	/* Tell the hardware to abort, and make sure the write has posted */
	MDC_CANCEL((unsigned long)mchan->mdma->base_addr,
			   mchan->a_chan_nr);
	wmb();

	/* Safe removal of list items */
	list_for_each_entry_safe(desc, safe, &mchan->free_desc, node) {
		list_del(&desc->node);
		kfree(desc);
	}
	/* Safe removal of list items */
	list_for_each_entry_safe(desc, safe, &mchan->active_desc, node) {
		list_del(&desc->node);
		kfree(desc);
	}

	/* Reset cookie for this channel */
	dma_cookie_init(chan);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
+
+/*
+ * mdc_control: Control cmds for the DMA channel
+ * @chan: DMA channel
+ * @cmd: Command (as passed by the dmaengine infrastracture)
+ * @arg: Opaque data. Can be anything depending on the cmd argument
+ */
+static int mdc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		       unsigned long arg)
+{
+	int ret = 0;
+	struct mdc_chan *mchan = to_mdc_chan(chan);
+	struct dma_slave_config *config = NULL;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		return mdc_terminate_all(chan);
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = mdc_slave_config(mchan, config);
+		break;
+	default:
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
/*
 * mdc_issue_pending: Make the actual transfer
 * @chan: The MDC DMA channel
 *
 * Kicks off the first descriptor on the active list, either by pointing
 * the hardware at a linked list of descriptors or by enabling a single
 * register-programmed transfer. A no-op if nothing is queued.
 */
static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&mchan->lock, flags);

	/* Make sure the xfer list is not empty */
	if (list_empty(&mchan->active_desc)) {
		spin_unlock_irqrestore(&mchan->lock, flags);
		return;
	}

	/* Fetch first descriptor */
	desc = list_first_entry(&mchan->active_desc,
				typeof(*desc), node);

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Make the transfer */
	if (mchan->is_list) {
		/* Point hardware at the list, then enable — order matters */
		MDC_RSET_LIST_NODE_ADDR((unsigned long)mchan->mdma->base_addr,
					mchan->a_chan_nr, desc->start_list);
		wmb();
		MDC_LIST_ENABLE((unsigned long)mchan->mdma->base_addr,
				mchan->a_chan_nr);
		dev_dbg(mchan->mdma->dma_slave.dev,
			"Starting list transfer for channel %d\n",
			mchan->a_chan_nr);
	} else { /* Single shot */
		MDC_REG_ENABLE((unsigned long)mchan->mdma->base_addr,
		       mchan->a_chan_nr);
		dev_dbg(mchan->mdma->dma_slave.dev,
			"Starting single transfer for channel %d\n",
			mchan->a_chan_nr);

	}
}
+
+/*
+ * mdc_dma_init: Initialize the dma_device structure.
+ * @dma: The dma_device structure to initialize.
+ * @dev: Device where the 'dma' structure belongs to.
+ */
+static void mdc_dma_init(struct mdc_dmadev *mdma, struct device *dev)
+{
+	mdma->dma_slave.chancnt = MAX_MDC_DMA_CHANNELS;
+	mdma->dma_slave.device_prep_slave_sg = mdc_prep_slave_sg;
+	mdma->dma_slave.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
+	mdma->dma_slave.device_prep_dma_memcpy = mdc_prep_memcpy;
+	mdma->dma_slave.device_alloc_chan_resources = mdc_alloc_chan_resources;
+	mdma->dma_slave.device_free_chan_resources = mdc_free_chan_resources;
+	mdma->dma_slave.device_tx_status = mdc_tx_status;
+	mdma->dma_slave.device_issue_pending = mdc_issue_pending;
+	mdma->dma_slave.device_control = mdc_control;
+	mdma->dma_slave.dev = dev;
+
+	INIT_LIST_HEAD(&mdma->dma_slave.channels);
+}
+
/*
 * mdc_get_current_config: Get current DMA configuration.
 * @mdma: MDC DMA device
 *
 * Reads channel count, thread count and bus width from the global MDC
 * registers and seeds the fast/slow thread allocation counters.
 * Returns 0 on success, -1 if the hardware reports no usable config.
 */
static int __init mdc_get_current_config(struct mdc_dmadev *mdma)
{
	unsigned long mdc_base_address = (unsigned long)mdma->base_addr;
	mdma->config.dma_channels = _MDC_READ_GLOBAL_REG_FIELD(mdc_base_address,
							MDC_NUM_CONTEXTS);
	mdma->config.dma_threads = 1 << _MDC_READ_GLOBAL_REG_FIELD(
							mdc_base_address,
							MDC_THREADID_WIDTH);
	mdma->config.bus_width = _MDC_READ_GLOBAL_REG_FIELD(mdc_base_address,
							MDC_SYS_DATA_WIDTH);

	/*
	 * NOTE(review): this only fails when BOTH fields are zero; if the
	 * intent is "either missing is fatal" the condition should use
	 * && instead of || — confirm against the hardware spec.
	 */
	if (!(mdma->config.bus_width || mdma->config.dma_channels))
		return -1;

	mdma->last_fthread = mdma->config.dma_threads / 2;
	mdma->last_sthread = mdma->config.dma_threads % 2;

	return 0;
}
+
/*
 * mdc_chan_init: Initialize all DMA channels
 * @mdma: MDC DMA device
 * @mchan: Array of DMA channels for this device
 *
 * Sets per-channel defaults, names, tasklets and descriptor lists, and
 * links each channel into the dma_device channel list. Channels beyond
 * the hardware-reported count are marked reserved.
 */
static void __init mdc_chan_init(struct mdc_dmadev *mdma,
				 struct mdc_chan *mchan)
{
	int i;
	for (i = 0; i < MAX_MDC_DMA_CHANNELS; i++) {
		struct mdc_chan *mdc_chan = &mchan[i];
		mdc_chan->mdma = mdma;
		mdc_chan->dchan.device = &mdma->dma_slave;
		mdc_chan->a_chan_nr = i;
		mdc_chan->periph = 0;
		/*
		 * NOTE(review): writes mdma->slave_channel[i] rather than
		 * mdc_chan->alloc_status; equivalent only because the sole
		 * caller passes mdma->slave_channel as @mchan — confirm.
		 */
		if (i < mdma->config.dma_channels)
			mdma->slave_channel[i].alloc_status =
				IMG_DMA_CHANNEL_AVAILABLE;
		else
			mdma->slave_channel[i].alloc_status =
				IMG_DMA_CHANNEL_RESERVED;
		snprintf(mdc_chan->name, sizeof(mdc_chan->name), "mdc-chan-%d",
			mdc_chan->a_chan_nr);
		/* init tasklet for this channel */
		tasklet_init(&mdc_chan->tasklet, mdc_dma_tasklet,
			     (unsigned long)mdc_chan);

		/* init the list of descriptors for this channel */
		INIT_LIST_HEAD(&mdc_chan->active_desc);
		INIT_LIST_HEAD(&mdc_chan->free_desc);

		/* Add channel to the DMA channel linked-list */
		list_add_tail(&mdc_chan->dchan.device_node,
			      &mdma->dma_slave.channels);
	}
}
+
+int mdc_dma_probe(struct platform_device *pdev,
+		  const struct img_mdc_soc_callbacks *callbacks)
+{
+	struct device *dev = &pdev->dev;
+	struct mdc_dmadev *mdma;
+	struct resource *mem_resource;
+	int status = 0;
+
+	/* Are we using a wrapper to initialize this driver? */
+	spin_lock(&mdc_dma_lock);
+	if (pdev->dev.driver != &img_mdc_dma_driver.driver)
+		wrapper_driver = pdev->dev.driver;
+	spin_unlock(&mdc_dma_lock);
+
+	if (!pdev->dev.of_node)
+		return -ENOENT;
+
+	mdma = devm_kzalloc(dev, sizeof(*mdma), GFP_KERNEL);
+	if (!mdma) {
+		dev_err(dev, "Can't allocate controller\n");
+		return -ENOMEM;
+	}
+
+	mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mdma->base_addr = devm_request_and_ioremap(dev, mem_resource);
+	if (!mdma->base_addr) {
+		dev_err(dev, "unable to ioremap registers\n");
+		status = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * Set DMA controller capabilities.
+	 * The controller can do DEV <-> MEM and MEM <-> MEM transfers.
+	 */
+	dma_cap_zero(mdma->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdma->dma_slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, mdma->dma_slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, mdma->dma_slave.cap_mask);
+
+	/* Set callbacks */
+	mdc_dma_init(mdma, dev);
+
+	/*
+	 * Set SoC callbacks. It's very unlikely
+	 * for the driver to work without SoC specific
+	 * alloc/free callbacks
+	 */
+	if (wrapper_driver)
+		BUG_ON(!callbacks);
+
+	mdma->callbacks = callbacks;
+
+	/* Get configuration */
+	if (mdc_get_current_config(mdma)) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	/* Initialize channels */
+	mdc_chan_init(mdma, mdma->slave_channel);
+
+	/* Register the device */
+	status = dma_async_device_register(&mdma->dma_slave);
+
+	if (status)
+		goto out;
+
+	platform_set_drvdata(pdev, mdma);
+
+	dev_dbg(dev, "MDC DMA hardware supports %d channels and %d threads\n",
+		mdma->config.dma_channels,
+		mdma->config.dma_threads);
+
+	status = of_dma_controller_register(pdev->dev.of_node,
+					    of_dma_mdc_xlate, mdma);
+
+	return 0;
+
+out:
+	kfree(mdma);
+	return status;
+}
+EXPORT_SYMBOL_GPL(mdc_dma_probe);
+
/* Direct (non-wrapped) probe: no SoC-specific callbacks available */
static int mdc_probe(struct platform_device *pdev)
{
	return mdc_dma_probe(pdev, NULL);
}
+
+/* stop hardware and remove the driver */
+static int mdc_remove(struct platform_device *pdev)
+{
+	platform_device_unregister(pdev);
+	return 0;
+}
+
+static const struct of_device_id mdc_dma_id[] = {
+		{ .compatible = "img,mdc-dma" },
+		{},
+};
+MODULE_DEVICE_TABLE(of, mdc_dma_id);
+
#ifdef CONFIG_PM_SLEEP

/* Delegate suspend to the SoC callback, stashing its opaque state */
static int img_mdc_dma_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdc_dmadev *mdma = platform_get_drvdata(pdev);

	if (mdma->callbacks->suspend)
		mdma->pm_data = mdma->callbacks->suspend();

	return 0;
}

/* Hand the saved state back to the SoC resume callback */
static int img_mdc_dma_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdc_dmadev *mdma = platform_get_drvdata(pdev);

	if (mdma->callbacks->resume)
		mdma->callbacks->resume(mdma->pm_data);
	return 0;
}
#else
#define img_mdc_dma_suspend NULL
#define img_mdc_dma_resume NULL
#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_mdc_dma_pm_ops = {
+	.suspend_noirq = img_mdc_dma_suspend,
+	.resume_noirq = img_mdc_dma_resume,
+};
+
+static struct platform_driver img_mdc_dma_driver = {
+	.driver	= {
+		.name	= "img-mdc-dma",
+		.owner	= THIS_MODULE,
+		.pm	= &img_mdc_dma_pm_ops,
+		.of_match_table = mdc_dma_id,
+	},
+	.remove		= mdc_remove,
+};
+
/* Register early (subsys_initcall) so client drivers find DMA ready */
static int __init mdc_init(void)
{
	return platform_driver_probe(&img_mdc_dma_driver, mdc_probe);
}
subsys_initcall(mdc_init);
+
+
/* Module unload: drop the platform driver registration */
static void mdc_exit(void)
{
	platform_driver_unregister(&img_mdc_dma_driver);
}
module_exit(mdc_exit);
+
+MODULE_ALIAS("img-mdc-dma");	/* for platform bus hotplug */
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Imagination Technologies LTD.");
+MODULE_DESCRIPTION("IMG - MDC DMA Controller");
diff --git a/drivers/dma/img_mdc_tests.c b/drivers/dma/img_mdc_tests.c
new file mode 100644
index 0000000..5deb725
--- /dev/null
+++ b/drivers/dma/img_mdc_tests.c
@@ -0,0 +1,269 @@
+/*
+ * img_mdc_tests.c
+ *
+ *  Created on: 15-Jul-2009
+ *  Modified on: 02-Apr-2013
+ *      Author: neil jones, markos chandras
+ *
+ *  Module to test the simple mdc_dma driver using the DMA Engine API.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/img_mdc_dma.h>
+
+MODULE_LICENSE("GPL");
+
+u8 *buffer1;
+u8 *buffer2;
+static dma_addr_t hw_address1;
+static dma_addr_t hw_address2;
+
+static int do_single_shot(int chan, u32 src, u32 dest, u32 size,
+		enum img_dma_priority prio)
+{
+	int channel;
+	struct dma_chan *dchan;
+	struct mdc_dma_cookie cookie;
+	dma_cap_mask_t mask;
+	unsigned long delay;
+	struct dma_device *dev;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t dma_cookie;
+	struct mdc_dma_tx_control tx_control;
+
+	tx_control.flags = MDC_PRIORITY|MDC_NEED_THREAD;
+	tx_control.thread_type = MDC_THREAD_FAST;
+	tx_control.prio = IMG_DMA_PRIO_BULK;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	cookie.periph = 0;
+	cookie.req_channel = chan;
+
+	dchan = dma_request_channel(mask, &mdc_dma_filter_fn, &cookie);
+
+	if (!dchan) {
+		pr_err("Failed to allocate channel %d\n",
+		       cookie.req_channel);
+		return -EBUSY;
+	}
+
+	channel = cookie.req_channel;
+
+	dev = dchan->device;
+
+	/* We store the desired priority in channel's private struct */
+
+	dchan->private = (void *)&tx_control;
+	/*
+	 * Never pass DMA_PREP_INTERRUPT to flags
+	 * Use the dmatest module to test this
+	 * codepath in the MDC driver.
+	 */
+	tx = dev->device_prep_dma_memcpy(dchan,
+					 dest, src,
+					 size,
+					 DMA_CTRL_ACK);
+	dma_cookie = dmaengine_submit(tx);
+	/* Make the transfer */
+	dma_async_issue_pending(dchan);
+
+	/*wait a little*/
+	udelay(100);
+
+	/*check for completion*/
+	delay = jiffies + size * HZ / 1000;
+	/*allow 1 ms per byte anything slower than this and
+	 you might as well move the data a bit at a time by hand!!*/
+	while (time_before(jiffies, delay) &&
+	       (dma_async_is_tx_complete(dchan, dma_cookie, NULL, NULL) ==
+		DMA_IN_PROGRESS))
+		cpu_relax();
+
+	if (dma_async_is_tx_complete(dchan, dma_cookie, NULL, NULL) ==
+	    DMA_IN_PROGRESS)
+		pr_err("mdc_tests: Single shot operation timed out\n");
+
+	dma_release_channel(dchan);
+
+	return 0;
+}
+
+#define TEST_BUFF_SIZE 0x2000
+
+struct test_data_tag {
+	u32 src_addr_offset;
+	u32 dest_addr_offset;
+	u32 size;
+	u8 fill_patten;
+	const char *test_name;
+};
+
+static const struct test_data_tag test_data[] = {
+{ 0x00, 0x01, 1, 0x55,
+		"single byte from word aligned addr to byte aligned addr" },
+{ 0x00, 0x02, 1, 0xA5,
+		"single byte from word aligned addr to short aligned addr" },
+{ 0x00, 0x04, 1, 0x5A,
+		"single byte from word aligned addr to word aligned addr" },
+{ 0x00, 0x08, 1, 0x11,
+		"single byte from word aligned addr to double aligned addr" },
+{ 0x01, 0x00, 1, 0x12,
+		"single byte from byte aligned addr to aligned addr" },
+{ 0x02, 0x00, 1, 0x13,
+		"single byte from short aligned addr to aligned addr" },
+{ 0x00, 0x01, 2, 0x14,
+		"2 bytes from word aligned addr to byte aligned addr" },
+{ 0x00, 0x02, 2, 0x15,
+		"2 bytes from word aligned addr to short aligned addr" },
+{ 0x00, 0x04, 2, 0x16,
+		"2 bytes from word aligned addr to word aligned addr" },
+{ 0x01, 0x00, 2, 0x17,
+		"2 bytes from byte aligned addr to word  aligned addr" },
+{ 0x02, 0x00, 2, 0x18,
+		"2 bytes from short aligned addr to word aligned addr" },
+{ 0x00, 0x01, 4, 0x19,
+		"4 bytes from word aligned addr to byte aligned addr" },
+{ 0x00, 0x02, 4, 0x1A,
+		"4 bytes from word aligned addr to short aligned addr" },
+{ 0x00, 0x04, 4, 0x1B,
+		"4 bytes from word aligned addr to word aligned addr" },
+{ 0x01, 0x00, 4, 0x1C,
+		"4 bytes from byte aligned addr to word  aligned addr" },
+{ 0x02, 0x00, 4, 0x1D,
+		"4 bytes from short aligned addr to word aligned addr" },
+{ 0x00, 0x100, 0x100, 0x1E,
+		"0x100 byte block, from/to aligned address" },
+{ 0x00, 0x101, 0x100, 0x1F,
+		"0x100 byte block, to odd address" },
+{ 0x01, 0x100, 0x100, 0x20,
+		"0x100 byte block, from odd address" },
+{ 0x00, 0x00, 0x1000, 0xDA, "big block" } };
+
+static const int test_data_size = ARRAY_SIZE(test_data);
+
/*
 * single_shot_address_range_tests: run the table of alignment tests.
 * @test_data: Array of test descriptions (offsets, size, fill pattern)
 * @no_of_tests: Number of entries in @test_data
 *
 * For each entry: fill the source, clear the destination, DMA between
 * them at the given offsets, then memcmp to verify. Results are logged.
 */
static void single_shot_address_range_tests(
		const struct test_data_tag *test_data, const int no_of_tests)
{
	int i;
	int passed = 0;

	pr_debug("mdc_tests: Running %d single shot address range tests\n",
		 no_of_tests);
	for (i = 0; i < no_of_tests; i++) {
		/*fill the source buffer with test pattern*/
		memset((u8 *) (buffer1 + test_data[i].src_addr_offset),
				test_data[i].fill_patten, test_data[i].size);
		/*clear destination buffer*/
		memset((u8 *) (buffer2 + test_data[i].dest_addr_offset), 0x00,
				test_data[i].size);

		/*do the access (-1 = let the driver pick the channel)*/
		if (!do_single_shot(-1,
				    hw_address1 + test_data[i].src_addr_offset,
				hw_address2 + test_data[i].dest_addr_offset,
				test_data[i].size, IMG_DMA_PRIO_BULK)) {

			/*test the result*/
			if (memcmp((u8 *) buffer1 +
				   test_data[i].src_addr_offset,
				   (u8 *) buffer2 +
				   test_data[i].dest_addr_offset,
				   test_data[i].size)) {
				pr_err("mdc_tests: Failing Test: %s\n",
				       test_data[i].test_name);
			} else
				passed++;
		}
	}
	if (passed == no_of_tests)
		pr_info("mdc_tests: All address range tests passed\n");

	else
		pr_err("mdc_tests: Address range tests: %d of %d tests passed\n",
		       passed, no_of_tests);

}
+
+static int single_shot_simple_test(int channel, enum img_dma_priority priority)
+{
+	/*fill the source buffer with test pattern*/
+	memset(buffer1, 0xA5, TEST_BUFF_SIZE);
+	/*clear destination buffer*/
+	memset(buffer2, 0x00, TEST_BUFF_SIZE);
+
+	/*do the transfer*/
+	if (!do_single_shot(channel, hw_address1, hw_address2,
+			TEST_BUFF_SIZE, priority))
+		/*test the result*/
+		return memcmp(buffer1, buffer2, TEST_BUFF_SIZE);
+	else
+		return -1;
+}
+
+#define MAX_CHANNELS 7
+#define START_CHANNEL 3 /* First channels are likely to be busy */
+static int __init mdc_tests_init(void)
+{
+	int passed = 0, i;
+
+	buffer1 = dma_alloc_coherent(NULL,
+				TEST_BUFF_SIZE, &hw_address1, GFP_KERNEL);
+	buffer2 = dma_alloc_coherent(NULL,
+				TEST_BUFF_SIZE, &hw_address2, GFP_KERNEL);
+
+	memset(buffer1, 0x00, TEST_BUFF_SIZE);
+	memset(buffer2, 0x00, TEST_BUFF_SIZE);
+
+	single_shot_address_range_tests(test_data, test_data_size);
+
+	for (i = START_CHANNEL; i < MAX_CHANNELS; i++) {
+		if (single_shot_simple_test(i, IMG_DMA_PRIO_BULK))
+			pr_err("mdc_tests: single shot test on channel %d failed\n",
+			       i);
+		else
+			passed++;
+
+	}
+	pr_info("mdc_tests: Per channel tests: %d of %d passed\n",
+		 passed, MAX_CHANNELS - START_CHANNEL);
+
+	passed = 0 ;
+
+	/* Pick a higher channel */
+	if (single_shot_simple_test(5, IMG_DMA_PRIO_BULK))
+		pr_err("mdc_tests: single shot test at bulk priority failed\n");
+	else
+		passed++;
+
+	if (single_shot_simple_test(5, IMG_DMA_PRIO_REALTIME))
+		pr_err("mdc_tests: single shot test at real time priority failed\n");
+	else
+		passed++;
+
+	pr_info("mdc_tests: Per priority tests: %d of 2 tests passed\n",
+		 passed);
+
+	return 0;
+
+}
+module_init(mdc_tests_init)
+
+static void __exit mdc_tests_exit(void)
+{
+	struct device dma_tester;
+	dma_free_coherent(&dma_tester, TEST_BUFF_SIZE,
+			(void *)buffer1, hw_address1);
+	dma_free_coherent(&dma_tester, TEST_BUFF_SIZE,
+			(void *)buffer2, hw_address2);
+}
+module_exit(mdc_tests_exit)
diff --git a/drivers/dma/tz1090-mdc-dma.c b/drivers/dma/tz1090-mdc-dma.c
new file mode 100644
index 0000000..f0a4657
--- /dev/null
+++ b/drivers/dma/tz1090-mdc-dma.c
@@ -0,0 +1,328 @@
+/*
+ * TZ1090 MDC DMA Specific Callbacks
+ *
+ * Copyright (C) 2010,2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/img_mdc_dma.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/global_lock.h>
#include <asm/soc-tz1090/defs.h>
+
+#define MAX_PERIPH_CHANNELS     14
+#define MAX_DMA_CHANNELS	8
+
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+static unsigned int dma_channels[MAX_DMA_CHANNELS] = {
+#ifdef CONFIG_SOC_COMET_DMA0
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA1
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA2
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA3
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA4
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA5
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA6
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+#ifdef CONFIG_SOC_COMET_DMA7
+	IMG_DMA_CHANNEL_AVAILABLE,
+#else
+	IMG_DMA_CHANNEL_RESERVED,
+#endif
+};
+
+static int get_free_channel(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		if (dma_channels[i] == IMG_DMA_CHANNEL_AVAILABLE)
+			return i;
+	}
+	return -1;
+}
+
/*
 * Route peripheral @periph to DMA channel @dmanr via the 4-bit-per-
 * channel mux in CR_PERIP_DMA_ROUTE_SEL2. The read-modify-write is done
 * under the SoC global lock since the register is shared.
 */
static void setup_dma_channel(int dmanr, unsigned int periph)
{
	int lstat;
	u32 mux_val;

	__global_lock2(lstat);
	mux_val = readl(CR_PERIP_DMA_ROUTE_SEL2_REG);

	mux_val &= ~(0xf << (dmanr*4));
	mux_val |= (periph & 0xf) << (dmanr*4);

	writel(mux_val, CR_PERIP_DMA_ROUTE_SEL2_REG);
	__global_unlock2(lstat);
}
+
+
+static int img_request_dma(int dmanr, unsigned int periph)
+{
+	unsigned long flags;
+	int err;
+
+	if (dmanr >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	if (periph > MAX_PERIPH_CHANNELS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma_spin_lock, flags);
+
+	/* If dmanr is -1 we pick the DMA channel to use. */
+	if (dmanr == -1) {
+		dmanr = get_free_channel();
+		if (dmanr == -1) {
+			err = -EBUSY;
+			goto out;
+		}
+	}
+
+	if (dma_channels[dmanr] != IMG_DMA_CHANNEL_AVAILABLE) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	dma_channels[dmanr] = IMG_DMA_CHANNEL_INUSE;
+
+	setup_dma_channel(dmanr, periph);
+
+	err = dmanr;
+out:
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+
+	return err;
+}
+
+static int img_free_dma(int dmanr)
+{
+	unsigned long flags;
+
+	if (dmanr >= MAX_DMA_CHANNELS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma_spin_lock, flags);
+
+	if (dma_channels[dmanr] == IMG_DMA_CHANNEL_INUSE)
+		dma_channels[dmanr] = IMG_DMA_CHANNEL_AVAILABLE;
+
+	setup_dma_channel(dmanr, 0);
+
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+
+	return 0;
+}
+
#ifdef CONFIG_METAG_SUSPEND_MEM

/* suspend state */

/* Capture the DMA route mux register; returned as opaque pm_data */
void *img_dma_suspend(void)
{
	return (void *)readl(CR_PERIP_DMA_ROUTE_SEL2_REG);
}

/* Restore the saved route mux, touching only unreserved channels */
void img_dma_resume(void *data)
{
	unsigned int lstat, i;
	u32 mux, mask = 0;
	u32 dma_channel_mux = (u32)data;

	/* only restore muxes of unreserved channels */
	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
		if (dma_channels[i] != IMG_DMA_CHANNEL_RESERVED)
			mask |= (0xf << (i*4));

	__global_lock2(lstat);
	mux = readl(CR_PERIP_DMA_ROUTE_SEL2_REG);
	mux &= ~mask;
	mux |= dma_channel_mux & mask;
	writel(mux, CR_PERIP_DMA_ROUTE_SEL2_REG);
	__global_unlock2(lstat);
}

#else
#define img_dma_suspend NULL
#define img_dma_resume NULL
#endif	/* CONFIG_METAG_SUSPEND_MEM */
+
+#ifdef CONFIG_PROC_FS
+
+static const char * const periph_names[] = {
+	"Unused",
+	"SDIO Wr",
+	"SDIO Rd",
+	"SPI Master0 Wr",
+	"SPI Master0 Rd",
+	"SPI Slave Wr",
+	"SPI Slave Rd",
+	"SPI Master1 Wr",
+	"SPI Master1 Rd",
+	"I2S Write",
+	"I2S Read",
+	"LCD",
+	"SDHOST Wr",
+	"SDHOST Rd",
+};
+
+static const char * const state_names[] = {
+	"Reserved",
+	"Available",
+	"In Use",
+};
+
+static void proc_dma_show_channels(struct seq_file *m)
+{
+	int i;
+
+	u32 mux_val = readl(CR_PERIP_DMA_ROUTE_SEL2_REG);
+
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		int periph = (mux_val >> (i*4)) & 0xF;
+		if (periph < sizeof(periph_names)) {
+			seq_printf(m, "Channel %2d: %s (%s)\n", i,
+				periph_names[periph],
+				state_names[dma_channels[i]]);
+		} else {
+			seq_printf(m, "Channel %2d: %d (%s)\n", i,
+				periph,
+				state_names[dma_channels[i]]);
+		}
+
+	}
+
+}
+
/* seq_file show callback for /proc/dma */
static int proc_dma_show(struct seq_file *m, void *v)
{
	proc_dma_show_channels(m);

	return 0;
}

/* open callback: single-shot seq_file, no private data */
static int proc_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_dma_show, NULL);
}
+
+static const struct file_operations proc_dma_operations = {
+	.open = proc_dma_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
/* Create /proc/dma; creation failure is non-fatal for the driver */
static int __init proc_dma_init(void)
{
	proc_create("dma", 0, NULL, &proc_dma_operations);
	return 0;
}

device_initcall(proc_dma_init);
+#endif
+
+static struct img_mdc_soc_callbacks comet_dma_callbacks = {
+	.allocate = img_request_dma,
+	.free = img_free_dma,
+	.suspend = img_dma_suspend,
+	.resume = img_dma_resume,
+};
+
+static const struct of_device_id tz1090_mdc_dma_match[] = {
+	{ .compatible = "img,tz1090-mdc-dma", .data = &comet_dma_callbacks, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, tz1090_mdc_dma_match);
+
+static int tz1090_mdc_dma_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct img_mdc_soc_callbacks *c;
+	int ret;
+
+	match = of_match_node(tz1090_mdc_dma_match, pdev->dev.of_node);
+	c = match->data;
+
+	ret = mdc_dma_probe(pdev, c);
+
+	dev_info(&pdev->dev,
+		 "tz1090 mdc dma callback driver probed successfully.\n");
+
+	return ret;
+};
+
/*
 * NOTE(review): calling platform_device_unregister(pdev) from the
 * driver's own remove callback recursively removes the device being
 * torn down — this should instead undo mdc_dma_probe() (unregister the
 * dmaengine/OF DMA registrations). Confirm against the MDC core.
 */
static int tz1090_mdc_dma_remove(struct platform_device *pdev)
{
	platform_device_unregister(pdev);
	return 0;
}
+
+static struct platform_driver tz1090_mdc_dma_pltfm_driver = {
+	.remove		= tz1090_mdc_dma_remove,
+	.driver		= {
+		.name		= "tz1090-mdc-dma",
+		.of_match_table	= tz1090_mdc_dma_match,
+	},
+};
+
/* Register early (subsys_initcall) so client drivers find DMA ready */
static int __init tz1090_mdc_dma_init(void)
{
	return platform_driver_probe(&tz1090_mdc_dma_pltfm_driver,
				     tz1090_mdc_dma_probe);
}
subsys_initcall(tz1090_mdc_dma_init);
+
+static void tz1090_mdc_dma_exit(void)
+{
+	return platform_driver_unregister(&tz1090_mdc_dma_pltfm_driver);
+}
+module_exit(tz1090_mdc_dma_exit);
+
+MODULE_DESCRIPTION("TZ1090 Specific MDC DMA Callbacks");
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("tz1090-mdc-dma");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 573c449..93e2d0b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -232,6 +232,18 @@
 	  blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
 	  LCD port.
 
+config GPIO_TZ1090
+	def_bool y
+	depends on SOC_TZ1090
+	help
+	  Say yes here to support Toumaz Xenif TZ1090 GPIOs.
+
+config GPIO_TZ1090_PDC
+	def_bool y
+	depends on SOC_TZ1090
+	help
+	  Say yes here to support Toumaz Xenif TZ1090 PDC GPIOs.
+
 config GPIO_XILINX
 	bool "Xilinx GPIO support"
 	depends on PPC_OF || MICROBLAZE
@@ -660,7 +672,7 @@
 comment "AC97 GPIO expanders:"
 
 config GPIO_UCB1400
-	bool "Philips UCB1400 GPIO"
+	tristate "Philips UCB1400 GPIO"
 	depends on UCB1400_CORE
 	help
 	  This enables support for the Philips UCB1400 GPIO pins.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0cb2d65..44341c7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -79,6 +79,8 @@
 obj-$(CONFIG_GPIO_TS5500)	+= gpio-ts5500.o
 obj-$(CONFIG_GPIO_TWL4030)	+= gpio-twl4030.o
 obj-$(CONFIG_GPIO_TWL6040)	+= gpio-twl6040.o
+obj-$(CONFIG_GPIO_TZ1090)	+= gpio-tz1090.o
+obj-$(CONFIG_GPIO_TZ1090_PDC)	+= gpio-tz1090-pdc.o
 obj-$(CONFIG_GPIO_UCB1400)	+= gpio-ucb1400.o
 obj-$(CONFIG_GPIO_VIPERBOARD)	+= gpio-viperboard.o
 obj-$(CONFIG_GPIO_VR41XX)	+= gpio-vr41xx.o
diff --git a/drivers/gpio/gpio-tz1090-pdc.c b/drivers/gpio/gpio-tz1090-pdc.c
new file mode 100644
index 0000000..29e9a6d
--- /dev/null
+++ b/drivers/gpio/gpio-tz1090-pdc.c
@@ -0,0 +1,330 @@
+/*
+ * Toumaz Xenif TZ1090 PDC GPIO handling.
+ *
+ * Copyright (C) 2012-2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/gpio.h>
+
+/* Register offsets from SOC_GPIO_CONTROL0 */
+#define REG_SOC_GPIO_CONTROL0	0x00
+#define REG_SOC_GPIO_CONTROL1	0x04
+#define REG_SOC_GPIO_CONTROL2	0x08
+#define REG_SOC_GPIO_CONTROL3	0x0c
+#define REG_SOC_GPIO_STATUS	0x80
+
+/* out of PDC gpios, only syswakes have irqs */
+#define NR_PDC_GPIO_IRQS	3
+
+/**
+ * struct tz1090_pdc_gpio - GPIO bank private data
+ * @chip:	Generic GPIO chip for GPIO bank
+ * @reg:	Base of registers, offset for this GPIO bank
+ * @irq:	IRQ numbers for Syswake GPIOs
+ *
+ * This is the main private data for the PDC GPIO driver. It encapsulates a
+ * gpio_chip, and the callbacks for the gpio_chip can access the private data
+ * with the to_pdc() macro below.
+ */
+struct tz1090_pdc_gpio {
+	struct gpio_chip chip;
+	void __iomem *reg;
+	int irq[NR_PDC_GPIO_IRQS];
+};
+#define to_pdc(c)	container_of(c, struct tz1090_pdc_gpio, chip)
+
+/* Register accesses into the PDC MMIO area */
+
+/* Write a 32-bit PDC GPIO register (reg_offs is relative to SOC_GPIO_CONTROL0) */
+static inline void pdc_write(struct tz1090_pdc_gpio *priv, unsigned int reg_offs,
+		      unsigned int data)
+{
+	writel(data, priv->reg + reg_offs);
+}
+
+/* Read a 32-bit PDC GPIO register (reg_offs is relative to SOC_GPIO_CONTROL0) */
+static inline unsigned int pdc_read(struct tz1090_pdc_gpio *priv,
+			     unsigned int reg_offs)
+{
+	return readl(priv->reg + reg_offs);
+}
+
+/* Generic GPIO interface */
+
+/*
+ * Make a PDC GPIO an input by setting its bit in SOC_GPIO_CONTROL1
+ * (set bit = input, cleared bit = output, see direction_output below).
+ */
+static int tz1090_pdc_gpio_direction_input(struct gpio_chip *chip,
+					   unsigned offset)
+{
+	struct tz1090_pdc_gpio *priv = to_pdc(chip);
+	u32 value;
+	int lstat;
+
+	/* LOCK2 protects the read-modify-write of the shared register */
+	__global_lock2(lstat);
+	value = pdc_read(priv, REG_SOC_GPIO_CONTROL1);
+	value |= BIT(offset);
+	pdc_write(priv, REG_SOC_GPIO_CONTROL1, value);
+	__global_unlock2(lstat);
+
+	return 0;
+}
+
+/*
+ * Set the initial output level (CONTROL0), then switch the pin to output
+ * by clearing its direction bit in CONTROL1.  Both writes happen under a
+ * single LOCK2 critical section so the pin never drives a stale value.
+ */
+static int tz1090_pdc_gpio_direction_output(struct gpio_chip *chip,
+					    unsigned offset, int output_value)
+{
+	struct tz1090_pdc_gpio *priv = to_pdc(chip);
+	u32 value;
+	int lstat;
+
+	__global_lock2(lstat);
+	/* EXT_POWER doesn't seem to have an output value bit */
+	if (offset < 6) {
+		value = pdc_read(priv, REG_SOC_GPIO_CONTROL0);
+		if (output_value)
+			value |= BIT(offset);
+		else
+			value &= ~BIT(offset);
+		pdc_write(priv, REG_SOC_GPIO_CONTROL0, value);
+	}
+
+	/* clear direction bit = output */
+	value = pdc_read(priv, REG_SOC_GPIO_CONTROL1);
+	value &= ~BIT(offset);
+	pdc_write(priv, REG_SOC_GPIO_CONTROL1, value);
+	__global_unlock2(lstat);
+
+	return 0;
+}
+
+/* Read the current pin level from SOC_GPIO_STATUS (non-zero = high) */
+static int tz1090_pdc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_pdc_gpio *priv = to_pdc(chip);
+	return pdc_read(priv, REG_SOC_GPIO_STATUS) & BIT(offset);
+}
+
+/* Set the output level of a PDC GPIO via SOC_GPIO_CONTROL0 */
+static void tz1090_pdc_gpio_set(struct gpio_chip *chip, unsigned offset,
+				int output_value)
+{
+	struct tz1090_pdc_gpio *priv = to_pdc(chip);
+	u32 value;
+	int lstat;
+
+	/* EXT_POWER doesn't seem to have an output value bit */
+	if (offset >= 6)
+		return;
+
+	/* LOCK2 protects the read-modify-write of the shared register */
+	__global_lock2(lstat);
+	value = pdc_read(priv, REG_SOC_GPIO_CONTROL0);
+	if (output_value)
+		value |= BIT(offset);
+	else
+		value &= ~BIT(offset);
+	pdc_write(priv, REG_SOC_GPIO_CONTROL0, value);
+	__global_unlock2(lstat);
+}
+
+/* Claim the pin from the pinctrl subsystem when it is requested as a GPIO */
+static int tz1090_pdc_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	return pinctrl_request_gpio(chip->base + offset);
+}
+
+/* Release the pin back to the pinctrl subsystem */
+static void tz1090_pdc_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+	pinctrl_free_gpio(chip->base + offset);
+}
+
+/*
+ * Map a PDC GPIO to a Linux IRQ number.  Only the syswake pins have
+ * interrupts; their IRQs were parsed from the devicetree at probe time.
+ * Returns -EINVAL for non-syswake pins or unmapped IRQs.
+ */
+static int tz1090_pdc_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_pdc_gpio *priv = to_pdc(chip);
+	unsigned int syswake = GPIO_PDC_PIN(offset) - GPIO_SYS_WAKE0;
+	int irq;
+
+	/* only syswakes have irqs */
+	if (syswake >= NR_PDC_GPIO_IRQS)
+		return -EINVAL;
+
+	irq = priv->irq[syswake];
+	if (!irq)
+		return -EINVAL;
+
+	return irq;
+}
+
+/* Bank handle kept for the syscore suspend/resume hooks below */
+struct tz1090_pdc_gpio *pdc_gpio;
+
+/*
+ * Probe the PDC GPIO bank described in the devicetree: map the register
+ * window, set up the gpio_chip callbacks, parse the syswake IRQs and
+ * register the chip with gpiolib.
+ */
+static int tz1090_pdc_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res_regs;
+	struct tz1090_pdc_gpio *priv;
+	unsigned int i;
+	int ret;
+
+	if (!np) {
+		dev_err(&pdev->dev, "must be instantiated via devicetree\n");
+		return -ENOENT;
+	}
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_regs) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		return -ENOENT;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "unable to allocate driver data\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Ioremap the registers.  resource_size() is end - start + 1; the
+	 * previous open-coded subtraction dropped the last byte of the
+	 * register window.
+	 */
+	priv->reg = devm_ioremap(&pdev->dev, res_regs->start,
+				 resource_size(res_regs));
+	if (!priv->reg) {
+		dev_err(&pdev->dev, "unable to ioremap registers\n");
+		return -ENOMEM;
+	}
+
+	/* Set up GPIO chip */
+	priv->chip.label		= "tz1090-pdc-gpio";
+	priv->chip.dev			= &pdev->dev;
+	priv->chip.direction_input	= tz1090_pdc_gpio_direction_input;
+	priv->chip.direction_output	= tz1090_pdc_gpio_direction_output;
+	priv->chip.get			= tz1090_pdc_gpio_get;
+	priv->chip.set			= tz1090_pdc_gpio_set;
+	priv->chip.free			= tz1090_pdc_gpio_free;
+	priv->chip.request		= tz1090_pdc_gpio_request;
+	priv->chip.to_irq		= tz1090_pdc_gpio_to_irq;
+	priv->chip.of_node		= np;
+
+	/* GPIO numbering */
+	priv->chip.base			= GPIO_PDC_PIN(0);
+	priv->chip.ngpio		= NR_PDC_GPIO;
+
+	/* Map the syswake irqs */
+	for (i = 0; i < NR_PDC_GPIO_IRQS; ++i)
+		priv->irq[i] = irq_of_parse_and_map(np, i);
+
+	/* Add the GPIO bank; gpiochip_add() can fail (e.g. base clash) */
+	ret = gpiochip_add(&priv->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to add GPIO chip\n");
+		return ret;
+	}
+
+	/* Only publish to the syscore hooks once registration succeeded */
+	pdc_gpio = priv;
+
+	return 0;
+}
+
+/* Devicetree match table */
+static struct of_device_id tz1090_pdc_gpio_of_match[] = {
+	{ .compatible = "img,tz1090-pdc-gpio" },
+	{ },
+};
+
+static struct platform_driver tz1090_pdc_gpio_driver = {
+	.driver = {
+		.name		= "tz1090-pdc-gpio",
+		.owner		= THIS_MODULE,
+		.of_match_table	= tz1090_pdc_gpio_of_match,
+	},
+	.probe		= tz1090_pdc_gpio_probe,
+};
+
+static int __init tz1090_pdc_gpio_init(void)
+{
+	return platform_driver_register(&tz1090_pdc_gpio_driver);
+}
+/* register early so GPIOs are available before ordinary drivers probe */
+postcore_initcall(tz1090_pdc_gpio_init);
+
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+/*
+ * For now we save and restore all GPIO setup.
+ */
+static const u32 tz1090_pdc_gpio_reg_masks[3] = {
+	0x000000ff,	/* SOC_GPIO_CONTROL0 */
+	0x0000007f,	/* SOC_GPIO_CONTROL1 */
+	0x3fff0000,	/* SOC_GPIO_CONTROL2 */
+
+};
+struct tz1090_pdc_gpio_state {
+	u32 values[ARRAY_SIZE(tz1090_pdc_gpio_reg_masks)];
+};
+
+static struct tz1090_pdc_gpio_state *tz1090_pdc_gpio_state;
+
+/*
+ * Save the PDC GPIO control registers before a deep sleep.
+ * Returns 0 on success (or if the driver never probed), -ENOMEM if the
+ * save buffer could not be allocated.
+ */
+static int tz1090_pdc_gpio_suspend(void)
+{
+	struct tz1090_pdc_gpio *priv = pdc_gpio;
+	unsigned long flags;
+	unsigned int i;
+	struct tz1090_pdc_gpio_state *state;
+
+	if (!priv)
+		return 0;
+
+	/* syscore suspend runs with interrupts disabled, hence GFP_ATOMIC */
+	state = kzalloc(sizeof(*state), GFP_ATOMIC);
+	if (!state)
+		return -ENOMEM;
+
+	__global_lock2(flags);
+	for (i = 0; i < ARRAY_SIZE(tz1090_pdc_gpio_reg_masks); ++i)
+		state->values[i] = pdc_read(priv, REG_SOC_GPIO_CONTROL0 + i*4);
+	__global_unlock2(flags);
+
+	tz1090_pdc_gpio_state = state;
+
+	return 0;
+}
+
+/* Restore the PDC GPIO control registers saved by suspend, then free them */
+static void tz1090_pdc_gpio_resume(void)
+{
+	struct tz1090_pdc_gpio *priv = pdc_gpio;
+	unsigned long flags;
+	unsigned int i;
+	u32 value;
+	struct tz1090_pdc_gpio_state *state = tz1090_pdc_gpio_state;
+
+	/* nothing to do if probe or the suspend-time allocation failed */
+	if (!priv || !state)
+		return;
+
+	__global_lock2(flags);
+	for (i = 0; i < ARRAY_SIZE(tz1090_pdc_gpio_reg_masks); ++i) {
+		/* restore only the bits we own, preserving the rest */
+		value = pdc_read(priv, REG_SOC_GPIO_CONTROL0 + i*4);
+		value &= ~tz1090_pdc_gpio_reg_masks[i];
+		value |= state->values[i] & tz1090_pdc_gpio_reg_masks[i];
+		pdc_write(priv, REG_SOC_GPIO_CONTROL0 + i*4, value);
+	}
+	__global_unlock2(flags);
+
+	tz1090_pdc_gpio_state = NULL;
+	kfree(state);
+}
+#else
+#define tz1090_pdc_gpio_suspend NULL
+#define tz1090_pdc_gpio_resume NULL
+#endif	/* CONFIG_METAG_SUSPEND_MEM */
+
+/*
+ * Syscore hooks to save/restore the PDC GPIO registers across suspend.
+ * Made static: it is only referenced by the register/unregister calls
+ * below (this also matches the equivalent ops in gpio-tz1090.c).
+ */
+static struct syscore_ops tz1090_pdc_gpio_syscore_ops = {
+	.suspend = tz1090_pdc_gpio_suspend,
+	.resume = tz1090_pdc_gpio_resume,
+};
+
+static int __init tz1090_pdc_gpio_syscore_init(void)
+{
+	register_syscore_ops(&tz1090_pdc_gpio_syscore_ops);
+	return 0;
+}
+
+static void __exit tz1090_pdc_gpio_syscore_exit(void)
+{
+	unregister_syscore_ops(&tz1090_pdc_gpio_syscore_ops);
+}
+
+module_init(tz1090_pdc_gpio_syscore_init);
+module_exit(tz1090_pdc_gpio_syscore_exit);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
new file mode 100644
index 0000000..5c42d61
--- /dev/null
+++ b/drivers/gpio/gpio-tz1090.c
@@ -0,0 +1,974 @@
+/*
+ * Toumaz Xenif TZ1090 GPIO handling.
+ *
+ * Copyright (C) 2008-2013 Imagination Technologies Ltd.
+ *
+ *  Based on ARM PXA code and others.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <asm/global_lock.h>
+#include <asm/soc-tz1090/defs.h>
+#include <asm/soc-tz1090/gpio.h>
+
+/* Register offsets from bank base address */
+#define REG_GPIO_DIR		0x00
+#define REG_GPIO_SELECT		0x10
+#define REG_GPIO_IRQ_PLRT	0x20
+#define REG_GPIO_IRQ_TYPE	0x30
+#define REG_GPIO_IRQ_EN		0x40
+#define REG_GPIO_IRQ_STS	0x50
+#define REG_GPIO_BIT_EN		0x60
+#define REG_GPIO_DIN		0x70
+#define REG_GPIO_DOUT		0x80
+#define REG_GPIO_PU_PD		0xa0
+
+/* REG_GPIO_IRQ_PLRT */
+#define REG_GPIO_IRQ_PLRT_LOW	0
+#define REG_GPIO_IRQ_PLRT_HIGH	1
+
+/* REG_GPIO_IRQ_TYPE */
+#define REG_GPIO_IRQ_TYPE_LEVEL	0
+#define REG_GPIO_IRQ_TYPE_EDGE	1
+
+struct comet_gpio_pullup {
+	unsigned char index;
+	unsigned char offset;
+};
+
+/**
+ * struct tz1090_gpio_bank - GPIO bank private data
+ * @chip:	Generic GPIO chip for GPIO bank
+ * @domain:	IRQ domain for GPIO bank (may be NULL)
+ * @reg:	Base of registers, offset for this GPIO bank
+ * @irq:	IRQ number for GPIO bank
+ * @label:	Debug GPIO bank label, used for storage of chip->label
+ *
+ * This is the main private data for a GPIO bank. It encapsulates a gpio_chip,
+ * and the callbacks for the gpio_chip can access the private data with the
+ * to_bank() macro below.
+ */
+struct tz1090_gpio_bank {
+	struct gpio_chip chip;
+	struct irq_domain *domain;
+	void __iomem *reg;
+	int irq;
+	char label[16];
+};
+#define to_bank(c)	container_of(c, struct tz1090_gpio_bank, chip)
+
+/**
+ * struct tz1090_gpio - Overall GPIO device private data
+ * @dev:	Device (from platform device)
+ * @reg:	Base of GPIO registers
+ *
+ * Represents the overall GPIO device. This structure is actually only
+ * temporary, and used during init.
+ */
+struct tz1090_gpio {
+	struct device *dev;
+	void __iomem *reg;
+};
+
+/**
+ * struct tz1090_gpio_bank_info - Temporary registration info for GPIO bank
+ * @priv:	Overall GPIO device private data
+ * @node:	Device tree node specific to this GPIO bank
+ * @index:	Index of bank in range 0-2
+ */
+struct tz1090_gpio_bank_info {
+	struct tz1090_gpio *priv;
+	struct device_node *node;
+	unsigned int index;
+};
+
+/* Convenience register accessors */
+/* Write a 32-bit register of this bank (reg_offs relative to bank base) */
+static inline void tz1090_gpio_write(struct tz1090_gpio_bank *bank,
+			      unsigned int reg_offs, u32 data)
+{
+	iowrite32(data, bank->reg + reg_offs);
+}
+
+/* Read a 32-bit register of this bank (reg_offs relative to bank base) */
+static inline u32 tz1090_gpio_read(struct tz1090_gpio_bank *bank,
+			    unsigned int reg_offs)
+{
+	return ioread32(bank->reg + reg_offs);
+}
+
+/* caller must hold LOCK2 */
+static inline void _tz1090_gpio_clear_bit(struct tz1090_gpio_bank *bank,
+					  unsigned int reg_offs,
+					  unsigned int offset)
+{
+	u32 value;
+
+	value = tz1090_gpio_read(bank, reg_offs);
+	value &= ~BIT(offset);
+	tz1090_gpio_write(bank, reg_offs, value);
+}
+
+/* Clear one bit of a bank register; takes LOCK2 around the RMW */
+static void tz1090_gpio_clear_bit(struct tz1090_gpio_bank *bank,
+				  unsigned int reg_offs,
+				  unsigned int offset)
+{
+	int lstat;
+
+	__global_lock2(lstat);
+	_tz1090_gpio_clear_bit(bank, reg_offs, offset);
+	__global_unlock2(lstat);
+}
+
+/* caller must hold LOCK2 */
+static inline void _tz1090_gpio_set_bit(struct tz1090_gpio_bank *bank,
+					unsigned int reg_offs,
+					unsigned int offset)
+{
+	u32 value;
+
+	value = tz1090_gpio_read(bank, reg_offs);
+	value |= BIT(offset);
+	tz1090_gpio_write(bank, reg_offs, value);
+}
+
+/* Set one bit of a bank register; takes LOCK2 around the RMW */
+static void tz1090_gpio_set_bit(struct tz1090_gpio_bank *bank,
+				unsigned int reg_offs,
+				unsigned int offset)
+{
+	int lstat;
+
+	__global_lock2(lstat);
+	_tz1090_gpio_set_bit(bank, reg_offs, offset);
+	__global_unlock2(lstat);
+}
+
+/* caller must hold LOCK2 */
+static inline void _tz1090_gpio_mod_bit(struct tz1090_gpio_bank *bank,
+					unsigned int reg_offs,
+					unsigned int offset,
+					bool val)
+{
+	u32 value;
+
+	value = tz1090_gpio_read(bank, reg_offs);
+	value &= ~BIT(offset);
+	if (val)
+		value |= BIT(offset);
+	tz1090_gpio_write(bank, reg_offs, value);
+}
+
+/* Write one bit of a bank register to val; takes LOCK2 around the RMW */
+static void tz1090_gpio_mod_bit(struct tz1090_gpio_bank *bank,
+				unsigned int reg_offs,
+				unsigned int offset,
+				bool val)
+{
+	int lstat;
+
+	__global_lock2(lstat);
+	_tz1090_gpio_mod_bit(bank, reg_offs, offset, val);
+	__global_unlock2(lstat);
+}
+
+/* Read one bit of a bank register (non-zero = set); no locking needed */
+static inline int tz1090_gpio_read_bit(struct tz1090_gpio_bank *bank,
+				       unsigned int reg_offs,
+				       unsigned int offset)
+{
+	return tz1090_gpio_read(bank, reg_offs) & BIT(offset);
+}
+
+/* GPIO chip callbacks */
+
+/* Make a GPIO an input by setting its DIR bit (set = input) */
+static int tz1090_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+	tz1090_gpio_set_bit(bank, REG_GPIO_DIR, offset);
+
+	return 0;
+}
+
+/*
+ * Set the initial output level, then clear the DIR bit to switch the pin
+ * to output.  Both writes are done under a single LOCK2 critical section
+ * (using the lockless helpers) so the pin never drives a stale value.
+ */
+static int tz1090_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset, int output_value)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+	int lstat;
+
+	__global_lock2(lstat);
+	_tz1090_gpio_mod_bit(bank, REG_GPIO_DOUT, offset, output_value);
+	_tz1090_gpio_clear_bit(bank, REG_GPIO_DIR, offset);
+	__global_unlock2(lstat);
+
+	return 0;
+}
+
+/*
+ * Return GPIO level
+ */
+static int tz1090_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+
+	return tz1090_gpio_read_bit(bank, REG_GPIO_DIN, offset);
+}
+
+/*
+ * Set output GPIO level
+ */
+static void tz1090_gpio_set(struct gpio_chip *chip, unsigned offset,
+			    int output_value)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+
+	tz1090_gpio_mod_bit(bank, REG_GPIO_DOUT, offset, output_value);
+}
+
+/*
+ * Claim the pin from pinctrl, then default it to a (safe) input and
+ * enable the GPIO function for the pin (BIT_EN).
+ */
+static int tz1090_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+	int ret;
+
+	ret = pinctrl_request_gpio(chip->base + offset);
+	if (ret)
+		return ret;
+
+	tz1090_gpio_set_bit(bank, REG_GPIO_DIR, offset);
+	tz1090_gpio_set_bit(bank, REG_GPIO_BIT_EN, offset);
+
+	return 0;
+}
+
+/* Release the pin: hand it back to pinctrl and disable its GPIO function */
+static void tz1090_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+
+	pinctrl_free_gpio(chip->base + offset);
+
+	tz1090_gpio_clear_bit(bank, REG_GPIO_BIT_EN, offset);
+}
+
+/*
+ * Map a GPIO to a Linux IRQ via the bank's irq domain.  Fails with
+ * -EINVAL if the bank has no domain (no bank interrupt was provided).
+ */
+static int tz1090_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct tz1090_gpio_bank *bank = to_bank(chip);
+
+	if (!bank->domain)
+		return -EINVAL;
+
+	return irq_create_mapping(bank->domain, offset);
+}
+
+static struct tz1090_gpio_bank *comet_gpio_chip[3];
+
+/* The mapping of GPIO to pull register index and offset */
+static struct comet_gpio_pullup gpio_pullup_table[NR_BUILTIN_GPIO] = {
+	{5, 22}, /*  0 - GPIO_SDIO_CLK */
+	{0, 14}, /*  1 - GPIO_SDIO_CMD */
+	{0,  6}, /*  2 - GPIO_SDIO_D0 */
+	{0,  8}, /*  3 - GPIO_SDIO_D1 */
+	{0, 10}, /*  4 - GPIO_SDIO_D2 */
+	{0, 12}, /*  5 - GPIO_SDIO_D3 */
+	{0,  2}, /*  6 - GPIO_SDH_CD */
+	{0,  4}, /*  7 - GPIO_SDH_WP */
+	{0, 16}, /*  8 - GPIO_SPI0_MCLK */
+	{0, 18}, /*  9 - GPIO_SPI0_CS0 */
+	{0, 20}, /* 10 - GPIO_SPI0_CS1 */
+	{0, 22}, /* 11 - GPIO_SPI0_CS2 */
+	{0, 24}, /* 12 - GPIO_SPI0_DOUT */
+	{0, 26}, /* 13 - GPIO_SPI0_DIN */
+	{0, 28}, /* 14 - GPIO_SPI1_MCLK */
+	{0, 30}, /* 15 - GPIO_SPI1_CS0 */
+	{1,  0}, /* 16 - GPIO_SPI1_CS1 */
+	{1,  2}, /* 17 - GPIO_SPI1_CS2 */
+	{1,  4}, /* 18 - GPIO_SPI1_DOUT */
+	{1,  6}, /* 19 - GPIO_SPI1_DIN */
+	{1,  8}, /* 20 - GPIO_UART0_RXD */
+	{1, 10}, /* 21 - GPIO_UART0_TXD */
+	{1, 12}, /* 22 - GPIO_UART0_CTS */
+	{1, 14}, /* 23 - GPIO_UART0_RTS */
+	{1, 16}, /* 24 - GPIO_UART1_RXD */
+	{1, 18}, /* 25 - GPIO_UART1_TXD */
+	{1, 20}, /* 26 - GPIO_SCB0_SDAT */
+	{1, 22}, /* 27 - GPIO_SCB0_SCLK */
+	{1, 24}, /* 28 - GPIO_SCB1_SDAT */
+	{1, 26}, /* 29 - GPIO_SCB1_SCLK */
+
+	{1, 28}, /* 30 - GPIO_SCB2_SDAT */
+	{1, 30}, /* 31 - GPIO_SCB2_SCLK */
+	{2,  0}, /* 32 - GPIO_I2S_MCLK */
+	{2,  2}, /* 33 - GPIO_I2S_BCLK_OUT */
+	{2,  4}, /* 34 - GPIO_I2S_LRCLK_OUT */
+	{2,  6}, /* 35 - GPIO_I2S_DOUT0 */
+	{2,  8}, /* 36 - GPIO_I2S_DOUT1 */
+	{2, 10}, /* 37 - GPIO_I2S_DOUT2 */
+	{2, 12}, /* 38 - GPIO_I2S_DIN */
+	{4, 12}, /* 39 - GPIO_PDM_A */
+	{4, 14}, /* 40 - GPIO_PDM_B */
+	{4, 18}, /* 41 - GPIO_PDM_C */
+	{4, 20}, /* 42 - GPIO_PDM_D */
+	{2, 14}, /* 43 - GPIO_TFT_RED0 */
+	{2, 16}, /* 44 - GPIO_TFT_RED1 */
+	{2, 18}, /* 45 - GPIO_TFT_RED2 */
+	{2, 20}, /* 46 - GPIO_TFT_RED3 */
+	{2, 22}, /* 47 - GPIO_TFT_RED4 */
+	{2, 24}, /* 48 - GPIO_TFT_RED5 */
+	{2, 26}, /* 49 - GPIO_TFT_RED6 */
+	{2, 28}, /* 50 - GPIO_TFT_RED7 */
+	{2, 30}, /* 51 - GPIO_TFT_GREEN0 */
+	{3,  0}, /* 52 - GPIO_TFT_GREEN1 */
+	{3,  2}, /* 53 - GPIO_TFT_GREEN2 */
+	{3,  4}, /* 54 - GPIO_TFT_GREEN3 */
+	{3,  6}, /* 55 - GPIO_TFT_GREEN4 */
+	{3,  8}, /* 56 - GPIO_TFT_GREEN5 */
+	{3, 10}, /* 57 - GPIO_TFT_GREEN6 */
+	{3, 12}, /* 58 - GPIO_TFT_GREEN7 */
+	{3, 14}, /* 59 - GPIO_TFT_BLUE0 */
+
+	{3, 16}, /* 60 - GPIO_TFT_BLUE1 */
+	{3, 18}, /* 61 - GPIO_TFT_BLUE2 */
+	{3, 20}, /* 62 - GPIO_TFT_BLUE3 */
+	{3, 22}, /* 63 - GPIO_TFT_BLUE4 */
+	{3, 24}, /* 64 - GPIO_TFT_BLUE5 */
+	{3, 26}, /* 65 - GPIO_TFT_BLUE6 */
+	{3, 28}, /* 66 - GPIO_TFT_BLUE7 */
+	{3, 30}, /* 67 - GPIO_TFT_VDDEN_GD */
+	{4,  0}, /* 68 - GPIO_TFT_PANELCLK */
+	{4,  2}, /* 69 - GPIO_TFT_BLANK_LS */
+	{4,  4}, /* 70 - GPIO_TFT_VSYNC_NS */
+	{4,  6}, /* 71 - GPIO_TFT_HSYNC_NR */
+	{4,  8}, /* 72 - GPIO_TFT_VD12ACB */
+	{4, 10}, /* 73 - GPIO_TFT_PWRSAVE */
+	{4, 24}, /* 74 - GPIO_TX_ON */
+	{4, 26}, /* 75 - GPIO_RX_ON */
+	{4, 28}, /* 76 - GPIO_PLL_ON */
+	{4, 30}, /* 77 - GPIO_PA_ON */
+	{5,  0}, /* 78 - GPIO_RX_HP */
+	{5,  6}, /* 79 - GPIO_GAIN0 */
+	{5,  8}, /* 80 - GPIO_GAIN1 */
+	{5, 10}, /* 81 - GPIO_GAIN2 */
+	{5, 12}, /* 82 - GPIO_GAIN3 */
+	{5, 14}, /* 83 - GPIO_GAIN4 */
+	{5, 16}, /* 84 - GPIO_GAIN5 */
+	{5, 18}, /* 85 - GPIO_GAIN6 */
+	{5, 20}, /* 86 - GPIO_GAIN7 */
+	{5,  2}, /* 87 - GPIO_ANT_SEL0 */
+	{5,  4}, /* 88 - GPIO_ANT_SEL1 */
+	{0,  0}, /* 89 - GPIO_SDH_CLK_IN */
+};
+
+/*
+ * Find which registered bank a global GPIO number belongs to.
+ * Returns the bank index (0-2), or -EINVAL if no bank covers the number.
+ */
+static int comet_gpio_to_chip(unsigned int gpio)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(comet_gpio_chip); i++) {
+		if (!comet_gpio_chip[i])
+			continue;
+		if (gpio >= comet_gpio_chip[i]->chip.base &&
+		    gpio < (comet_gpio_chip[i]->chip.base +
+			    comet_gpio_chip[i]->chip.ngpio)) {
+				return i;
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * Clear the pin's bit in REG_GPIO_SELECT (presumably deselecting the
+ * GPIO function so the pin reverts to its peripheral - confirm against
+ * the TRM).  Returns 0 on success, -EINVAL for an unknown GPIO number.
+ */
+int comet_gpio_disable(unsigned int gpio)
+{
+	struct tz1090_gpio_bank *bank = NULL;
+	unsigned int offset = 0;
+	int idx;
+
+	idx = comet_gpio_to_chip(gpio);
+
+	if (idx < 0)
+		return -EINVAL;
+
+	bank = comet_gpio_chip[idx];
+	offset = gpio - bank->chip.base;
+
+	tz1090_gpio_clear_bit(bank, REG_GPIO_SELECT, offset);
+
+	return 0;
+}
+EXPORT_SYMBOL(comet_gpio_disable);
+
+/*
+ * Set the pin's bit in REG_GPIO_SELECT (presumably selecting the GPIO
+ * function for the pin).  Returns 0 on success, -EINVAL for an unknown
+ * GPIO number.
+ */
+int comet_gpio_enable(unsigned int gpio)
+{
+	struct tz1090_gpio_bank *bank = NULL;
+	unsigned int offset = 0;
+	int idx;
+
+	idx = comet_gpio_to_chip(gpio);
+
+	if (idx < 0)
+		return -EINVAL;
+
+	bank = comet_gpio_chip[idx];
+	offset = gpio - bank->chip.base;
+
+	tz1090_gpio_set_bit(bank, REG_GPIO_SELECT, offset);
+
+	return 0;
+}
+EXPORT_SYMBOL(comet_gpio_enable);
+
+
+/*
+ * Clear the SELECT bits for every GPIO in [first, last] within a single
+ * LOCK2 critical section (hence the lockless _tz1090_gpio_clear_bit()).
+ * Returns the number of pins processed, or -EINVAL if any GPIO in the
+ * range does not map to a bank (pins before it remain modified).
+ */
+int comet_gpio_disable_block(unsigned int first, unsigned int last)
+{
+	struct tz1090_gpio_bank *bank = NULL;
+	unsigned int offset = 0;
+	int gpio;
+	int idx;
+	int lstat;
+	int result = 0;
+
+	__global_lock2(lstat);
+	for (gpio = first; gpio <= last; ++gpio) {
+		idx = comet_gpio_to_chip(gpio);
+
+		if (idx < 0) {
+			result = -EINVAL;
+			goto out;
+		}
+		++result;
+
+		bank = comet_gpio_chip[idx];
+		offset = gpio - bank->chip.base;
+
+		_tz1090_gpio_clear_bit(bank, REG_GPIO_SELECT, offset);
+	}
+out:
+	__global_unlock2(lstat);
+
+	return result;
+}
+EXPORT_SYMBOL(comet_gpio_disable_block);
+
+/*
+ * Set the SELECT bits for every GPIO in [first, last] within a single
+ * LOCK2 critical section.  Returns the number of pins processed, or
+ * -EINVAL if any GPIO in the range does not map to a bank (pins before
+ * it remain modified).
+ */
+int comet_gpio_enable_block(unsigned int first, unsigned int last)
+{
+	struct tz1090_gpio_bank *bank = NULL;
+	unsigned int offset = 0;
+	int gpio;
+	int idx;
+	int lstat;
+	int result = 0;
+
+	__global_lock2(lstat);
+	for (gpio = first; gpio <= last; ++gpio) {
+		idx = comet_gpio_to_chip(gpio);
+
+		if (idx < 0) {
+			result = -EINVAL;
+			goto out;
+		}
+		++result;
+
+		bank = comet_gpio_chip[idx];
+		offset = gpio - bank->chip.base;
+
+		/*
+		 * Use the lockless helper: LOCK2 is already held here and
+		 * tz1090_gpio_set_bit() would try to acquire it again.
+		 * This also mirrors comet_gpio_disable_block(), which
+		 * already uses the lockless variant.
+		 */
+		_tz1090_gpio_set_bit(bank, REG_GPIO_SELECT, offset);
+	}
+out:
+	__global_unlock2(lstat);
+
+	return result;
+}
+EXPORT_SYMBOL(comet_gpio_enable_block);
+
+/*
+ * Set the 2-bit pull-up/pull-down field for a GPIO.
+ *
+ * NOTE(review): gpio_pullup->index from gpio_pullup_table is read into
+ * gpio_pullup but never used; the register actually written is the
+ * bank's REG_GPIO_PU_PD (selected by gpio/30), while the table's index
+ * column runs up to 5.  This looks like it may address the wrong PU_PD
+ * register for some pins - confirm against the TZ1090 TRM.
+ */
+int comet_gpio_pullup_type(unsigned int gpio, unsigned int pullup)
+{
+	struct tz1090_gpio_bank *bank = NULL;
+	struct comet_gpio_pullup *gpio_pullup;
+	unsigned int offset = 0, value;
+	int idx;
+	int lstat;
+
+	idx = comet_gpio_to_chip(gpio);
+
+	if (idx < 0)
+		return -EINVAL;
+
+	bank = comet_gpio_chip[idx];
+	gpio_pullup = &gpio_pullup_table[gpio];
+	offset = gpio_pullup->offset;
+
+	/* read-modify-write of the shared pull register under LOCK2 */
+	__global_lock2(lstat);
+	value = tz1090_gpio_read(bank, REG_GPIO_PU_PD);
+	value &= ~(0x3 << offset);
+	value |= (pullup & 0x3) << offset;
+	tz1090_gpio_write(bank, REG_GPIO_PU_PD, value);
+	__global_unlock2(lstat);
+
+	return 0;
+}
+EXPORT_SYMBOL(comet_gpio_pullup_type);
+
+/* IRQ chip handlers */
+
+/* Get TZ1090 GPIO chip from irq data provided to generic IRQ callbacks */
+static inline struct tz1090_gpio_bank *irqd_to_gpio_bank(struct irq_data *data)
+{
+	/* the bank was passed as host_data to irq_domain_add_linear() */
+	return (struct tz1090_gpio_bank *)data->domain->host_data;
+}
+
+/* Acknowledge a pending GPIO interrupt by clearing its status bit */
+static void tz1090_gpio_irq_clear(struct tz1090_gpio_bank *bank,
+				  unsigned int offset)
+{
+	tz1090_gpio_clear_bit(bank, REG_GPIO_IRQ_STS, offset);
+}
+
+/* Enable/disable the interrupt for a single GPIO */
+static void tz1090_gpio_irq_enable(struct tz1090_gpio_bank *bank,
+				   unsigned int offset, bool enable)
+{
+	tz1090_gpio_mod_bit(bank, REG_GPIO_IRQ_EN, offset, enable);
+}
+
+/* Select active-low/high (or falling/rising) polarity for a GPIO irq */
+static void tz1090_gpio_irq_polarity(struct tz1090_gpio_bank *bank,
+				     unsigned int offset, unsigned int polarity)
+{
+	tz1090_gpio_mod_bit(bank, REG_GPIO_IRQ_PLRT, offset, polarity);
+}
+
+/* An irq is only usable once set_type has installed a level/edge handler */
+static int tz1090_gpio_valid_handler(struct irq_desc *desc)
+{
+	return desc->handle_irq == handle_level_irq ||
+		desc->handle_irq == handle_edge_irq;
+}
+
+/* Select level- or edge-triggered mode for a GPIO irq */
+static void tz1090_gpio_irq_type(struct tz1090_gpio_bank *bank,
+				 unsigned int offset, unsigned int type)
+{
+	tz1090_gpio_mod_bit(bank, REG_GPIO_IRQ_TYPE, offset, type);
+}
+
+/* set polarity to trigger on next edge, whether rising or falling */
+static void tz1090_gpio_irq_next_edge(struct tz1090_gpio_bank *bank,
+				      unsigned int offset)
+{
+	unsigned int value_p, value_i;
+	int lstat;
+
+	/*
+	 * Set the GPIO's interrupt polarity to the opposite of the current
+	 * input value so that the next edge triggers an interrupt.
+	 */
+	__global_lock2(lstat);
+	value_i = ~tz1090_gpio_read(bank, REG_GPIO_DIN);
+	value_p = tz1090_gpio_read(bank, REG_GPIO_IRQ_PLRT);
+	value_p &= ~BIT(offset);
+	value_p |= value_i & BIT(offset);
+	tz1090_gpio_write(bank, REG_GPIO_IRQ_PLRT, value_p);
+	__global_unlock2(lstat);
+}
+
+/* irq_chip .irq_ack: clear the pending status bit */
+static void gpio_ack_irq(struct irq_data *data)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+
+	tz1090_gpio_irq_clear(bank, data->hwirq);
+}
+
+/* irq_chip .irq_mask: disable the GPIO interrupt */
+static void gpio_mask_irq(struct irq_data *data)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+
+	tz1090_gpio_irq_enable(bank, data->hwirq, false);
+}
+
+/* irq_chip .irq_unmask: enable the GPIO interrupt */
+static void gpio_unmask_irq(struct irq_data *data)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+
+	tz1090_gpio_irq_enable(bank, data->hwirq, true);
+}
+
+/* irq_chip .irq_startup: clear any stale status, then enable */
+static unsigned int gpio_startup_irq(struct irq_data *data)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+	irq_hw_number_t hw = data->hwirq;
+	struct irq_desc *desc = irq_to_desc(data->irq);
+
+	/*
+	 * This warning indicates that the type of the irq hasn't been set
+	 * before enabling the irq. This would normally be done by passing some
+	 * trigger flags to request_irq().
+	 */
+	WARN(!tz1090_gpio_valid_handler(desc),
+		"irq type not set before enabling gpio irq %d", data->irq);
+
+	tz1090_gpio_irq_clear(bank, hw);
+	tz1090_gpio_irq_enable(bank, hw, true);
+	return 0;
+}
+
+/*
+ * irq_chip .irq_set_type: program level/edge mode and polarity.
+ * The hardware has no native both-edges mode, so IRQ_TYPE_EDGE_BOTH is
+ * emulated by pointing the polarity at the opposite of the current pin
+ * level (tz1090_gpio_irq_next_edge) and re-toggling it in the handler.
+ */
+static int gpio_set_irq_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+	unsigned int type;
+	unsigned int polarity;
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_BOTH:
+		type = REG_GPIO_IRQ_TYPE_EDGE;
+		polarity = REG_GPIO_IRQ_PLRT_LOW;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		type = REG_GPIO_IRQ_TYPE_EDGE;
+		polarity = REG_GPIO_IRQ_PLRT_HIGH;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type = REG_GPIO_IRQ_TYPE_EDGE;
+		polarity = REG_GPIO_IRQ_PLRT_LOW;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		type = REG_GPIO_IRQ_TYPE_LEVEL;
+		polarity = REG_GPIO_IRQ_PLRT_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		type = REG_GPIO_IRQ_TYPE_LEVEL;
+		polarity = REG_GPIO_IRQ_PLRT_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tz1090_gpio_irq_type(bank, data->hwirq, type);
+	/* pick the matching flow handler (also satisfies valid_handler()) */
+	if (type == REG_GPIO_IRQ_TYPE_LEVEL)
+		__irq_set_handler_locked(data->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(data->irq, handle_edge_irq);
+
+	if (flow_type == IRQ_TYPE_EDGE_BOTH)
+		tz1090_gpio_irq_next_edge(bank, data->hwirq);
+	else
+		tz1090_gpio_irq_polarity(bank, data->hwirq, polarity);
+
+	return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+/*
+ * irq_chip .irq_set_wake: individual GPIOs cannot be wake sources, so
+ * forward the wake request to the bank's parent interrupt instead.
+ */
+static int gpio_set_irq_wake(struct irq_data *data, unsigned int on)
+{
+	struct tz1090_gpio_bank *bank = irqd_to_gpio_bank(data);
+
+#ifdef CONFIG_PM_DEBUG
+	pr_info("irq_wake irq%d state:%d\n", data->irq, on);
+#endif
+
+	/* wake on gpio block interrupt */
+	return irq_set_irq_wake(bank->irq, on);
+}
+#else
+#define gpio_set_irq_wake NULL
+#endif
+
+/* gpio virtual interrupt functions */
+static struct irq_chip gpio_irq_chip = {
+	.irq_startup	= gpio_startup_irq,
+	.irq_ack	= gpio_ack_irq,
+	.irq_mask	= gpio_mask_irq,
+	.irq_unmask	= gpio_unmask_irq,
+	.irq_set_type	= gpio_set_irq_type,
+	.irq_set_wake	= gpio_set_irq_wake,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+};
+
+/*
+ * Chained handler for a bank's shared interrupt: demultiplex the pending
+ * per-GPIO interrupts and dispatch each to its virtual irq.
+ */
+static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	irq_hw_number_t hw;
+	unsigned int irq_stat, irq_no;
+	struct tz1090_gpio_bank *bank;
+	struct irq_desc *child_desc;
+
+	bank = (struct tz1090_gpio_bank *)irq_desc_get_handler_data(desc);
+	/* pending = status & enabled & DIR (only input pins can interrupt) */
+	irq_stat = tz1090_gpio_read(bank, REG_GPIO_DIR) &
+		   tz1090_gpio_read(bank, REG_GPIO_IRQ_STS) &
+		   tz1090_gpio_read(bank, REG_GPIO_IRQ_EN) &
+		   0x3FFFFFFF; /* 30 bits only */
+
+	for (hw = 0; irq_stat; irq_stat >>= 1, ++hw) {
+		if (!(irq_stat & 1))
+			continue;
+
+		irq_no = irq_linear_revmap(bank->domain, hw);
+		child_desc = irq_to_desc(irq_no);
+
+		/* Toggle edge for pin with both edges triggering enabled */
+		if (irqd_get_trigger_type(&child_desc->irq_data)
+				== IRQ_TYPE_EDGE_BOTH)
+			tz1090_gpio_irq_next_edge(bank, hw);
+
+		BUG_ON(!tz1090_gpio_valid_handler(child_desc));
+		generic_handle_irq_desc(irq_no, child_desc);
+	}
+}
+
+/* irq_domain .map: attach our irq_chip to each newly mapped GPIO irq */
+static int tz1090_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+			       irq_hw_number_t hw)
+{
+	irq_set_chip(irq, &gpio_irq_chip);
+	return 0;
+}
+
+static const struct irq_domain_ops tz1090_gpio_irq_domain_ops = {
+	.map	= tz1090_gpio_irq_map,
+	.xlate	= irq_domain_xlate_twocell,
+};
+
+/*
+ * Register one 30-pin GPIO bank: set up the gpio_chip, add it to gpiolib,
+ * and (if the devicetree provides a bank interrupt) create an irq domain
+ * plus chained handler for the per-GPIO interrupts.
+ */
+static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
+{
+	struct device_node *np = info->node;
+	struct device *dev = info->priv->dev;
+	struct tz1090_gpio_bank *bank;
+	int ret;
+
+	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
+	if (!bank) {
+		dev_err(dev, "unable to allocate driver data\n");
+		return -ENOMEM;
+	}
+
+	/* Offset the main registers to the first register in this bank */
+	bank->reg = info->priv->reg + info->index * 4;
+	comet_gpio_chip[info->index] = bank;
+
+	/* Set up GPIO chip */
+	snprintf(bank->label, sizeof(bank->label), "tz1090-gpio-%u",
+		 info->index);
+	bank->chip.label		= bank->label;
+	bank->chip.dev			= dev;
+	bank->chip.direction_input	= tz1090_gpio_direction_input;
+	bank->chip.direction_output	= tz1090_gpio_direction_output;
+	bank->chip.get			= tz1090_gpio_get;
+	bank->chip.set			= tz1090_gpio_set;
+	bank->chip.free			= tz1090_gpio_free;
+	bank->chip.request		= tz1090_gpio_request;
+	bank->chip.to_irq		= tz1090_gpio_to_irq;
+	bank->chip.of_node		= np;
+
+	/* GPIO numbering from 0 */
+	bank->chip.base			= info->index * 30;
+	bank->chip.ngpio		= 30;
+
+	/* Add the GPIO bank; gpiochip_add() can fail (e.g. base clash) */
+	ret = gpiochip_add(&bank->chip);
+	if (ret) {
+		dev_err(dev, "unable to add GPIO bank %u\n", info->index);
+		return ret;
+	}
+
+	/* Get the GPIO bank IRQ if provided */
+	bank->irq = irq_of_parse_and_map(np, 0);
+
+	/*
+	 * The interrupt is optional (it may be used by another core on
+	 * chip).  irq_of_parse_and_map() returns 0 when no mapping exists,
+	 * not a negative errno, so test for 0 rather than < 0 (the old
+	 * check could never trigger).
+	 */
+	if (!bank->irq) {
+		dev_info(dev, "IRQ not provided for bank %u, IRQs disabled\n",
+			 info->index);
+		return 0;
+	}
+
+	dev_info(dev, "Setting up IRQs for GPIO bank %u\n",
+		 info->index);
+
+	/*
+	 * Initialise all interrupts to disabled so we don't get
+	 * spurious ones on a dirty boot and hit the BUG_ON in the
+	 * handler.
+	 */
+	tz1090_gpio_write(bank, REG_GPIO_IRQ_EN, 0);
+
+	/* Add a virtual IRQ for each GPIO */
+	bank->domain = irq_domain_add_linear(np,
+					     bank->chip.ngpio,
+					     &tz1090_gpio_irq_domain_ops,
+					     bank);
+	if (!bank->domain) {
+		dev_err(dev, "unable to add irq domain for bank %u\n",
+			info->index);
+		return -ENOMEM;
+	}
+
+	/* Setup chained handler for this GPIO bank */
+	irq_set_handler_data(bank->irq, bank);
+	irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);
+
+	return 0;
+}
+
+/*
+ * Walk the devicetree children of the GPIO node and probe each bank.
+ * Bad or out-of-range "reg" values are reported and skipped rather than
+ * failing the whole device.
+ */
+static void tz1090_gpio_register_banks(struct tz1090_gpio *priv)
+{
+	struct device_node *np = priv->dev->of_node;
+	struct device_node *node;
+
+	for_each_available_child_of_node(np, node) {
+		struct tz1090_gpio_bank_info info;
+		u32 addr;
+		int ret;
+
+		ret = of_property_read_u32(node, "reg", &addr);
+		if (ret) {
+			dev_err(priv->dev, "invalid reg on %s\n",
+				node->full_name);
+			continue;
+		}
+		if (addr >= 3) {
+			dev_err(priv->dev, "index %u in %s out of range\n",
+				addr, node->full_name);
+			continue;
+		}
+
+		info.index = addr;
+		/* hold a node reference for the bank's chip.of_node */
+		info.node = of_node_get(node);
+		info.priv = priv;
+
+		ret = tz1090_gpio_bank_probe(&info);
+		if (ret) {
+			dev_err(priv->dev, "failure registering %s\n",
+				node->full_name);
+			/* bank not registered, drop the extra reference */
+			of_node_put(node);
+			continue;
+		}
+	}
+}
+
+/*
+ * Probe the TZ1090 GPIO device: map the register window, then register
+ * each bank found in the devicetree.  The tz1090_gpio struct is only
+ * needed during init, so it can live on the stack.
+ */
+static int tz1090_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res_regs;
+	struct tz1090_gpio priv;
+
+	if (!np) {
+		dev_err(&pdev->dev, "must be instantiated via devicetree\n");
+		return -ENOENT;
+	}
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_regs) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		return -ENOENT;
+	}
+
+	priv.dev = &pdev->dev;
+
+	/*
+	 * Ioremap the registers.  resource_size() is end - start + 1; the
+	 * previous open-coded subtraction dropped the last byte of the
+	 * register window.
+	 */
+	priv.reg = devm_ioremap(&pdev->dev, res_regs->start,
+				resource_size(res_regs));
+	if (!priv.reg) {
+		dev_err(&pdev->dev, "unable to ioremap registers\n");
+		return -ENOMEM;
+	}
+
+	/* Look for banks */
+	tz1090_gpio_register_banks(&priv);
+
+	return 0;
+}
+
+/* Devicetree match table */
+static struct of_device_id tz1090_gpio_of_match[] = {
+	{ .compatible = "img,tz1090-gpio" },
+	{ },
+};
+
+static struct platform_driver tz1090_gpio_driver = {
+	.driver = {
+		.name		= "tz1090-gpio",
+		.owner		= THIS_MODULE,
+		.of_match_table	= tz1090_gpio_of_match,
+	},
+	.probe		= tz1090_gpio_probe,
+};
+
+static int __init tz1090_gpio_init(void)
+{
+	return platform_driver_register(&tz1090_gpio_driver);
+}
+/* register early so GPIOs are available before ordinary drivers probe */
+postcore_initcall(tz1090_gpio_init);
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+/*
+ * For now we save and restore all GPIO setup.
+ */
+static const u8 tz1090_gpio_regs[] = {
+	0x00, 0x04, 0x08,	/* GPIO_DIR{0-2} */
+	0x10, 0x14, 0x18,	/* GPIO_SELECT{0-2} */
+	0x20, 0x24, 0x28,	/* IRQ_PLRT{0-2} */
+	0x30, 0x34, 0x38,	/* IRQ_TYPE{0-2} */
+	0x40, 0x44, 0x48,	/* IRQ_EN{0-2} */
+				/* IRQ_STS{0-2} */
+	0x60, 0x64, 0x68,	/* GPIO_BIT_EN{0-2} */
+				/* GPIO_DIN{0-2} */
+	0x80, 0x84, 0x88,	/* GPIO_DOUT{0-2} */
+	0x90,			/* SCHMITT_EN0 */
+	0xa0, 0xa4, 0xa8, 0xac,	/* PU_PD_{0-3} */
+	0xb0, 0xb4, 0xb8,	/* PU_PD_{4-6} */
+	0xc0,			/* SR_0 */
+	0xd0,			/* DR_0 */
+};
+struct tz1090_gpio_state {
+	u32 values[ARRAY_SIZE(tz1090_gpio_regs)];
+};
+
+static struct tz1090_gpio_state *tz1090_gpio_state;
+
+/*
+ * Save all GPIO setup registers before a deep sleep.
+ * Returns 0 on success, -ENOMEM if the save buffer cannot be allocated.
+ */
+static int tz1090_gpio_suspend(void)
+{
+	unsigned long flags;
+	unsigned int i;
+	struct tz1090_gpio_state *state;
+
+	/* syscore suspend runs with interrupts disabled, hence GFP_ATOMIC */
+	state = kzalloc(sizeof(*state), GFP_ATOMIC);
+	if (!state)
+		return -ENOMEM;
+
+	__global_lock2(flags);
+	for (i = 0; i < ARRAY_SIZE(tz1090_gpio_regs); ++i)
+		state->values[i] = readl(GPIO_CRTOP_BASE_ADDR +
+					 tz1090_gpio_regs[i]);
+	__global_unlock2(flags);
+
+	tz1090_gpio_state = state;
+
+	return 0;
+}
+
+/* Restore the GPIO setup registers saved at suspend, then free them. */
+static void tz1090_gpio_resume(void)
+{
+	unsigned long flags;
+	unsigned int i;
+	struct tz1090_gpio_state *state = tz1090_gpio_state;
+
+	/*
+	 * Suspend may have failed with -ENOMEM, leaving no saved state;
+	 * guard against dereferencing NULL (the PDC variant already does).
+	 */
+	if (!state)
+		return;
+
+	__global_lock2(flags);
+	for (i = 0; i < ARRAY_SIZE(tz1090_gpio_regs); ++i)
+		writel(state->values[i],
+		       GPIO_CRTOP_BASE_ADDR + tz1090_gpio_regs[i]);
+	__global_unlock2(flags);
+
+	tz1090_gpio_state = NULL;
+	kfree(state);
+}
+#else
+#define tz1090_gpio_suspend NULL
+#define tz1090_gpio_resume NULL
+#endif	/* CONFIG_METAG_SUSPEND_MEM */
+
+static struct syscore_ops tz1090_gpio_syscore_ops = {
+	.suspend = tz1090_gpio_suspend,
+	.resume = tz1090_gpio_resume,
+};
+
+/* Register the suspend/resume hooks; __init as it only runs at load time */
+static int __init tz1090_gpio_syscore_init(void)
+{
+	register_syscore_ops(&tz1090_gpio_syscore_ops);
+	return 0;
+}
+
+static void __exit tz1090_gpio_syscore_exit(void)
+{
+	unregister_syscore_ops(&tz1090_gpio_syscore_ops);
+}
+
+module_init(tz1090_gpio_syscore_init);
+module_exit(tz1090_gpio_syscore_exit);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 631736e..c98ac8d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -371,6 +371,13 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-cbus-gpio.
 
+config I2C_CHORUS2
+	tristate "Chorus2 I2C support"
+	depends on SOC_CHORUS2
+	help
+	  This is the driver for the onboard I2C controller on the Chorus2
+	  SoC. This device is also known as SCB.
+
 config I2C_CPM
 	tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
 	depends on (CPM1 || CPM2) && OF_I2C
@@ -464,6 +471,27 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-ibm_iic.
 
+config I2C_IMG
+	tristate "IMG I2C (SCB) master interface"
+	depends on HAVE_CLK
+	help
+	  Say Y here if you want to use the I2C (SCB) master interface on
+	  the Imagination Technologies SoCs.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-img.
+
+config I2C_IMG_DEBUG_BUFFER
+	bool "Record and report detailed interrupt info on fatal errors"
+	depends on I2C_IMG
+	default y
+	help
+	  Say Y here (recommended) if you want I2C (SCB) interrupts for each
+	  transaction to be recorded in a debug buffer so that more detailed
+	  debug information can be printed to the kernel log when a fatal error
+	  such as an SCLK low timeout is encountered. This uses between 1KB-2KB
+	  of memory.
+
 config I2C_IMX
 	tristate "IMX I2C interface"
 	depends on ARCH_MXC
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 8f4fc23..6465974 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_I2C_BLACKFIN_TWI)	+= i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CBUS_GPIO)	+= i2c-cbus-gpio.o
 obj-$(CONFIG_I2C_CPM)		+= i2c-cpm.o
+obj-$(CONFIG_I2C_CHORUS2)	+= i2c-chorus2.o
 obj-$(CONFIG_I2C_DAVINCI)	+= i2c-davinci.o
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)	+= i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)	+= i2c-designware-platform.o
@@ -45,6 +46,7 @@
 obj-$(CONFIG_I2C_GPIO)		+= i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)	+= i2c-highlander.o
 obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
+obj-$(CONFIG_I2C_IMG)		+= i2c-img.o
 obj-$(CONFIG_I2C_IMX)		+= i2c-imx.o
 obj-$(CONFIG_I2C_INTEL_MID)	+= i2c-intel-mid.o
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
diff --git a/drivers/i2c/busses/i2c-chorus2.c b/drivers/i2c/busses/i2c-chorus2.c
new file mode 100644
index 0000000..c1f3473
--- /dev/null
+++ b/drivers/i2c/busses/i2c-chorus2.c
@@ -0,0 +1,797 @@
+/*
+ *  I2C adapter for the Chorus2 Serial Control Bus.
+ *
+ *  Copyright (C) 2006-2008,2009 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/soc-chorus2/gpio.h>
+
+/* TODO
+ * - actually use the interrupt and sleep where appropriate
+ */
+
+#define SCB_REG0                0x0000
+#define SCB_REG1                0x0001
+#define SCB_REG2                0x0002
+#define SCB_REG3                0x0003
+#define SCB_REG4                0x0004
+#define SCB_REG5                0x0005
+
+#define SCB_REG1_SDM            0x0001
+#define SCB_REG1_SCM            0x0002
+#define SCB_REG1_BEI            0x0004
+#define SCB_REG1_SC             0x0008
+#define SCB_REG1_LOOPBACK       0x0010
+#define SCB_REG1_BCR_MASK       0x0FE0
+#define SCB_REG1_BCR_SHIFT           5
+#define SCB_REG1_MODE           0x8000
+
+#define SCB_REG2_DATA           0x00FF
+#define SCB_REG2_DATA_SHIFT          0
+#define SCB_REG2_BUS_STATUS     0x0030
+#define SCB_REG2_BUS_STATUS_SHIFT    8
+#define SCB_REG2_ACK            0x8000
+
+#define SCB_REG3_CIE            0x0400
+#define SCB_REG3_EIE            0x0800
+
+#define REG0_CMD_SHIFT               8
+#define REG0_CMD_TX                0x0
+#define REG0_CMD_RESERVED          0x1
+#define REG0_CMD_RX_ACK            0x2
+#define REG0_CMD_RX_NACK           0x3
+#define REG0_CMD_START             0x4
+#define REG0_CMD_STOP              0x5
+#define REG0_CMD_SLAVE_ADDR        0x6
+#define REG0_CMD_EXIT_SLAVE        0x7
+#define REG0_UPGRADE            0x1000
+
+#define REG2_STATUS_IGNORE        0x00
+#define REG2_STATUS_IDLE          0x01
+#define REG2_STATUS_BUSY          0x02
+#define REG2_STATUS_STUCK         0x03
+#define REG2_ACK                0x8000
+#define REG2_STATE_MASK         0x7000
+#define REG2_STATE_SHIFT            12
+
+#define REG2_STATE_IDLE            0x0
+#define REG2_STATE_START           0x1
+#define REG2_STATE_TX              0x2
+#define REG2_STATE_RX              0x3
+#define REG2_STATE_STOP            0x4
+#define REG2_STATE_RSTART          0x5
+#define REG2_STATE_RADDR           0x6
+#define REG2_STATE_RSTOP           0x7
+
+#define REG4_CMD_COMPLETE         0x01
+#define REG4_ERROR                0x02
+
+#define REG5_CMD_COMPLETE         0x01
+#define REG5_ERROR                0x02
+#define REG5_SOFT_RESET           0x04
+
+#define TX_DATA_READ              0x01
+
+#define SCB_WRAPPER_READY	        0x80000000
+#define SCB_WRAPPER_READ_READY	        0x40000000
+#define SCB_WRAPPER_IRQ		        0x20000000
+#define SCB_WRAPPER_CMD_VALID	        0x10000000
+#define SCB_WRAPPER_READ_NOT_WRITE	0x01000000
+#define SCB_WRAPPER_ADDR_MASK	        0x00070000
+#define SCB_WRAPPER_ADDR_SHIFT	                16
+#define SCB_WRAPPER_DATA_MASK	        0x0000FFFF
+
+#define MAX_REGNO                       5  /* highest SCB reg (REG0-REG5) */
+
+#define WRAPPER_TIMEOUT                10  /* Specified as number of loops */
+#define INTERRUPT_TIMEOUT	(HZ / 20)  /* Specified in jiffies */
+
+#define SCB_BCR_50KHZ		0x8
+#define SCB_BCR_100KHZ		0x11
+#define SCB_BCR_200KHZ		0x22
+#define SCB_BCR_400KHZ		0x44
+
+#define SCB_PIN_CTRL_REG	0x02024004
+
+#define SCB_PIN_CTRL_SCB2	0
+#define SCB_PIN_CTRL_GPDAC	1
+#define SCB_PIN_CTRL_SPI2	2
+#define SCB_PIN_CTRL_GPIO	3
+
+#define SCB_DAT_CTRL_SHIFT	0
+#define SCB_CLK_CTRL_SHIFT	2
+
+struct chorus2_i2c {
+	struct i2c_adapter adap;
+
+	void __iomem *reg_base;
+
+	unsigned long iobase;
+	unsigned long iosize;
+};
+
+#define GPIO_CLK_PIN	GPIO_E_PIN(14)
+#define GPIO_DATA_PIN	GPIO_E_PIN(15)
+
+static void scb_clear_error(void)
+{
+	int attempts;
+
+	gpio_request(GPIO_CLK_PIN, "SCB CLK gpio");
+	gpio_request(GPIO_DATA_PIN, "SCB data gpio");
+
+	/* Make clock an output pin. */
+	gpio_direction_output(GPIO_CLK_PIN, 1);
+
+	/* Make data pin an input. */
+	gpio_direction_input(GPIO_DATA_PIN);
+
+	writel((SCB_PIN_CTRL_GPIO << SCB_DAT_CTRL_SHIFT) +
+	       (SCB_PIN_CTRL_GPIO << SCB_CLK_CTRL_SHIFT),
+	       SCB_PIN_CTRL_REG);
+
+	for (attempts = 0; attempts < 100; attempts++) {
+		unsigned data = gpio_get_value(GPIO_DATA_PIN) & 1;
+		if (data == 1)
+			break;
+		else {
+			udelay(5);
+			gpio_set_value(GPIO_CLK_PIN, 0);
+			udelay(5);
+			gpio_set_value(GPIO_CLK_PIN, 1);
+		}
+	}
+
+	writel((SCB_PIN_CTRL_SCB2 << SCB_DAT_CTRL_SHIFT) +
+	       (SCB_PIN_CTRL_SCB2 << SCB_CLK_CTRL_SHIFT),
+	       SCB_PIN_CTRL_REG);
+
+	chorus2_gpio_disable(GPIO_CLK_PIN);
+	chorus2_gpio_disable(GPIO_DATA_PIN);
+}
+
+static int scb_wait_wrapper(void __iomem *reg_base, unsigned int wait_mask)
+{
+	unsigned int wrapper;
+	unsigned int loops;
+
+	for (loops = 0; loops < WRAPPER_TIMEOUT; loops++) {
+		wrapper = readl(reg_base);
+		if (wrapper & wait_mask)
+			return 0;
+	}
+
+	pr_err("SCB wrapper ready timeout: %#x (%#x)\n", wrapper, wait_mask);
+
+	return -ETIMEDOUT;
+}
+
+/* Read an SCB register via the wrapper register */
+static int scb_read_reg(void __iomem *reg_base, unsigned int regno,
+			unsigned short *value)
+{
+	unsigned short data;
+
+	if (regno > MAX_REGNO)
+		return -EINVAL;
+
+	/* Wait for the wrapper to be ready */
+	if (scb_wait_wrapper(reg_base, SCB_WRAPPER_READY))
+		return -ETIMEDOUT;
+
+	/* Write the read command */
+	writel(SCB_WRAPPER_CMD_VALID | SCB_WRAPPER_READ_NOT_WRITE |
+	       ((regno << SCB_WRAPPER_ADDR_SHIFT) & SCB_WRAPPER_ADDR_MASK),
+	       reg_base);
+
+	/* Wait for the valid_data to come up */
+	if (scb_wait_wrapper(reg_base, SCB_WRAPPER_READ_READY))
+		return -ETIMEDOUT;
+
+	/* And read the data out */
+	data = (unsigned short) readl(reg_base) & SCB_WRAPPER_DATA_MASK;
+
+	*value = data;
+
+	return 0;
+}
+
+/* Write an SCB register via the wrapper register */
+static int scb_write_reg(void __iomem *reg_base, unsigned int regno,
+			 unsigned short value)
+{
+	if (regno > MAX_REGNO)
+		return -EINVAL;
+
+	/* Wait for the wrapper to be ready */
+	if (scb_wait_wrapper(reg_base, SCB_WRAPPER_READY))
+		return -ETIMEDOUT;
+
+	/* Write the write command */
+	writel(SCB_WRAPPER_CMD_VALID |
+	       ((regno << SCB_WRAPPER_ADDR_SHIFT) & SCB_WRAPPER_ADDR_MASK) |
+	       value,
+	       reg_base);
+
+	return 0;
+}
+
+static int chorus2_scb_ready(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short reg2;
+	unsigned short reg4;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG2, &reg2))
+		return 0;
+
+	switch ((reg2 & SCB_REG2_BUS_STATUS) >> SCB_REG2_BUS_STATUS_SHIFT) {
+	case REG2_STATUS_BUSY:
+	case REG2_STATUS_STUCK:
+		return 0;
+	case REG2_STATUS_IDLE:
+	case REG2_STATUS_IGNORE:
+	default:
+		break;
+	}
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG4, &reg4))
+		return 0;
+
+	if (reg4 & REG4_ERROR)
+		return 0;
+
+	return 1;
+}
+
+static unsigned char chorus2_scb_get_state(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+	unsigned char state;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG2, &data))
+		return 0;
+
+	state = (data & REG2_STATE_MASK) >> REG2_STATE_SHIFT;
+
+	return state;
+}
+
+static int chorus2_scb_got_ack(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG2, &data))
+		return 0;
+
+	if (data & REG2_ACK)
+		return 1;
+
+	return 0;
+}
+
+static int chorus2_scb_wait_interrupt(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+	unsigned long start_time = jiffies;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	/* Give us a couple of ticks, or get bored */
+	while (!time_after_eq(jiffies, start_time + INTERRUPT_TIMEOUT)) {
+		if (scb_read_reg(algo_data->reg_base, SCB_REG4, &data))
+			return -EIO;
+
+		if (data) {
+			int ret;
+
+			if (data & REG4_CMD_COMPLETE) {
+				ret = 0;
+			} else if (data & REG4_ERROR) {
+				dev_dbg(&i2c_adap->dev, "error interrupt!\n");
+
+				ret = -EIO;
+			} else {
+				dev_dbg(&i2c_adap->dev,
+					"command not complete interrupt!\n");
+				ret = -EIO;
+			}
+
+			/* clear down the interrupts */
+			data = REG5_ERROR | REG5_CMD_COMPLETE;
+			if (scb_write_reg(algo_data->reg_base, SCB_REG5, data))
+				ret = -EIO;
+
+			return ret;
+		}
+	}
+
+	dev_dbg(&i2c_adap->dev, "timed out waiting for interrupt?\n");
+
+	return -ETIMEDOUT;
+}
+
+static int chorus2_scb_stop(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	data = (REG0_CMD_STOP << REG0_CMD_SHIFT) | REG0_UPGRADE;
+	if (scb_write_reg(algo_data->reg_base, SCB_REG0, data))
+		return -EIO;
+
+	if (chorus2_scb_wait_interrupt(i2c_adap))
+		return -EIO;
+
+	return 0;
+}
+
+static int chorus2_scb_start(struct i2c_adapter *i2c_adap, unsigned char addr,
+			     int reading)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+	unsigned char state;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	state = chorus2_scb_get_state(i2c_adap);
+
+	if (state != REG2_STATE_IDLE) {
+		dev_warn(&i2c_adap->dev,
+			 "scb_start: expected idle state: %hhx\n", state);
+		return -EIO;
+	}
+
+	data = (REG0_CMD_START << REG0_CMD_SHIFT) | REG0_UPGRADE;
+	if (scb_write_reg(algo_data->reg_base, SCB_REG0, data))
+		return -EIO;
+
+	if (chorus2_scb_wait_interrupt(i2c_adap))
+		return -EIO;
+
+	/* Manual says that status is always '0' after start or stop, so
+	 * don't check it?
+	 */
+
+	/* Send the slave address */
+	data = (addr << 1) | (REG0_CMD_TX << REG0_CMD_SHIFT) | REG0_UPGRADE;
+
+	/* Set the rd/!wr bit at the bottom of the 'addr' byte */
+	if (reading)
+		data |= TX_DATA_READ;
+
+	if (scb_write_reg(algo_data->reg_base, SCB_REG0, data))
+		return -EIO;
+
+	if (chorus2_scb_wait_interrupt(i2c_adap))
+		return -EIO;
+
+	/* Even if we are about to do a READ, the state machine will
+	 * still report TX mode, ready for us to TX the read request!
+	 */
+	state = chorus2_scb_get_state(i2c_adap);
+
+	if (state != REG2_STATE_TX)
+		dev_warn(&i2c_adap->dev,
+			 "scb_start: expected TX state: %hhx\n", state);
+
+	/* And for that, we expect an ACK ! */
+	if (!chorus2_scb_got_ack(i2c_adap)) {
+		chorus2_scb_stop(i2c_adap);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int chorus2_scb_read(struct i2c_adapter *i2c_adap, unsigned char addr,
+			    char *buffer, int len)
+{
+	struct chorus2_i2c *algo_data;
+	int remaining = len;
+	char *bufp = buffer;
+	unsigned short data;
+	unsigned char state;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	/* Note - even though we are reading, the state machine will be in
+	 * TX state, waiting for us to TX the read request to it.
+	 */
+	state = chorus2_scb_get_state(i2c_adap);
+	if (state != REG2_STATE_TX)
+		dev_warn(&i2c_adap->dev,
+			 "scb_read: expected TX state: %hhx\n", state);
+
+	while (remaining) {
+		/* Send the read request - no ACK for the last read */
+		if (remaining == 1)
+			data = (REG0_CMD_RX_NACK << REG0_CMD_SHIFT) |
+				REG0_UPGRADE;
+		else
+			data = (REG0_CMD_RX_ACK << REG0_CMD_SHIFT) |
+				REG0_UPGRADE;
+
+		if (scb_write_reg(algo_data->reg_base, SCB_REG0, data))
+			return -ENODEV;
+
+		if (chorus2_scb_wait_interrupt(i2c_adap)) {
+			dev_dbg(&i2c_adap->dev, "no RX interrupt\n");
+			return -ENODEV;
+		}
+
+		/* Should have moved to RX state now */
+		state = chorus2_scb_get_state(i2c_adap);
+		if (state != REG2_STATE_RX)
+			dev_warn(&i2c_adap->dev,
+				 "scb_read: expected RX state: %hhx\n", state);
+
+		/* Acks on all but the last read */
+		if (remaining != 1) {
+			if (!chorus2_scb_got_ack(i2c_adap)) {
+				dev_dbg(&i2c_adap->dev, "no ACK, %d left\n",
+					remaining);
+				return -EIO;
+			}
+		}
+
+		if (scb_read_reg(algo_data->reg_base, SCB_REG2, &data))
+			return -EIO;
+
+		data &= SCB_REG2_DATA;
+
+		dev_dbg(&i2c_adap->dev, "got back data [%#x]\n", data);
+
+		*bufp++ = data;
+
+		remaining--;
+	}
+
+	return 0;
+}
+
+static int chorus2_scb_write(struct i2c_adapter *i2c_adap, unsigned char addr,
+			     const char *buffer, int len)
+{
+	struct chorus2_i2c *algo_data;
+	int remaining = len;
+	const char *bufp = buffer;
+	unsigned short data;
+	unsigned char state;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	while (remaining) {
+		state = chorus2_scb_get_state(i2c_adap);
+		if (state != REG2_STATE_TX)
+			dev_warn(&i2c_adap->dev,
+				 "scb_write: expected TX state (1): %hhx\n",
+				 state);
+
+		/* Send the byte */
+		data = *bufp | (REG0_CMD_TX << REG0_CMD_SHIFT) | REG0_UPGRADE;
+		bufp++;
+
+		if (scb_write_reg(algo_data->reg_base, SCB_REG0, data))
+			return -EIO;
+
+		if (chorus2_scb_wait_interrupt(i2c_adap))
+			return -EIO;
+
+		state = chorus2_scb_get_state(i2c_adap);
+		if (state != REG2_STATE_TX)
+			dev_warn(&i2c_adap->dev,
+				 "scb_write: expected TX state (2): %hhx\n",
+				 state);
+
+		/* And for that, we expect an ACK ! */
+		if (!chorus2_scb_got_ack(i2c_adap))
+			return -EIO;
+
+		remaining--;
+	}
+	return 0;
+}
+
+static int chorus2_scb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+			    int num)
+{
+	int i;
+	struct i2c_msg *msg;
+
+	if (!num)
+		return 0;
+
+	for (i = 0; i < num; i++) {
+		int err = 0;
+
+		msg = &msgs[i];
+
+		dev_dbg(&i2c_adap->dev, " %d: addr %d, %s, dlen %d\n",
+			i,
+			msg->addr,
+			(msg->flags & I2C_M_RD) ? "rd" : "wr", msg->len);
+
+		if (!chorus2_scb_ready(i2c_adap)) {
+			dev_warn(&i2c_adap->dev, "bus not ready\n");
+			return -EBUSY;
+		}
+
+		if (chorus2_scb_start(i2c_adap, msg->addr,
+				      msg->flags & I2C_M_RD)) {
+			/* A failed start normally means there is no
+			 * device there.
+			 */
+			dev_dbg(&i2c_adap->dev,
+				"start failed for xfer, addr %#x\n",
+				msg->addr);
+			scb_clear_error();
+			return -ENODEV;
+		}
+
+		if (msg->flags & I2C_M_RD)
+			err = chorus2_scb_read(i2c_adap, msg->addr, msg->buf,
+					       msg->len);
+		else
+			err = chorus2_scb_write(i2c_adap, msg->addr, msg->buf,
+						msg->len);
+
+		/* Only issue a STOP at the end of the whole sequence. */
+
+		/* NOTE NOTE NOTE
+		 * I think we have a bug in our I2C/SCB controller here!
+		 *
+		 * In theory, I should be able to issue 'restarts' between
+		 * transactions, and only issue a STOP when I've run out
+		 * of things to do.
+		 *
+		 * In actuality, that does not seem to work.
+		 * I'd really like to move this outside of the cmd loop, or
+		 * to only issue STOPs if the slave addr changes between cmds.
+		 */
+
+		chorus2_scb_stop(i2c_adap);
+
+		if (err) {
+			dev_err(&i2c_adap->dev,
+				"%s %d bytes addr %#x failed error %d\n",
+				msg->flags & I2C_M_RD ? "read" : "write",
+				msg->len, msg->addr, err);
+			return err;
+		}
+	}
+
+	return i;
+}
+
+static u32 chorus2_scb_func(struct i2c_adapter *i2c_adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static int __init chorus2_scb_init(struct i2c_adapter *i2c_adap)
+{
+	struct chorus2_i2c *algo_data;
+	unsigned short data;
+	unsigned short readback;
+
+	algo_data = (struct chorus2_i2c *)i2c_adap->algo_data;
+
+	/* First, lets try a soft reset! */
+	data = REG5_SOFT_RESET;
+	if (scb_write_reg(algo_data->reg_base, SCB_REG5, data)) {
+		dev_dbg(&i2c_adap->dev, "failed to soft reset Chorus2 SCB\n");
+		return -EIO;
+	}
+
+	/* Reg1 - Mode control register:
+	 *  DAT and CLK open drain.
+	 *  Bus Error Interrupts enabled
+	 *  Status Codes enabled
+	 *  Not in loopback
+	 *  Clock rate 100kHz
+	 *  Byte mode
+	 */
+	data = SCB_BCR_100KHZ << SCB_REG1_BCR_SHIFT;
+	data |= SCB_REG1_BEI;
+	data |= SCB_REG1_SC;
+
+	if (scb_write_reg(algo_data->reg_base, SCB_REG1, data)) {
+		dev_dbg(&i2c_adap->dev, "failed to write SCB reg1\n");
+		return -EIO;
+	}
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG1, &readback)) {
+		dev_dbg(&i2c_adap->dev, "failed to read back SCB reg1\n");
+		return -EIO;
+	}
+
+	if (readback != data) {
+		dev_dbg(&i2c_adap->dev, "failed to set SCB reg1\n");
+		return -EIO;
+	}
+
+	/* Reg3 - timeouts, interrupt enables etc. */
+	data = SCB_REG3_CIE;
+	data |= SCB_REG3_EIE;
+
+	/* FIXME - Need to define and clean up the whole timeout
+	 * stuff - for instance - program the timeout register in the SCB?
+	 */
+	if (scb_write_reg(algo_data->reg_base, SCB_REG3, data)) {
+		dev_dbg(&i2c_adap->dev, "failed to write SCB reg3\n");
+		return -EIO;
+	}
+
+	if (scb_read_reg(algo_data->reg_base, SCB_REG3, &readback)) {
+		dev_dbg(&i2c_adap->dev, "failed to read back SCB reg3\n");
+		return -EIO;
+	}
+
+	if (readback != data) {
+		dev_dbg(&i2c_adap->dev, "failed to set SCB reg1\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct i2c_algorithm i2c_chorus2_algorithm = {
+	.master_xfer = chorus2_scb_xfer,
+	.functionality = chorus2_scb_func,
+};
+
+static int __init i2c_chorus2_probe(struct platform_device *dev)
+{
+	struct chorus2_i2c *i2c;
+	struct resource *res;
+	struct device_node *node = dev->dev.of_node;
+	int ret;
+	u32 val;
+
+	if (!node)
+		return -ENOENT;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	if (!request_mem_region(res->start, resource_size(res), res->name))
+		return -ENOMEM;
+
+	i2c = kzalloc(sizeof(struct chorus2_i2c), GFP_KERNEL);
+	if (!i2c) {
+		ret = -ENOMEM;
+		goto out_error_kmalloc;
+	}
+
+	i2c->adap.owner = THIS_MODULE;
+	i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+	i2c->adap.algo = &i2c_chorus2_algorithm;
+	i2c->adap.retries = 5;
+
+	sprintf(i2c->adap.name, "chorus2_i2c-i2c.%u", dev->id);
+
+	i2c->reg_base = ioremap(res->start, resource_size(res));
+	if (!i2c->reg_base) {
+		ret = -EIO;
+		goto out_error_ioremap;
+	}
+
+	i2c->iobase = res->start;
+	i2c->iosize = resource_size(res);
+
+	i2c->adap.algo_data = i2c;
+	i2c->adap.dev.parent = &dev->dev;
+
+	/*
+	 * Get the "dev->id" from the device tree. If there
+	 * is no such attribute, print an error message and
+	 * free allocated resources.
+	 */
+	ret = of_property_read_u32(node, "id", &val);
+	if (ret) {
+		dev_err(&dev->dev, "could not find the id number");
+		goto out_error_id;
+	}
+
+	i2c->adap.nr = val;
+
+	ret = chorus2_scb_init(&i2c->adap);
+
+	if (ret) {
+		dev_warn(&dev->dev, "failed to reset bus\n");
+		goto out_error_ioremap;
+	}
+
+	ret = i2c_add_numbered_adapter(&i2c->adap);
+	if (ret < 0) {
+		dev_info(&dev->dev, "failed to add bus\n");
+		goto out_error_ioremap;
+	}
+
+	platform_set_drvdata(dev, i2c);
+
+	dev_info(&dev->dev, "Chorus2 I2C adapter probed successfully\n");
+
+	return 0;
+
+out_error_id:
+	iounmap(i2c->reg_base);
+out_error_ioremap:
+	kfree(i2c);
+out_error_kmalloc:
+	release_mem_region(res->start, resource_size(res));
+	return ret;
+}
+
+static int __exit i2c_chorus2_remove(struct platform_device *dev)
+{
+	struct chorus2_i2c *i2c = platform_get_drvdata(dev);
+
+	i2c_del_adapter(&i2c->adap);
+
+	release_mem_region(i2c->iobase, i2c->iosize);
+	kfree(i2c);
+
+	return 0;
+}
+
+static const struct of_device_id i2c_img_match[] = {
+	{ .compatible = "img,chorus2-i2c" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, i2c_img_match);
+
+static struct platform_driver i2c_chorus2_driver = {
+	.driver = {
+		.name	= "chorus2-i2c",
+		.owner	= THIS_MODULE,
+		.of_match_table	= i2c_img_match,
+	},
+	.remove = __exit_p(i2c_chorus2_remove),
+};
+
+static int __init i2c_adap_chorus2_init(void)
+{
+	return platform_driver_probe(&i2c_chorus2_driver, i2c_chorus2_probe);
+}
+module_init(i2c_adap_chorus2_init);
+
+static void __exit i2c_adap_chorus2_exit(void)
+{
+	platform_driver_unregister(&i2c_chorus2_driver);
+}
+module_exit(i2c_adap_chorus2_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("Chorus2 SCB I2C bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-img.c b/drivers/i2c/busses/i2c-img.c
new file mode 100644
index 0000000..4749b11
--- /dev/null
+++ b/drivers/i2c/busses/i2c-img.c
@@ -0,0 +1,1585 @@
+/*
+ *  I2C adapter for the IMG Serial Control Bus IP block.
+ *
+ *  Copyright (C) 2009,2010,2012 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#define SCB_STATUS_REG			0x0
+#define SCB_CLEAR_REG			0x98
+#define SCB_OVERRIDE_REG		0x4
+#define SCB_READ_ADDR_REG		0x8
+#define SCB_READ_COUNT_REG		0xc
+#define SCB_WRITE_ADDR_REG		0x10
+#define SCB_READ_XADDR_REG		0x74
+#define SCB_WRITE_XADDR_REG		0x78
+#define SCB_WRITE_COUNT_REG		0x7c
+#define SCB_READ_DATA_REG		0x14
+#define SCB_READ_FIFO_REG		0x94
+#define SCB_WRITE_DATA_REG		0x18
+#define SCB_FIFO_STATUS_REG		0x1c
+#define SCB_CLK_SET_REG			0x3c
+#define SCB_INT_STATUS_REG		0x40
+#define SCB_INT_CLEAR_REG		0x44
+#define SCB_INT_MASK_REG		0x48
+#define SCB_CONTROL_REG			0x4c
+
+#define SCB_TIME_TPL_REG		0x50
+#define SCB_TIME_TPH_REG		0x54
+#define SCB_TIME_TP2S_REG		0x58
+#define SCB_TIME_TBI_REG		0x60
+#define SCB_TIME_TSL_REG		0x64
+#define SCB_TIME_TDL_REG		0x68
+#define SCB_TIME_TSDL_REG		0x6c
+#define SCB_TIME_TSDH_REG		0x70
+#define SCB_TIME_TCKH_REG		0x84
+#define SCB_TIME_TCKL_REG		0x88
+
+#define SCB_CORE_REV_REG		0x80
+
+#define SCB_CONTROL_TRANSACTION_HALT	0x200
+#define SCB_CONTROL_CLK_ENABLE		0x1e0
+#define SCB_CONTROL_SOFT_RESET		0x01f
+
+#define FIFO_READ_FULL			0x1
+#define FIFO_READ_EMPTY			0x2
+#define FIFO_WRITE_FULL			0x4
+#define FIFO_WRITE_EMPTY		0x8
+
+#define INT_BUS_INACTIVE		0x00001
+#define INT_UNEXPECTED_START		0x00002
+#define INT_SCLK_LOW_TIMEOUT		0x00004
+#define INT_SDAT_LOW_TIMEOUT		0x00008
+#define INT_WRITE_ACK_ERR		0x00010
+#define INT_ADDR_ACK_ERR		0x00020
+#define INT_READ_FIFO_FULL		0x00200
+#define INT_READ_FIFO_FILLING		0x00400
+#define INT_WRITE_FIFO_EMPTY		0x00800
+#define INT_WRITE_FIFO_EMPTYING		0x01000
+#define INT_TRANSACTION_DONE		0x08000
+#define INT_SLAVE_EVENT			0x10000
+#define INT_TIMING			0x40000
+
+/* level interrupts need clearing after handling instead of before */
+#define INT_LEVEL			0x01e00
+
+/* don't allow any interrupts while the clock may be off */
+#define INT_ENABLE_MASK_INACTIVE	0x00000
+#define INT_ENABLE_MASK_RAW		0x40000
+#define INT_ENABLE_MASK_ATOMIC		0x18034
+#define INT_ENABLE_MASK_AUTOMATIC	0x01e34
+#define INT_ENABLE_MASK_WAITSTOP	0x10030
+
+#define MODE_INACTIVE			0
+#define MODE_RAW			1
+#define MODE_ATOMIC			2
+#define MODE_AUTOMATIC			3
+#define MODE_SEQUENCE			4
+#define MODE_FATAL			5
+#define MODE_WAITSTOP			6
+#define MODE_SUSPEND			7
+
+#define LINESTAT_SCLK_LINE_STATUS	0x00000001
+#define LINESTAT_SCLK_EN		0x00000002
+#define LINESTAT_SDAT_LINE_STATUS	0x00000004
+#define LINESTAT_SDAT_EN		0x00000008
+#define LINESTAT_DET_START_STATUS	0x00000010
+#define LINESTAT_DET_STOP_STATUS	0x00000020
+#define LINESTAT_DET_ACK_STATUS		0x00000040
+#define LINESTAT_DET_NACK_STATUS	0x00000080
+#define LINESTAT_BUS_IDLE		0x00000100
+#define LINESTAT_T_DONE_STATUS		0x00000200
+#define LINESTAT_SCLK_OUT_STATUS	0x00000400
+#define LINESTAT_SDAT_OUT_STATUS	0x00000800
+#define LINESTAT_GEN_LINE_MASK_STATUS	0x00001000
+#define LINESTAT_START_BIT_DET		0x00002000
+#define LINESTAT_STOP_BIT_DET		0x00004000
+#define LINESTAT_ACK_DET		0x00008000
+#define LINESTAT_NACK_DET		0x00010000
+#define LINESTAT_INPUT_HELD_V		0x00020000
+#define LINESTAT_ABORT_DET		0x00040000
+#define LINESTAT_ACK_OR_NACK_DET	(LINESTAT_ACK_DET | LINESTAT_NACK_DET)
+#define LINESTAT_INPUT_DATA		0xff000000
+#define LINESTAT_INPUT_DATA_SHIFT	24
+
+#define LINESTAT_CLEAR_SHIFT		13
+#define LINESTAT_LATCHED		(0x3f << LINESTAT_CLEAR_SHIFT)
+
+#define OVERRIDE_SCLK_OVR		0x001
+#define OVERRIDE_SCLKEN_OVR		0x002
+#define OVERRIDE_SDAT_OVR		0x004
+#define OVERRIDE_SDATEN_OVR		0x008
+#define OVERRIDE_MASTER			0x200
+#define OVERRIDE_LINE_OVR_EN		0x400
+#define OVERRIDE_DIRECT			0x800
+#define OVERRIDE_CMD_SHIFT		4
+#define OVERRIDE_DATA_SHIFT		24
+
+#define OVERRIDE_SCLK_DOWN		(OVERRIDE_LINE_OVR_EN | \
+					 OVERRIDE_SCLKEN_OVR)
+#define OVERRIDE_SCLK_UP		(OVERRIDE_LINE_OVR_EN | \
+					 OVERRIDE_SCLKEN_OVR | \
+					 OVERRIDE_SCLK_OVR)
+#define OVERRIDE_SDAT_DOWN		(OVERRIDE_LINE_OVR_EN | \
+					 OVERRIDE_SDATEN_OVR)
+#define OVERRIDE_SDAT_UP		(OVERRIDE_LINE_OVR_EN | \
+					 OVERRIDE_SDATEN_OVR | \
+					 OVERRIDE_SDAT_OVR)
+
+#define CMD_PAUSE			0x00
+#define CMD_GEN_DATA			0x01
+#define CMD_GEN_START			0x02
+#define CMD_GEN_STOP			0x03
+#define CMD_GEN_ACK			0x04
+#define CMD_GEN_NACK			0x05
+#define CMD_RET_DATA			0x08
+#define CMD_RET_ACK			0x09
+
+#define ATSTATE_ADDRESSING		0
+#define ATSTATE_DATA_XFERED		1
+
+#define TIMEOUT_TBI			0x0
+#define TIMEOUT_TSL			0xffff
+#define TIMEOUT_TDL			0x0
+
+/*
+ * Hardware quirks
+ */
+
+/*
+ * Do 2 dummy writes to ensure a subsequent read reflects the effects of a prior
+ * write. This is necessary due to clock domain crossing in the SCB.
+ */
+#define QUIRK_WR_RD_FENCE	0x00000001
+/*
+ * Automatic mode reads and writes are unreliable when using different clock
+ * domains, e.g. on Comet using XTAL1 in the SCB clock switch. This can be
+ * worked around by using atomic mode (slower), or the same clock domain.
+ */
+#define QUIRK_ATOMIC_ONLY	0x00000002
+
+/*
+ * Bits to return from isr handler functions for different modes.
+ * This delays completion until we've finished with the registers, so that the
+ * function waiting for completion can safely disable the clock to save power.
+ */
+#define ISR_COMPLETE_M		0x80000000
+#define ISR_FATAL_M		0x40000000
+#define ISR_WAITSTOP		0x20000000
+#define ISR_ATDATA_M		0x0ff00000
+#define ISR_ATDATA_S		20
+#define ISR_ATCMD_M		0x000f0000
+#define ISR_ATCMD_S		16
+#define ISR_STATUS_M		0x0000ffff	/* contains +ve errno */
+#define ISR_COMPLETE(ERR)	(ISR_COMPLETE_M | (ISR_STATUS_M & (ERR)))
+#define ISR_FATAL(ERR)		(ISR_COMPLETE(ERR) | ISR_FATAL_M)
+#define ISR_ATOMIC(CMD, DATA)	((ISR_ATCMD_M & ((CMD) << ISR_ATCMD_S)) \
+				| (ISR_ATDATA_M & ((DATA) << ISR_ATDATA_S)))
+
+/* Timing parameters for i2c modes (in ns) */
+static struct {
+	char *name;
+	unsigned int max_bitrate;	/* bps */
+	unsigned int tckh, tckl, tsdh, tsdl;
+	unsigned int tp2s, tpl, tph;
+} img_i2c_timings[] = {
+	/* Standard mode */
+	{
+		.name = "standard",
+		.max_bitrate = 100000,
+		.tckh = 4000,
+		.tckl = 4700,
+		.tsdh = 4700,
+		.tsdl = 8700,
+		.tp2s = 4700,
+		.tpl = 4700,
+		.tph = 4000,
+	},
+	/* Fast mode */
+	{
+		.name = "fast",
+		.max_bitrate = 400000,
+		.tckh = 600,
+		.tckl = 1300,
+		.tsdh = 600,
+		.tsdl = 1200,
+		.tp2s = 1300,
+		.tpl = 600,
+		.tph = 600,
+	},
+};
+
+/* Reset dance */
+static u8 img_i2c_reset_seq[] = { CMD_GEN_START,
+				  CMD_GEN_DATA, 0xff,
+				  CMD_RET_ACK,
+				  CMD_GEN_START,
+				  CMD_GEN_STOP,
+				  0 };
+/* Just issue a stop (after an abort condition) */
+static u8 img_i2c_stop_seq[] = {  CMD_GEN_STOP,
+				  0 };
+
+/* We're interested in different interrupts depending on the mode */
+static unsigned int img_i2c_int_enable_by_mode[] = {
+	[MODE_INACTIVE]  = INT_ENABLE_MASK_INACTIVE,
+	[MODE_RAW]       = INT_ENABLE_MASK_RAW,
+	[MODE_ATOMIC]    = INT_ENABLE_MASK_ATOMIC,
+	[MODE_AUTOMATIC] = INT_ENABLE_MASK_AUTOMATIC,
+	[MODE_SEQUENCE]  = INT_ENABLE_MASK_ATOMIC,
+	[MODE_FATAL]     = 0,
+	[MODE_WAITSTOP]  = INT_ENABLE_MASK_WAITSTOP,
+	[MODE_SUSPEND]   = 0,
+};
+
+/* Mode names */
+static const char * const img_scb_mode_names[] = {
+	[MODE_INACTIVE]   = "INACTIVE",
+	[MODE_RAW]        = "RAW",
+	[MODE_ATOMIC]     = "ATOMIC",
+	[MODE_AUTOMATIC]  = "AUTOMATIC",
+	[MODE_SEQUENCE]   = "SEQUENCE",
+	[MODE_FATAL]      = "FATAL",
+	[MODE_WAITSTOP]   = "WAITSTOP",
+	[MODE_SUSPEND]    = "SUSPEND",
+};
+
+/* Atomic command names */
+static const char * const img_scb_atomic_cmd_names[] = {
+	[CMD_PAUSE]	= "PAUSE",
+	[CMD_GEN_DATA]	= "GEN_DATA",
+	[CMD_GEN_START]	= "GEN_START",
+	[CMD_GEN_STOP]	= "GEN_STOP",
+	[CMD_GEN_ACK]	= "GEN_ACK",
+	[CMD_GEN_NACK]	= "GEN_NACK",
+	[CMD_RET_DATA]	= "RET_DATA",
+	[CMD_RET_ACK]	= "RET_ACK",
+};
+
+struct img_i2c;
+typedef unsigned int img_scb_raw_handler(struct img_i2c *i2c,
+					 unsigned int int_status,
+					 unsigned int line_status);
+
+/* Per-adapter driver state. */
+struct img_i2c {
+	struct i2c_adapter adap;
+
+	/* ioremap'd SCB register block */
+	void __iomem *reg_base;
+
+	/* physical base/size of the register resource (for release) */
+	unsigned long iobase;
+	unsigned long iosize;
+
+	int irq;
+
+	/*
+	 * The clock is used to get the input frequency, and to disable it
+	 * after every set of transactions to save some power.
+	 */
+	struct clk *clk;
+	unsigned int bitrate;
+	unsigned int busdelay;
+	unsigned int quirks;	/* QUIRK_* flags from the device tree */
+
+	/* state */
+	struct completion xfer_done;
+	spinlock_t main_lock;	/* lock before doing anything with the state */
+	struct i2c_msg msg;	/* private copy of the message in progress */
+	int last_msg;	/* wait for a stop bit after this transaction */
+	int status;	/* 0 or -errno result of the current transfer */
+
+	int mode;			/* MODE_* */
+	unsigned int int_enable;	/* depends on mode */
+	unsigned int line_status;	/* line status over command */
+
+	/*
+	 * To avoid slave event interrupts in automatic mode, use a timer to
+	 * poll the abort condition if we don't get an interrupt for too long.
+	 */
+	struct timer_list check_timer;
+	int t_halt;	/* non-zero while transaction halt is asserted */
+
+	/* atomic mode state */
+	int at_t_done;		/* transaction-done interrupt seen */
+	int at_slave_event;	/* slave-event interrupt seen */
+	int at_cur_cmd;		/* current CMD_* (or -1 for none) */
+	u8 at_cur_data;
+	int at_state;	/* ATSTATE_* */
+
+	u8 *seq;	/* see img_scb_sequence_handle_irq */
+
+	/* raw mode */
+	unsigned int raw_timeout;
+	img_scb_raw_handler *raw_handler;
+	unsigned int data_setup_cycles;
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+	/* for minimal overhead debugging */
+	struct {
+		unsigned int time;
+		unsigned int irq_stat;
+		unsigned int line_stat;
+		unsigned int cmd;
+		unsigned int result;
+	} irq_buf[64];
+	unsigned int start_time;
+	unsigned int irq_buf_index;
+#endif
+};
+
+/* MMIO write of one SCB register. */
+static void scb_write_reg(void __iomem *reg_base, unsigned int regno,
+			  unsigned int value)
+{
+	writel(value, reg_base + regno);
+}
+
+/* MMIO read of one SCB register. */
+static unsigned int scb_read_reg(void __iomem *reg_base, unsigned int regno)
+{
+	return readl(reg_base + regno);
+}
+
+/*
+ * Ordering fence between a register write and a subsequent read, for
+ * hardware with the QUIRK_WR_RD_FENCE quirk.  Implemented as two dummy
+ * writes to the core revision register; presumably these flush the write
+ * path without side effects — TODO confirm against the SCB TRM.
+ */
+static void scb_wr_rd_fence(struct img_i2c *i2c)
+{
+	if (i2c->quirks & QUIRK_WR_RD_FENCE) {
+		scb_write_reg(i2c->reg_base, SCB_CORE_REV_REG, 0);
+		scb_write_reg(i2c->reg_base, SCB_CORE_REV_REG, 0);
+	}
+}
+
+/*
+ * Switch driver mode, picking up the matching interrupt mask and clearing
+ * the accumulated line status.  Note: does not write the mask register
+ * itself; callers do that when appropriate.
+ */
+static void img_scb_switch_mode(struct img_i2c *i2c, int mode)
+{
+	i2c->mode = mode;
+	i2c->int_enable = img_i2c_int_enable_by_mode[mode];
+	i2c->line_status = 0;
+}
+
+/* delay the check timeout for a bit longer */
+static void img_scb_delay_check(struct img_i2c *i2c)
+{
+	mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1));
+}
+
+/*
+ * Perform a raw-mode line override: optionally force SDAT/SCLK to given
+ * levels, arm a timeout (in timing-interrupt ticks), and record the
+ * handler the ISR should call once that timeout expires.  The current
+ * atomic command/data are re-encoded into the override register so the
+ * pending command is preserved across the raw operation.
+ */
+static void img_scb_raw_op(struct img_i2c *i2c,
+			   int force_sdat, int sdat,
+			   int force_sclk, int sclk,
+			   int timeout,
+			   img_scb_raw_handler *handler)
+{
+	i2c->raw_timeout = timeout;
+	i2c->raw_handler = handler;
+	scb_write_reg(i2c->reg_base, SCB_OVERRIDE_REG,
+		(sclk		? OVERRIDE_SCLK_OVR	: 0) |
+		(force_sclk	? OVERRIDE_SCLKEN_OVR	: 0) |
+		(sdat		? OVERRIDE_SDAT_OVR	: 0) |
+		(force_sdat	? OVERRIDE_SDATEN_OVR	: 0) |
+		OVERRIDE_MASTER |
+		OVERRIDE_LINE_OVR_EN |
+		OVERRIDE_DIRECT |
+		((i2c->at_cur_cmd & 0x1f) << OVERRIDE_CMD_SHIFT) |
+		(i2c->at_cur_data << OVERRIDE_DATA_SHIFT));
+}
+
+/* Name of an atomic command, for debug output; bounds-checked. */
+static const char *img_scb_atomic_op_name(unsigned int cmd)
+{
+	if (unlikely(cmd >= ARRAY_SIZE(img_scb_atomic_cmd_names)))
+		return "UNKNOWN";
+	return img_scb_atomic_cmd_names[cmd];
+}
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+/* Name of a driver mode, for debug output; bounds-checked. */
+static const char *img_scb_mode_name(int mode)
+{
+	if (unlikely(mode >= ARRAY_SIZE(img_scb_mode_names)))
+		return "UNKNOWN";
+	return img_scb_mode_names[mode];
+}
+#endif
+
+static void img_scb_atomic_op(struct img_i2c *i2c, int cmd, u8 data);
+
+/*
+ * Raw-mode callback used by img_scb_atomic_op() to work around missing
+ * data setup time: after the raw delay expires, re-issue the pending
+ * atomic command and return to atomic mode.
+ *
+ * Made static: it is referenced only within this file (installed via the
+ * raw_handler pointer in img_scb_atomic_op()), so it must not leak into
+ * the global kernel namespace (sparse warns about non-static symbols
+ * with no prototype).
+ */
+static unsigned int img_scb_raw_atomic_delay_handler(struct img_i2c *i2c,
+						     unsigned int int_status,
+						     unsigned int line_status)
+{
+	/* stay in raw mode for this, so we don't just loop infinitely */
+	img_scb_atomic_op(i2c, i2c->at_cur_cmd, i2c->at_cur_data);
+	img_scb_switch_mode(i2c, MODE_ATOMIC);
+	return ISR_ATOMIC(i2c->at_cur_cmd, i2c->at_cur_data);
+}
+
+/*
+ * Issue a single atomic command (CMD_*) with the given data byte via the
+ * override register.  When generating a data byte whose MSB is 0 while
+ * SDAT is currently high, first drop into raw mode briefly to hold the
+ * data line low (see img_scb_raw_atomic_delay_handler) before the
+ * command is actually issued.
+ */
+static void img_scb_atomic_op(struct img_i2c *i2c, int cmd, u8 data)
+{
+	unsigned int line_status;
+
+	i2c->at_cur_cmd = cmd;
+	i2c->at_cur_data = data;
+
+	/* work around lack of data setup time when generating data */
+	if (cmd == CMD_GEN_DATA && i2c->mode == MODE_ATOMIC) {
+		line_status = scb_read_reg(i2c->reg_base, SCB_STATUS_REG);
+		if (line_status & LINESTAT_SDAT_LINE_STATUS && !(data & 0x80)) {
+			/* hold the data line down for a moment */
+			img_scb_switch_mode(i2c, MODE_RAW);
+			img_scb_raw_op(i2c, 1, 0, 1, 0,
+					0, &img_scb_raw_atomic_delay_handler);
+			return;
+		}
+	}
+
+	dev_dbg(i2c->adap.dev.parent,
+		"atomic cmd=%s (%d) data=%#x\n",
+		img_scb_atomic_op_name(cmd), cmd,
+		data);
+	/* RET_DATA/RET_ACK do not raise a transaction-done event */
+	i2c->at_t_done = (cmd == CMD_RET_DATA || cmd == CMD_RET_ACK);
+	i2c->at_slave_event = 0;
+	i2c->line_status = 0;
+
+	scb_write_reg(i2c->reg_base, SCB_OVERRIDE_REG,
+		((cmd & 0x1f) << OVERRIDE_CMD_SHIFT) |
+		OVERRIDE_MASTER |
+		OVERRIDE_DIRECT |
+		(data << OVERRIDE_DATA_SHIFT));
+}
+
+/* Begin an atomic-mode transfer by generating a start bit. */
+static void img_scb_atomic_start(struct img_i2c *i2c)
+{
+	img_scb_switch_mode(i2c, MODE_ATOMIC);
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+	i2c->at_state = ATSTATE_ADDRESSING;
+	img_scb_atomic_op(i2c, CMD_GEN_START, 0x00);
+}
+
+/* Soft-reset the SCB block, leaving its clock enabled. */
+static void img_scb_soft_reset(struct img_i2c *i2c)
+{
+	i2c->t_halt = 0;
+	scb_write_reg(i2c->reg_base, SCB_CONTROL_REG, 0);
+
+	scb_write_reg(i2c->reg_base, SCB_CONTROL_REG,
+		      SCB_CONTROL_CLK_ENABLE | SCB_CONTROL_SOFT_RESET);
+}
+
+/* enable or release transaction halt for control of repeated starts */
+static void img_scb_transaction_halt(struct img_i2c *i2c, int t_halt)
+{
+	unsigned int tmp;
+
+	/* cached state avoids a redundant read-modify-write */
+	if (i2c->t_halt == t_halt)
+		return;
+
+	i2c->t_halt = t_halt;
+	tmp = scb_read_reg(i2c->reg_base, SCB_CONTROL_REG);
+	if (t_halt)
+		tmp |= SCB_CONTROL_TRANSACTION_HALT;
+	else
+		tmp &= ~SCB_CONTROL_TRANSACTION_HALT;
+	scb_write_reg(i2c->reg_base, SCB_CONTROL_REG, tmp);
+}
+
+/*
+ * Drain the master read FIFO into the current message buffer, stopping
+ * when either the FIFO is empty or the message is complete.  Advances
+ * i2c->msg.buf/len as it goes.
+ */
+static void img_scb_read_fifo(struct img_i2c *i2c)
+{
+	while (i2c->msg.len) {
+		unsigned int fifo_status;
+		char data;
+		/* Get FIFO status. */
+		fifo_status = scb_read_reg(i2c->reg_base, SCB_FIFO_STATUS_REG);
+
+		if (fifo_status & FIFO_READ_EMPTY)
+			break;
+
+		/* Read data from FIFO. */
+		data = scb_read_reg(i2c->reg_base, SCB_READ_DATA_REG);
+
+		*i2c->msg.buf = data;
+
+		/* Advance FIFO. */
+		scb_write_reg(i2c->reg_base, SCB_READ_FIFO_REG, 0xff);
+		scb_wr_rd_fence(i2c);
+		i2c->msg.len--;
+		i2c->msg.buf++;
+	}
+}
+
+/*
+ * Fill the master write FIFO from the current message buffer, stopping
+ * when either the FIFO is full or the message is complete.  Clears the
+ * fifo-emptying interrupt from int_enable when nothing remains; callers
+ * must rewrite the interrupt mask register afterwards.
+ */
+static void img_scb_write_fifo(struct img_i2c *i2c)
+{
+	while (i2c->msg.len) {
+		unsigned int fifo_status;
+		/* Get FIFO status. */
+		fifo_status = scb_read_reg(i2c->reg_base, SCB_FIFO_STATUS_REG);
+
+		if (fifo_status & FIFO_WRITE_FULL)
+			break;
+
+		/* Write data into FIFO. */
+		scb_write_reg(i2c->reg_base, SCB_WRITE_DATA_REG,
+			      *i2c->msg.buf);
+		scb_wr_rd_fence(i2c);
+		i2c->msg.len--;
+		i2c->msg.buf++;
+	}
+	if (!i2c->msg.len)
+		/* Disable fifo emptying interrupt if nothing more to write. */
+		i2c->int_enable &= ~INT_WRITE_FIFO_EMPTYING;
+}
+
+/*
+ * Start an automatic-mode read of i2c->msg.  For non-final messages the
+ * slave event interrupt is also enabled so the ISR can assert transaction
+ * halt on the next start bit (repeated start handling).
+ */
+static void img_scb_read(struct img_i2c *i2c)
+{
+	img_scb_switch_mode(i2c, MODE_AUTOMATIC);
+	if (!i2c->last_msg)
+		i2c->int_enable |= INT_SLAVE_EVENT;
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+
+	/* Set address to read from. */
+	scb_write_reg(i2c->reg_base, SCB_READ_ADDR_REG, i2c->msg.addr);
+	/* Set number of bytes to read. */
+	scb_write_reg(i2c->reg_base, SCB_READ_COUNT_REG, i2c->msg.len);
+
+	/* Release transaction halt */
+	img_scb_transaction_halt(i2c, 0);
+
+	/* start check timer if not already going */
+	img_scb_delay_check(i2c);
+}
+
+/*
+ * Start an automatic-mode write of i2c->msg.  The FIFO is pre-filled
+ * before the interrupt mask is written, since filling it may clear the
+ * fifo-emptying bit from int_enable.
+ */
+static void img_scb_write(struct img_i2c *i2c)
+{
+	img_scb_switch_mode(i2c, MODE_AUTOMATIC);
+	if (!i2c->last_msg)
+		i2c->int_enable |= INT_SLAVE_EVENT;
+
+	/* Set address to write to. */
+	scb_write_reg(i2c->reg_base, SCB_WRITE_ADDR_REG, i2c->msg.addr);
+	/* Set number of bytes to write. */
+	scb_write_reg(i2c->reg_base, SCB_WRITE_COUNT_REG, i2c->msg.len);
+
+	/* Release transaction halt */
+	img_scb_transaction_halt(i2c, 0);
+
+	/* start check timer if not already going */
+	img_scb_delay_check(i2c);
+
+	/* Start filling fifo right away */
+	img_scb_write_fifo(i2c);
+
+	/* img_scb_write_fifo() may modify int_enable */
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+}
+
+/* After calling this, the ISR must not access any more SCB registers. */
+static void img_scb_complete_transaction(struct img_i2c *i2c, int status)
+{
+	img_scb_switch_mode(i2c, MODE_INACTIVE);
+	if (status) {
+		/* record the error and release any pending transaction halt */
+		i2c->status = status;
+		img_scb_transaction_halt(i2c, 0);
+	}
+	complete(&i2c->xfer_done);
+}
+
+/*
+ * Raw mode ISR path: count down the raw timeout on timing interrupts and
+ * invoke the installed raw_handler once it reaches zero.
+ */
+static unsigned int img_scb_raw_handle_irq(struct img_i2c *i2c,
+					   unsigned int int_status,
+					   unsigned int line_status)
+{
+	if (int_status & INT_TIMING) {
+		if (!i2c->raw_timeout)
+			return i2c->raw_handler(i2c, int_status, line_status);
+		--i2c->raw_timeout;
+	}
+	return 0;
+}
+
+/*
+ * Sequence mode ISR path: step through the nil-terminated command list in
+ * i2c->seq, issuing the next atomic command once the previous one has
+ * both completed (transaction done) and raised its slave event, and once
+ * the expected line-status "continue" bit for that command has latched.
+ * A CMD_GEN_DATA entry is followed in the sequence by its data byte.
+ */
+static unsigned int img_scb_sequence_handle_irq(struct img_i2c *i2c,
+						unsigned int int_status)
+{
+	/* line-status bit that signals each command type has taken effect */
+	static const unsigned int continue_bits[] = {
+		[CMD_GEN_START] = LINESTAT_START_BIT_DET,
+		[CMD_GEN_DATA]  = LINESTAT_INPUT_HELD_V,
+		[CMD_RET_ACK]   = LINESTAT_ACK_DET | LINESTAT_NACK_DET,
+		[CMD_RET_DATA]  = LINESTAT_INPUT_HELD_V,
+		[CMD_GEN_STOP]  = LINESTAT_STOP_BIT_DET,
+	};
+	int next_cmd = -1;
+	u8 next_data = 0x00;
+
+	if (int_status & INT_SLAVE_EVENT)
+		i2c->at_slave_event = 1;
+	if (int_status & INT_TRANSACTION_DONE)
+		i2c->at_t_done = 1;
+
+	if (!i2c->at_slave_event || !i2c->at_t_done)
+		return 0;
+
+	/* wait if no continue bits are set */
+	if (i2c->at_cur_cmd >= 0 && i2c->at_cur_cmd
+					< ARRAY_SIZE(continue_bits)) {
+		unsigned int cont_bits = continue_bits[i2c->at_cur_cmd];
+		if (cont_bits) {
+			/* an abort also counts as "move on" */
+			cont_bits |= LINESTAT_ABORT_DET;
+			if (!(i2c->line_status & cont_bits))
+				return 0;
+		}
+	}
+
+	/* follow the sequence of commands in i2c->seq */
+	next_cmd = *i2c->seq;
+	/* stop on a nil */
+	if (!next_cmd) {
+		scb_write_reg(i2c->reg_base, SCB_OVERRIDE_REG, 0);
+		return ISR_COMPLETE(0);
+	}
+	/* when generating data, the next byte is the data */
+	if (next_cmd == CMD_GEN_DATA) {
+		++i2c->seq;
+		next_data = *i2c->seq;
+	}
+	++i2c->seq;
+	img_scb_atomic_op(i2c, next_cmd, next_data);
+
+	return ISR_ATOMIC(next_cmd, next_data);
+}
+
+/*
+ * Kick off the bus-reset command sequence (img_i2c_reset_seq).  The
+ * at_* flags are pre-set so the first img_scb_sequence_handle_irq()
+ * call issues the first command immediately.
+ */
+static void img_scb_reset_start(struct img_i2c *i2c)
+{
+	/* Initiate the magic dance */
+	img_scb_switch_mode(i2c, MODE_SEQUENCE);
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+	i2c->seq = img_i2c_reset_seq;
+	i2c->at_slave_event = 1;
+	i2c->at_t_done = 1;
+	i2c->at_cur_cmd = -1;
+	/* img_i2c_reset_seq isn't empty so the following won't fail */
+	img_scb_sequence_handle_irq(i2c, 0);
+}
+
+/*
+ * Kick off the stop-bit command sequence (img_i2c_stop_seq), used to
+ * force a stop bit after an abort in automatic mode.
+ */
+static void img_scb_stop_start(struct img_i2c *i2c)
+{
+	/* Initiate a stop bit sequence */
+	img_scb_switch_mode(i2c, MODE_SEQUENCE);
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+	i2c->seq = img_i2c_stop_seq;
+	i2c->at_slave_event = 1;
+	i2c->at_t_done = 1;
+	i2c->at_cur_cmd = -1;
+	/* img_i2c_stop_seq isn't empty so the following won't fail */
+	img_scb_sequence_handle_irq(i2c, 0);
+}
+
+/*
+ * Atomic mode ISR path: a hand-driven I2C state machine.  Once the
+ * current command has both completed and raised its slave event, decide
+ * the next command from the command just finished and the latched line
+ * status, consuming/producing message bytes along the way.  An abort
+ * condition or a NACK forces a stop and flags -EIO.
+ */
+static unsigned int img_scb_atomic_handle_irq(struct img_i2c *i2c,
+					      unsigned int int_status,
+					      unsigned int line_status)
+{
+	int next_cmd = -1;
+	u8 next_data = 0x00;
+
+	if (int_status & INT_SLAVE_EVENT)
+		i2c->at_slave_event = 1;
+	if (int_status & INT_TRANSACTION_DONE)
+		i2c->at_t_done = 1;
+
+	if (!i2c->at_slave_event || !i2c->at_t_done)
+		goto next_atomic_cmd;
+	if (i2c->line_status & LINESTAT_ABORT_DET) {
+		dev_dbg(i2c->adap.dev.parent, "abort condition detected\n");
+		next_cmd = CMD_GEN_STOP;
+		i2c->status = -EIO;
+		goto next_atomic_cmd;
+	}
+
+	/* i2c->at_cur_cmd may have completed */
+	switch (i2c->at_cur_cmd) {
+	case CMD_GEN_START:
+		/* start sent; follow with the address byte (R/W in bit 0) */
+		next_cmd = CMD_GEN_DATA;
+		next_data = (i2c->msg.addr << 1);
+		if (i2c->msg.flags & I2C_M_RD)
+			next_data |= 0x1;
+		break;
+	case CMD_GEN_DATA:
+		/* byte sent; wait for the slave's ACK/NACK */
+		if (i2c->line_status & LINESTAT_INPUT_HELD_V)
+			next_cmd = CMD_RET_ACK;
+		break;
+	case CMD_RET_ACK:
+		if (i2c->line_status & LINESTAT_ACK_DET) {
+			if (i2c->msg.len == 0)
+				next_cmd = CMD_GEN_STOP;
+			else if (i2c->msg.flags & I2C_M_RD)
+				next_cmd = CMD_RET_DATA;
+			else {
+				next_cmd = CMD_GEN_DATA;
+				next_data = *i2c->msg.buf;
+				--i2c->msg.len;
+				++i2c->msg.buf;
+				i2c->at_state = ATSTATE_DATA_XFERED;
+			}
+		} else if (i2c->line_status & LINESTAT_NACK_DET) {
+			i2c->status = -EIO;
+			next_cmd = CMD_GEN_STOP;
+		}
+		break;
+	case CMD_RET_DATA:
+		/* received a byte; ACK it unless it is the last one */
+		if (i2c->line_status & LINESTAT_INPUT_HELD_V) {
+			*i2c->msg.buf = (i2c->line_status &
+						LINESTAT_INPUT_DATA)
+					>> LINESTAT_INPUT_DATA_SHIFT;
+			--i2c->msg.len;
+			++i2c->msg.buf;
+			if (i2c->msg.len)
+				next_cmd = CMD_GEN_ACK;
+			else
+				next_cmd = CMD_GEN_NACK;
+		}
+		break;
+	case CMD_GEN_ACK:
+		if (i2c->line_status & LINESTAT_ACK_DET) {
+			next_cmd = CMD_RET_DATA;
+		} else {
+			i2c->status = -EIO;
+			next_cmd = CMD_GEN_STOP;
+		}
+		i2c->at_state = ATSTATE_DATA_XFERED;
+		break;
+	case CMD_GEN_NACK:
+		next_cmd = CMD_GEN_STOP;
+		break;
+	case CMD_GEN_STOP:
+		scb_write_reg(i2c->reg_base, SCB_OVERRIDE_REG, 0);
+		return ISR_COMPLETE(0);
+	default:
+		dev_err(i2c->adap.dev.parent, "bad atomic command %d\n",
+			i2c->at_cur_cmd);
+		i2c->status = -EIO;
+		next_cmd = CMD_GEN_STOP;
+		break;
+	}
+
+next_atomic_cmd:
+	if (next_cmd != -1) {
+		/* don't actually stop unless we're the last transaction */
+		if (next_cmd == CMD_GEN_STOP && !i2c->status && !i2c->last_msg)
+			return ISR_COMPLETE(0);
+		img_scb_atomic_op(i2c, next_cmd, next_data);
+	}
+	return ISR_ATOMIC(next_cmd, next_data);
+}
+
+/*
+ * Timer function to check if something has gone wrong in automatic mode (so we
+ * don't have to handle so many interrupts just to catch an exception).
+ */
+static void img_scb_check_timer(unsigned long arg)
+{
+	struct img_i2c *i2c = (struct img_i2c *)arg;
+	unsigned long flags;
+	unsigned int line_status;
+
+	spin_lock_irqsave(&i2c->main_lock, flags);
+	line_status = scb_read_reg(i2c->reg_base, SCB_STATUS_REG);
+
+	/* check for an abort condition */
+	if (line_status & LINESTAT_ABORT_DET) {
+		dev_dbg(i2c->adap.dev.parent,
+			"abort condition detected by check timer\n");
+		/*
+		 * enable slave event interrupt mask to trigger irq;
+		 * the ISR then handles the abort and rewrites the mask
+		 */
+		scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG,
+			      i2c->int_enable | INT_SLAVE_EVENT);
+	}
+
+	spin_unlock_irqrestore(&i2c->main_lock, flags);
+}
+
+/*
+ * Automatic mode ISR path: the hardware drives the bus; this just shuttles
+ * bytes through the FIFOs, handles ack errors and aborts, and asserts
+ * transaction halt at a repeated start for non-final messages.
+ */
+static unsigned int img_scb_automatic_handle_irq(struct img_i2c *i2c,
+						 unsigned int int_status,
+						 unsigned int line_status)
+{
+	if (int_status & (INT_WRITE_ACK_ERR |
+			  INT_ADDR_ACK_ERR))
+		return ISR_COMPLETE(EIO);
+	if (line_status & LINESTAT_ABORT_DET) {
+		dev_dbg(i2c->adap.dev.parent, "abort condition detected\n");
+		/* empty the read fifo */
+		if (i2c->msg.flags & I2C_M_RD && int_status &
+				(INT_READ_FIFO_FULL | INT_READ_FIFO_FILLING))
+			img_scb_read_fifo(i2c);
+		/* use atomic mode and try to force a stop bit */
+		i2c->status = -EIO;
+		img_scb_stop_start(i2c);
+		return 0;
+	}
+	/* Enable transaction halt on start bit */
+	if (!i2c->last_msg && i2c->line_status & LINESTAT_START_BIT_DET) {
+		img_scb_transaction_halt(i2c, 1);
+		/* we're no longer interested in the slave event */
+		i2c->int_enable &= ~INT_SLAVE_EVENT;
+	}
+
+
+	/* push back check timer */
+	img_scb_delay_check(i2c);
+
+	if (i2c->msg.flags & I2C_M_RD) {
+		if (int_status & (INT_READ_FIFO_FULL |
+				  INT_READ_FIFO_FILLING)) {
+			img_scb_read_fifo(i2c);
+			if (i2c->msg.len == 0)
+				return ISR_WAITSTOP;
+		}
+	} else {
+		if (int_status & (INT_WRITE_FIFO_EMPTY |
+				  INT_WRITE_FIFO_EMPTYING)) {
+			/*
+			 * The write fifo empty indicates that we're in the
+			 * last byte so it's safe to start a new write
+			 * transaction without losing any bytes from the
+			 * previous one.
+			 * see 2.3.7 Repeated Start Transactions.
+			 */
+			if ((int_status & INT_WRITE_FIFO_EMPTY) &&
+			    i2c->msg.len == 0)
+				return ISR_WAITSTOP;
+			img_scb_write_fifo(i2c);
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+/* get the name of a command in the debug irq_buf */
+static const char *img_scb_cmd_name(unsigned int cmd)
+{
+	/* high half of the stored cmd word holds the mode, low half the cmd */
+	switch (cmd >> 16) {
+	case MODE_ATOMIC:
+	case MODE_SEQUENCE:
+		return img_scb_atomic_op_name(cmd & 0xffff);
+	default:
+		return "UNKNOWN";
+	}
+}
+#endif
+
+/* dump info about the last few interrupts */
+static void img_scb_dump_debug(struct img_i2c *i2c)
+{
+	static const struct {
+		unsigned int addr;
+		const char * const name;
+	} interesting_regs[] = {
+		{ SCB_STATUS_REG, "scb_line_status" },
+		{ SCB_OVERRIDE_REG, "scb_line_override" },
+		{ SCB_FIFO_STATUS_REG, "scb_master_fill_status" },
+		{ SCB_INT_STATUS_REG, "scb_interrupt_status" },
+		{ SCB_INT_MASK_REG, "scb_interrupt_mask" },
+		{ SCB_CONTROL_REG, "scb_general_control" },
+	};
+	int i;
+
+	/* print out some useful registers */
+	dev_info(i2c->adap.dev.parent, "SCB registers:\n");
+	for (i = 0; i < ARRAY_SIZE(interesting_regs); ++i)
+		dev_info(i2c->adap.dev.parent, " %s=%#x\n",
+			 interesting_regs[i].name,
+			 scb_read_reg(i2c->reg_base, interesting_regs[i].addr));
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+	/* print out the contents of the debug buffer */
+	if (!i2c->irq_buf_index)
+		dev_info(i2c->adap.dev.parent,
+			 "No interrupts in debug buffer\n");
+
+	dev_info(i2c->adap.dev.parent,
+		 "%d interrupts in debug buffer:\n",
+		 i2c->irq_buf_index);
+	/* times come from local_clock(); >> 10 approximates ns -> us */
+	for (i = 0; i < i2c->irq_buf_index; ++i)
+		dev_info(i2c->adap.dev.parent,
+			 " #%d after %uus: irq=%#x, stat=%#x, cmd=%s.%s (%#x), hret=%#x\n",
+			 i,
+			 (i2c->irq_buf[i].time - i2c->start_time) >> 10,
+			 i2c->irq_buf[i].irq_stat,
+			 i2c->irq_buf[i].line_stat,
+			 img_scb_mode_name(i2c->irq_buf[i].cmd >> 16),
+			 img_scb_cmd_name(i2c->irq_buf[i].cmd),
+			 i2c->irq_buf[i].cmd,
+			 i2c->irq_buf[i].result);
+#endif
+}
+
+/*
+ * Main interrupt handler.  Reads and clears interrupt and latched line
+ * status, dispatches to the per-mode handler, and then acts on the
+ * returned ISR_* code (wait for stop, complete the transfer, or go
+ * fatal).  Register access ordering here is deliberate; completion is
+ * only signalled after the last register access for the transfer.
+ */
+static irqreturn_t img_scb_irq_handler(int irq, void *dev_id)
+{
+	struct img_i2c *i2c = (struct img_i2c *)dev_id;
+	unsigned int int_status;
+	unsigned int line_status;
+	/* We handle transaction completion AFTER accessing registers */
+	unsigned int hret;
+
+	/* Read interrupt status register. */
+	int_status = scb_read_reg(i2c->reg_base, SCB_INT_STATUS_REG);
+	/* Clear detected interrupts. */
+	scb_write_reg(i2c->reg_base, SCB_INT_CLEAR_REG, int_status);
+
+	/*
+	 * Read line status and clear it until it actually is clear.  We have
+	 * to be careful not to lose any line status bits that get latched.
+	 */
+	line_status = scb_read_reg(i2c->reg_base, SCB_STATUS_REG);
+	if (line_status & LINESTAT_LATCHED) {
+		scb_write_reg(i2c->reg_base, SCB_CLEAR_REG,
+			      (line_status & LINESTAT_LATCHED)
+				>> LINESTAT_CLEAR_SHIFT);
+		scb_wr_rd_fence(i2c);
+	}
+
+	spin_lock(&i2c->main_lock);
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+	/* record this interrupt in the debug ring (result filled in later) */
+	if (i2c->irq_buf_index < ARRAY_SIZE(i2c->irq_buf)) {
+		i2c->irq_buf[i2c->irq_buf_index].time =
+						(unsigned int)local_clock();
+		i2c->irq_buf[i2c->irq_buf_index].irq_stat = int_status;
+		i2c->irq_buf[i2c->irq_buf_index].line_stat = line_status;
+		i2c->irq_buf[i2c->irq_buf_index].cmd = (i2c->mode << 16)
+							| i2c->at_cur_cmd;
+	}
+#endif
+
+	/* Keep track of line status bits received */
+	i2c->line_status &= ~LINESTAT_INPUT_DATA;
+	i2c->line_status |= line_status;
+
+	/*
+	 * Certain interrupts indicate that sclk low timeout is not
+	 * a problem. If any of these are set, just continue.
+	 */
+	if ((int_status & INT_SCLK_LOW_TIMEOUT) &&
+	    !(int_status & (INT_SLAVE_EVENT |
+			    INT_WRITE_FIFO_EMPTY |
+			    INT_READ_FIFO_FULL))) {
+		dev_crit(i2c->adap.dev.parent,
+			 "fatal: clock low timeout occurred"
+			 " %s addr 0x%02x\n",
+			 (i2c->msg.flags & I2C_M_RD) ? "reading"
+						      : "writing",
+			 i2c->msg.addr);
+		hret = ISR_FATAL(EIO);
+		goto out;
+	}
+
+	/* dispatch to the handler for the current mode */
+	if (i2c->mode == MODE_ATOMIC)
+		hret = img_scb_atomic_handle_irq(i2c, int_status, line_status);
+	else if (i2c->mode == MODE_AUTOMATIC)
+		hret = img_scb_automatic_handle_irq(i2c, int_status,
+						    line_status);
+	else if (i2c->mode == MODE_SEQUENCE)
+		hret = img_scb_sequence_handle_irq(i2c, int_status);
+	else if (i2c->mode == MODE_WAITSTOP && (int_status & INT_SLAVE_EVENT) &&
+			 (line_status & LINESTAT_STOP_BIT_DET))
+		hret = ISR_COMPLETE(0);
+	else if (i2c->mode == MODE_RAW)
+		hret = img_scb_raw_handle_irq(i2c, int_status, line_status);
+	else
+		hret = 0;
+
+	/* Clear detected level interrupts. */
+	scb_write_reg(i2c->reg_base, SCB_INT_CLEAR_REG, int_status & INT_LEVEL);
+out:
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+	if (i2c->irq_buf_index < ARRAY_SIZE(i2c->irq_buf)) {
+		i2c->irq_buf[i2c->irq_buf_index].result = hret;
+		++i2c->irq_buf_index;
+	}
+#endif
+
+#ifdef CONFIG_I2C_DEBUG_BUS
+	dev_dbg(i2c->adap.dev.parent,
+		"serviced irq: %x (ls=%x,ret=%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
+		int_status,
+		line_status,
+		hret,
+		(int_status & INT_BUS_INACTIVE)
+			? " | INT_BUS_INACTIVE" : "",
+		(int_status & INT_UNEXPECTED_START)
+			? " | INT_UNEXPECTED_START" : "",
+		(int_status & INT_SCLK_LOW_TIMEOUT)
+			? " | INT_SCLK_LOW_TIMEOUT" : "",
+		(int_status & INT_SDAT_LOW_TIMEOUT)
+			? " | INT_SDAT_LOW_TIMEOUT" : "",
+		(int_status & INT_WRITE_ACK_ERR)
+			? " | INT_WRITE_ACK_ERR" : "",
+		(int_status & INT_ADDR_ACK_ERR)
+			? " | INT_ADDR_ACK_ERR" : "",
+		(int_status & INT_READ_FIFO_FULL)
+			? " | INT_READ_FIFO_FULL" : "",
+		(int_status & INT_READ_FIFO_FILLING)
+			? " | INT_READ_FIFO_FILLING" : "",
+		(int_status & INT_WRITE_FIFO_EMPTY)
+			? " | INT_WRITE_FIFO_EMPTY" : "",
+		(int_status & INT_WRITE_FIFO_EMPTYING)
+			? " | INT_WRITE_FIFO_EMPTYING" : "",
+		(int_status & INT_TRANSACTION_DONE)
+			? " | INT_TRANSACTION_DONE" : "",
+		(int_status & INT_SLAVE_EVENT)
+			? " | INT_SLAVE_EVENT" : "");
+#endif
+
+	if (hret & ISR_WAITSTOP) {
+		/*
+		 * Only wait for stop on last message.
+		 * Also we may already have detected the stop bit.
+		 */
+		if (!i2c->last_msg || i2c->line_status & LINESTAT_STOP_BIT_DET)
+			hret = ISR_COMPLETE(0);
+		else
+			img_scb_switch_mode(i2c, MODE_WAITSTOP);
+	}
+
+	/* now we've finished using regs, handle transaction completion */
+	if (hret & ISR_COMPLETE_M) {
+		int status = -(hret & ISR_STATUS_M);
+		img_scb_complete_transaction(i2c, status);
+		if (hret & ISR_FATAL_M) {
+			img_scb_switch_mode(i2c, MODE_FATAL);
+			img_scb_dump_debug(i2c);
+		}
+	}
+
+	/* Enable interrupts (int_enable may be altered by changing mode) */
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+
+	spin_unlock(&i2c->main_lock);
+
+	return IRQ_HANDLED;
+}
+
+/* Report supported functionality: plain I2C plus emulated SMBus. */
+static u32 img_scb_func(struct i2c_adapter *i2c_adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/*
+ * Kick off the atomic bus-reset sequence and wait for it to complete.
+ * Returns 0 or a negative error recorded by the ISR.
+ *
+ * This must NOT be marked __init: although it is first reached from the
+ * probe path, img_scb_init() also calls it again from img_scb_xfer()
+ * (re-init after MODE_SUSPEND) and from the resume path, long after the
+ * kernel's init sections have been discarded.
+ */
+static int i2c_img_reset_bus(struct img_i2c *i2c)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i2c->main_lock, flags);
+
+	i2c->status = 0;
+	INIT_COMPLETION(i2c->xfer_done);
+	img_scb_reset_start(i2c);
+
+	spin_unlock_irqrestore(&i2c->main_lock, flags);
+
+	wait_for_completion(&i2c->xfer_done);
+
+	return i2c->status;
+}
+
+/*
+ * (Re)initialise the SCB hardware: verify the core revision, program the
+ * clock divider/filter and all bus timing registers from the requested
+ * bitrate and bus delay, soft-reset the block, and reset the bus.
+ * Called from probe, from resume, and on the first transfer after
+ * MODE_SUSPEND.  Returns 0 or -EINVAL for an unsupported revision.
+ */
+static int img_scb_init(struct img_i2c *i2c)
+{
+	int clk_khz;
+	int bitrate_khz = i2c->bitrate / 1000;
+	int opt_inc;
+	int data, prescale, inc, filt, clk_period, int_bitrate;
+	int tckh, tckl, tsdh;
+	int mode, i;
+	unsigned int revision;
+	int ret = 0;
+
+	/* NOTE(review): clk_prepare_enable() return value is ignored here */
+	clk_prepare_enable(i2c->clk);
+
+	/* require core revision >= 2.2.x.x */
+	revision = scb_read_reg(i2c->reg_base, SCB_CORE_REV_REG);
+	if ((revision & 0x00ffffff) < 0x00020200) {
+		dev_info(i2c->adap.dev.parent,
+			 "Unknown hardware revision (%d.%d.%d.%d)\n",
+			 (revision >> 24) & 0xff, (revision >> 16) & 0xff,
+			 (revision >> 8) & 0xff, revision & 0xff);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	clk_khz = clk_get_rate(i2c->clk) / 1000;
+
+	/* Determine what mode we're in from the bitrate */
+	mode = ARRAY_SIZE(img_i2c_timings) - 1;
+	for (i = 0; i < ARRAY_SIZE(img_i2c_timings); ++i)
+		if (i2c->bitrate <= img_i2c_timings[i].max_bitrate) {
+			mode = i;
+			break;
+		}
+
+	/*
+	 * Worst incs are 1 (inaccurate) and 16*256 (irregular).
+	 * So a sensible inc is the logarithmic mean: 64 (2^6), which is
+	 * in the middle of the valid range (0-127).
+	 */
+	opt_inc = 64;
+
+	/* Find the prescale that would give us that inc (approx delay = 0) */
+	prescale = opt_inc * clk_khz / (256 * 16 * bitrate_khz);
+	if (prescale > 8)
+		prescale = 8;
+	else if (prescale < 1)
+		prescale = 1;
+	clk_khz /= prescale;
+
+	/* Setup the clock increment value. */
+	inc = ((256 * 16 * bitrate_khz) /
+	       (clk_khz - (16 * bitrate_khz * (clk_khz / 1000) *
+			   i2c->busdelay) / 10000));
+	/* Setup the filter clock value. */
+	if (clk_khz < 20000) {
+		/* Filter disable. */
+		filt = 0x8000;
+	} else if (clk_khz < 40000) {
+		/* Filter bypass. */
+		filt = 0x4000;
+	} else {
+		/* Calculate filter clock. */
+		filt = ((640000) / ((clk_khz / 1000) *
+				    (250 - i2c->busdelay)));
+		if ((640000) % ((clk_khz / 1000) *
+				(250 - i2c->busdelay))) {
+			/* Scale up. */
+			inc++;
+		}
+		if (filt > 0x7f)
+			filt = 0x7f;
+	}
+	data = ((filt & 0xffff) << 16) | ((inc & 0x7f) << 8) | (prescale - 1);
+	scb_write_reg(i2c->reg_base, SCB_CLK_SET_REG, data);
+
+	/* Obtain the clock period of the fx16 clock in ns. */
+	clk_period = (256 * 1000000) / (clk_khz * inc) + i2c->busdelay;
+
+	/* Calculate the bitrate in terms of internal clock pulses. */
+	int_bitrate = 1000000 / (bitrate_khz * clk_period);
+	/* round to nearest */
+	if ((1000000 % (bitrate_khz * clk_period)) >=
+	    ((bitrate_khz * clk_period) / 2))
+		int_bitrate++;
+
+	/* Setup TCKH value. */
+	tckh = img_i2c_timings[mode].tckh / clk_period;
+	if (img_i2c_timings[mode].tckh % clk_period)
+		tckh++;
+
+	if (tckh > 0)
+		data = tckh - 1;
+	else
+		data = 0;
+
+	scb_write_reg(i2c->reg_base, SCB_TIME_TCKH_REG, data);
+
+	/* Setup TCKL value. */
+	tckl = int_bitrate - tckh;
+
+	if (tckl > 0)
+		data = tckl - 1;
+	else
+		data = 0;
+
+	scb_write_reg(i2c->reg_base, SCB_TIME_TCKL_REG, data);
+
+	/* Setup TSDH value. */
+	tsdh = img_i2c_timings[mode].tsdh / clk_period;
+	if (img_i2c_timings[mode].tsdh % clk_period)
+		tsdh++;
+
+	if (tsdh > 1)
+		data = tsdh - 1;
+	else
+		data = 0x01;
+	scb_write_reg(i2c->reg_base, SCB_TIME_TSDH_REG, data);
+
+	/* This value is used later. */
+	tsdh = data;
+
+	/* Setup TPL value. */
+	data = img_i2c_timings[mode].tpl / clk_period;
+	if (data > 0)
+		--data;
+	scb_write_reg(i2c->reg_base, SCB_TIME_TPL_REG, data);
+
+	/* Setup TPH value. */
+	data = img_i2c_timings[mode].tph / clk_period;
+	if (data > 0)
+		--data;
+	scb_write_reg(i2c->reg_base, SCB_TIME_TPH_REG, data);
+
+	/* Setup TSDL value to TPL + TSDH + 2. */
+	scb_write_reg(i2c->reg_base, SCB_TIME_TSDL_REG, data + tsdh + 2);
+
+	/* Setup TP2S value. */
+	data = img_i2c_timings[mode].tp2s / clk_period;
+	if (data > 0)
+		--data;
+	scb_write_reg(i2c->reg_base, SCB_TIME_TP2S_REG, data);
+
+	scb_write_reg(i2c->reg_base, SCB_TIME_TBI_REG, TIMEOUT_TBI);
+	scb_write_reg(i2c->reg_base, SCB_TIME_TSL_REG, TIMEOUT_TSL);
+	scb_write_reg(i2c->reg_base, SCB_TIME_TDL_REG, TIMEOUT_TDL);
+
+	/* Take module out of soft reset and enable clocks. */
+	img_scb_soft_reset(i2c);
+
+	/* Disable all interrupts. */
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, 0);
+
+	/* Clear all interrupts. */
+	scb_write_reg(i2c->reg_base, SCB_INT_CLEAR_REG, 0xffffffff);
+
+	/* Clear the scb_line_status events. */
+	scb_write_reg(i2c->reg_base, SCB_CLEAR_REG, 0xffffffff);
+
+	/* Enable interrupts */
+	scb_write_reg(i2c->reg_base, SCB_INT_MASK_REG, i2c->int_enable);
+
+	dev_info(i2c->adap.dev.parent,
+		 "IMG I2C adapter (%d.%d.%d.%d) probed successfully"
+			" (%s %d bps)\n",
+		 (revision >> 24) & 0xff, (revision >> 16) & 0xff,
+		 (revision >> 8) & 0xff, revision & 0xff,
+		 img_i2c_timings[mode].name,
+		 1000000000/(int_bitrate*clk_period));
+
+	/* Reset the bus */
+	/* NOTE(review): i2c_img_reset_bus() result is discarded — consider
+	 * propagating it */
+	i2c_img_reset_bus(i2c);
+
+out:
+	clk_disable_unprepare(i2c->clk);
+
+	return ret;
+}
+
+/*
+ * i2c_algorithm master_xfer implementation.  Chooses atomic or automatic
+ * mode for the whole set of messages, runs each message under the main
+ * lock and waits for the ISR to signal completion.  Returns the number
+ * of messages transferred, or a negative error.
+ */
+static int img_scb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+			int num)
+{
+	struct img_i2c *i2c = i2c_get_adapdata(i2c_adap);
+	int ret = num;
+	int i;
+	int atomic;
+
+	/* a fatal ISR error permanently disables the adapter */
+	if (i2c->mode == MODE_FATAL)
+		return -EIO;
+
+	/*
+	 * Decide whether to use automatic or atomic mode.
+	 * Due to repeated starts we can't really mix and match.
+	 */
+	atomic = (i2c->quirks & QUIRK_ATOMIC_ONLY);
+	for (i = 0; i < num; i++) {
+		if (likely(msgs[i].len))
+			continue;
+		/*
+		 * 0 byte reads are not possible because the slave could try
+		 * and pull the data line low, preventing a stop bit.
+		 */
+		if (unlikely(msgs[i].flags & I2C_M_RD))
+			return -EIO;
+		/*
+		 * 0 byte writes are possible and used for probing, but we
+		 * cannot do them in automatic mode, so use atomic mode
+		 * instead.
+		 */
+		atomic = 1;
+	}
+
+	clk_prepare_enable(i2c->clk);
+
+	/* re-initialise the device */
+	if (i2c->mode == MODE_SUSPEND)
+		img_scb_init(i2c);
+
+#ifdef CONFIG_I2C_IMG_DEBUG_BUFFER
+	i2c->irq_buf_index = 0;
+	i2c->start_time = (unsigned int)local_clock();
+#endif
+
+	for (i = 0; i < num; i++) {
+		struct i2c_msg *msg = &msgs[i];
+		unsigned long flags;
+
+		spin_lock_irqsave(&i2c->main_lock, flags);
+
+		/*
+		 * Make a copy of the message struct. We mustn't modify the
+		 * original or we'll confuse drivers and i2c-dev.
+		 */
+		i2c->msg = *msg;
+		i2c->status = 0;
+		/*
+		 * After the last message we must have waited for a stop bit.
+		 * Not waiting can cause problems when the clock is disabled
+		 * before the stop bit is sent, and the linux I2C interface
+		 * requires separate transfers not to be joined with repeated
+		 * start.
+		 */
+		i2c->last_msg = (i == num-1);
+		dev_dbg(i2c->adap.dev.parent,
+			"msg %c %#x %d (%d/%d last=%d)\n",
+			(msg->flags & I2C_M_RD) ? 'R' : 'W',
+			(int)msg->addr, (int)msg->len,
+			i, num, i2c->last_msg);
+
+		INIT_COMPLETION(i2c->xfer_done);
+
+		if (atomic)
+			img_scb_atomic_start(i2c);
+		else if (msg->flags & I2C_M_RD)
+			img_scb_read(i2c);
+		else
+			img_scb_write(i2c);
+
+		spin_unlock_irqrestore(&i2c->main_lock, flags);
+
+		wait_for_completion(&i2c->xfer_done);
+
+		/* the check timer is only needed while a transfer runs */
+		del_timer_sync(&i2c->check_timer);
+
+		if (i2c->status) {
+			ret = i2c->status;
+			break;
+		}
+	}
+
+	clk_disable_unprepare(i2c->clk);
+
+	return ret;
+}
+
+/* i2c-core algorithm hooks for this adapter */
+static const struct i2c_algorithm i2c_img_algorithm = {
+	.master_xfer = img_scb_xfer,
+	.functionality = img_scb_func,
+};
+
+/*
+ * Probe: map registers, acquire the clock, initialise all driver state,
+ * then request the interrupt, initialise the hardware and register the
+ * adapter.  __init is valid here because the driver is registered with
+ * platform_driver_probe().
+ *
+ * Fix: request_irq() was previously issued before the check timer,
+ * spinlock and completion were initialised, so a spurious or shared
+ * interrupt could run img_scb_irq_handler() against half-constructed
+ * state.  The IRQ is now requested only after all state it touches is
+ * ready.
+ */
+static int __init i2c_img_probe(struct platform_device *dev)
+{
+	struct img_i2c *i2c;
+	struct resource *res;
+	int irq, ret;
+	struct device_node *node = dev->dev.of_node;
+	u32 val;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENOENT;
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0)
+		return -ENOENT;
+
+	/* A device node must be provided */
+	if (!node)
+		return -ENOENT;
+
+	if (!request_mem_region(res->start, resource_size(res), res->name))
+		return -ENOMEM;
+
+	i2c = kzalloc(sizeof(struct img_i2c), GFP_KERNEL);
+	if (!i2c) {
+		ret = -ENOMEM;
+		goto out_error_kmalloc;
+	}
+
+	i2c->adap.owner = THIS_MODULE;
+	i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+	i2c->adap.algo = &i2c_img_algorithm;
+	i2c->adap.retries = 5;
+
+	/*
+	 * Get the bus number from the device tree. Other I2C adapters may be
+	 * reserved for non-Linux cores.
+	 */
+	ret = of_property_read_u32(node, "linux,i2c-index", &val);
+	if (!ret)
+		i2c->adap.nr = val;
+	else
+		i2c->adap.nr = dev->id;
+	snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+		 "ImgTec SCB I2C %d", i2c->adap.nr);
+
+	i2c->reg_base = ioremap(res->start, resource_size(res));
+	if (!i2c->reg_base) {
+		ret = -EIO;
+		goto out_error_ioremap;
+	}
+
+	i2c->clk = of_clk_get(node, 0);
+	if (IS_ERR(i2c->clk)) {
+		dev_err(&dev->dev, "could not get clock resource\n");
+		ret = PTR_ERR(i2c->clk);
+		goto out_error_clock;
+	}
+
+	/* Set up the exception check timer */
+	init_timer(&i2c->check_timer);
+	i2c->check_timer.function = img_scb_check_timer;
+	i2c->check_timer.data = (unsigned long)i2c;
+
+	i2c->irq = irq;
+	i2c->iobase = res->start;
+	i2c->iosize = resource_size(res);
+
+	i2c->bitrate = 400000; /* fast mode */
+	if (!of_property_read_u32(node, "bit-rate", &val))
+		i2c->bitrate = val;
+	i2c->busdelay = 0;
+	if (!of_property_read_u32(node, "bus-delay", &val))
+		i2c->busdelay = val;
+	i2c->quirks = 0;
+	if (!of_property_read_u32(node, "quirks", &val))
+		i2c->quirks = val;
+
+	i2c_set_adapdata(&i2c->adap, i2c);
+	i2c->adap.dev.parent = &dev->dev;
+	i2c->adap.dev.of_node = node;
+
+	img_scb_switch_mode(i2c, MODE_INACTIVE);
+	spin_lock_init(&i2c->main_lock);
+	init_completion(&i2c->xfer_done);
+
+	platform_set_drvdata(dev, i2c);
+
+	/*
+	 * Only request the IRQ once every piece of state the handler
+	 * touches (lock, completion, timer, mode) has been initialised.
+	 */
+	ret = request_irq(irq, img_scb_irq_handler, 0, i2c->adap.name, i2c);
+	if (ret)
+		goto out_error_irq;
+
+	ret = img_scb_init(i2c);
+	if (ret)
+		goto out_error_i2c;
+
+	ret = i2c_add_numbered_adapter(&i2c->adap);
+	if (ret < 0) {
+		dev_err(&dev->dev, "failed to add bus\n");
+		goto out_error_i2c;
+	}
+
+	return 0;
+out_error_i2c:
+	free_irq(i2c->irq, i2c);
+out_error_irq:
+	clk_put(i2c->clk);
+out_error_clock:
+	iounmap(i2c->reg_base);
+out_error_ioremap:
+	kfree(i2c);
+out_error_kmalloc:
+	release_mem_region(res->start, resource_size(res));
+	return ret;
+}
+
+/*
+ * Remove: unregister the adapter first so no further transfers can start
+ * (and the check timer, which is deleted at the end of every transfer,
+ * is idle), then release the IRQ, clock and MMIO resources.
+ */
+static int __exit i2c_img_remove(struct platform_device *dev)
+{
+	struct img_i2c *i2c = platform_get_drvdata(dev);
+
+	i2c_del_adapter(&i2c->adap);
+
+	free_irq(i2c->irq, i2c);
+	clk_put(i2c->clk);
+	iounmap(i2c->reg_base);
+	release_mem_region(i2c->iobase, i2c->iosize);
+	kfree(i2c);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Suspend (normal phase): take and drop the adapter lock so any user
+ * transaction in flight completes before suspend proceeds.
+ */
+static int i2c_img_suspend(struct device *dev)
+{
+	struct img_i2c *i2c = dev_get_drvdata(dev);
+
+	/*
+	 * Wait for all user transactions to complete.
+	 */
+	i2c_lock_adapter(&i2c->adap);
+	i2c_unlock_adapter(&i2c->adap);
+
+	return 0;
+}
+
+/* Suspend (noirq phase): mark the device as needing re-initialisation. */
+static int i2c_img_suspend_noirq(struct device *dev)
+{
+	struct img_i2c *i2c = dev_get_drvdata(dev);
+
+	/*
+	 * Go into suspend state. The device will be reinitialised on the next
+	 * transfer, or on resume.
+	 */
+	img_scb_switch_mode(i2c, MODE_SUSPEND);
+
+	return 0;
+}
+
+/* Resume: reinitialise the hardware if it has not been already. */
+static int i2c_img_resume(struct device *dev)
+{
+	struct img_i2c *i2c = dev_get_drvdata(dev);
+
+	/* Ensure the device has been reinitialised. */
+	i2c_lock_adapter(&i2c->adap);
+
+	/* re-initialise the device */
+	if (i2c->mode == MODE_SUSPEND)
+		img_scb_init(i2c);
+
+	i2c_unlock_adapter(&i2c->adap);
+
+	return 0;
+}
+#else
+#define i2c_img_suspend NULL
+#define i2c_img_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * PM ops: standard sleep hooks plus noirq-phase suspend for the
+ * suspend/freeze/poweroff transitions.
+ */
+static const struct dev_pm_ops i2c_img_pmops = {
+	SET_SYSTEM_SLEEP_PM_OPS(i2c_img_suspend, i2c_img_resume)
+#ifdef CONFIG_PM_SLEEP
+	.suspend_noirq	= i2c_img_suspend_noirq,
+	.freeze_noirq	= i2c_img_suspend_noirq,
+	.poweroff_noirq	= i2c_img_suspend_noirq,
+#endif
+};
+
+/* Device tree match table */
+static const struct of_device_id i2c_img_match[] = {
+	{ .compatible = "img,scb" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, i2c_img_match);
+
+/*
+ * Platform driver.  No .probe here: the driver is registered with
+ * platform_driver_probe() below, which allows i2c_img_probe to be __init.
+ */
+static struct platform_driver i2c_img_driver = {
+	.driver = {
+		.name		= "img-i2c",
+		.owner		= THIS_MODULE,
+		.of_match_table	= i2c_img_match,
+		.pm		= &i2c_img_pmops,
+	},
+	.remove = __exit_p(i2c_img_remove),
+};
+
+static int __init i2c_adap_img_init(void)
+{
+	return platform_driver_probe(&i2c_img_driver, i2c_img_probe);
+}
+module_init(i2c_adap_img_init);
+
+static void __exit i2c_adap_img_exit(void)
+{
+	platform_driver_unregister(&i2c_img_driver);
+}
+module_exit(i2c_adap_img_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("IMG SCB I2C bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ab0767e6..7e297f2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -103,6 +103,12 @@
 	help
 	  Say yes here to build support for Atmel AT91 ADC.
 
+config CHORUS2_ADC
+	depends on SOC_CHORUS2
+	tristate "Chorus2 Analogue-to-Digital Converter"
+	help
+	  This enables the Chorus2 ADC.
+
 config EXYNOS_ADC
 	bool "Exynos ADC driver support"
 	depends on OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0a825be..3f9f2cc 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_AD7793) += ad7793.o
 obj-$(CONFIG_AD7887) += ad7887.o
 obj-$(CONFIG_AT91_ADC) += at91_adc.o
+obj-$(CONFIG_CHORUS2_ADC) += chorus2_adc.o
 obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
 obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
 obj-$(CONFIG_MAX1363) += max1363.o
diff --git a/drivers/iio/adc/chorus2_adc.c b/drivers/iio/adc/chorus2_adc.c
new file mode 100644
index 0000000..4477030
--- /dev/null
+++ b/drivers/iio/adc/chorus2_adc.c
@@ -0,0 +1,426 @@
+/*
+ * c2_adc.c
+ * Driver for the Chorus2 ADC block.
+ * Copyright (C) 2010,2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/ioctl.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/semaphore.h>
+#include <linux/mm.h>
+
+#include <asm/soc-chorus2/c2_irqnums.h>
+#include <asm/irq.h>
+#include <asm/img_dma.h>
+
+#include "chorus2_adc.h"
+
+#define DRV_VERSION	"1.0"
+#define DRV_NAME	"c2_adc"
+
+/* Size of the buffer */
+#define BUFFER_SIZE	(1 << 16)		/* 64KiB (two 32KiB halves) */
+#define MAX_BUF_OUT	(BUFFER_SIZE >> 1)	/* half BUFFER_SIZE */
+
+struct c2_adc_priv {
+	/* Device pointer */
+	struct miscdevice	*dev;
+	int			irq;
+
+	/* User lock */
+	spinlock_t		user_lock;
+	struct semaphore	read_sem;
+
+	/* User count & id */
+	int			user_count;
+	int			user_id;
+
+	/* Current speed of ADC clock (8.192 - 24.576 MHz) */
+	int			adc_clk_speed;
+
+	/* DMA buffer */
+	int			chan_out;
+	void			*buf_virt;
+	dma_addr_t		buf_phys;
+	unsigned int		*dma_desc;
+	dma_addr_t		dma_desc_phys;
+
+	/* Double buffers */
+	int			buffer_number;
+	void			*buf1;
+	void			*buf2;
+	void			*current_buffer;
+	spinlock_t		buffer_lock;
+
+	/* Register block */
+	unsigned int		*reg_base;
+};
+
+static struct c2_adc_priv c2_adc_dev;
+
+/* Reset the clocks  */
+static void c2_adc_reset(void)
+{
+	unsigned int flags;
+
+	/* Bring ADC/SCP to known initial values */
+	TBI_LOCK(flags);
+
+	/* Put the SCP into reset */
+	scp_write(SCP_CTRL_RESETN, CONTROL);
+
+	/* Turn off digital ADC clock */
+	writel(DCXO_CLK_ADC_DISABLE, DCXO_CLK_ENABLE);
+
+	/* Set ADC Clock to 8.192MHz */
+	writel(ADC_CLK_8192M, ADC_CLK_SEL_ADDR);
+
+	/* Enable digital ADC clock for SCP */
+	writel(DCXO_CLK_ADC_ENABLE, DCXO_CLK_ENABLE);
+
+	/* Set SCP input clock to use clock gen */
+	writel(USE_ON_CHIP_IF_CLK, SCP_IF_PIN_CTRL);
+
+	/* Initialise the SCP to a known state (out of reset) */
+	scp_write(SCP_CTRL_BYPASS | SCP_CTRL_DMA_SYNC_EN | SCP_CTRL_PWR_ON,
+			CONTROL);
+
+	TBI_UNLOCK(flags);
+}
+
+/* Initialise the DMA block */
+static void c2_setup_dma(void)
+{
+	unsigned int *c2_adc_dma_desc = c2_adc_dev.dma_desc;
+
+	/* Setup the dma list descriptor */
+	c2_adc_dma_desc[0] = 1 << 30;
+	c2_adc_dma_desc[1] = (1 << 30) | ((MAX_BUF_OUT >> 2) & 0xffff);
+	c2_adc_dma_desc[2] = SCP_OUTPUT_ADDR >> 2;
+	c2_adc_dma_desc[3] = 4 << 26;
+	c2_adc_dma_desc[4] = 0;
+	c2_adc_dma_desc[5] = 0;
+	c2_adc_dma_desc[6] = c2_adc_dev.buf_phys;
+	c2_adc_dma_desc[7] = c2_adc_dev.dma_desc_phys;
+
+	/* Setup the DMA to be ready to get going */
+	img_dma_set_list_addr(c2_adc_dev.chan_out, c2_adc_dev.dma_desc_phys);
+	wmb();
+}
+
+/*
+ * Open the device.
+ * This will only allow one file descriptor to be open for this device.
+ */
+static int c2_adc_open(struct inode *i, struct file *f)
+{
+	int ret = 0;
+
+	/* Single-user exclusivity */
+	spin_lock(&(c2_adc_dev.user_lock));
+
+	/* Allow user, an 'su' user and root to access the device */
+	if (c2_adc_dev.user_count &&
+			(c2_adc_dev.user_id != current->real_cred->uid) &&
+			(c2_adc_dev.user_id != current->real_cred->euid) &&
+			!capable(CAP_DAC_OVERRIDE)) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Ensure we have read access */
+	if ((f->f_flags & O_ACCMODE) == O_WRONLY) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Claim the device before dropping the lock so that a racing
+	 * open() cannot also enter the first-user initialisation. */
+	if (!c2_adc_dev.user_count++) {
+		c2_adc_dev.user_id = current->real_cred->uid;
+		spin_unlock(&c2_adc_dev.user_lock);
+
+		memset(c2_adc_dev.buf1, 0, MAX_BUF_OUT);
+		memset(c2_adc_dev.buf2, 0, MAX_BUF_OUT);
+		img_dma_start_list(c2_adc_dev.chan_out);
+
+		spin_lock(&c2_adc_dev.user_lock);
+	}
+done:
+	spin_unlock(&c2_adc_dev.user_lock);
+
+	return ret;
+}
+
+/*
+ * Release the device.
+ */
+static int c2_adc_release(struct inode *i, struct file *f)
+{
+	spin_lock(&(c2_adc_dev.user_lock));
+
+	c2_adc_dev.user_count--;
+	if (!c2_adc_dev.user_count) {
+		spin_unlock(&c2_adc_dev.user_lock);
+		img_dma_stop_list(c2_adc_dev.chan_out);
+		img_dma_reset(c2_adc_dev.chan_out);
+		c2_setup_dma();
+		goto done;
+	}
+
+	spin_unlock(&c2_adc_dev.user_lock);
+done:
+	return 0;
+}
+
+/*
+ * Read data from the device
+ */
+static ssize_t c2_adc_read(struct file *f, char __user *buf,
+	size_t count, loff_t *off)
+{
+	int ret = 0;
+	unsigned int *p = NULL;
+
+	if (down_interruptible(&(c2_adc_dev.read_sem)))
+		return -ERESTARTSYS;
+	if (*off >= MAX_BUF_OUT)
+		*off -= MAX_BUF_OUT;
+	if (*off + count > MAX_BUF_OUT)
+		count = MAX_BUF_OUT - *off;
+
+	spin_lock(&c2_adc_dev.buffer_lock);
+	p = c2_adc_dev.current_buffer + *off;
+	spin_unlock(&c2_adc_dev.buffer_lock);
+
+	if (copy_to_user(buf, p, count)) {
+		ret = -EFAULT;	/* must reach up() below - no early return */
+		goto out;
+	}
+	*off += count;
+	ret = count;
+out:
+	up(&(c2_adc_dev.read_sem));
+	return ret;
+}
+
+static const struct file_operations adc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= c2_adc_open,
+	.release	= c2_adc_release,
+	.read		= c2_adc_read,
+};
+
+static struct miscdevice c2_adc_device = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "adc",
+	.fops		= &adc_fops,
+};
+
+/* IRQ */
+static irqreturn_t c2_adc_irq(int irq, void *data)
+{
+	unsigned int status = 0;
+
+	img_dma_get_int_status(c2_adc_dev.chan_out, &status);
+
+	if (status & 0x20000)
+		memcpy(c2_adc_dev.current_buffer, c2_adc_dev.buf_virt,
+				MAX_BUF_OUT);
+
+	spin_lock(&(c2_adc_dev.buffer_lock));
+	/* Switch buffer to DMA into */
+	c2_adc_dev.buffer_number = (c2_adc_dev.buffer_number + 1) & 0x01;
+	c2_adc_dev.current_buffer = (c2_adc_dev.buffer_number) ?
+			c2_adc_dev.buf1 : c2_adc_dev.buf2;
+
+	spin_unlock(&(c2_adc_dev.buffer_lock));
+
+	img_dma_set_int_status(c2_adc_dev.chan_out, 0);
+	return IRQ_HANDLED;
+}
+
+/* Module initialisation */
+static int c2_adc_probe(void)
+{
+	int ret = 0;
+
+	pr_info("%s: Chorus2 Analogue-to-Digital Converter driver V%s\n",
+			DRV_NAME, DRV_VERSION);
+
+	/* Remap the peripheral memory */
+	c2_adc_dev.reg_base = ioremap(SCP_BASE, 0x0fff);
+	if (!c2_adc_dev.reg_base) {
+		pr_err("%s: Unable to remap IO registers, aborting\n",
+				DRV_NAME);
+		ret = -ENXIO;
+		goto done_err;
+	}
+
+	/* Initial device reset */
+	c2_adc_reset();
+
+	/* Initialise the open spinlock */
+	spin_lock_init(&(c2_adc_dev.user_lock));
+	spin_lock_init(&c2_adc_dev.buffer_lock);
+	sema_init(&(c2_adc_dev.read_sem), 1);
+
+	ret = misc_register(&c2_adc_device);
+	if (ret) {
+		pr_err("%s: Unable to register device node, aborting\n",
+				DRV_NAME);
+		goto done_unmap;
+	}
+	c2_adc_dev.dev = &c2_adc_device;
+
+	/* Set coherent DMA mask */
+	ret = dma_set_coherent_mask(c2_adc_device.this_device, 0xffffffff);
+	if (ret) {
+		pr_err("%s: Unable to initialise DMA, aborting\n", DRV_NAME);
+		goto done_unreg;
+	}
+
+	/* We can now do stuff with our device - first, allocate DMA out */
+	ret = img_request_dma(11, SCP_DMA_OUT_PERIPH);
+	if (ret < 0) {
+		pr_err("%s: Unable to allocate outbound DMA channel, aborting\n",
+				DRV_NAME);
+		goto done_unreg;
+	}
+	c2_adc_dev.chan_out = ret;
+	img_dma_reset(c2_adc_dev.chan_out);
+
+	/* Allocate the buffer */
+	c2_adc_dev.buf_virt = dma_alloc_coherent(c2_adc_device.this_device,
+			BUFFER_SIZE, &(c2_adc_dev.buf_phys), GFP_KERNEL);
+	if (!c2_adc_dev.buf_virt) {
+		pr_err("%s: Unable to allocate outbound DMA buffer, aborting\n",
+				DRV_NAME);
+		ret = -ENOMEM;
+		goto done_free_dma;
+	}
+
+	c2_adc_dev.dma_desc = dma_alloc_coherent(c2_adc_device.this_device,
+			8 * sizeof(unsigned int),
+			&c2_adc_dev.dma_desc_phys, GFP_KERNEL);
+
+	if (!c2_adc_dev.dma_desc) {
+		pr_err("%s: Unable to allocate DMA descriptor buffer, aborting\n",
+				DRV_NAME);
+		ret = -ENOMEM;
+		goto done_free_coherent1;
+	}
+
+	c2_setup_dma();
+
+	/* Setup the bounce buffers */
+	c2_adc_dev.buf1 = kzalloc(MAX_BUF_OUT, GFP_KERNEL);
+	if (!c2_adc_dev.buf1) {
+		pr_err("%s: Unable to allocate primary buffer, aborting\n",
+				DRV_NAME);
+		ret = -ENOMEM;
+		goto done_free_coherent2;
+	}
+	c2_adc_dev.buf2 = kzalloc(MAX_BUF_OUT, GFP_KERNEL);
+	if (!c2_adc_dev.buf2) {
+		pr_err("%s: Unable to allocate secondary buffer, aborting\n",
+				DRV_NAME);
+		ret = -ENOMEM;
+		goto done_free_buffer;
+	}
+
+	/*
+	 * Initialise bounce pointer - due to the way the DMAC works, this
+	 * should be the 2nd buffer (interrupts trigger BEFORE the list
+	 * element has been processed)
+	 */
+	c2_adc_dev.current_buffer = c2_adc_dev.buf1; /* NOTE(review): buf1, not buf2 as comment says - verify */
+	c2_adc_dev.irq = img_dma_get_irq(c2_adc_dev.chan_out);
+	if (c2_adc_dev.irq < 0) {
+		pr_err("%s: Unable to get DMA IRQ\n", DRV_NAME);
+		ret = c2_adc_dev.irq;
+		goto done_free_buffer;
+	}
+
+	/* Setup the IRQ */
+	ret = request_irq(c2_adc_dev.irq, c2_adc_irq, 0, "c2_adc", &c2_adc_dev);
+	if (ret) {
+		pr_err("%s: Unable to allocate IRQ, aborting\n", DRV_NAME);
+		ret = -EBUSY;
+		goto done_free_buffer;
+	}
+
+	/* We're done now :) */
+	pr_info("%s: Device allocated at /dev/adc\n", DRV_NAME);
+	goto done;
+
+done_free_buffer:
+	kfree(c2_adc_dev.buf2);
+	kfree(c2_adc_dev.buf1);
+done_free_coherent2:
+	dma_free_coherent(c2_adc_device.this_device, 8 * sizeof(unsigned int),
+						c2_adc_dev.dma_desc,
+						c2_adc_dev.dma_desc_phys);
+done_free_coherent1:
+	dma_free_coherent(c2_adc_device.this_device, BUFFER_SIZE,
+		c2_adc_dev.buf_virt, c2_adc_dev.buf_phys);
+
+done_free_dma:
+	img_free_dma(c2_adc_dev.chan_out);
+done_unreg:
+	misc_deregister(&c2_adc_device);
+done_unmap:
+	iounmap(c2_adc_dev.reg_base);
+done_err:
+	pr_err("%s: Error code: %d\n", DRV_NAME, ret);
+done:
+	return ret;
+}
+module_init(c2_adc_probe);
+
+static void c2_adc_remove(void)
+{
+	free_irq(c2_adc_dev.irq, &c2_adc_dev);
+	kfree(c2_adc_dev.buf2);
+	kfree(c2_adc_dev.buf1);
+	dma_free_coherent(c2_adc_device.this_device, 8 * sizeof(unsigned int),
+						c2_adc_dev.dma_desc,
+						c2_adc_dev.dma_desc_phys);
+
+	dma_free_coherent(c2_adc_device.this_device, BUFFER_SIZE,
+		c2_adc_dev.buf_virt, c2_adc_dev.buf_phys);
+
+	img_free_dma(c2_adc_dev.chan_out);
+	misc_deregister(&c2_adc_device);
+	iounmap(c2_adc_dev.reg_base);
+	return;
+}
+module_exit(c2_adc_remove);
+
+MODULE_DESCRIPTION("Chorus2 ADC driver");
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/chorus2_adc.h b/drivers/iio/adc/chorus2_adc.h
new file mode 100644
index 0000000..9488e12
--- /dev/null
+++ b/drivers/iio/adc/chorus2_adc.h
@@ -0,0 +1,74 @@
+/*
+ * c2_adc.h
+ * Chorus 2 ADC/SCP driver register locations
+ *
+ * Copyright (C) 2010,2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef C2_ADC_H_
+#define C2_ADC_H_
+
+#define SCP_BASE		0x02005000
+#define SCP_DMA_IN_PERIPH	5
+#define SCP_DMA_OUT_PERIPH	6
+
+/* ADC Clock select */
+#define ADC_CLK_SEL_ADDR	0x02000034
+#define ADC_CLK_8192M		0
+#define ADC_CLK_12288M		1
+#define ADC_CLK_24576M		2
+
+/* Digital ADC clock enable (for SCP) */
+#define DCXO_CLK_ENABLE	0x020000B4
+#define DCXO_CLK_ADC_DISABLE	0
+#define DCXO_CLK_ADC_ENABLE	1
+
+/* SCP input if_clk */
+#define SCP_IF_PIN_CTRL		0x02024018
+#define USE_ON_CHIP_IF_CLK	0x02
+
+/* SCP Control register */
+#define SCP_CONTROL_OFFSET	0x00
+#define SCP_CTRL_BYPASS		0x20000000
+#define SCP_CTRL_DMA_SYNC_EN	0x02000000
+#define SCP_CTRL_DMA_SYNC_BP	0x01000000
+#define SCP_CTRL_PWR_ON		0x00300000
+#define SCP_CTRL_PWR_OFF	0x00400000
+#define SCP_CTRL_RESETN		0x00010000
+/* For testing only */
+#define SCP_CTRL_SRC_INPUT	0x00000400
+
+/* Status register */
+#define SCP_STATUS_OFFSET	0x04
+#define SCP_STAT_IRQ_SCP	0x80000000
+#define SCP_STAT_FRAME_A	0x00000000
+#define SCP_STAT_FRAME_B	0x40000000
+#define SCP_STAT_IRQ_DMA_OP	0x20000000
+
+/* SCP Output sample */
+#define SCP_OUTPUT_ADDR		(SCP_BASE + SCP_OUTPUT_OFFSET)
+#define SCP_OUTPUT_OFFSET	0x14
+
+/* DMA output interrupt control */
+#define SCP_DMA_OP_INT_OFFSET	0x18
+
+#define scp_write(val, reg) \
+	writel((val), SCP_BASE + SCP_##reg##_OFFSET)
+#define scp_read(reg) \
+	readl(SCP_BASE + SCP_##reg##_OFFSET)
+
+#endif /* C2_ADC_H_ */
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 1bda828..22b78e7 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -22,7 +22,8 @@
 	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+		   !METAG
 	help
 	  i8042 is the chip over which the standard AT keyboard and PS/2
 	  mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f9a5fd8..203dba8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -790,6 +790,11 @@
 	  Say Y here if you have an EasyTouch USB Touch controller.
 	  If unsure, say N.
 
+config TOUCHSCREEN_USB_LILLIPUT
+	default y
+	bool "Lilliput VGA/HDMI monitor touchscreen support" if EMBEDDED
+	depends on TOUCHSCREEN_USB_COMPOSITE
+
 config TOUCHSCREEN_TOUCHIT213
 	tristate "Sahara TouchIT-213 touchscreen"
 	select SERIO
@@ -854,6 +859,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called pcap_ts.
 
+config TOUCHSCREEN_QT5480
+	tristate "QT5480 touchscreen"
+	depends on I2C
+	help
+	  Say Y here if you have a QT5480 I2C touchscreen.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ts_qt5480.
+
 config TOUCHSCREEN_ST1232
 	tristate "Sitronix ST1232 touchscreen controllers"
 	depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 6bfbeab..a2c62f9 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_TOUCHSCREEN_PCAP)		+= pcap_ts.o
 obj-$(CONFIG_TOUCHSCREEN_PENMOUNT)	+= penmount.o
 obj-$(CONFIG_TOUCHSCREEN_PIXCIR)	+= pixcir_i2c_ts.o
+obj-$(CONFIG_TOUCHSCREEN_QT5480)	+= ts_qt5480.o
 obj-$(CONFIG_TOUCHSCREEN_S3C2410)	+= s3c2410_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ST1232)	+= st1232.o
 obj-$(CONFIG_TOUCHSCREEN_STMPE)		+= stmpe-ts.o
diff --git a/drivers/input/touchscreen/ts_qt5480.c b/drivers/input/touchscreen/ts_qt5480.c
new file mode 100644
index 0000000..fd87ff4
--- /dev/null
+++ b/drivers/input/touchscreen/ts_qt5480.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright (C) 2009,2010 Imagination Technologies Limited.
+ *
+ * Quantum TouchScreen Controller driver.
+ */
+
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/input/ts_qt5480.h>
+
+#include "ts_qt5480.h"
+
+/*
+ * TODO
+ *  - multi-touch support
+ *  - clean up debug messages
+ *  - drop Pure's interface and migrate to just /dev/input
+ *  - figure out unreliability of writes
+ */
+
+#ifndef I2C_M_WR
+#define I2C_M_WR	0x00
+#endif
+
+#define QT5480_NAME		"ts_qt5480"
+
+#define QT5480_MAX_I2C_RETRIES	5
+
+#define QT5480_SLAVE_ADDRESS	0x30
+
+/* Polling Frequency */
+#define QT5480_POLL_TIME	(HZ/25)
+
+/* FIXME this is probably not correct */
+#define MAX_PRESSURE_VALUE	0x10
+
+/* touch-screen touch event */
+struct ts_qt5480_touch {
+	struct list_head link;
+	unsigned char x;
+	unsigned char y;
+	unsigned char size;
+	unsigned char area;
+};
+
+/* touch-screen gesture event */
+struct ts_qt5480_gesture {
+	unsigned char event;
+	unsigned char dir;
+	unsigned char dist;
+	unsigned char x;
+	unsigned char y;
+};
+
+/* touch-screen chip info */
+struct ts_qt5480_status {
+	unsigned char update_flags;
+	unsigned char general_status_1;
+	unsigned char general_status_2;
+	unsigned char key[6];
+	unsigned char slider[6];
+	struct ts_qt5480_touch touchscr[2];
+	struct ts_qt5480_gesture gesture[2];
+};
+
+/* Driver data */
+struct ts_qt5480_data {
+	char phys[32];
+	struct input_dev *input;
+	int (*poll_status)(void);
+
+	/* Control data */
+	unsigned int used;
+	struct mutex mutex;
+
+	/* Chip Connections Configuration */
+	struct i2c_client *client;
+	struct i2c_adapter *i2c_adap;
+	unsigned char dev_addr;
+	ts_qt5480_mapping_t *phy_map;
+	ts_qt5480_conf_reg_t *config;
+
+	/* Chip Live Data */
+	struct ts_qt5480_status chip_status;
+
+	/* Touch state */
+	bool touch1_down;
+};
+
+static struct ts_qt5480_data *ts_qt5480_data;
+
+static int ts_qt5480_read_change(struct ts_qt5480_data *data,
+				 unsigned char *buf);
+static void ts_qt5480_parse_data(struct ts_qt5480_data *data,
+				 unsigned char *buf,
+				 struct ts_qt5480_status *chip_status);
+
+static irqreturn_t ts_qt5480_isr_check(int irq, void *p)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t ts_qt5480_isr(int irq, void *p)
+{
+	struct ts_qt5480_data *data = p;
+	unsigned char buf[5] = { };
+
+	while (!data->poll_status()) {
+		mutex_lock(&data->mutex);
+
+		if (!ts_qt5480_read_change(data, buf)) {
+			ts_qt5480_parse_data(data, buf,
+					     &data->chip_status);
+		}
+
+		mutex_unlock(&data->mutex);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ts_qt5480_read_reg(struct ts_qt5480_data *data, short addr,
+			      unsigned char *value)
+{
+	struct i2c_msg msg = { };
+	struct ts_qt5480_frame frame = { };
+	unsigned char buf[5] = { };
+	unsigned int retries = 0;
+	unsigned long timeout;
+	short read_ptr = addr >> 2;
+
+	dev_dbg(&data->client->dev, "ts_qt5480_read_reg()\n");
+
+	do {
+		/*
+		 * If we have reach the maximum number of retries,
+		 * lets report error.
+		 */
+		if (++retries == QT5480_MAX_I2C_RETRIES) {
+			dev_err(&data->client->dev,
+				"ts_qt5480_read_reg() - Error: Setting register %d. Max retries achieved\n",
+				addr);
+			return -ENODEV;
+		}
+
+		/* Prepare the address and the data */
+		msg.addr = data->dev_addr;
+		msg.flags = I2C_M_WR;
+		msg.len = 3;
+		msg.buf = (__u8 *) &frame;
+		frame.addr = QT_ADDRESS_PTR;
+		frame.data[0] = read_ptr;
+
+		/* write the "read address pointer" */
+		frame.stat = i2c_transfer(data->i2c_adap, &msg, 1);
+		if (frame.stat < 0) {
+			dev_warn(&data->client->dev,
+				 "ts_qt5480_read_reg() - Error (%d): Failed writing the read address pointer\n",
+				 frame.stat);
+			continue;
+		}
+
+		udelay(500);
+
+		/* Wait for roughly 10ms. */
+		timeout = jiffies + (10 * HZ / 1000);
+
+		/*
+		 * Wait, with timeout, for the change line asserted low
+		 * (requested data ready signal).
+		 */
+		while (time_before(jiffies, timeout)) {
+			/* if change line asserted low, lets read the frame */
+			if (!data->poll_status()) {
+				/* Read back the data */
+				frame.stat = ts_qt5480_read_change(data, buf);
+				if (frame.stat < 0) {
+					dev_warn(&data->client->dev,
+						 "ts_qt5480_read_reg() - Error (%d): Failed reading reg %d\n",
+						 frame.stat, addr);
+				}
+				break;
+			}
+		}
+
+		if (frame.stat < 0)
+			continue;
+
+		/*
+		 * If we haven't get the data we are asking for, lets
+		 * request it again.
+		 */
+	} while (buf[0] != read_ptr);
+
+	/* Copy the value back */
+	(*value) = buf[(addr & 0x03) + 1];
+
+	return 0;
+}
+
+static int ts_qt5480_write_reg(struct ts_qt5480_data *data, short addr,
+			       unsigned char value)
+{
+	struct i2c_msg msg = { };
+	struct ts_qt5480_frame frame = { };
+
+	dev_dbg(&data->client->dev, "ts_qt5480_write_reg()\n");
+
+	/* Prepare the address and the data */
+	msg.addr = data->dev_addr;
+	msg.flags = I2C_M_WR;
+	msg.len = 3;
+	msg.buf = (__u8 *) &frame;
+	frame.addr = addr;
+	frame.data[0] = value;
+
+	/* Write the value into the register */
+	frame.stat = i2c_transfer(data->i2c_adap, &msg, 1);
+	if (frame.stat < 0) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_write_reg() - Error %d: Failed writing reg %d\n",
+			 frame.stat, addr);
+		return -ENODEV;
+	}
+
+	/*
+	 * Writing registers too quickly appears to cause the device
+	 * to issue NACKs, so pace back-to-back writes.
+	 */
+	udelay(500);
+
+	return 0;
+}
+
+static int ts_qt5480_reset(struct ts_qt5480_data *data)
+{
+	unsigned char buf[5] = { };
+	unsigned long timeout;
+	int ret;
+	unsigned char chipid = 0, software = 0;
+
+	dev_dbg(&data->client->dev, "ts_qt5480_reset()\n");
+
+	/* Writing a non-zero value causes a reset. */
+	ret = ts_qt5480_write_reg(data, QT_RESET, 1);
+	if (ret < 0) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - Error %d: Failed writing reset reg\n",
+			 ret);
+		return -ENODEV;
+	}
+
+	/*
+	 * Wait for 40ms. This is necessary because the device sometimes
+	 * starts up with the CHANGE line asserted and we have to wait for
+	 * it to be deasserted before looking for the reset response.
+	 */
+	msleep(40);
+
+	/* Timeout after roughly 15ms. */
+	timeout = jiffies + (15 * HZ / 1000);
+
+	/* Wait for reset to be acknowledged. */
+	while (data->poll_status() && time_before(jiffies, timeout))
+		cpu_relax();
+
+	if (data->poll_status()) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - Error: did not respond to reset\n");
+	}
+
+	/* Read response. */
+	ret = ts_qt5480_read_change(data, buf);
+	if (ret < 0) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - Error %d: reading status\n",
+			 ret);
+	}
+
+	/* Test for reset bit. */
+	if (!(buf[3] & 0x80)) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - incorrect reset response: %#hhx\n",
+			 buf[3]);
+	}
+
+	ret = ts_qt5480_read_reg(data, QT_CHIP_ID, &chipid);
+	if (ret < 0) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - Error %d: reading chip id\n",
+			 ret);
+		return -ENODEV;
+	}
+
+	ret = ts_qt5480_read_reg(data, QT_CODE_VERSION, &software);
+	if (ret < 0) {
+		dev_warn(&data->client->dev,
+			 "ts_qt5480_reset() - Error %d: reading code version\n",
+			 ret);
+		return -ENODEV;
+	}
+
+	dev_info(&data->client->dev,
+		 "Found device, chip ID %#hhx, software %d.%d\n",
+		 chipid, (software >> 4), software & 0xf);
+
+	return 0;
+}
+
+static int ts_qt5480_send_config(struct ts_qt5480_data *data)
+{
+	int reg_addr = 0;
+	int ret = -1;
+	ts_qt5480_conf_reg_t *config = data->config;
+
+	dev_dbg(&data->client->dev, "ts_qt5480_send_config()");
+
+	for (reg_addr = 0; reg_addr < QT_MAX_REG; reg_addr++) {
+		int i;
+
+		if (!config[reg_addr].set)
+			continue;
+
+		for (i = 0; i < QT5480_MAX_I2C_RETRIES; i++) {
+			/* write the register */
+			ret = ts_qt5480_write_reg(data, reg_addr,
+						  config[reg_addr].value);
+
+			if (!ret)
+				break;
+		}
+
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"ts_qt5480_send_config() - Error: Failed write reg %d\n",
+				reg_addr);
+			return ret;
+		}
+	}
+
+	/* Send a Calibration request with the new configuration */
+	ret = ts_qt5480_write_reg(data, QT_CALIBRATE, 0x1);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"ts_qt5480_send_config() - Error: Calibrating after Config\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ts_qt5480_read_change(struct ts_qt5480_data *data,
+				 unsigned char *buf)
+{
+	struct i2c_msg msg = { };
+	int ret = -1;
+
+	dev_dbg(&data->client->dev, "ts_qt5480_read_change()\n");
+
+	/* Read back the data */
+	msg.addr = data->dev_addr;
+	msg.flags = I2C_M_RD;
+	msg.len = 5;
+	msg.buf = buf;
+
+	ret = i2c_transfer(data->i2c_adap, &msg, 1);
+	dev_dbg(&data->client->dev,
+		"ts_qt5480_read_change() - i2c_transfer() ret %d - data %#x %#x %#x %#x %#x\n",
+		ret, buf[0], buf[1], buf[2], buf[3], buf[4]);
+
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"ts_qt5480_read_change() - Warning: Failed read\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void add_finger_release_to_queue(struct ts_qt5480_data *data,
+					struct ts_qt5480_touch *queue)
+{
+	struct ts_qt5480_touch *touch = kmalloc(sizeof(struct ts_qt5480_touch),
+					   GFP_KERNEL);
+	if (likely(touch)) {
+		touch->x = 0;
+		touch->y = 0;
+		touch->size = 0;
+		touch->area = 0;
+
+		list_add_tail(&touch->link, &queue->link);
+		dev_dbg(&data->client->dev,
+			"Added %p %d %d %d\n", touch, touch->x, touch->y,
+			touch->area);
+	}
+
+}
+
+static void ts_qt5480_parse_data(struct ts_qt5480_data *data,
+				 unsigned char *buf,
+				 struct ts_qt5480_status *chip_status)
+{
+	unsigned int icnt;
+	struct ts_qt5480_frame frame = { };
+	struct input_dev *input = data->input;
+
+	/* Safety check */
+	if (buf == 0)
+		return;
+
+	/* get packet pkt_address */
+	frame.addr = (unsigned short)buf[0] << 2;
+	/* get 4 data bytes */
+	frame.data[0] = buf[1];
+	frame.data[1] = buf[2];
+	frame.data[2] = buf[3];
+	frame.data[3] = buf[4];
+
+	dev_dbg(&data->client->dev, "ts_qt5480_parseData()\n");
+
+	switch (frame.addr) {
+	case QT_EEPROM_CHKSUM - 2:
+		/* special case: isolate 16-bit checksum */
+		frame.stat = (((unsigned short)frame.data[3] << 8) +
+			      frame.data[2]);
+
+		break;
+	case QT_KEY_STATUS_0:
+		for (icnt = 0; icnt < 4; icnt++)
+			chip_status->key[icnt] = frame.data[icnt];
+		/* flag key status changed */
+		chip_status->update_flags |= KEY_0_UPDATE;
+
+		break;
+	case QT_KEY_STATUS_4:
+		chip_status->key[4] = frame.data[0];
+		chip_status->key[5] = frame.data[1];
+		chip_status->general_status_1 = frame.data[2];
+		chip_status->general_status_2 = frame.data[3];
+		/* flag key status changed */
+		chip_status->update_flags |= KEY_4_UPDATE;
+
+		if ((!(chip_status->general_status_2 & TS0_DET))
+		    && (chip_status->touchscr[0].area != 0)) {
+			chip_status->touchscr[0].x = 0;
+			chip_status->touchscr[0].y = 0;
+			chip_status->touchscr[0].size = 0;
+			chip_status->touchscr[0].area = 0;
+
+			input_report_key(input, BTN_TOUCH, 0);
+			input_report_abs(input, ABS_PRESSURE, 0);
+			input_sync(input);
+			data->touch1_down = false;
+
+			add_finger_release_to_queue(data,
+						    &chip_status->touchscr[0]);
+		}
+		/*
+		 * If touch two is not active and is the first time we
+		 * detect it, set a Finger-Up data.
+		 */
+		if ((!(chip_status->general_status_2 & TS1_DET))
+		    && (chip_status->touchscr[1].area != 0)) {
+			chip_status->touchscr[1].x = 0;
+			chip_status->touchscr[1].y = 0;
+			chip_status->touchscr[1].size = 0;
+			chip_status->touchscr[1].area = 0;
+		}
+		break;
+	case QT_TOUCHSCR_0_X:
+	case QT_TOUCHSCR_1_X:
+		icnt = (frame.addr == QT_TOUCHSCR_0_X) ? 0 : 1;
+
+		chip_status->touchscr[icnt].x = frame.data[0];
+		chip_status->touchscr[icnt].y = frame.data[2];
+		chip_status->touchscr[icnt].size = frame.data[1] & 0x3f;
+		chip_status->touchscr[icnt].area = frame.data[3] & 0x3f;
+
+		if (frame.addr == QT_TOUCHSCR_0_X
+		    && (chip_status->general_status_2 & TS0_DET)) {
+			int listEmpty = list_empty(&chip_status->touchscr[0].link);
+			int addEntry = 1;
+			struct ts_qt5480_touch *last = NULL;
+
+			if (!data->touch1_down) {
+				data->touch1_down = true;
+				input_report_key(input, BTN_TOUCH, 1);
+			}
+
+			input_report_abs(input, ABS_X,
+					 chip_status->touchscr[0].x);
+			input_report_abs(input, ABS_Y,
+					 chip_status->touchscr[0].y);
+			input_report_abs(input, ABS_PRESSURE,
+					 chip_status->touchscr[0].size);
+			input_sync(input);
+
+			if (!listEmpty) {
+				last = list_entry(chip_status->touchscr[0].link.prev,
+						  struct ts_qt5480_touch, link);
+					addEntry = (last->area == 0);
+			}
+
+			if (!addEntry) {
+				/* update last unread entry in the list with current coordinates */
+				last->x = chip_status->touchscr[0].x;
+				last->y = chip_status->touchscr[0].y;
+				last->area = chip_status->touchscr[0].area;
+				last->size = chip_status->touchscr[0].size;
+				dev_dbg(&data->client->dev,
+					"Updat %p %d %d %d\n", last,
+					last->x, last->y, last->area);
+			} else {
+				struct ts_qt5480_touch *touch = kmalloc(sizeof(struct ts_qt5480_touch),
+								   GFP_KERNEL);
+				if (likely(touch)) {
+					*touch = chip_status->touchscr[0];
+					list_add_tail(&touch->link,
+						      &chip_status->touchscr[0].link);
+					dev_dbg(&data->client->dev,
+						"Added %p %d %d %d\n",
+						touch, touch->x, touch->y,
+						touch->area);
+				}
+			}
+		} else if (frame.addr == QT_TOUCHSCR_1_X) {
+			/* save slider info for Touch 1 packet */
+			for (icnt = 0; icnt < 4; icnt++)
+				chip_status->slider[icnt] = frame.data[icnt];
+		}
+
+		break;
+	case QT_SLIDER_4:
+		for (icnt = 0; icnt < 2; icnt++)
+			chip_status->slider[icnt + 4] = frame.data[icnt];
+
+		break;
+	case QT_TOUCH_0_GESTURE:
+	case QT_TOUCH_1_GESTURE:
+		icnt = (frame.addr == QT_TOUCH_0_GESTURE) ? 0 : 1;
+
+		chip_status->gesture[icnt].event = frame.data[0] & 0x0f;
+		chip_status->gesture[icnt].dir =
+			(frame.data[0] & 0x70) >> 4;
+		chip_status->gesture[icnt].dist =
+			(frame.data[1] & 0xf0) >> 4;
+		chip_status->gesture[icnt].x =
+			(frame.data[2] << 2) +
+			((frame.data[1] >> 2) & 0x03);
+		chip_status->gesture[icnt].y = frame.data[3];
+		/* flag gesture received */
+		chip_status->update_flags |= GESTURE_0_UPDATE << icnt;
+
+		break;
+	default:
+		break;
+	}
+}
+
+static void ts_qt5480_mapping(struct ts_qt5480_data *data,
+			      const struct ts_qt5480_touch *touch,
+			      long *pX, long *pY)
+{
+	ts_qt5480_mapping_t *phy_map = data->phy_map;
+	unsigned short x_sen_res = phy_map->x_sensor_res;
+	unsigned short x_scr_res = phy_map->x_screen_res;
+	unsigned short x_flip = phy_map->x_flip;
+	unsigned short x_sen_size = phy_map->x_sensor_size;
+	unsigned short x_scr_size = phy_map->x_screen_size;
+	short x_sen_ofst = phy_map->x_sensor_offset;
+
+	unsigned short y_sen_res = phy_map->y_sensor_res;
+	unsigned short y_scr_res = phy_map->y_screen_res;
+	unsigned short y_flip = phy_map->y_flip;
+	unsigned short y_sen_size = phy_map->y_sensor_size;
+	unsigned short y_scr_size = phy_map->y_screen_size;
+	short y_sen_ofst = phy_map->y_sensor_offset;
+
+	long x = 0;
+	long y = 0;
+
+	x = touch->x;
+	y = touch->y;
+
+	/* Resolution and orientation */
+	x *= x_scr_res;
+	x /= x_sen_res;
+	if (x_flip)
+		x = x_scr_res - x;
+
+	/* Physical Mapping */
+	x *= x_sen_size;
+	x /= x_scr_size;
+	x += (x_sen_ofst * x_scr_res) / x_scr_size;
+
+	/* Resolution and orientation */
+	y *= y_scr_res;
+	y /= y_sen_res;
+	if (y_flip)
+		y = y_scr_res - y;
+
+	/* Physical Mapping */
+	y *= y_sen_size;
+	y /= y_scr_size;
+	y += (y_sen_ofst * y_scr_res) / y_scr_size;
+
+	/* Return the data */
+	(*pX) = x;
+	(*pY) = y;
+}
+
+static int ts_qt5480_start_input(struct ts_qt5480_data *data)
+{
+	/* Set Mutex protection */
+	mutex_lock(&data->mutex);
+
+	if (data->used) {
+		data->used++;
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_open() - Warning: It is was already in use!!!\n");
+	} else {
+		data->used++;
+
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_open() - Device Opened!!!\n");
+	}
+
+	/* Release Mutex protecction */
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+static int ts_qt5480_open(struct inode *inode, struct file *file)
+{
+	struct ts_qt5480_data *data = ts_qt5480_data;
+	int ret;
+
+	if (!data) {
+		pr_err("ts_qt5480_open() - Error: Device not attached\n");
+		return -ENODEV;
+	}
+
+	dev_dbg(&data->client->dev,
+		"ts_qt5480_open(inode %#x, file %#x)\n",
+		(unsigned int)inode, (unsigned int)file);
+
+	ret = ts_qt5480_start_input(data);
+
+	if (!ret)
+		file->private_data = data;
+
+	dev_dbg(&data->client->dev,
+		"ts_qt5480_open returned %d (inode %#x, file %#x)\n",
+		ret, (unsigned int)inode, (unsigned int)file);
+	return ret;
+}
+
+static int ts_qt5480_release(struct inode *inode, struct file *file)
+{
+	struct ts_qt5480_data *data = file->private_data;
+
+	dev_dbg(&data->client->dev, "ts_qt5480_release(inode %#x, file %#x)\n",
+		(unsigned int)inode, (unsigned int)file);
+
+	mutex_lock(&data->mutex);
+
+	if (data->used) {
+		data->used--;
+
+		if (!data->used) {
+
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_release() - Device Closed!!!\n");
+
+		} else {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_release() - Warning: It is still in use!!!\n");
+		}
+	} else {
+		dev_err(&data->client->dev,
+			"ts_qt5480_release() - Error: Device never opened!!!\n");
+
+		mutex_unlock(&data->mutex);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&data->mutex);
+	return 0;
+}
+
+static ssize_t ts_qt5480_read(struct file *file, char *buf, size_t count,
+			      loff_t *ppos)
+{
+	struct ts_qt5480_data *data = file->private_data;
+	ts_event event = { };
+
+	dev_dbg(&data->client->dev,
+	      "ts_qt5480_read(file %#x, buf %#x, count %#x, ppos %#x)\n",
+	      (unsigned int)file, (unsigned int)buf, count,
+	      (unsigned int)ppos);
+
+	mutex_lock(&data->mutex);
+
+	if (count >= sizeof(event)) {
+		if (!list_empty(&data->chip_status.touchscr[0].link)) {
+			struct ts_qt5480_touch *touch =
+			    list_entry(data->chip_status.touchscr[0].link.next,
+				       struct ts_qt5480_touch, link);
+
+			event.pressure = touch->area;
+			if (event.pressure == 0) {
+				event.x = 0;
+				event.y = 0;
+			} else {
+				/* long: ts_qt5480_mapping() takes long * */
+				long x, y;
+				ts_qt5480_mapping(data, touch, &x, &y);
+
+				event.x = (unsigned short)x;
+				event.y = (unsigned short)y;
+			}
+			dev_dbg(&data->client->dev,
+				"Read_ %p %d %d %d\n", touch,
+				touch->x, touch->y, touch->area);
+
+			list_del(&touch->link);
+			kfree(touch);
+
+			/* Copy Data to user */
+			if (copy_to_user((ts_event *) buf, &event,
+					 sizeof(event))) {
+				dev_dbg(&data->client->dev,
+					"ts_qt5480_read() - Error: Copying data to the user area\n");
+
+				mutex_unlock(&data->mutex);
+				return -EFAULT;
+			}
+
+		} else {
+			/* No data to send */
+			count = 0;
+		}
+	}
+
+	mutex_unlock(&data->mutex);
+	return count;
+}
+
+static long ts_qt5480_ioctl(struct file *file,
+			    unsigned int cmd, unsigned long arg)
+{
+	struct ts_qt5480_data *data = file->private_data;
+	long err = -EINVAL;
+
+	dev_dbg(&data->client->dev,
+		"ts_qt5480_ioctl(file %#x, cmd %i, arg %i)\n",
+		(unsigned int)file, cmd, (unsigned int)arg);
+
+	/*
+	 * extract the type and number bitfields, and don't decode
+	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
+	 */
+	if (_IOC_TYPE(cmd) != QT5480_IOCTL) {
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - Error: Don't do family %#x\n",
+			_IOC_TYPE(cmd));
+		return -ENOTTY;
+	}
+
+	/*
+	 * the direction is a bitmask, and VERIFY_WRITE catches R/W
+	 * transfers. `Type' is user-oriented, while
+	 * access_ok is kernel-oriented, so the concept of "read" and
+	 * "write" is reversed
+	 */
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - read - arg %#x\n",
+			(int)arg);
+		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
+	} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - write - arg %#x\n",
+			(int)arg);
+		err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
+	}
+
+	if (err) {
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - Error: access error!\n");
+		return -EFAULT;
+	} else {
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - access OK\n");
+	}
+
+	mutex_lock(&data->mutex);
+
+	switch (cmd) {
+	case QT5480_CALIBRATE:
+		/* Send a Calibrate Command */
+		dev_dbg(&data->client->dev,
+			"ts_qt5480_ioctl() - QT5480_CALIBRATE\n");
+
+		err = ts_qt5480_write_reg(data, QT_CALIBRATE, 0x1);
+
+		if (err < 0) {
+			dev_err(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_CALIBRATE - Failed i2c_transfer\n");
+			goto out;
+		}
+
+		break;
+	case QT5480_POWER: {
+		/* Switch ON / OFF the device */
+		unsigned int power_on;
+
+		if (copy_from_user(&power_on, (int *)arg, sizeof(int))) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_POWER - failed copy_from_user\n");
+			err = -EFAULT;
+			goto out;
+		}
+
+		err = ts_qt5480_write_reg(data, QT_CALIBRATE,
+					  ((power_on) ?
+					   data->config[QT_LP_MODE].value :
+					   0x00));
+
+		if (err < 0) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_POWER - Failed i2c_transfer\n");
+			goto out;
+		}
+
+		break;
+	}
+	case QT5480_DEBUG:
+		/* Enable / Disable Debug tracking in the PDP Memory (TFT) */
+		/* not implemented; goto out so data->mutex is released */
+		err = -EINVAL;
+		goto out;
+	case QT5480_GETREG: {
+		/* Read Register */
+		struct ts_qt5480_frame frame;
+
+		if (copy_from_user(&frame, (int *)arg,
+				   sizeof(struct ts_qt5480_frame))) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_GETREG - failed copy_from_user\n");
+			err = -EFAULT;
+			goto out;
+		}
+
+		frame.stat = ts_qt5480_read_reg(data, frame.addr, frame.data);
+		if (frame.stat < 0) {
+			dev_err(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_GETREG - Failed i2c_transfer\n");
+			err = frame.stat;
+			goto out;
+		}
+
+		if (copy_to_user((int *)arg, &frame,
+				 sizeof(struct ts_qt5480_frame))) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_GETREG - failed copy_to_user\n");
+				err = -EFAULT;
+				goto out;
+		}
+
+		break;
+	}
+	case QT5480_SETREG: {
+		/* Write Register */
+		struct ts_qt5480_frame frame;
+
+		if (copy_from_user(&frame, (int *)arg,
+				   sizeof(struct ts_qt5480_frame))) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_SETREG - failed copy_from_user\n");
+			err = -EFAULT;
+			goto out;
+		}
+
+		frame.stat = ts_qt5480_write_reg(data, frame.addr,
+						 frame.data[0]);
+		if (frame.stat < 0) {
+			dev_err(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_SETREG - Failed i2c_transfer\n");
+			err = frame.stat;
+			goto out;
+		}
+
+		if (copy_to_user((int *)arg, &frame,
+				 sizeof(struct ts_qt5480_frame))) {
+			dev_dbg(&data->client->dev,
+				"ts_qt5480_ioctl() - Error: QT5480_SETREG - failed copy_to_user\n");
+				err = -EFAULT;
+				goto out;
+		}
+
+		break;
+	}
+	}
+
+out:
+	/* Clean up */
+	mutex_unlock(&data->mutex);
+
+	return err;
+}
+
+static int ts_qt5480_input_open(struct input_dev *dev)
+{
+	struct ts_qt5480_data *data = input_get_drvdata(dev);
+
+	return ts_qt5480_start_input(data);
+}
+
+/* attach to an instance of the device that was probed on a bus.
+ * This function is only called if i2c_probe determined that some device
+ * does actually exist at this address
+ */
+static int ts_qt5480_probe(struct i2c_client *client,
+			   const struct i2c_device_id *idp)
+{
+	struct qt5480_platform_data *pdata = client->dev.platform_data;
+	struct ts_qt5480_data *data;
+	struct input_dev *input_dev;
+	int err = 0;
+
+	if (!pdata || !pdata->poll_status || !pdata->phy_map ||
+	    !pdata->config) {
+		dev_err(&client->dev, "valid platform data is required!\n");
+		return -EINVAL;
+	}
+
+	data = kzalloc(sizeof(struct ts_qt5480_data), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!data || !input_dev) {
+		err = -ENOMEM;
+		goto err_mem_alloc;
+	}
+
+	data->dev_addr = client->addr;
+	data->i2c_adap = client->adapter;
+	data->client = client;
+	data->input = input_dev;
+	data->poll_status = pdata->poll_status;
+	data->phy_map = pdata->phy_map;
+	data->config = pdata->config;
+
+	input_set_drvdata(input_dev, data);
+
+	mutex_init(&data->mutex);
+
+	INIT_LIST_HEAD(&data->chip_status.touchscr[0].link);
+	/* Not used now, but better keep it initialised */
+	INIT_LIST_HEAD(&data->chip_status.touchscr[1].link);
+
+	snprintf(data->phys, sizeof(data->phys),
+		 "%s/input0", dev_name(&client->dev));
+
+	input_dev->name = "QT5480 Touchscreen";
+	input_dev->phys = data->phys;
+	input_dev->id.bustype = BUS_I2C;
+
+	input_dev->open = ts_qt5480_input_open;
+
+	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	input_set_abs_params(input_dev, ABS_X, 0, data->phy_map->x_sensor_res,
+			     0, 0);
+	input_set_abs_params(input_dev, ABS_Y, 0, data->phy_map->y_sensor_res,
+			     0, 0);
+	input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_PRESSURE_VALUE,
+			     0, 0);
+
+	/* Reset the controller. */
+	err = ts_qt5480_reset(data);
+	if (err < 0) {
+		dev_warn(&client->dev, "could not reset controller\n");
+		goto err_mem_alloc;
+	}
+
+	/* Configure and calibrate the controller. */
+	if (ts_qt5480_send_config(data) < 0) {
+		dev_warn(&client->dev, "could not configure device\n");
+	}
+
+	/* propagate the IRQ error code; err may hold a stale >= 0 value */
+	err = request_threaded_irq(client->irq, ts_qt5480_isr_check,
+			ts_qt5480_isr, IRQF_ONESHOT, "ts_qt5480", data);
+	if (err) {
+		dev_err(&client->dev, "failed to register irq\n");
+		goto err_mem_alloc;
+	}
+
+	err = input_register_device(input_dev);
+	if (err) {
+		dev_err(&client->dev, "failed to register input device: %d\n",
+			err);
+		goto err_request_irq;
+	}
+
+	i2c_set_clientdata(client, data);
+
+	return 0;
+
+err_request_irq:
+	free_irq(client->irq, data);
+err_mem_alloc:
+	input_free_device(input_dev);
+	kfree(data);
+	return err;
+}
+
+static int ts_qt5480_remove(struct i2c_client *client)
+{
+	struct ts_qt5480_data *data = i2c_get_clientdata(client);
+
+	input_unregister_device(data->input);
+	free_irq(client->irq, data);
+	kfree(data);
+	i2c_set_clientdata(client, NULL);
+
+	return 0;
+}
+
+/* i2c Support Data */
+static const unsigned short normal_i2c[] = {
+	QT5480_SLAVE_ADDRESS, I2C_CLIENT_END
+};
+
+static const struct i2c_device_id ts_qt5480_id[] = {
+	{QT5480_NAME, 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ts_qt5480_id);
+
+static struct i2c_driver i2c_driver_qt5480 = {
+	.driver = {
+	      .name	= QT5480_NAME,
+	},
+	.probe		= ts_qt5480_probe,
+	.remove		= ts_qt5480_remove,
+	.id_table	= ts_qt5480_id,
+	.address_list	= normal_i2c,
+};
+
+/* File Ops Support Data */
+static const struct file_operations ts_qt5480_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ts_qt5480_open,
+	.release	= ts_qt5480_release,
+	.read		= ts_qt5480_read,
+	.unlocked_ioctl	= ts_qt5480_ioctl,
+};
+
+static struct miscdevice ts_qt5480_misc_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "qt5480",
+	.fops           = &ts_qt5480_fops,
+};
+
+int __init ts_qt5480_init(void)
+{
+	int err;
+
+	err = i2c_add_driver(&i2c_driver_qt5480);
+	if (err) {
+		pr_err("ts_qt5480: failed to add i2c driver: %d\n", err);
+		return err;
+	}
+
+	/* Now create the /dev part. */
+	err = misc_register(&ts_qt5480_misc_device);
+	if (err < 0) {
+		/* unwind the i2c driver registration on failure */
+		i2c_del_driver(&i2c_driver_qt5480);
+		pr_err("ts_qt5480: cannot register misc device: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit ts_qt5480_exit(void)
+{
+	misc_deregister(&ts_qt5480_misc_device);
+	i2c_del_driver(&i2c_driver_qt5480);
+}
+
+module_init(ts_qt5480_init);
+module_exit(ts_qt5480_exit);
+
+MODULE_AUTHOR("Pedro Teixido-Rovira <pedro.teixido-rovira@pure.com>");
+MODULE_AUTHOR("Marcin Nowakowski <marcin.nowakowski@pure.com>");
+MODULE_DESCRIPTION("Atmel QT5480 Touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ts_qt5480.h b/drivers/input/touchscreen/ts_qt5480.h
new file mode 100644
index 0000000..4f5bd95
--- /dev/null
+++ b/drivers/input/touchscreen/ts_qt5480.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2009,2010 Imagination Technologies Limited.
+ *
+ * Quantum TouchScreen Controller driver.
+ */
+
+#ifndef TS_QT5480_H
+#define TS_QT5480_H
+
+#include <linux/types.h>
+
+/* touchscreen 0 detect bit in general_status_2 */
+#define TS0_DET			0x01
+/* touchscreen 1 detect bit in general_status_2 */
+#define TS1_DET			0x02
+
+/* bitfield flags indicating that new status has been received */
+#define KEY_0_UPDATE		0x01
+#define KEY_4_UPDATE		0x02
+#define TOUCH_0_UPDATE		0x04
+#define TOUCH_1_UPDATE		0x08
+#define SLIDER_4_UPDATE		0x10
+#define GESTURE_0_UPDATE	0x20
+#define GESTURE_1_UPDATE	0x40
+
+/* standard touch-screen event */
+typedef struct {
+	unsigned short pressure;
+	unsigned short x;
+	unsigned short y;
+} ts_event;
+
+#endif
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 721fdb3..a4c2045 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -18,6 +18,7 @@
  *  - NEXIO/iNexio
  *  - Elo TouchSystems 2700 IntelliTouch
  *  - EasyTouch USB Dual/Multi touch controller from Data Modul
+ *  - Lilliput 8in HDMI Monitor touchscreen (special case)
  *
  * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
  * Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -45,7 +46,7 @@
  *
  *****************************************************************************/
 
-//#define DEBUG
+/* #define DEBUG */
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -142,6 +143,7 @@
 	DEVTYPE_NEXIO,
 	DEVTYPE_ELO,
 	DEVTYPE_ETOUCH,
+	DEVTYPE_LILLIPUT,
 };
 
 #define USB_DEVICE_HID_CLASS(vend, prod) \
@@ -251,6 +253,10 @@
 	{USB_DEVICE(0x7374, 0x0001), .driver_info = DEVTYPE_ETOUCH},
 #endif
 
+#ifdef CONFIG_TOUCHSCREEN_USB_LILLIPUT
+	{USB_DEVICE(0x0eef, 0x0001), .driver_info = DEVTYPE_LILLIPUT},
+#endif
+
 	{}
 };
 
@@ -1045,6 +1051,55 @@
 }
 #endif
 
+/*****************************************************************************
+ * Lilliput 8in VGA/HDMI monitor touchscreen
+ *
+ * All sources I have found suggest that the touchscreen part for this device
+ * is the eGalax Touchscreen driver (including the vendor and product IDs).
+ * However, when this device was not functioning correctly, the captures from
+ * a USB analyser suggest that the touchscreen is not driven by the eGalax
+ * part.
+ */
+#ifdef CONFIG_TOUCHSCREEN_USB_LILLIPUT
+
+#define LILLIPUT_PKT_SYNC		0x02
+
+/*
+ * Calibration Values, array of short ints:
+ * x-min, y-min, x-max, y-max
+ */
+static short calib_vals[4];
+module_param_array (calib_vals, short, NULL, 0664);
+MODULE_PARM_DESC(calib_vals, \
+	"An array of 4x 16-bit values containing the min/max x/y values and "\
+	"step values");
+
+
+static int lilliput_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
+{
+	unsigned short x, y;
+	if (pkt[0] != LILLIPUT_PKT_SYNC)
+		return 0;
+
+	/* Negate the y value, as the touchscreen has a bottom-left origin */
+	x = ((pkt[5] & 0x0f) << 8) | (pkt[4] & 0xff);
+	y = (((pkt[3] & 0x0f) << 8) | (pkt[2] & 0xff)) ^ 0x0fff;
+
+	/* Only scale when calibrated: guard the span divisors, not the
+	 * minima, so a zero x/y minimum is still a valid calibration */
+	if (calib_vals[2] != calib_vals[0] && calib_vals[3] != calib_vals[1]) {
+		x = ((x - calib_vals[0]) * 3072) /
+				(calib_vals[2] - calib_vals[0]) + 512;
+		y = ((y - calib_vals[1]) * 3072) /
+				(calib_vals[3] - calib_vals[1]) + 512;
+	}
+
+	dev->x = x;
+	dev->y = y;
+	dev->touch = pkt[1] & 0x01;
+
+	return 1;
+}
+#endif
 
 /*****************************************************************************
  * ELO part
@@ -1284,6 +1339,17 @@
 		.read_data	= etouch_read_data,
 	},
 #endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_LILLIPUT
+	[DEVTYPE_LILLIPUT] = {
+		.min_xc		= 0x0000,
+		.max_xc		= 0x0fff,
+		.min_yc		= 0x0000,
+		.max_yc		= 0x0fff,
+		.rept_size	= 6,
+		.read_data	= lilliput_read_data,
+	},
+#endif
 };
 
 
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 4a33351..2bfcf77 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -25,6 +25,10 @@
 	  The maximum number of VICs available in the system, for
 	  power management.
 
+config IMGPDC_IRQ
+	bool
+	select IRQ_DOMAIN
+
 config RENESAS_INTC_IRQPIN
 	bool
 	select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index cda4cb5..b24f3f5 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
 obj-$(CONFIG_ARM_GIC)			+= irq-gic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
+obj-$(CONFIG_IMGPDC_IRQ)		+= irq-imgpdc.o
 obj-$(CONFIG_SIRF_IRQ)			+= irq-sirfsoc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
new file mode 100644
index 0000000..0190d2e
--- /dev/null
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -0,0 +1,541 @@
+/*
+ * IMG PowerDown Controller (PDC)
+ *
+ * Copyright 2010-2013 Imagination Technologies Ltd.
+ *
+ * Exposes the syswake and PDC peripheral wake interrupts to the system.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* PDC interrupt register numbers */
+
+#define PDC_IRQ_STATUS			0x310
+#define PDC_IRQ_ENABLE			0x314
+#define PDC_IRQ_CLEAR			0x318
+#define PDC_IRQ_ROUTE			0x31c
+#define PDC_SYS_WAKE_BASE		0x330
+#define PDC_SYS_WAKE_STRIDE		0x8
+#define PDC_SYS_WAKE_CONFIG_BASE	0x334
+#define PDC_SYS_WAKE_CONFIG_STRIDE	0x8
+
+/* PDC interrupt register field masks */
+
+#define PDC_IRQ_SYS3			0x08
+#define PDC_IRQ_SYS2			0x04
+#define PDC_IRQ_SYS1			0x02
+#define PDC_IRQ_SYS0			0x01
+#define PDC_IRQ_ROUTE_WU_EN_SYS3	0x08000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS2	0x04000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS1	0x02000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS0	0x01000000
+#define PDC_IRQ_ROUTE_WU_EN_WD		0x00040000
+#define PDC_IRQ_ROUTE_WU_EN_IR		0x00020000
+#define PDC_IRQ_ROUTE_WU_EN_RTC		0x00010000
+#define PDC_IRQ_ROUTE_EXT_EN_SYS3	0x00000800
+#define PDC_IRQ_ROUTE_EXT_EN_SYS2	0x00000400
+#define PDC_IRQ_ROUTE_EXT_EN_SYS1	0x00000200
+#define PDC_IRQ_ROUTE_EXT_EN_SYS0	0x00000100
+#define PDC_IRQ_ROUTE_EXT_EN_WD		0x00000004
+#define PDC_IRQ_ROUTE_EXT_EN_IR		0x00000002
+#define PDC_IRQ_ROUTE_EXT_EN_RTC	0x00000001
+#define PDC_SYS_WAKE_RESET		0x00000010
+#define PDC_SYS_WAKE_INT_MODE		0x0000000e
+#define PDC_SYS_WAKE_INT_MODE_SHIFT	1
+#define PDC_SYS_WAKE_PIN_VAL		0x00000001
+
+/* PDC interrupt constants */
+
+#define PDC_SYS_WAKE_INT_LOW		0x0
+#define PDC_SYS_WAKE_INT_HIGH		0x1
+#define PDC_SYS_WAKE_INT_DOWN		0x2
+#define PDC_SYS_WAKE_INT_UP		0x3
+#define PDC_SYS_WAKE_INT_CHANGE		0x6
+#define PDC_SYS_WAKE_INT_NONE		0x4
+
+/**
+ * struct pdc_intc_priv - private pdc interrupt data.
+ * @nr_perips:		Number of peripheral interrupt signals.
+ * @nr_syswakes:	Number of syswake signals.
+ * @perip_irqs:		List of peripheral IRQ numbers handled.
+ * @syswake_irq:	Shared PDC syswake IRQ number.
+ * @domain:		IRQ domain for PDC peripheral and syswake IRQs.
+ * @pdc_base:		Base of PDC registers.
+ * @irq_en:		Cached version of PDC_IRQ_ENABLE register.
+ * @irq_route:		Cached version of PDC_IRQ_ROUTE register.
+ * @lock:		Lock to protect the PDC syswake registers and the cached
+ *			values of those registers in this struct.
+ */
+struct pdc_intc_priv {
+	unsigned int		nr_perips;
+	unsigned int		nr_syswakes;
+	unsigned int		*perip_irqs;
+	unsigned int		syswake_irq;
+	struct irq_domain	*domain;
+	void __iomem		*pdc_base;
+
+	u32			irq_en;
+	u32			irq_route;
+	raw_spinlock_t		lock;
+};
+
+static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
+		      unsigned int data)
+{
+	iowrite32(data, priv->pdc_base + reg_offs);
+}
+
+static unsigned int pdc_read(struct pdc_intc_priv *priv,
+			     unsigned int reg_offs)
+{
+	return ioread32(priv->pdc_base + reg_offs);
+}
+
+/* Generic IRQ callbacks */
+
+#define SYS0_HWIRQ	8
+
+static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
+{
+	return hw >= SYS0_HWIRQ;
+}
+
+static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
+{
+	return hw - SYS0_HWIRQ;
+}
+
+static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
+{
+	return SYS0_HWIRQ + syswake;
+}
+
+static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
+{
+	return (struct pdc_intc_priv *)data->domain->host_data;
+}
+
+static void perip_irq_mask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_route &= ~(1 << data->hwirq);
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+}
+
+static void perip_irq_unmask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_route |= 1 << data->hwirq;
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+}
+
+static void syswake_irq_ack(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	unsigned int syswake = hwirq_to_syswake(data->hwirq);
+
+	pdc_write(priv, PDC_IRQ_CLEAR, 1 << syswake);
+}
+
+static void syswake_irq_mask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	unsigned int syswake = hwirq_to_syswake(data->hwirq);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_en &= ~(PDC_IRQ_SYS0 << syswake);
+	pdc_write(priv, PDC_IRQ_ENABLE, priv->irq_en);
+	raw_spin_unlock(&priv->lock);
+}
+
+static void syswake_irq_unmask(struct irq_data *data)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	unsigned int syswake = hwirq_to_syswake(data->hwirq);
+
+	raw_spin_lock(&priv->lock);
+	priv->irq_en |= PDC_IRQ_SYS0 << syswake;
+	pdc_write(priv, PDC_IRQ_ENABLE, priv->irq_en);
+	raw_spin_unlock(&priv->lock);
+}
+
+static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	unsigned int syswake = hwirq_to_syswake(data->hwirq);
+	unsigned int irq_mode;
+	unsigned int soc_sys_wake_regoff, soc_sys_wake;
+
+	/* translate to syswake IRQ mode */
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_BOTH:
+		irq_mode = PDC_SYS_WAKE_INT_CHANGE;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		irq_mode = PDC_SYS_WAKE_INT_UP;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		irq_mode = PDC_SYS_WAKE_INT_DOWN;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		irq_mode = PDC_SYS_WAKE_INT_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		irq_mode = PDC_SYS_WAKE_INT_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	raw_spin_lock(&priv->lock);
+
+	/* set the IRQ mode */
+	soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
+	soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
+	soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
+	soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
+	pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+
+	/* and update the handler */
+	if (flow_type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(data->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(data->irq, handle_edge_irq);
+
+	raw_spin_unlock(&priv->lock);
+
+	return 0;
+}
+
+/* applies to both peripheral and syswake interrupts */
+static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct pdc_intc_priv *priv = irqd_to_priv(data);
+	irq_hw_number_t hw = data->hwirq;
+	unsigned int mask = (1 << 16) << hw;
+	unsigned int dst_irq;
+
+	raw_spin_lock(&priv->lock);
+	if (on)
+		priv->irq_route |= mask;
+	else
+		priv->irq_route &= ~mask;
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+	raw_spin_unlock(&priv->lock);
+
+	/* control the destination IRQ wakeup too for standby mode */
+	if (hwirq_is_syswake(hw))
+		dst_irq = priv->syswake_irq;
+	else
+		dst_irq = priv->perip_irqs[hw];
+	irq_set_irq_wake(dst_irq, on);
+
+	return 0;
+}
+
+/* peripheral virtual interrupt functions */
+static struct irq_chip perip_irq_chip = {
+	.irq_mask	= perip_irq_mask,
+	.irq_unmask	= perip_irq_unmask,
+	.irq_set_wake	= pdc_irq_set_wake,
+	/* for standby we use the peripheral IRQ */
+	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+};
+
+/* syswake virtual interrupt functions */
+static struct irq_chip syswake_irq_chip = {
+	.irq_ack	= syswake_irq_ack,
+	.irq_mask	= syswake_irq_mask,
+	.irq_unmask	= syswake_irq_unmask,
+	.irq_set_type	= syswake_irq_set_type,
+	.irq_set_wake	= pdc_irq_set_wake,
+	/* for standby we use the shared IRQ */
+	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int pdc_intc_irq_map(struct irq_domain *d, unsigned int irq,
+			    irq_hw_number_t hw)
+{
+	if (hwirq_is_syswake(hw))
+		irq_set_chip_and_handler(irq, &syswake_irq_chip,
+					 handle_edge_irq);
+	else
+		irq_set_chip_and_handler(irq, &perip_irq_chip,
+					 handle_level_irq);
+	return 0;
+}
+
+/* translate interrupt identifier to hwirq number */
+static int pdc_intc_irq_xlate(struct irq_domain *d,
+			      struct device_node *ctrlr,
+			      const u32 *intspec,
+			      unsigned int intsize,
+			      irq_hw_number_t *out_hwirq,
+			      unsigned int *out_type)
+{
+	struct pdc_intc_priv *priv = d->host_data;
+
+	if (WARN_ON(intsize < 2))
+		return -EINVAL;
+
+	/* 0 indicates peripheral interrupt */
+	if (intspec[0] == 0) {
+		if (WARN_ON(intspec[1] >= priv->nr_perips))
+			return -EINVAL;
+		*out_hwirq = intspec[1];
+		return 0;
+	}
+
+	/* invalid? */
+	if (WARN_ON(intspec[0] != 1))
+		return -EINVAL;
+
+	/* 1 indictes syswake interrupt */
+	if (WARN_ON(intsize < 3))
+		return -EINVAL;
+	if (WARN_ON(intspec[1] >= priv->nr_syswakes))
+		return -EINVAL;
+	*out_hwirq = syswake_to_hwirq(intspec[1]);
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+
+static const struct irq_domain_ops pdc_intc_irq_domain_ops = {
+	.map	= pdc_intc_irq_map,
+	.xlate	= pdc_intc_irq_xlate,
+};
+
+static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+{
+	struct pdc_intc_priv *priv;
+	unsigned int i, irq_no;
+
+	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+	/* find the peripheral number */
+	for (i = 0; i < priv->nr_perips; ++i)
+		if (irq == priv->perip_irqs[i])
+			goto found;
+
+	/* should never get here */
+	return;
+found:
+
+	/* pass on the interrupt */
+	irq_no = irq_linear_revmap(priv->domain, i);
+	generic_handle_irq(irq_no);
+}
+
+static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
+{
+	struct pdc_intc_priv *priv;
+	unsigned int syswake, irq_no;
+	unsigned int status;
+
+	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+	raw_spin_lock(&priv->lock);
+	status = pdc_read(priv, PDC_IRQ_STATUS) & priv->irq_en;
+	raw_spin_unlock(&priv->lock);
+
+	status &= (1 << priv->nr_syswakes) - 1;
+
+	for (syswake = 0; status; status >>= 1, ++syswake) {
+		/* Has this sys_wake triggered? */
+		if (!(status & 1))
+			continue;
+
+		irq_no = irq_linear_revmap(priv->domain,
+					   syswake_to_hwirq(syswake));
+		generic_handle_irq(irq_no);
+	}
+}
+
+static void pdc_intc_setup(struct pdc_intc_priv *priv)
+{
+	int i;
+	unsigned int soc_sys_wake_regoff;
+	unsigned int soc_sys_wake;
+
+	/*
+	 * Mask all syswake interrupts before routing, or we could receive an
+	 * interrupt before we're ready to handle it.
+	 */
+	priv->irq_en = 0;
+	pdc_write(priv, PDC_IRQ_ENABLE, priv->irq_en);
+
+	/*
+	 * Enable routing of all syswakes
+	 * Disable all wake sources
+	 */
+	priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
+				PDC_IRQ_ROUTE_EXT_EN_SYS0);
+	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+
+	/* Initialise syswake IRQ */
+	for (i = 0; i < priv->nr_syswakes; ++i) {
+		/* set the IRQ mode to rising edge */
+		soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
+		soc_sys_wake = PDC_SYS_WAKE_INT_UP
+				<< PDC_SYS_WAKE_INT_MODE_SHIFT;
+		pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+	}
+}
+
+static int pdc_intc_probe(struct platform_device *pdev)
+{
+	struct pdc_intc_priv *priv;
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *res_regs;
+	unsigned int i;
+	int irq, ret;
+	u32 val;
+
+	if (!node)
+		return -ENOENT;
+
+	/* Get registers */
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		return -ENOENT;
+	}
+
+	/* Allocate driver data */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		return -ENOMEM;
+	}
+	raw_spin_lock_init(&priv->lock);
+	platform_set_drvdata(pdev, priv);
+
+	/* Ioremap the registers (resource_size avoids an off-by-one) */
+	priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
+				      resource_size(res_regs));
+	if (!priv->pdc_base)
+		return -EIO;
+
+	/* Get number of peripherals */
+	ret = of_property_read_u32(node, "num-perips", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "No num-perips node property found\n");
+		return -EINVAL;
+	}
+	if (val > SYS0_HWIRQ) {
+		dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
+		return -EINVAL;
+	}
+	priv->nr_perips = val;
+
+	/* Get number of syswakes */
+	ret = of_property_read_u32(node, "num-syswakes", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "No num-syswakes node property found\n");
+		return -EINVAL;
+	}
+	if (val > SYS0_HWIRQ) {
+		dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
+		return -EINVAL;
+	}
+	priv->nr_syswakes = val;
+
+	/* Get peripheral IRQ numbers (sizeof element, not hard-coded 4) */
+	priv->perip_irqs = devm_kzalloc(&pdev->dev, sizeof(*priv->perip_irqs)
+					* priv->nr_perips, GFP_KERNEL);
+	if (!priv->perip_irqs) {
+		dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < priv->nr_perips; ++i) {
+		irq = platform_get_irq(pdev, 1 + i);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
+			return irq;
+		}
+		priv->perip_irqs[i] = irq;
+	}
+	/* check if too many were provided */
+	if (platform_get_irq(pdev, 1 + i) >= 0) {
+		dev_err(&pdev->dev, "surplus perip IRQs detected\n");
+		return -EINVAL;
+	}
+
+	/* Get syswake IRQ number */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find syswake IRQ\n");
+		return irq;
+	}
+	priv->syswake_irq = irq;
+
+	/* Set up an IRQ domain */
+	priv->domain = irq_domain_add_linear(node, 16, &pdc_intc_irq_domain_ops,
+					     priv);
+	if (unlikely(!priv->domain)) {
+		dev_err(&pdev->dev, "cannot add IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	/* Set up the hardware to enable interrupt routing */
+	pdc_intc_setup(priv);
+
+	/* Setup chained handlers for the peripheral IRQs */
+	for (i = 0; i < priv->nr_perips; ++i) {
+		irq = priv->perip_irqs[i];
+		irq_set_handler_data(irq, priv);
+		irq_set_chained_handler(irq, pdc_intc_perip_isr);
+	}
+
+	/* Setup chained handler for the syswake IRQ */
+	irq_set_handler_data(priv->syswake_irq, priv);
+	irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+
+	dev_info(&pdev->dev,
+		 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+		 priv->nr_perips,
+		 priv->nr_syswakes);
+
+	return 0;
+}
+
+static int pdc_intc_remove(struct platform_device *pdev)
+{
+	struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
+
+	irq_domain_remove(priv->domain);
+	return 0;
+}
+
+static const struct of_device_id pdc_intc_match[] = {
+	{ .compatible = "img,pdc-intc" },
+	{}
+};
+
+static struct platform_driver pdc_intc_driver = {
+	.driver = {
+		.name		= "pdc-intc",
+		.of_match_table	= pdc_intc_match,
+	},
+	.probe = pdc_intc_probe,
+	.remove = pdc_intc_remove,
+};
+
+static int __init pdc_intc_init(void)
+{
+	return platform_driver_register(&pdc_intc_driver);
+}
+core_initcall(pdc_intc_init);
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 92c41ab..13d9aa8 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -10,6 +10,7 @@
  * meta_intc_irq_demux().
  */
 
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/irqchip/metag-ext.h>
 #include <linux/irqdomain.h>
@@ -557,6 +558,25 @@
 };
 
 /**
+ * external_irq_map() - Map an external SoC IRQ to a virtual IRQ number.
+ * @hw:		Number of the external IRQ.
+ *
+ * This function is DEPRECATED. Use device tree instead.
+ *
+ * Returns:	The virtual IRQ number of the external IRQ specified by @hw.
+ */
+int external_irq_map(unsigned int hw)
+{
+	struct meta_intc_priv *priv = &meta_intc_priv;
+	if (!priv->domain)
+		return -ENODEV;
+	if (hw >= priv->nr_banks*32)
+		return -EINVAL;
+	return irq_create_mapping(priv->domain, hw);
+}
+EXPORT_SYMBOL_GPL(external_irq_map);
+
+/**
  * meta_intc_map() - map an external irq
  * @d:		irq domain of external trigger block
  * @irq:	virtual irq number
@@ -816,7 +836,10 @@
 	struct device_node *node;
 	int ret, cpu;
 	u32 val;
+	u32 vals[4];
 	bool no_masks = false;
+	unsigned int i;
+	void __iomem *level_addr;
 
 	node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
 	if (!node)
@@ -838,6 +861,25 @@
 	if (of_get_property(node, "no-mask", NULL))
 		no_masks = true;
 
+	/* Are any default edge/level senses available? */
+	ret = of_property_read_u32_array(node, "default-level", vals,
+					 priv->nr_banks);
+	if (!ret) {
+		/* valid, set HWLEVELEXT registers */
+		level_addr = (void __iomem *)HWLEVELEXT;
+		for (i = 0; i < priv->nr_banks; ++i,
+						level_addr += HWSTAT_STRIDE) {
+			metag_out32(vals[i], level_addr);
+#ifdef CONFIG_METAG_SUSPEND_MEM
+			priv->levels_altered[i] = 0xffffffff;
+#endif
+		}
+	} else if (ret != -EINVAL) {
+		/* invalid (rather than simply omitted) */
+		pr_err("meta-intc: default-level could not be read\n");
+		return ret;
+	}
+
 	/* No HWMASKEXT registers present? */
 	if (no_masks)
 		meta_intc_no_mask();
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5a79c33..9a7bc1b 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -299,6 +299,59 @@
 	   The driver uses omap DM timers for generating the carrier
 	   wave and pulses.
 
+config IR_IMG
+	tristate "ImgTec IR Decoder"
+	depends on SOC_TZ1090
+	depends on RC_CORE
+	help
+	   Say Y here if you want to use the ImgTec infrared decoder
+	   functionality found in some ImgTec SoCs such as Comet.
+
+config IR_IMG_NEC
+	tristate "NEC protocol support"
+	depends on IR_IMG && !IR_IMG_RAW
+	help
+	   Say Y here to enable support for the NEC protocol in the
+	   ImgTec infrared decoder block.
+
+config IR_IMG_JVC
+	tristate "JVC protocol support"
+	depends on IR_IMG && !IR_IMG_RAW
+	help
+	   Say Y here to enable support for the JVC protocol in the
+	   ImgTec infrared decoder block.
+
+config IR_IMG_SONY
+	tristate "Sony protocol support"
+	depends on IR_IMG && !IR_IMG_RAW
+	help
+	   Say Y here to enable support for the Sony protocol in the
+	   ImgTec infrared decoder block.
+
+config IR_IMG_SHARP
+	tristate "Sharp protocol support"
+	depends on IR_IMG && !IR_IMG_RAW
+	help
+	   Say Y here to enable support for the Sharp protocol in the
+	   ImgTec infrared decoder block.
+
+config IR_IMG_SANYO
+	tristate "Sanyo protocol support"
+	depends on IR_IMG && !IR_IMG_RAW
+	help
+	   Say Y here to enable support for the Sanyo protocol (used by Sanyo,
+	   Aiwa, Chinon remotes) in the ImgTec infrared decoder block.
+
+config IR_IMG_RAW
+	bool "Raw mode"
+	depends on IR_IMG
+	help
+	   Say Y here to switch to raw mode which only passes raw IR signal
+	   changes to the IR raw decoders for software decoding. This is much
+	   less reliable (due to lack of timestamps) and uses more processing
+	   power than using hardware decode, but can be useful for testing,
+	   debug, and to make other protocols available.
+
 config RC_LOOPBACK
 	tristate "Remote Control Loopback Driver"
 	depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 56bacf0..0c41a2a 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -30,3 +30,9 @@
 obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
 obj-$(CONFIG_IR_IGUANA) += iguanair.o
 obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
+obj-$(CONFIG_IR_IMG) += ir-img.o
+obj-$(CONFIG_IR_IMG_NEC) += ir-img-nec.o
+obj-$(CONFIG_IR_IMG_JVC) += ir-img-jvc.o
+obj-$(CONFIG_IR_IMG_SONY) += ir-img-sony.o
+obj-$(CONFIG_IR_IMG_SHARP) += ir-img-sharp.o
+obj-$(CONFIG_IR_IMG_SANYO) += ir-img-sanyo.o
diff --git a/drivers/media/rc/ir-img-jvc.c b/drivers/media/rc/ir-img-jvc.c
new file mode 100644
index 0000000..b7a1e91
--- /dev/null
+++ b/drivers/media/rc/ir-img-jvc.c
@@ -0,0 +1,109 @@
+/*
+ * ImgTec IR Decoder setup for JVC protocol.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/module.h>
+
+#include "ir-img.h"
+
+/* Convert JVC data to a scancode */
+static int img_ir_jvc_scancode(int len, u64 raw, u64 protocols)
+{
+	unsigned int cust, data;
+
+	if (len != 16)
+		return IMG_IR_ERR_INVALID;
+
+	cust = (raw >> 0) & 0xff;
+	data = (raw >> 8) & 0xff;
+
+	return cust << 8 | data;
+}
+
+/* Convert JVC scancode to JVC data filter */
+static int img_ir_jvc_filter(const struct img_ir_sc_filter *in,
+			     struct img_ir_filter *out, u64 protocols)
+{
+	unsigned int cust, data;
+	unsigned int cust_m, data_m;
+
+	cust   = (in->data >> 8) & 0xff;
+	cust_m = (in->mask >> 8) & 0xff;
+	data   = (in->data >> 0) & 0xff;
+	data_m = (in->mask >> 0) & 0xff;
+
+	out->data = cust   | data << 8;
+	out->mask = cust_m | data_m << 8;
+
+	return 0;
+}
+
+/*
+ * JVC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/jvc.php
+ *          http://support.jvc.com/consumer/support/documents/RemoteCodes.pdf
+ */
+static struct img_ir_decoder img_ir_jvc = {
+	.type = RC_BIT_JVC,
+	.control = {
+		.decoden = 1,
+		.code_type = IMG_IR_CODETYPE_PULSEDIST,
+		.decodend2 = 1,
+	},
+	/* main timings */
+	.unit = 527500, /* 527.5 us */
+	.timings = {
+		/* leader symbol */
+		.ldr = {
+			.pulse = { 16	/* 8.44 ms */ },
+			.space = { 8	/* 4.22 ms */ },
+		},
+		/* 0 symbol */
+		.s00 = {
+			.pulse = { 1	/* 527.5 us +-60 us */ },
+			.space = { 1	/* 527.5 us */ },
+		},
+		/* 1 symbol */
+		.s01 = {
+			.pulse = { 1	/* 527.5 us +-60 us */ },
+			.space = { 3	/* 1.5825 ms +-40 us */ },
+		},
+		/* 0 symbol (no leader) */
+		.s10 = {
+			.pulse = { 1	/* 527.5 us +-60 us */ },
+			.space = { 1	/* 527.5 us */ },
+		},
+		/* 1 symbol (no leader) */
+		.s11 = {
+			.pulse = { 1	/* 527.5 us +-60 us */ },
+			.space = { 3	/* 1.5825 ms +-40 us */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 16,
+			.maxlen = 16,
+			.ft_min = 10,	/* 5.275 ms */
+		},
+	},
+	/* scancode logic */
+	.scancode = img_ir_jvc_scancode,
+	.filter = img_ir_jvc_filter,
+};
+
+static int __init img_ir_jvc_init(void)
+{
+	return img_ir_register_decoder(&img_ir_jvc);
+}
+module_init(img_ir_jvc_init);
+
+static void __exit img_ir_jvc_exit(void)
+{
+	img_ir_unregister_decoder(&img_ir_jvc);
+}
+module_exit(img_ir_jvc_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR JVC protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img-nec.c b/drivers/media/rc/ir-img-nec.c
new file mode 100644
index 0000000..1eb2877
--- /dev/null
+++ b/drivers/media/rc/ir-img-nec.c
@@ -0,0 +1,149 @@
+/*
+ * ImgTec IR Decoder setup for NEC protocol.
+ *
+ * Copyright 2010,2011,2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/module.h>
+
+#include "ir-img.h"
+
+/* Convert NEC data to a scancode */
+static int img_ir_nec_scancode(int len, u64 raw, u64 protocols)
+{
+	unsigned int addr, addr_inv, data, data_inv;
+	int scancode;
+	/* a repeat code has no data */
+	if (!len)
+		return IMG_IR_REPEATCODE;
+	if (len != 32)
+		return IMG_IR_ERR_INVALID;
+	addr     = (raw >>  0) & 0xff;
+	addr_inv = (raw >>  8) & 0xff;
+	data     = (raw >> 16) & 0xff;
+	data_inv = (raw >> 24) & 0xff;
+	/* Validate data */
+	if ((data_inv ^ data) != 0xff)
+		return IMG_IR_ERR_INVALID;
+
+	if ((addr_inv ^ addr) != 0xff) {
+		/* Extended NEC */
+		scancode = addr     << 16 |
+			   addr_inv <<  8 |
+			   data;
+	} else {
+		/* Normal NEC */
+		scancode = addr << 8 |
+			   data;
+	}
+	return scancode;
+}
+
+/* Convert NEC scancode to NEC data filter */
+static int img_ir_nec_filter(const struct img_ir_sc_filter *in,
+			     struct img_ir_filter *out, u64 protocols)
+{
+	unsigned int addr, addr_inv, data, data_inv;
+	unsigned int addr_m, addr_inv_m, data_m;
+
+	data     = in->data & 0xff;
+	data_m   = in->mask & 0xff;
+	data_inv = data ^ 0xff;
+
+	if (in->data & 0xff000000)
+		return -EINVAL;
+
+	if (in->data & 0x00ff0000) {
+		/* Extended NEC */
+		addr       = (in->data >> 16) & 0xff;
+		addr_m     = (in->mask >> 16) & 0xff;
+		addr_inv   = (in->data >>  8) & 0xff;
+		addr_inv_m = (in->mask >>  8) & 0xff;
+	} else {
+		/* Normal NEC */
+		addr       = (in->data >>  8) & 0xff;
+		addr_m     = (in->mask >>  8) & 0xff;
+		addr_inv   = addr ^ 0xff;
+		addr_inv_m = addr_m;
+	}
+
+	out->data = data_inv << 24 |
+		    data     << 16 |
+		    addr_inv <<  8 |
+		    addr;
+	out->mask = data_m     << 24 |
+		    data_m     << 16 |
+		    addr_inv_m <<  8 |
+		    addr_m;
+	return 0;
+}
+
+/*
+ * NEC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/nec.php
+ *        http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
+ */
+static struct img_ir_decoder img_ir_nec = {
+	.type = RC_BIT_NEC,
+	.control = {
+		.decoden = 1,
+		.code_type = IMG_IR_CODETYPE_PULSEDIST,
+	},
+	/* main timings */
+	.unit = 562500, /* 562.5 us */
+	.timings = {
+		/* leader symbol */
+		.ldr = {
+			.pulse = { 16	/* 9ms */ },
+			.space = { 8	/* 4.5ms */ },
+		},
+		/* 0 symbol */
+		.s00 = {
+			.pulse = { 1	/* 562.5 us */ },
+			.space = { 1	/* 562.5 us */ },
+		},
+		/* 1 symbol */
+		.s01 = {
+			.pulse = { 1	/* 562.5 us */ },
+			.space = { 3	/* 1687.5 us */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 32,
+			.maxlen = 32,
+			.ft_min = 10,	/* 5.625 ms */
+		},
+	},
+	/* repeat codes */
+	.repeat = 108,			/* 108 ms */
+	.rtimings = {
+		/* leader symbol */
+		.ldr = {
+			.space = { 4	/* 2.25 ms */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 0,	/* repeat code has no data */
+			.maxlen = 0,
+		},
+	},
+	/* scancode logic */
+	.scancode = img_ir_nec_scancode,
+	.filter = img_ir_nec_filter,
+};
+
+static int __init img_ir_nec_init(void)
+{
+	return img_ir_register_decoder(&img_ir_nec);
+}
+module_init(img_ir_nec_init);
+
+static void __exit img_ir_nec_exit(void)
+{
+	img_ir_unregister_decoder(&img_ir_nec);
+}
+module_exit(img_ir_nec_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR NEC protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img-sanyo.c b/drivers/media/rc/ir-img-sanyo.c
new file mode 100644
index 0000000..07835ad
--- /dev/null
+++ b/drivers/media/rc/ir-img-sanyo.c
@@ -0,0 +1,139 @@
+/*
+ * ImgTec IR Decoder setup for Sanyo protocol.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ *
+ * From ir-sanyo-decoder.c:
+ *
+ * This protocol uses the NEC protocol timings. However, data is formatted as:
+ *	13 bits Custom Code
+ *	13 bits NOT(Custom Code)
+ *	8 bits Key data
+ *	8 bits NOT(Key data)
+ *
+ * According with LIRC, this protocol is used on Sanyo, Aiwa and Chinon
+ * Information for this protocol is available at the Sanyo LC7461 datasheet.
+ */
+
+#include <linux/module.h>
+
+#include "ir-img.h"
+
+/* Convert Sanyo data to a scancode */
+static int img_ir_sanyo_scancode(int len, u64 raw, u64 protocols)
+{
+	unsigned int addr, addr_inv, data, data_inv;
+	/* a repeat code has no data */
+	if (!len)
+		return IMG_IR_REPEATCODE;
+	if (len != 42)
+		return IMG_IR_ERR_INVALID;
+	addr     = (raw >>  0) & 0x1fff;
+	addr_inv = (raw >> 13) & 0x1fff;
+	data     = (raw >> 26) & 0xff;
+	data_inv = (raw >> 34) & 0xff;
+	/* Validate data */
+	if ((data_inv ^ data) != 0xff)
+		return IMG_IR_ERR_INVALID;
+	/* Validate address */
+	if ((addr_inv ^ addr) != 0x1fff)
+		return IMG_IR_ERR_INVALID;
+
+	/* Normal Sanyo */
+	return addr << 8 | data;
+}
+
+/* Convert Sanyo scancode to Sanyo data filter */
+static int img_ir_sanyo_filter(const struct img_ir_sc_filter *in,
+			       struct img_ir_filter *out, u64 protocols)
+{
+	unsigned int addr, addr_inv, data, data_inv;
+	unsigned int addr_m, data_m;
+
+	data = in->data & 0xff;
+	data_m = in->mask & 0xff;
+	data_inv = data ^ 0xff;
+
+	if (in->data & 0xffe00000)
+		return -EINVAL;
+
+	addr       = (in->data >> 8) & 0x1fff;
+	addr_m     = (in->mask >> 8) & 0x1fff;
+	addr_inv   = addr ^ 0x1fff;
+
+	out->data = (u64)data_inv << 34 |
+		    (u64)data     << 26 |
+			 addr_inv << 13 |
+			 addr;
+	out->mask = (u64)data_m << 34 |
+		    (u64)data_m << 26 |
+			 addr_m << 13 |
+			 addr_m;
+	return 0;
+}
+
+/* Sanyo decoder */
+static struct img_ir_decoder img_ir_sanyo = {
+	.type = RC_BIT_SANYO,
+	.control = {
+		.decoden = 1,
+		.code_type = IMG_IR_CODETYPE_PULSEDIST,
+	},
+	/* main timings */
+	.unit = 562500, /* 562.5 us */
+	.timings = {
+		/* leader symbol */
+		.ldr = {
+			.pulse = { 16	/* 9ms */ },
+			.space = { 8	/* 4.5ms */ },
+		},
+		/* 0 symbol */
+		.s00 = {
+			.pulse = { 1	/* 562.5 us */ },
+			.space = { 1	/* 562.5 us */ },
+		},
+		/* 1 symbol */
+		.s01 = {
+			.pulse = { 1	/* 562.5 us */ },
+			.space = { 3	/* 1687.5 us */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 42,
+			.maxlen = 42,
+			.ft_min = 10,	/* 5.625 ms */
+		},
+	},
+	/* repeat codes */
+	.repeat = 108,			/* 108 ms */
+	.rtimings = {
+		/* leader symbol */
+		.ldr = {
+			.space = { 4	/* 2.25 ms */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 0,	/* repeat code has no data */
+			.maxlen = 0,
+		},
+	},
+	/* scancode logic */
+	.scancode = img_ir_sanyo_scancode,
+	.filter = img_ir_sanyo_filter,
+};
+
+static int __init img_ir_sanyo_init(void)
+{
+	return img_ir_register_decoder(&img_ir_sanyo);
+}
+module_init(img_ir_sanyo_init);
+
+static void __exit img_ir_sanyo_exit(void)
+{
+	img_ir_unregister_decoder(&img_ir_sanyo);
+}
+module_exit(img_ir_sanyo_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR Sanyo protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img-sharp.c b/drivers/media/rc/ir-img-sharp.c
new file mode 100644
index 0000000..a32974c
--- /dev/null
+++ b/drivers/media/rc/ir-img-sharp.c
@@ -0,0 +1,115 @@
+/*
+ * ImgTec IR Decoder setup for Sharp protocol.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/module.h>
+
+#include "ir-img.h"
+
+/* Convert Sharp data to a scancode */
+static int img_ir_sharp_scancode(int len, u64 raw, u64 protocols)
+{
+	unsigned int addr, cmd, exp, chk;
+
+	if (len != 15)
+		return IMG_IR_ERR_INVALID;
+
+	addr = (raw >>   0) & 0x1f;
+	cmd  = (raw >>   5) & 0xff;
+	exp  = (raw >>  13) &  0x1;
+	chk  = (raw >>  14) &  0x1;
+
+	/* validate data */
+	if (!exp)
+		return IMG_IR_ERR_INVALID;
+	if (chk)
+		/* probably the second half of the message */
+		return IMG_IR_ERR_INVALID;
+
+	return addr << 8 | cmd;
+}
+
+/* Convert Sharp scancode to Sharp data filter */
+static int img_ir_sharp_filter(const struct img_ir_sc_filter *in,
+			       struct img_ir_filter *out, u64 protocols)
+{
+	unsigned int addr, cmd, exp = 0, chk = 0;
+	unsigned int addr_m, cmd_m, exp_m = 0, chk_m = 0;
+
+	addr   = (in->data >> 8) & 0x1f;
+	addr_m = (in->mask >> 8) & 0x1f;
+	cmd    = (in->data >> 0) & 0xff;
+	cmd_m  = (in->mask >> 0) & 0xff;
+	if (cmd_m) {
+		/* if filtering commands, we can only match the first part */
+		exp   = 1;
+		exp_m = 1;
+		chk   = 0;
+		chk_m = 1;
+	}
+
+	out->data = addr        |
+		    cmd   <<  5 |
+		    exp   << 13 |
+		    chk   << 14;
+	out->mask = addr_m      |
+		    cmd_m <<  5 |
+		    exp_m << 13 |
+		    chk_m << 14;
+
+	return 0;
+}
+
+/*
+ * Sharp decoder
+ * See also http://www.sbprojects.com/knowledge/ir/sharp.php
+ */
+static struct img_ir_decoder img_ir_sharp = {
+	.type = RC_BIT_SHARP,
+	.control = {
+		.decoden = 0,
+		.decodend2 = 1,
+		.code_type = IMG_IR_CODETYPE_PULSEDIST,
+		.d1validsel = 1,
+	},
+	/* main timings */
+	.timings = {
+		/* 0 symbol */
+		.s10 = {
+			.pulse = { 320	/* 320 us */ },
+			.space = { 680	/* 1 ms period */ },
+		},
+		/* 1 symbol */
+		.s11 = {
+			.pulse = { 320	/* 320 us */ },
+			.space = { 1680	/* 2 ms period */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 15,
+			.maxlen = 15,
+			.ft_min = 5000,	/* 5 ms */
+		},
+	},
+	/* scancode logic */
+	.scancode = img_ir_sharp_scancode,
+	.filter = img_ir_sharp_filter,
+};
+
+static int __init img_ir_sharp_init(void)
+{
+	return img_ir_register_decoder(&img_ir_sharp);
+}
+module_init(img_ir_sharp_init);
+
+static void __exit img_ir_sharp_exit(void)
+{
+	img_ir_unregister_decoder(&img_ir_sharp);
+}
+module_exit(img_ir_sharp_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR Sharp protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img-sony.c b/drivers/media/rc/ir-img-sony.c
new file mode 100644
index 0000000..8f69139
--- /dev/null
+++ b/drivers/media/rc/ir-img-sony.c
@@ -0,0 +1,163 @@
+/*
+ * ImgTec IR Decoder setup for Sony (SIRC) protocol.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/module.h>
+
+#include "ir-img.h"
+
+/* Convert Sony data to a scancode */
+static int img_ir_sony_scancode(int len, u64 raw, u64 protocols)
+{
+	unsigned int dev, subdev, func;
+
+	switch (len) {
+	case 12:
+		if (!(protocols & RC_BIT_SONY12))
+			goto invalid;
+		func   = raw & 0x7f;	/* first 7 bits */
+		raw    >>= 7;
+		dev    = raw & 0x1f;	/* next 5 bits */
+		subdev = 0;
+		break;
+	case 15:
+		if (!(protocols & RC_BIT_SONY15))
+			goto invalid;
+		func   = raw & 0x7f;	/* first 7 bits */
+		raw    >>= 7;
+		dev    = raw & 0xff;	/* next 8 bits */
+		subdev = 0;
+		break;
+	case 20:
+		if (!(protocols & RC_BIT_SONY20))
+			goto invalid;
+		func   = raw & 0x7f;	/* first 7 bits */
+		raw    >>= 7;
+		dev    = raw & 0x1f;	/* next 5 bits */
+		raw    >>= 5;
+		subdev = raw & 0xff;	/* next 8 bits */
+		break;
+	default:
+invalid:
+		return IMG_IR_ERR_INVALID;
+	}
+	return dev << 16 | subdev << 8 | func;
+}
+
+/* Convert Sony scancode to Sony data filter */
+static int img_ir_sony_filter(const struct img_ir_sc_filter *in,
+			      struct img_ir_filter *out, u64 protocols)
+{
+	unsigned int dev, subdev, func;
+	unsigned int dev_m, subdev_m, func_m;
+	unsigned int len = 0;
+
+	dev      = (in->data >> 16) & 0xff;
+	dev_m    = (in->mask >> 16) & 0xff;
+	subdev   = (in->data >> 8)  & 0xff;
+	subdev_m = (in->mask >> 8)  & 0xff;
+	func     = (in->data >> 0)  & 0x7f;
+	func_m   = (in->mask >> 0)  & 0x7f;
+
+	if (subdev & subdev_m) {
+		/* can't encode subdev and higher device bits */
+		if (dev & dev_m & 0xe0)
+			return -EINVAL;
+		/* subdevice (extended) bits only in 20 bit encoding */
+		if (!(protocols & RC_BIT_SONY20))
+			return -EINVAL;
+		len = 20;
+		dev_m &= 0x1f;
+	} else if (dev & dev_m & 0xe0) {
+		/* upper device bits only in 15 bit encoding */
+		if (!(protocols & RC_BIT_SONY15))
+			return -EINVAL;
+		len = 15;
+		subdev_m = 0;
+	} else {
+		/*
+		 * The hardware mask cannot distinguish high device bits and low
+		 * extended bits, so logically AND those bits of the masks
+		 * together.
+		 */
+		subdev_m &= (dev_m >> 5) | 0xf8;
+		dev_m &= 0x1f;
+	}
+
+	/* ensure there aren't any bits straying between fields */
+	dev &= dev_m;
+	subdev &= subdev_m;
+
+	/* write the hardware filter */
+	out->data = func          |
+		    dev      << 7 |
+		    subdev   << 15;
+	out->mask = func_m        |
+		    dev_m    << 7 |
+		    subdev_m << 15;
+
+	if (len) {
+		out->minlen = len;
+		out->maxlen = len;
+	}
+	return 0;
+}
+
+/*
+ * Sony SIRC decoder
+ * See also http://www.sbprojects.com/knowledge/ir/sirc.php
+ *          http://picprojects.org.uk/projects/sirc/sonysirc.pdf
+ */
+static struct img_ir_decoder img_ir_sony = {
+	.type = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
+	.control = {
+		.decoden = 1,
+		.code_type = IMG_IR_CODETYPE_PULSELEN,
+	},
+	/* main timings */
+	.unit = 600000, /* 600 us */
+	.timings = {
+		/* leader symbol */
+		.ldr = {
+			.pulse = { 4	/* 2.4 ms */ },
+			.space = { 1	/* 600 us */ },
+		},
+		/* 0 symbol */
+		.s00 = {
+			.pulse = { 1	/* 600 us */ },
+			.space = { 1	/* 600 us */ },
+		},
+		/* 1 symbol */
+		.s01 = {
+			.pulse = { 2	/* 1.2 ms */ },
+			.space = { 1	/* 600 us */ },
+		},
+		/* free time */
+		.ft = {
+			.minlen = 12,
+			.maxlen = 20,
+			.ft_min = 10,	/* 6 ms */
+		},
+	},
+	/* scancode logic */
+	.scancode = img_ir_sony_scancode,
+	.filter = img_ir_sony_filter,
+};
+
+static int __init img_ir_sony_init(void)
+{
+	return img_ir_register_decoder(&img_ir_sony);
+}
+module_init(img_ir_sony_init);
+
+static void __exit img_ir_sony_exit(void)
+{
+	img_ir_unregister_decoder(&img_ir_sony);
+}
+module_exit(img_ir_sony_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR Sony SIRC protocol support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img.c b/drivers/media/rc/ir-img.c
new file mode 100644
index 0000000..b4509af
--- /dev/null
+++ b/drivers/media/rc/ir-img.c
@@ -0,0 +1,1493 @@
+/*
+ * ImgTec IR Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010,2011,2012 Imagination Technologies Ltd.
+ *
+ * This ties into the input subsystem using the IR-core. When some more IR
+ * remote control specific interfaces go upstream this driver should be updated
+ * accordingly (e.g. for changing protocol).
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <media/rc-core.h>
+#include <asm/soc-tz1090/clock.h>
+#include "ir-img.h"
+
+#ifndef CONFIG_IR_IMG_RAW
+/* Decoder list */
+static DEFINE_SPINLOCK(img_ir_decoders_lock);
+static struct img_ir_decoder *img_ir_decoders;
+static struct img_ir_priv *img_ir_privs;
+
+#define IMG_IR_F_FILTER		0x00000001	/* enable filtering */
+#define IMG_IR_F_WAKE		0x00000002	/* enable waking */
+
+enum img_ir_mode {
+	IMG_IR_M_NORMAL,
+	IMG_IR_M_REPEATING,
+#ifdef CONFIG_PM_SLEEP
+	IMG_IR_M_WAKE,
+#endif
+};
+#endif
+
+/* code type quirks */
+
+#define IMG_IR_QUIRK_CODE_BROKEN	0x1	/* Decode is broken */
+#define IMG_IR_QUIRK_CODE_LEN_INCR	0x2	/* Bit length needs increment */
+
+/**
+ * struct img_ir_priv - Private driver data.
+ * @next:		Next IR device's private driver data (to form a linked
+ *			list).
+ * @dev:		Platform device.
+ * @irq:		IRQ number.
+ * @reg_base:		Iomem base address of IR register block.
+ * @lock:		Protects IR registers and variables in this struct.
+ * @rdev:		Remote control device
+ * @ct_quirks:		Quirk bits for each code type.
+ * @last_status:	Last raw status bits.
+ * @end_timer:		Timer until repeat timeout.
+ * @clk_nb:		Notifier block for clock notify events.
+ * @decoder:		Current decoder settings.
+ * @enabled_protocols:	Currently enabled protocols.
+ * @clk_hz:		Current clock rate in Hz.
+ * @flags:		IMG_IR_F_*.
+ * @filter:		HW filter for normal events (derived from sc_filter).
+ * @wake_filter:	HW filter for wake event (derived from sc_wake_filter).
+ * @sc_filter:		Current scancode filter.
+ * @sc_wake_filter:	Current scancode filter for wake events.
+ * @mode:		Current decode mode.
+ */
+struct img_ir_priv {
+	/* this priv sits in a global list protected by img_ir_decoders_lock */
+	struct img_ir_priv *next;
+
+	struct device *dev;
+	int irq;
+	void __iomem *reg_base;
+	spinlock_t lock;
+	struct rc_dev *rdev;
+	unsigned int ct_quirks[4];
+
+#ifdef CONFIG_IR_IMG_RAW
+	u32 last_status;
+#else
+	struct notifier_block clk_nb;
+	struct timer_list end_timer;
+	struct img_ir_decoder *decoder;
+	u64 enabled_protocols;
+	unsigned long clk_hz;
+	unsigned int flags;
+	struct img_ir_filter filter;
+	struct img_ir_filter wake_filter;
+
+	/* filters in terms of scancodes */
+	struct img_ir_sc_filter sc_filter;
+	struct img_ir_sc_filter sc_wake_filter;
+
+	enum img_ir_mode mode;
+#endif
+};
+
+/* Hardware access */
+
+static inline void img_ir_write(struct img_ir_priv *priv,
+				unsigned int reg_offs, unsigned int data)
+{
+	iowrite32(data, priv->reg_base + reg_offs);
+}
+
+static inline unsigned int img_ir_read(struct img_ir_priv *priv,
+				       unsigned int reg_offs)
+{
+	return ioread32(priv->reg_base + reg_offs);
+}
+
+#ifndef CONFIG_IR_IMG_RAW
+
+/* functions for preprocessing timings, ensuring max is set */
+
+static void img_ir_timing_preprocess(struct img_ir_timing_range *range,
+				     unsigned int unit)
+{
+	if (range->max < range->min)
+		range->max = range->min;
+	if (unit) {
+		/* multiply by unit and convert to microseconds */
+		range->min = (range->min*unit)/1000;
+		range->max = (range->max*unit + 999)/1000; /* round up */
+	}
+}
+
+static void img_ir_symbol_timing_preprocess(struct img_ir_symbol_timing *timing,
+					    unsigned int unit)
+{
+	img_ir_timing_preprocess(&timing->pulse, unit);
+	img_ir_timing_preprocess(&timing->space, unit);
+}
+
+static void img_ir_timings_preprocess(struct img_ir_timings *timings,
+				      unsigned int unit)
+{
+	img_ir_symbol_timing_preprocess(&timings->ldr, unit);
+	img_ir_symbol_timing_preprocess(&timings->s00, unit);
+	img_ir_symbol_timing_preprocess(&timings->s01, unit);
+	img_ir_symbol_timing_preprocess(&timings->s10, unit);
+	img_ir_symbol_timing_preprocess(&timings->s11, unit);
+	/* ft_min is expressed in units too, so convert it as well */
+	if (unit)
+		/* multiply by unit and convert to microseconds (round up) */
+		timings->ft.ft_min = (timings->ft.ft_min*unit + 999)/1000;
+}
+
+/* functions for filling empty fields with defaults */
+
+static void img_ir_timing_defaults(struct img_ir_timing_range *range,
+				   struct img_ir_timing_range *defaults)
+{
+	if (!range->min)
+		range->min = defaults->min;
+	if (!range->max)
+		range->max = defaults->max;
+}
+
+static void img_ir_symbol_timing_defaults(struct img_ir_symbol_timing *timing,
+					  struct img_ir_symbol_timing *defaults)
+{
+	img_ir_timing_defaults(&timing->pulse, &defaults->pulse);
+	img_ir_timing_defaults(&timing->space, &defaults->space);
+}
+
+static void img_ir_timings_defaults(struct img_ir_timings *timings,
+				    struct img_ir_timings *defaults)
+{
+	img_ir_symbol_timing_defaults(&timings->ldr, &defaults->ldr);
+	img_ir_symbol_timing_defaults(&timings->s00, &defaults->s00);
+	img_ir_symbol_timing_defaults(&timings->s01, &defaults->s01);
+	img_ir_symbol_timing_defaults(&timings->s10, &defaults->s10);
+	img_ir_symbol_timing_defaults(&timings->s11, &defaults->s11);
+	if (!timings->ft.ft_min)
+		timings->ft.ft_min = defaults->ft.ft_min;
+}
+
+/* functions for converting timings to register values */
+
+/**
+ * img_ir_control() - Convert control struct to control register value.
+ * @control:	Control data
+ *
+ * Returns:	The control register value equivalent of @control.
+ */
+static u32 img_ir_control(struct img_ir_control *control)
+{
+	u32 ctrl = control->code_type << IMG_IR_CODETYPE_SHIFT;
+	if (control->decoden)
+		ctrl |= IMG_IR_DECODEN;
+	if (control->hdrtog)
+		ctrl |= IMG_IR_HDRTOG;
+	if (control->ldrdec)
+		ctrl |= IMG_IR_LDRDEC;
+	if (control->decodinpol)
+		ctrl |= IMG_IR_DECODINPOL;
+	if (control->bitorien)
+		ctrl |= IMG_IR_BITORIEN;
+	if (control->d1validsel)
+		ctrl |= IMG_IR_D1VALIDSEL;
+	if (control->bitinv)
+		ctrl |= IMG_IR_BITINV;
+	if (control->decodend2)
+		ctrl |= IMG_IR_DECODEND2;
+	if (control->bitoriend2)
+		ctrl |= IMG_IR_BITORIEND2;
+	if (control->bitinvd2)
+		ctrl |= IMG_IR_BITINVD2;
+	return ctrl;
+}
+
+/**
+ * img_ir_timing_range_convert() - Convert microsecond range.
+ * @out:	Output timing range in clock cycles with a shift.
+ * @in:		Input timing range in microseconds.
+ * @tolerance:	Tolerance as a fraction of 128 (roughly percent).
+ * @clock_hz:	IR clock rate in Hz.
+ * @shift:	Shift of output units.
+ *
+ * Converts min and max from microseconds to IR clock cycles, applies a
+ * tolerance, and shifts for the register, rounding in the right direction.
+ * Note that in and out can safely be the same object.
+ */
+static void img_ir_timing_range_convert(struct img_ir_timing_range *out,
+					const struct img_ir_timing_range *in,
+					unsigned int tolerance,
+					unsigned long clock_hz,
+					unsigned int shift)
+{
+	unsigned int min = in->min;
+	unsigned int max = in->max;
+	/* add a tolerance */
+	min = min - (min*tolerance >> 7);
+	max = max + (max*tolerance >> 7);
+	/* convert from microseconds into clock cycles */
+	min = min*clock_hz / 1000000;
+	max = (max*clock_hz + 999999) / 1000000; /* round up */
+	/* apply shift and copy to output */
+	out->min = min >> shift;
+	out->max = (max + ((1 << shift) - 1)) >> shift; /* round up */
+}
+
+/**
+ * img_ir_symbol_timing() - Convert symbol timing struct to register value.
+ * @timing:	Symbol timing data
+ * @tolerance:	Timing tolerance where 0-128 represents 0-100%
+ * @clock_hz:	Frequency of source clock in Hz
+ * @pd_shift:	Shift to apply to symbol period
+ * @w_shift:	Shift to apply to symbol width
+ *
+ * Returns:	Symbol timing register value based on arguments.
+ */
+static u32 img_ir_symbol_timing(const struct img_ir_symbol_timing *timing,
+				unsigned int tolerance,
+				unsigned long clock_hz,
+				unsigned int pd_shift,
+				unsigned int w_shift)
+{
+	struct img_ir_timing_range hw_pulse, hw_period;
+	/* we calculate period in hw_period, then convert in place */
+	hw_period.min = timing->pulse.min + timing->space.min;
+	hw_period.max = timing->pulse.max + timing->space.max;
+	img_ir_timing_range_convert(&hw_period, &hw_period,
+			tolerance, clock_hz, pd_shift);
+	img_ir_timing_range_convert(&hw_pulse, &timing->pulse,
+			tolerance, clock_hz, w_shift);
+	/* construct register value */
+	return	(hw_period.max	<< IMG_IR_PD_MAX_SHIFT)	|
+		(hw_period.min	<< IMG_IR_PD_MIN_SHIFT)	|
+		(hw_pulse.max	<< IMG_IR_W_MAX_SHIFT)	|
+		(hw_pulse.min	<< IMG_IR_W_MIN_SHIFT);
+}
+
+/**
+ * img_ir_free_timing() - Convert free time timing struct to register value.
+ * @timing:	Free symbol timing data
+ * @clock_hz:	Source clock frequency in Hz
+ *
+ * Returns:	Free symbol timing register value.
+ */
+static u32 img_ir_free_timing(const struct img_ir_free_timing *timing,
+			      unsigned long clock_hz)
+{
+	unsigned int minlen, maxlen, ft_min;
+	/* minlen is only 5 bits, and round minlen to multiple of 2 */
+	if (timing->minlen < 30)
+		minlen = timing->minlen & -2;
+	else
+		minlen = 30;
+	/* maxlen has maximum value of 48, and round maxlen to multiple of 2 */
+	if (timing->maxlen < 48)
+		maxlen = (timing->maxlen + 1) & -2;
+	else
+		maxlen = 48;
+	/* convert and shift ft_min, rounding upwards */
+	ft_min = (timing->ft_min*clock_hz + 999999) / 1000000;
+	ft_min = (ft_min + 7) >> 3;
+	/* construct register value */
+	return	(maxlen		<< IMG_IR_MAXLEN_SHIFT)	|
+		(minlen		<< IMG_IR_MINLEN_SHIFT)	|
+		(ft_min		<< IMG_IR_FT_MIN_SHIFT);
+}
+
+/**
+ * img_ir_free_timing_dynamic() - Update free time register value.
+ * @st_ft:	Static free time register value from img_ir_free_timing.
+ * @filter:	Current filter which may additionally restrict min/max len.
+ *
+ * Returns:	Updated free time register value based on the current filter.
+ */
+static u32 img_ir_free_timing_dynamic(u32 st_ft, struct img_ir_filter *filter)
+{
+	unsigned int minlen, maxlen, newminlen, newmaxlen;
+
+	/* round minlen, maxlen to multiple of 2 */
+	newminlen = filter->minlen & -2;
+	newmaxlen = (filter->maxlen + 1) & -2;
+	/* extract min/max len from register */
+	minlen = (st_ft & IMG_IR_MINLEN) >> IMG_IR_MINLEN_SHIFT;
+	maxlen = (st_ft & IMG_IR_MAXLEN) >> IMG_IR_MAXLEN_SHIFT;
+	/* if the new values are more restrictive, update the register value */
+	/*
+	 * NOTE(review): unlike img_ir_free_timing() there is no clamp to the
+	 * 5-bit / 48 hardware limits here; the static value already bounds
+	 * the fields, since we only ever narrow them — confirm.
+	 */
+	if (newminlen > minlen) {
+		st_ft &= ~IMG_IR_MINLEN;
+		st_ft |= newminlen << IMG_IR_MINLEN_SHIFT;
+	}
+	if (newmaxlen < maxlen) {
+		st_ft &= ~IMG_IR_MAXLEN;
+		st_ft |= newmaxlen << IMG_IR_MAXLEN_SHIFT;
+	}
+	return st_ft;
+}
+
+/**
+ * img_ir_timings_convert() - Convert timings to register values
+ * @regs:	Output timing register values
+ * @timings:	Input timing data
+ * @tolerance:	Timing tolerance where 0-128 represents 0-100%
+ * @clock_hz:	Source clock frequency in Hz
+ *
+ * The final two arguments to img_ir_symbol_timing() are log2 dividers for
+ * the pd (pulse distance) and pulse width fields respectively.
+ */
+static void img_ir_timings_convert(struct img_ir_timing_regvals *regs,
+				   const struct img_ir_timings *timings,
+				   unsigned int tolerance,
+				   unsigned int clock_hz)
+{
+	/* leader symbol timings are divided by 16 */
+	regs->ldr = img_ir_symbol_timing(&timings->ldr, tolerance, clock_hz,
+			4, 4);
+	/* other symbol timings, pd fields only are divided by 2 */
+	regs->s00 = img_ir_symbol_timing(&timings->s00, tolerance, clock_hz,
+			1, 0);
+	regs->s01 = img_ir_symbol_timing(&timings->s01, tolerance, clock_hz,
+			1, 0);
+	regs->s10 = img_ir_symbol_timing(&timings->s10, tolerance, clock_hz,
+			1, 0);
+	regs->s11 = img_ir_symbol_timing(&timings->s11, tolerance, clock_hz,
+			1, 0);
+	regs->ft = img_ir_free_timing(&timings->ft, clock_hz);
+}
+
+/**
+ * img_ir_decoder_preprocess() - Preprocess timings in decoder.
+ * @decoder:	Decoder to be preprocessed.
+ *
+ * Ensures that the symbol timing ranges are valid with respect to ordering, and
+ * does some fixed conversion on them.
+ */
+static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
+{
+	/* fill in implicit fields */
+	img_ir_timings_preprocess(&decoder->timings, decoder->unit);
+
+	/* do the same for repeat timings if applicable */
+	if (decoder->repeat) {
+		img_ir_timings_preprocess(&decoder->rtimings, decoder->unit);
+		img_ir_timings_defaults(&decoder->rtimings, &decoder->timings);
+	}
+
+	/* calculate control value */
+	decoder->reg_ctrl = img_ir_control(&decoder->control);
+}
+
+/**
+ * img_ir_decoder_convert() - Generate internal timings in decoder.
+ * @decoder:	Decoder to be converted to internal timings.
+ * @tolerance:	Tolerance as percent.
+ * @clock_hz:	IR clock rate in Hz.
+ *
+ * Fills out the repeat timings and timing register values for a specific
+ * tolerance and clock rate.
+ */
+static void img_ir_decoder_convert(struct img_ir_decoder *decoder,
+				   unsigned int tolerance,
+				   unsigned int clock_hz)
+{
+	/* rescale tolerance from percent to the 0-128 range used internally */
+	tolerance = tolerance * 128 / 100;
+
+	/* record clock rate in case timings need recalculating */
+	decoder->clk_hz = clock_hz;
+
+	/* fill in implicit fields and calculate register values */
+	img_ir_timings_convert(&decoder->reg_timings, &decoder->timings,
+			       tolerance, clock_hz);
+
+	/* do the same for repeat timings if applicable */
+	if (decoder->repeat)
+		img_ir_timings_convert(&decoder->reg_rtimings,
+				       &decoder->rtimings, tolerance, clock_hz);
+}
+
+/**
+ * img_ir_check_timings() - Check if decoder timings need updating.
+ * @priv:	IR private data.
+ *
+ * Reconverts the current decoder's timings (with a fixed 10% tolerance) if
+ * the IR clock rate has changed since they were last calculated.
+ */
+static void img_ir_check_timings(struct img_ir_priv *priv)
+{
+	struct img_ir_decoder *dec = priv->decoder;
+	if (dec->clk_hz != priv->clk_hz) {
+		dev_dbg(priv->dev, "converting tolerance=%d%%, clk=%lu\n",
+			10, priv->clk_hz);
+		img_ir_decoder_convert(dec, 10, priv->clk_hz);
+	}
+}
+
+/**
+ * img_ir_write_timings() - Write timings to the hardware now
+ * @priv:	IR private data
+ * @regs:	Timing register values to write
+ * @filter:	Current filter data or NULL
+ *
+ * Write timing register values @regs to the hardware, taking into account the
+ * current filter pointed to by @filter which may impose restrictions on the
+ * length of the expected data.
+ */
+static void img_ir_write_timings(struct img_ir_priv *priv,
+				 struct img_ir_timing_regvals *regs,
+				 struct img_ir_filter *filter)
+{
+	/* filter may be more restrictive to minlen, maxlen */
+	u32 ft = regs->ft;
+	if (filter)
+		ft = img_ir_free_timing_dynamic(regs->ft, filter);
+	/* write to registers */
+	img_ir_write(priv, IMG_IR_LEAD_SYMB_TIMING, regs->ldr);
+	img_ir_write(priv, IMG_IR_S00_SYMB_TIMING, regs->s00);
+	img_ir_write(priv, IMG_IR_S01_SYMB_TIMING, regs->s01);
+	img_ir_write(priv, IMG_IR_S10_SYMB_TIMING, regs->s10);
+	img_ir_write(priv, IMG_IR_S11_SYMB_TIMING, regs->s11);
+	img_ir_write(priv, IMG_IR_FREE_SYMB_TIMING, ft);
+	dev_dbg(priv->dev, "timings: ldr=%#x, s=[%#x, %#x, %#x, %#x], ft=%#x\n",
+		regs->ldr, regs->s00, regs->s01, regs->s10, regs->s11, ft);
+}
+
+/**
+ * img_ir_write_timings_normal() - Write normal timings to the hardware now
+ * @priv:	IR private data
+ * @regs:	Normal timing register values to write
+ *
+ * Write the normal (non-wake) timing register values @regs to the hardware,
+ * taking into account the current filter.
+ */
+static void img_ir_write_timings_normal(struct img_ir_priv *priv,
+					struct img_ir_timing_regvals *regs)
+{
+	struct img_ir_filter *filter = NULL;
+	if (priv->flags & IMG_IR_F_FILTER)
+		filter = &priv->filter;
+	img_ir_write_timings(priv, regs, filter);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * img_ir_write_timings_wake() - Write wake timings to the hardware now
+ * @priv:	IR private data
+ * @regs:	Wake timing register values to write
+ *
+ * Write the wake timing register values @regs to the hardware, taking into
+ * account the current wake filter.
+ */
+static void img_ir_write_timings_wake(struct img_ir_priv *priv,
+				      struct img_ir_timing_regvals *regs)
+{
+	struct img_ir_filter *filter = NULL;
+	if (priv->flags & IMG_IR_F_WAKE)
+		filter = &priv->wake_filter;
+	img_ir_write_timings(priv, regs, filter);
+}
+#endif
+
+/**
+ * img_ir_write_filter() - Write filter data/mask registers.
+ * @priv:	IR private data (caller must hold priv->lock).
+ * @filter:	Filter to program, or NULL to clear the mask (match nothing).
+ */
+static void img_ir_write_filter(struct img_ir_priv *priv,
+				struct img_ir_filter *filter)
+{
+	if (filter) {
+		dev_dbg(priv->dev, "IR filter=%016llx & %016llx\n",
+			(unsigned long long)filter->data,
+			(unsigned long long)filter->mask);
+		img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_LW, (u32)filter->data);
+		img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_UP, (u32)(filter->data
+									>> 32));
+		img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, (u32)filter->mask);
+		img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, (u32)(filter->mask
+									>> 32));
+	} else {
+		/* clearing the mask is enough; data registers are ignored */
+		dev_dbg(priv->dev, "IR clearing filter\n");
+		img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, 0);
+		img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, 0);
+	}
+}
+
+/* caller must have lock */
+static void _img_ir_set_filter(struct img_ir_priv *priv,
+			       struct img_ir_filter *filter)
+{
+	u32 irq_en, irq_on;
+
+	irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
+	if (filter) {
+		/* Only use the match interrupt */
+		priv->filter = *filter;
+		priv->flags |= IMG_IR_F_FILTER;
+		irq_on = IMG_IR_IRQ_DATA_MATCH;
+		irq_en &= ~(IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID);
+	} else {
+		/* Only use the valid interrupt */
+		priv->flags &= ~IMG_IR_F_FILTER;
+		irq_en &= ~IMG_IR_IRQ_DATA_MATCH;
+		irq_on = IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID;
+	}
+	irq_en |= irq_on;
+
+	img_ir_write_filter(priv, filter);
+	/* clear any interrupts we're enabling so we don't handle old ones */
+	img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_on);
+	img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
+}
+
+/* caller must have lock */
+static void _img_ir_set_wake_filter(struct img_ir_priv *priv,
+				    struct img_ir_filter *filter)
+{
+	/* wake filter only takes effect on suspend (img_ir_enable_wake) */
+	if (filter) {
+		/* Enable wake, and copy filter for later */
+		priv->wake_filter = *filter;
+		priv->flags |= IMG_IR_F_WAKE;
+	} else {
+		/* Disable wake */
+		priv->flags &= ~IMG_IR_F_WAKE;
+	}
+}
+
+/* caller must have lock */
+static void _img_ir_update_filters(struct img_ir_priv *priv)
+{
+	struct img_ir_filter filter;
+	/* -1 means "not attempted", so an unset filter clears its mask below */
+	int ret1 = -1, ret2 = -1;
+
+	/* clear raw filters */
+	_img_ir_set_filter(priv, NULL);
+	_img_ir_set_wake_filter(priv, NULL);
+
+	/* convert scancode filters to raw filters and try to set them */
+	if (priv->decoder && priv->decoder->filter) {
+		if (priv->sc_filter.mask) {
+			filter.minlen = 0;
+			filter.maxlen = ~0;
+			dev_dbg(priv->dev, "IR scancode filter=%08x & %08x\n",
+				priv->sc_filter.data,
+				priv->sc_filter.mask);
+			ret1 = priv->decoder->filter(&priv->sc_filter, &filter,
+						     priv->enabled_protocols);
+			if (!ret1) {
+				dev_dbg(priv->dev, "IR raw filter=%016llx & %016llx\n",
+					(unsigned long long)filter.data,
+					(unsigned long long)filter.mask);
+				_img_ir_set_filter(priv, &filter);
+			}
+		}
+		if (priv->sc_wake_filter.mask) {
+			filter.minlen = 0;
+			filter.maxlen = ~0;
+			dev_dbg(priv->dev, "IR scancode wake filter=%08x & %08x\n",
+				priv->sc_wake_filter.data,
+				priv->sc_wake_filter.mask);
+			ret2 = priv->decoder->filter(&priv->sc_wake_filter,
+						     &filter,
+						     priv->enabled_protocols);
+			if (!ret2) {
+				dev_dbg(priv->dev, "IR raw wake filter=%016llx & %016llx\n",
+					(unsigned long long)filter.data,
+					(unsigned long long)filter.mask);
+				_img_ir_set_wake_filter(priv, &filter);
+			}
+		}
+	}
+
+	/*
+	 * if either of the filters couldn't get set, clear the corresponding
+	 * scancode filter mask.
+	 */
+	if (ret1)
+		priv->sc_filter.mask = 0;
+	if (ret2)
+		priv->sc_wake_filter.mask = 0;
+}
+
+/* locked wrapper around _img_ir_update_filters() */
+static void img_ir_update_filters(struct img_ir_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	_img_ir_update_filters(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/**
+ * img_ir_set_decoder() - Set the current decoder.
+ * @priv:	IR private data.
+ * @decoder:	Decoder to use with immediate effect, or NULL to disable
+ *		decoding entirely.
+ * @proto:	Protocol bitmap (or 0 to use decoder->type).
+ *
+ * Quiesces the hardware, discards any pending data, clears all filters and
+ * enabled protocols, then (if @decoder is non-NULL) programs the new decoder's
+ * timings and control value and re-enables decoding.
+ */
+static void img_ir_set_decoder(struct img_ir_priv *priv,
+			       struct img_ir_decoder *decoder,
+			       u64 proto)
+{
+	unsigned long flags;
+	u32 ir_status;
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* switch off and disable interrupts */
+	img_ir_write(priv, IMG_IR_CONTROL, 0);
+	img_ir_write(priv, IMG_IR_IRQ_ENABLE, 0);
+	img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_ALL);
+
+	/* ack any data already detected */
+	ir_status = img_ir_read(priv, IMG_IR_STATUS);
+	if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
+		ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
+		img_ir_write(priv, IMG_IR_STATUS, ir_status);
+		/* drain the data registers so stale data isn't picked up */
+		img_ir_read(priv, IMG_IR_DATA_LW);
+		img_ir_read(priv, IMG_IR_DATA_UP);
+	}
+
+	/* clear the scancode filters */
+	priv->sc_filter.data = 0;
+	priv->sc_filter.mask = 0;
+	priv->sc_wake_filter.data = 0;
+	priv->sc_wake_filter.mask = 0;
+
+	/* update (clear) the raw filters */
+	_img_ir_update_filters(priv);
+
+	/* clear the enabled protocols */
+	priv->enabled_protocols = 0;
+
+	/* switch decoder */
+	priv->decoder = decoder;
+	if (!decoder) {
+		goto unlock;
+	}
+
+	priv->mode = IMG_IR_M_NORMAL;
+
+	/* set the enabled protocols */
+	if (!proto)
+		proto = decoder->type;
+	priv->enabled_protocols = proto;
+
+	/* write the new timings */
+	img_ir_check_timings(priv);
+	img_ir_write_timings_normal(priv, &decoder->reg_timings);
+
+	/* set up and enable */
+	img_ir_write(priv, IMG_IR_CONTROL, decoder->reg_ctrl);
+
+
+unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/**
+ * img_ir_decoder_compatible() - Find whether a decoder will work with a device.
+ * @priv:	IR private data.
+ * @dec:	Decoder to check.
+ *
+ * Returns:	true if @dec is compatible with the device @priv refers to.
+ */
+static bool img_ir_decoder_compatible(struct img_ir_priv *priv,
+				      struct img_ir_decoder *dec)
+{
+	unsigned int ct;
+
+	/* don't accept decoders using code types which aren't supported */
+	ct = dec->control.code_type;
+	if (priv->ct_quirks[ct] & IMG_IR_QUIRK_CODE_BROKEN)
+		return false;
+
+	return true;
+}
+
+/**
+ * img_ir_allowed_protos() - Get allowed protocols from global decoder list.
+ * @priv:	IR private data.
+ *
+ * img_ir_decoders_lock must be locked by caller.
+ *
+ * Returns:	Mask of protocols supported by the device @priv refers to.
+ *
+ * NOTE(review): dec->type appears to be u64 elsewhere in this file; an
+ * unsigned long accumulator may truncate it on 32-bit — confirm.
+ */
+static unsigned long img_ir_allowed_protos(struct img_ir_priv *priv)
+{
+	unsigned long protos = 0;
+	struct img_ir_decoder *dec;
+
+	for (dec = img_ir_decoders; dec; dec = dec->next)
+		if (img_ir_decoder_compatible(priv, dec))
+			protos |= dec->type;
+	return protos;
+}
+
+/**
+ * img_ir_update_allowed_protos() - Update devices with allowed protocols.
+ *
+ * img_ir_decoders_lock must be locked by caller.
+ */
+static void img_ir_update_allowed_protos(void)
+{
+	struct img_ir_priv *priv;
+
+	for (priv = img_ir_privs; priv; priv = priv->next)
+		priv->rdev->allowed_protos = img_ir_allowed_protos(priv);
+}
+
+/* Callback for changing protocol using sysfs */
+static int img_ir_change_protocol(struct rc_dev *data, u64 *ir_type)
+{
+	struct img_ir_priv *priv;
+	struct img_ir_decoder *dec;
+	int ret = -EINVAL;
+	priv = data->priv;
+
+	/* pick the first compatible decoder matching the requested protocols */
+	spin_lock(&img_ir_decoders_lock);
+	for (dec = img_ir_decoders; dec; dec = dec->next) {
+		if (!img_ir_decoder_compatible(priv, dec))
+			continue;
+		if (*ir_type & dec->type) {
+			/* report back the subset actually enabled */
+			*ir_type &= dec->type;
+			img_ir_set_decoder(priv, dec, *ir_type);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&img_ir_decoders_lock);
+	return ret;
+}
+
+/* Changes ir-core protocol device attribute */
+static void img_ir_set_protocol(struct img_ir_priv *priv, u64 proto)
+{
+	struct rc_dev *ir_dev = priv->rdev;
+
+	unsigned long flags;
+
+	spin_lock_irqsave(&ir_dev->rc_map.lock, flags);
+	ir_dev->rc_map.rc_type = proto;
+	spin_unlock_irqrestore(&ir_dev->rc_map.lock, flags);
+}
+
+/* Register an ir decoder */
+int img_ir_register_decoder(struct img_ir_decoder *dec)
+{
+	struct img_ir_priv *priv;
+
+	/* first preprocess decoder timings */
+	img_ir_decoder_preprocess(dec);
+
+	spin_lock(&img_ir_decoders_lock);
+	/* add to list */
+	dec->next = img_ir_decoders;
+	img_ir_decoders = dec;
+	img_ir_update_allowed_protos();
+	/* if it's the first decoder, start using it */
+	for (priv = img_ir_privs; priv; priv = priv->next) {
+		if (!priv->decoder && img_ir_decoder_compatible(priv, dec)) {
+			img_ir_set_protocol(priv, dec->type);
+			img_ir_set_decoder(priv, dec, 0);
+		}
+	}
+	spin_unlock(&img_ir_decoders_lock);
+	return 0;
+}
+EXPORT_SYMBOL(img_ir_register_decoder);
+
+/* Unregister an ir decoder */
+void img_ir_unregister_decoder(struct img_ir_decoder *dec)
+{
+	struct img_ir_priv *priv;
+
+	spin_lock(&img_ir_decoders_lock);
+	/* If the decoder is in use, stop it now */
+	for (priv = img_ir_privs; priv; priv = priv->next)
+		if (priv->decoder == dec) {
+			img_ir_set_protocol(priv, 0);
+			img_ir_set_decoder(priv, NULL, 0);
+		}
+	/* Remove from list of decoders (singly linked list unlink) */
+	if (img_ir_decoders == dec) {
+		img_ir_decoders = dec->next;
+	} else {
+		struct img_ir_decoder *cur;
+		for (cur = img_ir_decoders; cur; cur = cur->next)
+			if (dec == cur->next) {
+				cur->next = dec->next;
+				dec->next = NULL;
+				break;
+			}
+	}
+	img_ir_update_allowed_protos();
+	spin_unlock(&img_ir_decoders_lock);
+}
+EXPORT_SYMBOL(img_ir_unregister_decoder);
+
+/* Add a device to the global list and publish its allowed protocols */
+static void img_ir_register_device(struct img_ir_priv *priv)
+{
+	spin_lock(&img_ir_decoders_lock);
+	priv->next = img_ir_privs;
+	img_ir_privs = priv;
+	priv->rdev->allowed_protos = img_ir_allowed_protos(priv);
+	spin_unlock(&img_ir_decoders_lock);
+}
+
+/* Remove a device from the global list */
+static void img_ir_unregister_device(struct img_ir_priv *priv)
+{
+	struct img_ir_priv *cur;
+
+	spin_lock(&img_ir_decoders_lock);
+	if (img_ir_privs == priv)
+		img_ir_privs = priv->next;
+	else
+		for (cur = img_ir_privs; cur; cur = cur->next)
+			if (cur->next == priv) {
+				cur->next = priv->next;
+				break;
+			}
+	spin_unlock(&img_ir_decoders_lock);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * img_ir_enable_wake() - Switch to wake mode.
+ * @priv:	IR private data.
+ *
+ * Restricts interrupts to the wake filter match only, so a matching message
+ * can wake the system from suspend.
+ *
+ * Returns:	non-zero if the IR can wake the system.
+ */
+static int img_ir_enable_wake(struct img_ir_priv *priv)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->flags & IMG_IR_F_WAKE) {
+		/* interrupt only on a match */
+		img_ir_write(priv, IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_DATA_MATCH);
+		img_ir_write_filter(priv, &priv->wake_filter);
+		img_ir_write_timings_wake(priv, &priv->decoder->reg_timings);
+		priv->mode = IMG_IR_M_WAKE;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return ret;
+}
+
+/**
+ * img_ir_disable_wake() - Switch out of wake mode.
+ * @priv:	IR private data
+ *
+ * Restores the normal filter (or no filter) and normal timings on resume.
+ *
+ * Returns:	1 if the hardware should be allowed to wake from a sleep state.
+ *		0 otherwise.
+ */
+static int img_ir_disable_wake(struct img_ir_priv *priv)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->flags & IMG_IR_F_WAKE) {
+		/* restore normal filtering */
+		if (priv->flags & IMG_IR_F_FILTER) {
+			img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+				     IMG_IR_IRQ_DATA_MATCH);
+			img_ir_write_filter(priv, &priv->filter);
+		} else {
+			img_ir_write(priv, IMG_IR_IRQ_ENABLE,
+				     IMG_IR_IRQ_DATA_VALID |
+				     IMG_IR_IRQ_DATA2_VALID);
+			img_ir_write_filter(priv, NULL);
+		}
+		img_ir_write_timings_normal(priv, &priv->decoder->reg_timings);
+		priv->mode = IMG_IR_M_NORMAL;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return ret;
+}
+#endif
+
+/* lock must be held */
+static void img_ir_begin_repeat(struct img_ir_priv *priv)
+{
+	if (priv->mode == IMG_IR_M_NORMAL) {
+		struct img_ir_decoder *dec = priv->decoder;
+
+		/* switch to repeat timings (decode disabled while reloading) */
+		img_ir_write(priv, IMG_IR_CONTROL, 0);
+		priv->mode = IMG_IR_M_REPEATING;
+		img_ir_write_timings_normal(priv, &dec->reg_rtimings);
+		img_ir_write(priv, IMG_IR_CONTROL, dec->reg_ctrl);
+	}
+}
+
+/* lock must be held */
+static void img_ir_end_repeat(struct img_ir_priv *priv)
+{
+	if (priv->mode == IMG_IR_M_REPEATING) {
+		struct img_ir_decoder *dec = priv->decoder;
+
+		/* switch to normal timings (decode disabled while reloading) */
+		img_ir_write(priv, IMG_IR_CONTROL, 0);
+		priv->mode = IMG_IR_M_NORMAL;
+		img_ir_write_timings_normal(priv, &dec->reg_timings);
+		img_ir_write(priv, IMG_IR_CONTROL, dec->reg_ctrl);
+	}
+}
+
+/*
+ * Handle a newly received message of @len bits in @raw.
+ * Decodes it to a scancode (via the decoder callback if present, otherwise
+ * by masking the raw bits), reports it to rc-core, and manages the repeat
+ * timing window. Lock must be held.
+ */
+static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
+{
+	struct img_ir_decoder *dec = priv->decoder;
+	int scancode = IMG_IR_ERR_INVALID;
+	if (dec->scancode)
+		scancode = dec->scancode(len, raw, priv->enabled_protocols);
+	else if (len >= 32)
+		scancode = (u32)raw;
+	else
+		/* 1ULL keeps the shift well defined for len up to 31 */
+		scancode = (u32)raw & (u32)((1ULL << len) - 1);
+	dev_dbg(priv->dev, "data (%u bits) = %#llx\n",
+		len, (unsigned long long)raw);
+	if (scancode >= 0) {
+		dev_dbg(priv->dev, "decoded scan code %#x\n", scancode);
+		rc_keydown(priv->rdev, scancode, 0);
+		img_ir_end_repeat(priv);
+	} else if (scancode == IMG_IR_REPEATCODE) {
+		if (priv->mode == IMG_IR_M_REPEATING) {
+			dev_dbg(priv->dev, "decoded repeat code\n");
+			rc_repeat(priv->rdev);
+		} else {
+			dev_dbg(priv->dev, "decoded unexpected repeat code, ignoring\n");
+		}
+	} else {
+		/* any other negative scancode is an error; drop the message */
+		return;
+	}
+
+	if (dec->repeat) {
+		unsigned long interval;
+
+		img_ir_begin_repeat(priv);
+
+		/* update timer, but allowing for 1/8th tolerance */
+		interval = dec->repeat + (dec->repeat >> 3);
+		mod_timer(&priv->end_timer,
+			  jiffies + msecs_to_jiffies(interval));
+	}
+}
+
+/* timer function to end waiting for repeat. */
+static void img_ir_end_timer(unsigned long arg)
+{
+	unsigned long flags;
+	struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	img_ir_end_repeat(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+#else /* CONFIG_IR_IMG_RAW */
+
+/* raw mode keeps no global device list, so these are no-ops */
+static void img_ir_register_device(struct img_ir_priv *priv)
+{
+}
+
+static void img_ir_unregister_device(struct img_ir_priv *priv)
+{
+}
+#endif
+
+/*
+ * Interrupt handler: in raw mode forwards edges to the IR raw decoders,
+ * otherwise reads complete messages out of the data registers and passes
+ * them to img_ir_handle_data().
+ */
+static irqreturn_t img_ir_isr(int irq, void *dev_id)
+{
+	struct img_ir_priv *priv = dev_id;
+	u32 irq_status, ir_status;
+
+	spin_lock(&priv->lock);
+	/* we have to clear irqs before reading */
+	irq_status = img_ir_read(priv, IMG_IR_IRQ_STATUS);
+	img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_status);
+
+#ifdef CONFIG_IR_IMG_RAW
+	/* only report the raw signal changes */
+	if (irq_status & IMG_IR_IRQ_EDGE) {
+		struct rc_dev *rc_dev = priv->rdev;
+		int multiple;
+
+		/* find whether both rise and fall was detected */
+		multiple = ((irq_status & IMG_IR_IRQ_EDGE) == IMG_IR_IRQ_EDGE);
+		/*
+		 * If so, we need to see if the level has actually changed.
+		 * If it's just noise that we didn't have time to process,
+		 * there's no point reporting it.
+		 */
+		ir_status = img_ir_read(priv, IMG_IR_STATUS) & IMG_IR_IRRXD;
+		if (multiple && !(ir_status ^ priv->last_status))
+			goto unlock;
+		priv->last_status = ir_status;
+
+		/* report the edge to the IR raw decoders */
+		if (ir_status) /* low */
+			ir_raw_event_store_edge(rc_dev, IR_SPACE);
+		else /* high */
+			ir_raw_event_store_edge(rc_dev, IR_PULSE);
+		ir_raw_event_handle(priv->rdev);
+	}
+#else
+	/* use the decoders */
+	if (priv->decoder &&
+	    (irq_status & (IMG_IR_IRQ_DATA_MATCH |
+			   IMG_IR_IRQ_DATA_VALID |
+			   IMG_IR_IRQ_DATA2_VALID))) {
+		u32 len, lw, up;
+		unsigned int ct;
+		ir_status = img_ir_read(priv, IMG_IR_STATUS);
+		if (!(ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)))
+			goto unlock;
+		/* ack the data by clearing the valid bits */
+		ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
+		img_ir_write(priv, IMG_IR_STATUS, ir_status);
+
+		len = (ir_status & IMG_IR_RXDLEN) >> IMG_IR_RXDLEN_SHIFT;
+		/* some versions report wrong length for certain code types */
+		ct = priv->decoder->control.code_type;
+		if (priv->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR)
+			++len;
+
+		lw = img_ir_read(priv, IMG_IR_DATA_LW);
+		up = img_ir_read(priv, IMG_IR_DATA_UP);
+		img_ir_handle_data(priv, len, (u64)up << 32 | lw);
+	}
+#endif
+unlock:
+	spin_unlock(&priv->lock);
+	return IRQ_HANDLED;
+}
+
+/* Initial hardware setup: pick a decoder (or enable raw edge interrupts) */
+static void img_ir_setup(struct img_ir_priv *priv)
+{
+#ifdef CONFIG_IR_IMG_RAW
+	/* raw mode, just enable the interrupts */
+	img_ir_write(priv, IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_EDGE);
+#else
+	struct img_ir_decoder *dec;
+	spin_lock(&img_ir_decoders_lock);
+	/* Use the first available decoder (or disable stuff if NULL) */
+	for (dec = img_ir_decoders; dec; dec = dec->next) {
+		if (img_ir_decoder_compatible(priv, dec)) {
+			img_ir_set_protocol(priv, dec->type);
+			img_ir_set_decoder(priv, dec, 0);
+			goto unlock;
+		}
+	}
+	img_ir_set_decoder(priv, NULL, 0);
+unlock:
+	spin_unlock(&img_ir_decoders_lock);
+#endif
+}
+
+/**
+ * img_ir_probe_caps() - Probe capabilities of the hardware.
+ * @priv:	IR private data.
+ */
+static void img_ir_probe_caps(struct img_ir_priv *priv)
+{
+	/*
+	 * When a version of the block becomes available without these quirks,
+	 * they'll have to depend on the core revision.
+	 */
+	priv->ct_quirks[IMG_IR_CODETYPE_PULSELEN]
+		|= IMG_IR_QUIRK_CODE_LEN_INCR;
+	priv->ct_quirks[IMG_IR_CODETYPE_BIPHASE]
+		|= IMG_IR_QUIRK_CODE_BROKEN;
+	priv->ct_quirks[IMG_IR_CODETYPE_2BITPULSEPOS]
+		|= IMG_IR_QUIRK_CODE_BROKEN;
+}
+
+/* Log the core revision read from the hardware */
+static void img_ir_ident(struct img_ir_priv *priv)
+{
+	u32 core_rev = img_ir_read(priv, IMG_IR_CORE_REV);
+
+	dev_info(priv->dev,
+		 "IMG IR Decoder (%d.%d.%d.%d) probed successfully\n",
+		 (core_rev & IMG_IR_DESIGNER) >> IMG_IR_DESIGNER_SHIFT,
+		 (core_rev & IMG_IR_MAJOR_REV) >> IMG_IR_MAJOR_REV_SHIFT,
+		 (core_rev & IMG_IR_MINOR_REV) >> IMG_IR_MINOR_REV_SHIFT,
+		 (core_rev & IMG_IR_MAINT_REV) >> IMG_IR_MAINT_REV_SHIFT);
+#ifdef CONFIG_IR_IMG_RAW
+	dev_info(priv->dev, "IMG IR Decoder in raw mode\n");
+#endif
+}
+
+/* Kernel interface */
+
+#ifndef CONFIG_IR_IMG_RAW
+
+/* sysfs "filter": normal scancode filter data (read/write) */
+/* NOTE(review): %#x assumes sc_filter fields are 32-bit — confirm */
+static ssize_t img_ir_filter_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+
+	return sprintf(buf, "%#x\n", priv->sc_filter.data);
+}
+static ssize_t img_ir_filter_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	priv->sc_filter.data = val;
+	img_ir_update_filters(priv);
+	return count;
+}
+static DEVICE_ATTR(filter, S_IRUGO|S_IWUSR,
+		   img_ir_filter_show,
+		   img_ir_filter_store);
+
+/* sysfs "filter_mask": normal scancode filter mask (read/write) */
+static ssize_t img_ir_filter_mask_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+
+	return sprintf(buf, "%#x\n", priv->sc_filter.mask);
+}
+static ssize_t img_ir_filter_mask_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	priv->sc_filter.mask = val;
+	img_ir_update_filters(priv);
+	return count;
+}
+static DEVICE_ATTR(filter_mask, S_IRUGO|S_IWUSR,
+		   img_ir_filter_mask_show,
+		   img_ir_filter_mask_store);
+
+/* sysfs "wakeup_filter": wake scancode filter data (read/write) */
+static ssize_t img_ir_wakeup_filter_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+
+	return sprintf(buf, "%#x\n", priv->sc_wake_filter.data);
+}
+static ssize_t img_ir_wakeup_filter_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	priv->sc_wake_filter.data = val;
+	img_ir_update_filters(priv);
+	return count;
+}
+static DEVICE_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
+		   img_ir_wakeup_filter_show,
+		   img_ir_wakeup_filter_store);
+
+/* sysfs "wakeup_filter_mask": wake scancode filter mask (read/write) */
+static ssize_t img_ir_wakeup_filter_mask_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+
+	return sprintf(buf, "%#x\n", priv->sc_wake_filter.mask);
+}
+static ssize_t img_ir_wakeup_filter_mask_store(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct rc_dev *rdev = container_of(dev, struct rc_dev, dev);
+	struct img_ir_priv *priv = rdev->priv;
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	priv->sc_wake_filter.mask = val;
+	img_ir_update_filters(priv);
+	return count;
+}
+static DEVICE_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
+		   img_ir_wakeup_filter_mask_show,
+		   img_ir_wakeup_filter_mask_store);
+
+/*
+ * Create the custom sysfs attributes.
+ * NOTE(review): device_create_file() return values are ignored; consider an
+ * attribute_group so failures are reported.
+ */
+static void img_ir_attr_create(struct device *dev)
+{
+	device_create_file(dev, &dev_attr_filter);
+	device_create_file(dev, &dev_attr_filter_mask);
+	device_create_file(dev, &dev_attr_wakeup_filter);
+	device_create_file(dev, &dev_attr_wakeup_filter_mask);
+}
+
+/* Remove the custom sysfs attributes */
+static void img_ir_attr_remove(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_filter);
+	device_remove_file(dev, &dev_attr_filter_mask);
+	device_remove_file(dev, &dev_attr_wakeup_filter);
+	device_remove_file(dev, &dev_attr_wakeup_filter_mask);
+}
+
+/*
+ * Handle an IR clock frequency change: recalculate and rewrite the timing
+ * registers appropriate to the current mode.
+ */
+static void img_ir_change_frequency(struct img_ir_priv *priv,
+				    struct clk32k_change_freq *change)
+{
+	struct img_ir_decoder *dec;
+	unsigned long flags;
+
+	dev_dbg(priv->dev, "clk changed %lu HZ -> %lu HZ\n",
+		change->old_freq, change->new_freq);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	dec = priv->decoder;
+	priv->clk_hz = change->new_freq;
+	/* refresh current timings */
+	if (priv->decoder) {
+		img_ir_check_timings(priv);
+		switch (priv->mode) {
+		case IMG_IR_M_NORMAL:
+			img_ir_write_timings_normal(priv, &dec->reg_timings);
+			break;
+		case IMG_IR_M_REPEATING:
+			img_ir_write_timings_normal(priv, &dec->reg_rtimings);
+			break;
+#ifdef CONFIG_PM_SLEEP
+		case IMG_IR_M_WAKE:
+			img_ir_write_timings_wake(priv, &dec->reg_timings);
+			break;
+#endif
+		}
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* 32kHz clock notifier callback */
+static int img_ir_clk_notify(struct notifier_block *self, unsigned long action,
+			     void *data)
+{
+	struct img_ir_priv *priv;
+
+	priv = container_of(self, struct img_ir_priv, clk_nb);
+	switch (action) {
+	case CLK32K_CHANGE_FREQUENCY:
+		img_ir_change_frequency(priv, data);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+#else /* CONFIG_IR_IMG_RAW */
+
+/* raw mode has no scancode filters, so no sysfs attributes */
+static void img_ir_attr_create(struct device *dev)
+{
+}
+
+static void img_ir_attr_remove(struct device *dev)
+{
+}
+
+#endif
+
+/* Probe: map registers, set up timers/notifiers, register with rc-core */
+static int img_ir_probe(struct platform_device *pdev)
+{
+	struct img_ir_priv *priv;
+	struct resource *res_regs;
+	int irq, error;
+
+	/* Get resources from platform device */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ resource\n");
+		return irq;
+	}
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		return -ENOENT;
+	}
+
+	/* Private driver data */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, priv);
+	priv->dev = &pdev->dev;
+	spin_lock_init(&priv->lock);
+
+	/*
+	 * Ioremap the registers. Use resource_size(): the previous
+	 * end - start was one byte short (resource ranges are inclusive).
+	 */
+	priv->reg_base = devm_ioremap(&pdev->dev, res_regs->start,
+				 resource_size(res_regs));
+	if (!priv->reg_base)
+		return -EIO;
+
+#ifndef CONFIG_IR_IMG_RAW
+	/* Set up the end timer */
+	init_timer(&priv->end_timer);
+	priv->end_timer.function = img_ir_end_timer;
+	priv->end_timer.data = (unsigned long)priv;
+
+	/* Register a clock notifier */
+	priv->clk_hz = get_32kclock();
+	priv->clk_nb.notifier_call = img_ir_clk_notify;
+	error = clk32k_register_notify(&priv->clk_nb);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register clk32k notifier\n");
+		return error;
+	}
+#endif
+
+	priv->rdev = rc_allocate_device();
+	if (!priv->rdev) {
+		dev_err(&pdev->dev, "cannot allocate input device\n");
+		error = -ENOMEM;
+		goto err_unregister_clk_notify;
+	}
+	priv->rdev->priv = priv;
+	img_ir_probe_caps(priv);
+	priv->rdev->map_name = RC_MAP_EMPTY;
+	priv->rdev->input_name = "IMG Infrared Decoder";
+#ifdef CONFIG_IR_IMG_RAW
+	priv->rdev->driver_type = RC_DRIVER_IR_RAW;
+#endif
+	/* img_ir_register_device sets rdev->allowed_protos. */
+	img_ir_register_device(priv);
+
+	error = rc_register_device(priv->rdev);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register IR input device\n");
+		goto err_input_reg;
+	}
+#ifndef CONFIG_IR_IMG_RAW
+	/*
+	 * Set this after rc_register_device as no protocols have been
+	 * registered yet.
+	 */
+	priv->rdev->change_protocol = img_ir_change_protocol;
+
+	device_init_wakeup(&pdev->dev, 1);
+#endif
+
+	/* Create custom sysfs attributes */
+	img_ir_attr_create(&priv->rdev->dev);
+
+	/* Get the IRQ (propagate the real error, don't mask it as -EIO) */
+	priv->irq = irq;
+	error = devm_request_irq(&pdev->dev, priv->irq, img_ir_isr, 0,
+				 "img-ir", priv);
+	if (error) {
+		dev_err(&pdev->dev, "cannot register IRQ %u\n",
+			priv->irq);
+		goto err_irq;
+	}
+
+	img_ir_ident(priv);
+	img_ir_setup(priv);
+
+	return 0;
+
+err_irq:
+	img_ir_attr_remove(&priv->rdev->dev);
+	rc_unregister_device(priv->rdev);
+	goto err_unregister_dev;
+err_input_reg:
+	rc_free_device(priv->rdev);
+err_unregister_dev:
+	img_ir_unregister_device(priv);
+err_unregister_clk_notify:
+#ifndef CONFIG_IR_IMG_RAW
+	clk32k_unregister_notify(&priv->clk_nb);
+#endif
+
+	return error;
+}
+
+/* Remove: tear down in reverse order of probe */
+static int img_ir_remove(struct platform_device *pdev)
+{
+	struct img_ir_priv *priv = platform_get_drvdata(pdev);
+
+#ifndef CONFIG_IR_IMG_RAW
+	del_timer_sync(&priv->end_timer);
+#endif
+	img_ir_attr_remove(&priv->rdev->dev);
+	rc_unregister_device(priv->rdev);
+	img_ir_unregister_device(priv);
+#ifndef CONFIG_IR_IMG_RAW
+	clk32k_unregister_notify(&priv->clk_nb);
+#endif
+
+	return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_IR_IMG_RAW)
+/* Suspend: arm the wake filter and mark the IRQ as a wake source */
+static int img_ir_suspend(struct device *dev)
+{
+	struct img_ir_priv *priv = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) && img_ir_enable_wake(priv))
+		enable_irq_wake(priv->irq);
+	return 0;
+}
+
+/* Resume: restore normal filtering and disarm the wake IRQ */
+static int img_ir_resume(struct device *dev)
+{
+	struct img_ir_priv *priv = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) && img_ir_disable_wake(priv))
+		disable_irq_wake(priv->irq);
+	return 0;
+}
+#else
+#define img_ir_suspend NULL
+#define img_ir_resume NULL
+#endif	/* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(img_ir_pmops, img_ir_suspend, img_ir_resume);
+
+static const struct of_device_id img_ir_match[] = {
+	{ .compatible = "img,ir" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, img_ir_match);
+
+static struct platform_driver img_ir_driver = {
+	.driver = {
+		.name = "img-ir",
+		.owner	= THIS_MODULE,
+		.of_match_table	= img_ir_match,
+		.pm = &img_ir_pmops,
+	},
+	.probe = img_ir_probe,
+	.remove = img_ir_remove,
+};
+
+module_platform_driver(img_ir_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec IR");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-img.h b/drivers/media/rc/ir-img.h
new file mode 100644
index 0000000..576d117
--- /dev/null
+++ b/drivers/media/rc/ir-img.h
@@ -0,0 +1,302 @@
+/*
+ * ImgTec IR Decoder found in PowerDown Controller.
+ *
+ * Copyright 2010,2011,2012 Imagination Technologies Ltd.
+ */
+
+#ifndef _IMG_IR_H_
+#define _IMG_IR_H_
+
+#include <linux/kernel.h>
+#include <media/rc-core.h>
+
+/* registers */
+
+/* relative to the start of the IR block of registers */
+#define IMG_IR_CONTROL		0x00
+#define IMG_IR_STATUS		0x04
+#define IMG_IR_DATA_LW		0x08
+#define IMG_IR_DATA_UP		0x0c
+#define IMG_IR_LEAD_SYMB_TIMING	0x10
+#define IMG_IR_S00_SYMB_TIMING	0x14
+#define IMG_IR_S01_SYMB_TIMING	0x18
+#define IMG_IR_S10_SYMB_TIMING	0x1c
+#define IMG_IR_S11_SYMB_TIMING	0x20
+#define IMG_IR_FREE_SYMB_TIMING	0x24
+#define IMG_IR_POW_MOD_PARAMS	0x28
+#define IMG_IR_POW_MOD_ENABLE	0x2c
+#define IMG_IR_IRQ_MSG_DATA_LW	0x30
+#define IMG_IR_IRQ_MSG_DATA_UP	0x34
+#define IMG_IR_IRQ_MSG_MASK_LW	0x38
+#define IMG_IR_IRQ_MSG_MASK_UP	0x3c
+#define IMG_IR_IRQ_ENABLE	0x40
+#define IMG_IR_IRQ_STATUS	0x44
+#define IMG_IR_IRQ_CLEAR	0x48
+#define IMG_IR_IRCORE_ID	0xf0
+#define IMG_IR_CORE_REV		0xf4
+#define IMG_IR_CORE_DES1	0xf8
+#define IMG_IR_CORE_DES2	0xfc
+
+
+/* field masks */
+
+/* IMG_IR_CONTROL */
+#define IMG_IR_DECODEN		0x40000000
+#define IMG_IR_CODETYPE		0x30000000
+#define IMG_IR_CODETYPE_SHIFT		28
+#define IMG_IR_HDRTOG		0x08000000
+#define IMG_IR_LDRDEC		0x04000000
+#define IMG_IR_DECODINPOL	0x02000000	/* active high */
+#define IMG_IR_BITORIEN		0x01000000	/* MSB first */
+#define IMG_IR_D1VALIDSEL	0x00008000
+#define IMG_IR_BITINV		0x00000040	/* don't invert */
+#define IMG_IR_DECODEND2	0x00000010
+#define IMG_IR_BITORIEND2	0x00000002	/* MSB first */
+#define IMG_IR_BITINVD2		0x00000001	/* don't invert */
+
+/* IMG_IR_STATUS */
+#define IMG_IR_RXDVALD2		0x00001000
+#define IMG_IR_IRRXD		0x00000400
+#define IMG_IR_TOGSTATE		0x00000200
+#define IMG_IR_RXDVAL		0x00000040
+#define IMG_IR_RXDLEN		0x0000003f
+#define IMG_IR_RXDLEN_SHIFT		0
+
+/* IMG_IR_LEAD_SYMB_TIMING, IMG_IR_Sxx_SYMB_TIMING */
+#define IMG_IR_PD_MAX		0xff000000
+#define IMG_IR_PD_MAX_SHIFT		24
+#define IMG_IR_PD_MIN		0x00ff0000
+#define IMG_IR_PD_MIN_SHIFT		16
+#define IMG_IR_W_MAX		0x0000ff00
+#define IMG_IR_W_MAX_SHIFT		8
+#define IMG_IR_W_MIN		0x000000ff
+#define IMG_IR_W_MIN_SHIFT		0
+
+/* IMG_IR_FREE_SYMB_TIMING */
+#define IMG_IR_MAXLEN		0x0007e000
+#define IMG_IR_MAXLEN_SHIFT		13
+#define IMG_IR_MINLEN		0x00001f00
+#define IMG_IR_MINLEN_SHIFT		8
+#define IMG_IR_FT_MIN		0x000000ff
+#define IMG_IR_FT_MIN_SHIFT		0
+
+/* IMG_IR_POW_MOD_PARAMS */
+#define IMG_IR_PERIOD_LEN	0x3f000000
+#define IMG_IR_PERIOD_LEN_SHIFT		24
+#define IMG_IR_PERIOD_DUTY	0x003f0000
+#define IMG_IR_PERIOD_DUTY_SHIFT	16
+#define IMG_IR_STABLE_STOP	0x00003f00
+#define IMG_IR_STABLE_STOP_SHIFT	8
+#define IMG_IR_STABLE_START	0x0000003f
+#define IMG_IR_STABLE_START_SHIFT	0
+
+/* IMG_IR_POW_MOD_ENABLE */
+#define IMG_IR_POWER_OUT_EN	0x00000002
+#define IMG_IR_POWER_MOD_EN	0x00000001
+
+/* IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_STATUS, IMG_IR_IRQ_CLEAR */
+#define IMG_IR_IRQ_DEC2_ERR	0x00000080
+#define IMG_IR_IRQ_DEC_ERR	0x00000040
+#define IMG_IR_IRQ_ACT_LEVEL	0x00000020
+#define IMG_IR_IRQ_FALL_EDGE	0x00000010
+#define IMG_IR_IRQ_RISE_EDGE	0x00000008
+#define IMG_IR_IRQ_DATA_MATCH	0x00000004
+#define IMG_IR_IRQ_DATA2_VALID	0x00000002
+#define IMG_IR_IRQ_DATA_VALID	0x00000001
+#define IMG_IR_IRQ_ALL		0x000000ff
+#define IMG_IR_IRQ_EDGE		(IMG_IR_IRQ_FALL_EDGE | IMG_IR_IRQ_RISE_EDGE)
+
+/* IMG_IR_IRCORE_ID */
+#define IMG_IR_CORE_ID		0x00ff0000
+#define IMG_IR_CORE_ID_SHIFT		16
+#define IMG_IR_CORE_CONFIG	0x0000ffff
+#define IMG_IR_CORE_CONFIG_SHIFT	0
+
+/* IMG_IR_CORE_REV */
+#define IMG_IR_DESIGNER		0xff000000
+#define IMG_IR_DESIGNER_SHIFT		24
+#define IMG_IR_MAJOR_REV	0x00ff0000
+#define IMG_IR_MAJOR_REV_SHIFT		16
+#define IMG_IR_MINOR_REV	0x0000ff00
+#define IMG_IR_MINOR_REV_SHIFT		8
+#define IMG_IR_MAINT_REV	0x000000ff
+#define IMG_IR_MAINT_REV_SHIFT		0
+
+
+/* constants */
+
+#define IMG_IR_CODETYPE_PULSELEN	0x0	/* Sony */
+#define IMG_IR_CODETYPE_PULSEDIST	0x1	/* NEC, Toshiba, Micom, Sharp */
+#define IMG_IR_CODETYPE_BIPHASE		0x2	/* RC-5/6 */
+#define IMG_IR_CODETYPE_2BITPULSEPOS	0x3	/* RC-MM */
+
+
+/* Timing information */
+
+/**
+ * struct img_ir_control - Decoder control settings
+ * @decoden:	Primary decoder enable
+ * @code_type:	Decode type (see IMG_IR_CODETYPE_*)
+ * @hdrtog:	Detect header toggle symbol after leader symbol
+ * @ldrdec:	Don't discard leader if maximum width reached
+ * @decodinpol:	Decoder input polarity (1=active high)
+ * @bitorien:	Bit orientation (1=MSB first)
+ * @d1validsel:	Decoder 2 takes over if it detects valid data
+ * @bitinv:	Bit inversion switch (1=don't invert)
+ * @decodend2:	Secondary decoder enable (no leader symbol)
+ * @bitoriend2:	Bit orientation (1=MSB first)
+ * @bitinvd2:	Secondary decoder bit inversion switch (1=don't invert)
+ */
+struct img_ir_control {
+	unsigned decoden:1;
+	unsigned code_type:2;
+	unsigned hdrtog:1;
+	unsigned ldrdec:1;
+	unsigned decodinpol:1;
+	unsigned bitorien:1;
+	unsigned d1validsel:1;
+	unsigned bitinv:1;
+	unsigned decodend2:1;
+	unsigned bitoriend2:1;
+	unsigned bitinvd2:1;
+};
+
+/**
+ * struct img_ir_timing_range - range of timing values
+ * @min:	Minimum timing value
+ * @max:	Maximum timing value (if < @min, this will be set to @min during
+ *		preprocessing step, so it is normally not explicitly initialised
+ *		and is taken care of by the tolerance)
+ */
+struct img_ir_timing_range {
+	u16 min;
+	u16 max;
+};
+
+/**
+ * struct img_ir_symbol_timing - timing data for a symbol
+ * @pulse:	Timing range for the length of the pulse in this symbol
+ * @space:	Timing range for the length of the space in this symbol
+ */
+struct img_ir_symbol_timing {
+	struct img_ir_timing_range pulse;
+	struct img_ir_timing_range space;
+};
+
+/**
+ * struct img_ir_free_timing - timing data for free time symbol
+ * @minlen:	Minimum number of bits of data
+ * @maxlen:	Maximum number of bits of data
+ * @ft_min:	Minimum free time after message
+ */
+struct img_ir_free_timing {
+	/* measured in bits */
+	u8 minlen;
+	u8 maxlen;
+	u16 ft_min;
+};
+
+/**
+ * struct img_ir_timings - Timing values.
+ * @ldr:	Leader symbol timing data
+ * @s00:	Zero symbol timing data for primary decoder
+ * @s01:	One symbol timing data for primary decoder
+ * @s10:	Zero symbol timing data for secondary (no leader symbol) decoder
+ * @s11:	One symbol timing data for secondary (no leader symbol) decoder
+ * @ft:		Free time symbol timing data
+ */
+struct img_ir_timings {
+	struct img_ir_symbol_timing ldr, s00, s01, s10, s11;
+	struct img_ir_free_timing ft;
+};
+
+/**
+ * struct img_ir_sc_filter - Filter scan codes.
+ * @data:	Data to match.
+ * @mask:	Mask of bits to compare.
+ */
+struct img_ir_sc_filter {
+	unsigned int data;
+	unsigned int mask;
+};
+
+/**
+ * struct img_ir_filter - Filter IR events.
+ * @data:	Data to match.
+ * @mask:	Mask of bits to compare.
+ * @minlen:	Additional minimum number of bits.
+ * @maxlen:	Additional maximum number of bits.
+ */
+struct img_ir_filter {
+	u64 data;
+	u64 mask;
+	u8 minlen;
+	u8 maxlen;
+};
+
+/**
+ * struct img_ir_timing_regvals - Calculated timing register values.
+ * @ldr:	Leader symbol timing register value
+ * @s00:	Zero symbol timing register value for primary decoder
+ * @s01:	One symbol timing register value for primary decoder
+ * @s10:	Zero symbol timing register value for secondary decoder
+ * @s11:	One symbol timing register value for secondary decoder
+ * @ft:		Free time symbol timing register value
+ */
+struct img_ir_timing_regvals {
+	u32 ldr, s00, s01, s10, s11, ft;
+};
+
+#define IMG_IR_REPEATCODE	(-1)	/* repeat the previous code */
+#define IMG_IR_ERR_INVALID	(-2)	/* not a valid code */
+
+/**
+ * struct img_ir_decoder - Decoder settings for an IR protocol.
+ * @type:		Protocol types bitmap.
+ * @unit:		Unit of timings in nanoseconds (default 1 us).
+ * @timings:		Primary timings
+ * @rtimings:		Additional override timings while waiting for repeats.
+ * @repeat:		Maximum repeat interval (always in milliseconds).
+ * @control:		Control flags.
+ *
+ * @scancode:		Pointer to function to convert the IR data into a
+ *			scancode (it must be safe to execute in interrupt
+ *			context).
+ *			Returns IMG_IR_REPEATCODE to repeat previous code.
+ *			Returns IMG_IR_ERR_* on error.
+ * @filter:		Pointer to function to convert scancode filter to raw
+ *			hardware filter. The minlen and maxlen fields will have
+ *			been initialised to the maximum range.
+ *
+ * @reg_ctrl:		Processed control register value.
+ * @clk_hz:		Assumed clock rate in Hz for processed timings.
+ * @reg_timings:	Processed primary timings.
+ * @reg_rtimings:	Processed repeat timings.
+ * @next:		Next IR decoder (to form a linked list).
+ */
+struct img_ir_decoder {
+	/* core description */
+	u64				type;
+	unsigned int			unit;
+	struct img_ir_timings		timings;
+	struct img_ir_timings		rtimings;
+	unsigned int			repeat;
+	struct img_ir_control		control;
+
+	/* scancode logic */
+	int (*scancode)(int len, u64 raw, u64 protocols);
+	int (*filter)(const struct img_ir_sc_filter *in,
+		      struct img_ir_filter *out, u64 protocols);
+
+	/* for internal use only */
+	u32				reg_ctrl;
+	unsigned long			clk_hz;
+	struct img_ir_timing_regvals	reg_timings;
+	struct img_ir_timing_regvals	reg_rtimings;
+	struct img_ir_decoder		*next;
+};
+
+int img_ir_register_decoder(struct img_ir_decoder *dec);
+void img_ir_unregister_decoder(struct img_ir_decoder *dec);
+
+#endif
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1cf382a..cedcd08 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -751,6 +751,7 @@
 	  RC_BIT_SONY20,	"sony"		},
 	{ RC_BIT_RC5_SZ,	"rc-5-sz"	},
 	{ RC_BIT_SANYO,		"sanyo"		},
+	{ RC_BIT_SHARP,		"sharp"		},
 	{ RC_BIT_MCE_KBD,	"mce_kbd"	},
 	{ RC_BIT_LIRC,		"lirc"		},
 };
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9ab8f8d..ad11067 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -519,7 +519,7 @@
 
 config MMC_DW
 	tristate "Synopsys DesignWare Memory Card Interface"
-	depends on ARM
+	depends on ARM || METAG
 	help
 	  This selects support for the Synopsys DesignWare Mobile Storage IP
 	  block, this provides host support for SD and MMC interfaces, in both
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index bc3a1bc..18a6a60 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -2103,6 +2104,157 @@
 	return false;
 }
 
+/* Check if the block is still locked */
+static bool mci_locked(struct device *dev, struct dw_mci *host)
+{
+	return (mci_readl(host, STATUS) & SDMMC_STATUS_DATA_BUSY) ==
+					SDMMC_STATUS_DATA_BUSY;
+}
+
+/* Bit-bang a clock cycle */
+static void mci_bitbang_clk(struct device *dev, struct dw_mci *host)
+{
+	gpio_set_value(host->pdata->clk_pin, 1);
+	udelay(1);
+	gpio_set_value(host->pdata->clk_pin, 0);
+	udelay(1);
+}
+
+/* Bit-bang a command bit */
+static void mci_bitbang_cmd_bit(struct device *dev, struct dw_mci *host,
+							u8 bit)
+{
+	gpio_set_value(host->pdata->cmd_pin, bit ? 1 : 0);
+	udelay(1);
+	mci_bitbang_clk(dev, host);
+}
+
+/* Bit-bang a command byte */
+static void mci_bitbang_cmd_byte(struct device *dev, struct dw_mci *host,
+								u8 cmd)
+{
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x80);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x40);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x20);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x10);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x08);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x04);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x02);
+	mci_bitbang_cmd_bit(dev, host, cmd & 0x01);
+}
+
+/* Bit-bang CMD0, reset, and test that the block reset cleanly */
+static bool mci_bitbang_reset(struct device *dev, struct dw_mci *host)
+{
+	bool locked = true;
+
+	dev_info(dev, "STATUS = 0x%08x, unlocking by bit-banging CMD0",
+					mci_readl(host, STATUS));
+
+	if (gpio_request_one(host->pdata->clk_pin, GPIOF_OUT_INIT_LOW,
+							"SDIO CLK")) {
+		dev_err(dev, "Failed to take SDIO "
+			"CLK line for reset\n");
+		return false;
+	}
+	if (gpio_request_one(host->pdata->cmd_pin, GPIOF_OUT_INIT_HIGH,
+							"SDIO CMD")) {
+		dev_err(dev, "Failed to take SDIO "
+			"CMD line for reset\n");
+		return false;
+	}
+
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0xff);
+	mci_bitbang_cmd_byte(dev, host, 0x40);
+	mci_bitbang_cmd_byte(dev, host, 0x00);
+	mci_bitbang_cmd_byte(dev, host, 0x00);
+	mci_bitbang_cmd_byte(dev, host, 0x00);
+	mci_bitbang_cmd_byte(dev, host, 0x00);
+	mci_bitbang_cmd_byte(dev, host, 0x95);
+	mci_bitbang_clk(dev, host);
+
+	gpio_free(host->pdata->cmd_pin);
+	gpio_free(host->pdata->clk_pin);
+
+	if (mci_wait_reset(dev, host))
+		locked = mci_locked(dev, host);
+	if (locked)
+		dev_err(dev, "Failed to unlock, STATUS = 0x%08x",
+					mci_readl(host, STATUS));
+	else
+		dev_info(dev, "Successfully unlocked, STATUS = 0x%08x",
+					mci_readl(host, STATUS));
+
+	return !locked;
+}
+
+/* Clock SDIO CLK until the block resets cleanly */
+static bool mci_unlock(struct device *dev, struct dw_mci *host)
+{
+	bool locked = true;
+	unsigned int tries = 0xffff;
+
+	dev_info(dev, "STATUS = 0x%08x, unlocking by clocking",
+				mci_readl(host, STATUS));
+
+	if (gpio_request_one(host->pdata->clk_pin, GPIOF_OUT_INIT_LOW,
+							"SDIO CLK")) {
+		dev_err(dev, "Failed to take SDIO "
+			"CLK line for reset\n");
+		return false;
+	}
+
+	while (tries-- && locked) {
+		gpio_set_value(host->pdata->clk_pin, 1);
+		udelay(1);
+		gpio_set_value(host->pdata->clk_pin, 0);
+		udelay(1);
+
+		if (mci_wait_reset(dev, host))
+			locked = mci_locked(dev, host);
+		else
+			break;
+	}
+
+	gpio_free(host->pdata->clk_pin);
+
+	if (locked)
+		dev_err(dev, "Failed to unlock, STATUS = 0x%08x",
+					mci_readl(host, STATUS));
+	else
+		dev_info(dev, "Successfully unlocked, STATUS = 0x%08x",
+					mci_readl(host, STATUS));
+
+	return !locked;
+}
+
+/* Reset the block, unlocking it if we can. */
+static bool mci_safe_reset(struct device *dev, struct dw_mci *host)
+{
+	if (!mci_wait_reset(dev, host))
+		return false;
+	if (mci_locked(dev, host)) {
+		if ((host->quirks & DW_MCI_QUIRK_BIT_BANG) &&
+				mci_bitbang_reset(dev, host))
+			return true;
+		if (host->quirks & DW_MCI_QUIRK_GPIO_UNLOCK)
+			return mci_unlock(dev, host);
+		else
+			return false;
+	}
+
+	return true;
+}
+
 #ifdef CONFIG_OF
 static struct dw_mci_of_quirks {
 	char *quirk;
@@ -2268,7 +2420,7 @@
 	}
 
 	/* Reset all blocks */
-	if (!mci_wait_reset(host->dev, host))
+	if (!mci_safe_reset(host->dev, host))
 		return -ENODEV;
 
 	host->dma_ops = host->pdata->dma_ops;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 0b74189..c74ba17 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -90,6 +90,8 @@
 #define SDMMC_CTYPE_8BIT		BIT(16)
 #define SDMMC_CTYPE_4BIT		BIT(0)
 #define SDMMC_CTYPE_1BIT		0
+/* Status register defines */
+#define SDMMC_STATUS_DATA_BUSY		BIT(9)
 /* Interrupt status & mask register defines */
 #define SDMMC_INT_SDIO(n)		BIT(16 + (n))
 #define SDMMC_INT_EBE			BIT(15)
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index bb4c167..917d207 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@
 	bool "SMC (SMSC)/Western Digital devices"
 	default y
 	depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
-		BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+		BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA || METAG
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -97,7 +97,7 @@
 
 config SMSC911X
 	tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-	depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300)
+	depends on (ARM || SUPERH || BLACKFIN || METAG || MIPS || MN10300)
 	select CRC32
 	select NET_CORE
 	select MII
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 03e8a15d..2a753a6 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -463,6 +463,10 @@
 		/* FIXME schedule work to clear the halt */
 		netif_warn(pegasus, rx_err, net, "no rx stall recovery\n");
 		return;
+	case -EOVERFLOW:
+	case -EPROTO:
+	case -EILSEQ:
+		pegasus->stats.rx_errors++;	/* fall through */
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
@@ -632,6 +636,7 @@
 	switch (status) {
 	case 0:
 		break;
+	case -EOVERFLOW:
 	case -ECONNRESET:	/* unlink */
 	case -ENOENT:
 	case -ESHUTDOWN:
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f8f0156..3e3e985 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -278,6 +278,7 @@
 source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/rtlwifi/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
+source "drivers/net/wireless/uccp310wlan/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
 
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 67156ef..6a24c1a 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,3 +57,5 @@
 
 obj-$(CONFIG_BRCMFMAC)	+= brcm80211/
 obj-$(CONFIG_BRCMSMAC)	+= brcm80211/
+
+obj-$(CONFIG_UCCP310WLAN)	+= uccp310wlan/
diff --git a/drivers/net/wireless/uccp310wlan/80211_if.c b/drivers/net/wireless/uccp310wlan/80211_if.c
new file mode 100644
index 0000000..e68ef8d
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/80211_if.c
@@ -0,0 +1,1080 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : 80211_if.c
+***
+*** File Description:
+*** This file is the glue layer between net/mac80211 and UMAC
+***
+******************************************************************************
+*END**************************************************************************/
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <net/ieee80211_radiotap.h>
+#include <../net/mac80211/ieee80211_i.h>
+
+#include "version.h"
+#include "umac.h"
+#include "utils.h"
+
+#ifdef CONFIG_80211IF_DEBUG
+#define _80211IF_DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define _80211IF_DEBUG(...) do { } while (0)
+#endif
+
+static char   *mac_addr = DEFAULT_MAC_ADDRESS;
+
+/* Its value will be the default mac address and it can only be updated with the
+ * command line arguments
+ */
+module_param(mac_addr, charp, 0000);
+
+#define CHAN2G(_freq, _idx)  {		\
+	.band = IEEE80211_BAND_2GHZ,	\
+	.center_freq = (_freq),		\
+	.hw_value = (_idx),		\
+	.max_power = 20,		\
+}
+
+#define CHAN5G(_freq, _idx) {		\
+	.band = IEEE80211_BAND_5GHZ,	\
+	.center_freq = (_freq),		\
+	.hw_value = (_idx),		\
+	.max_power = 20,		\
+}
+
+struct wifi_dev {
+	struct proc_dir_entry *umac_proc_dir_entry;
+	struct wifi_params params;
+	struct wifi_stats  stats;
+	struct ieee80211_hw *hw;
+};
+
+static struct wifi_dev *wifi;
+
+static struct ieee80211_channel dsss_chantable[] = {
+	CHAN2G(2412, 0), /* Channel 1 */
+	CHAN2G(2417, 1), /* Channel 2 */
+	CHAN2G(2422, 2), /* Channel 3 */
+	CHAN2G(2427, 3), /* Channel 4 */
+	CHAN2G(2432, 4), /* Channel 5 */
+	CHAN2G(2437, 5), /* Channel 6 */
+	CHAN2G(2442, 6), /* Channel 7 */
+	CHAN2G(2447, 7), /* Channel 8 */
+	CHAN2G(2452, 8), /* Channel 9 */
+	CHAN2G(2457, 9), /* Channel 10 */
+	CHAN2G(2462, 10), /* Channel 11 */
+	CHAN2G(2467, 11), /* Channel 12 */
+	CHAN2G(2472, 12), /* Channel 13 */
+	CHAN2G(2484, 13), /* Channel 14 */
+};
+
+static struct ieee80211_channel ofdm_chantable[] = {
+	CHAN5G(5180, 14), /* Channel 36 */
+	CHAN5G(5200, 15), /* Channel 40 */
+	CHAN5G(5220, 16), /* Channel 44 */
+	CHAN5G(5240, 17), /* Channel 48 */
+	CHAN5G(5260, 18), /* Channel 52 */
+	CHAN5G(5280, 19), /* Channel 56 */
+	CHAN5G(5300, 20), /* Channel 60 */
+	CHAN5G(5320, 21), /* Channel 64 */
+	CHAN5G(5745, 33), /* Channel 149 */
+	CHAN5G(5765, 34), /* Channel 153 */
+	CHAN5G(5785, 35), /* Channel 157 */
+	CHAN5G(5805, 36), /* Channel 161 */
+};
+
+static struct ieee80211_rate dsss_rates[] = {
+	{ .bitrate = 10, .hw_value = 2 },
+	{ .bitrate = 20, .hw_value = 4, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .hw_value = 11, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .hw_value = 22, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 60 , .hw_value = 12},
+	{ .bitrate = 90 , .hw_value = 18},
+	{ .bitrate = 120 , .hw_value = 24},
+	{ .bitrate = 180 , .hw_value = 36},
+	{ .bitrate = 240 , .hw_value = 48},
+	{ .bitrate = 360 , .hw_value = 72},
+	{ .bitrate = 480 , .hw_value = 96},
+	{ .bitrate = 540 , .hw_value = 108}
+};
+
+static struct ieee80211_rate ofdm_rates[] = {
+	{ .bitrate = 60 , .hw_value = 12},
+	{ .bitrate = 90 , .hw_value = 18},
+	{ .bitrate = 120 , .hw_value = 24},
+	{ .bitrate = 180 , .hw_value = 36},
+	{ .bitrate = 240 , .hw_value = 48},
+	{ .bitrate = 360 , .hw_value = 72},
+	{ .bitrate = 480 , .hw_value = 96},
+	{ .bitrate = 540 , .hw_value = 108}
+};
+
+static struct ieee80211_supported_band band_2ghz = {
+	.channels = dsss_chantable,
+	.n_channels = ARRAY_SIZE(dsss_chantable),
+	.band = IEEE80211_BAND_2GHZ,
+	.bitrates = dsss_rates,
+	.n_bitrates = ARRAY_SIZE(dsss_rates),
+};
+
+static struct ieee80211_supported_band band_5ghz = {
+	.channels = ofdm_chantable,
+	.n_channels = ARRAY_SIZE(ofdm_chantable),
+	.band = IEEE80211_BAND_5GHZ,
+	.bitrates = ofdm_rates,
+	.n_bitrates = ARRAY_SIZE(ofdm_rates),
+};
+
+static int conv_str_to_byte(unsigned char *byte,
+		unsigned char *str,
+		int len)
+{
+	int  i, j = 0;
+	unsigned char  ch, val = 0;
+
+	for (i = 0; i < (len * 2); i++) {
+		/*convert to lower*/
+		ch = ((str[i] >= 'A' && str[i] <= 'Z') ? str[i] + 32 : str[i]);
+		if ((ch < '0' || ch > '9') && (ch < 'a' || ch > 'f'))
+			return -1;
+		if (ch >= '0' && ch <= '9')  /*check is digit*/
+			ch = ch - '0';
+		else
+			ch = ch - 'a' + 10;
+		val += ch;
+		if (!(i%2))
+			val <<= 4;
+		else {
+			byte[j] = val;
+			j++;
+			val = 0;
+		}
+	}
+	return 0;
+}
+
+static unsigned char get_ps_info(unsigned char *ie_data,
+		int ie_len)
+{
+	unsigned char *pos, *end;
+	unsigned char val = 0;
+	unsigned char wmm_oui[4] = { 0x00, 0x50, 0xF2, 0x02 };
+
+	pos = ie_data;
+	if (pos == NULL)
+		return val;
+
+	end = pos + ie_len;
+
+	while ((pos + 1) < end) {
+		if ((pos + 2 + pos[1]) > end)
+			break;
+		if ((*pos == 221) && (memcmp(pos+2, wmm_oui, 4) == 0)) {
+			pos += 8;
+			val = 0x0F & *pos;
+			break;
+		}
+		pos += 2 + pos[1];
+	}
+	return val;
+}
+
+static void tx(struct ieee80211_hw *hw,
+		struct ieee80211_tx_control *control,
+		struct sk_buff *skb)
+{
+	struct mac80211_dev *dev = hw->priv;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	unsigned char ps_info;
+	struct umac_vif *uvif;
+
+	if (tx_info->control.vif == NULL) {
+		printk(KERN_DEBUG "%s: Dropping injected TX frame\n", dev->name);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	uvif = (struct umac_vif *)(tx_info->control.vif->drv_priv);
+
+	if (wifi->params.production_test) {
+		if (((hdr->frame_control & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || (tx_info->control.vif == NULL)) {
+			tx_info->flags |= IEEE80211_TX_STAT_ACK;
+			tx_info->status.rates[0].count = 1;
+			ieee80211_tx_status(hw, skb);
+			return;
+		}
+	}
+
+	if ((dev->power_save == PWRSAVE_STATE_DOZE) &&
+			((hdr->frame_control & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA))
+		hdr->frame_control |= IEEE80211_FCTL_PM;
+
+	if ((hdr->frame_control & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
+		if ((hdr->frame_control & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) {
+			ps_info = get_ps_info((unsigned char *)(skb->data + 28), (skb->len - 28));
+			/* program the power save information */
+			uccp310wlan_prog_vif_powersave_mode(uvif->vif_index, uvif->vif->addr, ps_info);
+		}
+		if ((hdr->frame_control & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) {
+			ps_info = get_ps_info((unsigned char *)(skb->data + 34), (skb->len - 34));
+			/* program the power save information */
+			uccp310wlan_prog_vif_powersave_mode(uvif->vif_index, uvif->vif->addr, ps_info);
+		}
+	}
+
+	if (uvif->noa_active) {
+		uccp310wlan_noa_event(CMD_TX, (void *)uvif->vif_index, dev, skb);
+		return;
+	}
+
+	uccp310wlan_tx_frame(skb, dev, false);
+}
+
+static int start(struct ieee80211_hw *hw)
+{
+	struct mac80211_dev *dev = (struct mac80211_dev *)hw->priv;
+	_80211IF_DEBUG("%s-80211IF: In start\n", dev->name);
+
+	mutex_lock(&dev->mutex);
+	if ((uccp310wlan_core_init(dev)) < 0) {
+		_80211IF_DEBUG("%s-80211IF: umac init failed\n", dev->name);
+		mutex_unlock(&dev->mutex);
+		return -ENODEV;
+	}
+
+	dev->state = STARTED;
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void stop(struct ieee80211_hw *hw)
+{
+	struct mac80211_dev    *dev = (struct mac80211_dev *)hw->priv;
+
+	_80211IF_DEBUG("%s-80211IF:In stop\n", dev->name);
+
+	mutex_lock(&dev->mutex);
+	uccp310wlan_core_deinit(dev);
+	dev->state = STOPPED;
+	mutex_unlock(&dev->mutex);
+}
+
+static int add_interface(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif)
+{
+	struct mac80211_dev    *dev = hw->priv;
+	struct ieee80211_vif *v;
+	struct umac_vif   *uvif;
+	int vif_index, iftype;
+
+	iftype = vif->type;
+	v = vif;
+	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+	if (!(iftype == NL80211_IFTYPE_STATION ||
+				iftype == NL80211_IFTYPE_ADHOC ||
+				iftype == NL80211_IFTYPE_AP)) {
+		printk(KERN_ERR "Invalid Interfacetype\n");
+		return -ENOTSUPP;
+	}
+
+	mutex_lock(&dev->mutex);
+
+	if (wifi->params.production_test) {
+		if (dev->active_vifs || iftype != NL80211_IFTYPE_ADHOC) {
+			mutex_unlock(&dev->mutex);
+			return -EBUSY;
+		}
+	}
+	for (vif_index = 0; vif_index < wifi->params.num_vifs; vif_index++)
+		if (dev->if_mac_addresses[vif_index].addr[5] == vif->addr[5])
+			break;
+	uvif = (struct umac_vif *)&v->drv_priv;
+	uvif->vif_index = vif_index;
+	uvif->vif = v;
+	uvif->dev = dev;
+	uccp310wlan_vif_add(uvif);
+	dev->active_vifs |= (1 << vif_index);
+
+	rcu_assign_pointer(dev->vifs[vif_index], v);
+	synchronize_rcu();
+
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void remove_interface(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif)
+{
+	struct mac80211_dev    *dev = hw->priv;
+	struct ieee80211_vif *v;
+	int vif_index;
+
+	v = vif;
+	vif_index = ((struct umac_vif *)&v->drv_priv)->vif_index;
+
+	mutex_lock(&dev->mutex);
+
+	uccp310wlan_vif_remove((struct umac_vif *)&v->drv_priv);
+	dev->active_vifs &= ~(1 << vif_index);
+	rcu_assign_pointer(dev->vifs[vif_index], NULL);
+	synchronize_rcu();
+
+	mutex_unlock(&dev->mutex);
+
+}
+
+
+static int config(struct ieee80211_hw *hw,
+		unsigned int changed)
+{
+	struct mac80211_dev    *dev = hw->priv;
+	struct ieee80211_conf *conf = &hw->conf;
+	unsigned int chnl;
+
+	_80211IF_DEBUG("%s-80211IF:In config\n", dev->name);
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->txpower = conf->power_level;
+		uccp310wlan_prog_txpower(dev->txpower);
+	}
+
+	/*Check for change in Channel*/
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		chnl = ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
+		_80211IF_DEBUG("%s-80211IF:Set Channel to %d\n", dev->name, chnl);
+		uccp310wlan_prog_channel(chnl);
+	}
+
+	/*Check for change in Power save state*/
+	if (changed & IEEE80211_CONF_CHANGE_PS) {
+		int i;
+		for (i = 0; i < MAX_VIFS; i++)
+			if (dev->active_vifs & (1 << i))
+				break;
+
+		if (conf->flags & IEEE80211_CONF_PS)
+			dev->power_save = PWRSAVE_STATE_DOZE;
+		else
+			dev->power_save = PWRSAVE_STATE_AWAKE;
+		_80211IF_DEBUG("%s-80211IF:Power save state of VIF %d changed to %d\n", dev->name, i, dev->power_save);
+		uccp310wlan_prog_powersave_state(i, dev->if_mac_addresses[i].addr, dev->power_save);
+	}
+
+
+	/*Check for change in Listen Interval*/
+	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
+		/* TODO */
+	;
+	}
+
+	/*Check for change in Retry Limits*/
+	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+		int i;
+		_80211IF_DEBUG("%s-80211IF:Retry Limits changed to %d and %d\n", dev->name, conf->short_frame_max_tx_count, conf->long_frame_max_tx_count);
+		for (i = 0; i < MAX_VIFS; i++) {
+			if (dev->active_vifs & (1 << i)) {
+				uccp310wlan_prog_vif_short_retry(i, dev->if_mac_addresses[i].addr, conf->short_frame_max_tx_count);
+				uccp310wlan_prog_vif_long_retry(i, dev->if_mac_addresses[i].addr, conf->long_frame_max_tx_count);
+			}
+		}
+	}
+
+	/* Production test hack */
+	if (wifi->params.production_test) {
+		struct ieee80211_sub_if_data *sdata;
+		if (dev->vifs[0]) {
+			sdata = vif_to_sdata(dev->vifs[0]);
+			sdata->u.ibss.fixed_channel = 1;
+			sdata->u.ibss.last_scan_completed = jiffies + HZ;
+			sdata->u.ibss.ibss_join_req = jiffies - (10*HZ);
+		}
+	}
+
+	mutex_unlock(&dev->mutex);
+	return 0;
+}
+
+static u64 prepare_multicast(struct ieee80211_hw *hw,
+		struct netdev_hw_addr_list *mc_list)
+{
+	struct mac80211_dev      *dev = hw->priv;
+	int                  i;
+	struct               netdev_hw_addr *ha;
+	int                  mc_count = 0;
+
+	if (dev->state != STARTED)
+		return 0;
+
+	netdev_hw_addr_list_for_each(ha, mc_list) {
+		if (++mc_count > MAX_MCAST_FILTERS) {
+			mc_count = 0;
+			_80211IF_DEBUG("%s-80211IF: Multicast filter count : %d\n", dev->name, mc_count);
+			goto out;
+		}
+	}
+	_80211IF_DEBUG("%s-80211IF: Multicast filter count : %d\n", dev->name, mc_count);
+
+	if (dev->mc_filter_count > 0) {
+		/* Remove all previous multicast addresses from the LMAC */
+		for (i = 0; i < dev->mc_filter_count; i++)
+			uccp310wlan_prog_mcast_addr_cfg(dev->mc_filters[i], 1);
+	}
+
+	i = 0;
+	netdev_hw_addr_list_for_each(ha, mc_list)
+	{
+		/* Prog the multicast address into the LMAC */
+		uccp310wlan_prog_mcast_addr_cfg(ha->addr, 0);
+		memcpy(dev->mc_filters[i], ha->addr, 6);
+		i++;
+	}
+	dev->mc_filter_count = mc_count;
+out:
+	return mc_count;
+}
+
+/*
+ * configure_filter() - mac80211 callback: apply RX filter flags.
+ *
+ * mc_count is the value previously returned by prepare_multicast().
+ * Enables/disables the LMAC multicast filter and selects the beacon
+ * receive mode (all beacons vs. own-network beacons only).
+ */
+static void configure_filter(struct           ieee80211_hw *hw,
+		unsigned int     changed_flags,
+		unsigned int     *new_flags,
+		u64              mc_count)
+{
+	struct mac80211_dev *dev = hw->priv;
+	mutex_lock(&dev->mutex);
+
+	/* Only advertise/handle the filters the LMAC supports. */
+	changed_flags &= SUPPORTED_FILTERS;
+	*new_flags &= SUPPORTED_FILTERS;
+
+	if (dev->state != STARTED) {
+		mutex_unlock(&dev->mutex);
+		return;
+	}
+
+	if ((*new_flags & FIF_ALLMULTI) || (mc_count == 0)) {
+		/* Disable the multicast filter in LMAC */
+		_80211IF_DEBUG("%s-80211IF: Multicast filters disabled\n", dev->name);
+		uccp310wlan_prog_mcast_filter_control(0);
+	} else if (mc_count) {
+		/* Enable the multicast filter in LMAC */
+		_80211IF_DEBUG("%s-80211IF: Multicast filters enabled\n", dev->name);
+		uccp310wlan_prog_mcast_filter_control(1);
+	}
+
+	if (changed_flags == 0)
+		/* no filters which we support changed */
+		goto out;
+
+	if (wifi->params.production_test == 0) {
+		if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+			/* receive all beacons and probe responses */
+			_80211IF_DEBUG("%s-80211IF: RCV ALL bcns\n", dev->name);
+			uccp310wlan_prog_rcv_bcn_mode(RCV_ALL_BCNS);
+		} else {
+			/* receive only network beacons and probe responses */
+			_80211IF_DEBUG("%s-80211IF: RCV NW bcns\n", dev->name);
+			uccp310wlan_prog_rcv_bcn_mode(RCV_NETWORK_BCNS);
+		}
+	}
+out:
+	/* Note: placed after the 'out' label on purpose so production test
+	   mode forces RCV_ALL_BCNS even when no supported flag changed. */
+	if (wifi->params.production_test == 1) {
+		_80211IF_DEBUG("%s-80211IF: RCV ALL bcns\n", dev->name);
+		uccp310wlan_prog_rcv_bcn_mode(RCV_ALL_BCNS);
+	}
+	mutex_unlock(&dev->mutex);
+	return;
+}
+
+/*
+ * conf_vif_tx() - mac80211 conf_tx callback: store/program the EDCA
+ * (WMM) parameters for one TX queue of a virtual interface.
+ *
+ * The interface is matched by the last MAC octet only, since
+ * uccp310wlan_init() derives per-VIF addresses by bumping addr[5].
+ * NOTE(review): if no octet matches, vif_index ends up equal to
+ * num_vifs and the subsequent bit test reads an unused bit — confirm
+ * this cannot happen in practice.
+ */
+static int conf_vif_tx(struct ieee80211_hw  *hw,
+		struct ieee80211_vif    *vif,
+		unsigned short  queue, const struct  ieee80211_tx_queue_params *params)
+{
+	struct mac80211_dev *dev = hw->priv;
+	int vif_index, vif_active;
+
+	for (vif_index = 0; vif_index < wifi->params.num_vifs; vif_index++)
+		if (dev->if_mac_addresses[vif_index].addr[5] == vif->addr[5])
+			break;
+
+	/* Parameters are only programmed to the LMAC once the VIF is up;
+	   otherwise they are cached inside the umac_vif. */
+	vif_active = 0;
+	if ((dev->active_vifs & (1 << vif_index)))
+		vif_active = 1;
+	mutex_lock(&dev->mutex);
+	uccp310wlan_vif_set_edca_params(queue, (struct umac_vif *)&vif->drv_priv, params, vif_active);
+	mutex_unlock(&dev->mutex);
+	return 0;
+}
+
+/*
+ * set_key() - mac80211 callback: add or remove a crypto key in the LMAC.
+ *
+ * WEP keys are always programmed as interface keys. TKIP/CCMP keys are
+ * programmed per-peer when a station is supplied; otherwise they are
+ * treated as broadcast/interface keys depending on the interface type.
+ *
+ * Fix vs. original: the unsupported-cipher path called
+ * mutex_unlock(&dev->mutex) although the mutex is only taken further
+ * down, i.e. it unlocked a mutex it did not hold. 'result' is also an
+ * int now, since it carries the negative errno -EOPNOTSUPP.
+ *
+ * Returns 0 on success or -EOPNOTSUPP for unsupported ciphers.
+ */
+static int set_key(struct ieee80211_hw *hw,
+		enum set_key_cmd cmd,
+		struct ieee80211_vif *vif,
+		struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key_conf)
+{
+
+	struct umac_key sec_key;
+	int result = 0;
+	struct mac80211_dev  *dev = hw->priv;
+	unsigned int cipher_type, key_type;
+	int vif_index;
+	struct umac_vif *uvif;
+	uvif = ((struct umac_vif *)&vif->drv_priv);
+
+	memset(&sec_key, 0, sizeof(struct umac_key));
+	switch (key_conf->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		sec_key.key = key_conf->key;
+		cipher_type = CIPHER_TYPE_WEP40 ;
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		sec_key.key = key_conf->key;
+		cipher_type = CIPHER_TYPE_WEP104 ;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/*
+		 * We get the key in the following form:
+		 * KEY (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+		 */
+		sec_key.key = key_conf->key;
+		sec_key.tx_mic = key_conf->key + 16;
+		sec_key.rx_mic = key_conf->key + 24;
+		cipher_type = CIPHER_TYPE_TKIP ;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		sec_key.key = key_conf->key;
+		cipher_type = CIPHER_TYPE_CCMP;
+		break;
+	default:
+		/* dev->mutex is NOT held yet here, so do not unlock it. */
+		result = -EOPNOTSUPP;
+		goto out;
+	}
+
+	vif_index = ((struct umac_vif *)&vif->drv_priv)->vif_index;
+	mutex_lock(&dev->mutex);
+	if (cmd == SET_KEY) {
+		key_conf->hw_key_idx = 0; /* Don't really use this */
+
+		/* This flag indicate that it requires IV generation */
+		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+
+		if (cipher_type == CIPHER_TYPE_WEP40 || cipher_type == CIPHER_TYPE_WEP104) {
+			_80211IF_DEBUG("%s-80211IF: ADD IF KEY. vif_index = %d, keyidx = %d, cipher_type = %d\n", dev->name, vif_index, key_conf->keyidx, cipher_type);
+			uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_ADD, key_conf->keyidx, cipher_type, &sec_key);
+		} else {
+			if (sta) {
+				/* Pairwise or group key for a known peer. */
+				sec_key.peer_mac = sta->addr;
+				if (key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+					key_type = KEY_TYPE_UCAST;
+				else
+					key_type = KEY_TYPE_BCAST;
+				_80211IF_DEBUG("%s-80211IF: ADD PEER KEY. vif_index = %d, keyidx = %d, keytype = %d, cipher_type = %d\n", dev->name, vif_index, key_conf->keyidx, key_type, cipher_type);
+				uccp310wlan_prog_peer_key(vif_index, vif->addr, KEY_CTRL_ADD, key_conf->keyidx, key_type, cipher_type, &sec_key);
+			} else {
+				/* No station: group key keyed off the BSSID (STA
+				   mode) or programmed as an interface key. */
+				key_type = KEY_TYPE_BCAST;
+				if (vif->type == NL80211_IFTYPE_STATION) {
+					sec_key.peer_mac = (vif_to_sdata(vif))->u.mgd.bssid;
+					/* Cache the BSSID for DISABLE_KEY, which
+					   may arrive after disassociation. */
+					memcpy(uvif->bssid, (vif_to_sdata(vif)->u.mgd.bssid), ETH_ALEN);
+					_80211IF_DEBUG("%s-80211IF: ADD PEER KEY. vif_index = %d, keyidx = %d, keytype = %d, cipher_type = %d\n", dev->name, vif_index, key_conf->keyidx, key_type, cipher_type);
+					uccp310wlan_prog_peer_key(vif_index, vif->addr, KEY_CTRL_ADD, key_conf->keyidx, key_type, cipher_type, &sec_key);
+				} else if (vif->type == NL80211_IFTYPE_AP) {
+					_80211IF_DEBUG("%s-80211IF: ADD IF KEY. vif_index = %d, keyidx = %d, cipher_type = %d\n", dev->name, vif_index, key_conf->keyidx, cipher_type);
+					uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_ADD, key_conf->keyidx, cipher_type, &sec_key);
+				} else {
+					_80211IF_DEBUG("%s-80211IF: ADD IF KEY. vif_index = %d, keyidx = %d, cipher_type = %d\n", dev->name, vif_index, key_conf->keyidx, cipher_type);
+					uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_ADD, key_conf->keyidx, cipher_type, &sec_key);
+				}
+			}
+		}
+	} else if (cmd == DISABLE_KEY) {
+		if ((cipher_type == CIPHER_TYPE_WEP40) || (cipher_type == CIPHER_TYPE_WEP104)) {
+			uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_DEL, key_conf->keyidx, cipher_type, &sec_key);
+		} else if (sta) {
+			sec_key.peer_mac = sta->addr;
+			if (key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+				key_type = KEY_TYPE_UCAST;
+			else
+				key_type = KEY_TYPE_BCAST;
+			uccp310wlan_prog_peer_key(vif_index, vif->addr, KEY_CTRL_DEL, key_conf->keyidx, key_type, cipher_type, &sec_key);
+		} else {
+			if (vif->type == NL80211_IFTYPE_STATION) {
+				/* Use the BSSID cached at SET_KEY time. */
+				sec_key.peer_mac = uvif->bssid;
+				uccp310wlan_prog_peer_key(vif_index, vif->addr, KEY_CTRL_DEL, key_conf->keyidx, KEY_TYPE_BCAST, cipher_type, &sec_key);
+			} else if (vif->type == NL80211_IFTYPE_AP) {
+				uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_DEL, key_conf->keyidx, cipher_type, &sec_key);
+			} else {
+				uccp310wlan_prog_if_key(vif_index, vif->addr, KEY_CTRL_DEL, key_conf->keyidx, cipher_type, &sec_key);
+			}
+		}
+	}
+	mutex_unlock(&dev->mutex);
+
+out:
+	return result;
+}
+
+
+/*
+ * get_stats() - mac80211 callback for low-level statistics.
+ * Not implemented yet; reports success and leaves *stats untouched.
+ */
+static int get_stats(struct ieee80211_hw *hw,
+		struct ieee80211_low_level_stats *stats)
+{
+
+	/*TODO */
+	return 0;
+}
+
+/*
+ * bss_info_changed() - mac80211 callback: forward BSS configuration
+ * changes to the UMAC core.
+ *
+ * In production test mode the update is swallowed; instead the IBSS is
+ * pinned to a fixed channel so no merge scans are triggered.
+ */
+static void bss_info_changed(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif,
+		struct ieee80211_bss_conf *bss_conf,
+		unsigned int changed)
+{
+	struct mac80211_dev   *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	if (wifi->params.production_test) {
+		/* Prevent IBSS merge scan in production test mode */
+		struct ieee80211_sub_if_data *sdata;
+		sdata = vif_to_sdata(vif);
+		sdata->u.ibss.fixed_channel = 1;
+		mutex_unlock(&dev->mutex);
+		return;
+	}
+
+	uccp310wlan_vif_bss_info_changed((struct umac_vif *)&vif->drv_priv, bss_conf, changed);
+	mutex_unlock(&dev->mutex);
+	return;
+}
+
+/* mac80211 callback: software scan is starting. Currently log-only. */
+static void sw_scan_start(struct ieee80211_hw *hw)
+{
+	_80211IF_DEBUG("%s-80211IF: scan started\n", ((struct mac80211_dev *)(hw->priv))->name);
+
+	/*
+	 * TODO::
+	 */
+
+}
+
+/* mac80211 callback: software scan has finished. Currently log-only. */
+static void sw_scan_complete(struct ieee80211_hw *hw)
+{
+	_80211IF_DEBUG("%s-80211IF: scan stopped\n", ((struct mac80211_dev *)(hw->priv))->name);
+	/*
+	 * TODO::
+	 */
+}
+
+/*
+ * init_hw() - fill in the ieee80211_hw capabilities before
+ * ieee80211_register_hw(): supported interface modes, HW flags,
+ * queue/rate limits, supported bands and the per-VIF MAC addresses.
+ */
+static void init_hw(struct ieee80211_hw    *hw)
+{
+	struct mac80211_dev  *dev = (struct mac80211_dev *)hw->priv;
+	/* Supported Interface Types and other Default values*/
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+
+	hw->flags = IEEE80211_HW_SIGNAL_DBM  |
+		IEEE80211_HW_SUPPORTS_PS ; /* umac */
+	hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+	hw->flags |= IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+	hw->flags |= IEEE80211_HW_SUPPORTS_PER_STA_GTK;
+
+	hw->max_listen_interval = 10; /* umac */
+	hw->max_rates = 4; /* umac */
+	hw->max_rate_tries = 5; /* umac */
+	hw->channel_change_time = 5000; /* umac */
+	hw->queues = 4; /* umac */
+
+	/*size */
+	hw->extra_tx_headroom = 0; /* umac */
+	hw->vif_data_size = sizeof(struct umac_vif);
+	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2ghz;
+	if (wifi->params.dot11a_support)
+		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5ghz;
+
+	/* NULL selects mac80211's default rate-control algorithm. */
+	hw->rate_control_algorithm = NULL;
+	memset(hw->wiphy->addr_mask, 0, sizeof(hw->wiphy->addr_mask));
+	if (wifi->params.num_vifs == 1) {
+		hw->wiphy->addresses = NULL;
+		SET_IEEE80211_PERM_ADDR(hw, dev->if_mac_addresses[0].addr);
+	} else {
+		/* Multiple VIFs: expose the full per-VIF address table. */
+		hw->wiphy->n_addresses = wifi->params.num_vifs;
+		hw->wiphy->addresses = dev->if_mac_addresses;
+	}
+	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+}
+/* mac80211 driver callbacks; unimplemented optional hooks are NULL. */
+static struct ieee80211_ops ops = {
+	.tx                 = tx,
+	.start              = start,
+	.stop               = stop,
+	.add_interface      = add_interface,
+	.remove_interface   = remove_interface,
+	.config             = config,
+	.prepare_multicast  = prepare_multicast,
+	.configure_filter   = configure_filter,
+	.sw_scan_start      = sw_scan_start,
+	.sw_scan_complete   = sw_scan_complete,
+	.get_stats          = get_stats,
+	.sta_notify         = NULL,
+	.conf_tx            = conf_vif_tx,
+	.bss_info_changed   = bss_info_changed,
+	.set_tim            = NULL,
+	.set_key            = set_key,
+	.hw_scan            = NULL,
+	.get_tkip_seq       = NULL,
+	.set_rts_threshold  = NULL,
+	.tx_last_beacon     = NULL,
+	.ampdu_action       = NULL,
+};
+
+/* Unregister and free the mac80211 hardware; clears wifi->hw. */
+static void uccp310wlan_exit(void)
+{
+	ieee80211_unregister_hw(wifi->hw);
+	ieee80211_free_hw(wifi->hw);
+	wifi->hw = NULL;
+}
+
+/*
+ * uccp310wlan_init() - allocate the ieee80211_hw, derive the per-VIF
+ * MAC addresses from the module's mac_addr string, initialize HW
+ * capabilities and register with mac80211.
+ *
+ * Returns 0 on success or a negative errno; on failure the hw is freed.
+ */
+static int uccp310wlan_init(void)
+{
+	struct ieee80211_hw *hw;
+	int error;
+	struct mac80211_dev  *dev;
+	int i;
+	unsigned char addr[ETH_ALEN];
+
+	/*Allocate new hardware device*/
+	hw = ieee80211_alloc_hw(sizeof(struct mac80211_dev), &ops);
+	if (hw == NULL) {
+		printk(KERN_ERR "Failed to allocate memory for ieee80211_hw\n");
+		error = -ENOMEM;
+		goto out;
+	}
+	dev = (struct mac80211_dev *)hw->priv;
+
+	/*TODO : Set dev->dev */
+
+	conv_str_to_byte(addr, mac_addr, ETH_ALEN);
+	printk(KERN_INFO "MAC ADDR: %pM\n", addr);
+	/* NOTE(review): dev->dev is never assigned before this call (see
+	   TODO above), so the wiphy gets a NULL parent device — confirm. */
+	SET_IEEE80211_DEV(hw, dev->dev);
+
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->bcast_lock);
+	dev->state = STOPPED;
+	dev->active_vifs = 0;
+	dev->txpower = DEFAULT_TX_POWER;
+	strcpy(dev->name, "UCCP310WIFI");
+	/* TODO : dev->wlan.default_keyid = -1;*/
+
+	/* Derive one MAC address per VIF by bumping the last octet.
+	   NOTE(review): addr[5]++ may wrap past 0xff — confirm acceptable. */
+	for (i = 0; i < wifi->params.num_vifs; i++) {
+		memcpy(dev->if_mac_addresses[i].addr, addr, ETH_ALEN);
+		addr[5]++;
+	}
+	/* Initialize HW parameters */
+	init_hw(hw);
+
+	/*Register hardware*/
+	error = ieee80211_register_hw(hw);
+
+	/* Production test hack: Set all channel flags to 0 to allow IBSS creation
+	   in all channels */
+	if (wifi->params.production_test && !error) {
+		enum ieee80211_band band;
+		struct ieee80211_supported_band *sband;
+		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+			sband = hw->wiphy->bands[band];
+			if (sband)
+				for (i = 0; i < sband->n_channels; i++)
+					sband->channels[i].flags = 0;
+		}
+	}
+	if (!error) {
+		wifi->hw = hw;
+		dev->hw = hw;
+		dev->params = &wifi->params;
+		dev->stats = &wifi->stats;
+	} else
+		ieee80211_free_hw(hw);
+out:
+	return error;
+
+}
+
+/*
+ * proc_read() - seq_file show callback for /proc/umac/params: dumps the
+ * tunable parameters, RX/PHY statistics and version information.
+ *
+ * Fixes vs. original: the return type was ssize_t, but single_open()
+ * takes an 'int (*)(struct seq_file *, void *)' show function, so the
+ * function-pointer types did not match. Also, when wifi->hw is NULL the
+ * LMAC/firmware versions are unknown, so report "UNKNOWN" instead of
+ * printing a stale version string.
+ */
+static int proc_read(struct seq_file *m, void *v)
+{
+	seq_printf(m, "************* PARAMS ***********\n");
+	seq_printf(m, "dot11a_support = %d\n", wifi->params.dot11a_support);
+	seq_printf(m, "sensitivity = %d\n", wifi->params.ed_sensitivity);
+	seq_printf(m, "auto_sensitivity = %d\n", wifi->params.auto_sensitivity);
+	seq_printf(m, "rf_params = "RFPARAMSTR"\n", RFPARAM2STR(wifi->params.rf_params));
+	seq_printf(m, "production_test = %d\n", wifi->params.production_test);
+	seq_printf(m, "show_phy_stats = %d\n", wifi->params.show_phy_stats);
+	seq_printf(m, "num_vifs = %d\n", wifi->params.num_vifs);
+	seq_printf(m, "************* STATS ***********\n");
+	seq_printf(m, "rx_packet_count = %d\n", wifi->stats.rx_packet_count);
+	if (wifi->params.show_phy_stats) {
+		seq_printf(m, "ofdm_rx_crc_success_cnt = %d\n", wifi->stats.ofdm_rx_crc_success_cnt);
+		seq_printf(m, "ofdm_rx_crc_fail_cnt = %d\n", wifi->stats.ofdm_rx_crc_fail_cnt);
+		seq_printf(m, "ofdm_rx_false_trig_cnt = %d\n", wifi->stats.ofdm_rx_false_trig_cnt);
+		seq_printf(m, "ofdm_rx_header_fail_cnt = %d\n", wifi->stats.ofdm_rx_header_fail_cnt);
+		seq_printf(m, "dsss_rx_crc_success_cnt = %d\n", wifi->stats.dsss_rx_crc_success_cnt);
+		seq_printf(m, "dsss_rx_crc_fail_cnt = %d\n", wifi->stats.dsss_rx_crc_fail_cnt);
+		seq_printf(m, "dsss_rx_false_trig_cnt = %d\n", wifi->stats.dsss_rx_false_trig_cnt);
+		seq_printf(m, "dsss_rx_header_fail_cnt = %d\n", wifi->stats.dsss_rx_header_fail_cnt);
+		seq_printf(m, "ed_cnt = %d\n", wifi->stats.ed_cnt);
+		seq_printf(m, "cca_fail_cnt = %d\n", wifi->stats.cca_fail_cnt);
+		seq_printf(m, "pdout_val = %d\n", wifi->stats.pdout_val);
+	}
+	seq_printf(m, "current sensitivity = %d\n", wifi->stats.current_sensitivity);
+	seq_printf(m, "************* VERSION ***********\n");
+	seq_printf(m, "UMAC_VERSION = %s\n", UMAC_VERSION);
+
+	/* Versions are only valid once the device has been started. */
+	if (!wifi->hw || (((struct mac80211_dev *)(wifi->hw->priv))->state != STARTED)) {
+		seq_printf(m, "LMAC_VERSION = %s\n", "UNKNOWN");
+		seq_printf(m, "Firmware version = %s\n", "UNKNOWN");
+	} else {
+		seq_printf(m, "LMAC_VERSION = %s\n", wifi->stats.uccp310_lmac_version);
+		seq_printf(m, "Firmware version= %d.%d\n", (wifi->stats.uccp310_lmac_version[0] - '0'), (wifi->stats.uccp310_lmac_version[2] - '0'));
+	}
+#ifdef CONFIG_TIMING_DEBUG
+	{
+		/* Print the deltas between the last 20 IRQ timestamps,
+		   newest first, handling 32-bit counter wrap. */
+		unsigned long temp[20];
+		unsigned long i, j, prev, curr;
+		seq_printf(m, "************* TIMING DEBUG ***********\n");
+		spin_lock_irqsave(&timing_lock, j);
+		i = irq_ts_index;
+		memcpy(temp, irq_timestamp, 20 * sizeof(unsigned long));
+		spin_unlock_irqrestore(&timing_lock, j);
+		if (i == 0)
+			i = 19;
+		else
+			i = i - 1;
+		for (j = 0; j < 20; j++) {
+			if (j != 0) {
+				curr = temp[i];
+				if (i == 0)
+					prev = temp[19];
+				else
+					prev = temp[i-1];
+
+				if (curr > prev)
+					seq_printf(m, "%d ", (unsigned int)(curr - prev));
+				else
+					seq_printf(m, "%d ", (unsigned int)((0xFFFFFFFF - prev) + curr));
+			}
+
+			if (i == 0)
+				i = 19;
+			else
+				i = i - 1;
+		}
+		seq_printf(m, "\n");
+	}
+#endif
+	return 0;
+}
+
+/*
+ * proc_write() - parse "name=value" commands written to
+ * /proc/umac/params and update the driver parameters, re-initializing
+ * the UMAC where a parameter change requires it.
+ *
+ * Fixes vs. original: the length clamp used '>' so a write of exactly
+ * sizeof(buf) bytes made buf[count] = '\0' overflow the buffer (now
+ * '>='); the kstrtoul()/kstrtol() return codes were ignored, so a
+ * malformed number left val/sval uninitialized before use; and the
+ * 'val >= 0' tests on an unsigned long were always true.
+ */
+static ssize_t proc_write(struct file *file, const char __user *buffer,
+			  size_t count, loff_t *ppos)
+{
+	char buf[100];
+	int ret;
+	unsigned long val;
+	long sval;
+
+	if (count >= sizeof(buf))
+		count = sizeof(buf)-1;
+
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	if (!strncmp(buf, "dot11a_support=", 15)) {
+		ret = kstrtoul(buf+15, 0, &val);
+		if (!ret && ((val == 0) || (val == 1)) && (wifi->params.dot11a_support != val)) {
+			wifi->params.dot11a_support = val;
+			if (wifi->hw) {
+				uccp310wlan_exit();
+				wifi->hw = NULL;
+			}
+			printk(KERN_ERR "Re-intializing UMAC ..\n");
+			uccp310wlan_init();
+		} else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "sensitivity=", 12)) {
+		/* Valid values: multiples of 3 in [-96, -51] dBm. */
+		ret = kstrtol(buf+12, 0, &sval);
+		if (ret || sval > -51 || sval < -96 || (sval % 3 != 0))
+			printk(KERN_ERR "Invalid parameter value.\n");
+		else
+			wifi->params.ed_sensitivity = sval;
+	} else if (!strncmp(buf, "auto_sensitivity=", 17)) {
+		ret = kstrtoul(buf+17, 0, &val);
+		if (!ret && ((val == 0) || (val == 1)))
+			wifi->params.auto_sensitivity = val;
+		else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "production_test=", 16)) {
+		ret = kstrtoul(buf+16, 0, &val);
+		if (!ret && ((val == 0) || (val == 1))) {
+			if (wifi->params.production_test != val) {
+				/*
+				 * NOTE(review): this tests the OLD value, so the
+				 * phy-stats/num_vifs defaults are forced when
+				 * LEAVING production test mode — confirm intended.
+				 */
+				if (wifi->params.production_test) {
+					wifi->params.show_phy_stats = 1;
+					wifi->params.num_vifs = 1;
+				}
+				wifi->params.production_test = val;
+				if (wifi->hw) {
+					uccp310wlan_exit();
+					wifi->hw = NULL;
+				}
+				printk(KERN_ERR "Re-intializing UMAC ..\n");
+				uccp310wlan_init();
+			}
+		} else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "num_vifs=", 9)) {
+		ret = kstrtoul(buf+9, 0, &val);
+		if (!ret && val > 0 && val <= MAX_VIFS) {
+			if (wifi->params.num_vifs != val) {
+				if (wifi->hw) {
+					uccp310wlan_exit();
+					wifi->hw = NULL;
+				}
+				printk(KERN_ERR "Re-intializing UMAC ..\n");
+				wifi->params.num_vifs = val;
+				uccp310wlan_init();
+			}
+		}
+	} else if (!strncmp(buf, "show_phy_stats=", 15)) {
+		ret = kstrtoul(buf+15, 0, &val);
+		if (!ret && ((val == 0) || (val == 1)))
+			wifi->params.show_phy_stats = val;
+		else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "rf_params=", 10)) {
+		conv_str_to_byte(wifi->params.rf_params, buf+10, 8);
+	} else if (!strncmp(buf, "rx_packet_count=", 16)) {
+		ret = kstrtoul(buf+16, 0, &val);
+		if (!ret)
+			wifi->stats.rx_packet_count = val;
+		else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "pdout_val=", 10)) {
+		ret = kstrtoul(buf+10, 0, &val);
+		if (!ret)
+			wifi->stats.pdout_val = val;
+		else
+			printk(KERN_ERR "Invalid parameter value.\n");
+	} else if (!strncmp(buf, "get_rx_stats=", 13)) {
+		/* Value is ignored; any write triggers a stats request. */
+		uccp310wlan_prog_mib_stats();
+	} else {
+		printk(KERN_ERR "Invalid parameter name.\n");
+	}
+	return count;
+}
+
+/* Open hook for /proc/umac/params: wire up the seq_file show callback. */
+static int proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_read, NULL);
+}
+
+/*
+ * File operations for /proc/umac/params.
+ *
+ * Fix vs. original: .release was missing, so the seq_file state
+ * allocated by single_open() leaked on every close of the proc file.
+ */
+static const struct file_operations params_fops = {
+	.open = proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = proc_write,
+	.release = single_release,
+};
+
+/*
+ * proc_init() - allocate the global wifi context, create the
+ * /proc/umac/params entry and set the default WLAN parameters.
+ *
+ * Returns 0 on success or -ENOMEM; on failure everything created so
+ * far is torn down again.
+ */
+static int proc_init(void)
+{
+	struct proc_dir_entry *entry;
+	int err = 0;
+
+	wifi = kzalloc(sizeof(struct wifi_dev), GFP_KERNEL);
+	if (!wifi) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	wifi->umac_proc_dir_entry = proc_mkdir("umac", NULL);
+	if (!wifi->umac_proc_dir_entry) {
+		printk(KERN_ERR "Failed to create proc dir\n");
+		err = -ENOMEM;
+		goto  proc_dir_fail;
+	}
+
+	entry = proc_create("params", 0644, wifi->umac_proc_dir_entry,
+			    &params_fops);
+	if (!entry) {
+		printk(KERN_ERR "Failed to create proc entry\n");
+		err = -ENOMEM;
+		goto  proc_entry_fail;
+	}
+
+	/* Initialize WLAN params */
+	memset(&wifi->params, 0, sizeof(struct wifi_params));
+	memset(wifi->params.rf_params, 0xff, sizeof(wifi->params.rf_params));
+	wifi->params.ed_sensitivity = -84;
+	wifi->params.rf_params[0] = 0x3B;
+	wifi->params.auto_sensitivity = 1;
+	wifi->params.num_vifs = 1;
+
+	return err;
+
+/* Error unwinding in reverse order of creation. */
+proc_entry_fail:
+	remove_proc_entry("umac", NULL);
+proc_dir_fail:
+	kfree(wifi);
+out:
+	return err;
+
+}
+
+/* Remove the /proc/umac entries and free the global wifi context. */
+static void proc_exit(void)
+{
+	remove_proc_entry("params", wifi->umac_proc_dir_entry);
+	remove_proc_entry("umac", NULL);
+	kfree(wifi);
+}
+
+
+/*
+ * _uccp310wlan_80211if_init() - module-level entry point: set up the
+ * proc interface, then bring up the mac80211 device.
+ *
+ * Fix vs. original: if uccp310wlan_init() failed, the proc entries and
+ * the wifi context allocated by proc_init() were leaked; tear them down
+ * on the error path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int _uccp310wlan_80211if_init(void)
+{
+	int error;
+	error = proc_init();
+	if (error)
+		return error;
+	error = uccp310wlan_init();
+	if (error)
+		proc_exit();
+
+	return error;
+}
+
+/*
+ * _uccp310wlan_80211if_exit() - module-level teardown: unregister the
+ * mac80211 device (if still registered) and remove the proc interface.
+ */
+void _uccp310wlan_80211if_exit(void)
+{
+	if (wifi->hw)
+		uccp310wlan_exit();
+	proc_exit();
+}
diff --git a/drivers/net/wireless/uccp310wlan/Kconfig b/drivers/net/wireless/uccp310wlan/Kconfig
new file mode 100644
index 0000000..50b4c0a
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/Kconfig
@@ -0,0 +1,16 @@
+config UCCP310WLAN
+	tristate "ImgTec Universal Communications Core Platform (UCCP310) WLAN"
+	depends on SOC_TZ1090
+	depends on MAC80211
+	help
+	  Select to build the driver supporting ImgTec Universal Communications
+	  Core Platform (UCCP310) WLAN.
+
+	  If you select to build as a module it will be called uccp310wlan.
+
+config UCCP310WLAN_DEBUG
+	bool "Debugging messages"
+	depends on UCCP310WLAN
+	help
+	  Select this option to build the UCCP310 WLAN driver with debugging
+	  messages.
diff --git a/drivers/net/wireless/uccp310wlan/Makefile b/drivers/net/wireless/uccp310wlan/Makefile
new file mode 100644
index 0000000..b0d6012
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_UCCP310WLAN)		+= uccp310wlan.o
+uccp310wlan-objs			:= 80211_if.o core.o hal_hostport.o lmac_if.o tx.o
+
+ccflags-$(CONFIG_UCCP310WLAN_DEBUG)	+= -DDEBUG
diff --git a/drivers/net/wireless/uccp310wlan/core.c b/drivers/net/wireless/uccp310wlan/core.c
new file mode 100644
index 0000000..b7496c5
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/core.c
@@ -0,0 +1,527 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : core.c
+***
+*** File Description:
+*** This file contains the source functions for UMAC core
+***
+******************************************************************************
+*END**************************************************************************/
+#include "umac.h"
+
+#ifdef CONFIG_CORE_DEBUG
+#define UMAC_DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define UMAC_DEBUG(...) do { } while (0)
+#endif
+
+#define UMAC_PRINT(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+
+/*
+ * wait_for_reset_complete() - sleep one tick at a time until the LMAC
+ * signals reset completion or RESET_TIMEOUT_TICKS ticks elapse.
+ *
+ * Fix vs. original: the goto-loop wrote current->state directly, which
+ * lacks the memory barrier set_current_state() provides; use
+ * schedule_timeout_interruptible() instead.
+ *
+ * Returns 0 on completion, -1 on timeout.
+ */
+static int wait_for_reset_complete(struct mac80211_dev *dev)
+{
+	int count = 0;
+
+	while (!dev->reset_complete && count < RESET_TIMEOUT_TICKS) {
+		count++;
+		schedule_timeout_interruptible(1);
+	}
+
+	if (!dev->reset_complete) {
+		UMAC_PRINT("%s-UMAC: Warning: Didn't get reset complete after %ld timer ticks\n", dev->name, RESET_TIMEOUT_TICKS);
+		return -1;
+	}
+
+	UMAC_PRINT("%s-UMAC: Reset complete after %d timer ticks\n", dev->name, count);
+	return 0;
+
+}
+
+/*
+ * vif_bcn_timer_expiry() - software beacon timer.
+ *
+ * For AP interfaces, queues the beacon plus any PS-buffered broadcast
+ * frames as one burst (skb->priority is abused as a "more frames
+ * follow" marker); for IBSS, transmits just the beacon. Re-arms itself
+ * every beacon interval while the interface is running and beaconing.
+ *
+ * Fix vs. original: ieee80211_beacon_get() can return NULL (e.g. no
+ * beacon template installed yet); the result was dereferenced
+ * unconditionally in both branches.
+ */
+static void vif_bcn_timer_expiry(unsigned long data)
+{
+	struct umac_vif *uvif = (struct umac_vif *)data;
+	struct ieee80211_sub_if_data *sdata;
+	struct sk_buff *skb, *temp;
+	struct sk_buff_head bcast_frames;
+	unsigned long flags;
+
+	sdata = vif_to_sdata(uvif->vif);
+
+	if (!ieee80211_sdata_running(sdata))
+		return;
+	if (uvif->vif->bss_conf.enable_beacon == false)
+		return;
+
+	if (uvif->vif->type == NL80211_IFTYPE_AP) {
+		temp = skb = ieee80211_beacon_get(uvif->dev->hw, uvif->vif);
+		if (skb) {
+			skb_queue_head_init(&bcast_frames);
+			skb->priority = 1;
+			skb_queue_tail(&bcast_frames, skb);
+
+			skb = ieee80211_get_buffered_bc(uvif->dev->hw, uvif->vif);
+			while (skb) {
+				skb->priority = 1; /* Hack: skb->priority is used to indicate more frames */
+				skb_queue_tail(&bcast_frames, skb);
+				temp = skb;
+				skb = ieee80211_get_buffered_bc(uvif->dev->hw, uvif->vif);
+			}
+
+			/* Clear the marker on the last frame of the burst. */
+			temp->priority = 0;
+
+			spin_lock_irqsave(&uvif->dev->bcast_lock, flags);
+			while ((skb = skb_dequeue(&bcast_frames)))
+				uccp310wlan_tx_frame(skb, uvif->dev, true);
+			spin_unlock_irqrestore(&uvif->dev->bcast_lock, flags);
+		}
+	} else {
+		skb = ieee80211_beacon_get(uvif->dev->hw, uvif->vif);
+		if (skb)
+			uccp310wlan_tx_frame(skb, uvif->dev, true);
+
+		/* TODO: IBSS PS handling */
+	}
+
+	mod_timer(&uvif->bcn_timer, jiffies + msecs_to_jiffies(uvif->vif->bss_conf.beacon_int));
+
+}
+/*
+ * uccp310wlan_core_init() - bring up the UMAC core: initialize the
+ * LMAC interface, enable (reset) the LMAC, program the global
+ * configuration and TX power, then initialize the TX path.
+ *
+ * Returns 0 on success, -1 if the LMAC never signals reset complete.
+ */
+int uccp310wlan_core_init(struct mac80211_dev *dev)
+{
+
+	UMAC_DEBUG("%s-UMAC: Init called\n", dev->name);
+	uccp310wlan_lmac_if_init(dev, dev->name);
+
+	/* Enable the LMAC, set defaults and initialize TX */
+	dev->reset_complete = 0;
+	UMAC_PRINT("%s-UMAC: Reset (ENABLE)\n", dev->name);
+	uccp310wlan_prog_reset(LMAC_ENABLE);
+
+	if (wait_for_reset_complete(dev) < 0) {
+		uccp310wlan_lmac_if_deinit();
+		return -1;
+	}
+
+	uccp310wlan_prog_global_cfg(512, /* Rx MSDU life time in msecs */
+			512, /* Tx MSDU life time in msecs */
+			dev->params->ed_sensitivity,
+			dev->params->auto_sensitivity,
+			dev->params->rf_params);
+
+	uccp310wlan_prog_txpower(dev->txpower);
+	uccp310wlan_tx_init(dev);
+
+	return 0;
+}
+
+/*
+ * uccp310wlan_core_deinit() - tear down the UMAC core: stop the TX
+ * path, disable the LMAC (waiting best-effort for the reset ack) and
+ * release the LMAC interface.
+ */
+void uccp310wlan_core_deinit(struct mac80211_dev *dev)
+{
+	UMAC_DEBUG("%s-UMAC: De-init called\n", dev->name);
+
+	/* De initialize tx  and disable LMAC*/
+	uccp310wlan_tx_deinit(dev);
+
+	/* Disable the LMAC */
+	dev->reset_complete = 0;
+	UMAC_PRINT("%s-UMAC: Reset (DISABLE)\n", dev->name);
+	uccp310wlan_prog_reset(LMAC_DISABLE);
+
+	/* Timeout here is non-fatal; we are shutting down anyway. */
+	wait_for_reset_complete(dev);
+
+	uccp310wlan_lmac_if_deinit();
+
+	return;
+}
+
+/*
+ * uccp310wlan_vif_add() - program a new virtual interface into the
+ * LMAC: map the mac80211 interface type to an LMAC mode, set up the
+ * per-type state (NoA queue for STA, beacon timer for IBSS/AP),
+ * reprogram the retry counts and, for AP, push the cached EDCA params.
+ */
+void uccp310wlan_vif_add(struct umac_vif *uvif)
+{
+	unsigned int type;
+	struct ieee80211_conf *conf = &uvif->dev->hw->conf;
+
+	UMAC_DEBUG("%s-UMAC: Add VIF %d Type = %d\n", uvif->dev->name, uvif->vif_index, uvif->vif->type);
+
+	uvif->config.atim_window = uvif->config.bcn_lost_cnt = uvif->config.aid = 0;
+
+	switch (uvif->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		type = IF_MODE_STA_BSS;
+		/* NoA buffering is only used in STA mode. */
+		uvif->noa_active = 0;
+		skb_queue_head_init(&uvif->noa_que);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		type = IF_MODE_STA_IBSS;
+		init_timer(&uvif->bcn_timer);
+		uvif->bcn_timer.data = (unsigned long)uvif;
+		uvif->bcn_timer.function = vif_bcn_timer_expiry;
+		break;
+	case NL80211_IFTYPE_AP:
+		type = IF_MODE_AP;
+		init_timer(&uvif->bcn_timer);
+		uvif->bcn_timer.data = (unsigned long)uvif;
+		uvif->bcn_timer.function = vif_bcn_timer_expiry;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+	uccp310wlan_prog_vif_ctrl(uvif->vif_index,
+			uvif->vif->addr,
+			type,
+			IF_ADD);
+
+	/* Reprogram retry counts */
+	uccp310wlan_prog_vif_short_retry(uvif->vif_index, uvif->vif->addr, conf->short_frame_max_tx_count);
+	uccp310wlan_prog_vif_long_retry(uvif->vif_index, uvif->vif->addr, conf->long_frame_max_tx_count);
+
+	if (uvif->vif->type == NL80211_IFTYPE_AP) {
+		/* Program the EDCA params */
+		/* For AP, conf_tx arrives before add_interface, so the
+		   parameters were cached; push them to the LMAC now. */
+		int queue;
+		for (queue = 0; queue < 4; queue++)
+			uccp310wlan_prog_txq_params(uvif->vif_index,
+					uvif->vif->addr,
+					queue,
+					uvif->config.edca_params[queue].aifs,
+					uvif->config.edca_params[queue].txop,
+					uvif->config.edca_params[queue].cwmin,
+					uvif->config.edca_params[queue].cwmax);
+
+
+	}
+}
+
+/*
+ * uccp310wlan_vif_remove() - undo uccp310wlan_vif_add(): stop the
+ * beacon timer (IBSS/AP), drain any buffered NoA frames and remove the
+ * interface from the LMAC.
+ */
+void uccp310wlan_vif_remove(struct umac_vif *uvif)
+{
+	struct sk_buff *skb;
+	unsigned int type;
+	unsigned long flags;
+	UMAC_DEBUG("%s-UMAC: Remove VIF %d called\n", uvif->dev->name, uvif->vif_index);
+
+	switch (uvif->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		type = IF_MODE_STA_BSS;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		type = IF_MODE_STA_IBSS;
+		del_timer(&uvif->bcn_timer);
+		break;
+	case NL80211_IFTYPE_AP:
+		type = IF_MODE_AP;
+		del_timer(&uvif->bcn_timer);
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	/*
+	 * NOTE(review): noa_que is only initialised in uccp310wlan_vif_add()
+	 * for STATION interfaces; draining it here for ADHOC/AP relies on
+	 * the drv_priv area being zero-initialised — confirm this is safe.
+	 */
+	spin_lock_irqsave(&uvif->noa_que.lock, flags);
+	while ((skb = __skb_dequeue(&uvif->noa_que)))
+		dev_kfree_skb(skb);
+	spin_unlock_irqrestore(&uvif->noa_que.lock, flags);
+
+	uccp310wlan_prog_vif_ctrl(uvif->vif_index,
+			uvif->vif->addr,
+			type,
+			IF_REM);
+
+}
+
+/*
+ * uccp310wlan_vif_set_edca_params() - cache and (if the VIF is already
+ * active in the LMAC) program the EDCA parameters for one TX queue.
+ *
+ * The incoming mac80211 queue index (0=VO..3=BK) is remapped to the
+ * LMAC's ordering (3=VO..0=BK). In production test mode fixed
+ * contention-free values are cached instead of the supplied ones.
+ */
+void uccp310wlan_vif_set_edca_params(unsigned short queue, struct umac_vif *uvif, const struct ieee80211_tx_queue_params *params, unsigned int vif_active)
+{
+	switch (queue) {
+	case 0:
+		queue = 3; /* Voice */
+		break;
+	case 1:
+		queue = 2; /* Video */
+		break;
+	case 2:
+		queue = 1; /* Best effort */
+		break;
+	case 3:
+		queue = 0; /* Back groud */
+		break;
+	}
+
+	/* NOTE(review): the debug line guards against uvif->dev being NULL
+	   (passing 0 for %s), yet uvif->dev->params is dereferenced
+	   unconditionally just below — confirm dev can never be NULL. */
+	UMAC_DEBUG("%s-UMAC: Set EDCA params for VIF %d, Values: %d, %d, %d, %d, %d\n", uvif->dev ? uvif->dev->name : 0, uvif->vif_index, queue,
+			params->aifs, params->txop,
+			params->cw_min, params->cw_max);
+	if (uvif->dev->params->production_test == 0) {
+		/* arbitration interframe space [0..255] */
+		uvif->config.edca_params[queue].aifs = params->aifs;
+
+		/* maximum burst time in units of 32 usecs, 0 meaning disabled */
+		uvif->config.edca_params[queue].txop = params->txop;
+
+		/* minimum contention window in units of  2^n-1 */
+		uvif->config.edca_params[queue].cwmin = params->cw_min;
+
+		/*  maximum contention window in units of 2^n-1 */
+		uvif->config.edca_params[queue].cwmax = params->cw_max;
+	} else {
+		uvif->config.edca_params[queue].aifs = 3;
+		uvif->config.edca_params[queue].txop = 0;
+		uvif->config.edca_params[queue].cwmin = 0;
+		uvif->config.edca_params[queue].cwmax = 0;
+	}
+	/* For the AP case, EDCA params are set before ADD interface is called.
+	   Since this is not supported, we simply store the params and program them
+	   to the LMAC after the interface is added */
+	if (!vif_active)
+		return;
+
+	/* Program the txq parameters into the LMAC */
+	uccp310wlan_prog_txq_params(uvif->vif_index,
+			uvif->vif->addr,
+			queue,
+			params->aifs,
+			params->txop,
+			params->cw_min,
+			params->cw_max);
+
+}
+
+/*
+ * uccp310wlan_vif_bss_info_changed() - push changed BSS configuration
+ * (BSSID, basic rates, slot time, association state, beacon timer) to
+ * the LMAC.
+ *
+ * Fixes vs. original: cfg80211_get_bss() can return NULL and also takes
+ * a reference that was never released; the lookup result is now checked
+ * before use and dropped with cfg80211_put_bss() on every exit path.
+ */
+void uccp310wlan_vif_bss_info_changed(struct umac_vif *uvif, struct ieee80211_bss_conf *bss_conf, unsigned int changed)
+{
+	struct cfg80211_bss *cbss;
+	struct ieee80211_bss *bss = NULL;
+	UMAC_DEBUG("%s-UMAC: BSS INFO changed %d, %d, %d\n", uvif->dev->name, uvif->vif_index, uvif->vif->type, changed);
+
+	cbss = cfg80211_get_bss(uvif->dev->hw->wiphy, NULL, bss_conf->bssid, NULL, 0, 0, 0);
+	if (cbss)
+		bss = (void *)cbss->priv;
+
+	if (changed & BSS_CHANGED_BSSID)
+		uccp310wlan_prog_vif_bssid(uvif->vif_index, uvif->vif->addr, (unsigned char *)bss_conf->bssid);
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		if (bss_conf->basic_rates)
+			uccp310wlan_prog_vif_basic_rates(uvif->vif_index, uvif->vif->addr, bss_conf->basic_rates);
+		else
+			uccp310wlan_prog_vif_basic_rates(uvif->vif_index, uvif->vif->addr, 0x153);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		unsigned short queue;
+		uccp310wlan_prog_vif_short_slot(uvif->vif_index,
+				uvif->vif->addr,
+				bss_conf->use_short_slot);
+
+		/* Slot time affects contention timing; reprogram any queue
+		   that has valid cached EDCA parameters. */
+		for (queue = 0; queue < WLAN_AC_MAX_CNT; queue++)
+			if (uvif->config.edca_params[queue].cwmin != 0)
+				uccp310wlan_prog_txq_params(uvif->vif_index,
+						uvif->vif->addr,
+						queue,
+						uvif->config.edca_params[queue].aifs,
+						uvif->config.edca_params[queue].txop,
+						uvif->config.edca_params[queue].cwmin,
+						uvif->config.edca_params[queue].cwmax);
+	}
+
+	switch (uvif->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		if (changed & BSS_CHANGED_ASSOC) {
+			if (bss_conf->assoc) {
+				/* Bit 9 of the capability word carries the
+				   peer's WMM support (0 if the BSS entry was
+				   not found). */
+				unsigned int caps = bss_conf->assoc_capability | ((bss ? bss->wmm_used : 0) << 9);
+				UMAC_DEBUG("%s-UMAC: AID %d, CAPS 0x%04x\n", uvif->dev->name, bss_conf->aid, caps);
+				uccp310wlan_prog_vif_aid(uvif->vif_index,
+							uvif->vif->addr,
+							bss_conf->aid);
+				uccp310wlan_prog_vif_assoc_cap(uvif->vif_index,
+							uvif->vif->addr,
+							caps);
+				uvif->noa_active = 0;
+			}
+		}
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		if (changed & BSS_CHANGED_BEACON_ENABLED) {
+			if (uvif->vif->bss_conf.enable_beacon == true)
+				mod_timer(&uvif->bcn_timer, jiffies + msecs_to_jiffies(uvif->vif->bss_conf.beacon_int));
+			else
+				del_timer(&uvif->bcn_timer);
+		}
+		if (changed & BSS_CHANGED_BEACON_INT) {
+			if (uvif->vif->bss_conf.enable_beacon == true)
+				mod_timer(&uvif->bcn_timer, jiffies + msecs_to_jiffies(uvif->vif->bss_conf.beacon_int));
+		}
+		break;
+	case NL80211_IFTYPE_AP:
+		if (changed & BSS_CHANGED_BEACON_ENABLED) {
+			if (uvif->vif->bss_conf.enable_beacon == true)
+				mod_timer(&uvif->bcn_timer, jiffies + msecs_to_jiffies(uvif->vif->bss_conf.beacon_int));
+			else
+				del_timer(&uvif->bcn_timer);
+		}
+		if (changed & BSS_CHANGED_BEACON_INT) {
+			if (uvif->vif->bss_conf.enable_beacon == true)
+				mod_timer(&uvif->bcn_timer, jiffies + msecs_to_jiffies(uvif->vif->bss_conf.beacon_int));
+		}
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* Drop the reference taken by cfg80211_get_bss(). */
+	if (cbss)
+		cfg80211_put_bss(uvif->dev->hw->wiphy, cbss);
+
+}
+
+/*
+ * Reset-complete callback invoked by the LMAC interface layer.
+ * Records the firmware (LMAC) version string in the device stats and
+ * flags the reset as finished.
+ * NOTE(review): copies exactly 5 bytes -- assumes lmac_version holds at
+ * least 5 valid characters; confirm against the LMAC event format.
+ */
+void uccp310wlan_reset_complete(char *lmac_version, void *context)
+{
+	struct mac80211_dev *dev = (struct mac80211_dev *)context;
+	memcpy(dev->stats->uccp310_lmac_version, lmac_version, 5);
+	/* Guarantee NUL termination of the stored version string */
+	dev->stats->uccp310_lmac_version[5] = '\0';
+	dev->reset_complete = 1;
+}
+
+/*
+ * MIB statistics event callback: mirrors the PHY/MAC counters reported
+ * by the firmware (OFDM/DSSS CRC, false-trigger, ED/CCA counts and the
+ * current sensitivity) into the driver's stats block.
+ */
+void uccp310wlan_mib_stats(struct umac_event_mib_stats *mib_stats, void *context)
+{
+	struct mac80211_dev *dev = (struct mac80211_dev *)context;
+
+	dev->stats->ofdm_rx_crc_success_cnt = mib_stats->ofdm_rx_crc_success_cnt;
+	dev->stats->ofdm_rx_crc_fail_cnt = mib_stats->ofdm_rx_crc_fail_cnt;
+	dev->stats->ofdm_rx_false_trig_cnt = mib_stats->ofdm_rx_false_trig_cnt;
+	dev->stats->ofdm_rx_header_fail_cnt = mib_stats->ofdm_rx_header_fail_cnt;
+	dev->stats->dsss_rx_crc_success_cnt = mib_stats->dsss_rx_crc_success_cnt;
+	dev->stats->dsss_rx_crc_fail_cnt = mib_stats->dsss_rx_crc_fail_cnt;
+	dev->stats->dsss_rx_false_trig_cnt = mib_stats->dsss_rx_false_trig_cnt;
+	dev->stats->ed_cnt = mib_stats->ed_cnt;
+	dev->stats->cca_fail_cnt = mib_stats->cca_fail_cnt;
+	dev->stats->current_sensitivity = mib_stats->sensitivity;
+}
+
+/*
+ * NoA (Notice of Absence) TX gating.
+ *
+ * Three event types share this entry point:
+ *   CMD_TX        - a frame is about to be sent; queue it if NoA is
+ *                   active and transmission is currently not allowed
+ *                   (or frames are already queued, to keep ordering).
+ *   EVENT_TX_DONE - a transmission completed; release the next queued
+ *                   frame if allowed.
+ *   EVENT_NOA     - absence state changed; update flags and either
+ *                   drain or flush the queue.
+ *
+ * NOTE(review): 'msg' is overloaded -- for CMD_TX/EVENT_TX_DONE it
+ * carries the vif index *by value* (hence the (int)msg cast below),
+ * otherwise it points to a struct umac_event_noa.
+ */
+void uccp310wlan_noa_event(int event, void *msg, void *context, struct sk_buff *skb)
+{
+	struct mac80211_dev  *dev = (struct mac80211_dev *)context;
+	struct umac_event_noa *noa = (struct umac_event_noa *)msg;
+	struct ieee80211_vif *vif;
+	struct umac_vif *uvif;
+	unsigned long flags;
+	bool transmit = false;
+
+	rcu_read_lock();
+
+	/* msg is the vif index itself for TX-path events, see note above */
+	if (event == EVENT_TX_DONE || event == CMD_TX)
+		vif = (struct ieee80211_vif *)rcu_dereference(dev->vifs[(int)msg]);
+	else
+		vif = (struct ieee80211_vif *)rcu_dereference(dev->vifs[noa->vif_index]);
+
+	if (vif == NULL) {
+		rcu_read_unlock();
+		return;
+	}
+
+	uvif = (struct umac_vif *)vif->drv_priv;
+
+	/* noa_que.lock also serialises the noa_active/noa_tx_allowed flags */
+	spin_lock_irqsave(&uvif->noa_que.lock, flags);
+
+	if (event == CMD_TX) {
+		if (uvif->noa_active) {
+			/* Keep FIFO order: queue behind any pending frames */
+			if (!uvif->noa_tx_allowed || skb_peek(&uvif->noa_que))
+				skb_queue_tail(&uvif->noa_que, skb);
+			else
+				transmit = true;
+		} else
+			transmit = true;
+	} else if (event == EVENT_TX_DONE) {
+		if (uvif->noa_active && uvif->noa_tx_allowed) {
+			skb = skb_dequeue(&uvif->noa_que);
+			if (skb)
+				transmit = true;
+		}
+	} else { /* event = EVENT_NOA */
+
+		uvif->noa_active = noa->noa_active;
+		if (uvif->noa_active) {
+			printk(KERN_DEBUG "%s: noa active = %d, ap_present = %d\n", dev->name, noa->noa_active, noa->ap_present);
+			uvif->noa_tx_allowed = noa->ap_present;
+			if (uvif->noa_tx_allowed) {
+				skb = skb_dequeue(&uvif->noa_que);
+				if (skb)
+					transmit = true;
+			}
+		} else {
+			printk(KERN_DEBUG "%s: noa active = %d\n", dev->name, noa->noa_active);
+			uvif->noa_tx_allowed = 1;
+			/* Can be done in a better way. For now, just flush the NoA Queue */
+			while ((skb = skb_dequeue(&uvif->noa_que)))
+				dev_kfree_skb_any(skb);
+		}
+	}
+
+	spin_unlock_irqrestore(&uvif->noa_que.lock, flags);
+
+	rcu_read_unlock();
+
+	/* Transmit outside the spinlock/RCU section */
+	if (transmit)
+		uccp310wlan_tx_frame(skb, dev, false);
+
+	return;
+}
+
+/*
+ * RX path: strip the firmware RX control header, build an
+ * ieee80211_rx_status from the event metadata (band, frequency, RSSI,
+ * rate index, beacon timestamp) and hand the frame to mac80211.
+ * Note: 'rx' aliases the control header at the original skb->data and
+ * is still read after the skb_pull() below (the data is not erased,
+ * only the head pointer moves).
+ */
+void uccp310wlan_rx_frame(struct sk_buff *skb, void *context)
+{
+	struct mac80211_dev *dev = (struct mac80211_dev *)context;
+	struct umac_event_rx *rx = (struct umac_event_rx *)(skb->data);
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_rx_status rx_status;
+	int i;
+
+	dev->stats->rx_packet_count++;
+
+	skb_pull(skb, 32); /* Remove RX control information */
+
+#ifdef CONFIG_RX_DEBUG
+	printk(KERN_DEBUG "%s-RX: RX frame, length = %d, RSSI = %d, rate = %d\n", dev->name, rx->buff_len, rx->rssi, rx->rate);
+	/* print_hex_dump(KERN_DEBUG, " ",DUMP_PREFIX_NONE,16,1,skb->data,skb->len,1); */
+#endif
+
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+
+	/* NOTE(review): strips 2 bytes from QoS data frames -- presumably
+	 * a firmware alignment pad before the QoS header; confirm. */
+	if ((ieee80211_is_data_qos(hdr->frame_control)) &&
+			(!ieee80211_is_qos_nullfunc(hdr->frame_control)))
+		skb_pull(skb, 2);
+
+
+	memset(&rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+	rx_status.band = dev->hw->conf.chandef.chan->band;
+	rx_status.freq = dev->hw->conf.chandef.chan->center_freq;
+	rx_status.signal = rx->rssi;
+	rx_status.antenna = 0;
+
+	/* Map the firmware hw rate value back to mac80211's bitrate index */
+	for (i = 0; i < dev->hw->wiphy->bands[dev->hw->conf.chandef.chan->band]->n_bitrates; i++) {
+		if (rx->rate == dev->hw->wiphy->bands[dev->hw->conf.chandef.chan->band]->bitrates[i].hw_value) {
+			rx_status.rate_idx = i;
+			break;
+		}
+	}
+
+	/* Firmware handles decryption and strips the Michael MIC */
+	rx_status.flag |= RX_FLAG_DECRYPTED;
+	rx_status.flag |= RX_FLAG_MMIC_STRIPPED;
+	if (rx->status == RX_MIC_FAILURE)
+		rx_status.flag |= RX_FLAG_MMIC_ERROR;
+
+	/* Beacons carry the TSF timestamp for mac80211's scan/IBSS logic */
+	if (((hdr->frame_control & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) &&
+			((hdr->frame_control & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)) {
+		rx_status.mactime = get_unaligned_le64(rx->timestamp);
+		rx_status.flag |= RX_FLAG_MACTIME_START;
+	}
+
+	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+	ieee80211_rx(dev->hw, skb);
+}
diff --git a/drivers/net/wireless/uccp310wlan/hal.h b/drivers/net/wireless/uccp310wlan/hal.h
new file mode 100644
index 0000000..d274927
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/hal.h
@@ -0,0 +1,62 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : hal.h
+***
+*** File Description:
+*** This file contains Intermodule communication APIs
+***
+******************************************************************************
+*END**************************************************************************/
+
+
+#ifndef _UCCP310WLAN_HAL_H_
+#define _UCCP310WLAN_HAL_H_
+
+#define HOST_MOD_ID 0
+#define UMAC_MOD_ID 1
+#define LMAC_MOD_ID 2
+#define MODULE_MAX 3
+
+#define MAX_MESSAGE_SIZE 2600
+
+/* Callback type for delivering a received buffer to an upper module;
+ * second argument is the sending module id (HOST/UMAC/LMAC_MOD_ID). */
+typedef int (*msg_handler)(void *, unsigned char);
+
+/*
+ * HAL operations table: the transport-specific backend (e.g. the
+ * hostport implementation) fills in the subset of ops it supports.
+ * Buffer-manipulation ops mirror the sk_buff API (get/put/pull/push).
+ */
+struct hal_ops_tag {
+	int (*init)(void);
+	int (*deinit)(void);
+	void (*register_callback)(msg_handler, unsigned char);
+	void (*send)(void*, unsigned char, unsigned char);
+	void *(*get_buff)(unsigned int);
+	void (*put_buff)(void *);
+	unsigned char * (*get_data)(void *);
+	unsigned char * (*get_head)(void *);
+	unsigned int (*get_length)(void *);
+	unsigned int (*get_true_length)(void *);
+	unsigned char * (*put_data)(void *, unsigned int);
+	unsigned char * (*pull_data)(void *, unsigned int);
+	unsigned char * (*push_data)(void *, unsigned int);
+	void (*link_buff)(void *dst, void *src);
+};
+
+/* Singleton ops table provided by the selected HAL backend */
+extern struct hal_ops_tag hal_ops;
+#endif /* _UCCP310WLAN_HAL_H_ */
+
+/* EOF */
diff --git a/drivers/net/wireless/uccp310wlan/hal_hostport.c b/drivers/net/wireless/uccp310wlan/hal_hostport.c
new file mode 100644
index 0000000..b8f4afd
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/hal_hostport.c
@@ -0,0 +1,390 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : hal_hostport.c
+***
+*** File Description:
+*** This file contains the source functions of HAL IF for hostport+shared
+*** memory based communications
+******************************************************************************
+*END**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/netdevice.h>
+#include <asm/core_reg.h>
+
+#include "hal.h"
+#include "hal_hostport.h"
+
+#define COMMAND_START_MAGIC 0xDEAD
+
+static struct hal_priv  *hpriv;
+static const char *hal_name = "UCCP310_WIFI_HAL";
+
+static unsigned long shm_offset = HAL_SHARED_MEM_OFFSET;
+module_param(shm_offset, ulong, S_IRUSR|S_IWUSR);
+
+#ifdef CONFIG_TIMING_DEBUG
+unsigned long irq_timestamp[20];
+unsigned int irq_ts_index;
+spinlock_t  timing_lock;
+#endif
+
+/*
+ * hal_ready - poll the host->MTX command register.
+ * Returns 1 when the firmware has acknowledged the previous command
+ * (interrupt bit clear), 0 while a command is still pending.
+ */
+static int hal_ready(void)
+{
+	unsigned int value = 0;
+	/* Check the ACK register bit */
+	value =  readl((void __iomem *)(UCCP_CORE_HOST_TO_MTX_CMD_ADDR));
+	if (value & BIT(UCCP_CORE_HOST_INT_SHIFT))
+		return 0;
+	else
+		return 1;
+}
+
+/*
+ * TX tasklet: drains the command queue, copies each command into the
+ * bulk-RAM slot published by the firmware and rings the doorbell
+ * register with a rolling command sequence number.
+ * Busy-waits up to 100 TXTIMER ticks (presumably ~100 us -- confirm
+ * timer resolution) for the interface to become ready, dropping the
+ * command on timeout.
+ */
+static void tx_tasklet_fn(unsigned long data)
+{
+	struct hal_priv  *priv = (struct hal_priv *)data;
+	struct sk_buff  *skb;
+	unsigned int  value = 0;
+	unsigned long start_addr;
+	unsigned long timeout, timestamp;
+	while ((skb = skb_dequeue(&priv->txq))) {
+#ifdef CONFIG_HAL_DEBUG
+		printk(KERN_DEBUG "%s: xmit dump\n", hal_name);
+		print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_NONE, 16, 1, skb->data, skb->len, 1);
+#endif
+
+		timestamp = __core_reg_get(TXTIMER);
+		timeout = timestamp + 100;
+		while (!hal_ready()) {
+			timestamp = __core_reg_get(TXTIMER);
+			/* Signed subtraction handles timer wrap-around */
+			if (((long)timeout - (long)timestamp) < 0) {
+				printk(KERN_DEBUG "%s: Interface not ready for 100 us,	dropping command. ID = %d\n", hal_name, skb->data[0]);
+				dev_kfree_skb_any(skb);
+				skb = NULL;
+				break;
+			}
+		}
+		if (!skb)
+			continue;
+		/* write the command buffer in bulk RAM */
+		start_addr = readl((void __iomem *)HAL_BULK_RAM_CMD_START);
+#ifdef CONFIG_HAL_DEBUG
+		printk(KERN_DEBUG "%s: command address = 0x%08x\n", hal_name, (unsigned int)start_addr);
+#endif
+		/* Translate the MTX bus address to our ioremap()ed mapping */
+		start_addr -= HAL_MTX_BULK_RAM;
+		start_addr += ((priv->bulk_ram_mem_addr)-(priv->shm_offset));
+
+		/* Sanity-check the firmware-supplied address before writing */
+		if ((start_addr < priv->bulk_ram_mem_addr) || (start_addr > (priv->bulk_ram_mem_addr + HAL_WLAN_BULK_RAM_LEN))) {
+			printk(KERN_DEBUG "%s: Invalid command address 0x%08x	in bulkram, dropping command.\n", hal_name, (unsigned int)start_addr);
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+			continue;
+		}
+
+		memcpy((unsigned char *)start_addr, skb->data, skb->len);
+		/* Doorbell: sequence counter in low 16 bits, magic in high */
+		value = (unsigned int) (priv->cmd_cnt);
+		value |= 0x7fff0000;
+		writel(value, (void __iomem *)(UCCP_CORE_HOST_TO_MTX_CMD_ADDR));
+		priv->cmd_cnt++;
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/*
+ * RX tasklet: for each event skb queued by the IRQ handler, copy the
+ * event payload out of bulk RAM into the skb, mark the firmware-side
+ * buffer free (by zeroing its status word) and deliver the skb to the
+ * registered receive handler.
+ * The skb->cb layout is set up by hal_irq_handler():
+ *   cb[0..3] payload address, cb[4..7] length, cb[8..11] status address.
+ */
+static void rx_tasklet_fn(unsigned long data)
+{
+	struct hal_priv *priv = (struct hal_priv *)data;
+	struct sk_buff  *skb;
+	unsigned char *buf;
+	unsigned long temp;
+	while ((skb = skb_dequeue(&priv->rxq))) {
+		temp = *((unsigned long *)(skb->cb));
+		buf = skb_put(skb, *(unsigned long *)(skb->cb + 4));
+		memcpy(buf, (unsigned char *)temp, skb->len);
+		/* Mark the buffer free */
+		temp = *((unsigned long *)(skb->cb + 8));
+#ifdef CONFIG_HAL_DEBUG
+		printk(KERN_DEBUG "%s: Freeing event buffer at 0x%08x\n", hal_name, (unsigned int)temp);
+#endif
+		*((unsigned long *)temp) = 0;
+#ifdef CONFIG_HAL_DEBUG
+		printk(KERN_DEBUG "%s: recv dump\n", hal_name);
+		print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_NONE, 16, 1, skb->data, skb->len, 1);
+#endif
+		priv->rcv_handler(skb, LMAC_MOD_ID);
+	}
+}
+
+/* Queue an outgoing command skb and kick the TX tasklet. */
+static void hostport_send(struct hal_priv  *priv,
+			struct sk_buff   *skb)
+{
+	skb_queue_tail(&priv->txq, skb);
+	tasklet_schedule(&priv->tx_tasklet);
+}
+
+/*
+ * hal_ops.send entry point.  rcv_mod_id/send_mod_id are unused by this
+ * backend; the hostport carries only host<->LMAC traffic.
+ */
+static void  hal_send(void      *nwb,
+			unsigned char  rcv_mod_id,
+			unsigned char  send_mod_id)
+{
+	hostport_send(hpriv, nwb);
+}
+
+/*
+ * Register the receive handler.  Only a single handler is supported by
+ * this backend; mod_id is accepted for interface compatibility but
+ * ignored (a second registration overwrites the first).
+ */
+static void hal_register_callback(msg_handler        handler,
+				unsigned char      mod_id)
+{
+	hpriv->rcv_handler = handler;
+}
+
+
+/*
+ * Host port interrupt handler.  Validates the MTX->host doorbell value
+ * against the expected event sequence number, records the event's
+ * bulk-RAM payload/status addresses and length in skb->cb (consumed by
+ * rx_tasklet_fn) and defers the copy to the RX tasklet.
+ * NOTE(review): IRQ_HANDLED is returned even for spurious doorbell
+ * values; on a dedicated (non-shared) line this is harmless, but
+ * IRQ_NONE would be the conventional return for spurious interrupts.
+ */
+static irqreturn_t hal_irq_handler(int    irq, void  *p)
+{
+
+	unsigned int         value;
+	unsigned long        event_addr, event_status_addr, event_len;
+	unsigned char        spurious;
+	struct sk_buff       *skb;
+	struct hal_priv      *priv = (struct hal_priv *)p;
+
+	spurious = 0;
+
+	value = readl((void __iomem *)(UCCP_CORE_MTX_TO_HOST_CMD_ADDR)) & 0x7fffffff;
+	/* Expect magic 0x7fff in the high half and our sequence number */
+	if (value == (0x7fff0000 | priv->event_cnt)) {
+#ifdef CONFIG_TIMING_DEBUG
+		spin_lock(&timing_lock);
+		irq_timestamp[irq_ts_index] = __core_reg_get(TXTIMER);
+		irq_ts_index = (irq_ts_index + 1)%20;
+		spin_unlock(&timing_lock);
+#endif
+		/* Firmware publishes address/status/length as three words */
+		event_addr = readl((void __iomem *)HAL_BULK_RAM_EVENT_START);
+		event_status_addr = readl((void __iomem *)(HAL_BULK_RAM_EVENT_START + 4));
+		event_len = readl((void __iomem *)(HAL_BULK_RAM_EVENT_START + 8));
+
+#ifdef CONFIG_HAL_DEBUG
+		printk(KERN_DEBUG "%s: event address = 0x%08x\n", hal_name, (unsigned int)event_addr);
+		printk(KERN_DEBUG "%s: event status address = 0x%08x\n", hal_name, (unsigned int)event_status_addr);
+		printk(KERN_DEBUG "%s: event len = %d\n", hal_name, (int)event_len);
+#endif
+		/* Translate MTX bus addresses to our ioremap()ed mapping */
+		event_addr -= HAL_MTX_BULK_RAM;
+		event_status_addr -= HAL_MTX_BULK_RAM;
+		event_addr += ((priv->bulk_ram_mem_addr) - (priv->shm_offset));
+		event_status_addr += ((priv->bulk_ram_mem_addr) - (priv->shm_offset));
+		skb = dev_alloc_skb(event_len);
+		if (!skb) {
+			printk(KERN_ERR "%s: out of memory\n", hal_name);
+			/* Drop the event but still release the firmware buffer */
+			*((unsigned long *)event_status_addr) = 0;
+		} else {
+			*(unsigned long *)(skb->cb) = event_addr;		/* address of event payload */
+			*(unsigned long *)(skb->cb + 4) = event_len;		/* length of event payload */
+			*(unsigned long *)(skb->cb + 8) = event_status_addr;	/* address to mark free */
+			skb_queue_tail(&priv->rxq, skb);
+			tasklet_schedule(&priv->rx_tasklet);
+		}
+		priv->event_cnt++;
+	} else {
+			spurious = 1;
+	}
+
+	if (!spurious) {
+		/* Clear the mtx interrupt */
+		value = 0;
+		value |= BIT(UCCP_CORE_MTX_INT_CLR_SHIFT);
+		writel(*((unsigned long   *)&(value)), (void __iomem *)(UCCP_CORE_HOST_TO_MTX_ACK_ADDR));
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Enable the UCCP->host interrupt path: set the external pin irq
+ * enable bit and allow mtx_int to be raised.
+ */
+static void hal_enable_int(void  *p)
+{
+	unsigned int   value = 0;
+
+	/* Set external pin irq enable for host_irq and mtx_irq */
+	value = readl((void __iomem *)UCCP_CORE_INT_ENAB_ADDR);
+	value |= BIT(UCCP_CORE_MTX_INT_IRQ_ENAB_SHIFT);
+	writel(*((unsigned long   *)&(value)), (void __iomem *)(UCCP_CORE_INT_ENAB_ADDR));
+
+	/* Enable raising mtx_int when MTX_INT = 1 */
+	value = 0;
+	value |= BIT(UCCP_CORE_MTX_INT_EN_SHIFT);
+	writel(*((unsigned long *)&(value)), (void __iomem *)(UCCP_CORE_MTX_INT_ENABLE_ADDR));
+
+	return;
+}
+
+
+/*
+ * Disable the UCCP->host interrupt path (reverse of hal_enable_int).
+ */
+static void hal_disable_int(void  *p)
+{
+	unsigned int   value = 0;
+
+	/* Reset external pin irq enable for host_irq and mtx_irq */
+	value = readl((void __iomem *)UCCP_CORE_INT_ENAB_ADDR);
+	value &= ~(BIT(UCCP_CORE_MTX_INT_IRQ_ENAB_SHIFT));
+	writel(*((unsigned long   *)&(value)), (void __iomem *)(UCCP_CORE_INT_ENAB_ADDR));
+
+	/* Disable raising mtx_int when MTX_INT = 1 */
+	value = 0;
+	/* NOTE(review): the &= below is a no-op on a zeroed value; the
+	 * register is simply written as 0, which disables the interrupt. */
+	value &= ~(BIT(UCCP_CORE_MTX_INT_EN_SHIFT));
+	writel(*((unsigned long *)&(value)), (void __iomem *)(UCCP_CORE_MTX_INT_ENABLE_ADDR));
+
+	return;
+}
+
+/*
+ * hal_deinit - tear down the host-port HAL.
+ * Stops the 802.11 layer, disables and frees the interrupt, kills the
+ * tasklets, drains both skb queues and releases all mapped regions.
+ * Always returns 0.
+ */
+static int hal_deinit(void)
+{
+	struct sk_buff *skb;
+
+	_uccp310wlan_80211if_exit();
+
+	/* Disable host_int and mtx_irq */
+	hal_disable_int(NULL);
+
+	/* Free irq line */
+	free_irq(HAL_IRQ_LINE, hpriv);
+
+	/* Kill the HAL tasklet */
+	tasklet_kill(&hpriv->tx_tasklet);
+	tasklet_kill(&hpriv->rx_tasklet);
+
+	/* Drop any frames still queued in either direction */
+	while ((skb = skb_dequeue(&hpriv->rxq)))
+		dev_kfree_skb_any(skb);
+	while ((skb = skb_dequeue(&hpriv->txq)))
+		dev_kfree_skb_any(skb);
+
+	/* unmap UCCP memory */
+	iounmap((void __iomem *)hpriv->uccp_mem_addr);
+	release_mem_region(HAL_META_UCC_BASE, HAL_META_UCC_LEN);
+
+	/* unmap Bulk RAM */
+	iounmap((void __iomem *)hpriv->bulk_ram_mem_addr);
+	release_mem_region(HAL_WLAN_BULK_RAM_START, HAL_WLAN_BULK_RAM_LEN);
+
+	kfree(hpriv);
+	return 0;
+}
+
+/*
+ * hal_init - bring up the host-port HAL.
+ *
+ * Allocates the private state, maps the UCCP register block and the
+ * WLAN bulk-RAM window, registers and enables the host-port interrupt,
+ * initialises the TX/RX tasklets and queues, and starts the 802.11
+ * interface layer.
+ *
+ * Returns 0 on success or -ENOMEM on failure.  All resources acquired
+ * before the point of failure are released via the goto-unwind chain
+ * below; the previous version leaked both ioremap() mappings and the
+ * IRQ line (plus the enabled interrupt) on the later error paths.
+ */
+static int hal_init(void)
+{
+	hpriv = kzalloc(sizeof(struct hal_priv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	hpriv->shm_offset = shm_offset;
+
+	if (hpriv->shm_offset != HAL_SHARED_MEM_OFFSET)
+		printk(KERN_DEBUG "%s: Using shared memory offset 0x%lx\n", hal_name, hpriv->shm_offset);
+
+	/* Map UCCP memory */
+	if (!request_mem_region(HAL_META_UCC_BASE, HAL_META_UCC_LEN, "uccp")) {
+		printk(KERN_ERR "%s: request_mem_region failed	for UCCP region\n", hal_name);
+		goto err_free_priv;
+	}
+
+	hpriv->uccp_mem_addr = (unsigned long)ioremap(HAL_META_UCC_BASE,
+					HAL_META_UCC_LEN);
+	if (hpriv->uccp_mem_addr == 0) {
+		printk(KERN_ERR "%s: Ioremap failed for UCCP mem region.\n", hal_name);
+		goto err_release_uccp;
+	}
+
+	/* Map Bulk RAM (region start depends on shm_offset set above) */
+	if (!request_mem_region(HAL_WLAN_BULK_RAM_START, HAL_WLAN_BULK_RAM_LEN, "wlan_bulk_ram")) {
+		printk(KERN_ERR "%s: request_mem_region failed for bulk ram.\n", hal_name);
+		goto err_unmap_uccp;
+	}
+
+	hpriv->bulk_ram_mem_addr = (unsigned long)ioremap(HAL_WLAN_BULK_RAM_START, HAL_WLAN_BULK_RAM_LEN);
+	if (hpriv->bulk_ram_mem_addr == 0) {
+		printk(KERN_ERR "%s: Ioremap failed for bulk ram region.\n", hal_name);
+		goto err_release_bulk;
+	}
+
+	/* Register irq handler */
+	if (request_irq(HAL_IRQ_LINE, hal_irq_handler, 0, "wlan", hpriv) != 0) {
+		printk(KERN_ERR "%s: Unable to register the Interrupt handler with kernel\n", hal_name);
+		goto err_unmap_bulk;
+	}
+
+	/* Enable host_int and mtx_int */
+	hal_enable_int(NULL);
+
+	/* Initialize HAL tasklets and queues */
+	tasklet_init(&hpriv->tx_tasklet, tx_tasklet_fn, (unsigned long)hpriv);
+	tasklet_init(&hpriv->rx_tasklet, rx_tasklet_fn, (unsigned long)hpriv);
+	skb_queue_head_init(&hpriv->rxq);
+	skb_queue_head_init(&hpriv->txq);
+#ifdef CONFIG_TIMING_DEBUG
+	spin_lock_init(&timing_lock);
+#endif
+
+	if (_uccp310wlan_80211if_init() < 0) {
+		printk(KERN_ERR "%s: wlan_init failed\n", hal_name);
+		/* Undo the irq/tasklet setup done above */
+		hal_disable_int(NULL);
+		free_irq(HAL_IRQ_LINE, hpriv);
+		tasklet_kill(&hpriv->tx_tasklet);
+		tasklet_kill(&hpriv->rx_tasklet);
+		goto err_unmap_bulk;
+	}
+
+	hpriv->cmd_cnt = COMMAND_START_MAGIC;
+	hpriv->event_cnt = 0;
+	return 0;
+
+err_unmap_bulk:
+	iounmap((void __iomem *)hpriv->bulk_ram_mem_addr);
+err_release_bulk:
+	/* Must run before kfree(): HAL_WLAN_BULK_RAM_START reads hpriv */
+	release_mem_region(HAL_WLAN_BULK_RAM_START, HAL_WLAN_BULK_RAM_LEN);
+err_unmap_uccp:
+	iounmap((void __iomem *)hpriv->uccp_mem_addr);
+err_release_uccp:
+	release_mem_region(HAL_META_UCC_BASE, HAL_META_UCC_LEN);
+err_free_priv:
+	kfree(hpriv);
+	hpriv = NULL;
+	return -ENOMEM;
+}
+
+/* Hostport backend: only init/deinit/register_callback/send are needed
+ * by the current users of the HAL interface. */
+struct hal_ops_tag  hal_ops = {
+	.init = hal_init,
+	.deinit = hal_deinit,
+	.register_callback = hal_register_callback,
+	.send = hal_send,
+	/* Other ops can be implemented if needed. */
+};
+
+/* Module entry point: delegate to the ops table. */
+static int __init hostport_init(void)
+{
+	return hal_ops.init();
+}
+
+/* Module exit point: tear everything down via the ops table. */
+static void __exit hostport_exit(void)
+{
+	hal_ops.deinit();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("Driver for IMG UCCP310 WiFi solution");
+
+module_init(hostport_init);
+module_exit(hostport_exit);
diff --git a/drivers/net/wireless/uccp310wlan/hal_hostport.h b/drivers/net/wireless/uccp310wlan/hal_hostport.h
new file mode 100644
index 0000000..fb1a082
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/hal_hostport.h
@@ -0,0 +1,118 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : hal_hostport.h
+***
+*** File Description:
+*** This file contains the definitions specific to HOSTPORT comms
+***
+******************************************************************************
+*END**************************************************************************/
+
+#ifndef _UCCP310WLAN_HAL_HOSTPORT_H_
+#define _UCCP310WLAN_HAL_HOSTPORT_H_
+
+/* Include files */
+#include <asm/soc-tz1090/defs.h>
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif /* __cplusplus */
+
+/* Private state of the hostport HAL backend (one global instance). */
+struct hal_priv {
+/* UCCP and BULKRAM mappings */
+unsigned long         uccp_mem_addr;
+unsigned long         bulk_ram_mem_addr;
+unsigned long         shm_offset;
+/*
+ *  TX
+ */
+struct sk_buff_head             txq;
+struct tasklet_struct           tx_tasklet;
+unsigned short                  cmd_cnt;
+/*
+ * RX
+ */
+struct sk_buff_head             rxq;
+struct tasklet_struct           rx_tasklet;
+unsigned short                  event_cnt;
+msg_handler                     rcv_handler;
+};
+
+/* Bring-up/tear-down of the 802.11 layer (defined in the mac80211 glue) */
+int _uccp310wlan_80211if_init(void);
+void _uccp310wlan_80211if_exit(void);
+
+
+/* Comet values */
+#define HAL_META_UCC_BASE              0x02010400
+#define HAL_META_BULK_RAM              0xE0200000
+#define HAL_META_BULK_RAM_LEN          0x00060000
+#define HAL_META_UCC_LEN               0x0000007c
+#define HAL_MTX_BULK_RAM	       0xB0000000
+/* Register UCCP_CORE_HOST_TO_MTX_CMD */
+#define UCCP_CORE_HOST_TO_MTX_CMD        0x0030
+#define UCCP_CORE_HOST_TO_MTX_CMD_ADDR      ((hpriv->uccp_mem_addr) + UCCP_CORE_HOST_TO_MTX_CMD)
+#define UCCP_CORE_HOST_INT_SHIFT            31
+
+/* Register UCCP_CORE_MTX_TO_HOST_CMD */
+#define UCCP_CORE_MTX_TO_HOST_CMD 0x0034
+#define UCCP_CORE_MTX_TO_HOST_CMD_ADDR      ((hpriv->uccp_mem_addr) + UCCP_CORE_MTX_TO_HOST_CMD)
+
+/* Register UCCP_CORE_HOST_TO_MTX_ACK */
+#define UCCP_CORE_HOST_TO_MTX_ACK 0x0038
+#define UCCP_CORE_HOST_TO_MTX_ACK_ADDR      ((hpriv->uccp_mem_addr) + UCCP_CORE_HOST_TO_MTX_ACK)
+#define UCCP_CORE_MTX_INT_CLR_SHIFT         31
+
+/* Register UCCP_CORE_MTX_TO_HOST_ACK */
+#define UCCP_CORE_MTX_TO_HOST_ACK 0x003C
+#define UCCP_CORE_MTX_TO_HOST_ACK_ADDR      ((hpriv->uccp_mem_addr) + UCCP_CORE_MTX_TO_HOST_ACK)
+
+/* Register UCCP_CORE_MTX_INT_ENABLE */
+#define UCCP_CORE_MTX_INT_ENABLE         0x0044
+#define UCCP_CORE_MTX_INT_ENABLE_ADDR       ((hpriv->uccp_mem_addr) + UCCP_CORE_MTX_INT_ENABLE)
+#define UCCP_CORE_MTX_INT_EN_SHIFT          31
+
+#define UCCP_CORE_INT_ENAB               0x0000
+#define UCCP_CORE_INT_ENAB_ADDR             ((hpriv->uccp_mem_addr) + UCCP_CORE_INT_ENAB)
+#define UCCP_CORE_MTX_INT_IRQ_ENAB_SHIFT    15
+
+/******************************************************************************************************/
+#define HAL_SHARED_MEM_OFFSET           0x30000
+#define HAL_WLAN_BULK_RAM_START		(HAL_META_BULK_RAM + (hpriv->shm_offset))
+#define HAL_WLAN_BULK_RAM_LEN		0x00012000
+
+/* Command, Event and Buff mappping offsets */
+#define HAL_COMMAND_OFFSET                    (0)
+#define HAL_EVENT_OFFSET                      (HAL_COMMAND_OFFSET + 512)
+
+#define HAL_BULK_RAM_CMD_START                 ((hpriv->bulk_ram_mem_addr) + HAL_COMMAND_OFFSET)
+#define HAL_BULK_RAM_EVENT_START               ((hpriv->bulk_ram_mem_addr) + HAL_EVENT_OFFSET)
+
+#define HAL_IRQ_LINE                      external_irq_map(UCC0_IRQ_NUM)
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+
+#endif /* _UCCP310WLAN_HAL_HOSTPORT_H_ */
+
+/* EOF */
diff --git a/drivers/net/wireless/uccp310wlan/lmac_if.c b/drivers/net/wireless/uccp310wlan/lmac_if.c
new file mode 100644
index 0000000..058d8c0
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/lmac_if.c
@@ -0,0 +1,600 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : lmac_if.c
+***
+*** File Description:
+*** This file contains the definitions of helper functions for LMAC comms
+***
+******************************************************************************
+*END**************************************************************************/
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+#include "lmac_if.h"
+
+#ifdef CONFIG_LMACIF_DEBUG
+#define LMACIF_DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define LMACIF_DEBUG(...) do { } while (0)
+#endif
+
+/* Registration record for the LMAC interface user (RCU-protected). */
+struct lmac_if_data {
+	char      *name;
+	void      *context;
+};
+
+static struct lmac_if_data __rcu *lmac_if;
+
+/*
+ * Copy a prepared command into a fresh skb and hand it to the HAL.
+ * Fills in hdr->flags with the payload length (bytes after the header)
+ * before copying.  Returns 0 on success, -1 if no user is registered
+ * or skb allocation fails.  Safe in atomic context (GFP_ATOMIC).
+ */
+static int uccp310wlan_send_cmd(unsigned char  *buf,
+		unsigned int   len)
+{
+	struct umac_lmac_msg_hdr  *hdr = (struct umac_lmac_msg_hdr *)buf;
+	struct sk_buff       *nbuf;
+	unsigned char        *data;
+	struct lmac_if_data *p;
+
+	rcu_read_lock();
+	p = (struct lmac_if_data *)(rcu_dereference(lmac_if));
+	if (!p) {
+		WARN_ON(1);
+		rcu_read_unlock();
+		return -1;
+	}
+
+	nbuf = alloc_skb(len, GFP_ATOMIC);
+	if (!nbuf) {
+		rcu_read_unlock();
+		return -1;
+	}
+
+	/* LMACIF_DEBUG ("%s-LMACIF: Sending command:%d\n", p->name, hdr->id); */
+	/* Note: writes through the caller's buffer before the copy below */
+	hdr->flags = (len  - sizeof(struct umac_lmac_msg_hdr));
+
+	data = skb_put(nbuf, len);
+	memcpy(data, buf, len);
+
+	hal_ops.send((void *)nbuf, LMAC_MOD_ID, UMAC_MOD_ID);
+	rcu_read_unlock();
+	return 0;
+}
+
+/*
+ * Send a CMD_RESET of the given type to the firmware.
+ * Returns 0 on success, -1 on failure (see uccp310wlan_send_cmd).
+ */
+int uccp310wlan_prog_reset(unsigned int reset_type)
+{
+	struct umac_cmd_reset  reset;
+
+	reset.hdr.id = CMD_RESET;
+	reset.hdr.flags = 0;
+	reset.reset_type = reset_type;
+	return uccp310wlan_send_cmd((unsigned char *) &reset,
+			sizeof(struct umac_cmd_reset));
+}
+
+/*
+ * Add or remove (per 'op') a virtual interface of 'vif_type' with the
+ * given MAC address at slot 'index'.  Returns 0 on success, -1 on
+ * failure.
+ */
+int uccp310wlan_prog_vif_ctrl(int index,
+		unsigned char *mac_addr,
+		unsigned int  vif_type,
+		unsigned int  op)
+{
+	struct umac_cmd_vif_ctrl  vif_ctrl;
+
+	vif_ctrl.hdr.id = CMD_VIF_CTRL;
+	vif_ctrl.hdr.flags = 0;
+	vif_ctrl.if_mode = vif_type;
+	memcpy(vif_ctrl.mac_addr, mac_addr, 6);
+	vif_ctrl.if_index = index;
+	vif_ctrl.if_ctrl = op;
+
+	return uccp310wlan_send_cmd((unsigned char *) &vif_ctrl,
+			sizeof(struct umac_cmd_vif_ctrl));
+}
+
+/*
+ * Program the basic rate set of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_basic_rates(int index,
+		unsigned char *vif_addr,
+		unsigned int basic_rate_set)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = BASICRATES_CHANGED;
+	vif_cfg.basic_rate_set = basic_rate_set;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the short-slot-time setting of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_short_slot(int index,
+		unsigned char *vif_addr,
+		unsigned int use_short_slot)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = SHORTSLOT_CHANGED;
+	vif_cfg.use_short_slot = use_short_slot;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+
+/*
+ * Program the powersave mode of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_powersave_mode(int index,
+		unsigned char *vif_addr,
+		unsigned int powersave_mode)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+	vif_cfg.changed_bitmap = POWERSAVE_CHANGED;
+	vif_cfg.powersave_mode = powersave_mode;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the IBSS ATIM window of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_atim_window(int index,
+		unsigned char *vif_addr,
+		unsigned int atim_window)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = ATIMWINDOW_CHANGED;
+	vif_cfg.atim_window = atim_window;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the association ID of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_aid(int index,
+		unsigned char *vif_addr,
+		unsigned int aid)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = AID_CHANGED;
+	vif_cfg.aid = aid;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+
+/*
+ * Program the association capability field of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_assoc_cap(int index,
+		unsigned char *vif_addr,
+		unsigned int caps)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = CAPABILITY_CHANGED;
+	vif_cfg.capability = caps;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the U-APSD type of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_apsd_type(int index,
+		unsigned char *vif_addr,
+		unsigned int uapsd_type)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = UAPSDTYPE_CHANGED;
+	vif_cfg.uapsd_type = uapsd_type;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the long-retry limit of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_long_retry(int index,
+		unsigned char *vif_addr,
+		unsigned int long_retry)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = LONGRETRY_CHANGED;
+	vif_cfg.long_retry = long_retry;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+/*
+ * Program the short-retry limit of a vif via CMD_VIF_CFG.
+ * Returns 0 on success, -1 on failure.
+ */
+int uccp310wlan_prog_vif_short_retry(int index,
+		unsigned char *vif_addr,
+		unsigned int short_retry)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = SHORTRETRY_CHANGED;
+	vif_cfg.short_retry = short_retry;
+	vif_cfg.if_index = index;
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+
+/*
+ * Send a CMD_VIF_CFG updating only the BSSID (6 bytes) of the given VIF.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_vif_bssid(int index,
+		unsigned char *vif_addr,
+		unsigned char *bssid)
+{
+	struct umac_cmd_vif_cfg   vif_cfg;
+
+	vif_cfg.hdr.id = CMD_VIF_CFG;
+	vif_cfg.hdr.flags = 0;
+
+	vif_cfg.changed_bitmap = BSSID_CHANGED;
+	memcpy(vif_cfg.bssid, bssid, 6);
+	memcpy(vif_cfg.vif_addr, vif_addr, 6);
+	vif_cfg.if_index = index;
+	return uccp310wlan_send_cmd((unsigned char *)&vif_cfg,
+			sizeof(struct umac_cmd_vif_cfg));
+}
+
+/*
+ * Send a CMD_PS_CFG setting the powersave state for the given VIF.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_powersave_state(int index,
+		unsigned char *vif_addr,
+		unsigned int powersave_state)
+{
+	struct umac_cmd_ps_cfg   ps_cfg;
+
+	ps_cfg.hdr.id = CMD_PS_CFG;
+	ps_cfg.hdr.flags = 0;
+
+	ps_cfg.powersave_state = powersave_state;
+	ps_cfg.if_index = index;
+	memcpy(ps_cfg.vif_addr, vif_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *)&ps_cfg,
+			sizeof(struct umac_cmd_ps_cfg));
+}
+
+/*
+ * Send a CMD_GLOBAL_CFG carrying device-wide parameters: RX/TX MSDU
+ * lifetimes, ED sensitivity, dynamic-ED enable flag and an 8-byte
+ * opaque RF parameter blob (@rf_params must point at >= 8 bytes).
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_global_cfg(unsigned int rx_msdu_lifetime,
+		unsigned int tx_msdu_lifetime,
+		unsigned int sensitivity,
+		unsigned int dyn_ed_enable,
+		unsigned char *rf_params)
+{
+	struct umac_cmd_global_cfg  gbl_config;
+
+	gbl_config.hdr.id = CMD_GLOBAL_CFG;
+	gbl_config.hdr.flags = 0;
+
+	gbl_config.rx_msdu_lifetime = rx_msdu_lifetime;
+	gbl_config.tx_msdu_lifetime = tx_msdu_lifetime;
+	gbl_config.ed_sensitivity = sensitivity;
+	gbl_config.dynamic_ed_enable = dyn_ed_enable;
+	memcpy(gbl_config.rf_params, rf_params, 8);
+
+	return uccp310wlan_send_cmd((unsigned char *) &gbl_config,
+			sizeof(struct umac_cmd_global_cfg));
+}
+
+/*
+ * Send a CMD_TX_POWER command setting the transmit power.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_txpower(unsigned int txpower)
+{
+	struct umac_cmd_txpower  power;
+
+	power.hdr.id = CMD_TX_POWER;
+	power.hdr.flags = 0;
+
+	power.txpower = txpower;
+
+	return uccp310wlan_send_cmd((unsigned char *) &power,
+			sizeof(struct umac_cmd_txpower));
+}
+
+/*
+ * Send a CMD_MCST_FILTER_CFG adding/removing (@op) a 6-byte multicast
+ * address in the LMAC filter table.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_mcast_addr_cfg(unsigned char  *mcast_addr,
+		unsigned int   op)
+{
+	struct umac_cmd_mcst_filter_cfg    mcast_config;
+
+	mcast_config.hdr.id = CMD_MCST_FILTER_CFG;
+	mcast_config.hdr.flags = 0;
+	mcast_config.mcst_ctrl = op;
+	memcpy(mcast_config.addr, mcast_addr, 6);
+
+	return uccp310wlan_send_cmd((unsigned char *) &mcast_config,
+			sizeof(struct umac_cmd_mcst_filter_cfg));
+}
+
+/*
+ * Send a CMD_MCST_FILTER_CTRL enabling/disabling multicast filtering.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_mcast_filter_control(unsigned int mcast_filter_enable)
+{
+	struct umac_cmd_mcst_filter_ctrl  mcast_ctrl;
+
+	mcast_ctrl.hdr.id = CMD_MCST_FILTER_CTRL;
+	mcast_ctrl.hdr.flags = 0;
+	mcast_ctrl.ctrl = mcast_filter_enable;
+
+	return uccp310wlan_send_cmd((unsigned char *) &mcast_ctrl,
+			sizeof(struct umac_cmd_mcst_filter_ctrl));
+}
+
+/*
+ * Send a CMD_RCV_BCN_MODE selecting the beacon reception mode.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_rcv_bcn_mode(unsigned int  bcn_rcv_mode)
+{
+	struct umac_cmd_rcv_bcn_mode  rcv_bcn_mode;
+
+	rcv_bcn_mode.hdr.id = CMD_RCV_BCN_MODE;
+	rcv_bcn_mode.hdr.flags = 0;
+	rcv_bcn_mode.mode = bcn_rcv_mode;
+
+	return uccp310wlan_send_cmd((unsigned char *) &rcv_bcn_mode,
+			sizeof(struct umac_cmd_rcv_bcn_mode));
+}
+
+/*
+ * Send a CMD_TXQ_PARAMS programming EDCA parameters (AIFSN, TXOP,
+ * CWmin/CWmax) for one TX queue of the given VIF.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_txq_params(int index,
+		unsigned char *addr,
+		unsigned int queue,
+		unsigned int aifs,
+		unsigned int txop,
+		unsigned int cwmin,
+		unsigned int cwmax)
+{
+	struct umac_cmd_txq_params    params;
+
+	params.hdr.id = CMD_TXQ_PARAMS;
+	params.hdr.flags = 0;
+
+	params.if_index = index;
+	memcpy(params.vif_addr, addr, ETH_ALEN);
+	params.queue_num = queue;
+	params.aifsn = aifs;
+	params.txop = txop;
+	params.cwmin = cwmin;
+	params.cwmax = cwmax;
+
+	return uccp310wlan_send_cmd((unsigned char *) &params,
+			sizeof(struct umac_cmd_txq_params));
+}
+
+/*
+ * Send a CMD_CHANNEL switching the LMAC to channel number @ch.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_channel(unsigned int ch)
+{
+	struct umac_cmd_channel    channel;
+
+	channel.hdr.id = CMD_CHANNEL;
+	channel.hdr.flags = 0;
+	channel.channel = ch;
+
+	return uccp310wlan_send_cmd((unsigned char *) &channel,
+			sizeof(struct umac_cmd_channel));
+}
+
+/*
+ * Send a CMD_PEER_KEY_CFG adding/removing (@op) a per-peer key.
+ * Copies MAX_KEY_LEN bytes of key material unconditionally; TX/RX
+ * Michael MIC keys (MICHAEL_LEN bytes each) are copied only when the
+ * corresponding pointers in @key are non-NULL.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_peer_key(int              vif_index,
+		unsigned char    *vif_addr,
+		unsigned int     op,
+		unsigned int     key_id,
+		unsigned int     key_type,
+		unsigned int     cipher_type,
+		struct umac_key  *key)
+{
+	struct umac_cmd_peer_key_cfg  peer_key;
+
+	peer_key.hdr.id = CMD_PEER_KEY_CFG;
+	peer_key.hdr.flags = 0;
+
+	peer_key.if_index = vif_index;
+	memcpy(peer_key.vif_addr, vif_addr, ETH_ALEN);
+	peer_key.op = op;
+	peer_key.key_id = key_id;
+	memcpy(peer_key.peer_mac, key->peer_mac, ETH_ALEN);
+
+	peer_key.key_type = key_type;
+	peer_key.cipher_type = cipher_type;
+	memcpy(peer_key.key, key->key, MAX_KEY_LEN);
+	if (key->tx_mic)
+		memcpy(peer_key.tx_mic, key->tx_mic, MICHAEL_LEN);
+	if (key->rx_mic)
+		memcpy(peer_key.rx_mic, key->rx_mic, MICHAEL_LEN);
+	return uccp310wlan_send_cmd((unsigned char *) &peer_key,
+			sizeof(struct umac_cmd_peer_key_cfg));
+}
+
+/*
+ * Send a CMD_IF_KEY_CFG adding/removing (@op) an interface (group) key.
+ * For KEY_CTRL_ADD: TKIP/CCMP keys copy MAX_KEY_LEN bytes plus an
+ * optional TX MIC key; anything else is treated as WEP and the key
+ * length is derived from the cipher (5 bytes for WEP40, else 13).
+ * Key material is not copied for other ops.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_if_key(int             vif_index,
+		unsigned char   *vif_addr,
+		unsigned int    op,
+		unsigned int    key_id,
+		unsigned int    cipher_type,
+		struct umac_key     *key)
+{
+	struct umac_cmd_if_key_cfg  if_key;
+
+	if_key.hdr.id = CMD_IF_KEY_CFG;
+	if_key.hdr.flags = 0;
+
+	if_key.if_index = vif_index;
+	memcpy(if_key.vif_addr, vif_addr, 6);
+	if_key.key_id = key_id;
+	if_key.op = op;
+
+	if (op == KEY_CTRL_ADD) {
+		if_key.cipher_type = cipher_type;
+		if (cipher_type == CIPHER_TYPE_TKIP ||	cipher_type == CIPHER_TYPE_CCMP) {
+			memcpy(if_key.key.rsn_grp_key.key, key->key, MAX_KEY_LEN);
+			if (key->tx_mic)
+				memcpy(if_key.key.rsn_grp_key.mic_key, key->tx_mic, MICHAEL_LEN);
+		} else {
+			if_key.key.wep_key.key_len =
+				(cipher_type == CIPHER_TYPE_WEP40) ? 5 : 13;
+			memcpy(if_key.key.wep_key.wep_key, key->key,
+					if_key.key.wep_key.key_len);
+		}
+	}
+	return uccp310wlan_send_cmd((unsigned char *) &if_key,
+			sizeof(struct umac_cmd_if_key_cfg));
+}
+
+/*
+ * Hand a fully-built TX command skb to the HAL. The skb's data must
+ * already start with a struct umac_cmd_tx; this only stamps the header
+ * id and packs the command/payload lengths into hdr.flags
+ * (cmd length in the low 16 bits, payload length in the high 16).
+ * Returns 0 on success, -1 if the lmac_if pointer is gone.
+ *
+ * NOTE(review): on the -1 path the caller-supplied skb is not freed
+ * here -- confirm the caller owns it in that case.
+ * NOTE(review): the return value of hal_ops.send() is ignored.
+ */
+int uccp310wlan_prog_tx(struct sk_buff  *skb)
+{
+	struct umac_cmd_tx    *tx_cmd;
+	struct lmac_if_data *p;
+	tx_cmd = (struct umac_cmd_tx *)skb->data;
+	tx_cmd->hdr.id = CMD_TX;
+	tx_cmd->hdr.flags = ((sizeof(struct umac_cmd_tx) - sizeof(struct umac_lmac_msg_hdr)) |	((skb->len - sizeof(struct umac_cmd_tx)) << 16));
+
+	rcu_read_lock();
+
+	p = (struct lmac_if_data *)(rcu_dereference(lmac_if));
+
+	if (!p) {
+		WARN_ON(1);
+		rcu_read_unlock();
+		return -1;
+	}
+
+	hal_ops.send((void *)skb, LMAC_MOD_ID, UMAC_MOD_ID);
+
+	rcu_read_unlock();
+	return 0;
+}
+
+/*
+ * Request MIB statistics from the LMAC (CMD_MIB_STATS); the reply
+ * arrives later as an EVENT_MIB_STAT handled in the message handler.
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_mib_stats(void)
+{
+	struct umac_cmd_mib_stats    mib_stats;
+
+	mib_stats.hdr.id = CMD_MIB_STATS;
+	mib_stats.hdr.flags = 0;
+
+	return uccp310wlan_send_cmd((unsigned char *) &mib_stats,
+			sizeof(struct umac_cmd_mib_stats));
+}
+
+/*
+ * Request PHY statistics from the LMAC (CMD_PHY_STATS).
+ * Returns the result of uccp310wlan_send_cmd().
+ */
+int uccp310wlan_prog_phy_stats(void)
+{
+	struct umac_cmd_phy_stats  phy_stats;
+
+	phy_stats.hdr.id = CMD_PHY_STATS;
+	phy_stats.hdr.flags = 0;
+
+	return uccp310wlan_send_cmd((unsigned char *) &phy_stats,
+			sizeof(struct umac_cmd_phy_stats));
+}
+
+/*
+ * HAL callback: dispatch one event skb coming up from the LMAC.
+ * The first bytes of the skb carry a struct umac_lmac_msg_hdr; the id
+ * selects the event-specific handler. Only the low 16 bits of id/flags
+ * are significant (masked below).
+ *
+ * skb ownership: freed here for every event EXCEPT EVENT_RX, whose skb
+ * is handed on to uccp310wlan_rx_frame(). If lmac_if has already been
+ * torn down the skb is dropped. Always returns 0.
+ */
+static int uccp310wlan_msg_handler (void *nbuff,
+		unsigned char sender_id)
+{
+	unsigned int              event;
+	unsigned char             *buff;
+	struct umac_lmac_msg_hdr  *hdr;
+	struct lmac_if_data       *p;
+	struct sk_buff            *skb = (struct sk_buff *)nbuff;
+
+	rcu_read_lock();
+
+	p = (struct lmac_if_data *)(rcu_dereference(lmac_if));
+
+	if (!p) {
+		WARN_ON(1);
+		dev_kfree_skb_any(skb);
+		rcu_read_unlock();
+		return 0;
+	}
+
+	buff = skb->data;
+	hdr = (struct umac_lmac_msg_hdr *)buff;
+
+	/* upper halves of id/flags are reserved; keep only the low 16 bits */
+	hdr->id &= 0x0000ffff;
+	hdr->flags &= 0x0000ffff;
+
+	event = hdr->id;
+
+	/* LMACIF_DEBUG("%s-LMACIF: event %d received\n", p->name, event); */
+	if (event == EVENT_RESET_COMPLETE) {
+		struct umac_event_reset_complete *r = (struct umac_event_reset_complete *)buff;
+		uccp310wlan_reset_complete(r->version, p->context);
+
+	} else if (event == EVENT_RX /* || event == EVENT_RX_MIC_FAILURE*/) {
+		uccp310wlan_rx_frame(nbuff, p->context);
+
+	} else if (event == EVENT_TX_DONE) {
+		uccp310wlan_tx_complete((struct umac_event_tx_done *)buff,	p->context);
+
+	} else if (event == EVENT_MIB_STAT) {
+		struct umac_event_mib_stats  *mib_stats = (struct umac_event_mib_stats *) buff;
+		uccp310wlan_mib_stats(mib_stats, p->context);
+
+	} else if (event == EVENT_NOA) {
+		uccp310wlan_noa_event(event, (void *)buff, p->context, NULL);
+
+	} else {
+		/*
+		 * TODO:: handle remaining events
+		 */
+	}
+
+	/* EVENT_RX skbs are consumed by uccp310wlan_rx_frame() above */
+	if (event != EVENT_RX)
+		dev_kfree_skb_any(skb);
+
+	rcu_read_unlock();
+	return 0;
+}
+
+/*
+ * Allocate the lmac_if_data bookkeeping structure, register the event
+ * callback with the HAL and publish the structure via RCU.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ *
+ * NOTE(review): p->name stores @name with const cast away; the caller
+ * must keep the string alive for the lifetime of the interface.
+ */
+int uccp310wlan_lmac_if_init(void *context, const char *name)
+{
+	struct lmac_if_data *p;
+	LMACIF_DEBUG("%s-LMACIF: lmac_if init called\n", name);
+	p = kzalloc(sizeof(struct lmac_if_data), GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+
+	p->name = (char *)name;
+	p->context = context;
+	hal_ops.register_callback(uccp310wlan_msg_handler, UMAC_MOD_ID);
+	rcu_assign_pointer(lmac_if, p);
+	return 0;
+}
+
+/*
+ * Unpublish the lmac_if pointer, wait for readers to drain, then free
+ * the bookkeeping structure.
+ *
+ * NOTE(review): lmac_if is dereferenced (for ->name) and
+ * rcu_dereference()d outside any rcu_read_lock section here; the
+ * update side should use rcu_dereference_protected()/rcu_access_pointer()
+ * -- confirm callers serialise deinit against init.
+ */
+void uccp310wlan_lmac_if_deinit(void)
+{
+	struct lmac_if_data *p;
+	LMACIF_DEBUG("%s-LMACIF: Deinit called\n", lmac_if->name);
+	p = rcu_dereference(lmac_if);
+	rcu_assign_pointer(lmac_if, NULL);
+	synchronize_rcu();
+	kfree(p);
+}
diff --git a/drivers/net/wireless/uccp310wlan/lmac_if.h b/drivers/net/wireless/uccp310wlan/lmac_if.h
new file mode 100644
index 0000000..f07a2de
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/lmac_if.h
@@ -0,0 +1,135 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : lmac_if.h
+***
+*** File Description:
+*** This file contains the helper functions exported by LMAC interface module for
+*** sending commands and receiving events from the LMAC
+*****************************************************************************
+*END**************************************************************************/
+
+#ifndef _UCCP310WLAN_LMAC_IF_H_
+#define _UCCP310WLAN_LMAC_IF_H_
+#include <linux/skbuff.h>
+
+#include "hal.h"
+#include "umac_lmac_if.h"
+/*
+ * Key material handed to the key-programming helpers. All pointers
+ * reference caller-owned buffers; tx_mic/rx_mic may be NULL when the
+ * cipher has no MIC keys (the prog_*_key functions check for NULL).
+ */
+struct umac_key {
+	unsigned char     *peer_mac;
+	unsigned char     *tx_mic;
+	unsigned char     *rx_mic;
+	unsigned char     *key;
+};
+
+/* Commands */
+
+extern int uccp310wlan_prog_reset(unsigned int reset_type);
+extern int uccp310wlan_prog_vif_ctrl(int index,
+	unsigned char *vif_addr,
+	unsigned int  vif_type,
+	unsigned int  add_vif);
+extern int uccp310wlan_prog_vif_basic_rates(int index,
+	unsigned char *vif_addr,
+	unsigned int basic_rate_set);
+extern int uccp310wlan_prog_vif_short_slot(int index,
+	unsigned char *vif_addr,
+	unsigned int use_short_slot);
+extern int uccp310wlan_prog_vif_powersave_mode(int index,
+	unsigned char *vif_addr,
+	unsigned int powersave_mode);
+extern int uccp310wlan_prog_vif_atim_window(int index,
+	unsigned char *vif_addr,
+	unsigned int atim_window);
+extern int uccp310wlan_prog_vif_aid(int index,
+	unsigned char *vif_addr,
+	unsigned int aid);
+extern int uccp310wlan_prog_vif_assoc_cap(int index,
+	unsigned char *vif_addr,
+	unsigned int caps);
+extern int uccp310wlan_prog_vif_apsd_type(int index,
+	unsigned char *vif_addr,
+	unsigned int uapsd_type);
+extern int uccp310wlan_prog_vif_long_retry(int index,
+	unsigned char *vif_addr,
+	unsigned int long_retry);
+extern int uccp310wlan_prog_vif_short_retry(int index,
+	unsigned char *vif_addr,
+	unsigned int short_retry);
+extern int uccp310wlan_prog_vif_bssid(int index,
+	unsigned char *vif_addr,
+	unsigned char *bssid);
+extern int uccp310wlan_prog_powersave_state(int index,
+	unsigned char *vif_addr,
+	unsigned int powersave_state);
+
+extern int uccp310wlan_prog_global_cfg(unsigned int rx_msdu_lifetime,
+	unsigned int tx_msdu_lifetime,
+	unsigned int sensitivity,
+	unsigned int dyn_ed_enabled,
+	unsigned char *rf_params);
+extern int uccp310wlan_prog_txpower(unsigned int txpower);
+extern int uccp310wlan_prog_mcast_addr_cfg(unsigned char  *mcast_addr,
+	unsigned int   add_filter);
+extern int uccp310wlan_prog_mcast_filter_control(unsigned int enable_mcast_filtering);
+extern int uccp310wlan_prog_rcv_bcn_mode(unsigned int  bcn_rcv_mode);
+extern int uccp310wlan_prog_txq_params(int index,
+	unsigned char *vif_addr,
+	unsigned int queue,
+	unsigned int aifs,
+	unsigned int txop,
+	unsigned int cwmin,
+	unsigned int cwmax);
+extern int uccp310wlan_prog_channel(unsigned int ch);
+extern int uccp310wlan_prog_peer_key(int              index,
+	unsigned char   *vif_addr,
+	unsigned int     op,
+	unsigned int     key_id,
+	unsigned int     key_type,
+	unsigned int     cipher_type,
+	struct umac_key  *key);
+extern int uccp310wlan_prog_if_key(int   index,
+	unsigned char   *vif_addr,
+	unsigned int    op,
+	unsigned int    key_id,
+	unsigned int    cipher_type,
+	struct umac_key *key);
+
+extern int uccp310wlan_prog_mib_stats(void);
+extern int uccp310wlan_prog_phy_stats(void);
+
+extern int uccp310wlan_prog_tx(struct sk_buff *skb);
+
+/* Events  */
+extern void uccp310wlan_reset_complete(char *lmac_version, void *context);
+extern void uccp310wlan_tx_complete(struct umac_event_tx_done *txdone, void *context);
+extern void uccp310wlan_rx_frame(struct sk_buff *skb, void *context);
+extern void uccp310wlan_mib_stats(struct umac_event_mib_stats *mib_stats, void *context);
+extern void uccp310wlan_noa_event(int event, void *msg, void *context, struct sk_buff *skb);
+
+/* Init/Deinit */
+
+extern int uccp310wlan_lmac_if_init(void *context, const char *name);
+extern void uccp310wlan_lmac_if_deinit(void);
+#endif /* _UCCP310WLAN_LMAC_IF_H_ */
+
+/* EOF */
+
diff --git a/drivers/net/wireless/uccp310wlan/tx.c b/drivers/net/wireless/uccp310wlan/tx.c
new file mode 100644
index 0000000..1d43fa3
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/tx.c
@@ -0,0 +1,437 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : tx.c
+***
+*** File Description:
+*** This file contains the source functions UMAC TX logic
+***
+******************************************************************************
+*END**************************************************************************/
+#include "umac.h"
+#ifdef CONFIG_TX_DEBUG
+#define UMACTX_DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define UMACTX_DEBUG(...) do { } while (0)
+#endif
+
+#define UMACTX_TO_MACDEV(x) ((struct mac80211_dev *)(container_of(x, struct mac80211_dev, tx)))
+
+/*
+ * Sleep one timer tick at a time until every TX buffer-pool token has
+ * been released (tx_buff_pool_bmp reaches 0), giving up with a warning
+ * after TX_COMPLETE_TIMEOUT_TICKS ticks. Used from tx_deinit with the
+ * mac80211 queues already stopped.
+ *
+ * NOTE(review): assigning current->state directly is deprecated;
+ * set_current_state(TASK_INTERRUPTIBLE) should be used instead.
+ * NOTE(review): TX_COMPLETE_TIMEOUT_TICKS is derived from
+ * msecs_to_jiffies() (unsigned long) but printed with %ld.
+ */
+static void wait_for_tx_complete(struct tx_config *tx)
+{
+	int count;
+
+	count = 0;
+
+	while (tx->tx_buff_pool_bmp) {
+		count++;
+
+		if (count < TX_COMPLETE_TIMEOUT_TICKS) {
+			current->state = TASK_INTERRUPTIBLE;
+			schedule_timeout(1);
+		} else {
+			printk(KERN_DEBUG "%s-UMACTX: WARNING: TX complete didn't succeed after %ld timer "
+					"ticks, 0x%08x\n", UMACTX_TO_MACDEV(tx)->name, TX_COMPLETE_TIMEOUT_TICKS, (unsigned int) tx->tx_buff_pool_bmp);
+
+			break;
+		}
+	}
+
+	if (count && (count < TX_COMPLETE_TIMEOUT_TICKS))
+		UMACTX_DEBUG("TX complete after %d timer ticks\n", count);
+}
+
+/*
+ * Map a mac80211 queue number (0 = highest priority) to the driver's
+ * WLAN_AC_* numbering (VO..BK). Out-of-range values fall back to
+ * WLAN_AC_VO.
+ */
+static inline int tx_queue_map(int queue)
+{
+	unsigned int ac[4] = {WLAN_AC_VO, WLAN_AC_VI, WLAN_AC_BE, WLAN_AC_BK};
+
+	if (queue < 4)
+		return ac[queue];
+
+	return WLAN_AC_VO;
+}
+
+/*
+ * Inverse of tx_queue_map(): WLAN_AC_* value back to the mac80211
+ * queue number.
+ *
+ * NOTE(review): no bounds check -- @queue must be < 4 (in particular
+ * not WLAN_AC_BCN); current callers appear to guarantee this via
+ * queue_stopped_bmp, which is only set for non-BCN queues.
+ */
+static inline int tx_queue_unmap(int queue)
+{
+	unsigned int ac[4] = {3, 2, 1, 0};
+
+	return ac[queue];
+}
+
+/*
+ * Fill the rate table of a TX command from mac80211's rate-control
+ * data attached to @skb. If mac80211 supplies no TX rate, fall back to
+ * a single entry: the first bitrate of the current band with 5 retries
+ * and no protection. Otherwise copy each valid (idx >= 0) of the up to
+ * 4 rate-control entries, translating the preamble and RTS/CTS
+ * protection flags.
+ */
+static void get_rate(struct sk_buff *skb, struct umac_cmd_tx *txcmd, struct mac80211_dev *dev)
+{
+	struct ieee80211_rate *rate;
+	struct ieee80211_tx_info *c;
+	unsigned int index;
+
+	rate = ieee80211_get_tx_rate(dev->hw, IEEE80211_SKB_CB(skb));
+	if (rate == NULL) {
+		rate = &dev->hw->wiphy->bands[dev->hw->conf.chandef.chan->band]->bitrates[0];
+		txcmd->num_rates = 1;
+		txcmd->rate[0] = rate->hw_value;
+		txcmd->rate_retries[0] = 5;
+		txcmd->rate_protection_type[0] = USE_PROTECTION_NONE;
+		txcmd->rate_preamble_type[0] = DONT_USE_SHORT_PREAMBLE;
+	} else {
+		c = IEEE80211_SKB_CB(skb);
+		txcmd->num_rates = 0;
+		for (index = 0; index < 4; index++) {
+			if (c->control.rates[index].idx >= 0) {
+				rate = &dev->hw->wiphy->bands[c->band]->bitrates[c->control.rates[index].idx];
+				txcmd->rate[index] = rate->hw_value;
+				txcmd->rate_retries[index] = c->control.rates[index].count;
+				if (c->control.rates[index].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+					txcmd->rate_preamble_type[index] = USE_SHORT_PREAMBLE;
+				else
+					txcmd->rate_preamble_type[index] = DONT_USE_SHORT_PREAMBLE;
+
+				if (c->control.rates[index].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+					txcmd->rate_protection_type[index] = USE_PROTECTION_CTS2SELF;
+				else if (c->control.rates[index].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+					txcmd->rate_protection_type[index] = USE_PROTECTION_RTS;
+				else
+					txcmd->rate_protection_type[index] = USE_PROTECTION_NONE;
+				txcmd->num_rates++;
+			}
+		}
+	}
+	return;
+}
+
+/*
+ * Translate an LMAC tx-done event into mac80211 TX status for @skb:
+ * set IEEE80211_TX_STAT_ACK on success, record the retry count on the
+ * rate entry whose hw_value matches tx_done->rate, invalidate any
+ * later rate entries, and hand the skb to ieee80211_tx_status().
+ *
+ * NOTE(review): rates[index].idx is used to index bitrates[] without
+ * checking idx >= 0; if no entry matches tx_done->rate this reads
+ * bitrates[-1] -- potential out-of-bounds read, confirm idx is always
+ * valid here.
+ */
+static void tx_status(struct sk_buff *skb, struct umac_event_tx_done *tx_done, struct mac80211_dev *dev)
+{
+	int index;
+	struct ieee80211_tx_info *tx_info;
+
+	tx_info = IEEE80211_SKB_CB(skb);
+
+	if (tx_done->frm_status == UMAC_EVENT_TX_DONE_SUCCESS)
+		tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+	for (index = 0; index < 4; index++) {
+		if ((dev->hw->wiphy->bands[tx_info->band]->bitrates[tx_info->status.rates[index].idx]).hw_value == (tx_done->rate)) {
+			tx_info->status.rates[index].count = (tx_done->retries_num + 1);
+			break;
+		}
+	}
+	/* mark the remaining rate entries unused */
+	while (((index + 1) < 4) && (tx_info->status.rates[index + 1].idx >= 0)) {
+		tx_info->status.rates[index + 1].idx = -1;
+		tx_info->status.rates[index + 1].count = 0;
+		index++;
+	}
+	ieee80211_tx_status(dev->hw, skb);
+
+}
+
+/*
+ * Try to allocate a TX buffer-pool token for @skb on @queue.
+ * On success *id is the token and the skb is stashed in tx->tx_pkt[*id].
+ * If no token is free, *id is MAX_BUFF_POOL_ELEMENTS, the skb is
+ * queued on the per-AC pending list, and the mac80211 queue is stopped
+ * once the pending list exceeds MAX_TX_QUEUE_LEN (non-beacon only).
+ * Always returns 0; runs under tx->lock with IRQs disabled.
+ */
+static int uccp310wlan_tx_alloc_buff_req(struct mac80211_dev *dev, int queue, unsigned int *id, struct sk_buff *skb)
+{
+	int cnt = 0;
+	struct tx_config *tx = &dev->tx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tx->lock, flags);
+
+	UMACTX_DEBUG("%s: Alloc buf Req q = %d, bmap = 0x%08lx\n", dev->name, queue, tx->tx_buff_pool_bmp);
+
+	*id = MAX_BUFF_POOL_ELEMENTS;
+	/*
+	 * first 4 tx tokens will be reserved for 4 AC's
+	 * 5th and 6th tokens are reserved for beacon or broadcast or ATIM frames
+	 */
+	if (queue != WLAN_AC_BCN) {
+		if (!__test_and_set_bit(queue, &tx->tx_buff_pool_bmp))
+			*id = queue;
+		else {
+			/*
+			 * while searching the buff pool bmp, it should be noted that 2 buffers
+			 * are reserved for beacon, so the pool bmp should be searched from
+			 * beacon queue + 2 instead of WLAN_AC_MAX_CNT
+			 */
+			for (cnt = WLAN_AC_BCN+2; cnt < MAX_BUFF_POOL_ELEMENTS; cnt++) {
+				if (!__test_and_set_bit(cnt, &tx->tx_buff_pool_bmp)) {
+					*id = cnt;
+					break;
+				}
+
+			}
+
+		}
+
+	} else {/* (queue == WLAN_AC_BCN) */
+		if (!__test_and_set_bit(WLAN_AC_BCN, &tx->tx_buff_pool_bmp))
+			*id = WLAN_AC_BCN;
+
+		else if (!__test_and_set_bit(WLAN_AC_BCN+1, &tx->tx_buff_pool_bmp))
+			*id = WLAN_AC_BCN+1;
+
+	}
+
+	if (*id == MAX_BUFF_POOL_ELEMENTS) {
+		/* no token free: park the skb and apply backpressure */
+		skb_queue_tail(&tx->pending_pkt[queue], skb);
+		if ((queue != WLAN_AC_BCN) && (tx->pending_pkt[queue].qlen > MAX_TX_QUEUE_LEN)) {
+			ieee80211_stop_queue(dev->hw, skb->queue_mapping);
+			tx->queue_stopped_bmp |= (1 << queue);
+		}
+	} else {
+		tx->tx_pkt[*id] = skb;
+	}
+
+	UMACTX_DEBUG("%s: Alloc buf Result *id = %d, bmap = 0x%08lx\n", dev->name, *id, tx->tx_buff_pool_bmp);
+	spin_unlock_irqrestore(&tx->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Release buffer-pool token @buff_pool_id after a TX completion.
+ * *skb is set to the skb that just completed (tx->tx_pkt[buff_pool_id]).
+ * If any AC has pending packets, one is dequeued to reuse the token
+ * (reserved tokens serve their own AC; spare tokens round-robin across
+ * ACs via next_spare_token_ac) and is returned with *queue set;
+ * otherwise the token bit is cleared and NULL is returned. Wakes the
+ * corresponding mac80211 queue when a stopped queue drains below
+ * MAX_TX_QUEUE_LEN/2. Runs under tx->lock with IRQs disabled.
+ *
+ * Note: the round-robin while loop terminates because the initial scan
+ * (under the same lock) proved at least one AC has a pending packet.
+ */
+static struct sk_buff *uccp310wlan_tx_free_buff_req(struct mac80211_dev  *dev, unsigned int buff_pool_id, unsigned int *queue, struct sk_buff **skb)
+{
+	int i;
+	unsigned long flags;
+	struct tx_config *tx = &dev->tx;
+	struct sk_buff *pending = NULL;
+
+	spin_lock_irqsave(&tx->lock, flags);
+
+	for (i = 0; i < WLAN_AC_MAX_CNT; i++)
+		if (skb_peek(&tx->pending_pkt[i]))
+			break;
+	if (i == WLAN_AC_MAX_CNT) {
+		/* No pending packets */
+		__clear_bit(buff_pool_id, &tx->tx_buff_pool_bmp);
+	} else if (buff_pool_id <= WLAN_AC_MAX_CNT) { /* Reserved token */
+		if (buff_pool_id >= WLAN_AC_BCN)
+			*queue =  WLAN_AC_BCN;
+		else
+			*queue = buff_pool_id;
+
+		pending = skb_dequeue(&tx->pending_pkt[*queue]);
+		if (!pending)
+			__clear_bit(buff_pool_id, &tx->tx_buff_pool_bmp);
+	} else if (buff_pool_id > WLAN_AC_MAX_CNT) { /* Spare token */
+		unsigned int next_spare_token_ac = tx->next_spare_token_ac + 1;
+		if (next_spare_token_ac == WLAN_AC_MAX_CNT)
+			tx->next_spare_token_ac = 0;
+		else
+			tx->next_spare_token_ac = next_spare_token_ac;
+		while (!skb_peek(&tx->pending_pkt[tx->next_spare_token_ac])) {
+			next_spare_token_ac = tx->next_spare_token_ac + 1;
+			if (next_spare_token_ac == WLAN_AC_MAX_CNT)
+				tx->next_spare_token_ac = 0;
+			else
+				tx->next_spare_token_ac = next_spare_token_ac;
+		}
+		pending = skb_dequeue(&tx->pending_pkt[tx->next_spare_token_ac]);
+		*queue = tx->next_spare_token_ac;
+	}
+
+	/* hand back the completed skb; the token now tracks the new one */
+	*skb = tx->tx_pkt[buff_pool_id];
+	tx->tx_pkt[buff_pool_id] = pending;
+
+
+	if (pending && (tx->queue_stopped_bmp & (1 << *queue)) &&
+			tx->pending_pkt[*queue].qlen < (MAX_TX_QUEUE_LEN / 2)) {
+		ieee80211_wake_queue(dev->hw, tx_queue_unmap(*queue));
+		tx->queue_stopped_bmp &= ~(1 << (*queue));
+	}
+	spin_unlock_irqrestore(&tx->lock, flags);
+
+	return pending;
+}
+
+
+/*
+ * Initialise the per-device TX state: clear the token and queue-stop
+ * bitmaps, reset the spare-token round-robin to WLAN_AC_BE, set up the
+ * per-AC pending queues and the in-flight skb table, and wake all
+ * mac80211 queues.
+ */
+void uccp310wlan_tx_init(struct mac80211_dev *dev)
+{
+	int cnt = 0;
+	struct tx_config *tx = &dev->tx;
+
+	tx->tx_buff_pool_bmp = 0;
+	tx->queue_stopped_bmp = 0;
+	tx->next_spare_token_ac = WLAN_AC_BE;
+
+	for (cnt = 0; cnt < WLAN_AC_MAX_CNT; cnt++)
+		skb_queue_head_init(&tx->pending_pkt[cnt]);
+
+	for (cnt = 0; cnt < MAX_BUFF_POOL_ELEMENTS; cnt++)
+		tx->tx_pkt[cnt] = NULL;
+
+	spin_lock_init(&tx->lock);
+	ieee80211_wake_queues(dev->hw);
+
+	UMACTX_DEBUG("%s-UMACTX: initialization successful\n", UMACTX_TO_MACDEV(tx)->name);
+}
+
+/*
+ * Tear down TX: stop the mac80211 queues, wait (bounded) for in-flight
+ * frames to complete, then free any skbs still held in the in-flight
+ * table and the per-AC pending queues.
+ */
+void uccp310wlan_tx_deinit(struct mac80211_dev *dev)
+{
+	int  cnt = 0;
+	struct sk_buff    *skb;
+	struct tx_config  *tx = &dev->tx;
+
+	ieee80211_stop_queues(dev->hw);
+
+	wait_for_tx_complete(tx);
+
+	for (cnt = 0; cnt < MAX_BUFF_POOL_ELEMENTS; cnt++) {
+		if (tx->tx_pkt[cnt]) {
+			dev_kfree_skb_any(tx->tx_pkt[cnt]);
+			tx->tx_pkt[cnt] = NULL;
+		}
+	}
+
+	for (cnt = 0; cnt < WLAN_AC_MAX_CNT; cnt++) {
+		skb = skb_dequeue(&tx->pending_pkt[cnt]);
+		while (skb) {
+			dev_kfree_skb_any(skb);
+			skb = skb_dequeue(&tx->pending_pkt[cnt]);
+		}
+
+	}
+
+	UMACTX_DEBUG("%s-UMACTX: deinitialization successful\n", UMACTX_TO_MACDEV(tx)->name);
+}
+
+
+/*
+ * Build and send a CMD_TX for @skb, which already holds a buffer-pool
+ * token (@buff_pool_id). The VIF is resolved from the 802.11 header's
+ * addr2 (falling back to interface 0's MAC if unknown). The command is
+ * copied into a freshly allocated skb: 128 bytes of command header
+ * (sizeof(struct umac_cmd_tx) < 128, per the comment below) followed
+ * by the full frame payload. @skb itself is NOT consumed here -- it
+ * stays in tx->tx_pkt[] until TX completion.
+ * Returns the result of uccp310wlan_prog_tx(), or -1 on allocation
+ * failure.
+ */
+static int __uccp310wlan_tx_frame(struct sk_buff  *skb,
+		unsigned int       queue,
+		unsigned int       buff_pool_id,
+		unsigned int       more_frames,
+		struct mac80211_dev     *dev)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct sk_buff *nbuff;
+	struct umac_cmd_tx tx_cmd;
+	unsigned char *data;
+	int vif_index;
+
+	vif_index = vif_addr_to_index(hdr->addr2, dev);
+	if (vif_index > -1) {
+		tx_cmd.if_index = vif_index;
+		memcpy(tx_cmd.vif_addr, hdr->addr2, ETH_ALEN);
+	} else {
+		tx_cmd.if_index = 0;
+		memcpy(tx_cmd.vif_addr, dev->if_mac_addresses[0].addr, ETH_ALEN);
+	}
+	tx_cmd.queue_num = queue;
+	tx_cmd.buff_pool_id = buff_pool_id;
+	tx_cmd.more_frms = more_frames;
+	tx_cmd.tx_power = dev->txpower;
+
+	/* Get the rate at which packet has to be transmitted */
+	get_rate(skb, &tx_cmd, dev);
+
+	tx_cmd.num_of_frags = 1;
+	tx_cmd.last_frag_len = skb->len;
+	tx_cmd.frag_len = skb->len;
+
+	tx_cmd.force_encrypt = 0;
+
+	nbuff = alloc_skb(128 + skb->len, GFP_ATOMIC); /* 128 bytes offset from start of cmd_tx to payload. sizeof(cmd_tx) < 128 */
+	if (!nbuff) {
+		printk(KERN_DEBUG "%s-UMACTX: Unable to allocate skb for TX packet\n", dev->name);
+		return -1;
+	}
+	data = skb_put(nbuff, 128);
+	memcpy(data, &tx_cmd, sizeof(struct umac_cmd_tx));
+	data = skb_put(nbuff, skb->len);
+	memcpy(data, skb->data, skb->len);
+
+	UMACTX_DEBUG("%s-UMACTX: TX Frame, Queue = %d, pool_id = %d, len = %d\n", dev->name, tx_cmd.queue_num, tx_cmd.buff_pool_id, nbuff->len);
+	UMACTX_DEBUG("%s-UMACTX: Num rates = %d, %d, %d, %d, %d\n", dev->name, tx_cmd.num_rates, tx_cmd.rate[0], tx_cmd.rate[1], tx_cmd.rate[2], tx_cmd.rate[3]);
+#ifdef CONFIG_TX_DEBUG
+	/* print_hex_dump(KERN_DEBUG, " ",DUMP_PREFIX_NONE,16,1,nbuff->data,nbuff->len,1); */
+#endif
+	return uccp310wlan_prog_tx(nbuff);
+}
+
+/*
+ * mac80211-facing TX entry point. Unicast frames use the AC mapped
+ * from skb->queue_mapping; broadcast frames go on WLAN_AC_BCN with the
+ * more-frames bit taken from skb->priority (documented hack below).
+ * Allocates a buffer-pool token (or queues the skb as pending) and
+ * sends the frame; on send failure a synthetic retry-limit tx-done
+ * event is injected so the token and skb are reclaimed.
+ * Always returns NETDEV_TX_OK.
+ */
+int uccp310wlan_tx_frame(struct sk_buff *skb,
+		struct mac80211_dev *dev,
+		bool bcast)
+{
+	unsigned int queue, buff_pool_id, more_frames;
+
+	if (bcast == false) {
+		queue = tx_queue_map(skb->queue_mapping);
+		more_frames = 0;
+	} else {
+		queue = WLAN_AC_BCN;
+		more_frames = skb->priority; /* Hack: skb->priority is used to indicate more frames */
+	}
+
+	uccp310wlan_tx_alloc_buff_req(dev, queue, &buff_pool_id, skb);
+
+	/* no token: skb was queued as pending and will be sent on a later completion */
+	if (buff_pool_id == MAX_BUFF_POOL_ELEMENTS)
+		return NETDEV_TX_OK;
+
+
+	if (__uccp310wlan_tx_frame(skb, queue, buff_pool_id, more_frames, dev) < 0) {
+		struct umac_event_tx_done tx_done;
+		printk(KERN_DEBUG "%s-UMACTX: Unable to send frame, dropping ..\n", dev->name);
+
+		tx_done.buff_pool_id = buff_pool_id;
+		tx_done.frm_status = UMAC_EVENT_TX_DONE_ERROR_RETRY_LIMIT;
+		tx_done.rate = 0;
+		uccp310wlan_tx_complete(&tx_done, dev);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Handle an LMAC tx-done event: release the buffer-pool token, report
+ * TX status to mac80211 for the completed frame (beacons are just
+ * freed), and if a pending frame was dequeued for the token, send it.
+ * If that send fails, the event is rewritten as a retry-limit failure
+ * and processing loops back via the tx_complete label so the token is
+ * reclaimed again. Finally, a NOA event is raised for each VIF that
+ * had a frame completed.
+ */
+void uccp310wlan_tx_complete(struct umac_event_tx_done *tx_done, void *context)
+{
+	struct mac80211_dev *dev = (struct mac80211_dev *)context;
+	struct sk_buff *skb, *pending;
+	unsigned int queue, more_frames;
+	int vif_index, vif_index_bitmap = 0;
+
+tx_complete:
+	queue = 0;
+	UMACTX_DEBUG("%s-UMACTX: TX Done, pool_id = %d, status = %d, rate = %d, retries = %d\n", dev->name, tx_done->buff_pool_id, tx_done->frm_status, tx_done->rate, tx_done->retries_num);
+
+
+	pending = uccp310wlan_tx_free_buff_req(dev, tx_done->buff_pool_id, &queue, &skb);
+
+	if (skb) {
+		if (!ieee80211_is_beacon(((struct ieee80211_hdr *)(skb->data))->frame_control)) {
+			vif_index = vif_addr_to_index(((struct ieee80211_hdr *)(skb->data))->addr2, dev);
+			if (vif_index > -1)
+				vif_index_bitmap |= (1 << vif_index);
+			tx_status(skb, tx_done, dev);
+		} else
+			dev_kfree_skb_any(skb);
+	}
+
+	if (pending) {
+		/* skb->priority == 1 marks "more broadcast frames follow" (see uccp310wlan_tx_frame) */
+		if ((queue == WLAN_AC_BCN) && (pending->priority == 1))
+			more_frames = 1;
+		else
+			more_frames = 0;
+
+		if (__uccp310wlan_tx_frame(pending, queue, tx_done->buff_pool_id, more_frames, dev) < 0) {
+			printk(KERN_DEBUG "%s-UMACTX: Unable to send pending frame, dropping ..\n", dev->name);
+
+			tx_done->frm_status = UMAC_EVENT_TX_DONE_ERROR_RETRY_LIMIT;
+			tx_done->rate = 0;
+			goto tx_complete;
+		}
+	}
+
+	for (vif_index = 0; vif_index < MAX_VIFS; vif_index++)
+		if (vif_index_bitmap & (1 << vif_index))
+			uccp310wlan_noa_event(EVENT_TX_DONE, (void *)vif_index, (void *)dev, NULL);
+
+}
diff --git a/drivers/net/wireless/uccp310wlan/umac.h b/drivers/net/wireless/uccp310wlan/umac.h
new file mode 100644
index 0000000..9d8ab25
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/umac.h
@@ -0,0 +1,198 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : umac.h
+***
+*** File Description:
+*** This file contains the declarations of structures that will
+*** be used by core, tx and rx code
+***
+******************************************************************************
+*END**************************************************************************/
+
+#ifndef _UCCP310WLAN_UMAC_H_
+#define _UCCP310WLAN_UMAC_H_
+
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/wireless.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <../net/mac80211/ieee80211_i.h>
+#include <net/mac80211.h>
+
+#include "lmac_if.h"
+
+#ifdef CONFIG_TIMING_DEBUG
+extern unsigned long irq_timestamp[20];
+extern unsigned int irq_ts_index;
+extern spinlock_t timing_lock;
+#endif
+
+#define RESET_TIMEOUT  1000   /* Specify delay in milli-seconds*/
+#define RESET_TIMEOUT_TICKS   msecs_to_jiffies(RESET_TIMEOUT)
+
+#define TX_COMPLETE_TIMEOUT  1000   /* Specify delay in milli-seconds*/
+#define TX_COMPLETE_TIMEOUT_TICKS   msecs_to_jiffies(TX_COMPLETE_TIMEOUT)
+
+#define   MAX_MCAST_FILTERS             10
+#define   MAX_VIFS			5
+#define   DEFAULT_TX_POWER              15
+#define   DEFAULT_MAC_ADDRESS           "001122334455"
+#define   SUPPORTED_FILTERS             (FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC)
+#define   MAX_BUFF_POOL_ELEMENTS        9 /* Must be at least 6, one for each AC + two for BCN queue + spares */
+#define	  MAX_TX_QUEUE_LEN		20
+
+/* User/module-supplied tuning parameters for the device. */
+struct wifi_params {
+	int            ed_sensitivity;
+	int            num_vifs;
+	unsigned char  auto_sensitivity;
+	unsigned char  rf_params[8];     /* opaque RF blob, passed to CMD_GLOBAL_CFG */
+	unsigned char  show_phy_stats;
+	unsigned char  production_test;
+	unsigned int   dot11a_support;
+};
+
+/* Counters reported by the LMAC (MIB/PHY statistics events). */
+struct wifi_stats {
+	unsigned int   rx_packet_count;
+	 int            current_sensitivity;
+	unsigned int   ofdm_rx_crc_success_cnt;
+	unsigned int   ofdm_rx_crc_fail_cnt;
+	unsigned int   ofdm_rx_false_trig_cnt;
+	unsigned int   ofdm_rx_header_fail_cnt;
+	unsigned int   dsss_rx_crc_success_cnt;
+	unsigned int   dsss_rx_crc_fail_cnt;
+	unsigned int   dsss_rx_false_trig_cnt;
+	unsigned int   dsss_rx_header_fail_cnt;
+	unsigned int   ed_cnt;
+	unsigned int   cca_fail_cnt;
+	unsigned int   pdout_val;
+	unsigned char  uccp310_lmac_version[8];
+};
+
+/*
+ * Per-device TX state: the buffer-pool token bitmap, per-AC pending
+ * queues and the in-flight skb table. All fields are protected by
+ * 'lock' (taken with IRQs disabled in tx.c).
+ */
+struct tx_config {
+	/*
+	 * used to protect the TX pool
+	 */
+	spinlock_t      lock;
+	/*
+	*used to store tx tokens(buff pool ids)
+	*/
+	unsigned long   tx_buff_pool_bmp;
+
+	/* round-robin cursor for handing spare tokens to pending ACs */
+	unsigned int    next_spare_token_ac;
+
+	/*
+	 *  used to store the address of pending skbs per ac
+	 */
+	struct sk_buff_head  pending_pkt[WLAN_AC_MAX_CNT];
+
+	/*
+	 * used to store the address of tx'ed skb.. it will be used in tx
+	 * complete
+	 */
+	struct sk_buff  *tx_pkt[MAX_BUFF_POOL_ELEMENTS];
+
+	/* bit per AC: set while the matching mac80211 queue is stopped */
+	unsigned int queue_stopped_bmp;
+};
+
+/* Lifecycle state stored in mac80211_dev.state. */
+enum DEVICE_STATE {
+	STOPPED = 0,
+	STARTED
+};
+
+/* Per-device driver context attached to the mac80211 ieee80211_hw. */
+struct mac80211_dev {
+	struct device       *dev;
+	struct mac_address  if_mac_addresses[MAX_VIFS]; /* one MAC per possible VIF */
+	unsigned int        active_vifs;                /* bitmap over if_mac_addresses */
+	struct mutex        mutex;
+	int                 state;                      /* enum DEVICE_STATE */
+	int                 txpower;
+	unsigned char       mc_filters[MAX_MCAST_FILTERS][6];
+	int                 mc_filter_count;
+
+	struct tx_config     tx;
+
+	struct wifi_params  *params;
+	struct wifi_stats   *stats;
+	char                name[20];
+	char                reset_complete;
+	int                 power_save; /* Will be set only when a single VIF in STA mode is active */
+	struct ieee80211_vif __rcu *vifs[MAX_VIFS];
+	struct ieee80211_hw *hw;
+	spinlock_t           bcast_lock; /* Used to ensure more_frames bit is set properly when transmitting bcast frames in AP in IBSS modes */
+};
+
+
+/* Per-queue EDCA parameters as cached in umac_vif config. */
+struct edca_params {
+	unsigned short    txop; /* units of 32us */
+	unsigned short    cwmin;/* units of 2^n-1 */
+	unsigned short    cwmax;/* units of 2^n-1 */
+	unsigned char     aifs;
+};
+
+/* Driver-private per-VIF state (drv_priv of an ieee80211_vif). */
+struct umac_vif {
+	struct timer_list           bcn_timer;
+	struct uvif_config {
+		unsigned int             atim_window;
+		unsigned int             aid;
+		unsigned int             bcn_lost_cnt;
+		struct edca_params       edca_params[WLAN_AC_MAX_CNT];
+	} config;
+
+	/* P2P notice-of-absence state */
+	unsigned int                noa_active;
+	struct sk_buff_head         noa_que;
+	unsigned int                noa_tx_allowed;
+
+	int			vif_index;
+	struct ieee80211_vif        *vif;
+	struct mac80211_dev         *dev;
+	unsigned char			bssid[ETH_ALEN];
+};
+
+extern int  uccp310wlan_core_init(struct mac80211_dev *dev);
+extern void uccp310wlan_core_deinit(struct mac80211_dev *dev);
+extern void uccp310wlan_vif_add(struct umac_vif  *uvif);
+extern void uccp310wlan_vif_remove(struct umac_vif *uvif);
+extern void uccp310wlan_vif_set_edca_params(unsigned short queue, struct umac_vif *uvif, const struct  ieee80211_tx_queue_params *params, unsigned int vif_active);
+extern void uccp310wlan_vif_bss_info_changed(struct umac_vif *uvif, struct ieee80211_bss_conf *bss_conf, unsigned int changed);
+extern int  uccp310wlan_tx_frame(struct sk_buff *skb, struct mac80211_dev *dev, bool bcast);
+extern void uccp310wlan_tx_init(struct mac80211_dev *dev);
+extern void uccp310wlan_tx_deinit(struct mac80211_dev *dev);
+
+/*
+ * Return the index of the ACTIVE VIF whose MAC address equals @addr,
+ * or -1 if no active VIF matches (a matching but inactive slot also
+ * yields -1 via the active_vifs bitmap check).
+ */
+static inline int vif_addr_to_index(unsigned char *addr, struct mac80211_dev *dev)
+{
+	int i;
+	for (i = 0; i < MAX_VIFS; i++)
+		if (!compare_ether_addr(addr, dev->if_mac_addresses[i].addr))
+			break;
+	if ((i < MAX_VIFS) && (dev->active_vifs & (1 << i)))
+		return i;
+	else
+		return -1;
+}
+
+#endif /* _UCCP310WLAN_UMAC_H_ */
+
+/* EOF */
diff --git a/drivers/net/wireless/uccp310wlan/umac_lmac_if.h b/drivers/net/wireless/uccp310wlan/umac_lmac_if.h
new file mode 100644
index 0000000..7470fd7
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/umac_lmac_if.h
@@ -0,0 +1,630 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : umac_lmac_if.h
+***
+*** File Description:
+*** This file describes UMAC/LMAC logical interface
+***
+******************************************************************************
+*END**************************************************************************/
+
+#ifndef _UCCP310WLAN_UMAC_LMAC_IF_H_
+#define _UCCP310WLAN_UMAC_LMAC_IF_H_
+
+#define MICHAEL_LEN                   8
+#define MAX_KEY_LEN                   16
+
+#define WEP40_KEYLEN                  5
+#define WEP104_KEYLEN                 13
+#define MAX_WEP_KEY_LEN               13
+
+#define _PACKED_          __attribute__((__packed__))
+
+enum UMAC_QUEUE_NUM {
+	WLAN_AC_BK = 0,
+	WLAN_AC_BE,
+	WLAN_AC_VI,
+	WLAN_AC_VO,
+	WLAN_AC_BCN,
+	WLAN_AC_MAX_CNT
+};
+
+struct umac_lmac_msg_hdr {
+	unsigned int  id;
+	unsigned int  flags;
+} _PACKED_;
+
+/* Commands */
+enum UMAC_CMD {
+	CMD_FIRST = 0x000,
+	CMD_RESET,
+	CMD_VIF_CTRL,
+	CMD_VIF_CFG,
+	CMD_GLOBAL_CFG,
+	CMD_TX_POWER,
+	CMD_MCST_FILTER_CFG,
+	CMD_MCST_FILTER_CTRL,
+	CMD_RCV_BCN_MODE,
+	CMD_TXQ_PARAMS,
+	CMD_PS_CFG,
+	CMD_CHANNEL,
+	CMD_PEER_KEY_CFG,
+	CMD_IF_KEY_CFG,
+	CMD_TX,
+	CMD_MIB_STATS,
+	CMD_PHY_STATS,
+	CMD_TEST, /* Can be used by hardware abstraction layer(s) */
+	CMD_LAST
+};
+/* UMAC commands */
+
+struct umac_cmd_reset {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * 0 - LMAC ENABLE
+	 * 1 - LMAC DISABLE
+	 */
+#define LMAC_ENABLE     0
+#define LMAC_DISABLE    1
+	unsigned int reset_type;
+} _PACKED_;
+
+struct umac_cmd_vif_ctrl {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * if_ctrl -
+	 * 1 - add interface address
+	 * 2 - remove interface address
+	 */
+#define IF_ADD   1
+#define IF_REM   2
+	unsigned int        if_ctrl;
+
+	/*
+	 * Interface mode -
+	 * 0 - STA in infrastructure mode
+	 * 1 - STA in AD-HOC mode
+	 * 2 - AP
+	 */
+#define IF_MODE_STA_BSS  0
+#define IF_MODE_STA_IBSS 1
+#define IF_MODE_AP       2
+#define IF_MODE_INVALID  3
+	unsigned int        if_mode;
+	/*
+	 * if_index -
+	 * index assigned to this interface
+	 */
+	unsigned int        if_index;
+	/*
+	 * mac_addr -
+	 * interface address to add or delete
+	 */
+	unsigned char       mac_addr[ETH_ALEN];
+} _PACKED_;
+
+struct umac_cmd_vif_cfg {
+	struct umac_lmac_msg_hdr hdr;
+   /* Bitmap indicating whether value is changed or not */
+  #define BASICRATES_CHANGED (1<<0)
+  #define SHORTSLOT_CHANGED (1<<1)
+  #define POWERSAVE_CHANGED (1<<2)
+  #define UAPSDTYPE_CHANGED (1<<3)
+  #define ATIMWINDOW_CHANGED (1<<4)
+  #define AID_CHANGED (1<<5)
+  #define CAPABILITY_CHANGED (1<<6)
+  #define SHORTRETRY_CHANGED (1<<7)
+  #define LONGRETRY_CHANGED (1<<8)
+  #define BSSID_CHANGED (1<<9)
+
+	unsigned int         changed_bitmap;
+	/*
+	 * bitmap of supported basic rates
+	 */
+	unsigned int        basic_rate_set;
+
+	/*
+	 * slot type -
+	 * 0 - long slot
+	 * 1 - short slot
+	 */
+	unsigned int        use_short_slot;
+
+	/*
+	 * power save mode -
+	 *
+	 * bit0 - BK
+	 * bit1 - BE
+	 * bit2 - VI
+	 * bit3 - VO
+	 * all other bits are reserved
+	 *
+	 * 0 - indicates legacy mode powersave, 1 - indicates UAPSD for the
+	 * corresponding AC.
+	 *
+	 * all bits are 0 - legacy mode
+	 * some bits are 1's and some are 0's - mixed mode
+	 * all bits are 1 - APSD
+	 */
+	unsigned int        powersave_mode;
+
+	/*
+	 * UAPSD type
+	 * if value is 0, AC is delivery enabled access category. otherwise it
+	 * is trigger enabled
+	 *
+	 * bit0 - BK
+	 * bit1 - BE
+	 * bit2 - VI
+	 * bit3 - VO
+	 * all other bits are reserved
+	 */
+	unsigned int        uapsd_type;
+
+	/*
+	 * ATIM window
+	 */
+	unsigned int         atim_window;
+
+	unsigned int         aid;
+
+	unsigned int         capability;
+
+	unsigned int         short_retry;
+
+	unsigned int         long_retry ;
+
+	/* index of the intended interface */
+	unsigned int         if_index;
+	unsigned char        vif_addr[ETH_ALEN];
+
+	/* bssid of interface */
+	unsigned char        bssid[ETH_ALEN];
+
+} _PACKED_;
+
+struct umac_cmd_global_cfg {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * Both the values in mSecs
+	 */
+	unsigned int         rx_msdu_lifetime;
+	unsigned int         tx_msdu_lifetime;
+
+	int                  ed_sensitivity;
+
+	/*
+	 * dynamic_ed_enable -
+	 * 0 - disable dynamic ed
+	 * 1 - enable dynamic ed
+	 */
+#define DYN_ED_DISABLE        0
+#define DYN_ED_ENABLE         1
+	unsigned int         dynamic_ed_enable;
+
+	unsigned char        rf_params[8];
+} _PACKED_ ;
+
+struct umac_cmd_txpower {
+	struct umac_lmac_msg_hdr  hdr;
+	unsigned int        txpower; /* In dbm */
+} _PACKED_;
+
+struct umac_cmd_mcst_filter_cfg {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * mcst_ctrl -
+	 * 0 -- ADD multicast address
+	 * 1 -- Remove multicast address
+	 */
+#define MCAST_ADDR_ADD        0
+#define MCAST_ADDR_REM        1
+	unsigned int        mcst_ctrl;
+
+	/*
+	 * addr to add or delete..
+	 */
+	unsigned char       addr[ETH_ALEN];
+
+} _PACKED_;
+
+struct umac_cmd_mcst_filter_ctrl {
+	struct umac_lmac_msg_hdr hdr;
+
+	/*
+	 * ctrl -
+	 * 0 - disable multicast filtering in LMAC
+	 * 1 - enable multicast filtering in LMAC
+	 */
+#define MCAST_FILTER_DISABLE  0
+#define MCAST_FILTER_ENABLE   1
+	unsigned int        ctrl;
+} _PACKED_ ;
+
+struct umac_cmd_rcv_bcn_mode {
+	struct umac_lmac_msg_hdr hdr;
+    /*
+     * 0 - all beacons
+     * 1 - network only
+     * 2 - no beacons
+     */
+#define RCV_ALL_BCNS          0
+#define RCV_NETWORK_BCNS      1
+#define RCV_NO_BCNS           2
+	unsigned int        mode;
+} _PACKED_ ;
+
+struct umac_cmd_txq_params {
+	struct umac_lmac_msg_hdr  hdr;
+	/*
+	 * @UMAC_QUEUE_NUM will be used to fill this variable
+	 */
+	unsigned int         queue_num;
+
+	/*
+	 * AIFSN value
+	 * user has to compute the AIFS from this value using
+	 * AIFS = aifs*slot_time+SIFS
+	 */
+	unsigned int         aifsn;
+	/*
+	 * in units of uSec
+	 */
+	unsigned int         txop;
+	/*
+	 * CWmin value in number of slots
+	 */
+	unsigned int         cwmin;
+	/*
+	 * CWmax value in number of slots
+	 */
+	unsigned int         cwmax;
+
+	/*
+	 * interface index..
+	 */
+	unsigned int         if_index;
+
+	/*
+	 * interface address
+	 */
+	unsigned char        vif_addr[ETH_ALEN];
+
+} _PACKED_;
+
+struct umac_cmd_ps_cfg {
+	struct umac_lmac_msg_hdr hdr;
+
+	/*
+	 * state -
+	 * 0 - power save off
+	 * 1 - power save on
+	 */
+#define PWRSAVE_STATE_AWAKE   0
+#define PWRSAVE_STATE_DOZE    1
+	unsigned int        powersave_state;
+
+	/*
+	 * interface index..
+	 */
+	unsigned int        if_index;
+	/*
+	 * interface address
+	 */
+	unsigned char        vif_addr[ETH_ALEN];
+
+} _PACKED_ ;
+
+struct umac_cmd_channel {
+	struct umac_lmac_msg_hdr hdr;
+	unsigned int        channel;
+} _PACKED_ ;
+
+struct umac_cmd_peer_key_cfg {
+	struct umac_lmac_msg_hdr hdr;
+
+	/*
+	 * 0 - add key
+	 * 1 - del key
+	 */
+#define KEY_CTRL_ADD    0
+#define KEY_CTRL_DEL    1
+	unsigned int        op;
+
+	/*
+	 * key_type -
+	 * 0 - unicast
+	 * 1 - broadcast
+	 */
+#define KEY_TYPE_UCAST    0
+#define KEY_TYPE_BCAST    1
+	unsigned int        key_type;
+	/*
+	 * cipher_type -
+	 * 0 - wep40
+	 * 1 - wep104
+	 * 2 - tkip
+	 * 3 - ccmp
+	 */
+#define CIPHER_TYPE_WEP40    0
+#define CIPHER_TYPE_WEP104   1
+#define CIPHER_TYPE_TKIP     2
+#define CIPHER_TYPE_CCMP     3
+	unsigned int        cipher_type;
+	unsigned int        key_id;
+
+	/*
+	 * if_index -
+	 * interface index..
+	 */
+	unsigned int        if_index;
+
+	unsigned char       vif_addr[ETH_ALEN];
+
+	unsigned char       peer_mac[ETH_ALEN];
+
+	unsigned char       key[MAX_KEY_LEN];
+
+	unsigned char       tx_mic[MICHAEL_LEN];
+
+	unsigned char       rx_mic[MICHAEL_LEN];
+
+} _PACKED_ ;
+
+struct umac_cmd_if_key_cfg {
+	struct umac_lmac_msg_hdr hdr;
+
+	/*
+	 * 0 - add key
+	 * 1 - del key
+	 */
+#define KEY_CTRL_ADD    0
+#define KEY_CTRL_DEL    1
+	unsigned int        op;
+
+	unsigned int        cipher_type;
+
+	/*
+	 * 0..3
+	 */
+	unsigned int        key_id;
+
+	/*
+	 * if_index -
+	 * interface index..
+	 */
+	unsigned int        if_index;
+
+	unsigned char       vif_addr[ETH_ALEN];
+
+	union {
+		struct {
+			unsigned int     key_len;
+			unsigned char    wep_key[MAX_WEP_KEY_LEN];
+		} wep_key;
+		struct {
+			unsigned char    key[MAX_KEY_LEN];
+			unsigned char    mic_key[MICHAEL_LEN];
+		} rsn_grp_key;
+	} key;
+
+} _PACKED_ ;
+
+struct umac_cmd_tx {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * @UMAC_QUEUE_NUM will be used to fill this variable
+	 */
+	unsigned int     queue_num;
+
+	/*
+	 * id of this TX buffer in global TX buffer pool
+	 */
+	unsigned int     buff_pool_id;
+
+	unsigned int     num_of_frags;
+
+	/*
+	 * Length of the intermediate fragment if it is a fragmented frame or
+	 * length of the frame in case of single MSDU
+	 */
+	unsigned int     frag_len;
+
+	/*
+	 * Length of the last fragment..
+	 */
+	unsigned int     last_frag_len;
+
+	/*
+	 * tx power..
+	 */
+	unsigned int     tx_power;
+
+	/*
+	 * if_index -
+	 * interface index..
+	 */
+	unsigned int     if_index;
+
+	unsigned char    vif_addr[ETH_ALEN];
+	/*
+	 * more_frms -
+	 * it indicates that one or more high priority frames are buffered at
+	 * UMAC
+	 */
+	unsigned int     more_frms;
+
+	unsigned int     num_rates;
+	unsigned int     rate[4];  /* Units of 500 Kbps */
+	/*
+	 * Control frame to transmit..
+	 * 0 - none
+	 * 1 - RTS-CTS protection
+	 * 2 - CTS-to-Self protection
+	 */
+#define USE_PROTECTION_NONE        0
+#define USE_PROTECTION_RTS         1
+#define USE_PROTECTION_CTS2SELF    2
+	unsigned int     rate_protection_type[4];
+	/*
+	 * use_short_preamble -
+	 * 0 - don't use short preamble
+	 * 1 - use short preamble for this frame transmission
+	 */
+#define DONT_USE_SHORT_PREAMBLE    0
+#define USE_SHORT_PREAMBLE         1
+	unsigned int     rate_preamble_type[4];
+	unsigned int     rate_retries[4];
+
+	/*
+	 * This is used only by FMAC when it wants to encrypt the AUTH
+	 * frame in the case of shared key authentication
+	 * SoftMAC should set this field to 0
+	 */
+	unsigned int     force_encrypt;
+
+} _PACKED_;
+
+struct umac_cmd_mib_stats {
+	struct umac_lmac_msg_hdr hdr;
+} _PACKED_ ;
+
+struct umac_cmd_phy_stats {
+	struct umac_lmac_msg_hdr hdr;
+} _PACKED_ ;
+
+/* Events */
+
+enum UMAC_EVENT {
+	EVENT_FIRST = 0x000,
+	EVENT_NOA,
+	EVENT_IBSS_MERGE,
+	EVENT_TX_DONE,
+	EVENT_RX,
+	EVENT_PF_DROPPED,
+	EVENT_RESET_COMPLETE,
+	EVENT_MIB_STAT,
+	EVENT_PHY_STAT,
+	EVENT_LMAC_ERROR,
+	EVENT_LAST
+} ;
+
+
+/* Event structures */
+
+struct umac_event_lmac_error {
+	struct umac_lmac_msg_hdr hdr;
+	/*
+	 * LMAC will send the unexpected errors in this event..
+	 */
+	unsigned int        error;
+} _PACKED_ ;
+
+struct umac_event_noa {
+	struct umac_lmac_msg_hdr hdr;
+	unsigned int vif_index;
+	unsigned char vif_addr[ETH_ALEN];
+
+	/*
+	 * 1 indicates NoA feature is active
+	 * 0 indicates NoA feature is not active
+	 */
+	unsigned int noa_active;
+#define ABSENCE_START 0 /* Indicates AP is absent */
+#define ABSENCE_STOP  1 /* Indicates AP is present */
+	unsigned int ap_present;
+} _PACKED_ ;
+
+struct umac_event_reset_complete {
+	struct umac_lmac_msg_hdr hdr;
+	char                version[6];
+} _PACKED_ ;
+
+struct umac_event_rx {
+	struct umac_lmac_msg_hdr hdr;
+	unsigned int        buff_len;
+	unsigned int        rate;
+	unsigned int        rssi;
+	unsigned char       timestamp[8];
+#define RX_MIC_SUCCESS 0 /* No MIC error in frame */
+#define RX_MIC_FAILURE 1 /* MIC error in frame */
+	unsigned int        status;
+} _PACKED_ ;
+
+
+struct umac_event_tx_done {
+	struct umac_lmac_msg_hdr hdr;
+
+	/*
+	 * ID of the buffer transmitted...
+	 */
+	unsigned int        buff_pool_id;
+	unsigned int        pdout_voltage;
+
+	/*
+	 * frame_status -
+	 * 0 - success
+	 * 1 - discarded due to retry limit exceeded
+	 * 2 - discarded due to msdu lifetime expiry
+	 * 3 - discarded due to encryption key not available
+	 */
+#define UMAC_EVENT_TX_DONE_SUCCESS            (0)
+#define UMAC_EVENT_TX_DONE_ERROR_RETRY_LIMIT  (1)
+#define UMAC_EVENT_TX_DONE_MSDU_LIFETIME      (2)
+#define UMAC_EVENT_TX_DONE_KEY_NOT_FOUND      (3)
+	unsigned int        frm_status;
+
+	unsigned int        retries_num;
+	unsigned int        rate;
+	unsigned int        queue;
+} _PACKED_ ;
+
+struct umac_event_mib_stats {
+	struct umac_lmac_msg_hdr hdr;
+	unsigned int    wlan_phy_stats;
+	unsigned int    ofdm_rx_crc_success_cnt;
+	unsigned int    ofdm_rx_crc_fail_cnt;
+	unsigned int    ofdm_rx_false_trig_cnt;
+	unsigned int    ofdm_rx_header_fail_cnt;
+	unsigned int    dsss_rx_crc_success_cnt;
+	unsigned int    dsss_rx_crc_fail_cnt;
+	unsigned int    dsss_rx_false_trig_cnt;
+	unsigned int    dsss_rx_header_fail_cnt;
+	unsigned int    ed_cnt;
+	unsigned int    cca_fail_cnt;
+	unsigned int    tx_total_pkt_cnt;
+	unsigned int    tx_complete_pkt_cnt;
+	unsigned int    tx_err_cnt;
+	unsigned int    tx_complete_err_pkt_cnt;
+	unsigned int    tx_ack_pkt_cnt;
+	unsigned int    frag_success_cnt;
+	int             sensitivity;
+} _PACKED_;
+
+struct umac_event_phy_stats {
+	struct umac_lmac_msg_hdr hdr;
+	unsigned int   phy_stats[32];
+};
+
+#endif /* _UCCP310WLAN_UMAC_LMAC_IF_H_ */
+
+/* EOF */
diff --git a/drivers/net/wireless/uccp310wlan/utils.h b/drivers/net/wireless/uccp310wlan/utils.h
new file mode 100644
index 0000000..58a1305
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/utils.h
@@ -0,0 +1,46 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : utils.h
+***
+*** File Description:
+*** This file contains helper macros and data structures used across the code
+***
+******************************************************************************
+*END**************************************************************************/
+#ifndef _UCCP310WLAN_UTILS_H
+#define _UCCP310WLAN_UTILS_H
+
+#define RFPARAM2STR(a)   (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5], a[6], a[7]
+#define RFPARAMSTR       "0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X"
+
+#define MASK_BITS(msb, lsb)               (((1U << ((msb) - (lsb) + 1)) - 1) << (lsb))
+#define EXTRACT_BITS(arg, msb, lsb)       ((arg & MASK_BITS(msb, lsb)) >> (lsb))
+#define INSERT_BITS(arg, msb, lsb, value) ((arg) = ((arg) & ~MASK_BITS(msb, lsb)) | (((value) << (lsb)) & MASK_BITS(msb, lsb)))
+
+#define FRAME_CTRL_TYPE(arg)               EXTRACT_BITS(arg, 3, 2)
+#define FRAME_CTRL_STYPE(arg)              EXTRACT_BITS(arg, 7, 4)
+#define FTYPE_DATA                         0x02
+#define FSTYPE_QOS_DATA                    0x08
+
+#endif /* _UCCP310WLAN_UTILS_H */
+
+/* EOF */
diff --git a/drivers/net/wireless/uccp310wlan/version.h b/drivers/net/wireless/uccp310wlan/version.h
new file mode 100644
index 0000000..0f9cc14
--- /dev/null
+++ b/drivers/net/wireless/uccp310wlan/version.h
@@ -0,0 +1,34 @@
+/*HEADER**********************************************************************
+******************************************************************************
+***
+*** Copyright (c) 2011, 2012, Imagination Technologies Ltd.
+***
+*** This program is free software; you can redistribute it and/or
+*** modify it under the terms of the GNU General Public License
+*** as published by the Free Software Foundation; either version 2
+*** of the License, or (at your option) any later version.
+***
+*** This program is distributed in the hope that it will be useful,
+*** but WITHOUT ANY WARRANTY; without even the implied warranty of
+*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*** GNU General Public License for more details.
+***
+*** You should have received a copy of the GNU General Public License
+*** along with this program; if not, write to the Free Software
+*** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+*** USA.
+***
+*** File Name  : version.h
+***
+*** File Description:
+*** This file contains the UMAC version string
+***
+******************************************************************************
+*END**************************************************************************/
+#ifndef _UCCP310WLAN_VERSION_H
+#define _UCCP310WLAN_VERSION_H
+#define UMAC_VERSION "2_3_1"
+#endif /* _UCCP310WLAN_VERSION_H */
+
+/* EOF */
+
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index a505760..07c7540 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -36,7 +36,8 @@
 config PARPORT_PC
 	tristate "PC-style hardware"
 	depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
-		(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
+		(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
+		!XTENSA && !METAG
 	---help---
 	  You should say Y here if you have a PC-style parallel port. All
 	  IBM PC compatible computers and some Alphas have PC-style
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 8f66924..9fe8d33 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -186,6 +186,18 @@
 	bool
 	select PINCTRL_TEGRA
 
+config PINCTRL_TZ1090
+	bool "Toumaz Xenif TZ1090 pin control driver"
+	depends on SOC_TZ1090
+	select PINMUX
+	select GENERIC_PINCONF
+
+config PINCTRL_TZ1090_PDC
+	bool "Toumaz Xenif TZ1090 PDC pin control driver"
+	depends on SOC_TZ1090
+	select PINMUX
+	select GENERIC_PINCONF
+
 config PINCTRL_U300
 	bool "U300 pin controller driver"
 	depends on ARCH_U300
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 9bdaeb8..8aae378 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -37,6 +37,8 @@
 obj-$(CONFIG_PINCTRL_TEGRA20)	+= pinctrl-tegra20.o
 obj-$(CONFIG_PINCTRL_TEGRA30)	+= pinctrl-tegra30.o
 obj-$(CONFIG_PINCTRL_TEGRA114)	+= pinctrl-tegra114.o
+obj-$(CONFIG_PINCTRL_TZ1090)	+= pinctrl-tz1090.o
+obj-$(CONFIG_PINCTRL_TZ1090_PDC)	+= pinctrl-tz1090-pdc.o
 obj-$(CONFIG_PINCTRL_U300)	+= pinctrl-u300.o
 obj-$(CONFIG_PINCTRL_COH901)	+= pinctrl-coh901.o
 obj-$(CONFIG_PINCTRL_SAMSUNG)	+= pinctrl-samsung.o
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 2ad5a8d..ba465b3 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -37,11 +37,13 @@
 static struct pin_config_item conf_items[] = {
 	PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL),
 	PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL),
+	PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL),
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL),
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL),
 	PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
+	PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA"),
 	PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
 	PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
 	PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
new file mode 100644
index 0000000..7fc74b9
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -0,0 +1,1105 @@
+/*
+ * Pinctrl driver for the Toumaz Xenif TZ1090 PowerDown Controller pins
+ *
+ * Copyright (c) 2013, Imagination Technologies Ltd.
+ *
+ * Derived from Tegra code:
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Derived from code:
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010 NVIDIA Corporation
+ * Copyright (C) 2009-2011 ST-Ericsson AB
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+
+/*
+ * The registers may be shared with other threads/cores, so we need to use the
+ * metag global lock2 for atomicity.
+ */
+#include <asm/global_lock.h>
+
+#include "core.h"
+
+/* Register offsets from bank base address */
+#define REG_GPIO_CONTROL0	0x00
+#define REG_GPIO_CONTROL2	0x08
+
+/* Register field information */
+#define REG_GPIO_CONTROL2_PU_PD_S	16
+#define REG_GPIO_CONTROL2_PDC_POS_S	 4
+#define REG_GPIO_CONTROL2_PDC_DR_S	 2
+#define REG_GPIO_CONTROL2_PDC_SR_S	 1
+#define REG_GPIO_CONTROL2_PDC_SCHMITT_S	 0
+
+/* PU_PD field values */
+#define REG_PU_PD_TRISTATE	0
+#define REG_PU_PD_UP		1
+#define REG_PU_PD_DOWN		2
+#define REG_PU_PD_REPEATER	3
+
+/* DR field values */
+#define REG_DR_2mA		0
+#define REG_DR_4mA		1
+#define REG_DR_8mA		2
+#define REG_DR_12mA		3
+
+/**
+ * struct tz1090_pdc_function - TZ1090 PDC pinctrl mux function
+ * @name:	The name of the function, exported to pinctrl core.
+ * @groups:	An array of pin groups that may select this function.
+ * @ngroups:	The number of entries in @groups.
+ */
+struct tz1090_pdc_function {
+	const char		*name;
+	const char * const	*groups;
+	unsigned		ngroups;
+};
+
+/**
+ * struct tz1090_pdc_pingroup - TZ1090 PDC pin group
+ * @name:	Name of pin group.
+ * @pins:	Array of pin numbers in this pin group.
+ * @npins:	Number of pins in this pin group.
+ * @func:	Function enabled by the mux.
+ * @reg:	Mux register offset.
+ * @bit:	Mux register bit.
+ * @drv:	Drive control supported, otherwise it's a mux.
+ *		This means Schmitt, Slew, and Drive strength.
+ *
+ * A representation of a group of pins (possibly just one pin) in the TZ1090
+ * PDC pin controller. Each group allows some parameter or parameters to be
+ * configured. The most common is mux function selection.
+ */
+struct tz1090_pdc_pingroup {
+	const char		*name;
+	const unsigned int	*pins;
+	unsigned int		npins;
+	int			func;
+	u16			reg;
+	u8			bit;
+	bool			drv;
+};
+
+/*
+ * All PDC pins can be GPIOs. Define these first to match how the GPIO driver
+ * names/numbers its pins.
+ */
+
+enum tz1090_pdc_pin {
+	TZ1090_PDC_PIN_GPIO0,
+	TZ1090_PDC_PIN_GPIO1,
+	TZ1090_PDC_PIN_SYS_WAKE0,
+	TZ1090_PDC_PIN_SYS_WAKE1,
+	TZ1090_PDC_PIN_SYS_WAKE2,
+	TZ1090_PDC_PIN_IR_DATA,
+	TZ1090_PDC_PIN_EXT_POWER,
+};
+
+/* Pin names */
+
+static const struct pinctrl_pin_desc tz1090_pdc_pins[] = {
+	/* PDC GPIOs */
+	PINCTRL_PIN(TZ1090_PDC_PIN_GPIO0,	"gpio0"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_GPIO1,	"gpio1"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_SYS_WAKE0,	"sys_wake0"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_SYS_WAKE1,	"sys_wake1"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_SYS_WAKE2,	"sys_wake2"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_IR_DATA,	"ir_data"),
+	PINCTRL_PIN(TZ1090_PDC_PIN_EXT_POWER,	"ext_power"),
+};
+
+/* Pin group pins */
+
+static const unsigned gpio0_pins[] = {
+	TZ1090_PDC_PIN_GPIO0,
+};
+
+static const unsigned gpio1_pins[] = {
+	TZ1090_PDC_PIN_GPIO1,
+};
+
+static const unsigned pdc_pins[] = {
+	TZ1090_PDC_PIN_GPIO0,
+	TZ1090_PDC_PIN_GPIO1,
+	TZ1090_PDC_PIN_SYS_WAKE0,
+	TZ1090_PDC_PIN_SYS_WAKE1,
+	TZ1090_PDC_PIN_SYS_WAKE2,
+	TZ1090_PDC_PIN_IR_DATA,
+	TZ1090_PDC_PIN_EXT_POWER,
+};
+
+/* Mux functions */
+
+enum tz1090_pdc_mux {
+	/* PDC_GPIO0 mux */
+	TZ1090_PDC_MUX_IR_MOD_STABLE_OUT,
+	/* PDC_GPIO1 mux */
+	TZ1090_PDC_MUX_IR_MOD_POWER_OUT,
+};
+
+/* Pin groups a function can be muxed to */
+
+static const char * const gpio0_groups[] = {
+	"gpio0",
+};
+
+static const char * const gpio1_groups[] = {
+	"gpio1",
+};
+
+#define FUNCTION(mux, fname, group)			\
+	[(TZ1090_PDC_MUX_ ## mux)] = {			\
+		.name = #fname,				\
+		.groups = group##_groups,		\
+		.ngroups = ARRAY_SIZE(group##_groups),	\
+	}
+
+/* Must correlate with enum tz1090_pdc_mux */
+static const struct tz1090_pdc_function tz1090_pdc_functions[] = {
+	/*	 MUX			fn			pingroups */
+	FUNCTION(IR_MOD_STABLE_OUT,	ir_mod_stable_out,	gpio0),
+	FUNCTION(IR_MOD_POWER_OUT,	ir_mod_power_out,	gpio1),
+};
+
+/**
+ * MUX_PG() - Initialise a pin group with mux control
+ * @pg_name:	Pin group name (stringified, _pins appended to get pins array)
+ * @f0:		Function 0 (TZ1090_PDC_MUX_ is prepended)
+ * @mux_r:	Mux register (REG_PINCTRL_ is prepended)
+ * @mux_b:	Bit number in register of mux field
+ */
+#define MUX_PG(pg_name, f0, mux_r, mux_b)			\
+	{							\
+		.name = #pg_name,				\
+		.pins = pg_name##_pins,				\
+		.npins = ARRAY_SIZE(pg_name##_pins),		\
+		.func = TZ1090_PDC_MUX_ ## f0,			\
+		.reg = (REG_ ## mux_r),				\
+		.bit = (mux_b),					\
+	}
+
+/**
+ * DRV_PG() - Initialise a pin group with drive control
+ * @pg_name:	Pin group name (stringified, _pins appended to get pins array)
+ */
+#define DRV_PG(pg_name)				\
+	{							\
+		.name = #pg_name,				\
+		.pins = pg_name##_pins,				\
+		.npins = ARRAY_SIZE(pg_name##_pins),		\
+		.drv = true,					\
+	}
+
+static const struct tz1090_pdc_pingroup tz1090_pdc_groups[] = {
+	/* Muxing pin groups */
+	/*     pg_name, f0,                 mux register,  mux bit */
+	MUX_PG(gpio0,   IR_MOD_STABLE_OUT,  GPIO_CONTROL0, 7),
+	MUX_PG(gpio1,   IR_MOD_POWER_OUT,   GPIO_CONTROL0, 6),
+
+	/* Drive pin groups */
+	/*     pg_name */
+	DRV_PG(pdc),
+};
+
+/**
+ * struct tz1090_pdc_pmx - Private pinctrl data
+ * @dev:	Platform device
+ * @pctl:	Pin control device
+ * @regs:	Register region
+ * @lock:	Lock protecting coherency of mux_en and gpio_en
+ * @mux_en:	Muxes that have been enabled
+ * @gpio_en:	Muxable GPIOs that have been enabled
+ */
+struct tz1090_pdc_pmx {
+	struct device		*dev;
+	struct pinctrl_dev	*pctl;
+	void __iomem		*regs;
+	spinlock_t		lock;
+	u32			mux_en;
+	u32			gpio_en;
+};
+
+static inline u32 pmx_read(struct tz1090_pdc_pmx *pmx, u32 reg)
+{
+	return ioread32(pmx->regs + reg);
+}
+
+static inline void pmx_write(struct tz1090_pdc_pmx *pmx, u32 val, u32 reg)
+{
+	iowrite32(val, pmx->regs + reg);
+}
+
+/*
+ * Pin control operations
+ */
+
+static int tz1090_pdc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(tz1090_pdc_groups);
+}
+
+static const char *tz1090_pdc_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+						     unsigned group)
+{
+	return tz1090_pdc_groups[group].name;
+}
+
+static int tz1090_pdc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+					     unsigned group,
+					     const unsigned **pins,
+					     unsigned *num_pins)
+{
+	*pins = tz1090_pdc_groups[group].pins;
+	*num_pins = tz1090_pdc_groups[group].npins;
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void tz1090_pdc_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+					    struct seq_file *s,
+					    unsigned offset)
+{
+	seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+#endif
+
+static int reserve_map(struct device *dev, struct pinctrl_map **map,
+		       unsigned *reserved_maps, unsigned *num_maps,
+		       unsigned reserve)
+{
+	unsigned old_num = *reserved_maps;
+	unsigned new_num = *num_maps + reserve;
+	struct pinctrl_map *new_map;
+
+	if (old_num >= new_num)
+		return 0;
+
+	new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+	if (!new_map) {
+		dev_err(dev, "krealloc(map) failed\n");
+		return -ENOMEM;
+	}
+
+	memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+	*map = new_map;
+	*reserved_maps = new_num;
+
+	return 0;
+}
+
+static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+		       unsigned *num_maps, const char *group,
+		       const char *function)
+{
+	if (WARN_ON(*num_maps == *reserved_maps))
+		return -ENOSPC;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = group;
+	(*map)[*num_maps].data.mux.function = function;
+	(*num_maps)++;
+
+	return 0;
+}
+
+/**
+ * get_group_selector() - returns the group selector for a group
+ * @pin_group: the pin group to look up
+ *
+ * This is the same as pinctrl_get_group_selector except it doesn't produce an
+ * error message if the group isn't found or debug messages.
+ */
+static int get_group_selector(const char *pin_group)
+{
+	unsigned int group;
+
+	for (group = 0; group < ARRAY_SIZE(tz1090_pdc_groups); ++group)
+		if (!strcmp(tz1090_pdc_groups[group].name, pin_group))
+			return group;
+
+	return -EINVAL;
+}
+
+static int add_map_configs(struct device *dev,
+			   struct pinctrl_map **map,
+			   unsigned *reserved_maps, unsigned *num_maps,
+			   const char *group, unsigned long *configs,
+			   unsigned num_configs)
+{
+	unsigned long *dup_configs;
+	enum pinctrl_map_type type;
+
+	if (WARN_ON(*num_maps == *reserved_maps))
+		return -ENOSPC;
+
+	dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+			      GFP_KERNEL);
+	if (!dup_configs) {
+		dev_err(dev, "kmemdup(configs) failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * We support both pins and pin groups, but we need to figure out which
+	 * one we have.
+	 */
+	if (get_group_selector(group) >= 0)
+		type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	else
+		type = PIN_MAP_TYPE_CONFIGS_PIN;
+	(*map)[*num_maps].type = type;
+	(*map)[*num_maps].data.configs.group_or_pin = group;
+	(*map)[*num_maps].data.configs.configs = dup_configs;
+	(*map)[*num_maps].data.configs.num_configs = num_configs;
+	(*num_maps)++;
+
+	return 0;
+}
+
+static int add_config(struct device *dev, unsigned long **configs,
+		      unsigned *num_configs, unsigned long config)
+{
+	unsigned old_num = *num_configs;
+	unsigned new_num = old_num + 1;
+	unsigned long *new_configs;
+
+	new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
+			       GFP_KERNEL);
+	if (!new_configs) {
+		dev_err(dev, "krealloc(configs) failed\n");
+		return -ENOMEM;
+	}
+
+	new_configs[old_num] = config;
+
+	*configs = new_configs;
+	*num_configs = new_num;
+
+	return 0;
+}
+
+/*
+ * Free a map array built by tz1090_pdc_pinctrl_dt_node_to_map().
+ *
+ * add_map_configs() kmemdup()s the config array for both group and pin
+ * config maps, so both PIN_MAP_TYPE_CONFIGS_GROUP and
+ * PIN_MAP_TYPE_CONFIGS_PIN entries must have their configs freed here
+ * (previously only GROUP entries were freed, leaking pin config arrays).
+ */
+void tz1090_pdc_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+				    struct pinctrl_map *map, unsigned num_maps)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_maps; i++)
+		if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+		    map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+			kfree(map[i].data.configs.configs);
+
+	kfree(map);
+}
+
+/* Describes pinconf properties/flags available from device tree */
+static const struct cfg_param {
+	const char *property;		/* DT property name */
+	enum pin_config_param param;	/* matching generic pinconf parameter */
+	bool flag;			/* true: property may be valueless (boolean flag) */
+} cfg_params[] = {
+	{"tristate",		PIN_CONFIG_BIAS_HIGH_IMPEDANCE,		true},
+	{"pull-up",		PIN_CONFIG_BIAS_PULL_UP,		true},
+	{"pull-down",		PIN_CONFIG_BIAS_PULL_DOWN,		true},
+	{"bus-hold",		PIN_CONFIG_BIAS_BUS_HOLD,		true},
+	{"power-on-start",	PIN_CONFIG_LOW_POWER_MODE,		true},
+	{"schmitt",		PIN_CONFIG_INPUT_SCHMITT_ENABLE,	true},
+	{"slew-rate",		PIN_CONFIG_SLEW_RATE,			false},
+	{"drive-strength",	PIN_CONFIG_DRIVE_STRENGTH,		false},
+
+};
+
+/*
+ * Convert one DT subnode into pinctrl maps: an optional "function" mux map
+ * plus config maps built from the cfg_params properties, applied to every
+ * entry of the "pins" property (each entry may name a pin or a pin group).
+ * Returns 0 or a negative errno; the temporary configs array is always
+ * freed on exit.
+ */
+int tz1090_pdc_pinctrl_dt_subnode_to_map(struct device *dev,
+					 struct device_node *np,
+					 struct pinctrl_map **map,
+					 unsigned *reserved_maps,
+					 unsigned *num_maps)
+{
+	int ret, i;
+	const char *function;
+	u32 val;
+	unsigned long config;
+	unsigned long *configs = NULL;
+	unsigned num_configs = 0;
+	unsigned reserve;
+	struct property *prop;
+	const char *group;
+
+	ret = of_property_read_string(np, "function", &function);
+	if (ret < 0) {
+		/* EINVAL=missing, which is fine since it's optional */
+		if (ret != -EINVAL)
+			dev_err(dev,
+				"could not parse property function\n");
+		function = NULL;
+	}
+
+	/* Collect every recognised config property into the configs array */
+	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+		ret = of_property_read_u32(np, cfg_params[i].property, &val);
+		/* flags don't have to have a value */
+		if (ret == -EOVERFLOW && cfg_params[i].flag) {
+			val = 1;
+			ret = 0;
+		}
+		if (!ret) {
+			config = pinconf_to_config_packed(cfg_params[i].param,
+							  val);
+			ret = add_config(dev, &configs, &num_configs, config);
+			if (ret < 0)
+				goto exit;
+		/* EINVAL=missing, which is fine since it's optional */
+		} else if (ret != -EINVAL) {
+			/* malformed property: warn but carry on */
+			dev_err(dev, "could not parse property %s\n",
+				cfg_params[i].property);
+		}
+	}
+
+	/* One mux map and/or one config map per "pins" entry */
+	reserve = 0;
+	if (function != NULL)
+		reserve++;
+	if (num_configs)
+		reserve++;
+	ret = of_property_count_strings(np, "pins");
+	if (ret < 0) {
+		dev_err(dev, "could not parse property pins\n");
+		goto exit;
+	}
+	reserve *= ret;
+
+	ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
+	if (ret < 0)
+		goto exit;
+
+	of_property_for_each_string(np, "pins", prop, group) {
+		if (function) {
+			ret = add_map_mux(map, reserved_maps, num_maps,
+					  group, function);
+			if (ret < 0)
+				goto exit;
+		}
+
+		if (num_configs) {
+			ret = add_map_configs(dev, map, reserved_maps,
+					      num_maps, group, configs,
+					      num_configs);
+			if (ret < 0)
+				goto exit;
+		}
+	}
+
+	ret = 0;
+
+exit:
+	kfree(configs);
+	return ret;
+}
+
+/*
+ * Top-level dt_node_to_map callback: build maps from every child subnode of
+ * @np_config. On any subnode failure the partially built maps are freed
+ * before returning the error.
+ */
+int tz1090_pdc_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+				      struct device_node *np_config,
+				      struct pinctrl_map **map,
+				      unsigned *num_maps)
+{
+	unsigned reserved_maps;
+	struct device_node *np;
+	int ret;
+
+	reserved_maps = 0;
+	*map = NULL;
+	*num_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = tz1090_pdc_pinctrl_dt_subnode_to_map(pctldev->dev, np,
+							   map, &reserved_maps,
+							   num_maps);
+		if (ret < 0) {
+			tz1090_pdc_pinctrl_dt_free_map(pctldev, *map,
+						       *num_maps);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Pinctrl core callbacks: group enumeration and DT map handling */
+static struct pinctrl_ops tz1090_pdc_pinctrl_ops = {
+	.get_groups_count	= tz1090_pdc_pinctrl_get_groups_count,
+	.get_group_name		= tz1090_pdc_pinctrl_get_group_name,
+	.get_group_pins		= tz1090_pdc_pinctrl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+	.pin_dbg_show		= tz1090_pdc_pinctrl_pin_dbg_show,
+#endif
+	.dt_node_to_map		= tz1090_pdc_pinctrl_dt_node_to_map,
+	.dt_free_map		= tz1090_pdc_pinctrl_dt_free_map,
+};
+
+/*
+ * Pin mux operations
+ */
+
+/* Number of selectable mux functions */
+static int tz1090_pdc_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(tz1090_pdc_functions);
+}
+
+/* Name of mux function @function */
+static const char *tz1090_pdc_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+						    unsigned function)
+{
+	return tz1090_pdc_functions[function].name;
+}
+
+/* Groups which can select mux function @function */
+static int tz1090_pdc_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+					      unsigned function,
+					      const char * const **groups,
+					      unsigned * const num_groups)
+{
+	*groups = tz1090_pdc_functions[function].groups;
+	*num_groups = tz1090_pdc_functions[function].ngroups;
+
+	return 0;
+}
+
+/**
+ * tz1090_pdc_pinctrl_mux() - update mux bit
+ * @pmx:		Pinmux data
+ * @grp:		Pin mux group
+ *
+ * Recompute and write the group's mux bit: set when the pin is in
+ * peripheral mux use (mux_en) and not claimed as a GPIO (gpio_en).
+ * All callers hold pmx->lock; the register write itself is protected
+ * by __global_lock2 as the register is shared with other threads/cores.
+ */
+static void tz1090_pdc_pinctrl_mux(struct tz1090_pdc_pmx *pmx,
+				   const struct tz1090_pdc_pingroup *grp)
+{
+	u32 reg, select;
+	unsigned int pin_shift = grp->pins[0];
+	unsigned long flags;
+
+	/* select = mux && !gpio */
+	select = ((pmx->mux_en & ~pmx->gpio_en) >> pin_shift) & 1;
+
+	/* set up the mux */
+	__global_lock2(flags);
+	reg = pmx_read(pmx, grp->reg);
+	reg &= ~BIT(grp->bit);
+	reg |= select << grp->bit;
+	pmx_write(pmx, reg, grp->reg);
+	__global_unlock2(flags);
+}
+
+/* Enable mux @function on group @group (pinmux .enable callback). */
+static int tz1090_pdc_pinctrl_enable(struct pinctrl_dev *pctldev,
+				     unsigned function, unsigned group)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group];
+
+	dev_dbg(pctldev->dev, "%s(func=%u (%s), group=%u (%s))\n",
+		__func__,
+		function, tz1090_pdc_functions[function].name,
+		group, tz1090_pdc_groups[group].name);
+
+	/* is it even a mux? (drive groups have no mux control) */
+	if (grp->drv)
+		return -EINVAL;
+
+	/* does this group even control the function? */
+	if (function != grp->func)
+		return -EINVAL;
+
+	/* record the pin being muxed and update mux bit */
+	spin_lock(&pmx->lock);
+	pmx->mux_en |= BIT(grp->pins[0]);
+	tz1090_pdc_pinctrl_mux(pmx, grp);
+	spin_unlock(&pmx->lock);
+	return 0;
+}
+
+/* Disable mux @function on group @group (pinmux .disable callback). */
+static void tz1090_pdc_pinctrl_disable(struct pinctrl_dev *pctldev,
+				       unsigned function, unsigned group)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group];
+
+	dev_dbg(pctldev->dev, "%s(func=%u (%s), group=%u (%s))\n",
+		__func__,
+		function, tz1090_pdc_functions[function].name,
+		group, tz1090_pdc_groups[group].name);
+
+	/* is it even a mux? (drive groups have no mux control) */
+	if (grp->drv)
+		return;
+
+	/* does this group even control the function? */
+	if (function != grp->func)
+		return;
+
+	/* record the pin being unmuxed and update mux bit */
+	spin_lock(&pmx->lock);
+	pmx->mux_en &= ~BIT(grp->pins[0]);
+	tz1090_pdc_pinctrl_mux(pmx, grp);
+	spin_unlock(&pmx->lock);
+}
+
+/* Find the mux group (non-drive group) whose pin is @pin, or NULL. */
+static const struct tz1090_pdc_pingroup *find_mux_group(
+						struct tz1090_pdc_pmx *pmx,
+						unsigned int pin)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tz1090_pdc_groups); i++) {
+		const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[i];
+
+		/* only muxes (not drive groups), with a matching pin */
+		if (!grp->drv && grp->pins[0] == pin)
+			return grp;
+	}
+
+	return NULL;
+}
+
+/*
+ * GPIO request hook: mark @pin as in GPIO use so the mux bit is cleared
+ * (select = mux && !gpio). Pins without a mux group need no action.
+ */
+static int tz1090_pdc_pinctrl_gpio_request_enable(
+					struct pinctrl_dev *pctldev,
+					struct pinctrl_gpio_range *range,
+					unsigned int pin)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *grp = find_mux_group(pmx, pin);
+
+	if (grp) {
+		/* record the pin in GPIO use and update mux bit */
+		spin_lock(&pmx->lock);
+		pmx->gpio_en |= BIT(pin);
+		tz1090_pdc_pinctrl_mux(pmx, grp);
+		spin_unlock(&pmx->lock);
+	}
+	return 0;
+}
+
+/*
+ * GPIO free hook: drop the GPIO claim on @pin and restore the mux bit
+ * from the recorded mux_en state.
+ */
+static void tz1090_pdc_pinctrl_gpio_disable_free(
+					struct pinctrl_dev *pctldev,
+					struct pinctrl_gpio_range *range,
+					unsigned int pin)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *grp = find_mux_group(pmx, pin);
+
+	if (grp) {
+		/* record the pin not in GPIO use and update mux bit */
+		spin_lock(&pmx->lock);
+		pmx->gpio_en &= ~BIT(pin);
+		tz1090_pdc_pinctrl_mux(pmx, grp);
+		spin_unlock(&pmx->lock);
+	}
+}
+
+/* Pin mux callbacks, including the GPIO request/free hooks */
+static struct pinmux_ops tz1090_pdc_pinmux_ops = {
+	.get_functions_count	= tz1090_pdc_pinctrl_get_funcs_count,
+	.get_function_name	= tz1090_pdc_pinctrl_get_func_name,
+	.get_function_groups	= tz1090_pdc_pinctrl_get_func_groups,
+	.enable			= tz1090_pdc_pinctrl_enable,
+	.disable		= tz1090_pdc_pinctrl_disable,
+	.gpio_request_enable	= tz1090_pdc_pinctrl_gpio_request_enable,
+	.gpio_disable_free	= tz1090_pdc_pinctrl_gpio_disable_free,
+};
+
+/*
+ * Pin config operations
+ */
+
+/*
+ * Look up the register/field controlling @param for @pin. All supported
+ * parameters are input bias settings packed into a 2-bit PU_PD field;
+ * *val receives the field value meaning "this bias is active".
+ * Fixes: removed stray ';' after the switch; honour @report_err, which
+ * was previously accepted but never used.
+ */
+static int tz1090_pdc_pinconf_reg(struct pinctrl_dev *pctldev,
+				  unsigned int pin,
+				  enum pin_config_param param,
+				  bool report_err,
+				  u32 *reg, u32 *width, u32 *mask, u32 *shift,
+				  u32 *val)
+{
+	/* Find information about parameter's register */
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		*val = REG_PU_PD_TRISTATE;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		*val = REG_PU_PD_UP;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		*val = REG_PU_PD_DOWN;
+		break;
+	case PIN_CONFIG_BIAS_BUS_HOLD:
+		*val = REG_PU_PD_REPEATER;
+		break;
+	default:
+		if (report_err)
+			dev_dbg(pctldev->dev,
+				"%s: unsupported config param %d\n",
+				__func__, param);
+		return -ENOTSUPP;
+	}
+
+	/* Only input bias parameters supported */
+	*reg = REG_GPIO_CONTROL2;
+	*shift = REG_GPIO_CONTROL2_PU_PD_S + pin*2;
+	*width = 2;
+
+	/* Calculate field information */
+	*mask = (BIT(*width) - 1) << *shift;
+
+	return 0;
+}
+
+/*
+ * Read a per-pin bias config. Per pinconf convention, returns -EINVAL
+ * when the requested bias is not the one currently active.
+ */
+static int tz1090_pdc_pinconf_get(struct pinctrl_dev *pctldev,
+				  unsigned pin, unsigned long *config)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	int ret;
+	u32 reg, width, mask, shift, val, tmp, arg;
+
+	/* Get register information */
+	ret = tz1090_pdc_pinconf_reg(pctldev, pin, param, true,
+				     &reg, &width, &mask, &shift, &val);
+	if (ret < 0)
+		return ret;
+
+	/* Extract field from register; arg = 1 iff this bias is selected */
+	tmp = pmx_read(pmx, reg);
+	arg = ((tmp & mask) >> shift) == val;
+
+	/* Config not active */
+	if (!arg)
+		return -EINVAL;
+
+	/* And pack config */
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+/*
+ * Set a per-pin bias config. The argument must be 0 or 1: 1 writes the
+ * parameter's field value, 0 just clears the field.
+ */
+static int tz1090_pdc_pinconf_set(struct pinctrl_dev *pctldev,
+				  unsigned pin, unsigned long config)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param = pinconf_to_config_param(config);
+	unsigned int arg = pinconf_to_config_argument(config);
+	int ret;
+	u32 reg, width, mask, shift, val, tmp;
+	unsigned long flags;
+
+	dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
+		__func__, tz1090_pdc_pins[pin].name, config);
+
+	/* Get register information */
+	ret = tz1090_pdc_pinconf_reg(pctldev, pin, param, true,
+				     &reg, &width, &mask, &shift, &val);
+	if (ret < 0)
+		return ret;
+
+	/* Unpack argument and range check it */
+	if (arg > 1) {
+		dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
+			__func__, arg);
+		return -EINVAL;
+	}
+
+	/* Write register field (register shared across cores/threads) */
+	__global_lock2(flags);
+	tmp = pmx_read(pmx, reg);
+	tmp &= ~mask;
+	if (arg)
+		tmp |= val << shift;
+	pmx_write(pmx, tmp, reg);
+	__global_unlock2(flags);
+
+	return 0;
+}
+
+/* Map a 1-bit register field to a config arg: 0 => not set (-EINVAL) */
+static const int tz1090_pdc_boolean_map[] = {
+	[0]		= -EINVAL,
+	[1]		= 1,
+};
+
+/* Map the 2-bit drive strength register field to milliamps */
+static const int tz1090_pdc_dr_map[] = {
+	[REG_DR_2mA]	= 2,
+	[REG_DR_4mA]	= 4,
+	[REG_DR_8mA]	= 8,
+	[REG_DR_12mA]	= 12,
+};
+
+/*
+ * Look up the register/field for group drive parameter @param, along with
+ * a @map translating raw field values to config arguments (-EINVAL in the
+ * map means "not set"). Returns -ENOTSUPP for non-drive groups and for
+ * unsupported parameters.
+ * Fixes: removed stray ';' after the switch; the default case now honours
+ * @report_err like the drive-control check above does.
+ */
+static int tz1090_pdc_pinconf_group_reg(struct pinctrl_dev *pctldev,
+					const struct tz1090_pdc_pingroup *g,
+					enum pin_config_param param,
+					bool report_err, u32 *reg, u32 *width,
+					u32 *mask, u32 *shift, const int **map)
+{
+	/* Drive configuration applies in groups, but not to all groups. */
+	if (!g->drv) {
+		if (report_err)
+			dev_dbg(pctldev->dev,
+				"%s: group %s has no drive control\n",
+				__func__, g->name);
+		return -ENOTSUPP;
+	}
+
+	/* Find information about drive parameter's register */
+	*reg = REG_GPIO_CONTROL2;
+	switch (param) {
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		*shift = REG_GPIO_CONTROL2_PDC_SCHMITT_S;
+		*width = 1;
+		*map = tz1090_pdc_boolean_map;
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		*shift = REG_GPIO_CONTROL2_PDC_SR_S;
+		*width = 1;
+		*map = tz1090_pdc_boolean_map;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		*shift = REG_GPIO_CONTROL2_PDC_DR_S;
+		*width = 2;
+		*map = tz1090_pdc_dr_map;
+		break;
+	case PIN_CONFIG_LOW_POWER_MODE:
+		*shift = REG_GPIO_CONTROL2_PDC_POS_S;
+		*width = 1;
+		*map = tz1090_pdc_boolean_map;
+		break;
+	default:
+		if (report_err)
+			dev_dbg(pctldev->dev,
+				"%s: unsupported config param %d\n",
+				__func__, param);
+		return -ENOTSUPP;
+	}
+
+	/* Calculate field information */
+	*mask = (BIT(*width) - 1) << *shift;
+
+	return 0;
+}
+
+/*
+ * Read a group drive config. Returns -EINVAL (via the value map) when the
+ * config is not set, per pinconf convention.
+ */
+static int tz1090_pdc_pinconf_group_get(struct pinctrl_dev *pctldev,
+					unsigned group,
+					unsigned long *config)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *g = &tz1090_pdc_groups[group];
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	int ret, arg;
+	u32 reg, width, mask, shift, val;
+	const int *map;
+
+	/* Get register information */
+	ret = tz1090_pdc_pinconf_group_reg(pctldev, g, param, true,
+					   &reg, &width, &mask, &shift, &map);
+	if (ret < 0)
+		return ret;
+
+	/* Extract field from register and translate via the value map */
+	val = pmx_read(pmx, reg);
+	arg = map[(val & mask) >> shift];
+	if (arg < 0)
+		return arg;
+
+	/* And pack config */
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+/*
+ * Set a drive config on a group. If the parameter has no group register,
+ * fall back to applying it to each pin individually (a convenience for
+ * per-pin bias parameters given a group name).
+ * NOTE(review): an argument with no matching register value only logs a
+ * debug message and still returns 0 — confirm this is intentional.
+ */
+static int tz1090_pdc_pinconf_group_set(struct pinctrl_dev *pctldev,
+					unsigned group, unsigned long config)
+{
+	struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pdc_pingroup *g = &tz1090_pdc_groups[group];
+	enum pin_config_param param = pinconf_to_config_param(config);
+	const unsigned int *pit;
+	unsigned int i;
+	int ret, arg;
+	u32 reg, width, mask, shift, val;
+	unsigned long flags;
+	const int *map;
+
+	dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
+		__func__, g->name, config);
+
+	/* Get register information */
+	ret = tz1090_pdc_pinconf_group_reg(pctldev, g, param, true,
+					   &reg, &width, &mask, &shift, &map);
+	if (ret < 0) {
+		/*
+		 * Maybe we're trying to set a per-pin configuration of a group,
+		 * so do the pins one by one. This is mainly as a convenience.
+		 */
+		for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
+			ret = tz1090_pdc_pinconf_set(pctldev, *pit, config);
+			if (ret)
+				return ret;
+		}
+		return 0;
+	}
+
+	/* Unpack argument and map it to register value */
+	arg = pinconf_to_config_argument(config);
+	for (i = 0; i < BIT(width); ++i) {
+		if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
+			/* Write register field */
+			__global_lock2(flags);
+			val = pmx_read(pmx, reg);
+			val &= ~mask;
+			val |= i << shift;
+			pmx_write(pmx, val, reg);
+			__global_unlock2(flags);
+			return 0;
+		}
+	}
+
+	dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
+		__func__, arg);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Print a config as "property=value" using the DT property names. */
+static void tz1090_pdc_pinconf_config_dbg_show(struct pinctrl_dev *pctldev,
+					       struct seq_file *s,
+					       unsigned long config)
+{
+	enum pin_config_param param = pinconf_to_config_param(config);
+	u16 arg = pinconf_to_config_argument(config);
+	const char *pname = "unknown";
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+		if (cfg_params[i].param == param) {
+			pname = cfg_params[i].property;
+			break;
+		}
+	}
+
+	seq_printf(s, "%s=%d", pname, arg);
+}
+#endif
+
+/* Pin config callbacks (generic pin config parameters) */
+struct pinconf_ops tz1090_pdc_pinconf_ops = {
+	.is_generic		= true,
+	.pin_config_get		= tz1090_pdc_pinconf_get,
+	.pin_config_set		= tz1090_pdc_pinconf_set,
+	.pin_config_group_get	= tz1090_pdc_pinconf_group_get,
+	.pin_config_group_set	= tz1090_pdc_pinconf_group_set,
+#ifdef CONFIG_DEBUG_FS
+	.pin_config_config_dbg_show	= tz1090_pdc_pinconf_config_dbg_show,
+#endif
+};
+
+/*
+ * Pin control driver setup
+ */
+
+/* Pinctrl descriptor; .name/.pins/.npins are filled in at probe time */
+static struct pinctrl_desc tz1090_pdc_pinctrl_desc = {
+	.pctlops	= &tz1090_pdc_pinctrl_ops,
+	.pmxops		= &tz1090_pdc_pinmux_ops,
+	.confops	= &tz1090_pdc_pinconf_ops,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Probe: allocate driver state, map the PDC pinctrl register bank and
+ * register with the pinctrl core. All resources are devm-managed except
+ * the pinctrl registration (undone in remove).
+ */
+static int tz1090_pdc_pinctrl_probe(struct platform_device *pdev)
+{
+	struct tz1090_pdc_pmx *pmx;
+	struct resource *res;
+
+	pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
+	if (!pmx) {
+		dev_err(&pdev->dev, "Can't alloc tz1090_pdc_pmx\n");
+		return -ENOMEM;
+	}
+	pmx->dev = &pdev->dev;
+	spin_lock_init(&pmx->lock);
+
+	/* Complete the descriptor with per-device/per-table information */
+	tz1090_pdc_pinctrl_desc.name = dev_name(&pdev->dev);
+	tz1090_pdc_pinctrl_desc.pins = tz1090_pdc_pins;
+	tz1090_pdc_pinctrl_desc.npins = ARRAY_SIZE(tz1090_pdc_pins);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Missing MEM resource\n");
+		return -ENODEV;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				     resource_size(res),
+				     dev_name(&pdev->dev))) {
+		dev_err(&pdev->dev,
+			"Couldn't request MEM resource\n");
+		return -ENODEV;
+	}
+
+	pmx->regs = devm_ioremap(&pdev->dev, res->start,
+				 resource_size(res));
+	if (!pmx->regs) {
+		dev_err(&pdev->dev, "Couldn't ioremap regs\n");
+		return -ENODEV;
+	}
+
+	pmx->pctl = pinctrl_register(&tz1090_pdc_pinctrl_desc, &pdev->dev, pmx);
+	if (!pmx->pctl) {
+		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, pmx);
+
+	dev_info(&pdev->dev, "TZ1090 PDC pinctrl driver initialised\n");
+
+	return 0;
+}
+
+/* Remove: unregister from the pinctrl core (iomem is devm-managed). */
+static int tz1090_pdc_pinctrl_remove(struct platform_device *pdev)
+{
+	struct tz1090_pdc_pmx *pmx = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(pmx->pctl);
+
+	return 0;
+}
+
+/* Device tree match table */
+static struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
+	{ .compatible = "img,tz1090-pdc-pinctrl", },
+	{ },
+};
+
+/* Platform driver glue */
+static struct platform_driver tz1090_pdc_pinctrl_driver = {
+	.driver = {
+		.name		= "tz1090-pdc-pinctrl",
+		.owner		= THIS_MODULE,
+		.of_match_table	= tz1090_pdc_pinctrl_of_match,
+	},
+	.probe	= tz1090_pdc_pinctrl_probe,
+	.remove	= tz1090_pdc_pinctrl_remove,
+};
+
+/* Registered at postcore_initcall, i.e. before regular driver initcalls */
+static int __init tz1090_pdc_pinctrl_init(void)
+{
+	return platform_driver_register(&tz1090_pdc_pinctrl_driver);
+}
+
+/* Module unload: unregister the platform driver */
+static void __exit tz1090_pdc_pinctrl_exit(void)
+{
+	platform_driver_unregister(&tz1090_pdc_pinctrl_driver);
+}
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("Toumaz Xenif TZ1090 PDC pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, tz1090_pdc_pinctrl_of_match);
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
new file mode 100644
index 0000000..9fbae88
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -0,0 +1,2152 @@
+/*
+ * Pinctrl driver for the Toumaz Xenif TZ1090 SoC
+ *
+ * Copyright (c) 2013, Imagination Technologies Ltd.
+ *
+ * Derived from Tegra code:
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Derived from code:
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010 NVIDIA Corporation
+ * Copyright (C) 2009-2011 ST-Ericsson AB
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/*
+ * The registers may be shared with other threads/cores, so we need to use the
+ * metag global lock2 for atomicity.
+ */
+#include <asm/global_lock.h>
+
+#include "core.h"
+
+/* Register offsets from bank base address */
+#define REG_PINCTRL_SELECT	0x10
+#define REG_PINCTRL_SCHMITT	0x90
+#define REG_PINCTRL_PU_PD	0xa0
+#define REG_PINCTRL_SR		0xc0
+#define REG_PINCTRL_DR		0xd0
+#define REG_PINCTRL_IF_CTL	0xe0
+
+/* REG_PINCTRL_PU_PD field values */
+#define REG_PU_PD_TRISTATE	0
+#define REG_PU_PD_UP		1
+#define REG_PU_PD_DOWN		2
+#define REG_PU_PD_REPEATER	3
+
+/* REG_PINCTRL_DR field values */
+#define REG_DR_2mA		0
+#define REG_DR_4mA		1
+#define REG_DR_8mA		2
+#define REG_DR_12mA		3
+
+/**
+ * struct tz1090_function - TZ1090 pinctrl mux function
+ * @name:	The name of the function, exported to pinctrl core.
+ * @groups:	An array of pin groups that may select this function.
+ * @ngroups:	The number of entries in @groups.
+ *
+ * NOTE(review): struct tz1090_muxdesc::funcs appears to index the table of
+ * these functions — confirm against the tables further down the file.
+ */
+struct tz1090_function {
+	const char		*name;
+	const char * const	*groups;
+	unsigned		ngroups;
+};
+
+/**
+ * struct tz1090_muxdesc - TZ1090 individual mux description
+ * @funcs:	Function for each mux value (up to 5, one per value of the
+ *		@width-bit mux field).
+ * @reg:	Mux register offset. 0 if unsupported.
+ * @bit:	Mux register bit. 0 if unsupported.
+ * @width:	Mux field width. 0 if unsupported.
+ *
+ * A representation of a group of signals (possibly just one signal) in the
+ * TZ1090 which can be muxed to a set of functions or sub muxes.
+ */
+struct tz1090_muxdesc {
+	int	funcs[5];
+	u16	reg;
+	u8	bit;
+	u8	width;
+};
+
+/**
+ * struct tz1090_pingroup - TZ1090 pin group
+ * @name:	Name of pin group.
+ * @pins:	Array of pin numbers in this pin group.
+ * @npins:	Number of pins in this pin group.
+ * @mux:	Top level mux.
+ * @drv:	Drive control supported, 0 if unsupported.
+ *		This means Schmitt, Slew, and Drive strength.
+ * @slw_bit:	Slew register bit. 0 if unsupported.
+ *		The same bit is used for Schmitt, and Drive (*2).
+ * @func:	Currently muxed function.
+ * @func_count:	Number of pins using current mux function.
+ *
+ * A representation of a group of pins (possibly just one pin) in the TZ1090
+ * pin controller. Each group allows some parameter or parameters to be
+ * configured. The most common is mux function selection.
+ */
+struct tz1090_pingroup {
+	const char		*name;
+	const unsigned int	*pins;
+	unsigned int		npins;
+	struct tz1090_muxdesc	mux;
+
+	bool			drv;
+	u8			slw_bit;
+
+	/* mutable runtime state: current function and its user count */
+	int			func;
+	unsigned int		func_count;
+};
+
+/*
+ * Most pins affected by the pinmux can also be GPIOs. Define these first.
+ * These must match how the GPIO driver names/numbers its pins.
+ */
+
+/*
+ * Pin numbering. The order of the GPIO entries defines the pin numbers and
+ * must match the GPIO driver's numbering (see comment above).
+ */
+enum tz1090_pin {
+	/* GPIO pins */
+	TZ1090_PIN_SDIO_CLK,
+	TZ1090_PIN_SDIO_CMD,
+	TZ1090_PIN_SDIO_D0,
+	TZ1090_PIN_SDIO_D1,
+	TZ1090_PIN_SDIO_D2,
+	TZ1090_PIN_SDIO_D3,
+	TZ1090_PIN_SDH_CD,
+	TZ1090_PIN_SDH_WP,
+	TZ1090_PIN_SPI0_MCLK,
+	TZ1090_PIN_SPI0_CS0,
+	TZ1090_PIN_SPI0_CS1,
+	TZ1090_PIN_SPI0_CS2,
+	TZ1090_PIN_SPI0_DOUT,
+	TZ1090_PIN_SPI0_DIN,
+	TZ1090_PIN_SPI1_MCLK,
+	TZ1090_PIN_SPI1_CS0,
+	TZ1090_PIN_SPI1_CS1,
+	TZ1090_PIN_SPI1_CS2,
+	TZ1090_PIN_SPI1_DOUT,
+	TZ1090_PIN_SPI1_DIN,
+	TZ1090_PIN_UART0_RXD,
+	TZ1090_PIN_UART0_TXD,
+	TZ1090_PIN_UART0_CTS,
+	TZ1090_PIN_UART0_RTS,
+	TZ1090_PIN_UART1_RXD,
+	TZ1090_PIN_UART1_TXD,
+	TZ1090_PIN_SCB0_SDAT,
+	TZ1090_PIN_SCB0_SCLK,
+	TZ1090_PIN_SCB1_SDAT,
+	TZ1090_PIN_SCB1_SCLK,
+	TZ1090_PIN_SCB2_SDAT,
+	TZ1090_PIN_SCB2_SCLK,
+	TZ1090_PIN_I2S_MCLK,
+	TZ1090_PIN_I2S_BCLK_OUT,
+	TZ1090_PIN_I2S_LRCLK_OUT,
+	TZ1090_PIN_I2S_DOUT0,
+	TZ1090_PIN_I2S_DOUT1,
+	TZ1090_PIN_I2S_DOUT2,
+	TZ1090_PIN_I2S_DIN,
+	TZ1090_PIN_PDM_A,
+	TZ1090_PIN_PDM_B,
+	TZ1090_PIN_PDM_C,
+	TZ1090_PIN_PDM_D,
+	TZ1090_PIN_TFT_RED0,
+	TZ1090_PIN_TFT_RED1,
+	TZ1090_PIN_TFT_RED2,
+	TZ1090_PIN_TFT_RED3,
+	TZ1090_PIN_TFT_RED4,
+	TZ1090_PIN_TFT_RED5,
+	TZ1090_PIN_TFT_RED6,
+	TZ1090_PIN_TFT_RED7,
+	TZ1090_PIN_TFT_GREEN0,
+	TZ1090_PIN_TFT_GREEN1,
+	TZ1090_PIN_TFT_GREEN2,
+	TZ1090_PIN_TFT_GREEN3,
+	TZ1090_PIN_TFT_GREEN4,
+	TZ1090_PIN_TFT_GREEN5,
+	TZ1090_PIN_TFT_GREEN6,
+	TZ1090_PIN_TFT_GREEN7,
+	TZ1090_PIN_TFT_BLUE0,
+	TZ1090_PIN_TFT_BLUE1,
+	TZ1090_PIN_TFT_BLUE2,
+	TZ1090_PIN_TFT_BLUE3,
+	TZ1090_PIN_TFT_BLUE4,
+	TZ1090_PIN_TFT_BLUE5,
+	TZ1090_PIN_TFT_BLUE6,
+	TZ1090_PIN_TFT_BLUE7,
+	TZ1090_PIN_TFT_VDDEN_GD,
+	TZ1090_PIN_TFT_PANELCLK,
+	TZ1090_PIN_TFT_BLANK_LS,
+	TZ1090_PIN_TFT_VSYNC_NS,
+	TZ1090_PIN_TFT_HSYNC_NR,
+	TZ1090_PIN_TFT_VD12ACB,
+	TZ1090_PIN_TFT_PWRSAVE,
+	TZ1090_PIN_TX_ON,
+	TZ1090_PIN_RX_ON,
+	TZ1090_PIN_PLL_ON,
+	TZ1090_PIN_PA_ON,
+	TZ1090_PIN_RX_HP,
+	TZ1090_PIN_GAIN0,
+	TZ1090_PIN_GAIN1,
+	TZ1090_PIN_GAIN2,
+	TZ1090_PIN_GAIN3,
+	TZ1090_PIN_GAIN4,
+	TZ1090_PIN_GAIN5,
+	TZ1090_PIN_GAIN6,
+	TZ1090_PIN_GAIN7,
+	TZ1090_PIN_ANT_SEL0,
+	TZ1090_PIN_ANT_SEL1,
+	TZ1090_PIN_SDH_CLK_IN,
+
+	/* Non-GPIO pins */
+	TZ1090_PIN_TCK,
+	TZ1090_PIN_TRST,
+	TZ1090_PIN_TDI,
+	TZ1090_PIN_TDO,
+	TZ1090_PIN_TMS,
+	TZ1090_PIN_CLK_OUT0,
+	TZ1090_PIN_CLK_OUT1,
+
+	/* Count of GPIO-capable pins (index of the first non-GPIO pin) */
+	NUM_GPIOS = TZ1090_PIN_TCK,
+};
+
+/* Pin names */
+
+/* Pin descriptions; order must match enum tz1090_pin (GPIO driver numbering) */
+static const struct pinctrl_pin_desc tz1090_pins[] = {
+	/* GPIO pins */
+	PINCTRL_PIN(TZ1090_PIN_SDIO_CLK,	"sdio_clk"),
+	PINCTRL_PIN(TZ1090_PIN_SDIO_CMD,	"sdio_cmd"),
+	PINCTRL_PIN(TZ1090_PIN_SDIO_D0,		"sdio_d0"),
+	PINCTRL_PIN(TZ1090_PIN_SDIO_D1,		"sdio_d1"),
+	PINCTRL_PIN(TZ1090_PIN_SDIO_D2,		"sdio_d2"),
+	PINCTRL_PIN(TZ1090_PIN_SDIO_D3,		"sdio_d3"),
+	PINCTRL_PIN(TZ1090_PIN_SDH_CD,		"sdh_cd"),
+	PINCTRL_PIN(TZ1090_PIN_SDH_WP,		"sdh_wp"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_MCLK,	"spi0_mclk"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_CS0,	"spi0_cs0"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_CS1,	"spi0_cs1"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_CS2,	"spi0_cs2"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_DOUT,	"spi0_dout"),
+	PINCTRL_PIN(TZ1090_PIN_SPI0_DIN,	"spi0_din"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_MCLK,	"spi1_mclk"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_CS0,	"spi1_cs0"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_CS1,	"spi1_cs1"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_CS2,	"spi1_cs2"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_DOUT,	"spi1_dout"),
+	PINCTRL_PIN(TZ1090_PIN_SPI1_DIN,	"spi1_din"),
+	PINCTRL_PIN(TZ1090_PIN_UART0_RXD,	"uart0_rxd"),
+	PINCTRL_PIN(TZ1090_PIN_UART0_TXD,	"uart0_txd"),
+	PINCTRL_PIN(TZ1090_PIN_UART0_CTS,	"uart0_cts"),
+	PINCTRL_PIN(TZ1090_PIN_UART0_RTS,	"uart0_rts"),
+	PINCTRL_PIN(TZ1090_PIN_UART1_RXD,	"uart1_rxd"),
+	PINCTRL_PIN(TZ1090_PIN_UART1_TXD,	"uart1_txd"),
+	PINCTRL_PIN(TZ1090_PIN_SCB0_SDAT,	"scb0_sdat"),
+	PINCTRL_PIN(TZ1090_PIN_SCB0_SCLK,	"scb0_sclk"),
+	PINCTRL_PIN(TZ1090_PIN_SCB1_SDAT,	"scb1_sdat"),
+	PINCTRL_PIN(TZ1090_PIN_SCB1_SCLK,	"scb1_sclk"),
+	PINCTRL_PIN(TZ1090_PIN_SCB2_SDAT,	"scb2_sdat"),
+	PINCTRL_PIN(TZ1090_PIN_SCB2_SCLK,	"scb2_sclk"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_MCLK,	"i2s_mclk"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_BCLK_OUT,	"i2s_bclk_out"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_LRCLK_OUT,	"i2s_lrclk_out"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_DOUT0,	"i2s_dout0"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_DOUT1,	"i2s_dout1"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_DOUT2,	"i2s_dout2"),
+	PINCTRL_PIN(TZ1090_PIN_I2S_DIN,		"i2s_din"),
+	PINCTRL_PIN(TZ1090_PIN_PDM_A,		"pdm_a"),
+	PINCTRL_PIN(TZ1090_PIN_PDM_B,		"pdm_b"),
+	PINCTRL_PIN(TZ1090_PIN_PDM_C,		"pdm_c"),
+	PINCTRL_PIN(TZ1090_PIN_PDM_D,		"pdm_d"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED0,	"tft_red0"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED1,	"tft_red1"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED2,	"tft_red2"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED3,	"tft_red3"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED4,	"tft_red4"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED5,	"tft_red5"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED6,	"tft_red6"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_RED7,	"tft_red7"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN0,	"tft_green0"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN1,	"tft_green1"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN2,	"tft_green2"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN3,	"tft_green3"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN4,	"tft_green4"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN5,	"tft_green5"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN6,	"tft_green6"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_GREEN7,	"tft_green7"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE0,	"tft_blue0"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE1,	"tft_blue1"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE2,	"tft_blue2"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE3,	"tft_blue3"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE4,	"tft_blue4"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE5,	"tft_blue5"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE6,	"tft_blue6"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLUE7,	"tft_blue7"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_VDDEN_GD,	"tft_vdden_gd"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_PANELCLK,	"tft_panelclk"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_BLANK_LS,	"tft_blank_ls"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_VSYNC_NS,	"tft_vsync_ns"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_HSYNC_NR,	"tft_hsync_nr"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_VD12ACB,	"tft_vd12acb"),
+	PINCTRL_PIN(TZ1090_PIN_TFT_PWRSAVE,	"tft_pwrsave"),
+	PINCTRL_PIN(TZ1090_PIN_TX_ON,		"tx_on"),
+	PINCTRL_PIN(TZ1090_PIN_RX_ON,		"rx_on"),
+	PINCTRL_PIN(TZ1090_PIN_PLL_ON,		"pll_on"),
+	PINCTRL_PIN(TZ1090_PIN_PA_ON,		"pa_on"),
+	PINCTRL_PIN(TZ1090_PIN_RX_HP,		"rx_hp"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN0,		"gain0"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN1,		"gain1"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN2,		"gain2"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN3,		"gain3"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN4,		"gain4"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN5,		"gain5"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN6,		"gain6"),
+	PINCTRL_PIN(TZ1090_PIN_GAIN7,		"gain7"),
+	PINCTRL_PIN(TZ1090_PIN_ANT_SEL0,	"ant_sel0"),
+	PINCTRL_PIN(TZ1090_PIN_ANT_SEL1,	"ant_sel1"),
+	PINCTRL_PIN(TZ1090_PIN_SDH_CLK_IN,	"sdh_clk_in"),
+
+	/* Non-GPIO pins */
+	PINCTRL_PIN(TZ1090_PIN_TCK,		"tck"),
+	PINCTRL_PIN(TZ1090_PIN_TRST,		"trst"),
+	PINCTRL_PIN(TZ1090_PIN_TDI,		"tdi"),
+	PINCTRL_PIN(TZ1090_PIN_TDO,		"tdo"),
+	PINCTRL_PIN(TZ1090_PIN_TMS,		"tms"),
+	PINCTRL_PIN(TZ1090_PIN_CLK_OUT0,	"clk_out0"),
+	PINCTRL_PIN(TZ1090_PIN_CLK_OUT1,	"clk_out1"),
+};
+
+/* Pins in each pin group */
+
+/* "spi1_cs2" pin group */
+static const unsigned spi1_cs2_pins[] = {
+	TZ1090_PIN_SPI1_CS2,
+};
+
+/* "pdm_d" pin group */
+static const unsigned pdm_d_pins[] = {
+	TZ1090_PIN_PDM_D,
+};
+
+/* "tft" pin group: TFT display interface pins */
+static const unsigned tft_pins[] = {
+	TZ1090_PIN_TFT_RED0,
+	TZ1090_PIN_TFT_RED1,
+	TZ1090_PIN_TFT_RED2,
+	TZ1090_PIN_TFT_RED3,
+	TZ1090_PIN_TFT_RED4,
+	TZ1090_PIN_TFT_RED5,
+	TZ1090_PIN_TFT_RED6,
+	TZ1090_PIN_TFT_RED7,
+	TZ1090_PIN_TFT_GREEN0,
+	TZ1090_PIN_TFT_GREEN1,
+	TZ1090_PIN_TFT_GREEN2,
+	TZ1090_PIN_TFT_GREEN3,
+	TZ1090_PIN_TFT_GREEN4,
+	TZ1090_PIN_TFT_GREEN5,
+	TZ1090_PIN_TFT_GREEN6,
+	TZ1090_PIN_TFT_GREEN7,
+	TZ1090_PIN_TFT_BLUE0,
+	TZ1090_PIN_TFT_BLUE1,
+	TZ1090_PIN_TFT_BLUE2,
+	TZ1090_PIN_TFT_BLUE3,
+	TZ1090_PIN_TFT_BLUE4,
+	TZ1090_PIN_TFT_BLUE5,
+	TZ1090_PIN_TFT_BLUE6,
+	TZ1090_PIN_TFT_BLUE7,
+	TZ1090_PIN_TFT_VDDEN_GD,
+	TZ1090_PIN_TFT_PANELCLK,
+	TZ1090_PIN_TFT_BLANK_LS,
+	TZ1090_PIN_TFT_VSYNC_NS,
+	TZ1090_PIN_TFT_HSYNC_NR,
+	TZ1090_PIN_TFT_VD12ACB,
+	TZ1090_PIN_TFT_PWRSAVE,
+};
+
+/* "afe" pin group: analogue front end control/gain pins */
+static const unsigned afe_pins[] = {
+	TZ1090_PIN_TX_ON,
+	TZ1090_PIN_RX_ON,
+	TZ1090_PIN_PLL_ON,
+	TZ1090_PIN_PA_ON,
+	TZ1090_PIN_RX_HP,
+	TZ1090_PIN_ANT_SEL0,
+	TZ1090_PIN_ANT_SEL1,
+	TZ1090_PIN_GAIN0,
+	TZ1090_PIN_GAIN1,
+	TZ1090_PIN_GAIN2,
+	TZ1090_PIN_GAIN3,
+	TZ1090_PIN_GAIN4,
+	TZ1090_PIN_GAIN5,
+	TZ1090_PIN_GAIN6,
+	TZ1090_PIN_GAIN7,
+};
+
+/* "sdio" pin group */
+static const unsigned sdio_pins[] = {
+	TZ1090_PIN_SDIO_CLK,
+	TZ1090_PIN_SDIO_CMD,
+	TZ1090_PIN_SDIO_D0,
+	TZ1090_PIN_SDIO_D1,
+	TZ1090_PIN_SDIO_D2,
+	TZ1090_PIN_SDIO_D3,
+};
+
+/* "sdh" pin group */
+static const unsigned sdh_pins[] = {
+	TZ1090_PIN_SDH_CD,
+	TZ1090_PIN_SDH_WP,
+	TZ1090_PIN_SDH_CLK_IN,
+};
+
+/* "spi0" pin group */
+static const unsigned spi0_pins[] = {
+	TZ1090_PIN_SPI0_MCLK,
+	TZ1090_PIN_SPI0_CS0,
+	TZ1090_PIN_SPI0_CS1,
+	TZ1090_PIN_SPI0_CS2,
+	TZ1090_PIN_SPI0_DOUT,
+	TZ1090_PIN_SPI0_DIN,
+};
+
+/* "spi1" pin group */
+static const unsigned spi1_pins[] = {
+	TZ1090_PIN_SPI1_MCLK,
+	TZ1090_PIN_SPI1_CS0,
+	TZ1090_PIN_SPI1_CS1,
+	TZ1090_PIN_SPI1_CS2,
+	TZ1090_PIN_SPI1_DOUT,
+	TZ1090_PIN_SPI1_DIN,
+};
+
+/* "uart0" pin group */
+static const unsigned uart0_pins[] = {
+	TZ1090_PIN_UART0_RTS,
+	TZ1090_PIN_UART0_CTS,
+	TZ1090_PIN_UART0_TXD,
+	TZ1090_PIN_UART0_RXD,
+};
+
+/* "uart1" pin group */
+static const unsigned uart1_pins[] = {
+	TZ1090_PIN_UART1_TXD,
+	TZ1090_PIN_UART1_RXD,
+};
+
+/* "uart" pin group: both UARTs combined */
+static const unsigned uart_pins[] = {
+	TZ1090_PIN_UART1_TXD,
+	TZ1090_PIN_UART1_RXD,
+	TZ1090_PIN_UART0_RTS,
+	TZ1090_PIN_UART0_CTS,
+	TZ1090_PIN_UART0_TXD,
+	TZ1090_PIN_UART0_RXD,
+};
+
+/* "scb0" pin group */
+static const unsigned scb0_pins[] = {
+	TZ1090_PIN_SCB0_SDAT,
+	TZ1090_PIN_SCB0_SCLK,
+};
+
+static const unsigned scb1_pins[] = {
+	TZ1090_PIN_SCB1_SDAT,
+	TZ1090_PIN_SCB1_SCLK,
+};
+
+static const unsigned scb2_pins[] = {
+	TZ1090_PIN_SCB2_SDAT,
+	TZ1090_PIN_SCB2_SCLK,
+};
+
+static const unsigned i2s_pins[] = {
+	TZ1090_PIN_I2S_MCLK,
+	TZ1090_PIN_I2S_BCLK_OUT,
+	TZ1090_PIN_I2S_LRCLK_OUT,
+	TZ1090_PIN_I2S_DOUT0,
+	TZ1090_PIN_I2S_DOUT1,
+	TZ1090_PIN_I2S_DOUT2,
+	TZ1090_PIN_I2S_DIN,
+};
+
+static const unsigned jtag_pins[] = {
+	TZ1090_PIN_TCK,
+	TZ1090_PIN_TRST,
+	TZ1090_PIN_TDI,
+	TZ1090_PIN_TDO,
+	TZ1090_PIN_TMS,
+};
+
+/*
+ * Pins in each drive pin group.
+ *
+ * These groups share a single slew/Schmitt/drive-strength control bit (see
+ * DRV_PG below) and don't always follow peripheral boundaries, hence the
+ * slightly odd pin combinations.
+ */
+
+static const unsigned drive_sdio_pins[] = {
+	TZ1090_PIN_SDIO_CLK,
+	TZ1090_PIN_SDIO_CMD,
+	TZ1090_PIN_SDIO_D0,
+	TZ1090_PIN_SDIO_D1,
+	TZ1090_PIN_SDIO_D2,
+	TZ1090_PIN_SDIO_D3,
+	TZ1090_PIN_SDH_WP,
+	TZ1090_PIN_SDH_CD,
+	TZ1090_PIN_SDH_CLK_IN,
+};
+
+static const unsigned drive_i2s_pins[] = {
+	TZ1090_PIN_CLK_OUT1,
+	TZ1090_PIN_I2S_DIN,
+	TZ1090_PIN_I2S_DOUT0,
+	TZ1090_PIN_I2S_DOUT1,
+	TZ1090_PIN_I2S_DOUT2,
+	TZ1090_PIN_I2S_LRCLK_OUT,
+	TZ1090_PIN_I2S_BCLK_OUT,
+	TZ1090_PIN_I2S_MCLK,
+};
+
+static const unsigned drive_scb0_pins[] = {
+	TZ1090_PIN_SCB0_SCLK,
+	TZ1090_PIN_SCB0_SDAT,
+	TZ1090_PIN_PDM_D,
+	TZ1090_PIN_PDM_C,
+};
+
+static const unsigned drive_pdm_pins[] = {
+	TZ1090_PIN_CLK_OUT0,
+	TZ1090_PIN_PDM_B,
+	TZ1090_PIN_PDM_A,
+};
+
+/* Pin groups each function can be muxed to */
+
+/*
+ * The magic "perip" function allows otherwise non-muxing pins to be enabled in
+ * peripheral mode.
+ *
+ * Note that pins which do belong to a mux group (e.g. spi1_cs2, pdm_d, the
+ * sdh/sdio, afe and tft pins) are deliberately absent from this list.
+ */
+static const char * const perip_groups[] = {
+	/* non-muxing convenient gpio pingroups */
+	"uart",
+	"uart0",
+	"uart1",
+	"spi0",
+	"spi1",
+	"scb0",
+	"scb1",
+	"scb2",
+	"i2s",
+	/* individual pins not part of a pin mux group */
+	"spi0_mclk",
+	"spi0_cs0",
+	"spi0_cs1",
+	"spi0_cs2",
+	"spi0_dout",
+	"spi0_din",
+	"spi1_mclk",
+	"spi1_cs0",
+	"spi1_cs1",
+	"spi1_dout",
+	"spi1_din",
+	"uart0_rxd",
+	"uart0_txd",
+	"uart0_cts",
+	"uart0_rts",
+	"uart1_rxd",
+	"uart1_txd",
+	"scb0_sdat",
+	"scb0_sclk",
+	"scb1_sdat",
+	"scb1_sclk",
+	"scb2_sdat",
+	"scb2_sclk",
+	"i2s_mclk",
+	"i2s_bclk_out",
+	"i2s_lrclk_out",
+	"i2s_dout0",
+	"i2s_dout1",
+	"i2s_dout2",
+	"i2s_din",
+	"pdm_a",
+	"pdm_b",
+	"pdm_c",
+};
+
+/* sdh and sdio share a mux, so both functions list both pingroups */
+static const char * const sdh_sdio_groups[] = {
+	"sdh",
+	"sdio",
+	/* sdh pins */
+	"sdh_cd",
+	"sdh_wp",
+	"sdh_clk_in",
+	/* sdio pins */
+	"sdio_clk",
+	"sdio_cmd",
+	"sdio_d0",
+	"sdio_d1",
+	"sdio_d2",
+	"sdio_d3",
+};
+
+static const char * const spi1_cs2_groups[] = {
+	"spi1_cs2",
+};
+
+static const char * const pdm_dac_groups[] = {
+	"pdm_d",
+};
+
+/* usb_vbus can be muxed onto either of these two pins */
+static const char * const usb_vbus_groups[] = {
+	"spi1_cs2",
+	"pdm_d",
+};
+
+static const char * const afe_groups[] = {
+	"afe",
+	/* afe pins */
+	"tx_on",
+	"rx_on",
+	"pll_on",
+	"pa_on",
+	"rx_hp",
+	"ant_sel0",
+	"ant_sel1",
+	"gain0",
+	"gain1",
+	"gain2",
+	"gain3",
+	"gain4",
+	"gain5",
+	"gain6",
+	"gain7",
+};
+
+static const char * const tft_groups[] = {
+	"tft",
+	/* tft pins */
+	"tft_red0",
+	"tft_red1",
+	"tft_red2",
+	"tft_red3",
+	"tft_red4",
+	"tft_red5",
+	"tft_red6",
+	"tft_red7",
+	"tft_green0",
+	"tft_green1",
+	"tft_green2",
+	"tft_green3",
+	"tft_green4",
+	"tft_green5",
+	"tft_green6",
+	"tft_green7",
+	"tft_blue0",
+	"tft_blue1",
+	"tft_blue2",
+	"tft_blue3",
+	"tft_blue4",
+	"tft_blue5",
+	"tft_blue6",
+	"tft_blue7",
+	"tft_vdden_gd",
+	"tft_panelclk",
+	"tft_blank_ls",
+	"tft_vsync_ns",
+	"tft_hsync_nr",
+	"tft_vd12acb",
+	"tft_pwrsave",
+};
+
+/* Mux functions that can be used by a mux */
+
+/*
+ * Ordering here is significant: the values index tz1090_functions[] (see the
+ * "Must correlate" comment there) and tz1090_submux[].
+ */
+enum tz1090_mux {
+	/* internal placeholder */
+	TZ1090_MUX_NA = -1,
+	/* magic per-non-muxing-GPIO-pin peripheral mode mux */
+	TZ1090_MUX_PERIP,
+	/* SDH/SDIO mux */
+	TZ1090_MUX_SDH,
+	TZ1090_MUX_SDIO,
+	/* USB_VBUS muxes */
+	TZ1090_MUX_SPI1_CS2,
+	TZ1090_MUX_PDM_DAC,
+	TZ1090_MUX_USB_VBUS,
+	/* AFE mux */
+	TZ1090_MUX_AFE,
+	TZ1090_MUX_TS_OUT_0,
+	/* EXT_DAC mux */
+	TZ1090_MUX_DAC,
+	TZ1090_MUX_NOT_IQADC_STB,
+	TZ1090_MUX_IQDAC_STB,
+	/* TFT mux */
+	TZ1090_MUX_TFT,
+	TZ1090_MUX_EXT_DAC,
+	TZ1090_MUX_TS_OUT_1,
+	TZ1090_MUX_LCD_TRACE,
+	TZ1090_MUX_PHY_RINGOSC,
+};
+
+/**
+ * FUNCTION() - Initialise a mux function description.
+ * @mux:	Function index (TZ1090_MUX_ is prepended)
+ * @fname:	Function name (stringified)
+ * @group:	Pin group list (_groups is appended)
+ */
+#define FUNCTION(mux, fname, group)			\
+	[(TZ1090_MUX_ ## mux)] = {			\
+		.name = #fname,				\
+		.groups = group##_groups,		\
+		.ngroups = ARRAY_SIZE(group##_groups),	\
+	}
+/* For intermediate functions with submuxes (no pin groups of their own) */
+#define NULL_FUNCTION(mux, fname)			\
+	[(TZ1090_MUX_ ## mux)] = {			\
+		.name = #fname,				\
+	}
+
+/* Must correlate with enum tz1090_mux (designated initialisers enforce it) */
+static const struct tz1090_function tz1090_functions[] = {
+	/*	 FUNCTION	function name	pingroups */
+	FUNCTION(PERIP,		perip,		perip),
+	FUNCTION(SDH,		sdh,		sdh_sdio),
+	FUNCTION(SDIO,		sdio,		sdh_sdio),
+	FUNCTION(SPI1_CS2,	spi1_cs2,	spi1_cs2),
+	FUNCTION(PDM_DAC,	pdm_dac,	pdm_dac),
+	FUNCTION(USB_VBUS,	usb_vbus,	usb_vbus),
+	FUNCTION(AFE,		afe,		afe),
+	FUNCTION(TS_OUT_0,	ts_out_0,	afe),
+	FUNCTION(DAC,		ext_dac,	tft),
+	FUNCTION(NOT_IQADC_STB,	not_iqadc_stb,	tft),
+	FUNCTION(IQDAC_STB,	iqdac_stb,	tft),
+	FUNCTION(TFT,		tft,		tft),
+	/* EXT_DAC is only an intermediate function, routed via a submux */
+	NULL_FUNCTION(EXT_DAC,	_ext_dac),
+	FUNCTION(TS_OUT_1,	ts_out_1,	tft),
+	FUNCTION(LCD_TRACE,	lcd_trace,	tft),
+	FUNCTION(PHY_RINGOSC,	phy_ringosc,	tft),
+};
+
+/* Sub muxes */
+
+/**
+ * MUX() - Initialise a mux description.
+ * @f0:		Function 0 (TZ1090_MUX_ is prepended, NA for none)
+ * @f1:		Function 1 (TZ1090_MUX_ is prepended, NA for none)
+ * @f2:		Function 2 (TZ1090_MUX_ is prepended, NA for none)
+ * @f3:		Function 3 (TZ1090_MUX_ is prepended, NA for none)
+ * @f4:		Function 4 (TZ1090_MUX_ is prepended, NA for none)
+ * @mux_r:	Mux register (REG_PINCTRL_ is prepended)
+ * @mux_b:	Bit number in register that the mux field begins
+ * @mux_w:	Width of mux field in register
+ */
+#define MUX(f0, f1, f2, f3, f4, mux_r, mux_b, mux_w)		\
+	{							\
+		.funcs = {					\
+			TZ1090_MUX_ ## f0,			\
+			TZ1090_MUX_ ## f1,			\
+			TZ1090_MUX_ ## f2,			\
+			TZ1090_MUX_ ## f3,			\
+			TZ1090_MUX_ ## f4,			\
+		},						\
+		.reg = (REG_PINCTRL_ ## mux_r),			\
+		.bit = (mux_b),					\
+		.width = (mux_w),				\
+	}
+
+/**
+ * DEFINE_SUBMUX() - Defines a submux description separate from a pin group.
+ * @mux:	Mux name (_submux is appended)
+ * @f0:		Function 0 (TZ1090_MUX_ is prepended, NA for none)
+ * @f1:		Function 1 (TZ1090_MUX_ is prepended, NA for none)
+ * @f2:		Function 2 (TZ1090_MUX_ is prepended, NA for none)
+ * @f3:		Function 3 (TZ1090_MUX_ is prepended, NA for none)
+ * @f4:		Function 4 (TZ1090_MUX_ is prepended, NA for none)
+ * @mux_r:	Mux register (REG_PINCTRL_ is prepended)
+ * @mux_b:	Bit number in register that the mux field begins
+ * @mux_w:	Width of mux field in register
+ *
+ * A sub mux is a nested mux that can be bound to a magic function number used
+ * by another mux description. For example value 4 of the top level mux might
+ * correspond to a function which has a submux pointed to in tz1090_submux[].
+ * The outer mux can then take on any function in the top level mux or the
+ * submux, and if a submux function is chosen both muxes are updated to route
+ * the signal from the submux.
+ *
+ * The submux can be defined with DEFINE_SUBMUX and pointed to from
+ * tz1090_submux[] using SUBMUX.
+ */
+#define DEFINE_SUBMUX(mux, f0, f1, f2, f3, f4, mux_r, mux_b, mux_w)	\
+	static struct tz1090_muxdesc mux ## _submux =			\
+		MUX(f0, f1, f2, f3, f4, mux_r, mux_b, mux_w)
+
+/**
+ * SUBMUX() - Link a submux to a function number.
+ * @f:		Function name (TZ1090_MUX_ is prepended)
+ * @submux:	Submux name (_submux is appended)
+ *
+ * For use in tz1090_submux[] initialisation to link an intermediate function
+ * number to a particular submux description. It indicates that when the
+ * function is chosen the signal is connected to the submux.
+ */
+#define SUBMUX(f, submux)	[(TZ1090_MUX_ ## f)] = &(submux ## _submux)
+
+/**
+ * MUX_PG() - Initialise a pin group with mux control
+ * @pg_name:	Pin group name (stringified, _pins appended to get pins array)
+ * @f0:		Function 0 (TZ1090_MUX_ is prepended, NA for none)
+ * @f1:		Function 1 (TZ1090_MUX_ is prepended, NA for none)
+ * @f2:		Function 2 (TZ1090_MUX_ is prepended, NA for none)
+ * @f3:		Function 3 (TZ1090_MUX_ is prepended, NA for none)
+ * @f4:		Function 4 (TZ1090_MUX_ is prepended, NA for none)
+ * @mux_r:	Mux register (REG_PINCTRL_ is prepended)
+ * @mux_b:	Bit number in register that the mux field begins
+ * @mux_w:	Width of mux field in register
+ */
+#define MUX_PG(pg_name, f0, f1, f2, f3, f4,			\
+	       mux_r, mux_b, mux_w)				\
+	{							\
+		.name = #pg_name,				\
+		.pins = pg_name##_pins,				\
+		.npins = ARRAY_SIZE(pg_name##_pins),		\
+		.mux = MUX(f0, f1, f2, f3, f4,			\
+			   mux_r, mux_b, mux_w),		\
+	}
+
+/**
+ * SIMPLE_PG() - Initialise a simple convenience pin group
+ * @pg_name:	Pin group name (stringified, _pins appended to get pins array)
+ *
+ * A simple pin group is simply used for binding pins together so they can be
+ * referred to by a single name instead of having to list every pin
+ * individually.
+ */
+#define SIMPLE_PG(pg_name)					\
+	{							\
+		.name = #pg_name,				\
+		.pins = pg_name##_pins,				\
+		.npins = ARRAY_SIZE(pg_name##_pins),		\
+	}
+
+/**
+ * DRV_PG() - Initialise a pin group with drive control
+ * @pg_name:	Pin group name (stringified, _pins appended to get pins array)
+ * @slw_b:	Slew register bit.
+ *		The same bit is used for Schmitt, and Drive (*2).
+ */
+#define DRV_PG(pg_name, slw_b)					\
+	{							\
+		.name = #pg_name,				\
+		.pins = pg_name##_pins,				\
+		.npins = ARRAY_SIZE(pg_name##_pins),		\
+		.drv = true,					\
+		.slw_bit = (slw_b),				\
+	}
+
+/*
+ * Define main muxing pin groups
+ */
+
+/* submuxes */
+
+/*            name     f0,  f1,            f2,        f3, f4, mux r/b/w */
+DEFINE_SUBMUX(ext_dac, DAC, NOT_IQADC_STB, IQDAC_STB, NA, NA, IF_CTL, 6, 2);
+
+/* bind submuxes to internal functions */
+static struct tz1090_muxdesc *tz1090_submux[] = {
+	SUBMUX(EXT_DAC, ext_dac),
+};
+
+/*
+ * These are the pin mux groups. Pin muxing can be enabled and disabled for each
+ * pin individually so these groups are internal. The mapping of pins to pin mux
+ * group is below (tz1090_mux_pins).
+ */
+static struct tz1090_pingroup tz1090_mux_groups[] = {
+	/* Muxing pin groups */
+	/*     pg_name,  f0,       f1,       f2,       f3,        f4,          mux r/b/w */
+	MUX_PG(sdh,      SDH,      SDIO,     NA,       NA,        NA,          IF_CTL, 20, 2),
+	MUX_PG(sdio,     SDIO,     SDH,      NA,       NA,        NA,          IF_CTL, 16, 2),
+	MUX_PG(spi1_cs2, SPI1_CS2, USB_VBUS, NA,       NA,        NA,          IF_CTL, 10, 2),
+	MUX_PG(pdm_d,    PDM_DAC,  USB_VBUS, NA,       NA,        NA,          IF_CTL,  8, 2),
+	MUX_PG(afe,      AFE,      TS_OUT_0, NA,       NA,        NA,          IF_CTL,  4, 2),
+	MUX_PG(tft,      TFT,      EXT_DAC,  TS_OUT_1, LCD_TRACE, PHY_RINGOSC, IF_CTL,  0, 3),
+};
+
+/*
+ * This is the mapping from GPIO pins to pin mux groups in tz1090_mux_groups[].
+ * Pins which aren't muxable to multiple peripherals are set to
+ * TZ1090_MUX_GROUP_MAX to enable the "perip" function to enable/disable
+ * peripheral control of the pin.
+ *
+ * This array is initialised in tz1090_init_mux_pins().
+ */
+static u8 tz1090_mux_pins[NUM_GPIOS];
+
+/* TZ1090_MUX_GROUP_MAX is used in tz1090_mux_pins[] for non-muxing pins */
+#define TZ1090_MUX_GROUP_MAX ARRAY_SIZE(tz1090_mux_groups)
+
+/**
+ * tz1090_init_mux_pins() - Initialise GPIO pin to mux group mapping.
+ *
+ * Initialises the tz1090_mux_pins[] array to be the inverse of the pin lists in
+ * each pin mux group in tz1090_mux_groups[].
+ *
+ * It is assumed that no pin mux groups overlap (share pins).
+ */
+static void __init tz1090_init_mux_pins(void)
+{
+	unsigned int g, p;
+	const struct tz1090_pingroup *grp;
+	const unsigned int *pin;
+
+	/* default: pin belongs to no mux group (only the "perip" function) */
+	for (p = 0; p < NUM_GPIOS; ++p)
+		tz1090_mux_pins[p] = TZ1090_MUX_GROUP_MAX;
+
+	/* NOTE(review): redundant; re-assigned by the for() initialiser below */
+	grp = tz1090_mux_groups;
+	for (g = 0, grp = tz1090_mux_groups;
+	     g < ARRAY_SIZE(tz1090_mux_groups); ++g, ++grp)
+		for (pin = grp->pins, p = 0; p < grp->npins; ++p, ++pin)
+			tz1090_mux_pins[*pin] = g;
+}
+
+/*
+ * These are the externally visible pin groups. Some of them allow group control
+ * of drive configuration. Some are just simple convenience pingroups. All the
+ * internal pin mux groups in tz1090_mux_groups[] are mirrored here with the
+ * same pins.
+ * Pseudo pin groups follow in the group numbers after this array for each GPIO
+ * pin. Any group used for muxing must have all pins belonging to the same pin
+ * mux group.
+ */
+static struct tz1090_pingroup tz1090_groups[] = {
+	/* Pin groups with drive control (with no out of place pins) */
+	/*     pg_name,		slw/schmitt/drv b */
+	DRV_PG(jtag,		11 /* 11, 22 */),
+	DRV_PG(tft,		10 /* 10, 20 */),
+	DRV_PG(scb2,		9  /*  9, 18 */),
+	DRV_PG(spi0,		7  /*  7, 14 */),
+	DRV_PG(uart,		5  /*  5, 10 */),
+	DRV_PG(scb1,		4  /*  4,  8 */),
+	DRV_PG(spi1,		3  /*  3,  6 */),
+	DRV_PG(afe,		0  /*  0,  0 */),
+
+	/*
+	 * Drive specific pin groups (with odd combinations of pins which makes
+	 * the pin group naming somewhat arbitrary)
+	 */
+	/*     pg_name,		slw/schmitt/drv b */
+	DRV_PG(drive_sdio,	8  /*  8, 16 */), /* sdio_* + sdh_* */
+	DRV_PG(drive_i2s,	6  /*  6, 12 */), /* i2s_* + clk_out1 */
+	DRV_PG(drive_scb0,	2  /*  2,  4 */), /* scb0_* + pdm_{c,d} */
+	DRV_PG(drive_pdm,	1  /*  1,  2 */), /* pdm_{a,b} + clk_out0 */
+
+	/* Convenience pin groups (no mux or drive control of their own) */
+	/*        pg_name */
+	SIMPLE_PG(uart0),
+	SIMPLE_PG(uart1),
+	SIMPLE_PG(scb0),
+	SIMPLE_PG(i2s),
+	SIMPLE_PG(sdh),
+	SIMPLE_PG(sdio),
+
+	/* pseudo-pingroups for each GPIO pin follow */
+};
+
+/**
+ * struct tz1090_pmx - Private pinctrl data
+ * @dev:	Platform device
+ * @pctl:	Pin control device
+ * @regs:	Register region
+ * @lock:	Lock protecting coherency of pin_en, gpio_en, and SELECT regs
+ * @pin_en:	Pins that have been enabled (32 pins packed into each element)
+ * @gpio_en:	GPIOs that have been enabled (32 pins packed into each element)
+ */
+struct tz1090_pmx {
+	struct device		*dev;
+	struct pinctrl_dev	*pctl;
+	void __iomem		*regs;
+	spinlock_t		lock;
+	u32			pin_en[3];
+	u32			gpio_en[3];
+};
+
+/* Read a 32-bit pinctrl register at byte offset @reg */
+static inline u32 pmx_read(struct tz1090_pmx *pmx, u32 reg)
+{
+	return ioread32(pmx->regs + reg);
+}
+
+/* Write @val to the 32-bit pinctrl register at byte offset @reg */
+static inline void pmx_write(struct tz1090_pmx *pmx, u32 val, u32 reg)
+{
+	iowrite32(val, pmx->regs + reg);
+}
+
+/*
+ * Pin control operations
+ */
+
+/* each GPIO pin has it's own pseudo pingroup containing only itself */
+
+/* Total groups = named groups + one pseudo-group per GPIO pin */
+static int tz1090_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(tz1090_groups) + NUM_GPIOS;
+}
+
+static const char *tz1090_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+						 unsigned group)
+{
+	if (group < ARRAY_SIZE(tz1090_groups)) {
+		/* normal pingroup */
+		return tz1090_groups[group].name;
+	} else {
+		/* individual gpio pin pseudo-pingroup */
+		unsigned int pin = group - ARRAY_SIZE(tz1090_groups);
+		return tz1090_pins[pin].name;
+	}
+}
+
+static int tz1090_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+					 unsigned group,
+					 const unsigned **pins,
+					 unsigned *num_pins)
+{
+	if (group < ARRAY_SIZE(tz1090_groups)) {
+		/* normal pingroup */
+		*pins = tz1090_groups[group].pins;
+		*num_pins = tz1090_groups[group].npins;
+	} else {
+		/* individual gpio pin pseudo-pingroup */
+		unsigned int pin = group - ARRAY_SIZE(tz1090_groups);
+		*pins = &tz1090_pins[pin].number;
+		*num_pins = 1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs: just report which device owns the pin */
+static void tz1090_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+					struct seq_file *s,
+					unsigned offset)
+{
+	seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+#endif
+
+/*
+ * Grow *map so it can hold at least *num_maps + reserve entries.
+ * New entries are zeroed; *reserved_maps tracks the allocated capacity.
+ * Returns 0 on success or -ENOMEM (leaving the old map intact).
+ */
+static int reserve_map(struct device *dev, struct pinctrl_map **map,
+		       unsigned *reserved_maps, unsigned *num_maps,
+		       unsigned reserve)
+{
+	unsigned old_num = *reserved_maps;
+	unsigned new_num = *num_maps + reserve;
+	struct pinctrl_map *new_map;
+
+	if (old_num >= new_num)
+		return 0;
+
+	new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+	if (!new_map) {
+		dev_err(dev, "krealloc(map) failed\n");
+		return -ENOMEM;
+	}
+
+	memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+	*map = new_map;
+	*reserved_maps = new_num;
+
+	return 0;
+}
+
+/*
+ * Append a MUX_GROUP entry to the map. Space must already have been
+ * reserved with reserve_map(), hence the WARN_ON rather than realloc.
+ */
+static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+		       unsigned *num_maps, const char *group,
+		       const char *function)
+{
+	if (WARN_ON(*num_maps == *reserved_maps))
+		return -ENOSPC;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = group;
+	(*map)[*num_maps].data.mux.function = function;
+	(*num_maps)++;
+
+	return 0;
+}
+
+/*
+ * Append a CONFIGS_GROUP entry to the map, duplicating @configs so each
+ * entry owns its own array (freed later by tz1090_pinctrl_dt_free_map()).
+ * Space must already have been reserved with reserve_map().
+ */
+static int add_map_configs(struct device *dev,
+			   struct pinctrl_map **map,
+			   unsigned *reserved_maps, unsigned *num_maps,
+			   const char *group, unsigned long *configs,
+			   unsigned num_configs)
+{
+	unsigned long *dup_configs;
+
+	if (WARN_ON(*num_maps == *reserved_maps))
+		return -ENOSPC;
+
+	dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+			      GFP_KERNEL);
+	if (!dup_configs) {
+		dev_err(dev, "kmemdup(configs) failed\n");
+		return -ENOMEM;
+	}
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	(*map)[*num_maps].data.configs.group_or_pin = group;
+	(*map)[*num_maps].data.configs.configs = dup_configs;
+	(*map)[*num_maps].data.configs.num_configs = num_configs;
+	(*num_maps)++;
+
+	return 0;
+}
+
+/*
+ * Append one packed config value to the *configs array, growing it by one.
+ * On -ENOMEM the original array is left intact for the caller to free.
+ */
+static int add_config(struct device *dev, unsigned long **configs,
+		      unsigned *num_configs, unsigned long config)
+{
+	unsigned old_num = *num_configs;
+	unsigned new_num = old_num + 1;
+	unsigned long *new_configs;
+
+	new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
+			       GFP_KERNEL);
+	if (!new_configs) {
+		dev_err(dev, "krealloc(configs) failed\n");
+		return -ENOMEM;
+	}
+
+	new_configs[old_num] = config;
+
+	*configs = new_configs;
+	*num_configs = new_num;
+
+	return 0;
+}
+
+/**
+ * tz1090_pinctrl_dt_free_map() - Free a map created by dt_node_to_map.
+ * @pctldev:	Pin control data
+ * @map:	Mapping table to free
+ * @num_maps:	Number of entries in @map
+ *
+ * Frees the kmemdup()ed config arrays owned by CONFIGS_GROUP entries,
+ * then the map itself.
+ */
+static void tz1090_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+				       struct pinctrl_map *map,
+				       unsigned num_maps)
+{
+	unsigned int i;	/* unsigned to match num_maps */
+
+	for (i = 0; i < num_maps; i++)
+		if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+			kfree(map[i].data.configs.configs);
+
+	kfree(map);
+}
+
+/* Describes pinconf properties/flags available from device tree */
+static const struct cfg_param {
+	const char *property;		/* DT property name */
+	enum pin_config_param param;	/* generic pinconf parameter */
+	bool flag;			/* true: property may be valueless */
+} cfg_params[] = {
+	{"tristate",		PIN_CONFIG_BIAS_HIGH_IMPEDANCE,		true},
+	{"pull-up",		PIN_CONFIG_BIAS_PULL_UP,		true},
+	{"pull-down",		PIN_CONFIG_BIAS_PULL_DOWN,		true},
+	{"bus-hold",		PIN_CONFIG_BIAS_BUS_HOLD,		true},
+	{"schmitt",		PIN_CONFIG_INPUT_SCHMITT_ENABLE,	true},
+	{"slew-rate",		PIN_CONFIG_SLEW_RATE,			false},
+	{"drive-strength",	PIN_CONFIG_DRIVE_STRENGTH,		false},
+};
+
+/**
+ * tz1090_pinctrl_dt_subnode_to_map() - Convert one DT subnode to map entries.
+ * @dev:		Device for error reporting
+ * @np:			Device tree subnode (with optional "function",
+ *			pinconf properties, and a "pins" string list)
+ * @map:		Map to append entries to (grown as needed)
+ * @reserved_maps:	Current allocated capacity of @map
+ * @num_maps:		Current used entries in @map
+ *
+ * For each string in the "pins" property, appends a mux entry (if "function"
+ * is present) and/or a configs entry (if any pinconf property is present).
+ * Returns 0 on success or a negative error code.
+ */
+static int tz1090_pinctrl_dt_subnode_to_map(struct device *dev,
+					    struct device_node *np,
+					    struct pinctrl_map **map,
+					    unsigned *reserved_maps,
+					    unsigned *num_maps)
+{
+	int ret, i;
+	const char *function;
+	u32 val;
+	unsigned long config;
+	unsigned long *configs = NULL;
+	unsigned num_configs = 0;
+	unsigned reserve;
+	struct property *prop;
+	const char *group;
+
+	ret = of_property_read_string(np, "function", &function);
+	if (ret < 0) {
+		/* EINVAL=missing, which is fine since it's optional */
+		if (ret != -EINVAL)
+			dev_err(dev, "could not parse property function\n");
+		function = NULL;
+	}
+
+	/* gather any pinconf properties into a packed configs array */
+	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+		ret = of_property_read_u32(np, cfg_params[i].property, &val);
+		/* flags don't have to have a value */
+		if (ret == -EOVERFLOW && cfg_params[i].flag) {
+			val = 1;
+			ret = 0;
+		}
+		if (!ret) {
+			config = pinconf_to_config_packed(cfg_params[i].param,
+							  val);
+			ret = add_config(dev, &configs, &num_configs, config);
+			if (ret < 0)
+				goto exit;
+		/* EINVAL=missing, which is fine since it's optional */
+		} else if (ret != -EINVAL) {
+			dev_err(dev, "could not parse property %s (%d)\n",
+				cfg_params[i].property, ret);
+		}
+	}
+
+	/* reserve 1 (mux) + 1 (configs) map entry per listed pin, as needed */
+	reserve = 0;
+	if (function != NULL)
+		reserve++;
+	if (num_configs)
+		reserve++;
+	ret = of_property_count_strings(np, "pins");
+	if (ret < 0) {
+		dev_err(dev, "could not parse property pins\n");
+		goto exit;
+	}
+	reserve *= ret;
+
+	ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
+	if (ret < 0)
+		goto exit;
+
+	of_property_for_each_string(np, "pins", prop, group) {
+		if (function) {
+			ret = add_map_mux(map, reserved_maps, num_maps,
+					  group, function);
+			if (ret < 0)
+				goto exit;
+		}
+
+		if (num_configs) {
+			ret = add_map_configs(dev, map, reserved_maps,
+					      num_maps, group, configs,
+					      num_configs);
+			if (ret < 0)
+				goto exit;
+		}
+	}
+
+	ret = 0;
+
+exit:
+	/* map entries hold their own copies; the scratch array can go */
+	kfree(configs);
+	return ret;
+}
+
+/**
+ * tz1090_pinctrl_dt_node_to_map() - Convert a DT pin config node to a map.
+ * @pctldev:	Pin control data
+ * @np_config:	Device tree node, each child of which describes a pin state
+ * @map:	Output mapping table (allocated here, freed by dt_free_map)
+ * @num_maps:	Output number of entries in *@map
+ *
+ * Returns 0 on success. On failure any partial map is freed and *@map is
+ * reset so the caller is not left with a dangling pointer.
+ */
+static int tz1090_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+					 struct device_node *np_config,
+					 struct pinctrl_map **map,
+					 unsigned *num_maps)
+{
+	unsigned reserved_maps;
+	struct device_node *np;
+	int ret;
+
+	reserved_maps = 0;
+	*map = NULL;
+	*num_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = tz1090_pinctrl_dt_subnode_to_map(pctldev->dev, np, map,
+						       &reserved_maps,
+						       num_maps);
+		if (ret < 0) {
+			/*
+			 * for_each_child_of_node() holds a reference on @np;
+			 * drop it on this early exit.
+			 */
+			of_node_put(np);
+			tz1090_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+			*map = NULL;
+			*num_maps = 0;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Pin control operations registered with the pinctrl core */
+static struct pinctrl_ops tz1090_pinctrl_ops = {
+	.get_groups_count	= tz1090_pinctrl_get_groups_count,
+	.get_group_name		= tz1090_pinctrl_get_group_name,
+	.get_group_pins		= tz1090_pinctrl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+	.pin_dbg_show		= tz1090_pinctrl_pin_dbg_show,
+#endif
+	.dt_node_to_map		= tz1090_pinctrl_dt_node_to_map,
+	.dt_free_map		= tz1090_pinctrl_dt_free_map,
+};
+
+/*
+ * Pin mux operations
+ */
+
+static int tz1090_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(tz1090_functions);
+}
+
+static const char *tz1090_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+						unsigned function)
+{
+	return tz1090_functions[function].name;
+}
+
+/* Report the pin groups a function can be muxed onto */
+static int tz1090_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+					  unsigned function,
+					  const char * const **groups,
+					  unsigned * const num_groups)
+{
+	/* pingroup functions */
+	*groups = tz1090_functions[function].groups;
+	*num_groups = tz1090_functions[function].ngroups;
+	return 0;
+}
+
+/**
+ * tz1090_pinctrl_select() - update bit in SELECT register
+ * @pmx:		Pinmux data
+ * @pin:		Pin number (must be within GPIO range)
+ *
+ * Recomputes and writes the GPIO-select bit for @pin from the cached
+ * pin_en/gpio_en state. Both visible callers hold pmx->lock around this,
+ * which protects those cached arrays.
+ */
+static void tz1090_pinctrl_select(struct tz1090_pmx *pmx,
+				  unsigned int pin)
+{
+	u32 reg, reg_shift, select, val;
+	unsigned int pmx_index, pmx_shift;
+	unsigned long flags;
+
+	/* uses base 32 instead of base 30 */
+	pmx_index = pin >> 5;
+	pmx_shift = pin & 0x1f;
+
+	/* select = !perip || gpio */
+	select = ((~pmx->pin_en[pmx_index] |
+		   pmx->gpio_en[pmx_index]) >> pmx_shift) & 1;
+
+	/* find register and bit offset (base 30) */
+	reg = REG_PINCTRL_SELECT + 4*(pin / 30);
+	reg_shift = pin % 30;
+
+	/* modify gpio select bit (read-modify-write under the global lock) */
+	__global_lock2(flags);
+	val = pmx_read(pmx, reg);
+	val &= ~BIT(reg_shift);
+	val |= select << reg_shift;
+	pmx_write(pmx, val, reg);
+	__global_unlock2(flags);
+}
+
+/**
+ * tz1090_pinctrl_gpio_select() - enable/disable GPIO usage for a pin
+ * @pmx:		Pinmux data
+ * @pin:		Pin number
+ * @gpio_select:	true to enable pin as GPIO,
+ *			false to leave control to whatever function is enabled
+ *
+ * Records that GPIO usage is enabled/disabled so that enabling a function
+ * doesn't override the SELECT register bit.
+ */
+static void tz1090_pinctrl_gpio_select(struct tz1090_pmx *pmx,
+				       unsigned int pin,
+				       bool gpio_select)
+{
+	unsigned int index, shift;
+	u32 gpio_en;
+
+	/* silently ignore out-of-range pins */
+	if (pin >= NUM_GPIOS)
+		return;
+
+	/* uses base 32 instead of base 30 */
+	index = pin >> 5;
+	shift = pin & 0x1f;
+
+	spin_lock(&pmx->lock);
+
+	/* keep a record whether gpio is selected */
+	gpio_en = pmx->gpio_en[index];
+	gpio_en &= ~BIT(shift);
+	if (gpio_select)
+		gpio_en |= BIT(shift);
+	pmx->gpio_en[index] = gpio_en;
+
+	/* update the select bit */
+	tz1090_pinctrl_select(pmx, pin);
+
+	spin_unlock(&pmx->lock);
+}
+
+/**
+ * tz1090_pinctrl_perip_select() - enable/disable peripheral interface for a pin
+ * @pmx:		Pinmux data
+ * @pin:		Pin number
+ * @perip_select:	true to enable peripheral interface when not GPIO,
+ *			false to leave pin in GPIO mode
+ *
+ * Records that peripheral usage is enabled/disabled so that SELECT register can
+ * be set appropriately when GPIO is disabled.
+ * (Mirror of tz1090_pinctrl_gpio_select(), operating on pin_en instead.)
+ */
+static void tz1090_pinctrl_perip_select(struct tz1090_pmx *pmx,
+					unsigned int pin,
+					bool perip_select)
+{
+	unsigned int index, shift;
+	u32 pin_en;
+
+	/* silently ignore out-of-range pins */
+	if (pin >= NUM_GPIOS)
+		return;
+
+	/* uses base 32 instead of base 30 */
+	index = pin >> 5;
+	shift = pin & 0x1f;
+
+	spin_lock(&pmx->lock);
+
+	/* keep a record whether peripheral is selected */
+	pin_en = pmx->pin_en[index];
+	pin_en &= ~BIT(shift);
+	if (perip_select)
+		pin_en |= BIT(shift);
+	pmx->pin_en[index] = pin_en;
+
+	/* update the select bit */
+	tz1090_pinctrl_select(pmx, pin);
+
+	spin_unlock(&pmx->lock);
+}
+
+/**
+ * tz1090_pinctrl_enable_mux() - Switch a pin mux group to a function.
+ * @pmx:		Pinmux data
+ * @desc:		Pinmux description
+ * @function:		Function to switch to
+ *
+ * Enable a particular function on a pin mux group. Since pin mux descriptions
+ * are nested this function is recursive.
+ *
+ * Returns 0 on success, -EINVAL if @function is not reachable from @desc.
+ */
+static int tz1090_pinctrl_enable_mux(struct tz1090_pmx *pmx,
+				     const struct tz1090_muxdesc *desc,
+				     unsigned int function)
+{
+	const int *fit;
+	unsigned long flags;
+	int mux;
+	/*
+	 * NOTE(review): ret is unsigned yet receives a negative errno from the
+	 * recursive call; the !ret test still works (conversion is
+	 * well-defined) but "int ret" would be clearer.
+	 */
+	unsigned int func, ret;
+	u32 reg, mask;
+
+	/* find the mux value for this function, searching recursively */
+	for (mux = 0, fit = desc->funcs;
+	     mux < ARRAY_SIZE(desc->funcs); ++mux, ++fit) {
+		func = *fit;
+		if (func == function)
+			goto found_mux;
+
+		/* maybe it's a sub-mux */
+		if (func < ARRAY_SIZE(tz1090_submux) && tz1090_submux[func]) {
+			/* recursing sets up the submux; mux still selects func */
+			ret = tz1090_pinctrl_enable_mux(pmx,
+							tz1090_submux[func],
+							function);
+			if (!ret)
+				goto found_mux;
+		}
+	}
+
+	return -EINVAL;
+found_mux:
+
+	/* Set up the mux */
+	if (desc->width) {
+		mask = (BIT(desc->width) - 1) << desc->bit;
+		__global_lock2(flags);
+		reg = pmx_read(pmx, desc->reg);
+		reg &= ~mask;
+		reg |= (mux << desc->bit) & mask;
+		pmx_write(pmx, reg, desc->reg);
+		__global_unlock2(flags);
+	}
+
+	return 0;
+}
+
+/**
+ * tz1090_pinctrl_enable() - Enable a function on a pin group.
+ * @pctldev:		Pin control data
+ * @function:		Function index to enable
+ * @group:		Group index to enable
+ *
+ * Enable a particular function on a group of pins. The per GPIO pin pseudo pin
+ * groups can be used (in which case the pin will be enabled in peripheral mode
+ * and if it belongs to a pin mux group the mux will be switched if it isn't
+ * already in use. Some convenience pin groups can also be used in which case
+ * the effect is the same as enabling the function on each individual pin in the
+ * group.
+ */
+static int tz1090_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+				 unsigned group)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	struct tz1090_pingroup *grp;
+	int ret;
+	unsigned int pin_num, mux_group, i, npins;
+	const unsigned int *pins;
+
+	/* group of pins? */
+	if (group < ARRAY_SIZE(tz1090_groups)) {
+		grp = &tz1090_groups[group];
+		npins = grp->npins;
+		pins = grp->pins;
+		/*
+		 * All pins in the group must belong to the same mux group,
+		 * which allows us to just use the mux group of the first pin.
+		 * By explicitly listing permitted pingroups for each function
+		 * the pinmux core should ensure this is always the case.
+		 */
+	} else {
+		/* individual gpio pin pseudo-pingroup */
+		pin_num = group - ARRAY_SIZE(tz1090_groups);
+		npins = 1;
+		pins = &pin_num;
+	}
+	mux_group = tz1090_mux_pins[*pins];
+
+	/* no mux group, but can still be individually muxed to peripheral */
+	if (mux_group >= TZ1090_MUX_GROUP_MAX) {
+		if (function == TZ1090_MUX_PERIP)
+			goto mux_pins;
+		return -EINVAL;
+	}
+
+	/* mux group already set to a different function? */
+	grp = &tz1090_mux_groups[mux_group];
+	if (grp->func_count && grp->func != function) {
+		dev_err(pctldev->dev,
+			"%s: can't mux pin(s) to '%s', group already muxed to '%s'\n",
+			__func__, tz1090_functions[function].name,
+			tz1090_functions[grp->func].name);
+		return -EBUSY;
+	}
+
+	dev_dbg(pctldev->dev, "%s: muxing %u pin(s) in '%s' to '%s'\n",
+		__func__, npins, grp->name, tz1090_functions[function].name);
+
+	/* if first pin in mux group to be enabled, enable the group mux */
+	if (!grp->func_count) {
+		grp->func = function;
+		ret = tz1090_pinctrl_enable_mux(pmx, &grp->mux, function);
+		if (ret)
+			return ret;
+	}
+	/* add pins to ref count and mux individually to peripheral */
+	grp->func_count += npins;
+mux_pins:
+	for (i = 0; i < npins; ++i)
+		tz1090_pinctrl_perip_select(pmx, pins[i], true);
+
+	return 0;
+}
+
+/**
+ * tz1090_pinctrl_disable() - Disable a function on a pin group.
+ * @pctldev:		Pin control data
+ * @function:		Function index to disable
+ * @group:		Group index to disable
+ *
+ * Disable a particular function on a group of pins. The per GPIO pin pseudo pin
+ * groups can be used (in which case the pin will be taken out of peripheral
+ * mode. Some convenience pin groups can also be used in which case the effect
+ * is the same as enabling the function on each individual pin in the group.
+ */
+static void tz1090_pinctrl_disable(struct pinctrl_dev *pctldev,
+				   unsigned function, unsigned group)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	struct tz1090_pingroup *grp;
+	unsigned int pin_num, mux_group, i, npins;
+	const unsigned int *pins;
+
+	/* group of pins? */
+	if (group < ARRAY_SIZE(tz1090_groups)) {
+		grp = &tz1090_groups[group];
+		npins = grp->npins;
+		pins = grp->pins;
+		/*
+		 * All pins in the group must belong to the same mux group,
+		 * which allows us to just use the mux group of the first pin.
+		 * By explicitly listing permitted pingroups for each function
+		 * the pinmux core should ensure this is always the case.
+		 */
+	} else {
+		/* individual gpio pin pseudo-pingroup */
+		pin_num = group - ARRAY_SIZE(tz1090_groups);
+		npins = 1;
+		pins = &pin_num;
+	}
+	mux_group = tz1090_mux_pins[*pins];
+
+	/* no mux group, but can still be individually muxed to peripheral */
+	if (mux_group >= TZ1090_MUX_GROUP_MAX) {
+		if (function == TZ1090_MUX_PERIP)
+			goto unmux_pins;
+		return;
+	}
+
+	/* mux group already set to a different function? */
+	grp = &tz1090_mux_groups[mux_group];
+	dev_dbg(pctldev->dev, "%s: unmuxing %u pin(s) in '%s' from '%s'\n",
+		__func__, npins, grp->name, tz1090_functions[function].name);
+
+	/* subtract pins from ref count and unmux individually */
+	WARN_ON(grp->func_count < npins);
+	grp->func_count -= npins;
+unmux_pins:
+	for (i = 0; i < npins; ++i)
+		tz1090_pinctrl_perip_select(pmx, pins[i], false);
+}
+
+/**
+ * tz1090_pinctrl_gpio_request_enable() - Put pin in GPIO mode.
+ * @pctldev:		Pin control data
+ * @range:		GPIO range
+ * @pin:		Pin number
+ *
+ * Puts a particular pin into GPIO mode, disabling peripheral control until it's
+ * disabled again.
+ */
+static int tz1090_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+					      struct pinctrl_gpio_range *range,
+					      unsigned int pin)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	tz1090_pinctrl_gpio_select(pmx, pin, true);
+	return 0;
+}
+
+/**
+ * tz1090_pinctrl_gpio_disable_free() - Take pin out of GPIO mode.
+ * @pctldev:		Pin control data
+ * @range:		GPIO range
+ * @pin:		Pin number
+ *
+ * Releases the pin from GPIO use; if a peripheral is enabled on the pin it
+ * reverts to peripheral control.
+ */
+static void tz1090_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+					     struct pinctrl_gpio_range *range,
+					     unsigned int pin)
+{
+	tz1090_pinctrl_gpio_select(pinctrl_dev_get_drvdata(pctldev), pin,
+				   false);
+}
+
+/* Pinmux callbacks; const since the pinctrl core only reads this table */
+static const struct pinmux_ops tz1090_pinmux_ops = {
+	.get_functions_count	= tz1090_pinctrl_get_funcs_count,
+	.get_function_name	= tz1090_pinctrl_get_func_name,
+	.get_function_groups	= tz1090_pinctrl_get_func_groups,
+	.enable			= tz1090_pinctrl_enable,
+	.disable		= tz1090_pinctrl_disable,
+	.gpio_request_enable	= tz1090_pinctrl_gpio_request_enable,
+	.gpio_disable_free	= tz1090_pinctrl_gpio_disable_free,
+};
+
+/*
+ * Pin config operations
+ */
+
+/* Location of a pin's 2-bit pull up/down field in the PU_PD registers */
+struct tz1090_pinconf_pullup {
+	unsigned char index;	/* PU_PD register index (word granularity) */
+	unsigned char shift;	/* bit shift of the 2-bit field in that word */
+};
+
+/* Per-pin mapping to pull up/down register index and field shift, indexed by
+ * pin number (TZ1090_PIN_*); see tz1090_pinconf_reg() for how it is used. */
+static struct tz1090_pinconf_pullup tz1090_pinconf_pullup[] = {
+	{5, 22}, /*  0 - TZ1090_PIN_SDIO_CLK */
+	{0, 14}, /*  1 - TZ1090_PIN_SDIO_CMD */
+	{0,  6}, /*  2 - TZ1090_PIN_SDIO_D0 */
+	{0,  8}, /*  3 - TZ1090_PIN_SDIO_D1 */
+	{0, 10}, /*  4 - TZ1090_PIN_SDIO_D2 */
+	{0, 12}, /*  5 - TZ1090_PIN_SDIO_D3 */
+	{0,  2}, /*  6 - TZ1090_PIN_SDH_CD */
+	{0,  4}, /*  7 - TZ1090_PIN_SDH_WP */
+	{0, 16}, /*  8 - TZ1090_PIN_SPI0_MCLK */
+	{0, 18}, /*  9 - TZ1090_PIN_SPI0_CS0 */
+	{0, 20}, /* 10 - TZ1090_PIN_SPI0_CS1 */
+	{0, 22}, /* 11 - TZ1090_PIN_SPI0_CS2 */
+	{0, 24}, /* 12 - TZ1090_PIN_SPI0_DOUT */
+	{0, 26}, /* 13 - TZ1090_PIN_SPI0_DIN */
+	{0, 28}, /* 14 - TZ1090_PIN_SPI1_MCLK */
+	{0, 30}, /* 15 - TZ1090_PIN_SPI1_CS0 */
+	{1,  0}, /* 16 - TZ1090_PIN_SPI1_CS1 */
+	{1,  2}, /* 17 - TZ1090_PIN_SPI1_CS2 */
+	{1,  4}, /* 18 - TZ1090_PIN_SPI1_DOUT */
+	{1,  6}, /* 19 - TZ1090_PIN_SPI1_DIN */
+	{1,  8}, /* 20 - TZ1090_PIN_UART0_RXD */
+	{1, 10}, /* 21 - TZ1090_PIN_UART0_TXD */
+	{1, 12}, /* 22 - TZ1090_PIN_UART0_CTS */
+	{1, 14}, /* 23 - TZ1090_PIN_UART0_RTS */
+	{1, 16}, /* 24 - TZ1090_PIN_UART1_RXD */
+	{1, 18}, /* 25 - TZ1090_PIN_UART1_TXD */
+	{1, 20}, /* 26 - TZ1090_PIN_SCB0_SDAT */
+	{1, 22}, /* 27 - TZ1090_PIN_SCB0_SCLK */
+	{1, 24}, /* 28 - TZ1090_PIN_SCB1_SDAT */
+	{1, 26}, /* 29 - TZ1090_PIN_SCB1_SCLK */
+
+	{1, 28}, /* 30 - TZ1090_PIN_SCB2_SDAT */
+	{1, 30}, /* 31 - TZ1090_PIN_SCB2_SCLK */
+	{2,  0}, /* 32 - TZ1090_PIN_I2S_MCLK */
+	{2,  2}, /* 33 - TZ1090_PIN_I2S_BCLK_OUT */
+	{2,  4}, /* 34 - TZ1090_PIN_I2S_LRCLK_OUT */
+	{2,  6}, /* 35 - TZ1090_PIN_I2S_DOUT0 */
+	{2,  8}, /* 36 - TZ1090_PIN_I2S_DOUT1 */
+	{2, 10}, /* 37 - TZ1090_PIN_I2S_DOUT2 */
+	{2, 12}, /* 38 - TZ1090_PIN_I2S_DIN */
+	{4, 12}, /* 39 - TZ1090_PIN_PDM_A */
+	{4, 14}, /* 40 - TZ1090_PIN_PDM_B */
+	{4, 18}, /* 41 - TZ1090_PIN_PDM_C */
+	{4, 20}, /* 42 - TZ1090_PIN_PDM_D */
+	{2, 14}, /* 43 - TZ1090_PIN_TFT_RED0 */
+	{2, 16}, /* 44 - TZ1090_PIN_TFT_RED1 */
+	{2, 18}, /* 45 - TZ1090_PIN_TFT_RED2 */
+	{2, 20}, /* 46 - TZ1090_PIN_TFT_RED3 */
+	{2, 22}, /* 47 - TZ1090_PIN_TFT_RED4 */
+	{2, 24}, /* 48 - TZ1090_PIN_TFT_RED5 */
+	{2, 26}, /* 49 - TZ1090_PIN_TFT_RED6 */
+	{2, 28}, /* 50 - TZ1090_PIN_TFT_RED7 */
+	{2, 30}, /* 51 - TZ1090_PIN_TFT_GREEN0 */
+	{3,  0}, /* 52 - TZ1090_PIN_TFT_GREEN1 */
+	{3,  2}, /* 53 - TZ1090_PIN_TFT_GREEN2 */
+	{3,  4}, /* 54 - TZ1090_PIN_TFT_GREEN3 */
+	{3,  6}, /* 55 - TZ1090_PIN_TFT_GREEN4 */
+	{3,  8}, /* 56 - TZ1090_PIN_TFT_GREEN5 */
+	{3, 10}, /* 57 - TZ1090_PIN_TFT_GREEN6 */
+	{3, 12}, /* 58 - TZ1090_PIN_TFT_GREEN7 */
+	{3, 14}, /* 59 - TZ1090_PIN_TFT_BLUE0 */
+
+	{3, 16}, /* 60 - TZ1090_PIN_TFT_BLUE1 */
+	{3, 18}, /* 61 - TZ1090_PIN_TFT_BLUE2 */
+	{3, 20}, /* 62 - TZ1090_PIN_TFT_BLUE3 */
+	{3, 22}, /* 63 - TZ1090_PIN_TFT_BLUE4 */
+	{3, 24}, /* 64 - TZ1090_PIN_TFT_BLUE5 */
+	{3, 26}, /* 65 - TZ1090_PIN_TFT_BLUE6 */
+	{3, 28}, /* 66 - TZ1090_PIN_TFT_BLUE7 */
+	{3, 30}, /* 67 - TZ1090_PIN_TFT_VDDEN_GD */
+	{4,  0}, /* 68 - TZ1090_PIN_TFT_PANELCLK */
+	{4,  2}, /* 69 - TZ1090_PIN_TFT_BLANK_LS */
+	{4,  4}, /* 70 - TZ1090_PIN_TFT_VSYNC_NS */
+	{4,  6}, /* 71 - TZ1090_PIN_TFT_HSYNC_NR */
+	{4,  8}, /* 72 - TZ1090_PIN_TFT_VD12ACB */
+	{4, 10}, /* 73 - TZ1090_PIN_TFT_PWRSAVE */
+	{4, 24}, /* 74 - TZ1090_PIN_TX_ON */
+	{4, 26}, /* 75 - TZ1090_PIN_RX_ON */
+	{4, 28}, /* 76 - TZ1090_PIN_PLL_ON */
+	{4, 30}, /* 77 - TZ1090_PIN_PA_ON */
+	{5,  0}, /* 78 - TZ1090_PIN_RX_HP */
+	{5,  6}, /* 79 - TZ1090_PIN_GAIN0 */
+	{5,  8}, /* 80 - TZ1090_PIN_GAIN1 */
+	{5, 10}, /* 81 - TZ1090_PIN_GAIN2 */
+	{5, 12}, /* 82 - TZ1090_PIN_GAIN3 */
+	{5, 14}, /* 83 - TZ1090_PIN_GAIN4 */
+	{5, 16}, /* 84 - TZ1090_PIN_GAIN5 */
+	{5, 18}, /* 85 - TZ1090_PIN_GAIN6 */
+	{5, 20}, /* 86 - TZ1090_PIN_GAIN7 */
+	{5,  2}, /* 87 - TZ1090_PIN_ANT_SEL0 */
+	{5,  4}, /* 88 - TZ1090_PIN_ANT_SEL1 */
+	{0,  0}, /* 89 - TZ1090_PIN_SDH_CLK_IN */
+
+	{5, 24}, /* 90 - TZ1090_PIN_TCK */
+	{5, 26}, /* 91 - TZ1090_PIN_TRST */
+	{5, 28}, /* 92 - TZ1090_PIN_TDI */
+	{5, 30}, /* 93 - TZ1090_PIN_TDO */
+	{6,  0}, /* 94 - TZ1090_PIN_TMS */
+	{4, 16}, /* 95 - TZ1090_PIN_CLK_OUT0 */
+	{4, 22}, /* 96 - TZ1090_PIN_CLK_OUT1 */
+};
+
+/**
+ * tz1090_pinconf_reg() - Locate the register field for a per-pin config param.
+ * @pctldev:	Pin control data
+ * @pin:	Pin number
+ * @param:	Generic pin config parameter
+ * @report_err:	Whether to log unsupported parameters (unused here; kept for
+ *		symmetry with tz1090_pinconf_group_reg())
+ * @reg:	Returns register offset
+ * @width:	Returns field width in bits
+ * @mask:	Returns field mask
+ * @shift:	Returns field shift
+ * @val:	Returns field value which selects @param
+ *
+ * Returns 0 on success, or -ENOTSUPP for any parameter that isn't an input
+ * bias parameter, which is the only per-pin configuration the hardware has.
+ */
+static int tz1090_pinconf_reg(struct pinctrl_dev *pctldev,
+			      unsigned int pin,
+			      enum pin_config_param param,
+			      bool report_err,
+			      u32 *reg, u32 *width, u32 *mask, u32 *shift,
+			      u32 *val)
+{
+	struct tz1090_pinconf_pullup *pu;
+
+	/* All supported pins have controllable input bias */
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		*val = REG_PU_PD_TRISTATE;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		*val = REG_PU_PD_UP;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		*val = REG_PU_PD_DOWN;
+		break;
+	case PIN_CONFIG_BIAS_BUS_HOLD:
+		*val = REG_PU_PD_REPEATER;
+		break;
+	default:
+		/* Only input bias parameters supported */
+		return -ENOTSUPP;
+	}
+
+	/* Look up the pull up/down register index and shift for this pin */
+	pu = &tz1090_pinconf_pullup[pin];
+	*reg = REG_PINCTRL_PU_PD + 4*pu->index;
+	*shift = pu->shift;
+	*width = 2;
+
+	/* Calculate field information */
+	*mask = (BIT(*width) - 1) << *shift;
+
+	return 0;
+}
+
+/*
+ * Read back a per-pin config parameter. Returns -ENOTSUPP for non-bias
+ * parameters, -EINVAL if the requested bias mode is not the one currently
+ * programmed (the generic pinconf convention for "config not active"), and
+ * 0 with *config packed otherwise.
+ */
+static int tz1090_pinconf_get(struct pinctrl_dev *pctldev,
+			      unsigned pin, unsigned long *config)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	int ret;
+	u32 reg, width, mask, shift, val, tmp, arg;
+
+	/* Get register information */
+	ret = tz1090_pinconf_reg(pctldev, pin, param, true,
+				 &reg, &width, &mask, &shift, &val);
+	if (ret < 0)
+		return ret;
+
+	/* Extract field from register; arg is 1 iff the field matches val */
+	tmp = pmx_read(pmx, reg);
+	arg = ((tmp & mask) >> shift) == val;
+
+	/* Config not active */
+	if (!arg)
+		return -EINVAL;
+
+	/* And pack config */
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+/*
+ * Set a per-pin config parameter. Only boolean arguments (0 or 1) are
+ * accepted: arg 1 programs the field value selecting the requested bias,
+ * arg 0 just clears the 2-bit field.
+ */
+static int tz1090_pinconf_set(struct pinctrl_dev *pctldev,
+			      unsigned pin, unsigned long config)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param = pinconf_to_config_param(config);
+	unsigned int arg = pinconf_to_config_argument(config);
+	int ret;
+	u32 reg, width, mask, shift, val, tmp;
+	unsigned long flags;
+
+	dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
+		__func__, tz1090_pins[pin].name, config);
+
+	/* Get register information */
+	ret = tz1090_pinconf_reg(pctldev, pin, param, true,
+				 &reg, &width, &mask, &shift, &val);
+	if (ret < 0)
+		return ret;
+
+	/* Unpack argument and range check it */
+	if (arg > 1) {
+		dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
+			__func__, arg);
+		return -EINVAL;
+	}
+
+	/* Write register field */
+	__global_lock2(flags);
+	tmp = pmx_read(pmx, reg);
+	tmp &= ~mask;
+	/* NOTE(review): field value 0 presumably selects the hardware
+	 * default bias — confirm against the TRM */
+	if (arg)
+		tmp |= val << shift;
+	pmx_write(pmx, tmp, reg);
+	__global_unlock2(flags);
+
+	return 0;
+}
+
+/* Map register field value -> config argument; -EINVAL marks "disabled" */
+static const int tz1090_boolean_map[] = {
+	[0]		= -EINVAL,
+	[1]		= 1,
+};
+
+/* Map DR register field value -> drive strength argument in mA */
+static const int tz1090_dr_map[] = {
+	[REG_DR_2mA]	= 2,
+	[REG_DR_4mA]	= 4,
+	[REG_DR_8mA]	= 8,
+	[REG_DR_12mA]	= 12,
+};
+
+/**
+ * tz1090_pinconf_group_reg() - Locate the register field for a group config.
+ * @pctldev:	Pin control data
+ * @g:		Pin group
+ * @param:	Generic pin config parameter
+ * @report_err:	Whether to log groups which have no drive control
+ * @reg:	Returns register offset
+ * @width:	Returns field width in bits
+ * @mask:	Returns field mask
+ * @shift:	Returns field shift
+ * @map:	Returns map from register field value to config argument
+ *
+ * Returns 0 on success, or -ENOTSUPP if @g has no drive control or @param
+ * isn't a supported drive parameter.
+ */
+static int tz1090_pinconf_group_reg(struct pinctrl_dev *pctldev,
+				    const struct tz1090_pingroup *g,
+				    enum pin_config_param param,
+				    bool report_err,
+				    u32 *reg, u32 *width, u32 *mask, u32 *shift,
+				    const int **map)
+{
+	/* Drive configuration applies in groups, but not to all groups. */
+	if (!g->drv) {
+		if (report_err)
+			dev_dbg(pctldev->dev,
+				"%s: group %s has no drive control\n",
+				__func__, g->name);
+		return -ENOTSUPP;
+	}
+
+	/* Find information about drive parameter's register */
+	switch (param) {
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		*reg = REG_PINCTRL_SCHMITT;
+		*width = 1;
+		*map = tz1090_boolean_map;
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		*reg = REG_PINCTRL_SR;
+		*width = 1;
+		*map = tz1090_boolean_map;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		*reg = REG_PINCTRL_DR;
+		*width = 2;
+		*map = tz1090_dr_map;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	/* Calculate field information */
+	*shift = g->slw_bit * *width;
+	*mask = (BIT(*width) - 1) << *shift;
+
+	return 0;
+}
+
+/*
+ * Read back a group config parameter. Group indices beyond tz1090_groups
+ * refer to single-pin pseudo groups and are forwarded to the per-pin
+ * handler, as are real single-pin groups for per-pin parameters.
+ */
+static int tz1090_pinconf_group_get(struct pinctrl_dev *pctldev,
+				    unsigned group,
+				    unsigned long *config)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pingroup *g;
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	int ret, arg;
+	unsigned int pin;
+	u32 reg, width, mask, shift, val;
+	const int *map;
+
+	/* single-pin pseudo group appended after the real groups */
+	if (group >= ARRAY_SIZE(tz1090_groups)) {
+		pin = group - ARRAY_SIZE(tz1090_groups);
+		return tz1090_pinconf_get(pctldev, pin, config);
+	}
+
+	/* try the per-pin handler first for real single-pin groups */
+	g = &tz1090_groups[group];
+	if (g->npins == 1) {
+		pin = g->pins[0];
+		ret = tz1090_pinconf_get(pctldev, pin, config);
+		if (ret != -ENOTSUPP)
+			return ret;
+	}
+
+	/* Get register information */
+	ret = tz1090_pinconf_group_reg(pctldev, g, param, true,
+				       &reg, &width, &mask, &shift, &map);
+	if (ret < 0)
+		return ret;
+
+	/* Extract field from register, mapping to the config argument */
+	val = pmx_read(pmx, reg);
+	arg = map[(val & mask) >> shift];
+	if (arg < 0)
+		return arg;
+
+	/* And pack config */
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+/*
+ * Set a group config parameter. Single-pin (pseudo) groups and per-pin
+ * parameters are forwarded to the per-pin handler; group drive parameters
+ * are mapped to a register field value via the map returned by
+ * tz1090_pinconf_group_reg().
+ */
+static int tz1090_pinconf_group_set(struct pinctrl_dev *pctldev,
+				    unsigned group, unsigned long config)
+{
+	struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+	const struct tz1090_pingroup *g;
+	enum pin_config_param param = pinconf_to_config_param(config);
+	unsigned int arg, pin, i;
+	const unsigned int *pit;
+	int ret;
+	u32 reg, width, mask, shift, val;
+	unsigned long flags;
+	const int *map;
+
+	/* single-pin pseudo group appended after the real groups */
+	if (group >= ARRAY_SIZE(tz1090_groups)) {
+		pin = group - ARRAY_SIZE(tz1090_groups);
+		return tz1090_pinconf_set(pctldev, pin, config);
+	}
+
+	/* try the per-pin handler first for real single-pin groups */
+	g = &tz1090_groups[group];
+	if (g->npins == 1) {
+		pin = g->pins[0];
+		ret = tz1090_pinconf_set(pctldev, pin, config);
+		if (ret != -ENOTSUPP)
+			return ret;
+	}
+
+	dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
+		__func__, g->name, config);
+
+	/* Get register information */
+	ret = tz1090_pinconf_group_reg(pctldev, g, param, true,
+				       &reg, &width, &mask, &shift, &map);
+	if (ret < 0) {
+		/*
+		 * Maybe we're trying to set a per-pin configuration of a group,
+		 * so do the pins one by one. This is mainly as a convenience.
+		 */
+		for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
+			ret = tz1090_pinconf_set(pctldev, *pit, config);
+			if (ret)
+				return ret;
+		}
+		return 0;
+	}
+
+	/*
+	 * Unpack argument and map it to register value.
+	 * NOTE(review): map[i] is promoted to unsigned for the first compare,
+	 * so -EINVAL entries never match a sane arg and are handled by the
+	 * second test (arg 0 selects the disabled state).
+	 */
+	arg = pinconf_to_config_argument(config);
+	for (i = 0; i < BIT(width); ++i) {
+		if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
+			/* Write register field */
+			__global_lock2(flags);
+			val = pmx_read(pmx, reg);
+			val &= ~mask;
+			val |= i << shift;
+			pmx_write(pmx, val, reg);
+			__global_unlock2(flags);
+			return 0;
+		}
+	}
+
+	dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
+		__func__, arg);
+	return -EINVAL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Debugfs: print a single packed config as "property=arg", using the
+ * cfg_params param->property table (defined earlier in this file).
+ */
+static void tz1090_pinconf_config_dbg_show(struct pinctrl_dev *pctldev,
+					   struct seq_file *s,
+					   unsigned long config)
+{
+	enum pin_config_param param = pinconf_to_config_param(config);
+	u16 arg = pinconf_to_config_argument(config);
+	const char *pname = "unknown";
+	int i;
+
+	/* look up the device-tree property name for this parameter */
+	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+		if (cfg_params[i].param == param) {
+			pname = cfg_params[i].property;
+			break;
+		}
+	}
+
+	seq_printf(s, "%s=%d", pname, arg);
+}
+#endif
+
+/* Pinconf callbacks; static const — only referenced via tz1090_pinctrl_desc */
+static const struct pinconf_ops tz1090_pinconf_ops = {
+	.is_generic		= true,
+	.pin_config_get		= tz1090_pinconf_get,
+	.pin_config_set		= tz1090_pinconf_set,
+	.pin_config_group_get	= tz1090_pinconf_group_get,
+	.pin_config_group_set	= tz1090_pinconf_group_set,
+#ifdef CONFIG_DEBUG_FS
+	.pin_config_config_dbg_show	= tz1090_pinconf_config_dbg_show,
+#endif
+};
+
+/*
+ * Pin control driver setup
+ */
+
+/* Pinctrl descriptor; name/pins/npins are filled in at probe time, so this
+ * cannot be const. */
+static struct pinctrl_desc tz1090_pinctrl_desc = {
+	.pctlops	= &tz1090_pinctrl_ops,
+	.pmxops		= &tz1090_pinmux_ops,
+	.confops	= &tz1090_pinconf_ops,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Probe: allocate driver state, map the pinctrl registers and register the
+ * pinctrl device. All resources are devm-managed.
+ */
+static int tz1090_pinctrl_probe(struct platform_device *pdev)
+{
+	struct tz1090_pmx *pmx;
+	struct resource *res;
+
+	pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
+	if (!pmx)
+		return -ENOMEM;
+	pmx->dev = &pdev->dev;
+	spin_lock_init(&pmx->lock);
+
+	tz1090_pinctrl_desc.name = dev_name(&pdev->dev);
+	tz1090_pinctrl_desc.pins = tz1090_pins;
+	tz1090_pinctrl_desc.npins = ARRAY_SIZE(tz1090_pins);
+
+	/*
+	 * devm_ioremap_resource() checks for a missing resource, requests the
+	 * region and maps it, logging its own errors.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pmx->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pmx->regs))
+		return PTR_ERR(pmx->regs);
+
+	pmx->pctl = pinctrl_register(&tz1090_pinctrl_desc, &pdev->dev, pmx);
+	if (!pmx->pctl) {
+		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, pmx);
+
+	dev_info(&pdev->dev, "TZ1090 pinctrl driver initialised\n");
+
+	return 0;
+}
+
+/* Remove: unregister the pinctrl device registered at probe time */
+static int tz1090_pinctrl_remove(struct platform_device *pdev)
+{
+	struct tz1090_pmx *pmx = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(pmx->pctl);
+	return 0;
+}
+
+/* Device tree match table; const as of_match_table is a const pointer */
+static const struct of_device_id tz1090_pinctrl_of_match[] = {
+	{ .compatible = "img,tz1090-pinctrl", },
+	{ },
+};
+
+/* Platform driver, matched via DT compatible "img,tz1090-pinctrl" */
+static struct platform_driver tz1090_pinctrl_driver = {
+	.driver = {
+		.name		= "tz1090-pinctrl",
+		.owner		= THIS_MODULE,
+		.of_match_table	= tz1090_pinctrl_of_match,
+	},
+	.probe	= tz1090_pinctrl_probe,
+	.remove	= tz1090_pinctrl_remove,
+};
+
+static int __init tz1090_pinctrl_init(void)
+{
+	/* build the dynamic mux pin tables before registering the driver */
+	tz1090_init_mux_pins();
+	return platform_driver_register(&tz1090_pinctrl_driver);
+}
+/* postcore: presumably so pins are available before consumers probe — confirm */
+postcore_initcall(tz1090_pinctrl_init);
+
+static void __exit tz1090_pinctrl_exit(void)
+{
+	platform_driver_unregister(&tz1090_pinctrl_driver);
+}
+module_exit(tz1090_pinctrl_exit);
+
+/* Module metadata */
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("Toumaz Xenif TZ1090 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, tz1090_pinctrl_of_match);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b983813..a38152a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -841,6 +841,14 @@
 	  If you say yes here you get support for the RTC subsystem of the
 	  NUC910/NUC920 used in embedded systems.
 
+config RTC_DRV_IMGPDC
+	tristate "ImgTec Power-Down Controller RTC"
+	depends on SOC_TZ1090
+	default y
+	help
+	  If you say yes here you will get support for the RTC in the ImgTec
+	  PowerDown Controller as found on the Toumaz Xenif TZ1090 (Comet) SoC.
+
 comment "on-CPU RTC drivers"
 
 config RTC_DRV_DAVINCI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c33f86f..bffd801 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -54,6 +54,7 @@
 obj-$(CONFIG_RTC_DRV_FM3130)	+= rtc-fm3130.o
 obj-$(CONFIG_RTC_DRV_GENERIC)	+= rtc-generic.o
 obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
+obj-$(CONFIG_RTC_DRV_IMGPDC)	+= rtc-imgpdc.o
 obj-$(CONFIG_RTC_DRV_IMXDI)	+= rtc-imxdi.o
 obj-$(CONFIG_RTC_DRV_ISL1208)	+= rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_ISL12022)	+= rtc-isl12022.o
diff --git a/drivers/rtc/rtc-imgpdc.c b/drivers/rtc/rtc-imgpdc.c
new file mode 100644
index 0000000..b95c0ac
--- /dev/null
+++ b/drivers/rtc/rtc-imgpdc.c
@@ -0,0 +1,1130 @@
+/*
+ * ImgTec PowerDown Controller (PDC) RTC
+ *
+ * Copyright 2010-2012 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/* needed for clk32k interface */
+#include <asm/soc-tz1090/clock.h>
+
+/* The hardware counts years 0..HW_YEARS-1 from 2000; rtc_time from 1900 */
+#define HW_EPOCH		2000
+#define HW_YEARS		 100
+#define LIN_EPOCH		1900
+#define HW2LIN_EPOCH	(HW_EPOCH - LIN_EPOCH)	/* add to hw year for tm_year */
+#define LIN2HW_EPOCH	(LIN_EPOCH - HW_EPOCH)	/* add to tm_year for hw year */
+
+/* Registers (byte offsets from the RTC register base) */
+#define PDC_RTC_CONTROL			0x00
+#define PDC_RTC_SEC			0x04
+#define PDC_RTC_MIN			0x08
+#define PDC_RTC_HOUR			0x0c
+#define PDC_RTC_DAY			0x10
+#define PDC_RTC_MON			0x14
+#define PDC_RTC_YEAR			0x18
+#define PDC_RTC_ASEC			0x1c
+#define PDC_RTC_AMIN			0x20
+#define PDC_RTC_AHOUR			0x24
+#define PDC_RTC_ADAY			0x28
+#define PDC_RTC_AMON			0x2c
+#define PDC_RTC_AYEAR			0x30
+#define PDC_RTC_IRQ_STATUS		0x34
+#define PDC_RTC_IRQ_CLEAR		0x38
+#define PDC_RTC_IRQ_EN			0x3c
+
+/* Register field masks (A* alarm fields pair an _EN bit with a value) */
+#define PDC_RTC_CONTROL_GAE		0x08	/* global alarm enable */
+#define PDC_RTC_CONTROL_FAST		0x04	/* unused by this driver */
+#define PDC_RTC_CONTROL_UPDATE		0x02
+#define PDC_RTC_CONTROL_CE		0x01	/* clock enable */
+#define PDC_RTC_SEC_SEC			0x3f
+#define PDC_RTC_MIN_MIN			0x3f
+#define PDC_RTC_HOUR_HOUR		0x1f
+#define PDC_RTC_DAY_DAY			0x1f
+#define PDC_RTC_MON_MON			0x0f
+#define PDC_RTC_YEAR_YEAR		0x7f
+#define PDC_RTC_ASEC_EN			0x40
+#define PDC_RTC_ASEC_ASEC		0x3f
+#define PDC_RTC_AMIN_EN			0x40
+#define PDC_RTC_AMIN_AMIN		0x3f
+#define PDC_RTC_AHOUR_EN		0x20
+#define PDC_RTC_AHOUR_AHOUR		0x1f
+#define PDC_RTC_ADAY_EN			0x20
+#define PDC_RTC_ADAY_ADAY		0x1f
+#define PDC_RTC_AMON_EN			0x10
+#define PDC_RTC_AMON_AMON		0x0f
+#define PDC_RTC_AYEAR_EN		0x80
+#define PDC_RTC_AYEAR_AYEAR		0x7f
+#define PDC_RTC_IRQ_ALARM		0x04
+#define PDC_RTC_IRQ_MIN			0x02
+#define PDC_RTC_IRQ_SEC			0x01
+
+/**
+ * struct pdc_rtc_priv - Private PDC RTC data.
+ * @rtc_dev:		RTC device structure.
+ * @dev:		Platform device (used for dev_dbg messages etc).
+ * @irq:		IRQ number of RTC device.
+ * @reg_base:		Base of registers memory.
+ * @nonvolatile_base:	Base of non-volatile registers if provided.
+ * @nonvolatile_len:	Length of non-volatile registers.
+ * @time_set_delay:	Number of seconds it takes to set the time.
+ * @alarm_irq_delay:	Number of seconds the alarm IRQ is delayed.
+ * @clk_nb:		Notifier block for clock notify events.
+ * @alarm_pending:	Whether an alarm has fired and hasn't been handled.
+ * @lock:		Protects PDC_RTC_CONTROL, control_reg, and
+ *			softalrm_sec.
+ * @control_reg:	Back up of PDC_RTC_CONTROL to work around buggy
+ *			hardware. It takes time for some last written values to
+ *			take effect, and later writes can replace a write that
+ *			hasn't taken effect yet.
+ * @softalrm_sec:	Software alarm second, for emulating alarms which need
+ *			to fire very soon, which would otherwise foil our work
+ *			around for late alarm interrupts.
+ * @hardalrm_offset:	Offset from desired alarm time given to hardware.
+ * @hardstop_time:	Time the alarm was stopped (or possibly second before).
+ * @alrm_time:		Time of current alarm, for filtering out delayed
+ *			cancelled alarm interrupts.
+ * @adj_alrm_time:	Time of current adjusted alarm (for wrong clock rate).
+ * @time_update_diff:	Difference between the old time and the new time, which
+ *			should be used while hardware is still updating the
+ *			time.
+ * @suspended:		Whether the device is in suspend mode, in which case rtc
+ *			interrupt events should be postponed until resume (see
+ *			postponed_rtc_int).
+ * @wakeup:		Whether the device can wake the system from a sleep
+ *			state.
+ * @postponed_rtc_int:	Postponed rtc interrupt flags to submit on resume.
+ * @last_irq_en:	Preserved IRQ enable state when wakeup is in use.
+ */
+struct pdc_rtc_priv {
+	struct rtc_device *rtc_dev;
+	struct device *dev;
+	int irq;
+	void __iomem *reg_base;
+	void __iomem *nonvolatile_base;
+	unsigned long nonvolatile_len;
+	unsigned int time_set_delay;
+	unsigned int alarm_irq_delay;
+	struct notifier_block clk_nb;
+
+	/* state for the hardware-quirk workarounds; see field docs above */
+	int alarm_pending;
+	spinlock_t lock;
+	u32 control_reg;
+	int softalrm_sec;
+	int hardalrm_offset;
+	unsigned long hardstop_time;
+	unsigned long alrm_time;
+	unsigned long adj_alrm_time;
+	unsigned long time_update_diff;
+
+	/* suspend data */
+	bool suspended;
+	bool wakeup;
+	unsigned int postponed_rtc_int;
+	unsigned int last_irq_en;
+};
+
+/* Write a 32-bit value to the RTC register at byte offset @reg_offs */
+static void pdc_rtc_write(struct pdc_rtc_priv *priv,
+		   unsigned int reg_offs, unsigned int data)
+{
+	void __iomem *addr = priv->reg_base + reg_offs;
+
+	iowrite32(data, addr);
+}
+
+/* Read the 32-bit RTC register at byte offset @reg_offs */
+static unsigned int pdc_rtc_read(struct pdc_rtc_priv *priv,
+			  unsigned int reg_offs)
+{
+	void __iomem *addr = priv->reg_base + reg_offs;
+
+	return ioread32(addr);
+}
+
+/**
+ * pdc_rtc_write_nonvolatile() - Write a 32-bit non-volatile scratch register.
+ * @priv:	Private PDC RTC data
+ * @reg_offs:	Byte offset into the non-volatile register area
+ * @in:		Value to store
+ *
+ * Returns 0 on success, or -EINVAL if the 32-bit access would run past the
+ * end of the area (including when no area was provided, i.e.
+ * @nonvolatile_len is zero). The old check only bounded the first byte,
+ * allowing a 4-byte write starting up to 3 bytes past the end.
+ */
+static int pdc_rtc_write_nonvolatile(struct pdc_rtc_priv *priv,
+				     unsigned int reg_offs, unsigned long in)
+{
+	/* the whole 32-bit word must lie within the non-volatile area */
+	if (priv->nonvolatile_len < 4 || reg_offs > priv->nonvolatile_len - 4)
+		return -EINVAL;
+
+	iowrite32(in, priv->nonvolatile_base + reg_offs);
+	return 0;
+}
+
+/**
+ * pdc_rtc_read_nonvolatile() - Read a 32-bit non-volatile scratch register.
+ * @priv:	Private PDC RTC data
+ * @reg_offs:	Byte offset into the non-volatile register area
+ * @out:	Returns the value read
+ *
+ * Returns 0 on success, or -EINVAL if the 32-bit access would run past the
+ * end of the area (the old check only bounded the first byte of the access).
+ */
+static int pdc_rtc_read_nonvolatile(struct pdc_rtc_priv *priv,
+				    unsigned int reg_offs, unsigned long *out)
+{
+	/* the whole 32-bit word must lie within the non-volatile area */
+	if (priv->nonvolatile_len < 4 || reg_offs > priv->nonvolatile_len - 4)
+		return -EINVAL;
+
+	*out = ioread32(priv->nonvolatile_base + reg_offs);
+	return 0;
+}
+
+
+/*
+ * Read the raw time registers, retrying until a consistent snapshot is
+ * obtained. Caller must hold priv->lock. Does not compensate for a time
+ * update still being in progress; *@updating (if non-NULL) reports whether
+ * one was, so the caller can apply time_update_diff.
+ */
+static void _pdc_rtc_read_time_raw(struct pdc_rtc_priv *priv,
+				   struct rtc_time *tm, int *updating)
+{
+	int min, upd = 0;
+
+	/*
+	 * If it takes time for the time to get set and an update is in
+	 * progress, we need to check that the update is still in progress
+	 * afterwards otherwise it will have changed while we were reading it.
+	 */
+	if (priv->time_set_delay)
+		upd = pdc_rtc_read(priv, PDC_RTC_CONTROL)
+			& PDC_RTC_CONTROL_UPDATE;
+start_again:
+
+	/*
+	 * We re-read the minute at the end of the loop to check it hasn't
+	 * changed while we were reading the others. If it has then we didn't
+	 * read atomically and should try again. The second is allowed to
+	 * change by itself as that won't result in an inconsistent time.
+	 */
+	min = pdc_rtc_read(priv, PDC_RTC_MIN) & PDC_RTC_MIN_MIN;
+	do {
+		tm->tm_sec  = (pdc_rtc_read(priv, PDC_RTC_SEC) &
+			       PDC_RTC_SEC_SEC);
+		tm->tm_min  = min;
+		tm->tm_hour = (pdc_rtc_read(priv, PDC_RTC_HOUR) &
+			       PDC_RTC_HOUR_HOUR);
+		tm->tm_mday = (pdc_rtc_read(priv, PDC_RTC_DAY) &
+			       PDC_RTC_DAY_DAY);
+		tm->tm_mon  = (pdc_rtc_read(priv, PDC_RTC_MON) &
+			       PDC_RTC_MON_MON) - 1;
+		tm->tm_year = (pdc_rtc_read(priv, PDC_RTC_YEAR) &
+			       PDC_RTC_YEAR_YEAR) + HW2LIN_EPOCH;
+		if (upd) {
+			upd = pdc_rtc_read(priv, PDC_RTC_CONTROL)
+				& PDC_RTC_CONTROL_UPDATE;
+			/* did the update finish while we were reading */
+			if (!upd)
+				goto start_again;
+		}
+		min = pdc_rtc_read(priv, PDC_RTC_MIN) & PDC_RTC_MIN_MIN;
+		if (min != tm->tm_min)
+			dev_dbg(priv->dev,
+				"time read %02d:%02d:%02d nonatomic, retrying\n",
+				tm->tm_hour, tm->tm_min, tm->tm_sec);
+	} while (min != tm->tm_min);
+
+	if (updating)
+		*updating = upd;
+}
+
+/*
+ * Read the current time, compensating for an in-progress time update by
+ * adding time_update_diff (see pdc_rtc_set_time()). Caller must hold
+ * priv->lock.
+ */
+static void _pdc_rtc_read_time(struct pdc_rtc_priv *priv, struct rtc_time *tm)
+{
+	int upd;
+	unsigned long time;
+
+	_pdc_rtc_read_time_raw(priv, tm, &upd);
+
+	dev_dbg(priv->dev, "time read %02d:%02d:%02d\n",
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	/* if we got the old time during an update, add the difference */
+	if (upd && priv->time_update_diff) {
+		rtc_tm_to_time(tm, &time);
+		time += priv->time_update_diff;
+		rtc_time_to_tm(time, tm);
+
+		dev_dbg(priv->dev,
+			"update was in progress, adjusting to %02d:%02d:%02d\n",
+			tm->tm_hour, tm->tm_min, tm->tm_sec);
+	}
+}
+
+/* RTC class op: read the current time under the lock */
+static int pdc_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	_pdc_rtc_read_time(priv, tm);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* fields the hardware does not track */
+	tm->tm_wday  = -1;
+	tm->tm_yday  = -1;
+	tm->tm_isdst = -1;
+
+	return 0;
+}
+
+/*
+ * RTC class op: set the time. Due to a hardware quirk the written time only
+ * takes effect time_set_delay seconds later, so the written time is advanced
+ * by that amount, and the old->new difference is recorded in
+ * time_update_diff so reads during the update can be compensated (see
+ * _pdc_rtc_read_time()).
+ */
+static int pdc_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned int hw_year;
+	unsigned int ctrl;
+	unsigned long flags;
+	unsigned long time, rtime;	/* only used when time_set_delay */
+	struct rtc_time tm_adj;
+
+	dev_dbg(priv->dev, "time set %02d:%02d:%02d\n",
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+	/*
+	 * Due to a hardware quirk the time may only be set after several
+	 * seconds.
+	 */
+	if (priv->time_set_delay) {
+		rtc_tm_to_time(tm, &time);
+		rtc_time_to_tm(time + priv->time_set_delay, &tm_adj);
+		tm = &tm_adj;
+	}
+
+	hw_year = tm->tm_year + LIN2HW_EPOCH;
+	/* year must be in range (hw_year wraps huge for pre-2000 years) */
+	if (hw_year >= HW_YEARS)
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* write out the values */
+	pdc_rtc_write(priv, PDC_RTC_SEC, tm->tm_sec);
+	pdc_rtc_write(priv, PDC_RTC_MIN, tm->tm_min);
+	pdc_rtc_write(priv, PDC_RTC_HOUR, tm->tm_hour);
+	pdc_rtc_write(priv, PDC_RTC_DAY, tm->tm_mday);
+	pdc_rtc_write(priv, PDC_RTC_MON, tm->tm_mon + 1);
+	pdc_rtc_write(priv, PDC_RTC_YEAR, hw_year);
+
+	/* update the clock with the written values */
+	ctrl = priv->control_reg | PDC_RTC_CONTROL_UPDATE;
+	pdc_rtc_write(priv, PDC_RTC_CONTROL, ctrl);
+
+	/*
+	 * Record the offset of the new time so that we can calculate the
+	 * current time before the update is complete.
+	 */
+	if (priv->time_set_delay) {
+		_pdc_rtc_read_time_raw(priv, &tm_adj, NULL);
+		rtc_tm_to_time(&tm_adj, &rtime);
+		priv->time_update_diff = time - rtime;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
+/* Return nonzero iff the global alarm enable (GAE) bit is currently set */
+static int pdc_rtc_alarm_enabled(struct pdc_rtc_priv *priv)
+{
+	if (priv->control_reg & PDC_RTC_CONTROL_GAE)
+		return 1;
+	return 0;
+}
+
+/**
+ * pdc_rtc_read_alarm() - Read the current alarm time from the hardware.
+ * @dev:	RTC device
+ * @alrm:	Returns alarm time and state
+ *
+ * Alarm fields whose _EN bit is clear are reported as -1 (wildcard). The
+ * hardware alarm is programmed early to compensate for late alarm
+ * interrupts, so hardalrm_offset is subtracted back out to report the time
+ * the caller originally requested.
+ */
+static int pdc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned long scheduled;
+	int offset;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	offset = priv->hardalrm_offset;
+
+	/* Just get the register values */
+	alrm->enabled = pdc_rtc_alarm_enabled(priv);
+	alrm->pending = priv->alarm_pending;
+
+	alrm->time.tm_sec  = pdc_rtc_read(priv, PDC_RTC_ASEC);
+	alrm->time.tm_min  = pdc_rtc_read(priv, PDC_RTC_AMIN);
+	alrm->time.tm_hour = pdc_rtc_read(priv, PDC_RTC_AHOUR);
+	alrm->time.tm_mday = pdc_rtc_read(priv, PDC_RTC_ADAY);
+	alrm->time.tm_mon  = pdc_rtc_read(priv, PDC_RTC_AMON);
+	alrm->time.tm_year = pdc_rtc_read(priv, PDC_RTC_AYEAR);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Any missing _EN bit translates to -1 */
+
+	if (alrm->time.tm_sec & PDC_RTC_ASEC_EN)
+		alrm->time.tm_sec &= PDC_RTC_ASEC_ASEC;
+	else
+		alrm->time.tm_sec = -1;
+
+	if (alrm->time.tm_min & PDC_RTC_AMIN_EN)
+		alrm->time.tm_min &= PDC_RTC_AMIN_AMIN;
+	else
+		alrm->time.tm_min = -1;
+
+	if (alrm->time.tm_hour & PDC_RTC_AHOUR_EN)
+		alrm->time.tm_hour &= PDC_RTC_AHOUR_AHOUR;
+	else
+		alrm->time.tm_hour = -1;
+
+	if (alrm->time.tm_mday & PDC_RTC_ADAY_EN)
+		alrm->time.tm_mday &= PDC_RTC_ADAY_ADAY;
+	else
+		alrm->time.tm_mday = -1;
+
+	if (alrm->time.tm_mon & PDC_RTC_AMON_EN)
+		alrm->time.tm_mon = (alrm->time.tm_mon & PDC_RTC_AMON_AMON) - 1;
+	else
+		alrm->time.tm_mon = -1;
+
+	if (alrm->time.tm_year & PDC_RTC_AYEAR_EN)
+		alrm->time.tm_year = (alrm->time.tm_year & PDC_RTC_AYEAR_AYEAR)
+					+ HW2LIN_EPOCH;
+	else
+		alrm->time.tm_year = -1;
+
+	/*
+	 * The hardware has no weekday alarm register, so tm_wday is always
+	 * unsupported (the old code tested the still-uninitialized tm_wday
+	 * here before overwriting it, reading indeterminate stack data).
+	 */
+	alrm->time.tm_wday  = -1;
+	alrm->time.tm_yday  = -1;
+	alrm->time.tm_isdst = -1;
+
+	/*
+	 * The alarm time in the hardware is offset to compensate for the late
+	 * alarm interrupts, so we need to adjust it to get the original alarm.
+	 */
+
+	rtc_tm_to_time(&alrm->time, &scheduled);
+	scheduled -= offset;
+	rtc_time_to_tm(scheduled, &alrm->time);
+
+	return 0;
+}
+
+/*
+ * Disable both the hardware alarm (GAE) and the soft-alarm second emulation.
+ * Caller must hold priv->lock. @now (current RTC time in seconds, or 0 when
+ * the alarm_irq_delay workaround is inactive) is recorded as hardstop_time.
+ */
+static void _pdc_rtc_stop_alarm(struct pdc_rtc_priv *priv, unsigned long now)
+{
+	/* disable the secondly interrupt */
+	pdc_rtc_write(priv, PDC_RTC_IRQ_EN, PDC_RTC_IRQ_ALARM);
+	priv->softalrm_sec = -1;
+	/* disable the global alarm enable bit */
+	priv->control_reg &= ~PDC_RTC_CONTROL_GAE;
+	pdc_rtc_write(priv, PDC_RTC_CONTROL, priv->control_reg);
+
+	priv->hardstop_time = now;
+}
+
+/* Locked wrapper around _pdc_rtc_stop_alarm() */
+static void pdc_rtc_stop_alarm(struct pdc_rtc_priv *priv)
+{
+	unsigned long flags;
+	struct rtc_time tm;
+	unsigned long now = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->alarm_irq_delay) {
+		/*
+		 * Use the lock-held helper here: calling pdc_rtc_read_time()
+		 * would try to re-take priv->lock and deadlock.
+		 */
+		_pdc_rtc_read_time(priv, &tm);
+		rtc_tm_to_time(&tm, &now);
+	}
+	_pdc_rtc_stop_alarm(priv, now);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Enable the alarm by setting the global alarm enable (GAE) bit */
+static void pdc_rtc_start_alarm(struct pdc_rtc_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->control_reg |= PDC_RTC_CONTROL_GAE;
+	pdc_rtc_write(priv, PDC_RTC_CONTROL, priv->control_reg);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int _pdc_rtc_set_alarm(struct pdc_rtc_priv *priv, bool temporary,
+			      unsigned long scheduled, struct rtc_wkalrm *alrm)
+{
+	struct rtc_time tm;
+	unsigned long flags;
+	unsigned long now, adjusted;
+	struct rtc_wkalrm alrm_adj, *alrm_orig = alrm;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	/*
+	 * If we're compensating for hardware bugs, don't change the stored
+	 * alarm time.
+	 */
+	if (!temporary)
+		priv->alrm_time = scheduled;
+	/*
+	 * But still record the alarm being set so we can tell whether we need
+	 * to change it back again.
+	 */
+	priv->adj_alrm_time = scheduled;
+
+	/*
+	 * Due to a hardware quirk the alarm may fire several seconds late, so
+	 * rewind the scheduled alarm time to compensate.
+	 */
+	if (priv->alarm_irq_delay) {
+		alrm_adj = *alrm;
+
+		pdc_rtc_read_time(priv->dev, &tm);
+		rtc_tm_to_time(&tm, &now);
+
+try_again_locked:
+		adjusted = scheduled - priv->alarm_irq_delay;
+
+		/* disable the alarm while we set it up */
+		_pdc_rtc_stop_alarm(priv, now);
+
+		/* Make sure we're not setting the alarm in the past */
+		if (scheduled <= now) {
+			spin_unlock_irqrestore(&priv->lock, flags);
+			return -ETIME;
+		}
+		/* If adjusted time is in past, emulate with a soft alarm */
+		if (adjusted <= now && alrm->enabled) {
+			/* clear and enable secondly interrupt */
+			pdc_rtc_write(priv, PDC_RTC_IRQ_CLEAR, PDC_RTC_IRQ_SEC);
+			pdc_rtc_write(priv, PDC_RTC_IRQ_EN,
+				      PDC_RTC_IRQ_ALARM | PDC_RTC_IRQ_SEC);
+			priv->softalrm_sec = alrm_orig->time.tm_sec;
+			/* still set the real alarm as early as possible */
+			adjusted = now + 1;
+		}
+		priv->hardalrm_offset = adjusted - scheduled;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		rtc_time_to_tm(adjusted, &alrm_adj.time);
+		alrm = &alrm_adj;
+
+		dev_dbg(priv->dev, "alarm setting %02d:%02d:%02d\n",
+			alrm->time.tm_hour, alrm->time.tm_min,
+			alrm->time.tm_sec);
+	} else {
+		/* disable the alarm while we set it up */
+		_pdc_rtc_stop_alarm(priv, 0);
+
+		adjusted = scheduled;
+		priv->hardalrm_offset = 0;
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	/*
+	 * don't use fields set to -1
+	 * all smaller fields than a field in use must also be in use
+	 */
+
+	if (alrm->time.tm_year >= 0) {
+		tm.tm_year = alrm->time.tm_year + LIN2HW_EPOCH;
+		/* year must be in range */
+		if ((unsigned int)tm.tm_year > HW_YEARS)
+			return -EINVAL;
+		tm.tm_year |= PDC_RTC_AYEAR_EN;
+	} else
+		tm.tm_year = 0;
+
+	if (alrm->time.tm_mon >= 0)
+		tm.tm_mon = (alrm->time.tm_mon + 1) | PDC_RTC_AMON_EN;
+	else if (tm.tm_year & PDC_RTC_AYEAR_EN)
+		return -EINVAL;
+	else
+		tm.tm_mon = 0;
+
+	if (alrm->time.tm_mday >= 0)
+		tm.tm_mday = alrm->time.tm_mday | PDC_RTC_ADAY_EN;
+	else if (tm.tm_mon & PDC_RTC_AMON_EN)
+		return -EINVAL;
+	else
+		tm.tm_mday = 0;
+
+	if (alrm->time.tm_hour >= 0)
+		tm.tm_hour = alrm->time.tm_hour | PDC_RTC_AHOUR_EN;
+	else if (tm.tm_mday & PDC_RTC_ADAY_EN)
+		return -EINVAL;
+	else
+		tm.tm_hour = 0;
+
+	if (alrm->time.tm_min >= 0)
+		tm.tm_min = alrm->time.tm_min | PDC_RTC_AMIN_EN;
+	else if (tm.tm_hour & PDC_RTC_AHOUR_EN)
+		return -EINVAL;
+	else
+		tm.tm_min = 0;
+
+	if (alrm->time.tm_sec >= 0)
+		tm.tm_sec = alrm->time.tm_sec | PDC_RTC_ASEC_EN;
+	else if (tm.tm_min & PDC_RTC_AMIN_EN)
+		return -EINVAL;
+	else
+		tm.tm_sec = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	pdc_rtc_write(priv, PDC_RTC_ASEC,  tm.tm_sec);
+	pdc_rtc_write(priv, PDC_RTC_AMIN,  tm.tm_min);
+	pdc_rtc_write(priv, PDC_RTC_AHOUR, tm.tm_hour);
+	pdc_rtc_write(priv, PDC_RTC_ADAY,  tm.tm_mday);
+	pdc_rtc_write(priv, PDC_RTC_AMON,  tm.tm_mon);
+	pdc_rtc_write(priv, PDC_RTC_AYEAR, tm.tm_year);
+	priv->alarm_pending = 0;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* re-enable the alarm if applicable */
+	if (alrm->enabled) {
+		pdc_rtc_start_alarm(priv);
+
+		/*
+		 * Check that setting the alarm didn't lose the race against the
+		 * next clock tick (which may be the one we're trying to set the
+		 * alarm on). It may be that the interrupt just hasn't been
+		 * handled yet (e.g. on another CPU), but it does no harm to
+		 * handle it here instead.
+		 */
+		spin_lock_irqsave(&priv->lock, flags);
+		if (!priv->alarm_pending) {
+			pdc_rtc_read_time(priv->dev, &tm);
+			rtc_tm_to_time(&tm, &now);
+			/* If it's too late, immediately trigger the alarm */
+			if (scheduled <= now) {
+				_pdc_rtc_stop_alarm(priv, now);
+				dev_dbg(priv->dev,
+					"alarm set race lost, triggering immediately\n");
+				spin_unlock_irqrestore(&priv->lock, flags);
+				return -ETIME;
+			}
+			/*
+			 * If we've missed the window of oportunity to set the
+			 * alarm interrupt, we need to reconsider.
+			 */
+			if (adjusted <= now) {
+				dev_dbg(priv->dev,
+					"alarm set race lost, retrying\n");
+				goto try_again_locked;
+			}
+		}
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+
+	return 0;
+}
+
+/* Re-program the alarm for a new scheduled time (in seconds). */
+static int pdc_rtc_adjust_alarm_time(struct pdc_rtc_priv *priv,
+				     unsigned long scheduled)
+{
+	struct rtc_wkalrm wkalrm = {
+		.enabled = 1,
+		.pending = 0,
+	};
+
+	rtc_time_to_tm(scheduled, &wkalrm.time);
+
+	return _pdc_rtc_set_alarm(priv, true, scheduled, &wkalrm);
+}
+
+/* RTC class callback: program the alarm described by @alrm. */
+static int pdc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long secs;
+
+	dev_dbg(priv->dev, "alarm set %02d:%02d:%02d\n",
+		alrm->time.tm_hour, alrm->time.tm_min, alrm->time.tm_sec);
+
+	rtc_tm_to_time(&alrm->time, &secs);
+
+	return _pdc_rtc_set_alarm(priv, false, secs, alrm);
+}
+
+/* RTC class callback: turn the alarm interrupt on or off. */
+static int pdc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+
+	dev_dbg(priv->dev, "alarm irq enable %d\n",
+		enabled);
+
+	if (!enabled)
+		pdc_rtc_stop_alarm(priv);
+	else
+		pdc_rtc_start_alarm(priv);
+
+	return 0;
+}
+
+/*
+ * RTC class operations. rtc_device_register() takes a const ops pointer,
+ * and the table is never modified, so keep it in rodata.
+ */
+static const struct rtc_class_ops pdc_rtc_ops = {
+	.read_time		= pdc_rtc_read_time,
+	.set_time		= pdc_rtc_set_time,
+	.read_alarm		= pdc_rtc_read_alarm,
+	.set_alarm		= pdc_rtc_set_alarm,
+	.alarm_irq_enable	= pdc_rtc_alarm_irq_enable,
+};
+
+/*
+ * RTC interrupt handler.
+ *
+ * Handles the hardware alarm interrupt and, when alarm_irq_delay is in
+ * use, the per-second interrupt used to emulate alarms whose adjusted
+ * hardware time has already passed (softalrm_sec is set up by
+ * _pdc_rtc_set_alarm). Spurious/echoed alarm interrupts after an alarm
+ * has been cancelled are filtered out using alrm_time and hardstop_time.
+ * Reports RTC_AF to the RTC core via rtc_update_irq() when a genuine
+ * alarm fired.
+ */
+static irqreturn_t pdc_rtc_isr(int irq, void *dev_id)
+{
+	struct pdc_rtc_priv *priv = dev_id;
+	unsigned int status;
+	unsigned long events = RTC_IRQF;
+	unsigned long flags;
+	struct rtc_time tm;
+	unsigned long now = 0;
+	u32 sec;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	status = pdc_rtc_read(priv, PDC_RTC_IRQ_STATUS);
+	pdc_rtc_write(priv, PDC_RTC_IRQ_CLEAR, status);
+
+	/* ignore delayed alarm interrupts after turned alarm off */
+	if (pdc_rtc_alarm_enabled(priv)) {
+		if (status & PDC_RTC_IRQ_ALARM) {
+			/*
+			 * Alarm interrupt.
+			 * If an alarm was cancelled, we may still get the
+			 * delayed interrupt, so we need to check the current
+			 * time has actually exceeded the alarm time.
+			 * Of course if we've corrected the time we may also get
+			 * a legitimate interrupt early, so we use hardstop_time
+			 * to check whether the alarm could be the second after
+			 * we've disabled it, which would indicate an echo.
+			 */
+			_pdc_rtc_read_time(priv, &tm);
+			rtc_tm_to_time(&tm, &now);
+			if (now >= priv->alrm_time) {
+				events |= RTC_AF;
+				dev_dbg(priv->dev, "isr alarm %02d:%02d:%02d\n",
+					tm.tm_hour, tm.tm_min, tm.tm_sec);
+			} else if (now > priv->hardstop_time &&
+				   now <= priv->hardstop_time + 2) {
+				dev_dbg(priv->dev,
+					"isr alarm %02d:%02d:%02d ignored (echo %lu disabled %lu)\n",
+					tm.tm_hour, tm.tm_min, tm.tm_sec,
+					now, priv->hardstop_time);
+			} else {
+				/*
+				 * It looks like a legitimate interrupt since we
+				 * haven't just cancelled an alarm.
+				 */
+				events |= RTC_AF;
+				dev_dbg(priv->dev,
+					"isr alarm %02d:%02d:%02d (early %lu < %lu)\n",
+					tm.tm_hour, tm.tm_min, tm.tm_sec,
+					now, priv->alrm_time);
+			}
+		} else if (status & PDC_RTC_IRQ_SEC) {
+			/*
+			 * Secondly interrupt.
+			 * Check if the current second matches the soft alarm.
+			 */
+			sec = pdc_rtc_read(priv, PDC_RTC_SEC);
+			if (priv->softalrm_sec == sec)
+				events |= RTC_AF;
+			dev_dbg(priv->dev,
+				"isr second %02d:%02d:%02d (compare %02d)\n",
+				pdc_rtc_read(priv, PDC_RTC_HOUR),
+				pdc_rtc_read(priv, PDC_RTC_MIN),
+				sec, priv->softalrm_sec);
+		}
+
+		/* rtc alarms are one-shot */
+		priv->hardstop_time = 0;
+		if (events & RTC_AF) {
+			if (!now && priv->alarm_irq_delay) {
+				pdc_rtc_read_time(priv->dev, &tm);
+				rtc_tm_to_time(&tm, &now);
+			}
+			priv->alarm_pending = 1;
+			_pdc_rtc_stop_alarm(priv, now);
+		}
+	} else {
+		/* make absolutely sure that the alarm is properly stopped */
+		if (priv->alarm_irq_delay) {
+			pdc_rtc_read_time(priv->dev, &tm);
+			rtc_tm_to_time(&tm, &now);
+		}
+		_pdc_rtc_stop_alarm(priv, now);
+		dev_dbg(priv->dev,
+			"isr irq %#x ignored (alarm disabled)\n",
+			status);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* only notify the core if we actually decided an event occurred */
+	if (events != RTC_IRQF)
+		rtc_update_irq(priv->rtc_dev, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+/* Non-volatile registers available to the RTC */
+
+/* The last time when the RTC was adjusted and should have been correct */
+#define PDC_RTC_SRPROT_LASTTIME		0x0
+/*
+ * Sub-second accumulated skew due to the changes in clock frequency.
+ * Fixed point number with shift of PDC_RTC_SWPROT_SKEW_BITS.
+ * I.e. best_time = rtctime + skew
+ * Must be in the range -0.5 <= X < 0.5.
+ */
+#define PDC_RTC_SRPROT_SKEW		0x4
+#define PDC_RTC_SWPROT_SKEW_BITS	15
+
+/*
+ * 32kHz clock frequency change handler.
+ *
+ * Rescales the wall-clock time elapsed since the last recorded change
+ * (PDC_RTC_SRPROT_LASTTIME) using the old frequency, carrying sub-second
+ * error in the fixed-point skew value (PDC_RTC_SRPROT_SKEW, shift of
+ * PDC_RTC_SWPROT_SKEW_BITS), then rescales and re-programs any enabled
+ * alarm so it fires after the intended real-time interval. Finally the
+ * new "now" and skew are written back to non-volatile storage.
+ */
+static void pdc_rtc_change_frequency(struct pdc_rtc_priv *priv,
+				     struct clk32k_change_freq *change)
+{
+	unsigned long last_time;
+	long skew, half;
+	int skew_err, alrm_err, alrm_en;
+	unsigned long now, adj;
+	unsigned long orig_alrm_time, adj_alrm_time, alrm_time;
+	unsigned long diff;
+	u64 diff64;
+	struct rtc_time tm;
+	unsigned long flags;
+
+	dev_dbg(priv->dev, "clk changed %lu HZ -> %lu HZ\n",
+		change->old_freq, change->new_freq);
+
+	/* adjust time since last recorded time */
+
+	if (pdc_rtc_read_nonvolatile(priv, PDC_RTC_SRPROT_LASTTIME, &last_time))
+		return;
+
+	skew_err = pdc_rtc_read_nonvolatile(priv, PDC_RTC_SRPROT_SKEW,
+					    (unsigned long *)&skew);
+	/* sanity check range of skew: must satisfy -0.5 <= skew < 0.5 */
+	half = 1 << (PDC_RTC_SWPROT_SKEW_BITS - 1);
+	if (!skew_err && (skew >= half ||
+			  skew < -half))
+		skew = 0;
+
+	dev_dbg(priv->dev, "last_time = %lx, skew = %lx (%d)\n",
+		last_time, skew, skew_err);
+
+	pdc_rtc_read_time(priv->dev, &tm);
+	rtc_tm_to_time(&tm, &now);
+
+	if (last_time && change->old_freq != CLK32K_DESIRED_FREQUENCY) {
+		diff = now - last_time;
+		if ((long)diff > 0) {
+			dev_dbg(priv->dev, "%#lx seconds since last change\n",
+				diff);
+			/* fixed point (using shift of SKEW_BITS) */
+			diff64 = div_u64((u64)(CLK32K_DESIRED_FREQUENCY
+					       << PDC_RTC_SWPROT_SKEW_BITS)
+					 * diff, change->old_freq);
+			/* add 0.5 so we round to closest */
+			diff64 += half;
+			if (!skew_err) {
+				/* adjust using the accumulated clock skew */
+				diff64 += skew;
+				/*
+				 * Update the skew from rounding the time to the
+				 * nearest second.
+				 */
+				skew = diff64 &
+					((1 << PDC_RTC_SWPROT_SKEW_BITS) - 1);
+				skew -= half;
+			}
+			diff = diff64 >> PDC_RTC_SWPROT_SKEW_BITS;
+			adj = last_time + diff;
+			dev_dbg(priv->dev, "scaled to %#lx seconds\n",
+				diff);
+			if (adj != now) {
+				rtc_time_to_tm(adj, &tm);
+				pdc_rtc_set_time(priv->dev, &tm);
+				now = adj;
+			}
+		}
+	}
+
+	/* adjust the alarm: snapshot alarm state under the lock */
+	spin_lock_irqsave(&priv->lock, flags);
+	orig_alrm_time = priv->alrm_time;
+	adj_alrm_time = priv->adj_alrm_time;
+	alrm_en = pdc_rtc_alarm_enabled(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (orig_alrm_time && alrm_en) {
+		if (change->new_freq == CLK32K_DESIRED_FREQUENCY) {
+			dev_dbg(priv->dev, "%#lx seconds until alarm\n",
+				orig_alrm_time - now);
+			alrm_time = orig_alrm_time;
+		} else {
+			diff = orig_alrm_time - now;
+			dev_dbg(priv->dev, "%#lx seconds until alarm\n",
+				diff);
+			/* scale time until alarm, rounding to closest second */
+			diff = (2 * diff * change->new_freq + 1)
+				/ (2 * CLK32K_DESIRED_FREQUENCY);
+			alrm_time = now + diff;
+			dev_dbg(priv->dev, "scaled to %#lx seconds\n",
+				diff);
+		}
+
+		if (adj_alrm_time != alrm_time) {
+			alrm_err = pdc_rtc_adjust_alarm_time(priv, alrm_time);
+			if (alrm_err == -ETIME) {
+				/* alarm has already expired, trigger */
+				dev_dbg(priv->dev,
+					"alarm expired during adjustment\n");
+				priv->alarm_pending = 1;
+				/* if suspended, postpone event until resume */
+				if (priv->suspended)
+					priv->postponed_rtc_int =
+							RTC_IRQF | RTC_AF;
+				else
+					rtc_update_irq(priv->rtc_dev, 1,
+						       RTC_IRQF | RTC_AF);
+			}
+		}
+	}
+
+	dev_dbg(priv->dev, "writing now = %lx, skew = %lx (%d)\n",
+		now, skew, skew_err);
+
+	pdc_rtc_write_nonvolatile(priv, PDC_RTC_SRPROT_LASTTIME, now);
+	if (!skew_err)
+		pdc_rtc_write_nonvolatile(priv, PDC_RTC_SRPROT_SKEW, skew);
+}
+
+/* clk32k notifier callback: react to 32kHz clock frequency changes. */
+static int pdc_rtc_clk_notify(struct notifier_block *self, unsigned long action,
+			      void *data)
+{
+	struct pdc_rtc_priv *priv = container_of(self, struct pdc_rtc_priv,
+						 clk_nb);
+
+	if (action == CLK32K_CHANGE_FREQUENCY)
+		pdc_rtc_change_frequency(priv, data);
+
+	return NOTIFY_OK;
+}
+
+/* Enable the alarm interrupt and make sure the RTC clock is running. */
+static int pdc_rtc_setup(struct pdc_rtc_priv *priv)
+{
+	unsigned long flags;
+
+	/* alarm interrupt only for now */
+	pdc_rtc_write(priv, PDC_RTC_IRQ_EN, PDC_RTC_IRQ_ALARM);
+
+	/* set the clock enable bit under the lock */
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->control_reg |= PDC_RTC_CONTROL_CE;
+	pdc_rtc_write(priv, PDC_RTC_CONTROL, priv->control_reg);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Probe: map the register banks, request the IRQ, register the clk32k
+ * notifier and the RTC class device, then enable the RTC clock and alarm
+ * interrupt. All resources except the notifier and RTC device are
+ * devm-managed, so the error path only has to undo those two.
+ */
+static int pdc_rtc_probe(struct platform_device *pdev)
+{
+	struct pdc_rtc_priv *priv;
+	struct resource *res_regs;
+	struct resource *res_nonvolatile;
+	struct device_node *node = pdev->dev.of_node;
+	int irq, ret, error;
+	u32 val;
+
+	if (!node)
+		return -ENOENT;
+
+	/* Get resources from platform device */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ resource\n");
+		error = irq;
+		goto err_out;
+	}
+
+	res_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		error = -ENOENT;
+		goto err_out;
+	}
+
+	res_nonvolatile = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						       "nonvolatile");
+	/* nonvolatile registers are optional */
+
+	/* Private driver data */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		error = -ENOMEM;
+		goto err_out;
+	}
+	spin_lock_init(&priv->lock);
+	platform_set_drvdata(pdev, priv);
+	priv->dev = &pdev->dev;
+
+	/* Get devicetree properties (both optional, default 0) */
+	ret = of_property_read_u32(node, "time-set-delay", &val);
+	if (!ret)
+		priv->time_set_delay = val;
+	ret = of_property_read_u32(node, "alarm-irq-delay", &val);
+	if (!ret)
+		priv->alarm_irq_delay = val;
+
+	/*
+	 * Ioremap the registers. Use resource_size() - resource ends are
+	 * inclusive, so (end - start) would drop the final byte.
+	 */
+	priv->reg_base = devm_ioremap(&pdev->dev, res_regs->start,
+				      resource_size(res_regs));
+	if (!priv->reg_base) {
+		dev_err(&pdev->dev,
+			"cannot ioremap registers\n");
+		error = -EIO;
+		goto err_out;
+	}
+
+	/* Ioremap the non-volatile registers if available */
+	if (res_nonvolatile) {
+		priv->nonvolatile_len = resource_size(res_nonvolatile);
+		priv->nonvolatile_base = devm_ioremap(&pdev->dev,
+						      res_nonvolatile->start,
+						      priv->nonvolatile_len);
+		if (!priv->nonvolatile_base) {
+			dev_err(&pdev->dev,
+				"cannot ioremap nonvolatile registers\n");
+			error = -EIO;
+			goto err_out;
+		}
+	}
+
+	/* disable interrupts */
+	pdc_rtc_write(priv, PDC_RTC_IRQ_EN, 0);
+
+	priv->softalrm_sec = -1;
+	priv->irq = irq;
+	error = devm_request_irq(&pdev->dev, priv->irq, pdc_rtc_isr, 0,
+				 "pdc-rtc", priv);
+	if (error) {
+		dev_err(&pdev->dev, "cannot register IRQ %u\n",
+			priv->irq);
+		/* propagate the real error rather than clobbering it */
+		goto err_out;
+	}
+
+	if (clk32k_bootfreq) {
+		/* Compensate for boot time frequency */
+		struct clk32k_change_freq change;
+		change.old_freq = clk32k_bootfreq;
+		change.new_freq = CLK32K_DESIRED_FREQUENCY;
+		pdc_rtc_change_frequency(priv, &change);
+	}
+
+	/* Register a clock notifier */
+	priv->clk_nb.notifier_call = pdc_rtc_clk_notify;
+	clk32k_register_notify(&priv->clk_nb);
+
+	/* Register our RTC with the RTC framework */
+	device_init_wakeup(&pdev->dev, 1);
+	priv->rtc_dev = rtc_device_register(pdev->name, &pdev->dev,
+					    &pdc_rtc_ops,
+					    THIS_MODULE);
+	if (IS_ERR(priv->rtc_dev)) {
+		error = PTR_ERR(priv->rtc_dev);
+		goto err_rtc;
+	}
+
+	pdc_rtc_setup(priv);
+
+	return 0;
+
+err_rtc:
+	clk32k_unregister_notify(&priv->clk_nb);
+err_out:
+	return error;
+}
+
+/* Tear down what probe registered; everything else is devm-managed. */
+static int pdc_rtc_remove(struct platform_device *pdev)
+{
+	struct pdc_rtc_priv *priv = platform_get_drvdata(pdev);
+
+	/* stop listening for clock changes before the device goes away */
+	clk32k_unregister_notify(&priv->clk_nb);
+	rtc_device_unregister(priv->rtc_dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We use noirq callbacks because an ISR after the normal callbacks can clear
+ * the secondly interrupt which would then get restored on resume and keep
+ * firing.
+ */
+static int pdc_rtc_suspend_noirq(struct device *dev)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned int irq_en;
+
+	priv->suspended = true;
+	/* only wake if the alarm is enabled */
+	if (device_may_wakeup(dev) && pdc_rtc_alarm_enabled(priv)) {
+		/* disable interrupts other than the alarm */
+		irq_en = priv->last_irq_en = pdc_rtc_read(priv, PDC_RTC_IRQ_EN);
+		irq_en &= PDC_RTC_IRQ_ALARM;
+		pdc_rtc_write(priv, PDC_RTC_IRQ_EN, irq_en);
+		/* and enable wakeup on the interrupt */
+		enable_irq_wake(priv->irq);
+		priv->wakeup = true;
+	}
+	return 0;
+}
+
+static int pdc_rtc_resume_noirq(struct device *dev)
+{
+	struct pdc_rtc_priv *priv = dev_get_drvdata(dev);
+
+	if (priv->wakeup) {
+		/* disable wakeup */
+		disable_irq_wake(priv->irq);
+		/* and restore the previous interrupt enable bits */
+		pdc_rtc_write(priv, PDC_RTC_IRQ_EN, priv->last_irq_en);
+		priv->wakeup = false;
+	}
+	/* submit any postponed rtc interrupt */
+	priv->suspended = false;
+	if (priv->postponed_rtc_int) {
+		dev_dbg(priv->dev,
+			"submitting postponed rtc interrupt %x\n",
+			priv->postponed_rtc_int);
+		rtc_update_irq(priv->rtc_dev, 1, priv->postponed_rtc_int);
+		priv->postponed_rtc_int = 0;
+	}
+	return 0;
+}
+/*
+ * No #else stubs needed: the previous pdc_rtc_suspend/pdc_rtc_resume NULL
+ * defines named callbacks that don't exist and were never referenced -
+ * pdc_rtc_pmops only uses the _noirq names inside CONFIG_PM_SLEEP.
+ */
+#endif	/* CONFIG_PM_SLEEP */
+
+/*
+ * PM ops table: noirq variants only (see the comment above
+ * pdc_rtc_suspend_noirq); empty when CONFIG_PM_SLEEP is disabled.
+ */
+static const struct dev_pm_ops pdc_rtc_pmops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend_noirq = pdc_rtc_suspend_noirq,
+	.resume_noirq = pdc_rtc_resume_noirq,
+	.freeze_noirq = pdc_rtc_suspend_noirq,
+	.thaw_noirq = pdc_rtc_resume_noirq,
+	.poweroff_noirq = pdc_rtc_suspend_noirq,
+	.restore_noirq = pdc_rtc_resume_noirq,
+#endif
+};
+
+/* Devicetree match table: binds to "img,pdc-rtc" nodes. */
+static const struct of_device_id pdc_rtc_match[] = {
+	{ .compatible = "img,pdc-rtc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pdc_rtc_match);
+
+/* Platform driver glue. */
+static struct platform_driver pdc_rtc_driver = {
+	.driver = {
+		.name = "pdc-rtc",
+		.owner	= THIS_MODULE,
+		.of_match_table	= pdc_rtc_match,
+		.pm = &pdc_rtc_pmops,
+	},
+	.probe = pdc_rtc_probe,
+	.remove = pdc_rtc_remove,
+};
+
+module_platform_driver(pdc_rtc_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec PowerDown Controller RTC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 92a9345..9863d78 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -189,6 +189,12 @@
 	  GPIO operations, you should be able to leverage that for better
 	  speed with a custom version of this driver; see the source code.
 
+config SPI_IMG
+	tristate "IMG SPI controller"
+	depends on HAVE_CLK && SOC_TZ1090
+	help
+	  This enables using the IMG SPI controller in master mode.
+
 config SPI_IMX
 	tristate "Freescale i.MX SPI controllers"
 	depends on ARCH_MXC
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 33f9c09..0a3e2a3 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_SPI_FSL_ESPI)		+= spi-fsl-espi.o
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
+obj-$(CONFIG_SPI_IMG)			+= spi-img.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
diff --git a/drivers/spi/spi-img.c b/drivers/spi/spi-img.c
new file mode 100644
index 0000000..79012f5
--- /dev/null
+++ b/drivers/spi/spi-img.c
@@ -0,0 +1,1412 @@
+/*
+ * IMG SPI controller driver (master mode only)
+ *
+ * Driver for IMGLIB SPI Controller
+ *
+ * Author: Imagination Technologies Ltd.
+ * Copyright: 2007, 2008, 2013 Imagination Technologies Ltd.
+ *
+ * Based on spi_bfin5xx.c from Analog Devices Inc.
+ *
+ * This program is free software ;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ;  either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ;  without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi_img.h>
+#include <linux/img_mdc_dma.h>
+
+#define SPI_FREQ_MAX            ((24.576 * 0xff) / 512)
+#define SPI_FREQ_MIN            ((24.576 * 0x01) / 512)
+
+/*
+ * The FIFO is 16 bytes deep. If we can fit the transfer in the FIFO
+ * it's more efficient to do that and avoid the overhead of setting
+ * up DMA.
+ *
+ *
+ * NOTE: This is set to zero to disable PIO for now. It works with mmc but not
+ * the Marvell 88W8686 wifi chip for reasons that are unknown at present.
+ */
+#define DMA_MIN_SIZE		0 /* 16 if it worked for all devices */
+
+#if defined(CONFIG_META_DMA_CONTROLLER)
+
+/* BURST SIZE is in bytes for MDC */
+#define BURST_SIZE		8 /*Fifo is 16 bytes deep so burst this many*/
+#define BURST_BYTES		(BURST_SIZE*8)
+
+#define BURST_MASK              (~(BURST_BYTES - 1))
+
+#else /* DMAC */
+
+/* We have to work around a hardware bug. When DMAing from external memory to
+ * the SPI controller the DMAC creates a byte mask for the first and last
+ * transfers. Unfortunately it uses the end byte mask for every burst, not just
+ * the final burst. The workaround is to make sure we only DMA whole bursts
+ * (so the end byte mask is all 1s) or single bursts if we're transferring
+ * less than a whole burst.
+ *
+ * The burst size is defined in bus-width terms so 1 is a 64-bit burst.
+ */
+#define BURST_SIZE              1
+#define BURST_BYTES             (BURST_SIZE*8)
+#define BURST_MASK              (~(BURST_BYTES - 1))
+
+/* The DMAC also cannot do DMA from unaligned buffers thus we copy the data to
+ * a dma-able bounce buffer first.
+ */
+#define NEED_BOUNCE_BUFFERS	1
+
+#endif
+
+/* SPI - Device Registers */
+#define SPI_DEV0_REG            0x000   /* Device 0*/
+#define SPI_DEV1_REG            0x004   /* Device 1*/
+#define SPI_DEV2_REG            0x008   /* Device 2*/
+#define SPI_MODE_REG            0x010
+#define SPI_TRANS_REG           0x00C   /* transaction parameters */
+#define DMA_SPIO_SENDDAT        0x014
+#define DMA_SPII_GETDAT         0x018
+#define DMA_SPIO_INT_STAT       0x01C
+#define DMA_SPIO_INT_EN         0x020
+#define DMA_SPIO_INT_CL         0x024
+#define DMA_SPII_INT_STAT       0x028
+#define DMA_SPII_INT_EN         0x02C
+#define DMA_SPII_INT_CL         0x030
+#define SPI_DI_STATUS           0x034
+
+#define SPI_TRANS_REG_CONT_BIT  0x08000000
+#define SPI_TRANS_RESET_BIT     0x04000000
+#define SPI_TRANS_REG_GDMA_BIT  0x02000000
+#define SPI_TRANS_REG_SDMA_BIT  0x01000000
+
+#define SPI_SDTRIG_EN           0x1
+
+#define SPI_GDTRIG		0x1
+#define SPI_ALLDONE_TRIG	0x10
+
+#define SPI_WRITE_INT_MASK      0x1f
+#define SPI_READ_INT_MASK       0x1f
+
+#define START_STATE   ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE    ((void *)2)
+#define ERROR_STATE   ((void *)-1)
+
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
+
+#define spi_readb(dd, reg)		readb(dd->regs_base + reg)
+#define spi_writeb(val, dd, reg)	writeb(val, dd->regs_base + reg)
+#define spi_readl(dd, reg)		readl(dd->regs_base + reg)
+#define spi_writel(val, dd, reg)	writel(val, dd->regs_base + reg)
+
+/* Per-controller driver state. */
+struct driver_data {
+	/* Driver model hookup */
+	struct platform_device *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* Regs base of SPI controller */
+	void __iomem *regs_base;
+	unsigned int periph_base;
+
+	/* Clocks */
+	struct clk *clk;
+
+	struct img_spi_master *master_info;
+
+	/* Driver message queue */
+	struct workqueue_struct *workqueue;
+	struct work_struct pump_messages;
+	spinlock_t lock;
+	struct list_head queue;
+	int busy;
+	int run;
+
+	/* Message Transfer pump */
+	struct tasklet_struct pump_transfers;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	struct spi_transfer *cur_transfer;
+	struct chip_data *cur_chip;
+
+	/* Length of the current DMA */
+	size_t len;
+
+	/* Length of any subsequent DMA needed to clean up after the bug
+	 * mentioned above.
+	 */
+	size_t tail_len;
+
+	/* Total length of the transfer */
+	size_t map_len;
+
+	/* Virtual addresses of the current transfer buffers. */
+	void *tx;
+	void *rx;
+
+	/* DMA channels */
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+
+	/* DMA mapped buffers of the current transfer. */
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+
+	/* Bounce buffers */
+	void *rx_buf;
+	void *tx_buf;
+
+	dma_addr_t rx_dma_start;
+	dma_addr_t tx_dma_start;
+
+	int read_irq;
+
+	/* This flag is set if this is the last transfer in the current
+	 * message.
+	 */
+	int last_transfer;
+	int cs_change;
+
+#ifdef CONFIG_PM_SLEEP
+	/* Suspend data */
+	u32 modereg;
+#endif
+
+	/*
+	 * Set if we had to dma map the buffers provided to us.
+	 * Unsigned: a signed 1-bit bitfield can only hold 0 and -1, so
+	 * assigning 1 to it is surprising (reads back as -1).
+	 */
+	unsigned int tx_mapped_by_us:1;
+	unsigned int rx_mapped_by_us:1;
+};
+
+/* Per-device (per chip select) configuration used by setup_spi_mode()
+ * and write_spi_param(). */
+struct chip_data {
+	u8 clk_pol;		/* clock polarity bit for the mode nibble */
+	u8 clk_pha;		/* clock phase bit for the mode nibble */
+
+	u8 clk_div;		/* serial clock divider register value */
+	u8 cs_setup;
+	u8 cs_hold;
+	u8 cs_delay;
+
+	u8 cs_high;		/* non-zero: chip select treated active-high */
+
+	u8 chip_select_num;	/* device index 0-2 (SPI_DEV0..2_REG) */
+
+	u8 bits_per_word;
+};
+
+/*
+ * Perform a byte-swap operation on a buffer, grouped by 16-bit words.
+ * @len is the number of 16-bit words, not bytes. Implemented with
+ * cpu_to_be16(), so this swaps on little-endian CPUs and is an identity
+ * on big-endian ones.
+ */
+static void byte_swap(u16 *buf, int len)
+{
+	u16 *pos;
+	for (pos = buf; pos < buf + len; pos++)
+		*pos = cpu_to_be16(*pos);
+}
+
+/*
+ * Convert a requested SPI clock rate in Hz to the 8-bit divider register
+ * value, where Fout = (Fin * reg / 512).
+ */
+static u8 hz_to_clk_div(struct driver_data *drv_data, u32 speed_hz)
+{
+	unsigned int step = clk_get_rate(drv_data->clk) / 512;
+	unsigned int div = min_t(unsigned int, speed_hz / step, 0xffU);
+
+	/* 0 is invalid (we get no clock), so clamp to the slowest rate */
+	if (!div)
+		div = 1;
+
+	return div;
+}
+
+/* Program the 4-bit mode nibble for this device's chip select slot. */
+static void setup_spi_mode(struct driver_data *drv_data,
+			   struct chip_data *chip)
+{
+	unsigned int shift = chip->chip_select_num * 4;
+	unsigned int mode;
+	unsigned int val;
+
+	/* Data and chip select idle high. */
+	mode = (chip->clk_pha << 3) | (chip->clk_pol << 1) | 0x5;
+
+	val = spi_readl(drv_data, SPI_MODE_REG);
+	val &= ~(0xf << shift);
+	val |= mode << shift;
+	spi_writel(val, drv_data, SPI_MODE_REG);
+}
+
+/* Write the timing parameter word for chip select @cs (0-2 only). */
+static void write_spi_param(struct driver_data *drv_data, u8 cs, u8 clk_div,
+			    u8 cs_setup, u8 cs_hold, u8 cs_delay)
+{
+	unsigned int params = (clk_div << 24) | (cs_setup << 16) |
+			      (cs_hold << 8) | cs_delay;
+
+	switch (cs) {
+	case 0:
+		spi_writel(params, drv_data, SPI_DEV0_REG);
+		break;
+	case 1:
+		spi_writel(params, drv_data, SPI_DEV1_REG);
+		break;
+	case 2:
+		spi_writel(params, drv_data, SPI_DEV2_REG);
+		break;
+	}
+}
+
+/*
+ * Configure the RX or TX DMA channel for byte-wide slave transfers
+ * to/from the SPI data FIFO at @periph_base.
+ */
+static void img_spi_dma_prep_slave(struct driver_data *drv_data,
+				    unsigned int periph_base,
+				    enum dma_transfer_direction direction)
+{
+	/*
+	 * Zero-initialise: fields we don't set below must not be passed to
+	 * dmaengine_slave_config() as stack garbage.
+	 */
+	struct dma_slave_config conf = { 0 };
+	struct dma_chan *chan;
+
+	conf.direction = direction;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		conf.src_addr = periph_base;
+		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		conf.src_maxburst = BURST_SIZE;
+		chan = drv_data->rxchan;
+	} else {
+		conf.dst_addr = periph_base;
+		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		conf.dst_maxburst = BURST_SIZE;
+		chan = drv_data->txchan;
+	}
+
+	dmaengine_slave_config(chan, &conf);
+}
+
+/*
+ * Start a DMA transfer of drv_data->len bytes: build the transaction
+ * word, submit RX then TX slave descriptors on the MDC channels, and
+ * finally write SPI_TRANS_REG to start the hardware state machine.
+ */
+static void start_dma(struct driver_data *drv_data, struct chip_data *chip)
+{
+	unsigned int transaction = 0;
+	unsigned int chip_select = drv_data->cur_chip->chip_select_num;
+	struct dma_async_tx_descriptor *rxdesc, *txdesc;
+	dma_cookie_t rxcookie, txcookie;
+	unsigned int periph_base;
+	struct mdc_dma_tx_control tx_control;
+
+	if (chip->cs_high)
+		chip_select |= 0x1;
+
+	/* Maximum transfer of 4096 bytes requires count of 0 */
+	if (drv_data->len < IMG_SPI_MAX_TRANSFER)
+		transaction |= drv_data->len;
+
+	transaction |= (chip_select << 16);
+
+	/* keep CS asserted if a tail DMA or another transfer follows */
+	if ((drv_data->tail_len) ||
+	    (!drv_data->cs_change && !drv_data->last_transfer))
+		transaction |= SPI_TRANS_REG_CONT_BIT;
+
+	/* Ensure all writes to the tx buffer have completed. */
+	wmb();
+
+	/* access delay = 1 for R/W */
+	tx_control.flags = MDC_ACCESS_DELAY;
+	tx_control.access_delay = 1;
+
+	/* Setup RX */
+	periph_base = (unsigned int)(drv_data->periph_base + DMA_SPII_GETDAT);
+
+	drv_data->rxchan->private = (void *)&tx_control;
+	img_spi_dma_prep_slave(drv_data, periph_base, DMA_DEV_TO_MEM);
+
+	rxdesc = dmaengine_prep_slave_single(drv_data->rxchan, drv_data->rx_dma,
+					   drv_data->len, DMA_DEV_TO_MEM,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+
+	if (!rxdesc) {
+		dev_err(&drv_data->pdev->dev,
+			"Failed to allocate a RX dma descriptor\n");
+		return;
+	}
+
+	rxcookie = dmaengine_submit(rxdesc);
+
+	dma_async_issue_pending(drv_data->rxchan);
+
+	spi_writel(SPI_SDTRIG_EN, drv_data, DMA_SPII_INT_EN);
+	transaction |= SPI_TRANS_REG_GDMA_BIT;
+
+	/* Setup TX */
+	periph_base = (unsigned int)(drv_data->periph_base + DMA_SPIO_SENDDAT);
+	drv_data->txchan->private = (void *)&tx_control;
+
+	img_spi_dma_prep_slave(drv_data, periph_base, DMA_MEM_TO_DEV);
+
+	txdesc = dmaengine_prep_slave_single(drv_data->txchan, drv_data->tx_dma,
+					   drv_data->len, DMA_MEM_TO_DEV,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+
+	if (!txdesc) {
+		dev_err(&drv_data->pdev->dev,
+			"Failed to allocate a TX dma descriptor\n");
+		return;
+	}
+
+	txcookie = dmaengine_submit(txdesc);
+
+	dma_async_issue_pending(drv_data->txchan);
+
+	transaction |= SPI_TRANS_REG_SDMA_BIT;
+
+	dev_dbg(&drv_data->pdev->dev, "Starting SPI Transaction"
+			" Tx Buff 0x%08x "
+			" Rx Buff 0x%08x "
+			" Size %d\n",
+			drv_data->tx_dma, drv_data->rx_dma, drv_data->len);
+
+
+	/* both channels queued - fire the transaction */
+	spi_writel(transaction, drv_data, SPI_TRANS_REG);
+}
+
+/*
+ * Advance to the next transfer in the current message.
+ * Returns RUNNING_STATE if another transfer follows (and updates
+ * cur_transfer/last_transfer), DONE_STATE otherwise.
+ */
+static void *next_transfer(struct driver_data *drv_data)
+{
+	struct spi_message *msg = drv_data->cur_msg;
+	struct spi_transfer *trans = drv_data->cur_transfer;
+	struct spi_transfer *next;
+
+	if (trans->transfer_list.next == &msg->transfers)
+		return DONE_STATE;
+
+	next = list_entry(trans->transfer_list.next,
+			  struct spi_transfer, transfer_list);
+	drv_data->cur_transfer = next;
+	drv_data->last_transfer = list_is_last(&next->transfer_list,
+					       &msg->transfers) ? 1 : 0;
+
+	return RUNNING_STATE;
+}
+
+/* Account for a completed transfer and kick the transfer pump tasklet. */
+static void finished_transfer(struct driver_data *drv_data)
+{
+	struct spi_message *msg = drv_data->cur_msg;
+
+	msg->actual_length += drv_data->map_len;
+
+	/* Move to next transfer */
+	msg->state = next_transfer(drv_data);
+
+	tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/*
+ * Perform a whole transfer by programmed I/O: fill the TX FIFO, start
+ * the transaction, busy-wait for the read trigger, then drain the RX
+ * FIFO. Only suitable for transfers that fit in the 16-byte FIFO.
+ */
+static void start_pio(struct driver_data *drv_data, struct chip_data *chip)
+{
+	unsigned int transaction = 0;
+	unsigned int chip_select = drv_data->cur_chip->chip_select_num;
+	unsigned int i;
+	uint32_t irq_status;
+
+	if (chip->cs_high)
+		chip_select |= 0x1;
+
+	transaction |= drv_data->map_len;
+	transaction |= (chip_select << 16);
+	transaction |= SPI_TRANS_REG_SDMA_BIT;
+	transaction |= SPI_TRANS_REG_GDMA_BIT;
+
+	/* keep CS asserted if another transfer follows in the message */
+	if (!drv_data->cs_change && !drv_data->last_transfer)
+		transaction |= SPI_TRANS_REG_CONT_BIT;
+
+	/* Fill up the FIFO (zero-fill when there is no tx buffer) */
+	for (i = 0; i < drv_data->map_len; i++) {
+		const u8 *write_buf = drv_data->tx;
+		u8 write_byte;
+		if (write_buf)
+			write_byte = write_buf[i];
+		else
+			write_byte = 0x00;
+		spi_writeb(write_byte, drv_data, DMA_SPIO_SENDDAT);
+	}
+
+	/* Start the transaction */
+	spi_writel(transaction, drv_data, SPI_TRANS_REG);
+
+	/* Wait for the data we're going to read to fill the FIFO */
+	irq_status = 0;
+	while (!((irq_status & SPI_GDTRIG) || (irq_status & SPI_ALLDONE_TRIG)))
+		irq_status = spi_readl(drv_data, DMA_SPII_INT_STAT);
+
+	/* Read from FIFO (discard when there is no rx buffer) */
+	for (i = 0; i < drv_data->map_len; i++) {
+		u8 *read_buf = drv_data->rx;
+		u8 read_byte = spi_readb(drv_data, DMA_SPII_GETDAT);
+		if (read_buf)
+			read_buf[i] = read_byte;
+	}
+
+	/* Clear any interrupts we generated */
+	spi_writel(SPI_READ_INT_MASK, drv_data, DMA_SPII_INT_CL);
+
+	finished_transfer(drv_data);
+}
+
+/*
+ * Hand a finished message back to the SPI core. The caller has already
+ * set message->status; dma and pio irqs are blocked at this point.
+ */
+static void giveback(struct driver_data *drv_data)
+{
+	struct spi_message *msg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drv_data->lock, flags);
+	msg = drv_data->cur_msg;
+	drv_data->cur_msg = NULL;
+	drv_data->cur_transfer = NULL;
+	drv_data->cur_chip = NULL;
+	/* restart the message pump for any queued work */
+	queue_work(drv_data->workqueue, &drv_data->pump_messages);
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+
+	msg->state = NULL;
+	if (msg->complete)
+		msg->complete(msg->context);
+}
+
+static irqreturn_t spi_irq(int irq, void *dev_id)
+{
+	struct driver_data *drv_data = dev_id;
+	unsigned int stat;
+
+	stat = spi_readl(drv_data, DMA_SPII_INT_STAT);
+	spi_writel(0, drv_data, DMA_SPII_INT_EN);
+	spi_writel(SPI_READ_INT_MASK, drv_data, DMA_SPII_INT_CL);
+
+	if (!(stat & SPI_GDTRIG) || !drv_data->cur_msg) {
+		dev_err(&drv_data->pdev->dev, "spurious read irq\n");
+		return IRQ_HANDLED;
+	}
+
+	if (drv_data->tail_len) {
+		if (drv_data->rx_dma)
+			drv_data->rx_dma += drv_data->len;
+
+		if (drv_data->tx_dma)
+			drv_data->tx_dma += drv_data->len;
+
+		drv_data->len = drv_data->tail_len;
+
+		drv_data->tail_len = 0;
+
+		start_dma(drv_data, drv_data->cur_chip);
+
+		return IRQ_HANDLED;
+	}
+
+#ifdef NEED_BOUNCE_BUFFERS
+	if (drv_data->rx)
+		memcpy(drv_data->rx, drv_data->rx_buf, drv_data->map_len);
+#else
+
+	if (drv_data->tx_mapped_by_us) {
+
+		dma_unmap_single(&drv_data->pdev->dev, drv_data->tx_dma,
+			drv_data->map_len, DMA_TO_DEVICE);
+
+		drv_data->tx_mapped_by_us = 0;
+
+		/*dev_dbg(&drv_data->pdev->dev, "UnMapped tx address %08x"
+				" size %d\n",
+				(u32)drv_data->tx_dma,
+				drv_data->len);*/
+	}
+	if (drv_data->rx_mapped_by_us) {
+
+		dma_unmap_single(&drv_data->pdev->dev, drv_data->rx_dma,
+			drv_data->map_len, DMA_FROM_DEVICE);
+
+		drv_data->rx_mapped_by_us = 0;
+
+		/*dev_dbg(&drv_data->pdev->dev, "UnMapped rx address %08x"
+				" size %d\n",
+				(u32)drv_data->rx_dma,
+				drv_data->len);*/
+	}
+#endif
+
+	dev_dbg(&drv_data->pdev->dev, "interrupt di status: %#x\n",
+		spi_readl(drv_data, SPI_DI_STATUS));
+
+	/* For a 16bpw transfer, byte swap the rx buffer */
+	if ((drv_data->cur_chip->bits_per_word == 16) && (drv_data->rx)) {
+		wmb();
+		byte_swap((u16 *)drv_data->rx, drv_data->map_len >> 1);
+	}
+
+	/*
+	 * Some drivers (i.e the new libertas patches) rely on the data put
+	 * into the buffer NOT being changed. So if we've changed the data
+	 * (i.e. buffer swapped) change it back.  Because we're half-duplex,
+	 * we can use drv_data->map_len; if we ever became full duplex,
+	 * map_len would need to be separated for tx and rx.
+	 */
+	if ((drv_data->cur_chip->bits_per_word == 16) && (drv_data->tx)) {
+		wmb();
+		byte_swap((u16 *)drv_data->tx, drv_data->map_len >> 1);
+	}
+
+	finished_transfer(drv_data);
+
+	return IRQ_HANDLED;
+}
+
+#ifndef NEED_BOUNCE_BUFFERS
+/* Helper:*/
+static int map_buffers(struct driver_data *drv_data,
+		struct spi_message *message,
+		struct spi_transfer *transfer)
+{
+	if (message->is_dma_mapped) {
+		/* Buffer already has a DMA mapping */
+
+		dev_dbg(&drv_data->pdev->dev, "Buffers pre-mapped\n");
+
+		drv_data->tx_mapped_by_us = 0;
+		drv_data->rx_mapped_by_us = 0;
+
+		if (drv_data->tx) {
+			drv_data->tx_dma = transfer->tx_dma;
+		} else {
+			/* We have to send something - use the bounce buffer
+			   set to zero */
+			memset(drv_data->tx_buf, 0x00, transfer->len);
+			drv_data->tx_dma = drv_data->tx_dma_start;
+		}
+
+		if (drv_data->rx) {
+			drv_data->rx_dma = transfer->rx_dma;
+		} else {  /*no rx buffer we still need to dma data out
+			  of fifo though so use the bounce buffer*/
+			drv_data->rx_dma = drv_data->rx_dma_start;
+		}
+
+	} else {
+		/* We must create a dma mapping for the buffer */
+		if (drv_data->tx) {
+			drv_data->tx_dma = dma_map_single(&drv_data->pdev->dev,
+					drv_data->tx, transfer->len,
+					DMA_TO_DEVICE);
+
+			/*dev_dbg(&drv_data->pdev->dev, "Mapped Tx address %08x"
+					" to %08x size %d\n",
+					(u32)tx_temp,
+					(u32)drv_data->tx_dma,
+					transfer->len);*/
+
+			if (!drv_data->tx_dma) {
+				dev_err(&drv_data->pdev->dev,
+						"Failed to DMA Map Tx Buffer");
+				return -ENOMEM;
+			}
+			drv_data->tx_mapped_by_us = 1;
+		} else {
+			/* we have to send something - use the bounce buffer
+			   set to zero */
+			memset(drv_data->tx_buf, 0x00, transfer->len);
+			drv_data->tx_dma = drv_data->tx_dma_start;
+		}
+
+		if (drv_data->rx) {
+
+			drv_data->rx_dma = dma_map_single(
+					&drv_data->pdev->dev,
+					drv_data->rx,
+					transfer->len,
+					DMA_FROM_DEVICE);
+
+			/*dev_dbg(&drv_data->pdev->dev, "Mapped Rx address %08x"
+					" to %08x size %d\n",
+					(u32)drv_data->rx,
+					(u32)drv_data->rx_dma,
+					transfer->len);*/
+
+			if (!drv_data->rx_dma) {
+				dev_err(&drv_data->pdev->dev,
+					"Failed to DMA Map Rx Buffer");
+
+				return -ENOMEM;
+			}
+			drv_data->rx_mapped_by_us = 1;
+		} else {
+			/*no rx buffer we still need to dma data out
+			  of fifo though so use the bounce buffer*/
+			drv_data->rx_dma = drv_data->rx_dma_start;
+		}
+	}
+	return 0;
+}
+#endif
+
+static void pump_transfers(unsigned long data)
+{
+	struct driver_data *drv_data = (struct driver_data *)data;
+	struct spi_message *message = NULL;
+	struct spi_transfer *transfer = NULL;
+	struct spi_transfer *previous = NULL;
+	struct chip_data *chip = NULL;
+
+	/* Get current state information */
+	message = drv_data->cur_msg;
+	transfer = drv_data->cur_transfer;
+	chip = drv_data->cur_chip;
+
+	/*
+	 * if msg is error or done, report it back using complete() callback
+	 */
+
+	 /* Handle for abort */
+	if (message->state == ERROR_STATE) {
+		message->status = -EIO;
+		giveback(drv_data);
+		return;
+	}
+
+	/* Handle end of message */
+	if (message->state == DONE_STATE) {
+		message->status = 0;
+		giveback(drv_data);
+		return;
+	}
+
+	/* Delay if requested at end of transfer */
+	if (message->state == RUNNING_STATE) {
+		previous = list_entry(transfer->transfer_list.prev,
+				      struct spi_transfer, transfer_list);
+		if (previous->delay_usecs)
+			udelay(previous->delay_usecs);
+	}
+
+	if (transfer->len > IMG_SPI_MAX_TRANSFER) {
+		dev_dbg(&drv_data->pdev->dev, "pump_transfers: transfer "
+			 "length (%d) greater than maximum (%d)\n",
+			 transfer->len, IMG_SPI_MAX_TRANSFER);
+		transfer->len = IMG_SPI_MAX_TRANSFER;
+	}
+
+	if (transfer->len == 0) {
+		dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
+			 "length is zero\n");
+		message->status = -EINVAL;
+		giveback(drv_data);
+		return;
+	}
+
+	/* Kernel headers qualify this as const, so we need to cast away to
+	 * stop compiler warnings */
+	drv_data->tx = (void *)transfer->tx_buf;
+	drv_data->rx = transfer->rx_buf;
+
+	/* Byte swap the tx buffer before it is used in a 16-bit transmission */
+	if ((chip->bits_per_word == 16) && (drv_data->tx)) {
+		byte_swap((u16 *)drv_data->tx, transfer->len >> 1);
+		wmb();
+	}
+
+#ifdef NEED_BOUNCE_BUFFERS
+	/* Copy data to bounce buffer due to DMAC bug */
+	if (transfer->len > DMA_MIN_SIZE) {
+		if (drv_data->tx)
+			memcpy(drv_data->tx_buf, drv_data->tx, transfer->len);
+		else
+			/*send out zeros*/
+			memset(drv_data->tx_buf, 0x00, transfer->len);
+
+		drv_data->rx_dma = drv_data->rx_dma_start;
+		drv_data->tx_dma = drv_data->tx_dma_start;
+
+	}
+
+	if (transfer->len > BURST_BYTES)
+		drv_data->len = transfer->len & BURST_MASK;
+	else
+		drv_data->len = transfer->len;
+
+	drv_data->tail_len = transfer->len - drv_data->len;
+
+#else	/* MDC */
+	/* setup dma mappings for buffers, no need for
+	 * the bounce buffer!
+	 */
+	if (transfer->len > DMA_MIN_SIZE) {
+		int ret = map_buffers(drv_data, message, transfer);
+		if (ret) {
+			message->status = ret;
+			giveback(drv_data);
+			return;
+		}
+		drv_data->len = transfer->len;
+		drv_data->tail_len = 0;
+	}
+#endif
+
+	drv_data->map_len = transfer->len;
+
+	drv_data->cs_change = transfer->cs_change;
+
+	/* Make sure soft reset bit is cleared */
+	spi_writel(0, drv_data, SPI_TRANS_REG);
+
+	/* Change speed per transfer */
+	if (transfer->speed_hz) {
+		write_spi_param(drv_data, chip->chip_select_num,
+				hz_to_clk_div(drv_data, transfer->speed_hz),
+				chip->cs_setup, chip->cs_hold, chip->cs_delay);
+		dev_dbg(&drv_data->pdev->dev, "Setting Clock to %d HZ\n",
+				transfer->speed_hz);
+
+	} else
+		write_spi_param(drv_data, chip->chip_select_num, chip->clk_div,
+				chip->cs_setup, chip->cs_hold, chip->cs_delay);
+
+	message->state = RUNNING_STATE;
+
+	if (transfer->len > DMA_MIN_SIZE)
+		start_dma(drv_data, chip);
+	else
+		start_pio(drv_data, chip);
+
+
+}
+
+
+
+
+/* pop a msg from queue and kick off real transfer */
+static void pump_messages(struct work_struct *work)
+{
+	struct driver_data *drv_data = container_of(work, struct driver_data,
+						    pump_messages);
+	struct spi_transfer *next_trans;
+	unsigned long flags;
+
+	/* Lock queue and check for queue work */
+	spin_lock_irqsave(&drv_data->lock, flags);
+	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
+		/* pumper kicked off but no work to do */
+		drv_data->busy = 0;
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return;
+	}
+
+	/* Make sure we are not already running a message */
+	if (drv_data->cur_msg) {
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return;
+	}
+
+	/* Extract head of queue */
+	drv_data->cur_msg = list_entry(drv_data->queue.next,
+				       struct spi_message, queue);
+	list_del_init(&drv_data->cur_msg->queue);
+
+	/* Initial message state */
+	drv_data->cur_msg->state = START_STATE;
+	next_trans = list_entry(drv_data->cur_msg->transfers.next,
+				struct spi_transfer, transfer_list);
+	drv_data->cur_transfer = next_trans;
+	if (list_is_last(&next_trans->transfer_list,
+			 &drv_data->cur_msg->transfers))
+		drv_data->last_transfer = 1;
+	else
+		drv_data->last_transfer = 0;
+
+	/* Setup the SPI using the per chip configuration */
+	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+	dev_dbg(&drv_data->pdev->dev, "the first transfer len is %d\n",
+		drv_data->cur_transfer->len);
+
+	/* Mark as busy and launch transfers */
+	tasklet_schedule(&drv_data->pump_transfers);
+
+	drv_data->busy = 1;
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue.
+ * And kick off message pumper
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	unsigned long flags;
+
+	spin_lock_irqsave(&drv_data->lock, flags);
+
+	if (drv_data->run == QUEUE_STOPPED) {
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	msg->state = START_STATE;
+
+	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+	list_add_tail(&msg->queue, &drv_data->queue);
+
+	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
+		queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+
+	return 0;
+}
+
+/* first setup for new devices */
+static int setup(struct spi_device *spi)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	struct img_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+
+	/* Zero (the default) here means 8 bits */
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	/* Allow for 8- or 16-bit word */
+	if ((spi->bits_per_word != 8) && (spi->bits_per_word != 16)) {
+		dev_dbg(&spi->dev, "setup: unsupported bits per word %x\n",
+			spi->bits_per_word);
+		return -EINVAL;
+	}
+
+	if ((spi->mode & SPI_CS_HIGH) && (spi->chip_select != 2)) {
+		dev_dbg(&spi->dev,
+			"setup: SPI_CS_HIGH only supported on CS 2\n");
+		return -EINVAL;
+	}
+
+	if (spi->mode & SPI_LSB_FIRST) {
+		dev_dbg(&spi->dev,
+			"setup: LSB first devices are unsupported\n");
+		return -EINVAL;
+	}
+
+	/* Only alloc (or use chip_info) on first setup */
+	chip = spi_get_ctldata(spi);
+	if (chip == NULL) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip)
+			return -ENOMEM;
+
+		chip_info = spi->controller_data;
+	}
+
+	chip->cs_setup = 0xa;  /* 400 ns */
+	chip->cs_hold = 0xa;   /* 400 ns */
+	chip->cs_delay = 0x14; /* 800 ns */
+
+	/* chip_info isn't always needed */
+	if (chip_info) {
+		chip->cs_setup = chip_info->cs_setup;
+		chip->cs_hold = chip_info->cs_hold;
+		chip->cs_delay = chip_info->cs_delay;
+	}
+
+	if (spi->mode & SPI_CPOL)
+		chip->clk_pol = 1;
+	else
+		chip->clk_pol = 0;
+
+	if (spi->mode & SPI_CPHA)
+		chip->clk_pha = 1;
+	else
+		chip->clk_pha = 0;
+
+	if (spi->mode & SPI_CS_HIGH)
+		chip->cs_high = 1;
+	else
+		chip->cs_high = 0;
+
+
+	dev_dbg(&spi->dev, "Setting Clock to %d HZ (Max)\n", spi->max_speed_hz);
+
+	chip->clk_div = hz_to_clk_div(drv_data, spi->max_speed_hz);
+
+	chip->chip_select_num = spi->chip_select;
+
+	chip->bits_per_word = spi->bits_per_word;
+
+	setup_spi_mode(drv_data, chip);
+
+	spi_set_ctldata(spi, chip);
+
+	return 0;
+}
+
+/*
+ * callback for spi framework.
+ * clean driver specific data
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	kfree(chip);
+}
+
+static int init_queue(struct driver_data *drv_data)
+{
+	INIT_LIST_HEAD(&drv_data->queue);
+	spin_lock_init(&drv_data->lock);
+
+	drv_data->run = QUEUE_STOPPED;
+	drv_data->busy = 0;
+
+	/* init transfer tasklet */
+	tasklet_init(&drv_data->pump_transfers,
+		     pump_transfers, (unsigned long)drv_data);
+
+	/* init messages workqueue */
+	INIT_WORK(&drv_data->pump_messages, pump_messages);
+	drv_data->workqueue =
+		create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+	if (drv_data->workqueue == NULL)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int start_queue(struct driver_data *drv_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&drv_data->lock, flags);
+
+	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return -EBUSY;
+	}
+
+	drv_data->run = QUEUE_RUNNING;
+	drv_data->cur_msg = NULL;
+	drv_data->cur_transfer = NULL;
+	drv_data->cur_chip = NULL;
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+
+	queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+	return 0;
+}
+
+static int stop_queue(struct driver_data *drv_data)
+{
+	unsigned long flags;
+	unsigned limit = 500;
+	int status = 0;
+
+	spin_lock_irqsave(&drv_data->lock, flags);
+
+	/*
+	 * This is a bit lame, but is optimized for the common execution path.
+	 * A wait_queue on the drv_data->busy could be used, but then the common
+	 * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message. Do this instead
+	 */
+	drv_data->run = QUEUE_STOPPED;
+	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&drv_data->lock, flags);
+	}
+
+	if (!list_empty(&drv_data->queue) || drv_data->busy)
+		status = -EBUSY;
+
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+
+	return status;
+}
+
+static int destroy_queue(struct driver_data *drv_data)
+{
+	int status;
+
+	status = stop_queue(drv_data);
+	if (status != 0)
+		return status;
+
+	destroy_workqueue(drv_data->workqueue);
+
+	return 0;
+}
+
+static void img_spi_init_hw(struct driver_data *drv_data)
+{
+	/* Reset the SPI controller. */
+	spi_writel(SPI_TRANS_RESET_BIT, drv_data, SPI_TRANS_REG);
+	spi_writel(0, drv_data, SPI_TRANS_REG);
+
+	/* Disable any interrupts that may be enabled. */
+	spi_writel(0, drv_data, DMA_SPIO_INT_EN);
+	spi_writel(0, drv_data, DMA_SPII_INT_EN);
+	spi_writel(SPI_WRITE_INT_MASK, drv_data, DMA_SPIO_INT_CL);
+	spi_writel(SPI_READ_INT_MASK, drv_data, DMA_SPII_INT_CL);
+}
+
+static int img_spi_init_dma(struct platform_device *pdev,
+			    struct driver_data *drv_data,
+			    struct device *dev) {
+	int status;
+
+	drv_data->rxchan = dma_request_slave_channel(&pdev->dev,
+						     "rx");
+
+	if (!drv_data->rxchan) {
+		dev_err(dev, "Failed to get SPI DMA rx channel\n");
+		status = -EBUSY;
+		goto out;
+	}
+
+	drv_data->txchan = dma_request_slave_channel(&pdev->dev,
+						     "tx");
+
+	if (!drv_data->txchan) {
+		dev_err(dev, "Failed to get SPI DMA tx channel\n");
+		goto free_rx;
+	}
+
+	/* Allocate necessary coherent buffers */
+
+	drv_data->rx_buf = dma_alloc_coherent(dev, IMG_SPI_MAX_TRANSFER,
+					      &drv_data->rx_dma_start,
+					      GFP_KERNEL);
+
+	if (!drv_data->rx_buf) {
+		dev_err(dev, "failed to alloc read dma buffer\n");
+		status = -ENOMEM;
+		goto free_tx;
+	}
+
+	drv_data->tx_buf = dma_alloc_coherent(dev, IMG_SPI_MAX_TRANSFER,
+					      &drv_data->tx_dma_start,
+					      GFP_KERNEL);
+
+	if (!drv_data->tx_buf) {
+		dev_err(dev, "failed to alloc write dma buffer\n");
+		status = -ENOMEM;
+		goto free_buf;
+	}
+
+
+	return 0;
+
+free_buf:
+	dma_free_coherent(dev, IMG_SPI_MAX_TRANSFER, drv_data->rx_buf,
+			  drv_data->rx_dma_start);
+free_tx:
+	dma_release_channel(drv_data->txchan);
+free_rx:
+	dma_release_channel(drv_data->rxchan);
+out:
+	return status;
+}
+
+static struct of_device_id img_spi_of_match[] = {
+		{ .compatible = "img,spi", },
+		{},
+};
+MODULE_DEVICE_TABLE(of, img_spi_of_match);
+
+static struct img_spi_master *img_spi_parse_dt(
+		struct platform_device *pdev)
+{
+	struct img_spi_master *pdata;
+	u32 prop;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
+		return NULL;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "num-cs",
+				 &prop) < 0) {
+		dev_err(&pdev->dev,
+			"num-cs not defined\n");
+		goto free_data;
+	}
+
+	pdata->num_chipselect = prop;
+
+	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+				 &prop) < 0)
+		/* Default to 40Mhz */
+		prop = 40000000;
+
+	pdata->clk_rate = prop;
+
+	return pdata;
+
+free_data:
+	kfree(pdata);
+	return NULL;
+}
+
+static int __init img_spi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct img_spi_master *platform_info;
+	struct spi_master *master;
+	struct driver_data *drv_data = NULL;
+	struct resource *irq_resource, *mem_resource;
+	int status = 0;
+	unsigned long clk_rate;
+
+	/* Allocate master with space for drv_data */
+	master = spi_alloc_master(dev, sizeof(struct driver_data));
+	if (!master) {
+		dev_err(&pdev->dev, "cannot alloc spi_master\n");
+		return -ENOMEM;
+	}
+
+	platform_info = img_spi_parse_dt(pdev);
+	if (IS_ERR(platform_info)) {
+		status = PTR_ERR(platform_info);
+		goto out_error_resource;
+	}
+
+	drv_data = spi_master_get_devdata(master);
+	drv_data->master = master;
+	drv_data->master_info = platform_info;
+	drv_data->pdev = pdev;
+
+	/* the spi->mode bits supported by this driver: */
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+	master->num_chipselect = platform_info->num_chipselect;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+	master->dev.of_node = pdev->dev.of_node;
+	master->dma_alignment = 8; /* 64-bit alignment */
+
+	dev_set_drvdata(&pdev->dev, master);
+
+	mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	drv_data->regs_base = devm_request_and_ioremap(&pdev->dev,
+						       mem_resource);
+	if (!drv_data->regs_base) {
+		dev_err(&pdev->dev, "reg not defined\n");
+		status = -ENODEV;
+		goto out_error_resource;
+	}
+
+	drv_data->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(drv_data->clk)) {
+		dev_err(dev, "spi clock not found\n");
+		status = PTR_ERR(drv_data->clk);
+		goto out_error_resource;
+	}
+
+	/* try setting the clock to the requested rate */
+	if (platform_info->clk_rate) {
+		status = clk_set_rate(drv_data->clk, platform_info->clk_rate);
+		clk_rate = clk_get_rate(drv_data->clk);
+		if (clk_rate != platform_info->clk_rate) {
+			dev_warn(dev,
+				 "SPI clock requested: %lu HZ. Actual SPI clock: %lu (status=%d)\n",
+				 platform_info->clk_rate, clk_rate, status);
+		}
+		status = 0;
+	}
+
+	/* try enabling the clock */
+	status = clk_prepare_enable(drv_data->clk);
+	if (status) {
+		dev_err(dev, "SPI clock could not be enabled\n");
+		goto out_error_resource;
+	}
+
+	irq_resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_resource) {
+		dev_err(&pdev->dev, "irq resource not defined\n");
+		goto out_error_resource;
+	}
+	drv_data->read_irq = irq_resource->start;
+
+	/* Initialize and start queue */
+	status = init_queue(drv_data);
+	if (status != 0) {
+		dev_err(&pdev->dev, "problem initializing queue\n");
+		goto out_error_queue_alloc;
+	}
+	status = start_queue(drv_data);
+	if (status != 0) {
+		dev_err(&pdev->dev, "problem starting queue\n");
+		goto out_error_queue_alloc;
+	}
+
+	if (drv_data->regs_base == NULL) {
+		dev_err(dev, "cannot map IO\n");
+		status = -ENXIO;
+		goto out_error_queue_alloc;
+	}
+
+	drv_data->periph_base = (unsigned int)drv_data->regs_base;
+
+	if (img_spi_init_dma(pdev, drv_data, dev) < 0) {
+		dev_err(dev,
+			"Failed to allocated tx/rx DMA channels\n");
+			status = -ENOMEM;
+		goto out_error_queue_alloc;
+	}
+
+	if (request_irq(drv_data->read_irq, spi_irq, 0, "img-spi",
+			drv_data)) {
+		dev_err(&pdev->dev, "failed to get SPI read irq\n");
+		status = -EBUSY;
+		goto out_error_queue_alloc;
+	}
+
+	img_spi_init_hw(drv_data);
+
+	/* Register with the SPI framework */
+	platform_set_drvdata(pdev, drv_data);
+	status = spi_register_master(master);
+	if (status != 0) {
+		dev_err(&pdev->dev, "problem registering spi master\n");
+		goto out_error_read_irq;
+	}
+	dev_dbg(&pdev->dev, "controller probed successfully\n");
+	return status;
+
+out_error_read_irq:
+	free_irq(drv_data->read_irq, drv_data);
+out_error_queue_alloc:
+	destroy_queue(drv_data);
+	clk_disable_unprepare(drv_data->clk);
+out_error_resource:
+	spi_master_put(master);
+	return status;
+}
+
+/* stop hardware and remove the driver */
+static int img_spi_remove(struct platform_device *pdev)
+{
+	struct driver_data *drv_data = platform_get_drvdata(pdev);
+	int status = 0;
+
+	if (!drv_data)
+		return 0;
+
+	/* Remove the queue */
+	status = destroy_queue(drv_data);
+	if (status != 0)
+		return status;
+
+	/* Release DMA */
+	dma_release_channel(drv_data->rxchan);
+	dma_release_channel(drv_data->txchan);
+
+	/* Free irq */
+	free_irq(drv_data->read_irq, drv_data);
+
+	/* Stop the SPI clock */
+	clk_disable_unprepare(drv_data->clk);
+
+	/* Disconnect from the SPI framework */
+	spi_unregister_master(drv_data->master);
+
+	/* Prevent double remove */
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spi_suspend(struct device *dev)
+{
+	struct driver_data *drv_data = dev_get_drvdata(dev);
+	int status = 0;
+
+	status = stop_queue(drv_data);
+	if (status != 0)
+		return status;
+
+	drv_data->modereg = spi_readl(drv_data, SPI_MODE_REG);
+
+	/* FIXME Can we do anything here to power down the SPI? */
+
+	return 0;
+}
+
+static int img_spi_resume(struct device *dev)
+{
+	struct driver_data *drv_data = dev_get_drvdata(dev);
+	int status = 0;
+
+	/* Reinitialise the hardware */
+	img_spi_init_hw(drv_data);
+	spi_writel(drv_data->modereg, drv_data, SPI_MODE_REG);
+
+	/* Start the queue running */
+	status = start_queue(drv_data);
+	if (status != 0) {
+		dev_err(dev, "problem starting queue (%d)\n", status);
+		return status;
+	}
+
+	return 0;
+}
+#else
+#define img_spi_suspend NULL
+#define img_spi_resume NULL
+#endif	/* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(img_spi_pmops, img_spi_suspend, img_spi_resume);
+
+MODULE_ALIAS("img-spi");	/* for platform bus hotplug */
+static struct platform_driver img_spi_driver = {
+	.driver	= {
+		.name	= "img-spi",
+		.owner	= THIS_MODULE,
+		.pm	= &img_spi_pmops,
+		.of_match_table = img_spi_of_match,
+	},
+	.remove		= img_spi_remove,
+};
+
+static int __init img_spi_init(void)
+{
+	return platform_driver_probe(&img_spi_driver, img_spi_probe);
+}
+module_init(img_spi_init);
+
+static void __exit img_spi_exit(void)
+{
+	platform_driver_unregister(&img_spi_driver);
+}
+module_exit(img_spi_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("IMG SPI Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db34..9a54223 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -419,4 +419,20 @@
 	help
 	  This enables a console on a Dash channel.
 
+config KGDB_DA
+	bool "Use KGDB over a DA channel"
+	depends on DA_TTY && KGDB
+	default y
+	help
+	  This enables the use of KGDB over a DA channel, allowing KGDB to be
+	  used remotely or when a serial port isn't available.
+
+config KGDB_DA_CHANNEL
+	int "KGDB DA channel"
+	depends on KGDB_DA
+	range 2 6
+	default 3
+	help
+	  DA channel to use for KGDB.
+
 endif # TTY
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 0e888621..b2fbd54 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -16,10 +16,12 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kgdb.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/serial.h>
+#include <linux/serial_core.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -675,3 +677,78 @@
 };
 
 #endif
+
+#ifdef CONFIG_KGDB_DA
+
+/* read buffer to reduce small DA channel reads */
+static unsigned int kgdbda_rbuflen;
+static unsigned int kgdbda_rpos;
+static char kgdbda_rbuf[RX_BUF_SIZE];
+
+/* write buffer to reduce small DA channel writes */
+static unsigned int kgdbda_wbuflen;
+static char kgdbda_wbuf[RX_BUF_SIZE];
+
+/* read a character from the read buffer, filling from DA debug channel */
+static int kgdbda_read_char(void)
+{
+	int received = 0;
+	/* no more data, try and get some more from the DA debug channel */
+	if (kgdbda_rpos >= kgdbda_rbuflen) {
+		kgdbda_rpos = 0;
+		kgdbda_rbuflen = 0;
+		if (chancall(RDBUF, CONFIG_KGDB_DA_CHANNEL, sizeof(kgdbda_rbuf),
+			     (void *)kgdbda_rbuf, &received) == CONAOK) {
+			if (received > sizeof(kgdbda_rbuf))
+				received = 0;
+			kgdbda_rbuflen = received;
+		}
+		if (!kgdbda_rbuflen)
+			return NO_POLL_CHAR;
+	}
+	pr_devel("kgdbda r %c\n", kgdbda_rbuf[kgdbda_rpos]);
+	return kgdbda_rbuf[kgdbda_rpos++];
+}
+
+/* flush the write buffer to the DA debug channel */
+static void kgdbda_flush(void)
+{
+	int number_written;
+	if (kgdbda_wbuflen) {
+		chancall(WRBUF, CONFIG_KGDB_DA_CHANNEL, kgdbda_wbuflen,
+			 kgdbda_wbuf, &number_written);
+		kgdbda_wbuflen = 0;
+	}
+}
+
+/* write a character into the write buffer, flushing if necessary */
+static void kgdbda_write_char(u8 chr)
+{
+	pr_devel("kgdbda w %c\n", chr);
+	kgdbda_wbuf[kgdbda_wbuflen++] = chr;
+	if (kgdbda_wbuflen >= sizeof(kgdbda_wbuf))
+		kgdbda_flush();
+}
+
+static struct kgdb_io kgdbda_io_ops = {
+	.name		= "kgdbda",
+	.read_char	= kgdbda_read_char,
+	.write_char	= kgdbda_write_char,
+	.flush		= kgdbda_flush,
+};
+
+static int __init kgdbda_init(void)
+{
+	kgdb_register_io_module(&kgdbda_io_ops);
+	return 0;
+}
+
+static void kgdbda_cleanup(void)
+{
+	kgdb_unregister_io_module(&kgdbda_io_ops);
+}
+
+module_init(kgdbda_init);
+module_exit(kgdbda_cleanup);
+
+#endif
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index d07b6af..f953614 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -128,7 +128,9 @@
 static int dw8250_probe_of(struct uart_port *p)
 {
 	struct device_node	*np = p->dev->of_node;
+	struct dw8250_data	*d = p->private_data;
 	u32			val;
+	int			err;
 
 	if (!of_property_read_u32(np, "reg-io-width", &val)) {
 		switch (val) {
@@ -148,14 +150,22 @@
 	if (!of_property_read_u32(np, "reg-shift", &val))
 		p->regshift = val;
 
-	/* clock got configured through clk api, all done */
-	if (p->uartclk)
-		return 0;
-
 	/* try to find out clock frequency from DT as fallback */
 	if (of_property_read_u32(np, "clock-frequency", &val)) {
+		/* clock got configured through clk api, all done */
+		if (p->uartclk)
+			return 0;
 		dev_err(p->dev, "clk or clock-frequency not defined\n");
 		return -EINVAL;
+	} else if (!IS_ERR(d->clk)) {
+		/* set the clock rate */
+		err = clk_set_rate(d->clk, val);
+		if (err) {
+			dev_err(p->dev,
+				"setting clk rate to %u Hz failed (%d)\n",
+				val, err);
+			return err;
+		}
 	}
 	p->uartclk = val;
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 92e1dc9..e5cd616b 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -49,6 +49,7 @@
 	default y if ARCH_MMP
 	default y if MACH_LOONGSON1
 	default y if PLAT_ORION
+	default y if SOC_CHORUS2
 	default PCI
 
 # some non-PCI HCDs implement xHCI
@@ -83,6 +84,7 @@
 	default y if ARM				# SL-811
 	default y if BLACKFIN				# SL-811
 	default y if SUPERH				# r8a66597-hcd
+	default y if SOC_TZ1090
 	default PCI
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
@@ -126,6 +128,8 @@
 
 source "drivers/usb/core/Kconfig"
 
+source "drivers/usb/dwc_otg/Kconfig"
+
 source "drivers/usb/mon/Kconfig"
 
 source "drivers/usb/wusbcore/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c41feba..aa887a4 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -8,6 +8,8 @@
 
 obj-$(CONFIG_USB_DWC3)		+= dwc3/
 
+obj-$(CONFIG_USB_DWC_OTG)	+= dwc_otg/
+
 obj-$(CONFIG_USB_MON)		+= mon/
 
 obj-$(CONFIG_PCI)		+= host/
diff --git a/drivers/usb/dwc_otg/Kconfig b/drivers/usb/dwc_otg/Kconfig
new file mode 100644
index 0000000..9e1cbe7
--- /dev/null
+++ b/drivers/usb/dwc_otg/Kconfig
@@ -0,0 +1,106 @@
+menuconfig USB_DWC_OTG
+	bool "Synopsys DWC OTG Controller"
+	depends on 4XX_SOC || CPU_CAVIUM_OCTEON || SOC_TZ1090
+	help
+	  This driver provides USB Host and Device Controller support for the
+	  Synopsys Designware USB OTG Core
+
+choice
+	prompt "Operating Mode"
+	default USB_DWC_OTG_HOST_ONLY
+	depends on USB_DWC_OTG
+	help
+	  Select the operating mode of the controller OTG does both Host and
+	  Device with the selection made by the type of cable used.
+
+config USB_DWC_OTG_OTG
+	bool "OTG Support"
+	depends on USB && USB_GADGET
+	help
+	  Select this for Full OTG Mode. Important: you must select Synopsys
+	  DWC_OTG as your peripheral controller in the Gadget Menu
+
+config USB_DWC_OTG_HOST_ONLY
+	bool "Host Only"
+	depends on USB
+	help
+	  Select this for Host only operation no device support
+
+config USB_DWC_OTG_DEVICE_ONLY
+	bool "Device Only"
+	depends on USB_GADGET
+	help
+	  Select this for Device only operation no host support
+
+endchoice
+
+choice
+	prompt "Speed"
+	default USB_DWC_OTG_SPEED_HIGH
+	depends on USB_DWC_OTG
+	help
+	  Select the speed at which the USB Bus should operate at
+
+config USB_DWC_OTG_SPEED_HIGH
+	bool "High Speed (2.0)"
+	help
+	  Select this for High Speed (USB2.0 - 480Mbs/s) operation
+
+config USB_DWC_OTG_SPEED_FULL
+	bool "Full Speed (1.1)"
+	help
+	  Select this for Full Speed (USB1.1 - 12Mb/s) operation.
+	  Useful when support for Full speed devices is needed and split
+	  transactions are not avaialble (eg in descriptor dma mode).
+
+endchoice
+
+choice
+	prompt "DMA Mode"
+	default USB_DWC_OTG_DESC_DMA
+	depends on USB_DWC_OTG
+	help
+	  Select the DMA mode the controller will operate in
+
+config USB_DWC_OTG_SLAVE
+	bool "Slave (no DMA)"
+	help
+	  Select this for Slave mode, no DMA will be performed.
+
+config USB_DWC_OTG_BUFFER_DMA
+	bool "Buffer DMA"
+	help
+	  Select this mode to use the Controller's internal buffer DMA mode
+
+config USB_DWC_OTG_DESC_DMA
+	bool "Descriptor based DMA"
+	help
+	  Select this mode to use the Controller's internal descriptor-based DMA
+	  mode. Note that this mode is only available on newer IP versions (>2.9)
+	  and doesn't support split transactions.
+
+config USB_DWC_OTG_EXT_DMA
+	bool "External DMA"
+	depends on 4XX_SOC
+	help
+	  Select this mode if you wish to use a DMA controller external to the
+	  Controller IP.
+
+endchoice
+
+if USB_DWC_OTG
+
+config USB_DWC_OTG_ISOCHRONOUS
+	bool "Isochronous transfer support"
+	depends on USB_DWC_OTG
+	help
+	  Select to enable support for Isochronous transfers
+
+config USB_DWC_OTG_DEBUG
+	bool "Verbose Debugging"
+	depends on USB_DWC_OTG
+	help
+	  Select to enable verbose debugging; once enabled, the verbosity level
+	  can be set via a sysfs entry.
+
+endif
diff --git a/drivers/usb/dwc_otg/Makefile b/drivers/usb/dwc_otg/Makefile
new file mode 100644
index 0000000..8586da0
--- /dev/null
+++ b/drivers/usb/dwc_otg/Makefile
@@ -0,0 +1,50 @@
+#
+# Makefile for Synopsys DWC OTG Controller
+#
+# don't enable high-power workaround
+#KBUILD_CPPFLAGS    += -DOTG_EXT_CHG_PUMP
+
+
+ifeq ($(CONFIG_USB_DWC_OTG_HOST_ONLY),y)
+	ccflags-y        += -DDWC_HOST_ONLY
+else
+ifeq ($(CONFIG_USB_DWC_OTG_DEVICE_ONLY),y)
+	ccflags-y        += -DDWC_DEVICE_ONLY
+else
+	ccflags-y        += -DDWC_OTG_MODE
+endif
+endif
+
+# Note: By defining DWC_SLAVE, internal DMA mode is disabled.
+#       When DWC_SLAVE is not defined, the internal DMA mode is used.
+
+ifeq ($(CONFIG_USB_DWC_OTG_SLAVE),y)
+	ccflags-y        += -DDWC_SLAVE
+else
+#buffer vs descriptor dma set in driver code
+ifeq ($(CONFIG_USB_DWC_OTG_EXT_DMA),y)
+ifeq ($(CONFIG_4XX_SOC),y)
+	ccflags-y += -DDWC_SLAVE -DOTG_PLB_DMA -DOTG_PLB_DMA_TASKLET
+	ccflags-y += -DCONFIG_PPC4xx_EDMA
+endif
+endif
+endif
+
+#speed selection set in driver code
+
+ccflags-$(CONFIG_USB_DWC_OTG_ISOCHRONOUS) += -DDWC_EN_ISOC
+
+ccflags-$(CONFIG_USB_DWC_OTG_DEBUG) += -DDEBUG
+
+
+obj-y		+= dwc_otg.o
+
+dwc_otg-objs	:= dwc_otg_driver.o dwc_otg_attr.o
+dwc_otg-objs	+= dwc_otg_cil.o dwc_otg_cil_intr.o
+dwc_otg-objs	+= dwc_otg_pcd.o dwc_otg_pcd_intr.o
+dwc_otg-objs	+= dwc_otg_hcd.o dwc_otg_hcd_intr.o
+dwc_otg-objs	+= dwc_otg_hcd_ddma.o dwc_otg_hcd_queue.o
+
+ifeq ($(CONFIG_4XX_SOC),y)
+dwc_otg-objs += ppc4xx_dma.o
+endif
diff --git a/drivers/usb/dwc_otg/dwc_otg_attr.c b/drivers/usb/dwc_otg/dwc_otg_attr.c
new file mode 100644
index 0000000..d2af94b
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_attr.c
@@ -0,0 +1,677 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing.  The Linux driver attributes
+ * feature will be used to provide the Linux Diagnostic
+ * Interface. These attributes are accessed through sysfs.
+ */
+
+/** @page "Linux Module Attributes"
+ *
+ * The Linux module attributes feature is used to provide the Linux
+ * Diagnostic Interface.  These attributes are accessed through sysfs.
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing.
+ *
+ * Example usage:
+ * To get the current mode:
+ * cat /sys/devices/lm0/mode
+ *
+ * To power down the USB:
+ * echo 0 > /sys/devices/lm0/buspower
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h>  /* permission constants */
+
+#include <linux/io.h>
+
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+#include "dwc_otg_hcd.h"
+
+/*
+ * MACROs for defining sysfs attribute
+ */
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_, \
+						_addr_, \
+						_mask_, \
+						_shift_, \
+						_string_) \
+static ssize_t _otg_attr_name_##_show(struct device *dev, \
+					struct device_attribute *attr, \
+					char *buf) \
+{ \
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);\
+	u32 val; \
+	val = dwc_read_reg32(_addr_); \
+	val = (val & (_mask_)) >> _shift_; \
+	return sprintf(buf, "%s = 0x%x\n", _string_, val); \
+}
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_, \
+						_addr_, \
+						_mask_, \
+						_shift_, \
+						_string_) \
+static ssize_t _otg_attr_name_##_store(struct device *dev, \
+					struct device_attribute *attr, \
+					const char *buf, size_t count) \
+{ \
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);\
+	u32 set = simple_strtoul(buf, NULL, 16); \
+	u32 clear = set; \
+	clear = ((~clear) << _shift_) & _mask_; \
+	set = (set << _shift_) & _mask_; \
+	dev_dbg(dev, "Storing Address=0x%p Set=0x%08x Clear=0x%08x\n", \
+					_addr_, set, clear); \
+	dwc_modify_reg32(_addr_, clear, set); \
+	return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_, \
+					_addr_, \
+					_mask_, \
+					_shift_, \
+					_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_, \
+					_addr_, \
+					_mask_, \
+					_shift_, \
+					_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_, \
+					_addr_, \
+					_mask_, \
+					_shift_, \
+					_string_) \
+DEVICE_ATTR(_otg_attr_name_, 0644, \
+		_otg_attr_name_##_show, _otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_, \
+					_addr_, \
+					_mask_, \
+					_shift_, \
+					_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_, \
+					_addr_, \
+					_mask_, \
+					_shift_, \
+					_string_) \
+DEVICE_ATTR(_otg_attr_name_, 0444, _otg_attr_name_##_show, NULL);
+
+/*
+ * MACROs for defining sysfs attribute for 32-bit registers
+ */
+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_, _addr_, _string_) \
+static ssize_t _otg_attr_name_##_show(struct device *dev, \
+				struct device_attribute *attr, char *buf) \
+{ \
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);\
+	u32 val; \
+	val = dwc_read_reg32(_addr_); \
+	return sprintf(buf, "%s = 0x%08x\n", _string_, val); \
+}
+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_, _addr_, _string_) \
+static ssize_t _otg_attr_name_##_store(struct device *dev, \
+				struct device_attribute *attr, \
+				const char *buf, size_t count) \
+{ \
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);\
+	u32 val = simple_strtoul(buf, NULL, 16); \
+	dev_dbg(dev, "Storing Address=0x%p Val=0x%08x\n", \
+			_addr_, val); \
+	dwc_write_reg32(_addr_, val); \
+	return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_, _addr_, _string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_, _addr_, _string_) \
+DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_, _addr_, _string_) \
+DEVICE_ATTR(_otg_attr_name_, 0644, _otg_attr_name_##_show, \
+		_otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_, _addr_, _string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_, _addr_, _string_) \
+DEVICE_ATTR(_otg_attr_name_, 0444, _otg_attr_name_##_show, NULL);
+
+
+/** @name Functions for Show/Store of Attributes */
+/**@{*/
+
+/**
+ * Show the register offset of the Register Access.
+ */
+static ssize_t
+regoffset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	return snprintf(buf, sizeof("0xFFFFFFFF\n")+1, "0x%08x\n",
+			otg_dev->reg_offset);
+}
+
+/**
+ * Set the register offset for the next Register Access Read/Write
+ */
+#define SZ_256K 0x00040000
+static ssize_t regoffset_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 offset = simple_strtoul(buf, NULL, 16);
+	if (offset < SZ_256K)
+		otg_dev->reg_offset = offset;
+	else
+		dev_err(dev, "invalid offset\n");
+
+	return count;
+}
+DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, regoffset_show, regoffset_store);
+
+/**
+ * Show the value of the register at the offset in the reg_offset
+ * attribute.
+ */
+static ssize_t
+regvalue_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 val;
+	u32 __iomem *addr;
+
+	if (otg_dev->reg_offset != 0xFFFFFFFF && otg_dev->base) {
+		/* Calculate the address */
+		addr = (u32 __iomem *)(otg_dev->reg_offset +
+				      (u8 __iomem *)otg_dev->base);
+		val = dwc_read_reg32(addr);
+		return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1,
+				"Reg@0x%06x = 0x%08x\n",
+				otg_dev->reg_offset, val);
+	} else {
+		dev_err(dev, "Invalid offset (0x%0x)\n",
+			otg_dev->reg_offset);
+		return sprintf(buf, "invalid offset\n");
+	}
+}
+
+/**
+ * Store the value in the register at the offset in the reg_offset
+ * attribute.
+ *
+ */
+static ssize_t
+regvalue_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 __iomem *addr;
+	u32 val = simple_strtoul(buf, NULL, 16);
+	if (otg_dev->reg_offset != 0xFFFFFFFF && otg_dev->base) {
+		/* Calculate the address */
+		addr = (u32 __iomem *)(otg_dev->reg_offset +
+				   (u8 __iomem *)otg_dev->base);
+		dwc_write_reg32(addr, val);
+	} else
+		dev_err(dev, "Invalid Register Offset (0x%08x)\n",
+			otg_dev->reg_offset);
+
+	return count;
+}
+DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store);
+
+/*
+ * Attributes
+ */
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,
+		&(otg_dev->core_if->core_global_regs->gotgctl),
+		(1<<20), 20, "Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,
+		&(otg_dev->core_if->core_global_regs->gusbcfg),
+		(1<<9), 9, "Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,
+		&(otg_dev->core_if->core_global_regs->gusbcfg),
+		(1<<8), 8, "Mode");
+
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,
+		otg_dev->core_if->host_if->hprt0,
+		0x01, 0, "Bus Connected");
+
+DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,
+		&(otg_dev->core_if->core_global_regs->gotgctl),
+		"GOTGCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,
+		&(otg_dev->core_if->core_global_regs->gusbcfg),
+		"GUSBCFG");
+DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,
+		&(otg_dev->core_if->core_global_regs->grxfsiz),
+		"GRXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,
+		&(otg_dev->core_if->core_global_regs->gnptxfsiz),
+		"GNPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,
+		&(otg_dev->core_if->core_global_regs->gpvndctl),
+		"GPVNDCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,
+		&(otg_dev->core_if->core_global_regs->ggpio),
+		"GGPIO");
+DWC_OTG_DEVICE_ATTR_REG32_RW(guid,
+		&(otg_dev->core_if->core_global_regs->guid),
+		"GUID");
+DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,
+		&(otg_dev->core_if->core_global_regs->gsnpsid),
+		"GSNPSID");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,
+		&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),
+		0x3, 0, "Device Speed");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,
+		&(otg_dev->core_if->dev_if->dev_global_regs->dsts),
+		0x6, 1, "Device Enumeration Speed");
+
+DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,
+		&(otg_dev->core_if->core_global_regs->hptxfsiz),
+		"HPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,
+		otg_dev->core_if->host_if->hprt0,
+		"HPRT0");
+
+
+/**
+ * @todo Add code to initiate the HNP.
+ */
+/**
+ * Show the HNP status bit
+ */
+static ssize_t
+hnp_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	union gotgctl_data val;
+	val.d32 =
+		dwc_read_reg32(&(otg_dev->core_if->core_global_regs->gotgctl));
+	return sprintf(buf, "HstNegScs = 0x%x\n", val.b.hstnegscs);
+}
+
+/**
+ * Set the HNP Request bit
+ */
+static ssize_t
+hnp_store(struct device *dev, struct device_attribute *attr, const char *buf,
+			  size_t count)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 in = simple_strtoul(buf, NULL, 16);
+	u32 __iomem *addr = &(otg_dev->core_if->core_global_regs->gotgctl);
+	union gotgctl_data mem;
+
+	mem.d32 = dwc_read_reg32(addr);
+	mem.b.hnpreq = in;
+	dev_dbg(dev, "Storing Address=0x%p Data=0x%08x\n",
+			addr, mem.d32);
+	dwc_write_reg32(addr, mem.d32);
+	return count;
+}
+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store);
+
+/**
+ * @todo Add code to initiate the SRP.
+ */
+/**
+ * Show the SRP status bit
+ */
+static ssize_t
+srp_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_HOST_ONLY
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	union gotgctl_data val;
+	val.d32 =
+		dwc_read_reg32(&(otg_dev->core_if->core_global_regs->gotgctl));
+	return sprintf(buf, "SesReqScs = 0x%x\n", val.b.sesreqscs);
+#else
+	return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Set the SRP Request bit
+ */
+static ssize_t
+srp_store(struct device *dev, struct device_attribute *attr, const char *buf,
+			  size_t count)
+{
+#ifndef DWC_HOST_ONLY
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	dwc_otg_pcd_initiate_srp(otg_dev->pcd);
+#endif
+	return count;
+}
+DEVICE_ATTR(srp, 0644, srp_show, srp_store);
+
+/**
+ * @todo Need to do more for power on/off?
+ */
+/**
+ * Show the Bus Power status
+ */
+static ssize_t
+buspower_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	union hprt0_data val;
+	val.d32 = dwc_read_reg32(otg_dev->core_if->host_if->hprt0);
+	return sprintf(buf, "Bus Power = 0x%x\n", val.b.prtpwr);
+}
+
+
+/**
+ * Set the Bus Power status
+ */
+static ssize_t
+buspower_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 on = simple_strtoul(buf, NULL, 16);
+	u32 __iomem *addr = otg_dev->core_if->host_if->hprt0;
+	union hprt0_data mem;
+
+	mem.d32 = dwc_read_reg32(addr);
+	mem.b.prtpwr = on;
+
+	dwc_write_reg32(addr, mem.d32);
+
+	return count;
+}
+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store);
+
+/**
+ * @todo Need to do more for suspend?
+ */
+/**
+ * Show the Bus Suspend status
+ */
+static ssize_t
+bussuspend_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	union hprt0_data val;
+	val.d32 = dwc_read_reg32(otg_dev->core_if->host_if->hprt0);
+	return sprintf(buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+}
+
+/**
+ * Set the Bus Suspend status
+ */
+static ssize_t
+bussuspend_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 in = simple_strtoul(buf, NULL, 16);
+	u32 __iomem *addr = otg_dev->core_if->host_if->hprt0;
+	union hprt0_data mem;
+	mem.d32 = dwc_read_reg32(addr);
+	mem.b.prtsusp = in;
+	dev_dbg(dev, "Storing Address=0x%p Data=0x%08x\n",
+			addr, mem.d32);
+	dwc_write_reg32(addr, mem.d32);
+	return count;
+}
+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store);
+
+/**
+ * Show the status of Remote Wakeup.
+ */
+static ssize_t
+remote_wakeup_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_HOST_ONLY
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	union dctl_data val;
+	val.d32 =
+		dwc_read_reg32(&otg_dev->core_if->dev_if->dev_global_regs->dctl);
+	return sprintf(buf, "Remote Wakeup = %d Enabled = %d\n",
+			val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable);
+#else
+	return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Initiate a remote wakeup of the host.  The Device control register
+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable
+ * flag is set.
+ *
+ */
+static ssize_t
+remote_wakeup_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+#ifndef DWC_HOST_ONLY
+	u32 val = simple_strtoul(buf, NULL, 16);
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	if (val&1)
+		dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1);
+	else
+		dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0);
+#endif
+	return count;
+}
+static DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show,
+		remote_wakeup_store);
+
+/**
+ * Dump global registers and either host or device registers (depending on the
+ * current mode of the core).
+ */
+static ssize_t
+regdump_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	printk(KERN_DEBUG"%s otg_dev=0x%p\n", __func__, otg_dev);
+	dwc_otg_dump_global_registers(otg_dev->core_if);
+	if (dwc_otg_is_host_mode(otg_dev->core_if))
+		dwc_otg_dump_host_registers(otg_dev->core_if);
+	else
+		dwc_otg_dump_dev_registers(otg_dev->core_if);
+
+	return sprintf(buf, "Register Dump\n");
+}
+static DEVICE_ATTR(regdump, S_IRUGO, regdump_show, NULL);
+
+/**
+ * Dump the current hcd state.
+ */
+static ssize_t
+hcddump_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_DEVICE_ONLY
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	dwc_otg_hcd_dump_state(otg_dev->hcd);
+#endif
+	return sprintf(buf, "HCD Dump\n");
+}
+static DEVICE_ATTR(hcddump, S_IRUGO, hcddump_show, NULL);
+
+/**
+ * Dump the average frame remaining at SOF. This can be used to
+ * determine average interrupt latency. Frame remaining is also shown for
+ * start transfer and two additional sample points.
+ */
+static ssize_t
+hcd_frrem_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_DEVICE_ONLY
+	/*
+	 * struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	 * TODO: expose this method from hcd:
+	 * dwc_otg_hcd_dump_frrem(otg_dev->hcd);
+	 */
+#endif
+	return sprintf(buf, "HCD Dump Frame Remaining\n");
+}
+static DEVICE_ATTR(hcd_frrem, S_IRUGO, hcd_frrem_show, NULL);
+
+/**
+ * Displays the time required to read the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is read).
+ */
+#define RW_REG_COUNT 10000000
+#define MSEC_PER_JIFFIE (1000/HZ)
+static ssize_t
+rd_reg_test_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int i;
+	int time;
+	int start_jiffies;
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+
+	printk(KERN_DEBUG"HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+	       HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+	start_jiffies = jiffies;
+	for (i = 0; i < RW_REG_COUNT; i++)
+		dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+
+	time = jiffies - start_jiffies;
+	return sprintf(buf, "Time to read GNPTXFSIZ reg %d times:"
+			" %d msecs (%d jiffies)\n",
+			RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+
+static DEVICE_ATTR(rd_reg_test, S_IRUGO, rd_reg_test_show, NULL);
+
+/**
+ * Displays the time required to write the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is written).
+ */
+static ssize_t
+wr_reg_test_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int i;
+	int time;
+	int start_jiffies;
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	u32 reg_val;
+
+	printk(KERN_DEBUG"HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+	       HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+	reg_val =
+		dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+	start_jiffies = jiffies;
+	for (i = 0; i < RW_REG_COUNT; i++)
+		dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz,
+				reg_val);
+
+	time = jiffies - start_jiffies;
+	return sprintf(buf, "Time to write GNPTXFSIZ reg %d times:"
+			" %d msecs (%d jiffies)\n",
+			RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+static DEVICE_ATTR(wr_reg_test, S_IRUGO, wr_reg_test_show, NULL);
+
+/**
+ * Create the device files
+ */
+void dwc_otg_attr_create(struct device *dev)
+{
+	int ret_val = 0;
+	ret_val = device_create_file(dev, &dev_attr_regoffset);
+	ret_val = device_create_file(dev, &dev_attr_regvalue);
+	ret_val = device_create_file(dev, &dev_attr_mode);
+	ret_val = device_create_file(dev, &dev_attr_hnpcapable);
+	ret_val = device_create_file(dev, &dev_attr_srpcapable);
+	ret_val = device_create_file(dev, &dev_attr_hnp);
+	ret_val = device_create_file(dev, &dev_attr_srp);
+	ret_val = device_create_file(dev, &dev_attr_buspower);
+	ret_val = device_create_file(dev, &dev_attr_bussuspend);
+	ret_val = device_create_file(dev, &dev_attr_busconnected);
+	ret_val = device_create_file(dev, &dev_attr_gotgctl);
+	ret_val = device_create_file(dev, &dev_attr_gusbcfg);
+	ret_val = device_create_file(dev, &dev_attr_grxfsiz);
+	ret_val = device_create_file(dev, &dev_attr_gnptxfsiz);
+	ret_val = device_create_file(dev, &dev_attr_gpvndctl);
+	ret_val = device_create_file(dev, &dev_attr_ggpio);
+	ret_val = device_create_file(dev, &dev_attr_guid);
+	ret_val = device_create_file(dev, &dev_attr_gsnpsid);
+	ret_val = device_create_file(dev, &dev_attr_devspeed);
+	ret_val = device_create_file(dev, &dev_attr_enumspeed);
+	ret_val = device_create_file(dev, &dev_attr_hptxfsiz);
+	ret_val = device_create_file(dev, &dev_attr_hprt0);
+	ret_val = device_create_file(dev, &dev_attr_remote_wakeup);
+	ret_val = device_create_file(dev, &dev_attr_regdump);
+	ret_val = device_create_file(dev, &dev_attr_hcddump);
+	ret_val = device_create_file(dev, &dev_attr_hcd_frrem);
+	ret_val = device_create_file(dev, &dev_attr_rd_reg_test);
+	ret_val = device_create_file(dev, &dev_attr_wr_reg_test);
+}
+
+/**
+ * Remove the device files
+ */
+void dwc_otg_attr_remove(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_regoffset);
+	device_remove_file(dev, &dev_attr_regvalue);
+	device_remove_file(dev, &dev_attr_mode);
+	device_remove_file(dev, &dev_attr_hnpcapable);
+	device_remove_file(dev, &dev_attr_srpcapable);
+	device_remove_file(dev, &dev_attr_hnp);
+	device_remove_file(dev, &dev_attr_srp);
+	device_remove_file(dev, &dev_attr_buspower);
+	device_remove_file(dev, &dev_attr_bussuspend);
+	device_remove_file(dev, &dev_attr_busconnected);
+	device_remove_file(dev, &dev_attr_gotgctl);
+	device_remove_file(dev, &dev_attr_gusbcfg);
+	device_remove_file(dev, &dev_attr_grxfsiz);
+	device_remove_file(dev, &dev_attr_gnptxfsiz);
+	device_remove_file(dev, &dev_attr_gpvndctl);
+	device_remove_file(dev, &dev_attr_ggpio);
+	device_remove_file(dev, &dev_attr_guid);
+	device_remove_file(dev, &dev_attr_gsnpsid);
+	device_remove_file(dev, &dev_attr_devspeed);
+	device_remove_file(dev, &dev_attr_enumspeed);
+	device_remove_file(dev, &dev_attr_hptxfsiz);
+	device_remove_file(dev, &dev_attr_hprt0);
+	device_remove_file(dev, &dev_attr_remote_wakeup);
+	device_remove_file(dev, &dev_attr_regdump);
+	device_remove_file(dev, &dev_attr_hcddump);
+	device_remove_file(dev, &dev_attr_hcd_frrem);
+	device_remove_file(dev, &dev_attr_rd_reg_test);
+	device_remove_file(dev, &dev_attr_wr_reg_test);
+}
+
diff --git a/drivers/usb/dwc_otg/dwc_otg_attr.h b/drivers/usb/dwc_otg/dwc_otg_attr.h
new file mode 100644
index 0000000..b20e891
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_attr.h
@@ -0,0 +1,68 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_ATTR_H__)
+#define __DWC_OTG_ATTR_H__
+
+/** @file
+ * This file contains the interface to the Linux device attributes.
+ */
+extern struct device_attribute dev_attr_regoffset;
+extern struct device_attribute dev_attr_regvalue;
+
+extern struct device_attribute dev_attr_mode;
+extern struct device_attribute dev_attr_hnpcapable;
+extern struct device_attribute dev_attr_srpcapable;
+extern struct device_attribute dev_attr_hnp;
+extern struct device_attribute dev_attr_srp;
+extern struct device_attribute dev_attr_buspower;
+extern struct device_attribute dev_attr_bussuspend;
+extern struct device_attribute dev_attr_busconnected;
+extern struct device_attribute dev_attr_gotgctl;
+extern struct device_attribute dev_attr_gusbcfg;
+extern struct device_attribute dev_attr_grxfsiz;
+extern struct device_attribute dev_attr_gnptxfsiz;
+extern struct device_attribute dev_attr_gpvndctl;
+extern struct device_attribute dev_attr_ggpio;
+extern struct device_attribute dev_attr_guid;
+extern struct device_attribute dev_attr_gsnpsid;
+extern struct device_attribute dev_attr_devspeed;
+extern struct device_attribute dev_attr_enumspeed;
+extern struct device_attribute dev_attr_hptxfsiz;
+extern struct device_attribute dev_attr_hprt0;
+#ifdef CONFIG_USB_DWC_OTG_LPM
+extern struct device_attribute dev_attr_lpm_response;
+extern struct device_attribute dev_attr_sleep_local_dev;
+extern struct device_attribute devi_attr_sleep_status;
+#endif
+
+void dwc_otg_attr_create(struct device *dev);
+void dwc_otg_attr_remove(struct device *dev);
+
+#endif
diff --git a/drivers/usb/dwc_otg/dwc_otg_cil.c b/drivers/usb/dwc_otg/dwc_otg_cil.c
new file mode 100644
index 0000000..077afbf
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_cil.c
@@ -0,0 +1,4445 @@
+/*==========================================================================
+ *
+ *Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ *"Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ *otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ *The Software IS NOT an item of Licensed Software or Licensed Product under
+ *any End User Software License Agreement or Agreement for Licensed Product
+ *with Synopsys or any supplement thereto. You are permitted to use and
+ *redistribute this Software in source and binary forms, with or without
+ *modification, provided that redistributions of source code must retain this
+ *notice. You may not view, use, disclose, copy or distribute this file or
+ *any information contained herein except pursuant to this license grant from
+ *Synopsys. If you do not agree with this notice, including the disclaimer
+ *below, then you are not authorized to use the Software.
+ *
+ *THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ *(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ *DAMAGE.
+ *========================================================================== */
+
+
+/**@file
+ *
+ *The Core Interface Layer provides basic services for accessing and
+ *managing the DWC_otg hardware. These services are used by both the
+ *Host Controller Driver and the Peripheral Controller Driver.
+ *
+ *The CIL manages the memory map for the core so that the HCD and PCD
+ *don't have to do this separately. It also handles basic tasks like
+ *reading/writing the registers and data FIFOs in the controller.
+ *Some of the data access functions provide encapsulation of several
+ *operations required to perform a task, such as writing multiple
+ *registers to start a transfer. Finally, the CIL performs basic
+ *services that are not specific to either the host or device modes
+ *of operation. These services include management of the OTG Host
+ *Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
+ *Diagnostic API is also provided to allow testing of the controller
+ *hardware.
+ *
+ *The Core Interface Layer has the following requirements:
+ *- Provides basic controller operations.
+ *- Minimal use of OS services.
+ *- The OS services used will be abstracted by using inline functions
+ *	 or macros.
+ *
+ */
+#include <asm/unaligned.h>
+#include <linux/workqueue.h>
+#include <linux/usb.h>
+
+#ifdef DEBUG
+#include <linux/jiffies.h>
+#endif
+
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+#include "dwc_otg_core_if.h"
+
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+atomic_t release_later = ATOMIC_INIT(0);
+#endif
+
+
+/**
+ *This function is called to initialize the DWC_otg CSR data
+ *structures.	The register addresses in the device and host
+ *structures are initialized from the base address supplied by the
+ *caller.	The calling function must make the OS calls to get the
+ *base address of the DWC_otg controller registers.  The core_params
+ *argument holds the parameters that specify how the core should be
+ *configured.
+ *
+ *@param[in] reg_base_addr Base address of DWC_otg core registers
+ *@param[in] core_params Pointer to the core configuration parameters
+ *@param[in] dwc_otg_device Owning device structure; stored in the
+ *returned core interface (core_if->otg_dev)
+ *
+ *@return Newly allocated core interface, or NULL on any allocation
+ *failure (all partially allocated resources are freed on error)
+ */
+struct dwc_otg_core_if *dwc_otg_cil_init(u32 __iomem *reg_base_addr,
+	struct dwc_otg_core_params *core_params,
+	struct dwc_otg_device *dwc_otg_device)
+{
+	struct dwc_otg_core_if *core_if = NULL;
+	struct dwc_otg_dev_if *dev_if = NULL;
+	struct dwc_otg_host_if *host_if = NULL;
+	u8 __iomem *reg_base = (u8 __iomem *) reg_base_addr;
+	int i = 0;
+	DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr,
+		      core_params);
+	core_if = kmalloc(sizeof(struct dwc_otg_core_if), GFP_KERNEL);
+	if (!core_if) {
+		DWC_DEBUGPL(DBG_CIL,
+			"Allocation of struct dwc_otg_core_if failed\n");
+		return NULL;
+	}
+	memset(core_if, 0, sizeof(struct dwc_otg_core_if));
+	core_if->core_params = core_params;
+	core_if->core_global_regs =
+		(struct dwc_otg_core_global_regs __iomem *) reg_base;
+
+	/*
+	*Allocate the Device Mode structures.
+	*/
+	dev_if = kmalloc(sizeof(struct dwc_otg_dev_if), GFP_KERNEL);
+	if (!dev_if) {
+		DWC_DEBUGPL(DBG_CIL, "Allocation of struct d"
+				"wc_otg_dev_if failed\n");
+		kfree(core_if);
+		return NULL;
+	}
+	dev_if->dev_global_regs =
+		(struct dwc_otg_dev_global_regs __iomem *)(reg_base +
+				DWC_DEV_GLOBAL_REG_OFFSET);
+	/*
+	 * NJ Bug fix: loop was looping for MAX EPS_CHANNELS which is
+	 * the sum of both in and out EPs only need to loop for half this
+	 * as setting up both in and out on each pass
+	 */
+
+	/*
+	 * NOTE(review): despite the comment above, this loop still
+	 * iterates the full MAX_EPS_CHANNELS range -- confirm whether
+	 * the bound was meant to be halved or the comment is stale.
+	 */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		dev_if->in_ep_regs[i] =
+			(struct dwc_otg_dev_in_ep_regs __iomem *) (reg_base +
+			DWC_DEV_IN_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+		dev_if->out_ep_regs[i] =
+			(struct dwc_otg_dev_out_ep_regs __iomem *) (reg_base +
+			DWC_DEV_OUT_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+		DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", i,
+			     &dev_if->in_ep_regs[i]->diepctl);
+		DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", i,
+			     &dev_if->out_ep_regs[i]->doepctl);
+
+	}
+	dev_if->speed = 0;	/*unknown */
+	core_if->dev_if = dev_if;
+	core_if->otg_dev = dwc_otg_device;
+
+	/*
+	 * Allocate the Host Mode structures.
+	 */
+	host_if = kmalloc(sizeof(struct dwc_otg_host_if), GFP_KERNEL);
+	if (!host_if) {
+		DWC_DEBUGPL(DBG_CIL, "Allocation of struct "
+				"dwc_otg_host_if failed\n");
+		kfree(dev_if);
+		kfree(core_if);
+		return NULL;
+	}
+	host_if->host_global_regs = (struct dwc_otg_host_global_regs __iomem *)
+	    (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
+	host_if->hprt0 = (u32 __iomem *)
+				(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
+
+	/* Map the per-channel host register blocks. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		host_if->hc_regs[i] = (struct dwc_otg_hc_regs __iomem *)
+		    (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
+				    (i * DWC_OTG_CHAN_REGS_OFFSET));
+		DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
+				i, &host_if->hc_regs[i]->hcchar);
+	}
+
+	host_if->num_host_channels = MAX_EPS_CHANNELS;
+	core_if->host_if = host_if;
+	/* Map the per-endpoint/channel data FIFO windows. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		core_if->data_fifo[i] =
+		    (u32 __iomem *) (reg_base + DWC_OTG_DATA_FIFO_OFFSET +
+				  (i * DWC_OTG_DATA_FIFO_SIZE));
+		DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%p\n", i,
+			    core_if->data_fifo[i]);
+	}
+	core_if->pcgcctl = (u32 __iomem *) (reg_base + DWC_OTG_PCGCCTL_OFFSET);
+
+	/*Initiate lx_state to L3 disconnected state */
+	core_if->lx_state = DWC_OTG_L3;
+	/*
+	*Store the contents of the hardware configuration registers here for
+	*easy access later.
+	*/
+	core_if->hwcfg1.d32 =
+		dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
+	core_if->hwcfg2.d32 =
+		dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
+#ifdef DWC_SLAVE
+	/* Force slave-only architecture when DMA is compiled out. */
+	core_if->hwcfg2.b.architecture =  DWC_SLAVE_ONLY_ARCH;
+#endif
+	core_if->hwcfg3.d32 =
+		dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
+	core_if->hwcfg4.d32 =
+		dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
+
+	DWC_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
+	core_if->hcfg.d32 =
+	    dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
+	core_if->dcfg.d32 =
+	    dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
+
+	DWC_DEBUGPL(DBG_CILV, "hcfg=%08x\n", core_if->hcfg.d32);
+	DWC_DEBUGPL(DBG_CILV, "dcfg=%08x\n", core_if->dcfg.d32);
+	DWC_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
+	DWC_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
+	DWC_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n",
+			core_if->hwcfg2.b.num_dev_ep + 1);
+	DWC_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",
+			core_if->hwcfg2.b.num_host_chan);
+	DWC_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.nonperio_tx_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.host_perio_tx_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.dev_token_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
+		      core_if->hwcfg3.b.dfifo_depth);
+	DWC_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
+		     core_if->hwcfg3.b.xfer_size_cntr_width);
+
+	/*
+	 *Set the SRP success bit for FS-I2c
+	 */
+	core_if->srp_success = 0;
+	core_if->srp_timer_started = 0;
+
+	/*
+	 *Create new workqueue and init works
+	 */
+	core_if->wq_otg = create_singlethread_workqueue("dwc_otg");
+	if (!core_if->wq_otg) {
+		DWC_WARN("DWC_WORKQ_ALLOC failed\n");
+		kfree(host_if);
+		kfree(dev_if);
+		kfree(core_if);
+		return NULL;
+	}
+	core_if->snpsid = dwc_read_reg32(&core_if->core_global_regs->gsnpsid);
+
+	DWC_PRINT("Core Release: %x.%x%x%x\n",
+		   (core_if->snpsid >> 12 & 0xF),
+		   (core_if->snpsid >> 8 & 0xF),
+		   (core_if->snpsid >> 4 & 0xF), (core_if->snpsid & 0xF));
+
+	core_if->wkp_timer = kzalloc(sizeof(struct timer_list), GFP_KERNEL);
+	if (!core_if->wkp_timer) {
+		DWC_WARN(" Timer alloc failed\n");
+		kfree(host_if);
+		kfree(dev_if);
+		destroy_workqueue(core_if->wq_otg);
+		kfree(core_if);
+		return NULL;
+	}
+	/*
+	 * NOTE(review): function/data are assigned before init_timer();
+	 * on 3.x kernels init_timer() does not clear them, but
+	 * setup_timer() would be the conventional form -- confirm.
+	 */
+	core_if->wkp_timer->function = w_wakeup_detected;
+	core_if->wkp_timer->data = (unsigned long)core_if;
+	init_timer(core_if->wkp_timer);
+
+	return core_if;
+}
+
+
+/**
+ *This function frees the structures allocated by dwc_otg_cil_init().
+ *Interrupts are masked at the core before teardown so no handler
+ *runs against freed state.
+ *
+ *@param[in] core_if The core interface pointer returned from
+ *dwc_otg_cil_init().
+ *
+ */
+void dwc_otg_cil_remove(struct dwc_otg_core_if *core_if)
+{
+	/*Disable all interrupts */
+	dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0);
+	dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0);
+	/* Drain any queued work before destroying the workqueue. */
+	if (core_if->wq_otg) {
+		flush_workqueue(core_if->wq_otg);
+		destroy_workqueue(core_if->wq_otg);
+	}
+
+	/*kfree(NULL) is safe so checks for null removed (NJ)*/
+	kfree(core_if->dev_if);
+	kfree(core_if->host_if);
+	kfree(core_if->wkp_timer);
+	kfree(core_if);
+}
+
+
+/**
+ *This function enables the controller's Global Interrupt in the AHB Config
+ *register.
+ *
+ *@param[in] core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_enable_global_interrupts(struct dwc_otg_core_if *core_if)
+{
+	union gahbcfg_data ahbcfg = {.d32 = 0};
+	ahbcfg.b.glblintrmsk = 1;	/*Enable interrupts */
+	/* Read-modify-write: set only the global interrupt mask bit. */
+	dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+
+/**
+ *This function disables the controller's Global Interrupt in the AHB Config
+ *register.
+ *
+ *@param[in] core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_disable_global_interrupts(struct dwc_otg_core_if *core_if)
+{
+	union gahbcfg_data ahbcfg = {.d32 = 0};
+	ahbcfg.b.glblintrmsk = 1;	/*Bit to clear: global interrupt mask */
+	/* Read-modify-write: clear only the global interrupt mask bit. */
+	dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
+/**
+ *This function initializes the common interrupts, used in both
+ *device and host modes.
+ *
+ *@param[in] core_if Programming view of the DWC_otg controller
+ *
+ */
+static void dwc_otg_enable_common_interrupts(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gintmsk_data intr_mask = {.d32 = 0};
+
+	/*Clear any pending OTG Interrupts */
+	dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
+
+	/*Clear any pending interrupts */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/*
+	 *Enable the interrupts in the GINTMSK.
+	 */
+	intr_mask.b.modemismatch = 1;
+	intr_mask.b.otgintr = 1;
+	/* RX status queue level interrupt is only needed in slave mode. */
+	if (!core_if->dma_enable)
+		intr_mask.b.rxstsqlvl = 1;
+
+	intr_mask.b.conidstschng = 1;
+	intr_mask.b.wkupintr = 1;
+	intr_mask.b.disconnect = 1;
+	intr_mask.b.usbsuspend = 1;
+	intr_mask.b.sessreqintr = 1;
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	if (core_if->core_params->lpm_enable)
+		intr_mask.b.lpmtranrcvd = 1;
+#endif
+	dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
+}
+
+
+/**
+ *Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
+ *type.  A dedicated FS PHY (or ULPI FS/LS mode) runs the PHY clock at
+ *48 MHz; otherwise the HS PHY clock select (30/60 MHz) is used.
+ */
+static void init_fslspclksel(struct dwc_otg_core_if *core_if)
+{
+	u32 val;
+	union hcfg_data hcfg;
+	if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
+		(core_if->hwcfg2.b.fs_phy_type == 1) &&
+		(core_if->core_params->ulpi_fs_ls)) ||
+		(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS))
+			/*Full speed PHY */
+			val = DWC_HCFG_48_MHZ;
+	else
+		/*High speed PHY running at full speed or high speed */
+		val = DWC_HCFG_30_60_MHZ;
+
+	DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
+	hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
+	hcfg.b.fslspclksel = val;
+	dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+
+/**
+ *Initializes the DevSpd field of the DCFG register depending on the PHY type
+ *and the enumeration speed of the device (0x3 = FS with FS PHY,
+ *0x1 = FS on HS PHY, 0x0 = HS on HS PHY).
+ */
+static void init_devspd(struct dwc_otg_core_if *core_if)
+{
+	u32 val;
+	union dcfg_data dcfg;
+	if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
+		(core_if->hwcfg2.b.fs_phy_type == 1) &&
+		(core_if->core_params->ulpi_fs_ls)) ||
+		(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS))
+			/*Full speed PHY */
+			val = 0x3;
+	else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL)
+		/*High speed PHY running at full speed */
+		val = 0x1;
+	else
+		/*High speed PHY running at high speed */
+		val = 0x0;
+
+	DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
+	dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
+	dcfg.b.devspd = val;
+	dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+
+/**
+ *This function calculates the number of IN EPS
+ *using GHWCFG1 and GHWCFG2 registers values
+ *
+ *@param core_if Programming view of the DWC_otg controller
+ */
+static u32 calc_num_in_eps(struct dwc_otg_core_if *core_if)
+{
+	u32 num_in_eps = 0;
+	u32 num_eps = core_if->hwcfg2.b.num_dev_ep;
+	/* Shift so bit 0 is the first IN-direction field of GHWCFG1
+	 * (2 bits per endpoint) -- offsets per the Synopsys databook. */
+	u32 hwcfg1 = core_if->hwcfg1.d32 >> 3;
+	u32 num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
+	int i;
+	for (i = 0; i < num_eps; ++i) {
+		if (!(hwcfg1 & 0x1))
+			num_in_eps++;
+		hwcfg1 >>= 2;
+	}
+	/* With dedicated FIFOs the usable IN EPs are capped by the
+	 * number of Tx FIFOs actually implemented. */
+	if (core_if->hwcfg4.b.ded_fifo_en) {
+		num_in_eps = (num_in_eps > num_tx_fifos) ?
+				num_tx_fifos : num_in_eps;
+	}
+	return num_in_eps;
+}
+
+
+/**
+ *This function calculates the number of OUT EPS
+ *using GHWCFG1 and GHWCFG2 registers values
+ *
+ *@param core_if Programming view of the DWC_otg controller
+ */
+static u32 calc_num_out_eps(struct dwc_otg_core_if *core_if)
+{
+	u32 num_out_eps = 0;
+	u32 num_eps = core_if->hwcfg2.b.num_dev_ep;
+	/* Shift so bit 0 is the first OUT-direction field of GHWCFG1
+	 * (2 bits per endpoint) -- offsets per the Synopsys databook. */
+	u32 hwcfg1 = core_if->hwcfg1.d32 >> 2;
+	int i;
+	for (i = 0; i < num_eps; ++i) {
+		if (!(hwcfg1 & 0x1))
+			num_out_eps++;
+		hwcfg1 >>= 2;
+	}
+	return num_out_eps;
+}
+
+
+/**
+ *This function initializes the DWC_otg controller registers and
+ *prepares the core for device mode or host mode operation.
+ *
+ *Sequence: common GUSBCFG setup, core soft reset, PHY selection
+ *(FS vs HS/ULPI/UTMI+), GAHBCFG/DMA configuration, OTG capability
+ *programming, optional LPM/IC-USB setup, then common interrupt enable.
+ *
+ *@param core_if Programming view of the DWC_otg controller
+ *
+ */
+void dwc_otg_core_init(struct dwc_otg_core_if *core_if)
+{
+	int i = 0;
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union gahbcfg_data ahbcfg = {.d32 = 0};
+	union gusbcfg_data usbcfg = {.d32 = 0};
+	union gi2cctl_data i2cctl = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if);
+
+	/*Common Initialization */
+	usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+	DWC_DEBUGPL(DBG_CIL, "USB config register: 0x%08x\n", usbcfg.d32);
+
+	/*Program the ULPI External VBUS bit if needed */
+#if defined(OTG_EXT_CHG_PUMP) || defined(CONFIG_460EX)
+	usbcfg.b.ulpi_ext_vbus_drv = 1;
+#else
+	/*usbcfg.b.ulpi_ext_vbus_drv = 0;*/
+	usbcfg.b.ulpi_ext_vbus_drv =
+	(core_if->core_params->phy_ulpi_ext_vbus ==
+		DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
+#endif
+
+	/*Set external TS Dline pulsing */
+	usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ?
+			1 : 0;
+	dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+	/*Reset the Controller */
+	dwc_otg_core_reset(core_if);
+
+	/*Initialize parameters from Hardware configuration registers. */
+	dev_if->num_in_eps = calc_num_in_eps(core_if);
+	dev_if->num_out_eps = calc_num_out_eps(core_if);
+
+	DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
+		       core_if->hwcfg4.b.num_dev_perio_in_ep);
+	DWC_DEBUGPL(DBG_CIL, "Is power optimization enabled?  %s\n",
+		     core_if->hwcfg4.b.power_optimiz ? "Yes" : "No");
+	DWC_DEBUGPL(DBG_CIL, "vbus_valid filter enabled?  %s\n",
+		     core_if->hwcfg4.b.vbus_valid_filt_en ? "Yes" : "No");
+	DWC_DEBUGPL(DBG_CIL, "iddig filter enabled?  %s\n",
+		     core_if->hwcfg4.b.iddig_filt_en ? "Yes" : "No");
+
+	/* Snapshot the periodic Tx FIFO depths (upper 16 bits of each
+	 * DPTXFSIZ register). */
+	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+		dev_if->perio_tx_fifo_size[i] =
+		    dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+		DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i,
+			     dev_if->perio_tx_fifo_size[i]);
+	}
+	/* Snapshot the dedicated Tx FIFO depths. */
+	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+		dev_if->tx_fifo_size[i] =
+		    dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+		/* Bug fix: print the value just read (tx_fifo_size),
+		 * not the periodic array copied from the loop above. */
+		DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", i,
+			     dev_if->tx_fifo_size[i]);
+	}
+	core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
+	core_if->rx_fifo_size = dwc_read_reg32(&global_regs->grxfsiz);
+	core_if->nperio_tx_fifo_size =
+		dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
+	DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
+	DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
+	DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",
+			core_if->nperio_tx_fifo_size);
+
+
+	/*
+	 * This programming sequence needs to happen in FS mode before any other
+	 * programming occurs
+	 */
+	if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
+		(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+
+		/*If FS mode with FS PHY */
+
+		/*
+		 * core_init() is now called on every switch so only call the
+		 * following for the first time through.
+		 */
+		if (!core_if->phy_init_done) {
+			core_if->phy_init_done = 1;
+			DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
+			usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+			usbcfg.b.physel = 1;
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/*Reset after a PHY select */
+			dwc_otg_core_reset(core_if);
+		}
+
+		/* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS.
+		 * Do this on HNP Dev/Host mode switches (done in dev_init and
+		 * host_init).
+		 */
+		if (dwc_otg_is_host_mode(core_if)) {
+			DWC_DEBUGPL(DBG_CIL, "host mode\n");
+			init_fslspclksel(core_if);
+		} else {
+			DWC_DEBUGPL(DBG_CIL, "device mode\n");
+			init_devspd(core_if);
+		}
+
+		if (core_if->core_params->i2c_enable) {
+			DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
+
+			/*Program GUSBCFG.OtgUtmifsSel to I2C */
+			usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+			usbcfg.b.otgutmifssel = 1;
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/*Program GI2CCTL.I2CEn */
+			i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
+			i2cctl.b.i2cdevaddr = 1;
+			i2cctl.b.i2cen = 0;
+			dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+			i2cctl.b.i2cen = 1;
+			dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+		}
+	}	/*endif speed == DWC_SPEED_PARAM_FULL */
+	else {
+		/*High speed PHY. */
+		if (!core_if->phy_init_done) {
+			core_if->phy_init_done = 1;
+			DWC_DEBUGPL(DBG_CIL, "High speed PHY\n");
+			/*HS PHY parameters.  These parameters are preserved
+			 *during soft reset so only program the first time.  Do
+			 *a soft reset immediately after setting phyif.
+			 */
+			usbcfg.b.ulpi_utmi_sel = (core_if->core_params->phy_type != 1);
+			if (usbcfg.b.ulpi_utmi_sel) {
+				DWC_DEBUGPL(DBG_CIL, "ULPI\n");
+				/*ULPI interface */
+				usbcfg.b.phyif = 0;
+				usbcfg.b.ddrsel =
+					core_if->core_params->phy_ulpi_ddr;
+			} else {
+				/*UTMI+ interface */
+				if (core_if->core_params->phy_utmi_width
+						== 16) {
+					usbcfg.b.phyif = 1;
+					DWC_DEBUGPL(DBG_CIL, "UTMI+ 16\n");
+				} else {
+					DWC_DEBUGPL(DBG_CIL, "UTMI+ 8\n");
+					usbcfg.b.phyif = 0;
+				}
+			}
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+			/*Reset after setting the PHY parameters */
+			dwc_otg_core_reset(core_if);
+		}
+	}
+	if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
+		(core_if->hwcfg2.b.fs_phy_type == 1) &&
+		(core_if->core_params->ulpi_fs_ls)) {
+		DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
+		usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 1;
+		usbcfg.b.ulpi_clk_sus_m = 1;
+		dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+	} else {
+		DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS=0\n");
+		usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 0;
+		usbcfg.b.ulpi_clk_sus_m = 0;
+		dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+	}
+
+	/*Program the GAHBCFG Register. */
+	switch (core_if->hwcfg2.b.architecture) {
+
+	case DWC_SLAVE_ONLY_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
+		ahbcfg.b.nptxfemplvl_txfemplvl =
+			DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		core_if->dma_enable = 0;
+		core_if->dma_desc_enable = 0;
+		break;
+	case DWC_EXT_DMA_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
+		ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size;
+		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
+		core_if->dma_desc_enable =
+			(core_if->core_params->dma_desc_enable != 0);
+		break;
+	case DWC_INT_DMA_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
+		ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
+		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
+		core_if->dma_desc_enable =
+			(core_if->core_params->dma_desc_enable != 0);
+		break;
+	}
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable)
+			DWC_PRINT("Using Descriptor DMA mode\n");
+		else
+			DWC_PRINT("Using Buffer DMA mode\n");
+
+	} else {
+		DWC_PRINT("Using Slave mode\n");
+		core_if->dma_desc_enable = 0;
+	}
+	ahbcfg.b.dmaenable = core_if->dma_enable;
+	dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
+	core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
+
+	core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
+	core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
+	DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n",
+		   ((core_if->pti_enh_enable) ? "enabled" : "disabled"));
+	DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n",
+		   ((core_if->multiproc_int_enable) ? "enabled" : "disabled"));
+	/*
+	 * Program the GUSBCFG register.
+	 */
+	usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+	switch (core_if->hwcfg2.b.op_mode) {
+	case DWC_MODE_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
+			DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+			DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_SRP_ONLY_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+			DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	case DWC_MODE_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+			DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	case DWC_MODE_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+			DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	}
+	dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	if (core_if->core_params->lpm_enable) {
+		union glpmcfg_data lpmcfg = {.d32 = 0 };
+
+		/*To enable LPM support set lpm_cap_en bit */
+		lpmcfg.b.lpm_cap_en = 1;
+
+		/*Make AppL1Res ACK */
+		lpmcfg.b.appl_resp = 1;
+
+		/*Retry 3 times */
+		lpmcfg.b.retry_count = 3;
+
+		dwc_modify_reg32(&core_if->core_global_regs->glpmcfg,
+				 0, lpmcfg.d32);
+
+	}
+#endif
+	if (core_if->core_params->ic_usb_cap) {
+		union gusbcfg_data gusbcfg = {.d32 = 0 };
+		gusbcfg.b.ic_usb_cap = 1;
+		dwc_modify_reg32(&core_if->core_global_regs->gusbcfg,
+				 0, gusbcfg.d32);
+	}
+	/*Enable common interrupts */
+	dwc_otg_enable_common_interrupts(core_if);
+
+	/*Do device or host initialization based on mode during PCD
+	*and HCD initialization
+	 */
+	if (dwc_otg_is_host_mode(core_if)) {
+		DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
+		core_if->op_state = A_HOST;
+	} else {
+		DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
+		core_if->op_state = B_PERIPHERAL;
+#ifdef DWC_DEVICE_ONLY
+		dwc_otg_core_dev_init(core_if);
+#endif	/* */
+	}
+}
+
+
+/**
+ *This function enables the Device mode interrupts.
+ *Common interrupts are assumed to have been enabled already by
+ *dwc_otg_enable_common_interrupts(); only device-specific bits are
+ *OR-ed into GINTMSK here.
+ *
+ *@param core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_device_interrupts(struct dwc_otg_core_if *core_if)
+{
+	union gintmsk_data intr_mask = {.d32 = 0};
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+#if 0
+	/*
+	 * Removed as this clears down any host mode
+	 * ints that have been setup
+	 */
+
+	/*Disable all interrupts. */
+	dwc_write_reg32(&global_regs->gintmsk, 0);
+#endif
+
+	/*Clear any pending interrupts */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+#if 0
+	/*
+	 * Removed as this clears down any host mode
+	 * ints that have been setup and has already been called.
+	 */
+
+	/*Enable the common interrupts */
+	dwc_otg_enable_common_interrupts(core_if);
+#endif
+
+	/*Enable interrupts */
+	intr_mask.b.usbreset = 1;
+	intr_mask.b.enumdone = 1;
+	/* With per-endpoint interrupt registers (multiproc mode) the
+	 * aggregate IN/OUT EP interrupts are not needed. */
+	if (!core_if->multiproc_int_enable) {
+		intr_mask.b.inepintr = 1;
+		intr_mask.b.outepintr = 1;
+	}
+	intr_mask.b.erlysuspend = 1;
+	/* EP mismatch only applies when a shared Tx FIFO is in use. */
+	if (core_if->en_multiple_tx_fifo == 0)
+		intr_mask.b.epmismatch = 1;
+
+	/* OR the bits in rather than overwriting, preserving any host
+	 * mode interrupts already enabled. */
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32,
+			      intr_mask.d32);
+
+	DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
+		      dwc_read_reg32(&global_regs->gintmsk));
+}
+
+
+/**
+ *This function initializes the DWC_otg controller registers for
+ *device mode.
+ *
+ *@param core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_dev_init(struct dwc_otg_core_if *core_if)
+{
+	int i, ptxfifosize_each;
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	struct dwc_otg_core_params *params = core_if->core_params;
+	union dcfg_data dcfg = {.d32 = 0};
+	union grstctl_data resetctl = {.d32 = 0};
+	u32 rx_fifo_size;
+	union fifosize_data nptxfifosize;
+	union fifosize_data txfifosize;
+	union dthrctl_data dthrctl;
+	union fifosize_data ptxfifosize;
+
+	/*Restart the Phy Clock */
+	dwc_write_reg32(core_if->pcgcctl, 0);
+
+	/*Device configuration register */
+	init_devspd(core_if);
+	dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
+	dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
+	dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
+	dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+	/*Configure data FIFO sizes */
+	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+		/*Split the Fifo equally*/
+		DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+			    core_if->total_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
+			    core_if->total_fifo_size/3);
+		DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
+			    core_if->total_fifo_size/3);
+
+		/*Rx FIFO */
+		DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->grxfsiz));
+
+		rx_fifo_size = core_if->total_fifo_size/3;
+		dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->grxfsiz));
+
+		/**Set Periodic Tx FIFO Mask all bits 0 */
+		core_if->p_tx_msk = 0;
+
+		/**Set Tx FIFO Mask all bits 0 */
+		core_if->tx_msk = 0;
+		if (core_if->en_multiple_tx_fifo == 0) {
+			/*Non-periodic Tx FIFO */
+			DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				dwc_read_reg32(&global_regs->gnptxfsiz));
+			nptxfifosize.b.depth = core_if->total_fifo_size/3;
+			nptxfifosize.b.startaddr = core_if->total_fifo_size/3;
+			dwc_write_reg32(&global_regs->gnptxfsiz,
+					nptxfifosize.d32);
+			DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				      dwc_read_reg32(&global_regs->gnptxfsiz));
+
+			/**@todo NGS: Fix Periodic FIFO Sizing! */
+
+			/*
+			 * Periodic Tx FIFOs These FIFOs are numbered from 1 to
+			 * 15.Indexes of the FIFO size module parameters in the
+			 * dev_perio_tx_fifo_size array and the FIFO size
+			 * registers in the dptxfsiz array run from 0 to 14.
+			 */
+
+			ptxfifosize_each = (core_if->total_fifo_size/3) /
+					   core_if->hwcfg4.b.num_dev_perio_in_ep;
+
+			ptxfifosize.b.startaddr =
+				nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+			for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep;
+			    i++) {
+				ptxfifosize.b.depth = ptxfifosize_each;
+
+				DWC_DEBUGPL(DBG_CIL,
+					    "initial dptxfsiz_dieptxf[%d]"
+					    "=%08x\n",
+					    i,
+					    dwc_read_reg32(&global_regs->
+							   dptxfsiz_dieptxf
+							   [i]));
+				dwc_write_reg32(&global_regs->
+						dptxfsiz_dieptxf[i],
+						ptxfifosize.d32);
+				DWC_DEBUGPL(DBG_CIL,
+					    "new dptxfsiz_dieptxf[%d]=%08x\n",
+					    i,
+					    dwc_read_reg32(&global_regs->
+							   dptxfsiz_dieptxf
+							   [i]));
+				ptxfifosize.b.startaddr += ptxfifosize.b.depth;
+			}
+		} else {
+
+			/*
+			 * Tx FIFOs These FIFOs are numbered from 1 to 15.
+			 * Indexes of the FIFO size module parameters in the
+			 * dev_tx_fifo_size array and the FIFO size registers in
+			 * the dptxfsiz_dieptxf array run from 0 to 14.
+			 */
+
+			/*Non-periodic Tx FIFO */
+			DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				dwc_read_reg32(&global_regs->gnptxfsiz));
+
+			nptxfifosize.b.depth = core_if->total_fifo_size/3;
+			nptxfifosize.b.startaddr = core_if->total_fifo_size/3;
+			dwc_write_reg32(&global_regs->gnptxfsiz,
+					nptxfifosize.d32);
+			DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				      dwc_read_reg32(&global_regs->gnptxfsiz));
+			txfifosize.b.startaddr =
+			    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+
+			ptxfifosize_each = (core_if->total_fifo_size/3) /
+					   core_if->hwcfg4.b.num_in_eps;
+
+
+			for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+
+				txfifosize.b.depth = ptxfifosize_each;
+
+				DWC_DEBUGPL(DBG_CIL,
+					    "initial dptxfsiz_dieptxf[%d]"
+					    "=%08x\n",
+					    i,
+					    dwc_read_reg32(&global_regs->
+							   dptxfsiz_dieptxf
+							   [i]));
+
+				dwc_write_reg32(&global_regs->
+						dptxfsiz_dieptxf[i],
+						txfifosize.d32);
+
+				DWC_DEBUGPL(DBG_CIL,
+					    "new dptxfsiz_dieptxf[%d]=%08x\n",
+					    i,
+					    dwc_read_reg32(&global_regs->
+							   dptxfsiz_dieptxf
+							   [i]));
+				txfifosize.b.startaddr += txfifosize.b.depth;
+			}
+		}
+	}
+
+	/*Flush the FIFOs */
+	dwc_otg_flush_tx_fifo(core_if, 0x10);	/*all Tx FIFOs */
+	dwc_otg_flush_rx_fifo(core_if);
+
+	/*Flush the Learning Queue. */
+	resetctl.b.intknqflsh = 1;
+	dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
+
+	/*Clear all pending Device Interrupts */
+	if (core_if->multiproc_int_enable) {
+		for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
+			dwc_write_reg32(&dev_if->dev_global_regs->
+					diepeachintmsk[i], 0);
+		}
+
+		for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
+			dwc_write_reg32(&dev_if->dev_global_regs->
+					doepeachintmsk[i], 0);
+		}
+
+		dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
+		dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0);
+	} else {
+		dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
+		dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
+		dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
+		dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
+	}
+	for (i = 0; i <= dev_if->num_in_eps; i++) {
+		union depctl_data depctl;
+		depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+		if (depctl.b.epena) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+		} else {
+			depctl.d32 = 0;
+		}
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
+	}
+	for (i = 0; i <= dev_if->num_out_eps; i++) {
+		union depctl_data depctl;
+		depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
+		if (depctl.b.epena) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+		} else {
+			depctl.d32 = 0;
+		}
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
+	}
+	if (core_if->en_multiple_tx_fifo && core_if->dma_enable) {
+		dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
+		dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
+		dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
+
+		dev_if->rx_thr_length = params->rx_thr_length;
+		dev_if->tx_thr_length = params->tx_thr_length;
+		dev_if->setup_desc_index = 0;
+		dthrctl.d32 = 0;
+		dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
+		dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
+		dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
+		dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
+		dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
+		dthrctl.b.ahb_thr_ratio = params->ahb_thr_ratio;
+		dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl,
+				dthrctl.d32);
+
+		DWC_DEBUGPL(DBG_CIL,
+			    "Non ISO Tx Thr - %d\nISO Tx Thr - %d\n"
+			    "Rx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
+			    dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
+			    dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
+			    dthrctl.b.rx_thr_len);
+	}
+	dwc_otg_enable_device_interrupts(core_if);
+	{
+		union diepint_data msk = {.d32 = 0};
+		msk.b.txfifoundrn = 1;
+		if (core_if->multiproc_int_enable) {
+			dwc_modify_reg32(&dev_if->dev_global_regs->
+					 diepeachintmsk[0], msk.d32, msk.d32);
+		} else {
+			dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk,
+					 msk.d32, msk.d32);
+		}
+	}
+
+	if (core_if->multiproc_int_enable) {
+		/*Set NAK on Babble */
+		union dctl_data dctl = {.d32 = 0 };
+		dctl.b.nakonbble = 1;
+		dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
+	}
+}
+
+/**
+ * This function enables the Host mode interrupts.
+ *
+ * @param core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_host_interrupts(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+#if 0
+	/*
+	 * Removed as this clears down any device mode
+	 * ints that have been set up.
+	 */
+
+	/* Disable all interrupts. */
+	dwc_write_reg32(&global_regs->gintmsk, 0);
+#endif
+
+	/* Clear any pending interrupts. */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+#if 0
+	/*
+	 * Removed as this clears down any device mode
+	 * ints that have been set up and has already been called.
+	 */
+
+	/* Enable the common interrupts. */
+	dwc_otg_enable_common_interrupts(core_if);
+#endif
+
+	/*
+	 * Enable host mode interrupts without disturbing common
+	 * interrupts (SOF only when descriptor DMA is disabled).
+	 */
+	if (!core_if->dma_desc_enable)
+		intr_mask.b.sofintr = 1;
+
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+}
+
+/**
+ * This function disables the Host Mode interrupts.
+ *
+ * @param core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_disable_host_interrupts(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+	/*
+	 * Disable host mode interrupts without disturbing common
+	 * interrupts (mask bits are cleared, not the whole register).
+	 */
+	intr_mask.b.sofintr = 1;
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+	intr_mask.b.ptxfempty = 1;
+	intr_mask.b.nptxfempty = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+}
+
+#if 0
+/* Currently unused; kept here in case it is needed later. */
+static int phy_read(struct dwc_otg_core_if *core_if, int addr)
+{
+	u32 val;
+	int timeout = 10;
+
+	/* Issue a vendor-control PHY register read, then poll the done flag. */
+	dwc_write_reg32(&core_if->core_global_regs->gpvndctl,
+			0x02000000 | (addr << 16));
+	val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+	while (((val & 0x08000000) == 0) && (timeout--)) {
+		udelay(1000);
+		val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+	}
+	val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+	printk(KERN_DEBUG"%s: addr=%02x regval=%02x\n",
+			__func__, addr, val & 0x000000ff);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_405EX
+/* Write val8 to PHY register addr via the vendor-control interface. */
+static int phy_write(struct dwc_otg_core_if *core_if, int addr, int val8)
+{
+	u32 val;
+	int timeout = 10;
+	/* Kick off the write, then poll the done flag (0x08000000). */
+	dwc_write_reg32(&core_if->core_global_regs->gpvndctl,
+			0x02000000 | 0x00400000 |
+			(addr << 16) | (val8 & 0x000000ff));
+	val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+	while (((val & 0x08000000) == 0) && (timeout--)) {
+		udelay(1000);
+		val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+	}
+	val = dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+
+	return 0;
+}
+#endif
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * host mode.
+ *
+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ *
+ * @param core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_host_init(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	struct dwc_otg_host_if *host_if = core_if->host_if;
+	struct dwc_otg_core_params *params = core_if->core_params;
+	union hprt0_data hprt0 = {.d32 = 0};
+	union fifosize_data nptxfifosize;
+	union fifosize_data ptxfifosize;
+	int i;
+	union hcchar_data hcchar;
+	union hcfg_data hcfg;
+	struct dwc_otg_hc_regs __iomem *hc_regs;
+	int num_channels;
+	union gotgctl_data gotgctl = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, core_if);
+
+	/* Restart the PHY clock. */
+	dwc_write_reg32(core_if->pcgcctl, 0);
+
+	/* Initialize the Host Configuration Register. */
+	init_fslspclksel(core_if);
+	if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+		hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+		hcfg.b.fslssupp = 1;
+		dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+	}
+	/* Descriptor DMA needs hw support, core rev >= 2.90a and OTG/host mode. */
+	if (core_if->core_params->dma_desc_enable) {
+		u8 op_mode = core_if->hwcfg2.b.op_mode;
+		if (!(core_if->hwcfg4.b.desc_dma
+			&& (core_if->snpsid >= OTG_CORE_REV_2_90a) &&
+			((op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ||
+			(op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) ||
+			(op_mode == DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG) ||
+			(op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
+			(op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)))) {
+
+			DWC_ERROR("Host can't operate in Descriptor DMA mode.\n"
+					"Either core version is below 2.90a or "
+					"GHWCFG2, GHWCFG4 registers' values do "
+					"not allow Descriptor DMA in host mode."
+					"\n"
+					"To run the driver in Buffer DMA host "
+					"mode set dma_desc_enable "
+					"module parameter to 0.\n");
+			return;
+		}
+		hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+		hcfg.b.descdma = 1;
+		dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+	}
+	/* Configure data FIFO sizes. */
+	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+		/* Split the FIFO space equally: Rx, non-periodic Tx, periodic Tx. */
+		int fifo_size = core_if->total_fifo_size/3;
+
+		DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+			    core_if->total_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n", fifo_size);
+
+		/* Rx FIFO */
+		DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->grxfsiz));
+		dwc_write_reg32(&global_regs->grxfsiz, fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->grxfsiz));
+
+		/* Non-periodic Tx FIFO */
+		DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->gnptxfsiz));
+		nptxfifosize.b.depth = fifo_size;
+		nptxfifosize.b.startaddr = fifo_size;
+		dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+		DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->gnptxfsiz));
+
+		/* Periodic Tx FIFO */
+		DWC_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->hptxfsiz));
+		ptxfifosize.b.depth = fifo_size;
+		ptxfifosize.b.startaddr =
+		    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+		dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
+		DWC_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n",
+			    dwc_read_reg32(&global_regs->hptxfsiz));
+	}
+
+	/* Clear Host Set HNP Enable in the OTG Control Register. */
+	gotgctl.b.hstsethnpen = 1;
+	dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+
+	/* Make sure the FIFOs are flushed. */
+	dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */);
+	dwc_otg_flush_rx_fifo(core_if);
+
+	if (!core_if->core_params->dma_desc_enable) {
+		/* Flush out any leftover queued requests. */
+		num_channels = core_if->core_params->host_channels;
+		for (i = 0; i < num_channels; i++) {
+			hc_regs = core_if->host_if->hc_regs[i];
+			hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+			hcchar.b.chen = 0;
+			hcchar.b.chdis = 1;
+			hcchar.b.epdir = 0;
+
+			wmb();
+
+			dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+		}
+
+		/* Halt all channels to put them into a known state. */
+		for (i = 0; i < num_channels; i++) {
+			int count = 0;
+			hc_regs = core_if->host_if->hc_regs[i];
+			hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+			hcchar.b.chen = 1;
+			hcchar.b.chdis = 1;
+			hcchar.b.epdir = 0;
+
+			wmb();
+
+			dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+			DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n",
+					__func__, i);
+			/* Poll (up to ~20ms) for the channel enable bit to clear. */
+			do {
+				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+#ifdef DWC_SLAVE		/* We must pop anything in the RxQueue */
+				(void)dwc_read_reg32(&global_regs->grxstsp);
+#endif
+				if (++count > 200) {
+					DWC_ERROR
+					    ("%s: Unable to clear halt "
+							    "on channel %d\n",
+					     __func__, i);
+					break;
+				}
+				udelay(100);
+			} while (hcchar.b.chen);
+		}
+	}
+
+	/* Turn on the vbus power. */
+	DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state);
+	if (core_if->op_state == A_HOST) {
+		hprt0.d32 = dwc_otg_read_hprt0(core_if);
+		DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+		if (hprt0.b.prtpwr == 0) {
+			hprt0.b.prtpwr = 1;
+			dwc_write_reg32(host_if->hprt0, hprt0.d32);
+			if (core_if->otg_dev->soc_enable_vbus)
+				core_if->otg_dev->soc_enable_vbus();
+		}
+	}
+
+#ifdef CONFIG_405EX
+	/* Write 0x60 to USB PHY register 7:
+	 * Modify "Indicator Complement" and "Indicator Pass Thru" of
+	 * Interface control register to disable the internal Vbus
+	 * comparator, as suggested by RichTek FAE.
+	 * This produced better results recognizing and mounting USB
+	 * memory sticks on the Makalu 405EX platform. I couldn't see
+	 * any difference on Kilauea, but since it seems to be better
+	 * on Makalu, let's keep it in here too.
+	 */
+	phy_write(core_if, 7, 0x60);
+#endif
+
+	dwc_otg_enable_host_interrupts(core_if);
+}
+
+
+/**
+ * Prepares a host channel for transferring packets to/from a specific
+ * endpoint. The HCCHARn register is set up with the characteristics specified
+ * in hc. Host channel interrupts that may need to be serviced while this
+ * transfer is in progress are enabled.
+ *
+ * @param core_if Programming view of DWC_otg controller
+ * @param hc Information needed to initialize the host channel
+ */
+void dwc_otg_hc_init(struct dwc_otg_core_if *core_if, struct dwc_hc *hc)
+{
+	u32 intr_enable;
+	union hcintmsk_data hc_intr_mask;
+	union gintmsk_data gintmsk = {.d32 = 0};
+	union hcchar_data hcchar;
+	union hcsplt_data hcsplt;
+	u8 hc_num = hc->hc_num;
+	struct dwc_otg_host_if *host_if = core_if->host_if;
+	struct dwc_otg_hc_regs __iomem *hc_regs = host_if->hc_regs[hc_num];
+
+	/* Clear old interrupt conditions for this host channel. */
+	hc_intr_mask.d32 = 0xFFFFFFFF;
+	hc_intr_mask.b.reserved14_31 = 0;
+	dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
+
+	/* Enable channel interrupts required for this transfer. */
+	hc_intr_mask.d32 = 0;
+	hc_intr_mask.b.chhltd = 1;
+	if (core_if->dma_enable) {
+		if (!core_if->dma_desc_enable)
+			hc_intr_mask.b.ahberr = 1;
+		else
+			if (hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+				hc_intr_mask.b.xfercompl = 1;
+		/* Extra interrupts for error recovery on non-split, non-ISOC. */
+		if (hc->error_state && !hc->do_split &&
+			 hc->ep_type != USB_ENDPOINT_XFER_ISOC) {
+			hc_intr_mask.b.ack = 1;
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.datatglerr = 1;
+				if (hc->ep_type != USB_ENDPOINT_XFER_INT)
+					hc_intr_mask.b.nak = 1;
+			}
+		}
+	} else {
+		/* Slave mode: mask depends on the endpoint transfer type. */
+		switch (hc->ep_type) {
+		case USB_ENDPOINT_XFER_CONTROL:
+		case USB_ENDPOINT_XFER_BULK:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.stall = 1;
+			hc_intr_mask.b.xacterr = 1;
+			hc_intr_mask.b.datatglerr = 1;
+			if (hc->ep_is_in)
+				hc_intr_mask.b.bblerr = 1;
+			else {
+				hc_intr_mask.b.nak = 1;
+				hc_intr_mask.b.nyet = 1;
+				if (hc->do_ping)
+					hc_intr_mask.b.ack = 1;
+			}
+			if (hc->do_split) {
+				hc_intr_mask.b.nak = 1;
+				if (hc->complete_split)
+					hc_intr_mask.b.nyet = 1;
+				else
+					hc_intr_mask.b.ack = 1;
+			}
+			if (hc->error_state)
+				hc_intr_mask.b.ack = 1;
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.nak = 1;
+			hc_intr_mask.b.stall = 1;
+			hc_intr_mask.b.xacterr = 1;
+			hc_intr_mask.b.datatglerr = 1;
+			hc_intr_mask.b.frmovrun = 1;
+			if (hc->ep_is_in)
+				hc_intr_mask.b.bblerr = 1;
+			if (hc->error_state)
+				hc_intr_mask.b.ack = 1;
+			if (hc->do_split) {
+				if (hc->complete_split)
+					hc_intr_mask.b.nyet = 1;
+				else
+					hc_intr_mask.b.ack = 1;
+			}
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.frmovrun = 1;
+			hc_intr_mask.b.ack = 1;
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.xacterr = 1;
+				hc_intr_mask.b.bblerr = 1;
+			}
+			break;
+		}
+	}
+	dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+	/* Enable the top level host channel interrupt. */
+	intr_enable = (1 << hc_num);
+	dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
+
+	/* Make sure host channel interrupts are enabled. */
+	gintmsk.b.hcintr = 1;
+	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
+
+	/*
+	 * Program the HCCHARn register with the endpoint characteristics for
+	 * the current transfer.
+	 */
+	hcchar.d32 = 0;
+	hcchar.b.devaddr = hc->dev_addr;
+	hcchar.b.epnum = hc->ep_num;
+	hcchar.b.epdir = hc->ep_is_in;
+	hcchar.b.lspddev = (hc->speed == USB_SPEED_LOW);
+	hcchar.b.eptype = hc->ep_type;
+	hcchar.b.mps = hc->max_packet;
+	dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	DWC_DEBUGPL(DBG_HCDV, "	 Dev Addr: %d\n", hcchar.b.devaddr);
+	DWC_DEBUGPL(DBG_HCDV, "	 Ep Num: %d\n", hcchar.b.epnum);
+	DWC_DEBUGPL(DBG_HCDV, "	 Is In: %d\n", hcchar.b.epdir);
+	DWC_DEBUGPL(DBG_HCDV, "	 Is Low Speed: %d\n", hcchar.b.lspddev);
+	DWC_DEBUGPL(DBG_HCDV, "	 Ep Type: %d\n", hcchar.b.eptype);
+	DWC_DEBUGPL(DBG_HCDV, "	 Max Pkt: %d\n", hcchar.b.mps);
+	DWC_DEBUGPL(DBG_HCDV, "	 Multi Cnt: %d\n", hcchar.b.multicnt);
+
+	/*
+	 * Program the HCSPLIT register for SPLITs.
+	 */
+
+	hcsplt.d32 = 0;
+	if (hc->do_split) {
+		DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
+			hc->hc_num, hc->complete_split ? "CSPLIT" : "SSPLIT");
+		hcsplt.b.compsplt = hc->complete_split;
+		hcsplt.b.xactpos = hc->xact_pos;
+		hcsplt.b.hubaddr = hc->hub_addr;
+		hcsplt.b.prtaddr = hc->port_addr;
+		DWC_DEBUGPL(DBG_HCDV, "	  comp split %d\n", hc->complete_split);
+		DWC_DEBUGPL(DBG_HCDV, "	  xact pos %d\n", hc->xact_pos);
+		DWC_DEBUGPL(DBG_HCDV, "	  hub addr %d\n", hc->hub_addr);
+		DWC_DEBUGPL(DBG_HCDV, "	  port addr %d\n", hc->port_addr);
+		DWC_DEBUGPL(DBG_HCDV, "	  is_in %d\n", hc->ep_is_in);
+		DWC_DEBUGPL(DBG_HCDV, "	  Max Pkt: %d\n", hcchar.b.mps);
+		DWC_DEBUGPL(DBG_HCDV, "	  xferlen: %d\n", hc->xfer_len);
+	}
+	dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
+}
+
+
+/**
+ * Attempts to halt a host channel. This function should only be called in
+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ * normal circumstances in DMA mode, the controller halts the channel when the
+ * transfer is complete or a condition occurs that requires application
+ * intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to initiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ *
+ * @param core_if Controller register interface.
+ * @param hc Host channel to halt.
+ * @param _halt_status Reason for halting the channel.
+ */
+void dwc_otg_hc_halt(struct dwc_otg_core_if *core_if,
+		     struct dwc_hc *hc,  enum dwc_otg_halt_status _halt_status)
+{
+	union gnptxsts_data nptxsts;
+	union hptxsts_data hptxsts;
+	union hcchar_data hcchar;
+	struct dwc_otg_hc_regs __iomem *hc_regs;
+	struct dwc_otg_core_global_regs __iomem *global_regs;
+	struct dwc_otg_host_global_regs __iomem *host_global_regs;
+	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+	global_regs = core_if->core_global_regs;
+	host_global_regs = core_if->host_if->host_global_regs;
+	WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
+	if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+		 _halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+
+		/*
+		 * Disable all channel interrupts except Ch Halted. The QTD
+		 * and QH state associated with this transfer has been cleared
+		 * (in the case of URB_DEQUEUE), so the channel needs to be
+		 * shut down carefully to prevent crashes.
+		 */
+		union hcintmsk_data hcintmsk;
+		hcintmsk.d32 = 0;
+		hcintmsk.b.chhltd = 1;
+		dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
+
+		/*
+		 * Make sure no other interrupts besides halt are currently
+		 * pending. Handling another interrupt could cause a crash due
+		 * to the QTD and QH state.
+		 */
+		dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
+
+		/*
+		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+		 * even if the channel was already halted for some other
+		 * reason.
+		 */
+		hc->halt_status = _halt_status;
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		if (hcchar.b.chen == 0) {
+			/*
+			 * The channel is either already halted or it hasn't
+			 * started yet. In DMA mode, the transfer may halt if
+			 * it finishes normally or a condition occurs that
+			 * requires driver intervention. Don't want to halt
+			 * the channel again. In either Slave or DMA mode,
+			 * it's possible that the transfer has been assigned
+			 * to a channel, but not started yet when an URB is
+			 * dequeued. Don't want to halt a channel that hasn't
+			 * started yet.
+			 */
+			return;
+		}
+	}
+	if (hc->halt_pending) {
+
+		/*
+		 * A halt has already been issued for this channel. This might
+		 * happen when a transfer is aborted by a higher level in
+		 * the stack.
+		 */
+#ifdef DEBUG
+		DWC_PRINT("***%s: Channel %d, "
+				"hc->halt_pending already set ***\n",
+				__func__, hc->hc_num);
+		/* dwc_otg_dump_global_registers(core_if); */
+		/* dwc_otg_dump_host_registers(core_if); */
+#endif
+		return;
+	}
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	/* No need to set the enable bit in DDMA for disabling the channel. */
+	/* TODO check it everywhere channel is disabled */
+	if (!core_if->core_params->dma_desc_enable)
+		hcchar.b.chen = 1;
+	hcchar.b.chdis = 1;
+	if (!core_if->dma_enable) {
+		/* Check for space in the request queue to issue the halt. */
+		if (hc->ep_type == USB_ENDPOINT_XFER_CONTROL
+			|| hc->ep_type == USB_ENDPOINT_XFER_BULK) {
+			nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+			if (nptxsts.b.nptxqspcavail == 0)
+				hcchar.b.chen = 0;
+		} else {
+			hptxsts.d32 =
+				dwc_read_reg32(&host_global_regs->hptxsts);
+			if ((hptxsts.b.ptxqspcavail == 0) ||
+				 (core_if->queuing_high_bandwidth))
+				hcchar.b.chen = 0;
+		}
+	}
+	/* Order the HCCHAR update after the mask/status writes above. */
+	wmb();
+
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	hc->halt_status = _halt_status;
+	if (hcchar.b.chen) {
+		hc->halt_pending = 1;
+		hc->halt_on_queue = 0;
+	} else
+		hc->halt_on_queue = 1;
+
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	DWC_DEBUGPL(DBG_HCDV, "	 hcchar: 0x%08x\n", hcchar.d32);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_pending: %d\n", hc->halt_pending);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_on_queue: %d\n", hc->halt_on_queue);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_status: %d\n", hc->halt_status);
+	return;
+}
+
+
+/**
+ * Clears the transfer state for a host channel. This function is normally
+ * called after a transfer is done and the host channel is being released.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param hc Identifies the host channel to clean up.
+ */
+void dwc_otg_hc_cleanup(struct dwc_otg_core_if *core_if, struct dwc_hc *hc)
+{
+	struct dwc_otg_hc_regs __iomem *hc_regs;
+	hc->xfer_started = 0;
+
+	/*
+	 * Clear channel interrupt enables and any unhandled channel interrupt
+	 * conditions.
+	 */
+	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+	dwc_write_reg32(&hc_regs->hcintmsk, 0);
+	dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
+
+#ifdef DEBUG
+	del_timer(&core_if->hc_xfer_timer[hc->hc_num]);
+	{
+		union hcchar_data hcchar;
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		if (hcchar.b.chdis) {
+			DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+				  __func__, hc->hc_num, hcchar.d32);
+		}
+	}
+#endif
+}
+
+
+/**
+ * Sets the channel property that indicates in which frame a periodic transfer
+ * should occur. This is always set to the _next_ frame. This function has no
+ * effect on non-periodic transfers.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param hc Identifies the host channel to set up and its properties.
+ * @param _hcchar Current value of the HCCHAR register for the specified host
+ * channel.
+ */
+static inline void hc_set_even_odd_frame(struct dwc_otg_core_if *core_if,
+					 struct dwc_hc *hc,
+					 union hcchar_data *_hcchar)
+{
+	if (hc->ep_type == USB_ENDPOINT_XFER_INT ||
+			hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+		union hfnum_data hfnum;
+		hfnum.d32 =
+		dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum);
+
+		/* 1 if the _next_ frame is odd, 0 if it is even. */
+		_hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+
+#ifdef DEBUG
+		/* Sample frame-remaining stats for SSPLIT INT transfers. */
+		if (hc->ep_type == USB_ENDPOINT_XFER_INT && hc->do_split
+			&& !hc->complete_split) {
+			switch (hfnum.b.frnum & 0x7) {
+			case 7:
+				core_if->hfnum_7_samples++;
+				core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
+				break;
+			case 0:
+				core_if->hfnum_0_samples++;
+				core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
+				break;
+			default:
+				core_if->hfnum_other_samples++;
+				core_if->hfnum_other_frrem_accum +=
+				    hfnum.b.frrem;
+				break;
+			}
+		}
+#endif	/* DEBUG */
+	}
+}
+
+#ifdef DEBUG
+/* Debug timer callback: warn when a host-channel transfer exceeds 10s. */
+static void hc_xfer_timeout(unsigned long _ptr)
+{
+	struct hc_xfer_info *xfer_info = (struct hc_xfer_info *) _ptr;
+	int hc_num = xfer_info->hc->hc_num;
+	DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+	DWC_WARN("	start_hcchar_val 0x%08x\n",
+		  xfer_info->core_if->start_hcchar_val[hc_num]);
+}
+#endif	/* DEBUG */
+
+static void set_pid_isoc(struct dwc_hc *hc)
+{
+	/* Set up the initial data PID for a (high-bandwidth) ISOC transfer. */
+	if (hc->speed == USB_SPEED_HIGH) {
+		if (hc->ep_is_in) {
+			if (hc->multi_count == 1) {
+				hc->data_pid_start =
+				    DWC_OTG_HC_PID_DATA0;
+			} else if (hc->multi_count == 2) {
+				hc->data_pid_start =
+				    DWC_OTG_HC_PID_DATA1;
+			} else {
+				hc->data_pid_start =
+				    DWC_OTG_HC_PID_DATA2;
+			}
+		} else {
+			if (hc->multi_count == 1) {
+				hc->data_pid_start =
+				    DWC_OTG_HC_PID_DATA0;
+			} else {
+				hc->data_pid_start =
+				    DWC_OTG_HC_PID_MDATA;
+			}
+		}
+	} else {
+		hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+	}
+}
+/*
+ * This function does the setup for a data transfer for a host channel and
+ * starts the transfer. May be called in either Slave mode or DMA mode. In
+ * Slave mode, the caller must ensure that there is sufficient space in the
+ * request queue and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets will be loaded in
+ * the Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param hc Information needed to initialize the host channel. The xfer_len
+ * value may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ * to reflect the final xfer_len value.
+ */
+void
+dwc_otg_hc_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_hc *hc)
+{
+	union hcchar_data hcchar;
+	union hctsiz_data hctsiz;
+	u16 num_packets;
+	u32 max_hc_xfer_size = core_if->core_params->max_transfer_size;
+	u16 max_hc_pkt_count = core_if->core_params->max_packet_count;
+	struct dwc_otg_hc_regs __iomem *hc_regs =
+					core_if->host_if->hc_regs[hc->hc_num];
+	hctsiz.d32 = 0;
+	if (hc->do_ping) {
+		if (!core_if->dma_enable) {
+			/* Slave mode: issue the PING directly and return. */
+			dwc_otg_hc_do_ping(core_if, hc);
+			hc->xfer_started = 1;
+			return;
+		} else {
+			hctsiz.b.dopng = 1;
+		}
+	}
+	if (hc->do_split) {
+		num_packets = 1;
+		if (hc->complete_split && !hc->ep_is_in)
+			/*
+			 * For CSPLIT OUT Transfer, set the size to 0 so the
+			 * core doesn't expect any data written to the FIFO.
+			 */
+			hc->xfer_len = 0;
+		else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet))
+			hc->xfer_len = hc->max_packet;
+		else if (!hc->ep_is_in && (hc->xfer_len > 188))
+			hc->xfer_len = 188;
+
+		hctsiz.b.xfersize = hc->xfer_len;
+	} else {
+		/*
+		 * Ensure that the transfer length and packet count will fit
+		 * in the widths allocated for them in the HCTSIZn register.
+		 */
+		if (hc->ep_type == USB_ENDPOINT_XFER_INT
+			|| hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+			/*
+			 * Make sure the transfer size is no larger than one
+			 * (micro)frame's worth of data. (A check was done
+			 * when the periodic transfer was accepted to ensure
+			 * that a (micro)frame's worth of data can be
+			 * programmed into a channel.)
+			 */
+			u32 max_periodic_len = hc->multi_count * hc->max_packet;
+			if (hc->xfer_len > max_periodic_len)
+				hc->xfer_len = max_periodic_len;
+		} else if (hc->xfer_len > max_hc_xfer_size)
+			/*
+			 * Make sure that xfer_len is a multiple of
+			 * max packet size.
+			 */
+			hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
+
+		if (hc->xfer_len > 0) {
+			num_packets = (hc->xfer_len + hc->max_packet - 1) /
+					hc->max_packet;
+			/* Clamp to the core's maximum packet count. */
+			if (num_packets > max_hc_pkt_count) {
+				num_packets = max_hc_pkt_count;
+				hc->xfer_len = num_packets * hc->max_packet;
+			}
+		} else
+			/* Need 1 packet for transfer length of 0. */
+			num_packets = 1;
+
+		if (hc->ep_is_in)
+			/*
+			 * Always program an integral # of max packets
+			 * for IN transfers.
+			 */
+			hc->xfer_len = num_packets * hc->max_packet;
+
+		if (hc->ep_type == USB_ENDPOINT_XFER_INT
+			|| hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+			/*
+			 * Make sure that the multi_count field matches the
+			 * actual transfer length.
+			 */
+			hc->multi_count = num_packets;
+
+
+		if (hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+			set_pid_isoc(hc);
+
+		hctsiz.b.xfersize = hc->xfer_len;
+	}
+
+	hc->start_pkt_count = num_packets;
+	hctsiz.b.pktcnt = num_packets;
+	hctsiz.b.pid = hc->data_pid_start;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	DWC_DEBUGPL(DBG_HCDV, "	 Xfer Size: %d\n", hctsiz.b.xfersize);
+	DWC_DEBUGPL(DBG_HCDV, "	 Num Pkts: %d\n", hctsiz.b.pktcnt);
+	DWC_DEBUGPL(DBG_HCDV, "	 Start PID: %d\n", hctsiz.b.pid);
+	if (core_if->dma_enable) {
+		dma_addr_t dma_addr;
+		if (hc->align_buff)
+			dma_addr = hc->align_buff;
+		else
+			dma_addr = (u32)hc->xfer_buff;
+		/* NOTE(review): (u32) cast truncates on 64-bit — confirm. */
+		dwc_write_reg32(&hc_regs->hcdma, dma_addr);
+	}
+
+	/* Start the split. */
+	if (hc->do_split) {
+		union hcsplt_data hcsplt;
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		hcsplt.b.spltena = 1;
+		dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
+		wmb();
+	}
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.multicnt = hc->multi_count;
+	hc_set_even_odd_frame(core_if, hc, &hcchar);
+
+#ifdef DEBUG
+	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
+	if (hcchar.b.chdis) {
+		DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+			  __func__, hc->hc_num, hcchar.d32);
+	}
+
+#endif
+
+	/* Set host channel enable after all other setup is complete. */
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+	/* Ensure HCTSIZ/HCDMA writes land before the channel is enabled. */
+	wmb();
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+	hc->xfer_started = 1;
+	hc->requests++;
+	if (!core_if->dma_enable && !hc->ep_is_in && hc->xfer_len > 0)
+		/* Load OUT packet into the appropriate Tx FIFO. */
+		dwc_otg_hc_write_packet(core_if, hc);
+
+#ifdef DEBUG
+	/* Start a timer for this transfer. */
+	core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout;
+	core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
+	core_if->hc_xfer_info[hc->hc_num].hc = hc;
+	core_if->hc_xfer_timer[hc->hc_num].data =
+		(unsigned long)(&core_if->hc_xfer_info[hc->hc_num]);
+	core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ * 10);
+	add_timer(&core_if->hc_xfer_timer[hc->hc_num]);
+#endif
+}
+
+/**
+ *This function does the setup for a data transfer for a host channel
+ *and starts the transfer in Descriptor DMA mode.
+ *
+ *Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
+ *Sets PID and NTD values. For periodic transfers
+ *initializes SCHED_INFO field with micro-frame bitmap.
+ *
+ *Initializes HCDMA register with descriptor list address and CTD value
+ *then starts the transfer via enabling the channel.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param hc Information needed to initialize the host channel.
+ */
+void
+dwc_otg_hc_start_transfer_ddma(struct dwc_otg_core_if *core_if,
+				struct dwc_hc *hc)
+{
+	struct dwc_otg_hc_regs __iomem *hc_regs =
+					core_if->host_if->hc_regs[hc->hc_num];
+	union hcchar_data hcchar;
+	union hctsiz_data hctsiz;
+	union hcdma_data hcdma;
+
+	hctsiz.d32 = 0;
+
+	/*Do Ping is only set for OUT (non-IN) endpoints. */
+	if (hc->do_ping && !hc->ep_is_in)
+		hctsiz.b_ddma.dopng = 1;
+
+	if (hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+		set_pid_isoc(hc);
+
+	/*Packet Count and Xfer Size are not used in Descriptor DMA mode */
+	hctsiz.b_ddma.pid = hc->data_pid_start;
+	/*0 - 1 descriptor, 1 - 2 descriptors, etc. */
+	hctsiz.b_ddma.ntd = hc->ntd - 1;
+	/*Non-zero only for high-speed interrupt endpoints */
+	hctsiz.b_ddma.schinfo = hc->schinfo;
+
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	/*Read the PID back through the same b_ddma layout it was
+	 *programmed with above (previously printed via hctsiz.b). */
+	DWC_DEBUGPL(DBG_HCDV, "	 Start PID: %d\n", hctsiz.b_ddma.pid);
+	DWC_DEBUGPL(DBG_HCDV, "	 NTD: %d\n", hctsiz.b_ddma.ntd);
+
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	hcdma.d32 = 0;
+	/*The descriptor list base is stored shifted down by 11 bits. */
+	hcdma.b.dma_addr = ((u32)hc->desc_list_addr) >> 11;
+
+	/*Always start from first descriptor. */
+	hcdma.b.ctd = 0;
+	dwc_write_reg32(&hc_regs->hcdma, hcdma.d32);
+
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.multicnt = hc->multi_count;
+
+#ifdef DEBUG
+	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
+	if (hcchar.b.chdis) {
+		DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+			 __func__, hc->hc_num, hcchar.d32);
+	}
+#endif
+
+	/*Set host channel enable after all other setup is complete. */
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+
+	wmb();
+
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+	hc->xfer_started = 1;
+	hc->requests++;
+
+#if 0
+	if ((hc->ep_type != USB_ENDPOINT_XFER_INT) &&
+			(hc->ep_type != USB_ENDPOINT_XFER_ISOC)) {
+		core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
+		core_if->hc_xfer_info[hc->hc_num].hc = hc;
+		/*Start a timer for this transfer. */
+		DWC_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
+	}
+
+#endif
+
+}
+/**
+ *This function continues a data transfer that was started by previous call
+ *to dwc_otg_hc_start_transfer. The caller must ensure there is
+ *sufficient space in the request queue and Tx Data FIFO. This function
+ *should only be called in Slave mode. In DMA mode, the controller acts
+ *autonomously to complete transfers programmed to a host channel.
+ *
+ *For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ *if there is any data remaining to be queued. For an IN transfer, another
+ *data packet is always requested. For the SETUP phase of a control transfer,
+ *this function does nothing.
+ *
+ *@return 1 if a new request is queued, 0 if no more requests are required
+ *for this transfer.
+ */
+int dwc_otg_hc_continue_transfer(struct dwc_otg_core_if *core_if,
+				struct dwc_hc *hc)
+{
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	if (hc->do_split)
+		/*SPLITs always queue just once per channel */
+		return 0;
+	else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP)
+		/*SETUPs are queued only once since they can't be NAKed. */
+		return 0;
+	else if (hc->ep_is_in) {
+		/*
+		 * Always queue another request for other IN transfers. If
+		 * back-to-back INs are issued and NAKs are received for both,
+		 * the driver may still be processing the first NAK when the
+		 * second NAK is received. When the interrupt handler clears
+		 * the NAK interrupt for the first NAK, the second NAK will
+		 * not be seen. So we can't depend on the NAK interrupt
+		 * handler to requeue a NAKed request. Instead, IN requests
+		 * are issued each time this function is called. When the
+		 * transfer completes, the extra requests for the channel will
+		 * be flushed.
+		 */
+		union hcchar_data hcchar;
+		struct dwc_otg_hc_regs __iomem *hc_regs =
+			core_if->host_if->hc_regs[hc->hc_num];
+
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		hc_set_even_odd_frame(core_if, hc, &hcchar);
+		hcchar.b.chen = 1;
+		hcchar.b.chdis = 0;
+		DWC_DEBUGPL(DBG_HCDV, "	 IN xfer: hcchar = 0x%08x\n",
+				hcchar.d32);
+
+		wmb();
+
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		hc->requests++;
+		return 1;
+	} else {
+		/*OUT transfers. */
+		if (hc->xfer_count < hc->xfer_len) {
+			if (hc->ep_type == USB_ENDPOINT_XFER_INT ||
+				hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+				union hcchar_data hcchar;
+				struct dwc_otg_hc_regs __iomem *hc_regs;
+				hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+				hc_set_even_odd_frame(core_if, hc, &hcchar);
+			}
+
+			/*Load OUT packet into the appropriate Tx FIFO. */
+			dwc_otg_hc_write_packet(core_if, hc);
+			hc->requests++;
+			return 1;
+		} else {
+			return 0;
+		}
+	}
+}
+
+/**
+ *Starts a PING transfer. This function should only be called in Slave mode.
+ *The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
+ */
+void dwc_otg_hc_do_ping(struct dwc_otg_core_if *core_if, struct dwc_hc *hc)
+{
+	struct dwc_otg_hc_regs __iomem *regs =
+					core_if->host_if->hc_regs[hc->hc_num];
+	union hctsiz_data size_reg;
+	union hcchar_data chan_reg;
+
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+
+	/*Request a single PING packet via HCTSIZ. */
+	size_reg.d32 = 0;
+	size_reg.b.dopng = 1;
+	size_reg.b.pktcnt = 1;
+	dwc_write_reg32(&regs->hctsiz, size_reg.d32);
+
+	/*Enable the channel with the disable bit cleared. */
+	chan_reg.d32 = dwc_read_reg32(&regs->hcchar);
+	chan_reg.b.chen = 1;
+	chan_reg.b.chdis = 0;
+
+	wmb();
+
+	dwc_write_reg32(&regs->hcchar, chan_reg.d32);
+}
+
+
+#ifdef OTG_PPC_PLB_DMA      /*PPC_PLB_DMA mode */
+/*
+ * Dump the register state of the given PLB DMA channel.
+ * Only used in debug mode.
+ */
+void ppc4xx_dump_dma(unsigned int dmanr)
+{
+	int reg;
+
+	printk(KERN_DEBUG"%32s:\n", __func__);
+	/*Each channel occupies eight consecutive DCRs from DCRN_DMACR0. */
+	for (reg = 0; reg <= 7; reg++) {
+		printk(KERN_DEBUG"%32s dmanr=%d , 0x%x=0x%x\n", __func__, dmanr,
+			DCRN_DMACR0 + dmanr*8 + reg,
+			mfdcr(DCRN_DMACR0 + dmanr*8 + reg)
+			);
+	}
+	printk(KERN_DEBUG"%32s DCRN_DMASR=0x%x\n", __func__, mfdcr(DCRN_DMASR));
+}
+
+/*
+ * This function programs the PLB-DMA engine to perform MEM-MEM transfer
+ * This is used to RD & WR from the DWC_FIFO by the PLB_DMA engine
+ */
+void ppc4xx_start_plb_dma(struct dwc_otg_core_if *core_if, void *src, void *dst,
+		unsigned int length, unsigned int use_interrupt,
+		unsigned int dma_ch, unsigned int dma_dir)
+{
+	int res = 0;
+	unsigned int control;
+	ppc_dma_ch_t p_init;
+
+	/*Zero the channel descriptor before filling it in. The previous
+	 *call had the fill byte and length arguments swapped
+	 *(memset(ptr, sizeof, 0)), which cleared zero bytes and left
+	 *p_init uninitialized. */
+	memset(&p_init, 0, sizeof(p_init));
+	p_init.polarity = 0;
+	p_init.pwidth = PW_32;
+	p_init.in_use = 0;
+	/*sai/dai: per the field names, select which side's address
+	 *increments - memory side for TX, memory side for RX. */
+	if (dma_dir == OTG_TX_DMA) {
+		p_init.sai = 1;
+		p_init.dai = 0;
+	} else if (dma_dir == OTG_RX_DMA) {
+		p_init.sai = 0;
+		p_init.dai = 1;
+	}
+	res = ppc4xx_init_dma_channel(dma_ch, &p_init);
+	if (res) {
+		printk(KERN_DEBUG"%32s: init_dma_channel return %d %d "
+				"bytes dest %p\n",
+				__func__, res, length, dst);
+	}
+	res = ppc4xx_clr_dma_status(dma_ch);
+	if (res) {
+		printk(KERN_DEBUG"%32s: ppc4xx_clr_dma_status %d\n",
+				__func__, res);
+	}
+
+	/*The FIFO-side address is the controller's physical base plus the
+	 *register's offset from the mapped global register block. */
+	if (dma_dir == OTG_TX_DMA) {
+		ppc4xx_set_src_addr(dma_ch, virt_to_bus(src));
+		ppc4xx_set_dst_addr(dma_ch, (core_if->phys_addr +
+				(dst - (void *)(core_if->core_global_regs))));
+	} else if (dma_dir == OTG_RX_DMA) {
+		ppc4xx_set_src_addr(dma_ch, (core_if->phys_addr +
+			(src - (void *)(core_if->core_global_regs))));
+		ppc4xx_set_dst_addr(dma_ch, virt_to_bus(dst));
+	}
+
+	ppc4xx_set_dma_mode(dma_ch, DMA_MODE_MM);
+	ppc4xx_set_dma_count(dma_ch, length);
+
+	/*flush cache before enabling DMA transfer */
+	if (dma_dir == OTG_TX_DMA) {
+		flush_dcache_range((unsigned long)src,
+				(unsigned long)(src + length));
+	} else if (dma_dir == OTG_RX_DMA) {
+		flush_dcache_range((unsigned long)dst,
+				(unsigned long)(dst + length));
+	}
+
+	if (use_interrupt)
+		res = ppc4xx_enable_dma_interrupt(dma_ch);
+	else
+		res = ppc4xx_disable_dma_interrupt(dma_ch);
+
+	if (res) {
+		printk(KERN_DEBUG"%32s: en/disable_dma_interrupt "
+				"%d return %d per %d\n",
+				__func__, use_interrupt, res,
+		ppc4xx_get_peripheral_width(dma_ch));
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dma_ch * 8));
+
+	/*Mask off the BEN/PSC/PWC/PHC/PL fields of the control word. */
+	control &= ~(SET_DMA_BEN(1));
+	control &= ~(SET_DMA_PSC(3));
+	control &= ~(SET_DMA_PWC(0x3f));
+	control &= ~(SET_DMA_PHC(0x7));
+	control &= ~(SET_DMA_PL(1));
+
+	mtdcr(DCRN_DMACR0 + (dma_ch * 8), control);
+
+#ifdef OTG_PPC_PLB_DMA_DBG
+	ppc4xx_dump_dma(dma_ch);
+#endif
+	ppc4xx_enable_dma(dma_ch);
+}
+#endif
+
+/*
+ * This function writes a packet into the Tx FIFO associated with the Host
+ * Channel. For a channel associated with a non-periodic EP, the non-periodic
+ * Tx FIFO is written. For a channel associated with a periodic EP, the
+ * periodic Tx FIFO is written. This function should only be called in Slave
+ * mode.
+ *
+ * Upon return the xfer_buff and xfer_count fields in hc are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+void dwc_otg_hc_write_packet(struct dwc_otg_core_if *core_if, struct dwc_hc *hc)
+{
+#ifndef OTG_PPC_PLB_DMA
+	u32 i;
+#endif
+	u32 remaining_count;
+	u32 byte_count;
+	u32 dword_count;
+	u32 *data_buff = (u32 *) (hc->xfer_buff);
+	u32 __iomem *data_fifo = core_if->data_fifo[hc->hc_num];
+#if !defined(OTG_PPC_PLB_DMA_TASKLET) &&  defined(OTG_PPC_PLB_DMA)
+	u32 dma_sts = 0;
+#endif
+	/*Write at most one max-packet's worth of data. */
+	remaining_count = hc->xfer_len - hc->xfer_count;
+	if (remaining_count > hc->max_packet)
+		byte_count = hc->max_packet;
+	else
+		byte_count = remaining_count;
+
+	/*FIFO accesses are 32-bit: round the byte count up to DWORDs. */
+	dword_count = (byte_count + 3) / 4;
+
+#ifdef OTG_PPC_PLB_DMA
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+
+	if (hc->xfer_len < USB_BUFSIZ) {
+		int i;
+		if ((((unsigned long)data_buff) & 0x3) == 0) {
+			/*xfer_buff is DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++)
+				dwc_write_datafifo32(data_fifo, *data_buff);
+		} else {
+			/*xfer_buff is not DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++)
+				dwc_write_datafifo32(data_fifo,
+						get_unaligned(data_buff));
+		}
+	} else {
+		DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n",
+				__func__, dword_count);
+		atomic_set(&release_later, 1);
+
+		dwc_otg_disable_global_interrupts(core_if);
+
+		/*Large transfer: defer the copy to the PLB-DMA tasklet. */
+		core_if->dma_xfer.dma_data_buff = data_buff;
+		core_if->dma_xfer.dma_data_fifo = (void *)data_fifo;
+		core_if->dma_xfer.dma_count = dword_count;
+		core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+		tasklet_schedule(core_if->plbdma_tasklet);
+	}
+#else	/*!OTG_PPC_PLB_DMA_TASKLET */
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/*call tx_dma - src,dest,len,intr */
+		/*Fixed: OTG_TX_DMA was previously split across two lines
+		 *("O" / "TG_TX_DMA"), which did not compile. */
+		ppc4xx_start_plb_dma(core_if,
+					(void *)data_buff,
+					data_fifo,
+					(dword_count * 4),
+					PLB_DMA_INT_DIS,
+					PLB_DMA_CH,
+					OTG_TX_DMA
+					);
+	} else {
+		ppc4xx_start_plb_dma(core_if,
+					(void *)get_unaligned(data_buff),
+					data_fifo,
+					(dword_count * 4),
+					PLB_DMA_INT_DIS,
+					PLB_DMA_CH,
+					OTG_TX_DMA
+					);
+	}
+
+	while (mfdcr(DCRN_DMACR0 + (PLB_DMA_CH*8)) & DMA_CE_ENABLE)
+		; /*tight spin*/
+
+	dma_sts = (u32)ppc4xx_get_dma_status();
+#ifdef OTG_PPC_PLB_DMA_DBG
+	if (!(dma_sts & DMA_CS0))
+		printk(KERN_DEBUG"Status (Terminal Count not occured) 0x%08x\n",
+				mfdcr(DCRN_DMASR));
+#endif
+	if (dma_sts & DMA_CH0_ERR)
+		printk(KERN_DEBUG"Status (Channel Error) 0x%08x\n",
+				mfdcr(DCRN_DMASR));
+
+	ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PPC_PLB_DMA_DBG
+	/*Fixed: this printk was missing its closing parenthesis. */
+	printk(KERN_DEBUG"%32s DMA Status =0x%08x\n", __func__,
+			mfdcr(DCRN_DMASR));
+#endif
+
+#endif	/*OTG_PPC_PLB_DMA_TASKLET */
+
+
+#else
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/*xfer_buff is DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++)
+			dwc_write_datafifo32(data_fifo, *data_buff);
+	} else {
+		/*xfer_buff is not DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++)
+			dwc_write_datafifo32(data_fifo,
+					get_unaligned(data_buff));
+	}
+#endif
+	hc->xfer_count += byte_count;
+	hc->xfer_buff += byte_count;
+}
+
+/**
+ *This function reads a setup packet from the Rx FIFO into the destination
+ *buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
+ *Interrupt routine when a SETUP packet has been received in Slave mode.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param dest Destination buffer for packet data.
+ */
+void dwc_otg_read_setup_packet(struct dwc_otg_core_if *core_if, u32 *dest)
+{
+	u32 __iomem *fifo = core_if->data_fifo[0];
+
+	/*A SETUP transaction carries exactly 8 bytes: pop two DWORDs
+	 *off the receive data FIFO into memory. */
+	dest[0] = dwc_read_datafifo32(fifo);
+	dest[1] = dwc_read_datafifo32(fifo);
+}
+
+/**
+ *This function enables EP0 OUT to receive SETUP packets and configures EP0
+ *IN for transmitting packets.	 It is normally called when the
+ *"Enumeration Done" interrupt occurs.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param ep The EP0 data.
+ */
+void dwc_otg_ep0_activate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union dsts_data dsts;
+	union depctl_data diepctl;
+	union depctl_data doepctl;
+	union dctl_data dctl = {.d32 = 0};
+
+	/*NOTE(review): the ep argument is not referenced in this body. */
+
+	/*Read the Device Status and Endpoint 0 Control registers */
+	dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
+	diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
+	doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
+
+	/*Set the MPS of the IN EP based on the enumeration speed */
+	switch (dsts.b.enumspd) {
+
+	/*All HS/FS enumeration speeds share the 64-byte EP0 MPS encoding. */
+	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_64;
+		break;
+	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_8;
+		break;
+	}
+	dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+	/*Enable OUT EP for receive */
+	doepctl.b.epena = 1;
+	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+			dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+			dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+
+#endif	/* */
+	/*cgnpinnak: per the field name this clears the global non-periodic
+	 *IN NAK - TODO confirm against the DWC_otg databook. */
+	dctl.b.cgnpinnak = 1;
+	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+	DWC_DEBUGPL(DBG_PCDV, "dctl=%0x\n",
+			dwc_read_reg32(&dev_if->dev_global_regs->dctl));
+}
+
+
+#if defined(DEBUG) && defined(VERBOSE)
+/*Hex-dump up to 511 bytes of a message buffer, 16 bytes per line. */
+void dwc_otg_dump_msg(const u8 *buf, unsigned int length)
+{
+	unsigned int offset = 0;
+	char line[52];
+
+	if (length >= 512)
+		return;
+
+	while (length > 0) {
+		unsigned int chunk = min(length, 16u);
+		unsigned int i;
+		char *p = line;
+
+		for (i = 0; i < chunk; ++i) {
+			/*Visual gap between the two groups of 8 bytes. */
+			if (i == 8)
+				*p++ = ' ';
+			sprintf(p, " %02x", buf[i]);
+			p += 3;
+		}
+		*p = 0;
+		DWC_PRINT("%6x: %s\n", offset, line);
+		buf += chunk;
+		offset += chunk;
+		length -= chunk;
+	}
+}
+#endif	/* */
+
+/**
+ *This function writes a packet into the Tx FIFO associated with the
+ *EP.	For non-periodic EPs the non-periodic Tx FIFO is written.  For
+ *periodic EPs the periodic Tx FIFO associated with the EP is written
+ *with all packets for the next micro-frame.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param ep The EP to write packet for.
+ *@param dma Indicates if DMA is being used.
+ */
+void dwc_otg_ep_write_packet(struct dwc_otg_core_if *core_if, struct dwc_ep *ep,
+			     int dma)
+{
+	/**
+	 * The buffer is padded to DWORD on a per packet basis in
+	 * slave/dma mode if the MPS is not DWORD aligned. The last
+	 * packet, if short, is also padded to a multiple of DWORD.
+	 *
+	 * ep->xfer_buff always starts DWORD aligned in memory and is a
+	 * multiple of DWORD in length
+	 *
+	 * ep->xfer_len can be any number of bytes
+	 *
+	 * ep->xfer_count is a multiple of ep->maxpacket until the last
+	 *	packet
+	 *
+	 * FIFO access is DWORD
+	 */
+#ifndef OTG_PPC_PLB_DMA
+	u32 i;
+#endif
+	u32 byte_count;
+	u32 dword_count;
+	u32 __iomem *fifo;
+	u32 *data_buff = (u32 *) ep->xfer_buff;
+#if !defined(OTG_PPC_PLB_DMA_TASKLET) && defined(OTG_PPC_PLB_DMA)
+	u32 dma_sts = 0;
+#endif
+
+	/*Nothing left to send: bail out early. */
+	if (ep->xfer_count >= ep->xfer_len) {
+		DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s() No data for EP%d!!!\n",
+				__func__, ep->num);
+		return;
+	}
+
+	/*Find the byte length of the packet either short packet or MPS */
+	if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket)
+		byte_count = ep->xfer_len - ep->xfer_count;
+	else
+		byte_count = ep->maxpacket;
+
+
+	/*Find the DWORD length, padded by extra bytes as neccessary if MPS
+	 *is not a multiple of DWORD */
+	dword_count = (byte_count + 3) / 4;
+
+	dwc_otg_dump_msg(ep->xfer_buff, byte_count);
+
+	/**@todo NGS Where are the Periodic Tx FIFO addresses
+	 *intialized?	What should this be? */
+	fifo = core_if->data_fifo[ep->num];
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
+		       fifo, data_buff, *data_buff, byte_count);
+	/*In DMA mode (dma != 0) the FIFO is not written here; only the
+	 *xfer counters at the bottom are advanced. */
+	if (!dma) {
+#ifdef OTG_PPC_PLB_DMA
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+		if (byte_count < USB_BUFSIZ) {
+			int i;
+			for (i = 0; i < dword_count; i++, data_buff++)
+				dwc_write_datafifo32(fifo, *data_buff);
+		} else {
+			DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n",
+					__func__, dword_count);
+			atomic_set(&release_later, 1);
+
+			dwc_otg_disable_global_interrupts(core_if);
+
+			/*Large transfer: hand the copy to the PLB-DMA
+			 *tasklet. */
+			core_if->dma_xfer.dma_data_buff = data_buff;
+			core_if->dma_xfer.dma_data_fifo = fifo;
+			core_if->dma_xfer.dma_count = dword_count;
+			core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+			tasklet_schedule(core_if->plbdma_tasklet);
+		}
+#else  /*!OTG_PPC_PLB_DMA_TASKLET */
+		ppc4xx_start_plb_dma(core_if, data_buff, fifo, dword_count * 4,
+			PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_TX_DMA);
+
+		/*NOTE(review): this polls DMA_CH0 while the transfer above
+		 *was started on PLB_DMA_CH - confirm both name the same
+		 *channel. */
+		while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE)
+			; /*tight poll loop*/
+
+		dma_sts = (u32)ppc4xx_get_dma_status();
+
+#ifdef OTG_PPC_PLB_DMA_DBG
+		if (!(dma_sts & DMA_CS0))
+			printk(KERN_DEBUG"DMA Status (Terminal Count not "
+				"Occurred) 0x%08x\n", mfdcr(DCRN_DMASR));
+#endif
+		if (dma_sts & DMA_CH0_ERR)
+			printk(KERN_DEBUG"DMA Status (Channel 0 Error) "
+					"0x%08x\n",
+					mfdcr(DCRN_DMASR));
+
+		ppc4xx_clr_dma_status(PLB_DMA_CH);
+
+#ifdef OTG_PPC_PLB_DMA_DBG
+		printk(KERN_DEBUG"%32s DMA Status =0x%08x\n",
+				__func__,
+				mfdcr(DCRN_DMASR));
+#endif
+
+#endif	/*OTG_PPC_PLB_DMA_TASKLET */
+
+#else	/*DWC_SLAVE mode */
+		if ((((unsigned long)data_buff) & 0x3) == 0) {
+			/*xfer_buff is DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++)
+				dwc_write_datafifo32(fifo, *data_buff);
+		} else {
+			/*xfer_buff is not DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++)
+				dwc_write_datafifo32(fifo,
+						get_unaligned(data_buff));
+		}
+#endif
+	}
+
+	/*Advance the transfer bookkeeping by the bytes consumed. */
+	ep->xfer_count += byte_count;
+	ep->xfer_buff += byte_count;
+	ep->dma_addr += byte_count;
+}
+
+/**
+ *This function reads a packet from the Rx FIFO into the destination
+ *buffer.	To read SETUP data use dwc_otg_read_setup_packet.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param dest	  Destination buffer for the packet.
+ *@param bytes  Number of bytes to copy to the destination.
+ */
+void dwc_otg_read_packet(struct dwc_otg_core_if *core_if,
+			 u8 *dest,  u16 bytes)
+{
+#ifndef OTG_PPC_PLB_DMA
+	int i;
+#endif
+	int word_count = (bytes + 3) / 4;
+	u32 __iomem *fifo = core_if->data_fifo[0];
+	u32 *data_buff = (u32 *) dest;
+#if !defined(OTG_PPC_PLB_DMA_TASKLET) && defined(OTG_PPC_PLB_DMA)
+	u32 dma_sts = 0;
+#endif
+
+	/**
+	 *@todo Account for the case where dest is not dword aligned. This
+	 *requires reading data from the FIFO into a u32 temp buffer,
+	 *then moving it into the data buffer.
+	 */
+
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV | DBG_SP), "%s(%p,%p,%d)\n", __func__,
+			 core_if, dest, bytes);
+#ifdef OTG_PPC_PLB_DMA
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+	if (bytes < USB_BUFSIZ) {
+		int i;
+		for (i = 0; i < word_count; i++, data_buff++)
+			*data_buff = dwc_read_datafifo32(fifo);
+	} else {
+		DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n",
+				__func__, bytes);
+		atomic_set(&release_later, 1);
+
+		dwc_otg_disable_global_interrupts(core_if);
+
+		/*plbdma tasklet */
+		core_if->dma_xfer.dma_data_buff = data_buff;
+		core_if->dma_xfer.dma_data_fifo = (void *)fifo;
+		core_if->dma_xfer.dma_count = word_count;
+		core_if->dma_xfer.dma_dir = OTG_RX_DMA;
+		tasklet_schedule(core_if->plbdma_tasklet);
+	}
+#else /*!OTG_PPC_PLB_DMA_TASKLET */
+	ppc4xx_start_plb_dma(core_if, (void *)fifo, data_buff, (word_count * 4),
+				PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_RX_DMA);
+
+	while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE)
+		; /*tight poll loop*/
+
+	dma_sts = (u32)ppc4xx_get_dma_status();
+#ifdef OTG_PPC_PLB_DMA_DBG
+	if (!(dma_sts & DMA_CS0)) {
+		printk(KERN_DEBUG"DMA Status (Terminal Count not occurred) "
+				"0x%08x\n",
+				mfdcr(DCRN_DMASR));
+	}
+#endif
+	if (dma_sts & DMA_CH0_ERR) {
+		printk(KERN_DEBUG"DMA Status (Channel 0 Error) 0x%08x\n",
+				mfdcr(DCRN_DMASR));
+	}
+	ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PPC_PLB_DMA_DBG
+	{
+		/*'i' is only declared in the non-PLB_DMA build above, so
+		 *this debug dump needs its own index to compile here. */
+		int i;
+
+		printk(KERN_DEBUG"%32s DMA Status =0x%08x\n", __func__,
+				mfdcr(DCRN_DMASR));
+		printk(KERN_DEBUG" Rxed buffer \n");
+		for (i = 0; i < bytes; i++)
+			printk(KERN_DEBUG" 0x%02x", *(dest + i));
+
+		printk(KERN_DEBUG" \n End of Rxed buffer \n");
+	}
+#endif
+#endif /*OTG_PPC_PLB_DMA_TASKLET */
+
+#else   /*DWC_SLAVE mode */
+
+	BUG_ON(data_buff == NULL);
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/*xfer_buff is DWORD aligned. */
+		for (i = 0; i < word_count; i++, data_buff++)
+			*data_buff = dwc_read_datafifo32(fifo);
+	} else {
+		/*xfer_buff is not DWORD aligned. */
+		for (i = 0; i < word_count; i++, data_buff++) {
+			u32 temp = dwc_read_datafifo32(fifo);
+			put_unaligned(temp, data_buff);
+		}
+	}
+
+#endif
+	return;
+}
+
+
+/**
+ *This functions reads the device registers and prints them
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_dev_registers(struct dwc_otg_core_if *core_if)
+{
+	int i;
+	u32 __iomem *addr;
+	DWC_PRINT("Device Global Registers\n");
+	addr = &core_if->dev_if->dev_global_regs->dcfg;
+	DWC_PRINT("DCFG		 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->dctl;
+	DWC_PRINT("DCTL		 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->dsts;
+	DWC_PRINT("DSTS		 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->diepmsk;
+	DWC_PRINT("DIEPMSK	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->doepmsk;
+	DWC_PRINT("DOEPMSK	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->daint;
+	DWC_PRINT("DAINT	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->daintmsk;
+	DWC_PRINT("DAINTMSK	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->dtknqr1;
+	DWC_PRINT("DTKNQR1	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	/*The extra token-queue registers only exist for deeper queues. */
+	if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
+		addr = &core_if->dev_if->dev_global_regs->dtknqr2;
+		DWC_PRINT("DTKNQR2	 @0x%p : 0x%08X\n",
+			   addr, dwc_read_reg32(addr));
+	}
+	addr = &core_if->dev_if->dev_global_regs->dvbusdis;
+	DWC_PRINT("DVBUSID	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->dev_if->dev_global_regs->dvbuspulse;
+	DWC_PRINT("DVBUSPULSE	@0x%p : 0x%08X\n",
+		   addr, dwc_read_reg32(addr));
+	if (core_if->hwcfg2.b.dev_token_q_depth > 14) {
+		addr = &core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
+		DWC_PRINT("DTKNQR3	 @0x%p : 0x%08X\n",
+			   addr, dwc_read_reg32(addr));
+	}
+	if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
+		addr = &core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+		DWC_PRINT("DTKNQR4	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+	}
+	/*Loop bounds are inclusive: index 0 is EP0. */
+	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+		DWC_PRINT("Device IN EP %d Registers\n", i);
+		addr = &core_if->dev_if->in_ep_regs[i]->diepctl;
+		DWC_PRINT("DIEPCTL	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->in_ep_regs[i]->diepint;
+		DWC_PRINT("DIEPINT	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->in_ep_regs[i]->dieptsiz;
+		DWC_PRINT("DIETSIZ	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->in_ep_regs[i]->diepdma;
+		DWC_PRINT("DIEPDMA	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->in_ep_regs[i]->dtxfsts;
+		DWC_PRINT("DTXFSTS	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->in_ep_regs[i]->diepdmab;
+		/*NOTE(review): the DIEPDMAB read is stubbed out below, so
+		 *this line always prints 0. */
+		DWC_PRINT("DIEPDMAB	 @0x%p : 0x%08X\n", addr,
+			   0 /*dwc_read_reg32(addr) */);
+	}
+	for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
+		DWC_PRINT("Device OUT EP %d Registers\n", i);
+		addr = &core_if->dev_if->out_ep_regs[i]->doepctl;
+		DWC_PRINT("DOEPCTL	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->out_ep_regs[i]->doepfn;
+		DWC_PRINT("DOEPFN	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->out_ep_regs[i]->doepint;
+		DWC_PRINT("DOEPINT	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->out_ep_regs[i]->doeptsiz;
+		DWC_PRINT("DOETSIZ	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->dev_if->out_ep_regs[i]->doepdma;
+		DWC_PRINT("DOEPDMA	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		if (core_if->dma_enable) {
+			/*Don't access this register in SLAVE mode */
+			addr = &core_if->dev_if->out_ep_regs[i]->doepdmab;
+			DWC_PRINT("DOEPDMAB	 @0x%p : 0x%08X\n",
+				   addr, dwc_read_reg32(addr));
+		}
+	}
+	return;
+}
+
+
+/**
+ *This function reads the host registers and prints them
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_host_registers(struct dwc_otg_core_if *core_if)
+{
+	int i;
+	u32 __iomem *addr;
+	DWC_PRINT("Host Global Registers\n");
+	addr = &core_if->host_if->host_global_regs->hcfg;
+	DWC_PRINT("HCFG		 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->host_if->host_global_regs->hfir;
+	DWC_PRINT("HFIR		 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->host_if->host_global_regs->hfnum;
+	DWC_PRINT("HFNUM	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->host_if->host_global_regs->hptxsts;
+	DWC_PRINT("HPTXSTS	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->host_if->host_global_regs->haint;
+	DWC_PRINT("HAINT	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	addr = &core_if->host_if->host_global_regs->haintmsk;
+	DWC_PRINT("HAINTMSK	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	/*HFLBADDR is only dumped when Descriptor DMA is enabled. */
+	if (core_if->dma_desc_enable) {
+		addr = &core_if->host_if->host_global_regs->hflbaddr;
+		DWC_PRINT("HFLBADDR	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+	}
+	addr = core_if->host_if->hprt0;
+	DWC_PRINT("HPRT0	 @0x%p : 0x%08X\n", addr,
+		   dwc_read_reg32(addr));
+	for (i = 0; i < core_if->core_params->host_channels; i++) {
+		DWC_PRINT("Host Channel %d Specific Registers\n", i);
+		addr = &core_if->host_if->hc_regs[i]->hcchar;
+		DWC_PRINT("HCCHAR	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->host_if->hc_regs[i]->hcsplt;
+		DWC_PRINT("HCSPLT	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->host_if->hc_regs[i]->hcint;
+		DWC_PRINT("HCINT	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->host_if->hc_regs[i]->hcintmsk;
+		DWC_PRINT("HCINTMSK	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->host_if->hc_regs[i]->hctsiz;
+		DWC_PRINT("HCTSIZ	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		addr = &core_if->host_if->hc_regs[i]->hcdma;
+		DWC_PRINT("HCDMA	 @0x%p : 0x%08X\n", addr,
+			   dwc_read_reg32(addr));
+		/*HCDMAB is likewise only dumped in Descriptor DMA mode. */
+		if (core_if->dma_desc_enable) {
+			addr = &core_if->host_if->hc_regs[i]->hcdmab;
+			DWC_PRINT("HCDMAB	 @0x%p : 0x%08X\n",
+					addr, dwc_read_reg32(addr));
+		}
+	}
+	return;
+}
+
+
+/**
+ *This function reads the core global registers and prints them
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_global_registers(struct dwc_otg_core_if *core_if)
+{
+	int i;
+	u32 __iomem *addr;
+	/*Terminate the banner with a newline like the device/host dump
+	 *functions do (it was missing here). */
+	DWC_PRINT("Core Global Registers\n");
+	addr = &core_if->core_global_regs->gotgctl;
+	DWC_PRINT("GOTGCTL	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gotgint;
+	DWC_PRINT("GOTGINT	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gahbcfg;
+	DWC_PRINT("GAHBCFG	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gusbcfg;
+	DWC_PRINT("GUSBCFG	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->grstctl;
+	DWC_PRINT("GRSTCTL	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gintsts;
+	DWC_PRINT("GINTSTS	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gintmsk;
+	DWC_PRINT("GINTMSK	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->grxstsr;
+	DWC_PRINT("GRXSTSR	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+
+	/*
+	 * GRXSTSP is deliberately not read: popping the receive status
+	 * queue here would consume an entry.
+	 * addr=&core_if->core_global_regs->grxstsp;
+	 * DWC_PRINT("GRXSTSP   @0x%p : 0x%08X\n", addr,
+	 * 	dwc_read_reg32(addr));
+	 */
+	addr = &core_if->core_global_regs->grxfsiz;
+	DWC_PRINT("GRXFSIZ	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gnptxfsiz;
+	DWC_PRINT("GNPTXFSIZ @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gnptxsts;
+	DWC_PRINT("GNPTXSTS	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gi2cctl;
+	DWC_PRINT("GI2CCTL	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gpvndctl;
+	DWC_PRINT("GPVNDCTL	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->ggpio;
+	DWC_PRINT("GGPIO	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->guid;
+	DWC_PRINT("GUID		 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->gsnpsid;
+	DWC_PRINT("GSNPSID	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->ghwcfg1;
+	DWC_PRINT("GHWCFG1	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->ghwcfg2;
+	DWC_PRINT("GHWCFG2	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->ghwcfg3;
+	DWC_PRINT("GHWCFG3	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->ghwcfg4;
+	DWC_PRINT("GHWCFG4	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->glpmcfg;
+	DWC_PRINT("GLPMCFG	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	addr = &core_if->core_global_regs->hptxfsiz;
+	DWC_PRINT("HPTXFSIZ	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+		addr = &core_if->core_global_regs->dptxfsiz_dieptxf[i];
+		DWC_PRINT("DPTXFSIZ[%d] @0x%p : 0x%08X\n", i,
+				addr, dwc_read_reg32(addr));
+	}
+	addr = core_if->pcgcctl;
+	DWC_PRINT("PCGCCTL	 @0x%p : 0x%08X\n", addr,
+			dwc_read_reg32(addr));
+}
+
+
+/**
+ *Flush a Tx FIFO.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param _num Tx FIFO to flush.
+ */
+void dwc_otg_flush_tx_fifo(struct dwc_otg_core_if *core_if,
+				  const int num)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union grstctl_data greset = {.d32 = 0 };
+	int count = 0;
+	DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "Flush Tx FIFO %d\n", num);
+	greset.b.txfflsh = 1;
+	greset.b.txfnum = num;
+	dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+	do {
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+				  __func__, greset.d32,
+				  dwc_read_reg32(&global_regs->gnptxsts));
+			break;
+		}
+		udelay(1);
+	} while (greset.b.txfflsh == 1);
+	/*Wait for 3 PHY Clocks */
+	udelay(1);
+}
+
+
+/**
+ *Flush Rx FIFO.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_flush_rx_fifo(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union grstctl_data greset = {.d32 = 0 };
+	int count = 0;
+	DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "%s\n", __func__);
+
+	greset.b.rxfflsh = 1;
+	dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+	do {
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! GRSTCTL=%0x\n",
+					__func__, greset.d32);
+			break;
+		}
+		udelay(1);
+	} while (greset.b.rxfflsh == 1);
+
+	/*Wait for 3 PHY Clocks */
+	udelay(1);
+}
+
+
+/**
+ *Do core a soft reset of the core.  Be careful with this because it
+ *resets all the internal state machines of the core.
+ */
+void dwc_otg_core_reset(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	u32 greset = 0;
+	int count = 0;
+	DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
+
+	/*Wait for AHB master IDLE state. */
+	do {
+		udelay(10);
+		greset = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 100000) {
+			DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n",
+					__func__, greset);
+			return;
+		}
+	} while ((greset & 0x80000000) == 0);
+
+	/*Core Soft Reset */
+	count = 0;
+	greset |= 1;
+	dwc_write_reg32(&global_regs->grstctl, greset);
+	wmb();
+	mdelay(1);
+	do {
+		udelay(10);
+		greset = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n",
+					__func__, greset);
+			break;
+		}
+	} while (greset & 0x1);
+
+	/*Wait for at least 3 PHY Clocks */
+	mdelay(500);
+}
+
+
+/**
+ *Register HCD callbacks.	The callbacks are used to start and stop
+ *the HCD for interrupt processing.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param _cb the HCD callback structure.
+ *@param _p pointer to be passed to callback function (usb_hcd*).
+ */
+void dwc_otg_cil_register_hcd_callbacks(struct dwc_otg_core_if *core_if,
+		       struct dwc_otg_cil_callbacks *_cb, void *_p)
+{
+	core_if->hcd_cb = _cb;
+	_cb->p = _p;
+}
+
+/**
+ *Register PCD callbacks.	The callbacks are used to start and stop
+ *the PCD for interrupt processing.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param _cb the PCD callback structure.
+ *@param _p pointer to be passed to callback function (pcd*).
+ */
+void dwc_otg_cil_register_pcd_callbacks(struct dwc_otg_core_if *core_if,
+		       struct dwc_otg_cil_callbacks *_cb, void *_p)
+{
+	core_if->pcd_cb = _cb;
+	_cb->p = _p;
+}
+
+/* A core parameter counts as "initialized" once it no longer holds
+ * the -1 "not set" sentinel. */
+static int dwc_otg_param_initialized(int val)
+{
+	return val != -1;
+}
+/* Return the core's current dma_enable state. */
+u8 dwc_otg_is_dma_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->dma_enable;
+}
+
+/* Checks if the parameter is outside of its valid (inclusive) range
+ * of values. */
+#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \
+		(((_param_) < (_low_)) || \
+		((_param_) > (_high_)))
+/* Parameter access functions */
+/*
+ * Set the otg_cap parameter (0, 1 or 2 per DWC_OTG_CAP_PARAM_*),
+ * validated against the operating mode in hwcfg2.  An unsupported
+ * value is replaced by the best capability the hardware allows and
+ * -EINVAL is returned.
+ */
+int dwc_otg_set_param_otg_cap(struct dwc_otg_core_if *core_if, int val)
+{
+	int valid;
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 2)) {
+		DWC_WARN("Wrong value for otg_cap parameter\n");
+		DWC_WARN("otg_cap parameter must be 0,1 or 2\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	valid = 1;
+	switch (val) {
+	case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
+		if (core_if->hwcfg2.b.op_mode !=
+		    DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+			valid = 0;
+		break;
+	case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
+		if ((core_if->hwcfg2.b.op_mode !=
+		     DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+		    && (core_if->hwcfg2.b.op_mode !=
+			DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+		    && (core_if->hwcfg2.b.op_mode !=
+			DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+		    && (core_if->hwcfg2.b.op_mode !=
+		      DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
+			valid = 0;
+		}
+		break;
+	case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+		/* always valid */
+		break;
+	}
+	if (!valid) {
+		if (dwc_otg_param_initialized(core_if->core_params->otg_cap)) {
+			/* was: "paremter" */
+			DWC_ERROR("%d invalid for otg_cap parameter. "
+					"Check HW configuration.\n",
+					val);
+		}
+		/* Fall back to SRP-only if any SRP-capable op mode is
+		 * configured, otherwise to no HNP/SRP capability. */
+		val = (((core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+			|| (core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+			|| (core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+			|| (core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
+					DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
+					DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->otg_cap = val;
+out:
+	return retval;
+}
+
+/* Get the otg_cap core parameter. */
+int dwc_otg_get_param_otg_cap(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->otg_cap;
+}
+
+/* Set the opt parameter (0 or 1); -EINVAL if out of range. */
+int dwc_otg_set_param_opt(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for opt parameter\n");
+		return -EINVAL;
+	}
+	core_if->core_params->opt = val;
+	return 0;
+}
+
+/* Get the opt core parameter. */
+int dwc_otg_get_param_opt(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->opt;
+}
+
+/*
+ * Set dma_enable (0 or 1).  Rejected (forced to 0, -EINVAL) when
+ * hwcfg2 reports a slave-only core (architecture == 0); descriptor
+ * DMA is forced off whenever DMA itself is disabled.
+ */
+int dwc_otg_set_param_dma_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for dma enable\n");
+		return -EINVAL;
+	}
+
+	if ((val == 1) && (core_if->hwcfg2.b.architecture == 0)) {
+		if (dwc_otg_param_initialized(core_if->core_params->dma_enable))
+			DWC_ERROR("%d invalid for dma_enable parameter. "
+					"Check HW configuration.\n", val);
+		val = 0;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dma_enable = val;
+	if (val == 0)
+		dwc_otg_set_param_dma_desc_enable(core_if, 0);
+
+	return retval;
+}
+
+/* Get the dma_enable core parameter. */
+int dwc_otg_get_param_dma_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dma_enable;
+}
+
+/*
+ * Set dma_desc_enable (0 or 1).  Descriptor DMA requires dma_enable
+ * and hwcfg4.desc_dma hardware support; otherwise 0 is stored and
+ * -EINVAL returned.
+ */
+int dwc_otg_set_param_dma_desc_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was a copy/paste of the dma_enable message */
+		DWC_WARN("Wrong value for dma_desc_enable\n");
+		DWC_WARN("dma_desc_enable must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if ((val == 1)
+	    && ((dwc_otg_get_param_dma_enable(core_if) == 0)
+		|| (core_if->hwcfg4.b.desc_dma == 0))) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->dma_desc_enable)) {
+			DWC_ERROR("%d invalid for dma_desc_enable parameter. "
+					"Check HW configuration.\n", val);
+		}
+		val = 0;
+		retval = -EINVAL;
+	}
+	core_if->core_params->dma_desc_enable = val;
+	return retval;
+}
+
+/* Get the dma_desc_enable core parameter. */
+int dwc_otg_get_param_dma_desc_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dma_desc_enable;
+}
+
+/* Set host_support_fs_ls_low_power (0 or 1); -EINVAL if out of range. */
+int
+dwc_otg_set_param_host_support_fs_ls_low_power(struct dwc_otg_core_if *core_if,
+						   int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for host_support_fs_low_power\n");
+		DWC_WARN("host_support_fs_low_power must be 0 or 1\n");
+		return -EINVAL;
+	}
+	core_if->core_params->host_support_fs_ls_low_power = val;
+	return 0;
+}
+
+/* Get the host_support_fs_ls_low_power core parameter. */
+int dwc_otg_get_param_host_support_fs_ls_low_power(struct dwc_otg_core_if *
+						       core_if)
+{
+	return core_if->core_params->host_support_fs_ls_low_power;
+}
+
+/*
+ * Set enable_dynamic_fifo (0 or 1).  Requires hwcfg2.dynamic_fifo
+ * hardware support; otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_enable_dynamic_fifo(struct dwc_otg_core_if *core_if,
+					  int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for enable_dynamic_fifo\n");
+		DWC_WARN("enable_dynamic_fifo must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if ((val == 1) && (core_if->hwcfg2.b.dynamic_fifo == 0)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->enable_dynamic_fifo)) {
+			DWC_ERROR("%d invalid for enable_dynamic_fifo parameter"
+					" Check HW configuration.\n", val);
+		}
+		val = 0;
+		retval = -EINVAL;
+	}
+	core_if->core_params->enable_dynamic_fifo = val;
+	return retval;
+}
+
+/* Get the enable_dynamic_fifo core parameter. */
+int dwc_otg_get_param_enable_dynamic_fifo(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->enable_dynamic_fifo;
+}
+
+/*
+ * Set data_fifo_size (32-32768 words).  Clamped to the FIFO depth
+ * reported in hwcfg3 with -EINVAL when too large.
+ */
+int dwc_otg_set_param_data_fifo_size(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 32, 32768)) {
+		DWC_WARN("Wrong value for data_fifo_size\n");
+		DWC_WARN("data_fifo_size must be 32-32768\n");
+		return -EINVAL;
+	}
+
+	if (val > core_if->hwcfg3.b.dfifo_depth) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->data_fifo_size)) {
+			DWC_ERROR("%d invalid for data_fifo_size parameter. "
+					"Check HW configuration.\n", val);
+		}
+		val = core_if->hwcfg3.b.dfifo_depth;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->data_fifo_size = val;
+	return retval;
+}
+
+/* Get the data_fifo_size core parameter. */
+int dwc_otg_get_param_data_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->data_fifo_size;
+}
+
+/*
+ * Set dev_rx_fifo_size (16-32768).  Clamped to the power-on GRXFSIZ
+ * value with -EINVAL when too large.
+ */
+int dwc_otg_set_param_dev_rx_fifo_size(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 16, 32768)) {
+		DWC_WARN("Wrong value for dev_rx_fifo_size\n");
+		DWC_WARN("dev_rx_fifo_size must be 16-32768\n");
+		return -EINVAL;
+	}
+
+	if (val > dwc_read_reg32(&core_if->core_global_regs->grxfsiz)) {
+		if (dwc_otg_param_initialized(core_if->core_params->dev_rx_fifo_size)) {
+			DWC_WARN("%d invalid for dev_rx_fifo_size parameter\n",
+					val);
+		}
+		val = dwc_read_reg32(&core_if->core_global_regs->grxfsiz);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dev_rx_fifo_size = val;
+	return retval;
+}
+
+/* Get the dev_rx_fifo_size core parameter. */
+int dwc_otg_get_param_dev_rx_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dev_rx_fifo_size;
+}
+
+/*
+ * Set dev_nperio_tx_fifo_size (16-32768).  Clamped to the depth field
+ * (upper 16 bits) of GNPTXFSIZ with -EINVAL when too large.
+ */
+int dwc_otg_set_param_dev_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					      int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 16, 32768)) {
+		DWC_WARN("Wrong value for dev_nperio_tx_fifo\n");
+		DWC_WARN("dev_nperio_tx_fifo must be 16-32768\n");
+		return -EINVAL;
+	}
+
+	if (val >
+		(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->dev_nperio_tx_fifo_size)) {
+			DWC_ERROR("%d invalid for dev_nperio_tx_fifo_size."
+					"Check HW configuration.\n", val);
+		}
+		val = (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >>
+				16);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dev_nperio_tx_fifo_size = val;
+	return retval;
+}
+
+/* Get the dev_nperio_tx_fifo_size core parameter. */
+int dwc_otg_get_param_dev_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dev_nperio_tx_fifo_size;
+}
+
+/*
+ * Set host_rx_fifo_size (16-32768).  Clamped to the power-on GRXFSIZ
+ * value with -EINVAL when too large.
+ */
+int dwc_otg_set_param_host_rx_fifo_size(struct dwc_otg_core_if *core_if,
+					int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 16, 32768)) {
+		DWC_WARN("Wrong value for host_rx_fifo_size\n");
+		DWC_WARN("host_rx_fifo_size must be 16-32768\n");
+		return -EINVAL;
+	}
+
+	if (val > dwc_read_reg32(&core_if->core_global_regs->grxfsiz)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->host_rx_fifo_size)) {
+			DWC_ERROR("%d invalid for host_rx_fifo_size. "
+					"Check HW configuration.\n", val);
+		}
+		val = dwc_read_reg32(&core_if->core_global_regs->grxfsiz);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->host_rx_fifo_size = val;
+	return retval;
+
+}
+
+/* Get the host_rx_fifo_size core parameter. */
+int dwc_otg_get_param_host_rx_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->host_rx_fifo_size;
+}
+
+/*
+ * Set host_nperio_tx_fifo_size (16-32768).  Clamped to the depth
+ * field (upper 16 bits) of GNPTXFSIZ with -EINVAL when too large.
+ */
+int dwc_otg_set_param_host_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					       int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 16, 32768)) {
+		DWC_WARN("Wrong value for host_nperio_tx_fifo_size\n");
+		DWC_WARN("host_nperio_tx_fifo_size must be 16-32768\n");
+		return -EINVAL;
+	}
+
+	if (val > (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->host_nperio_tx_fifo_size)) {
+			DWC_ERROR("%d invalid for host_nperio_tx_fifo_size. "
+					"Check HW configuration.\n", val);
+		}
+		val =
+		    (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >>
+		     16);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->host_nperio_tx_fifo_size = val;
+	return retval;
+}
+
+/* Get the host_nperio_tx_fifo_size core parameter. */
+int dwc_otg_get_param_host_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->host_nperio_tx_fifo_size;
+}
+
+/*
+ * Set host_perio_tx_fifo_size (16-32768).  Clamped to the depth field
+ * (upper 16 bits) of HPTXFSIZ with -EINVAL when too large.
+ */
+int dwc_otg_set_param_host_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					      int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 16, 32768)) {
+		DWC_WARN("Wrong value for host_perio_tx_fifo_size\n");
+		DWC_WARN("host_perio_tx_fifo_size must be 16-32768\n");
+		return -EINVAL;
+	}
+
+	if (val >
+	    ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->host_perio_tx_fifo_size)) {
+			DWC_ERROR("%d invalid for host_perio_tx_fifo_size. "
+					"Check HW configuration.\n", val);
+		}
+		val =
+		    (dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >>
+		     16);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->host_perio_tx_fifo_size = val;
+	return retval;
+}
+
+/* Get the host_perio_tx_fifo_size core parameter. */
+int dwc_otg_get_param_host_perio_tx_fifo_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->host_perio_tx_fifo_size;
+}
+
+/*
+ * Set max_transfer_size (2047-524288 bytes).  The value must fit in
+ * the transfer size counter width advertised by hwcfg3; otherwise the
+ * hardware maximum is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_max_transfer_size(struct dwc_otg_core_if *core_if,
+					int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 2047, 524288)) {
+		DWC_WARN("Wrong value for max_transfer_size\n");
+		DWC_WARN("max_transfer_size must be 2047-524288\n");
+		return -EINVAL;
+	}
+
+	if (val >= (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->max_transfer_size)) {
+			DWC_ERROR("%d invalid for max_transfer_size. "
+					"Check HW configuration.\n", val);
+		}
+		/*
+		 * Clamp to the hardware limit.  This previously used
+		 * packet_size_cntr_width, inconsistent with the bound
+		 * checked above; xfer_size_cntr_width is the field
+		 * that limits transfer sizes.
+		 */
+		val =
+		    ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) -
+		     1);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->max_transfer_size = val;
+	return retval;
+}
+
+/* Get the max_transfer_size core parameter. */
+int dwc_otg_get_param_max_transfer_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->max_transfer_size;
+}
+
+/*
+ * Set max_packet_count (15-511).  Clamped to the packet size counter
+ * width advertised by hwcfg3 with -EINVAL when too large.
+ */
+int dwc_otg_set_param_max_packet_count(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 15, 511)) {
+		DWC_WARN("Wrong value for max_packet_count\n");
+		DWC_WARN("max_packet_count must be 15-511\n");
+		return -EINVAL;
+	}
+
+	if (val > (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->max_packet_count)) {
+			DWC_ERROR("%d invalid for max_packet_count. "
+					"Check HW configuration.\n", val);
+		}
+		val =
+		    ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->max_packet_count = val;
+	return retval;
+}
+
+/* Get the max_packet_count core parameter. */
+int dwc_otg_get_param_max_packet_count(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->max_packet_count;
+}
+
+/*
+ * Set host_channels (1-16).  Clamped to hwcfg2.num_host_chan + 1
+ * with -EINVAL when too large.
+ */
+int dwc_otg_set_param_host_channels(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 1, 16)) {
+		DWC_WARN("Wrong value for host_channels\n");
+		DWC_WARN("host_channels must be 1-16\n");
+		return -EINVAL;
+	}
+
+	if (val > (core_if->hwcfg2.b.num_host_chan + 1)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->host_channels)) {
+			DWC_ERROR("%d invalid for host_channels."
+					"Check HW configurations.\n", val);
+		}
+		val = (core_if->hwcfg2.b.num_host_chan + 1);
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->host_channels = val;
+	return retval;
+}
+
+/* Get the host_channels core parameter. */
+int dwc_otg_get_param_host_channels(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->host_channels;
+}
+
+/*
+ * Set dev_endpoints (1-15).  Clamped to hwcfg2.num_dev_ep with
+ * -EINVAL when too large.
+ */
+int dwc_otg_set_param_dev_endpoints(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 1, 15)) {
+		DWC_WARN("Wrong value for dev_endpoints\n");
+		DWC_WARN("dev_endpoints must be 1-15\n");
+		return -EINVAL;
+	}
+
+	if (val > (core_if->hwcfg2.b.num_dev_ep)) {
+		if (dwc_otg_param_initialized
+		    (core_if->core_params->dev_endpoints)) {
+			DWC_ERROR("%d invalid for dev_endpoints. "
+					"Check HW configurations.\n", val);
+		}
+		val = core_if->hwcfg2.b.num_dev_ep;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dev_endpoints = val;
+	return retval;
+}
+
+/* Get the dev_endpoints core parameter. */
+int dwc_otg_get_param_dev_endpoints(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dev_endpoints;
+}
+
+/*
+ * Set phy_type (0, 1 or 2 per DWC_PHY_TYPE_PARAM_*).  Unless
+ * NO_FS_PHY_HW_CHECKS is defined, the value is validated against the
+ * hs_phy_type/fs_phy_type fields of hwcfg2, and an unsupported value
+ * is replaced by one the hardware provides (-EINVAL returned).
+ */
+int dwc_otg_set_param_phy_type(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	int valid = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 0, 2)) {
+		DWC_WARN("Wrong value for phy_type\n");
+		DWC_WARN("phy_type must be 0,1 or 2\n");
+		return -EINVAL;
+	}
+#ifndef NO_FS_PHY_HW_CHECKS
+	if ((val == DWC_PHY_TYPE_PARAM_UTMI) &&
+	    ((core_if->hwcfg2.b.hs_phy_type == 1) ||
+	     (core_if->hwcfg2.b.hs_phy_type == 3))) {
+		valid = 1;
+	} else if ((val == DWC_PHY_TYPE_PARAM_ULPI) &&
+		   ((core_if->hwcfg2.b.hs_phy_type == 2) ||
+		    (core_if->hwcfg2.b.hs_phy_type == 3))) {
+		valid = 1;
+	} else if ((val == DWC_PHY_TYPE_PARAM_FS) &&
+		   (core_if->hwcfg2.b.fs_phy_type == 1)) {
+		valid = 1;
+	}
+	if (!valid) {
+		if (dwc_otg_param_initialized(core_if->core_params->phy_type)) {
+			DWC_ERROR("%d invalid for phy_type. "
+					"Check HW configurations.\n", val);
+		}
+		/* Prefer UTMI when available, else ULPI. */
+		if (core_if->hwcfg2.b.hs_phy_type) {
+			if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
+			    (core_if->hwcfg2.b.hs_phy_type == 1)) {
+				val = DWC_PHY_TYPE_PARAM_UTMI;
+			} else {
+				val = DWC_PHY_TYPE_PARAM_ULPI;
+			}
+		}
+		retval = -EINVAL;
+	}
+#endif
+	core_if->core_params->phy_type = val;
+	return retval;
+}
+
+/* Get the phy_type core parameter. */
+int dwc_otg_get_param_phy_type(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->phy_type;
+}
+
+/*
+ * Set the speed parameter (0 or 1).  Value 0 is rejected when the PHY
+ * type is full-speed only, in which case 1 is stored and -EINVAL
+ * returned.
+ */
+int dwc_otg_set_param_speed(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for speed parameter\n");
+		/* was: "max_speed parameter" — the parameter is "speed" */
+		DWC_WARN("speed parameter must be 0 or 1\n");
+		return -EINVAL;
+	}
+	if ((val == 0)
+	    && dwc_otg_get_param_phy_type(core_if) == DWC_PHY_TYPE_PARAM_FS) {
+		if (dwc_otg_param_initialized(core_if->core_params->speed)) {
+			/* was: "paremter" */
+			DWC_ERROR("%d invalid for speed parameter. "
+					"Check HW configuration.\n", val);
+		}
+		val =
+		    (dwc_otg_get_param_phy_type(core_if) ==
+		     DWC_PHY_TYPE_PARAM_FS ? 1 : 0);
+		retval = -EINVAL;
+	}
+	core_if->core_params->speed = val;
+	return retval;
+}
+
+/* Get the speed core parameter. */
+int dwc_otg_get_param_speed(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->speed;
+}
+
+/*
+ * Set host_ls_low_power_phy_clk (0 or 1).  The 48MHz setting is
+ * rejected when the PHY type is full-speed, in which case the 6MHz
+ * setting is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_host_ls_low_power_phy_clk(struct dwc_otg_core_if *core_if,
+						int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN
+		    ("Wrong value for host_ls_low_power_phy_clk parameter\n");
+		DWC_WARN("host_ls_low_power_phy_clk must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if ((val == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)
+	    && (dwc_otg_get_param_phy_type(core_if) == DWC_PHY_TYPE_PARAM_FS)) {
+		if (dwc_otg_param_initialized(core_if->core_params->host_ls_low_power_phy_clk)) {
+			DWC_ERROR("%d invalid for host_ls_low_power_phy_clk. "
+					"Check HW configuration.\n", val);
+		}
+		val =
+		    (dwc_otg_get_param_phy_type(core_if) ==
+		     DWC_PHY_TYPE_PARAM_FS) ?
+		    DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ :
+		    DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->host_ls_low_power_phy_clk = val;
+	return retval;
+}
+
+/* Get the host_ls_low_power_phy_clk core parameter. */
+int dwc_otg_get_param_host_ls_low_power_phy_clk(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->host_ls_low_power_phy_clk;
+}
+
+/* Set phy_ulpi_ddr (0 or 1); -EINVAL if out of range. */
+int dwc_otg_set_param_phy_ulpi_ddr(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for phy_ulpi_ddr\n");
+		/* was: "phy_upli_ddr" */
+		DWC_WARN("phy_ulpi_ddr must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->phy_ulpi_ddr = val;
+	return 0;
+}
+
+/* Get the phy_ulpi_ddr core parameter. */
+int dwc_otg_get_param_phy_ulpi_ddr(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->phy_ulpi_ddr;
+}
+
+/* Set phy_ulpi_ext_vbus (0 or 1); -EINVAL if out of range. */
+int dwc_otg_set_param_phy_ulpi_ext_vbus(struct dwc_otg_core_if *core_if,
+					int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for phy_ulpi_ext_vbus\n");
+		DWC_WARN("phy_ulpi_ext_vbus must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->phy_ulpi_ext_vbus = val;
+	return 0;
+}
+
+/* Get the phy_ulpi_ext_vbus core parameter. */
+int dwc_otg_get_param_phy_ulpi_ext_vbus(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->phy_ulpi_ext_vbus;
+}
+
+/* Set phy_utmi_width (8 or 16 bits); -EINVAL otherwise. */
+int dwc_otg_set_param_phy_utmi_width(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 8, 8) && DWC_OTG_PARAM_TEST(val, 16, 16)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for phy_utmi_width\n");
+		DWC_WARN("phy_utmi_width must be 8 or 16\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->phy_utmi_width = val;
+	return 0;
+}
+
+/* Get the phy_utmi_width core parameter. */
+int dwc_otg_get_param_phy_utmi_width(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->phy_utmi_width;
+}
+
+/* Set ulpi_fs_ls (0 or 1); -EINVAL if out of range. */
+int dwc_otg_set_param_ulpi_fs_ls(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for ulpi_fs_ls\n");
+		DWC_WARN("ulpi_fs_ls must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->ulpi_fs_ls = val;
+	return 0;
+}
+
+/* Get the ulpi_fs_ls core parameter. */
+int dwc_otg_get_param_ulpi_fs_ls(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->ulpi_fs_ls;
+}
+
+/* Set ts_dline (0 or 1); -EINVAL if out of range. */
+int dwc_otg_set_param_ts_dline(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for ts_dline\n");
+		DWC_WARN("ts_dline must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->ts_dline = val;
+	return 0;
+}
+
+/* Get the ts_dline core parameter. */
+int dwc_otg_get_param_ts_dline(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->ts_dline;
+}
+
+/*
+ * Set i2c_enable (0 or 1).  Requires hwcfg3.i2c hardware support;
+ * otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_i2c_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for i2c_enable\n");
+		DWC_WARN("i2c_enable must be 0 or 1\n");
+		return -EINVAL;
+	}
+/* NOTE(review): phy_type guards on NO_FS_PHY_HW_CHECKS (trailing S);
+ * confirm which spelling is actually defined before relying on this
+ * check being compiled in. */
+#ifndef NO_FS_PHY_HW_CHECK
+	if (val == 1 && core_if->hwcfg3.b.i2c == 0) {
+		if (dwc_otg_param_initialized(core_if->core_params->i2c_enable))
+			DWC_ERROR("%d invalid for i2c_enable. "
+					"Check HW configuration.\n", val);
+		val = 0;
+		retval = -EINVAL;
+	}
+#endif
+
+	core_if->core_params->i2c_enable = val;
+	return retval;
+}
+
+/* Get the i2c_enable core parameter. */
+int dwc_otg_get_param_i2c_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->i2c_enable;
+}
+
+/*
+ * Set dev_perio_tx_fifo_size for one periodic Tx FIFO (4-768).
+ * Clamped to the power-on DPTXFSIZ[fifo_num] value with -EINVAL when
+ * too large.
+ */
+int dwc_otg_set_param_dev_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					     int val, int fifo_num)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 4, 768)) {
+		DWC_WARN("Wrong value for dev_perio_tx_fifo_size\n");
+		DWC_WARN("dev_perio_tx_fifo_size must be 4-768\n");
+		return -EINVAL;
+	}
+
+	if (val > (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[fifo_num]))) {
+		if (dwc_otg_param_initialized(core_if->core_params->dev_perio_tx_fifo_size[fifo_num])) {
+			DWC_ERROR("`%d' invalid for parameter "
+					"`dev_perio_fifo_size_%d'. "
+					"Check HW configuration.\n",
+					val, fifo_num);
+		}
+		val = (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[fifo_num]));
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dev_perio_tx_fifo_size[fifo_num] = val;
+	return retval;
+}
+
+/* Get the dev_perio_tx_fifo_size parameter for one FIFO. */
+int dwc_otg_get_param_dev_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						 int fifo_num)
+{
+	return core_if->core_params->dev_perio_tx_fifo_size[fifo_num];
+}
+
+/*
+ * Set en_multiple_tx_fifo (0 or 1).  Requires hwcfg4.ded_fifo_en
+ * hardware support; otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_en_multiple_tx_fifo(struct dwc_otg_core_if *core_if,
+					  int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		/* was: "Wrong valaue for en_multiple_tx_fifo,\n" */
+		DWC_WARN("Wrong value for en_multiple_tx_fifo\n");
+		DWC_WARN("en_multiple_tx_fifo must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if (val == 1 && core_if->hwcfg4.b.ded_fifo_en == 0) {
+		if (dwc_otg_param_initialized(core_if->core_params->en_multiple_tx_fifo)) {
+			DWC_ERROR("%d invalid for parameter en_multiple_tx_fifo"
+					" Check HW configuration.\n",
+					val);
+		}
+		val = 0;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->en_multiple_tx_fifo = val;
+	return retval;
+}
+
+/* Get the en_multiple_tx_fifo core parameter. */
+int dwc_otg_get_param_en_multiple_tx_fifo(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->en_multiple_tx_fifo;
+}
+
+/*
+ * Set dev_tx_fifo_size for one dedicated Tx FIFO (4-768).  Clamped to
+ * the power-on DPTXFSIZ/DIEPTXF[fifo_num] value with -EINVAL when too
+ * large.
+ */
+int dwc_otg_set_param_dev_tx_fifo_size(struct dwc_otg_core_if *core_if, int val,
+				       int fifo_num)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 4, 768)) {
+		DWC_WARN("Wrong value for dev_tx_fifo_size\n");
+		DWC_WARN("dev_tx_fifo_size must be 4-768\n");
+		return -EINVAL;
+	}
+
+	if (val > (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[fifo_num]))) {
+		if (dwc_otg_param_initialized(core_if->core_params->dev_tx_fifo_size[fifo_num])) {
+			DWC_ERROR("`%d' invalid for parameter "
+					"`dev_tx_fifo_size_%d'. "
+					"Check HW configuration.\n",
+					val,
+					fifo_num);
+		}
+		val =
+			(dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[fifo_num]));
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->dev_tx_fifo_size[fifo_num] = val;
+	return retval;
+}
+
+/* Get the dev_tx_fifo_size parameter for one FIFO. */
+int dwc_otg_get_param_dev_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					   int fifo_num)
+{
+	return core_if->core_params->dev_tx_fifo_size[fifo_num];
+}
+
+/*
+ * Set thr_ctl threshold-control bitmask (0-7).  A nonzero value
+ * requires DMA and dedicated FIFOs (hwcfg4.ded_fifo_en); otherwise 0
+ * is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_thr_ctl(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 0, 7)) {
+		DWC_WARN("Wrong value for thr_ctl\n");
+		DWC_WARN("thr_ctl must be 0-7\n");
+		return -EINVAL;
+	}
+
+	if ((val != 0) && (!dwc_otg_get_param_dma_enable(core_if) ||
+				!core_if->hwcfg4.b.ded_fifo_en)) {
+		if (dwc_otg_param_initialized(core_if->core_params->thr_ctl)) {
+			DWC_ERROR("%d invalid for parameter thr_ctl. "
+					"Check HW configuration.\n", val);
+		}
+		val = 0;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->thr_ctl = val;
+	return retval;
+}
+
+/* Get the thr_ctl core parameter. */
+static int dwc_otg_get_param_thr_ctl(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->thr_ctl;
+}
+
+/*
+ * Set lpm_enable (0 or 1).  Requires hwcfg3.otg_lpm_en hardware
+ * support; otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_lpm_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("Wrong value for lpm_enable\n");
+		DWC_WARN("lpm_enable must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if (val && !core_if->hwcfg3.b.otg_lpm_en) {
+		if (dwc_otg_param_initialized(core_if->core_params->lpm_enable))
+			DWC_ERROR("%d invalid for parameter lpm_enable."
+					"Check HW configuration.\n", val);
+		val = 0;
+		retval = -EINVAL;
+	}
+
+	core_if->core_params->lpm_enable = val;
+	return retval;
+}
+
+/* Get the lpm_enable core parameter. */
+int dwc_otg_get_param_lpm_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->lpm_enable;
+}
+
+/* Set tx_thr_length (8-128); -EINVAL if out of range. */
+int dwc_otg_set_param_tx_thr_length(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 8, 128)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for tx_thr_length\n");
+		DWC_WARN("tx_thr_length must be 8 - 128\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->tx_thr_length = val;
+	return 0;
+}
+
+/* Get the tx_thr_length core parameter. */
+int dwc_otg_get_param_tx_thr_length(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->tx_thr_length;
+}
+
+/* Set rx_thr_length (8-128); -EINVAL if out of range. */
+int dwc_otg_set_param_rx_thr_length(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 8, 128)) {
+		/* was: "valaue" */
+		DWC_WARN("Wrong value for rx_thr_length\n");
+		DWC_WARN("rx_thr_length must be 8 - 128\n");
+		return -EINVAL;
+	}
+
+	core_if->core_params->rx_thr_length = val;
+	return 0;
+}
+
+/* Get the rx_thr_length core parameter. */
+int dwc_otg_get_param_rx_thr_length(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->rx_thr_length;
+}
+
+/*
+ * Set dma_burst_size; only the powers of two 1,4,8,16,32,64,128,256
+ * are accepted (-EINVAL otherwise).
+ */
+int dwc_otg_set_param_dma_burst_size(struct dwc_otg_core_if *core_if, int val)
+{
+	if (DWC_OTG_PARAM_TEST(val, 1, 1) &&
+	    DWC_OTG_PARAM_TEST(val, 4, 4) &&
+	    DWC_OTG_PARAM_TEST(val, 8, 8) &&
+	    DWC_OTG_PARAM_TEST(val, 16, 16) &&
+	    DWC_OTG_PARAM_TEST(val, 32, 32) &&
+	    DWC_OTG_PARAM_TEST(val, 64, 64) &&
+	    DWC_OTG_PARAM_TEST(val, 128, 128) &&
+	    DWC_OTG_PARAM_TEST(val, 256, 256)) {
+		DWC_WARN("`%d' invalid for parameter `dma_burst_size'\n", val);
+		return -EINVAL;
+	}
+	core_if->core_params->dma_burst_size = val;
+	return 0;
+}
+
+/* Get the dma_burst_size core parameter. */
+int dwc_otg_get_param_dma_burst_size(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->dma_burst_size;
+}
+
+/*
+ * Set pti_enable (0 or 1).  Requires core revision >= 2.72a
+ * (snpsid); otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_pti_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("`%d' invalid for parameter `pti_enable'\n", val);
+		return -EINVAL;
+	}
+	if (val && (core_if->snpsid < OTG_CORE_REV_2_72a)) {
+		if (dwc_otg_param_initialized(core_if->core_params->pti_enable))
+			DWC_ERROR("%d invalid for parameter pti_enable. "
+					"Check HW configuration.\n", val);
+		retval = -EINVAL;
+		val = 0;
+	}
+	core_if->core_params->pti_enable = val;
+	return retval;
+}
+
+/* Get the pti_enable core parameter. */
+int dwc_otg_get_param_pti_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->pti_enable;
+}
+
+/*
+ * Set mpi_enable (0 or 1).  Requires hwcfg2.multi_proc_int hardware
+ * support; otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_mpi_enable(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("`%d' invalid for parameter `mpi_enable'\n", val);
+		return -EINVAL;
+	}
+	if (val && (core_if->hwcfg2.b.multi_proc_int == 0)) {
+		if (dwc_otg_param_initialized(core_if->core_params->mpi_enable))
+			DWC_ERROR("%d invalid for parameter mpi_enable. "
+					"Check HW configuration.\n", val);
+		retval = -EINVAL;
+		val = 0;
+	}
+	core_if->core_params->mpi_enable = val;
+	return retval;
+}
+
+/* Get the mpi_enable core parameter. */
+int dwc_otg_get_param_mpi_enable(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->mpi_enable;
+}
+
+/*
+ * Set ic_usb_cap (0 or 1).  Requires hwcfg3.otg_enable_ic_usb
+ * hardware support; otherwise 0 is stored and -EINVAL returned.
+ */
+int dwc_otg_set_param_ic_usb_cap(struct dwc_otg_core_if *core_if,
+					int val)
+{
+	int retval = 0;
+	if (DWC_OTG_PARAM_TEST(val, 0, 1)) {
+		DWC_WARN("`%d' invalid for parameter `ic_usb_cap'\n", val);
+		DWC_WARN("ic_usb_cap must be 0 or 1\n");
+		return -EINVAL;
+	}
+
+	if (val && (core_if->hwcfg3.b.otg_enable_ic_usb == 0)) {
+		if (dwc_otg_param_initialized(core_if->core_params->ic_usb_cap))
+			DWC_ERROR("%d invalid for parameter ic_usb_cap. "
+					"Check HW configuration.\n", val);
+		retval = -EINVAL;
+		val = 0;
+	}
+	core_if->core_params->ic_usb_cap = val;
+	return retval;
+}
+/* Get the ic_usb_cap core parameter. */
+int dwc_otg_get_param_ic_usb_cap(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->ic_usb_cap;
+}
+
+int dwc_otg_set_param_ahb_thr_ratio(struct dwc_otg_core_if *core_if, int val)
+{
+	int retval = 0;
+	int valid = 1;
+
+	if (DWC_OTG_PARAM_TEST(val, 0, 3)) {	/* 2-bit ratio field: 0-3 */
+		DWC_WARN("`%d' invalid for parameter `ahb_thr_ratio'\n", val);
+		DWC_WARN("ahb_thr_ratio must be 0 - 3\n");
+		return -EINVAL;
+	}
+
+	if (val && (core_if->snpsid < OTG_CORE_REV_2_81a ||	/* needs core >= 2.81a and thr_ctl set */
+			!dwc_otg_get_param_thr_ctl(core_if)))
+		valid = 0;
+	else
+		if (val && ((dwc_otg_get_param_tx_thr_length(core_if) /
+				(1 << val)) < 4))	/* scaled Tx threshold would be < 4 words */
+			valid = 0;
+
+	if (valid == 0) {
+		if (dwc_otg_param_initialized(core_if->core_params->ahb_thr_ratio))
+			DWC_ERROR("%d invalid for parameter ahb_thr_ratio. "
+					"Check HW configuration.\n", val);
+
+		retval = -EINVAL;
+		val = 0;	/* fall back to 1:1 ratio */
+	}
+
+	core_if->core_params->ahb_thr_ratio = val;
+	return retval;
+}
+int dwc_otg_get_param_ahb_thr_ratio(struct dwc_otg_core_if *core_if)
+{
+	return core_if->core_params->ahb_thr_ratio;	/* AHB threshold ratio selector (0-3) */
+}
+u32 dwc_otg_get_hnpstatus(struct dwc_otg_core_if *core_if)
+{
+	union gotgctl_data otgctl;
+	otgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+	return otgctl.b.hstnegscs;
+}
+
+u32 dwc_otg_get_srpstatus(struct dwc_otg_core_if *core_if)
+{
+	union gotgctl_data otgctl;
+	otgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+	return otgctl.b.sesreqscs;
+}
+
+void dwc_otg_set_hnpreq(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union gotgctl_data otgctl;
+	otgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+	otgctl.b.hnpreq = val;
+	dwc_write_reg32(&core_if->core_global_regs->gotgctl, otgctl.d32);
+}
+
+u32 dwc_otg_get_gsnpsid(struct dwc_otg_core_if *core_if)
+{
+	return core_if->snpsid;
+}
+
+u32 dwc_otg_get_mode(struct dwc_otg_core_if *core_if)
+{
+	union gotgctl_data otgctl;
+	otgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+	return otgctl.b.currmod;
+}
+
+u32 dwc_otg_get_hnpcapable(struct dwc_otg_core_if *core_if)
+{
+	union gusbcfg_data usbcfg;
+	usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+	return usbcfg.b.hnpcap;
+}
+
+void dwc_otg_set_hnpcapable(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union gusbcfg_data usbcfg;
+	usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+	usbcfg.b.hnpcap = val;
+	dwc_write_reg32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
+}
+
+u32 dwc_otg_get_srpcapable(struct dwc_otg_core_if *core_if)
+{
+	union gusbcfg_data usbcfg;
+	usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+	return usbcfg.b.srpcap;
+}
+
+void dwc_otg_set_srpcapable(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union gusbcfg_data usbcfg;
+	usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+	usbcfg.b.srpcap = val;
+	dwc_write_reg32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
+}
+
+u32 dwc_otg_get_devspeed(struct dwc_otg_core_if *core_if)
+{
+	union dcfg_data dcfg;
+	dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
+	return dcfg.b.devspd;
+}
+
+void dwc_otg_set_devspeed(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union dcfg_data dcfg;
+	dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
+	dcfg.b.devspd = val;
+	dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+u32 dwc_otg_get_busconnected(struct dwc_otg_core_if *core_if)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	return hprt0.b.prtconnsts;
+}
+
+u32 dwc_otg_get_enumspeed(struct dwc_otg_core_if *core_if)
+{
+	union dsts_data dsts;
+	dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
+	return dsts.b.enumspd;
+}
+
+u32 dwc_otg_get_prtpower(struct dwc_otg_core_if *core_if)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	return hprt0.b.prtpwr;
+
+}
+
+void dwc_otg_set_prtpower(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	hprt0.b.prtpwr = val;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);	/* was: val — clobbered other port bits */
+}
+
+u32 dwc_otg_get_prtsuspend(struct dwc_otg_core_if *core_if)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	return hprt0.b.prtsusp;
+
+}
+
+void dwc_otg_set_prtsuspend(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	hprt0.b.prtsusp = val;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);	/* was: val — clobbered other port bits */
+}
+
+void dwc_otg_set_prtresume(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	hprt0.b.prtres = val;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);	/* was: val — clobbered other port bits */
+}
+
+u32 dwc_otg_get_remotewakesig(struct dwc_otg_core_if *core_if)
+{
+	union dctl_data dctl;
+	dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
+	return dctl.b.rmtwkupsig;
+}
+
+u32 dwc_otg_get_lpm_portsleepstatus(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+
+	/* Software L1 state must agree with the port sleep status bit. */
+	if ((core_if->lx_state == DWC_OTG_L1) ^ lpmcfg.b.prt_sleep_sts) {
+		printk(KERN_ERR "lx_state = %d, lpmcfg.prt_sleep_sts = %d\n",
+				core_if->lx_state, lpmcfg.b.prt_sleep_sts);
+		BUG();	/* was BUG_ON(1): BUG() is the idiomatic unconditional form */
+	}
+	return lpmcfg.b.prt_sleep_sts;
+}
+
+u32 dwc_otg_get_lpm_remotewakeenabled(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	return lpmcfg.b.rem_wkup_en;
+}
+
+u32 dwc_otg_get_lpmresponse(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	return lpmcfg.b.appl_resp;
+}
+
+void dwc_otg_set_lpmresponse(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	lpmcfg.b.appl_resp = val;
+	dwc_write_reg32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+u32 dwc_otg_get_hsic_connect(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	return lpmcfg.b.hsic_connect;
+}
+
+void dwc_otg_set_hsic_connect(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	lpmcfg.b.hsic_connect = val;
+	dwc_write_reg32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+u32 dwc_otg_get_inv_sel_hsic(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	return lpmcfg.b.inv_sel_hsic;
+
+}
+
+void dwc_otg_set_inv_sel_hsic(struct dwc_otg_core_if *core_if, u32 val)
+{
+	union glpmcfg_data lpmcfg;
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	lpmcfg.b.inv_sel_hsic = val;
+	dwc_write_reg32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+u32 dwc_otg_get_gotgctl(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+}
+
+void dwc_otg_set_gotgctl(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->gotgctl, val);
+}
+
+u32 dwc_otg_get_gusbcfg(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+}
+
+void dwc_otg_set_gusbcfg(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->gusbcfg, val);
+}
+
+u32 dwc_otg_get_grxfsiz(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->grxfsiz);
+}
+
+void dwc_otg_set_grxfsiz(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->grxfsiz, val);
+}
+
+u32 dwc_otg_get_gnptxfsiz(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz);
+}
+
+void dwc_otg_set_gnptxfsiz(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->gnptxfsiz, val);
+}
+
+u32 dwc_otg_get_gpvndctl(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gpvndctl);
+}
+
+void dwc_otg_set_gpvndctl(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->gpvndctl, val);
+}
+
+u32 dwc_otg_get_ggpio(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->ggpio);
+}
+
+void dwc_otg_set_ggpio(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->ggpio, val);
+}
+
+u32 dwc_otg_get_hprt0(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(core_if->host_if->hprt0);
+
+}
+
+void dwc_otg_set_hprt0(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(core_if->host_if->hprt0, val);
+}
+
+u32 dwc_otg_get_guid(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->guid);
+}
+
+void dwc_otg_set_guid(struct dwc_otg_core_if *core_if, u32 val)
+{
+	dwc_write_reg32(&core_if->core_global_regs->guid, val);
+}
+
+u32 dwc_otg_get_hptxfsiz(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->hptxfsiz);
+}
diff --git a/drivers/usb/dwc_otg/dwc_otg_cil.h b/drivers/usb/dwc_otg/dwc_otg_cil.h
new file mode 100644
index 0000000..720366a
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_cil.h
@@ -0,0 +1,1250 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_CIL_H__)
+#define __DWC_CIL_H__
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_regs.h"
+#ifdef DEBUG
+#include <linux/timer.h>	/* kernel headers use angle brackets, not quotes */
+#endif
+
+#ifdef OTG_PPC_PLB_DMA
+#include "ppc4xx_dma.h"
+#include <asm/cacheflush.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/unaligned.h>	/* NOTE(review): v3.10-era kernels provide <asm/unaligned.h> — verify */
+
+#undef OTG_PPC_PLB_DMA_DBG
+#define OTG_TX_DMA 0    /* TX DMA direction */
+#define OTG_RX_DMA 1    /* RX DMA direction */
+#define PLB_DMA_CH DMA_CH0	/* plb dma channel */
+#define PLB_DMA_CH_INT 12	/* interrupt line of the PLB DMA channel */
+#define PLB_DMA_INT_ENA	1
+#define PLB_DMA_INT_DIS	0
+#define USB_BUFSIZ	512
+
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+#ifndef OTG_PPC_PLB_DMA
+#define OTG_PPC_PLB_DMA
+#endif
+
+extern atomic_t release_later;
+#endif
+#endif
+
+/**
+ * @file
+ * This file contains the interface to the Core Interface Layer.
+ */
+
+#define OTG_CORE_REV_2_60a	0x4F54260A
+#define OTG_CORE_REV_2_71a	0x4F54271A
+#define OTG_CORE_REV_2_72a	0x4F54272A
+#define OTG_CORE_REV_2_80a	0x4F54280A
+#define OTG_CORE_REV_2_81a	0x4F54281A
+#define OTG_CORE_REV_2_90a	0x4F54290A
+#define OTG_CORE_REV_2_91a	0x4F54291A	/* was 0x4F54290A, duplicating 2.90a */
+
+#ifdef CONFIG_405EZ
+/*
+ * Added-sr: 2007-07-26
+ *
+ * Since the 405EZ (Ultra) only support 2047 bytes as
+ * max transfer size, we have to split up bigger transfers
+ * into multiple transfers of 1024 bytes sized messages.
+ * It happens often that transfers of 4096 bytes are
+ * required (zero-gadget, file_storage-gadget).
+ *
+ * MAX_XFER_LEN is set to 1024 right now, but could be 2047,
+ * since the xfer-size field in the 405EZ USB device controller
+ * implementation has 11 bits. Using 1024 seems to work for now.
+ */
+#define MAX_XFER_LEN	1024
+#endif
+
+/**
+ * Information for each ISOC packet.
+ */
+struct iso_pkt_info {
+	u32 offset;
+	u32 length;
+	int status;
+};
+/**
+ * The <code>dwc_ep</code> structure represents the state of a single
+ * endpoint when acting in device mode. It contains the data items
+ * needed for an endpoint to be activated and transfer packets.
+ */
+struct dwc_ep {
+	/** EP number used for register address lookup */
+	u8	 num;
+	/** EP direction 0 = OUT */
+	unsigned is_in:1;
+	/** EP active. */
+	unsigned active:1;
+
+	/*
+	 * Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use
+	 * non-periodic Tx FIFO If dedicated Tx FIFOs are enabled for all IN
+	 * Eps - Tx FIFO # FOR IN EPs
+	 */
+	unsigned tx_fifo_num:4;
+	/** EP type: 0 - Control, 1 - ISOC,	 2 - BULK,	3 - INTR */
+	unsigned type:2;
+	/** DATA start PID for INTR and BULK EP */
+	unsigned data_pid_start:1;
+	/** Frame (even/odd) for ISOC EP */
+	unsigned even_odd_frame:1;
+	/** Max Packet bytes */
+	unsigned maxpacket:11;
+
+	/** Max Transfer size */
+	u32 maxxfer;
+	/** @name Transfer state */
+
+	/**
+	 * Pointer to the beginning of the transfer buffer -- do not modify
+	 * during transfer.
+	 */
+
+	dma_addr_t dma_addr;
+	dma_addr_t dma_desc_addr;
+	struct dwc_otg_dev_dma_desc *desc_addr;
+
+	u8 *start_xfer_buff;
+	/** pointer to the transfer buffer */
+	u8 *xfer_buff;
+	/** Number of bytes to transfer */
+	unsigned xfer_len:19;
+	/** Number of bytes transferred. */
+	unsigned xfer_count:19;
+	/** Sent ZLP */
+	unsigned sent_zlp:1;
+	/** Total len for control transfer */
+	unsigned total_len:19;
+
+	/** stall clear flag */
+	unsigned stall_clear_flag:1;
+
+#ifdef CONFIG_405EZ
+	/*
+	 * Added-sr: 2007-07-26
+	 *
+	 * Since the 405EZ (Ultra) only support 2047 bytes as
+	 * max transfer size, we have to split up bigger transfers
+	 * into multiple transfers of 1024 bytes sized messages.
+	 * It happens often that transfers of 4096 bytes are
+	 * required (zero-gadget, file_storage-gadget).
+	 *
+	 * "bytes_pending" will hold the amount of bytes that are
+	 * still pending to be send in further messages to complete
+	 * the bigger transfer.
+	 */
+	u32 bytes_pending;
+#endif
+
+	/** Allocated DMA Desc count */
+	u32 desc_cnt;
+
+};
+
+/*
+ * Reasons for halting a host channel.
+ */
+enum dwc_otg_halt_status {
+	DWC_OTG_HC_XFER_NO_HALT_STATUS,
+	DWC_OTG_HC_XFER_COMPLETE,
+	DWC_OTG_HC_XFER_URB_COMPLETE,
+	DWC_OTG_HC_XFER_ACK,
+	DWC_OTG_HC_XFER_NAK,
+	DWC_OTG_HC_XFER_NYET,
+	DWC_OTG_HC_XFER_STALL,
+	DWC_OTG_HC_XFER_XACT_ERR,
+	DWC_OTG_HC_XFER_FRAME_OVERRUN,
+	DWC_OTG_HC_XFER_BABBLE_ERR,
+	DWC_OTG_HC_XFER_DATA_TOGGLE_ERR,
+	DWC_OTG_HC_XFER_AHB_ERR,
+	DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE,
+	DWC_OTG_HC_XFER_URB_DEQUEUE
+};
+
+/**
+ * Host channel descriptor. This structure represents the state of a single
+ * host channel when acting in host mode. It contains the data items needed to
+ * transfer packets to an endpoint via a host channel.
+ */
+struct dwc_hc {
+	/** Host channel number used for register address lookup */
+	u8	 hc_num;
+
+	/** Device to access */
+	unsigned dev_addr:7;
+
+	/** EP to access */
+	unsigned ep_num:4;
+
+	/** EP direction. 0: OUT, 1: IN */
+	unsigned ep_is_in:1;
+
+	/**
+	 * EP speed.
+	 * One of the following values (from ch9.h):
+		USB_SPEED_UNKNOWN = 0,			 enumerating
+		USB_SPEED_LOW, USB_SPEED_FULL,		 usb 1.1
+		USB_SPEED_HIGH,				 usb 2.0
+		USB_SPEED_WIRELESS,			 wireless (usb 2.5)
+		USB_SPEED_SUPER,			 usb 3.0
+	 */
+	unsigned speed:2;
+
+
+	/**
+	 * Endpoint type.
+	 * One of the following (from ch9.h):
+	 *	- USB_ENDPOINT_XFER_CONTROL: 0
+	 *	- USB_ENDPOINT_XFER_ISOC: 1
+	 *	- USB_ENDPOINT_XFER_BULK: 2
+	 *	- USB_ENDPOINT_XFER_INT: 3
+	 */
+	unsigned ep_type:2;
+
+	/** Max packet size in bytes */
+	unsigned max_packet:11;
+
+	/**
+	 * PID for initial transaction.
+	 * 0: DATA0,<br>
+	 * 1: DATA2,<br>
+	 * 2: DATA1,<br>
+	 * 3: MDATA (non-Control EP),
+	 *	  SETUP (Control EP)
+	 */
+	unsigned data_pid_start:2;
+#define DWC_OTG_HC_PID_DATA0 0
+#define DWC_OTG_HC_PID_DATA2 1
+#define DWC_OTG_HC_PID_DATA1 2
+#define DWC_OTG_HC_PID_MDATA 3
+#define DWC_OTG_HC_PID_SETUP 3
+
+	/** Number of periodic transactions per (micro)frame */
+	unsigned multi_count:2;
+
+	/** @name Transfer State */
+	/** @{ */
+
+	/** Pointer to the current transfer buffer position. */
+	u8 *xfer_buff;
+
+	dma_addr_t align_buff;
+	/** Total number of bytes to transfer. */
+	u32 xfer_len;
+	/** Number of bytes transferred so far. */
+	u32 xfer_count;
+	/** Packet count at start of transfer.*/
+	u16 start_pkt_count;
+
+	/**
+	 * Flag to indicate whether the transfer has been started. Set to 1 if
+	 * it has been started, 0 otherwise.
+	 */
+	u8 xfer_started;
+
+	/**
+	 * Set to 1 to indicate that a PING request should be issued on this
+	 * channel. If 0, process normally.
+	 */
+	u8 do_ping;
+
+	/**
+	 * Set to 1 to indicate that the error count for this transaction is
+	 * non-zero. Set to 0 if the error count is 0.
+	 */
+	u8 error_state;
+
+	/**
+	 * Set to 1 to indicate that this channel should be halted the next
+	 * time a request is queued for the channel. This is necessary in
+	 * slave mode if no request queue space is available when an attempt
+	 * is made to halt the channel.
+	 */
+	u8 halt_on_queue;
+
+	/**
+	 * Set to 1 if the host channel has been halted, but the core is not
+	 * finished flushing queued requests. Otherwise 0.
+	 */
+	u8 halt_pending;
+
+	/**
+	 * Reason for halting the host channel.
+	 */
+	enum dwc_otg_halt_status halt_status;
+
+	/*
+	 * Split settings for the host channel
+	 */
+	u8 do_split;		   /**< Enable split for the channel */
+	u8 complete_split;	   /**< Enable complete split */
+	u8 hub_addr;		   /**< Address of high speed hub */
+
+	u8 port_addr;		   /**< Port of the low/full speed device */
+	/** Split transaction position
+	 * One of the following values:
+	 *	  - DWC_HCSPLIT_XACTPOS_MID
+	 *	  - DWC_HCSPLIT_XACTPOS_BEGIN
+	 *	  - DWC_HCSPLIT_XACTPOS_END
+	 *	  - DWC_HCSPLIT_XACTPOS_ALL */
+	u8 xact_pos;
+
+	/** Set when the host channel does a short read. */
+	u8 short_read;
+
+	/**
+	 * Number of requests issued for this channel since it was assigned to
+	 * the current transfer (not counting PINGs).
+	 */
+	u8 requests;
+
+	/**
+	 * Queue Head for the transfer being processed by this channel.
+	 */
+	struct dwc_otg_qh *qh;
+
+	/** @} */
+
+	/** Entry in list of host channels. */
+	struct list_head	hc_list_entry;
+	/** @name Descriptor DMA support */
+	/** @{ */
+
+	/** Number of Transfer Descriptors */
+	u16 ntd;
+
+	/** Descriptor List DMA address */
+	dma_addr_t desc_list_addr;
+
+	/** Scheduling micro-frame bitmap. */
+	u8 schinfo;
+
+	/** @} */
+};
+
+/**
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the DWC_otg controller should be configured.
+ * Parameter values are passed to the CIL initialisation function
+ * dwc_otg_cil_init.
+ */
+struct dwc_otg_core_params {
+	int opt;
+#define dwc_param_opt_default 1
+
+	/**
+	 * Specifies the OTG capabilities. The driver will automatically
+	 * detect the value for this parameter if none is specified.
+	 * 0 - HNP and SRP capable (default)
+	 * 1 - SRP Only capable
+	 * 2 - No HNP/SRP capable
+	 */
+	int otg_cap;
+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE
+
+	/**
+	 * Specifies whether to use slave or DMA mode for accessing the data
+	 * FIFOs. The driver will automatically detect the value for this
+	 * parameter if none is specified.
+	 * 0 - Slave
+	 * 1 - DMA (default, if available)
+	 */
+	int dma_enable;
+#define dwc_param_dma_enable_default 1
+	/**
+	 * When DMA mode is enabled specifies whether to use address DMA or
+	 * DMA Descriptor mode for accessing the data
+	 * FIFOs in device mode. The driver will automatically
+	 * detect the value for this
+	 * parameter if none is specified.
+	 * 0 - address DMA
+	 * 1 - DMA Descriptor(default, if available)
+	 */
+	int dma_desc_enable;
+	/** The DMA Burst size (applicable only for External DMA
+	 * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32)
+	 */
+	int dma_burst_size;	 /* Translate this to GAHBCFG values */
+#define dwc_param_dma_burst_size_default 32
+
+	/**
+	 * Specifies the maximum speed of operation in host and device mode.
+	 * The actual speed depends on the speed of the attached device and
+	 * the value of phy_type. The actual speed depends on the speed of the
+	 * attached device.
+	 * 0 - High Speed (default)
+	 * 1 - Full Speed
+	 */
+	int speed;
+#define dwc_param_speed_default 0
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+
+	/** Specifies whether low power mode is supported when attached
+	 *	to a Full Speed or Low Speed device in host mode.
+	 * 0 - Don't support low power mode (default)
+	 * 1 - Support low power mode
+	 */
+	int host_support_fs_ls_low_power;
+#define dwc_param_host_support_fs_ls_low_power_default 0
+
+	/** Specifies the PHY clock rate in low power mode when connected to a
+	 * Low Speed device in host mode. This parameter is applicable only if
+	 * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+	 * then defaults to 6 MHZ otherwise 48 MHZ.
+	 *
+	 * 0 - 48 MHz
+	 * 1 - 6 MHz
+	 */
+	int host_ls_low_power_phy_clk;
+#define dwc_param_host_ls_low_power_phy_clk_default 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
+	/**
+	 * 0 - Use cC FIFO size parameters
+	 * 1 - Allow dynamic FIFO sizing (default)
+	 */
+	int enable_dynamic_fifo;
+#define dwc_param_enable_dynamic_fifo_default 1
+
+	/** Total number of 4-byte words in the data FIFO memory. This
+	 * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+	 * Tx FIFOs.
+	 * 32 to 32768 (default 8192)
+	 * Note: The total FIFO memory depth in the FPGA configuration is 8192.
+	 */
+	int data_fifo_size;
+#define dwc_param_data_fifo_size_default 8192
+
+	/** Number of 4-byte words in the Rx FIFO in device mode when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1064)
+	 */
+	int dev_rx_fifo_size;
+#define dwc_param_dev_rx_fifo_size_default 1064
+
+	/** Number of 4-byte words in the non-periodic Tx FIFO in device mode
+	 * when dynamic FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int dev_nperio_tx_fifo_size;
+#define dwc_param_dev_nperio_tx_fifo_size_default 1024
+
+	/** Number of 4-byte words in each of the periodic Tx FIFOs in device
+	 * mode when dynamic FIFO sizing is enabled.
+	 * 4 to 768 (default 256)
+	 */
+	u32 dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
+#define dwc_param_dev_perio_tx_fifo_size_default 256
+
+	/** Number of 4-byte words in the Rx FIFO in host mode when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int host_rx_fifo_size;
+#define dwc_param_host_rx_fifo_size_default 1024
+
+	/** Number of 4-byte words in the non-periodic Tx FIFO in host mode
+	 * when Dynamic FIFO sizing is enabled in the core.
+	 * 16 to 32768 (default 1024)
+	 */
+	int host_nperio_tx_fifo_size;
+#define dwc_param_host_nperio_tx_fifo_size_default 1024
+
+	/** Number of 4-byte words in the host periodic Tx FIFO when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int host_perio_tx_fifo_size;
+#define dwc_param_host_perio_tx_fifo_size_default 1024
+
+	/** The maximum transfer size supported in bytes.
+	 * 2047 to 65,535  (default 65,535)
+	 */
+	int max_transfer_size;
+#define dwc_param_max_transfer_size_default 65535
+
+	/** The maximum number of packets in a transfer.
+	 * 15 to 511  (default 511)
+	 */
+	int max_packet_count;
+#define dwc_param_max_packet_count_default 511
+
+	/** The number of host channel registers to use.
+	 * 1 to 16 (default 12)
+	 * Note: The FPGA configuration supports a maximum of 12 host channels.
+	 */
+	int host_channels;
+#define dwc_param_host_channels_default 12
+
+	/** The number of endpoints in addition to EP0 available for device
+	 * mode operations.
+	 * 1 to 15 (default 6 IN and OUT)
+	 * Note: The FPGA configuration supports a maximum of 6 IN and OUT
+	 * endpoints in addition to EP0.
+	 */
+	int dev_endpoints;
+#define dwc_param_dev_endpoints_default 6
+
+		/**
+		 * Specifies the type of PHY interface to use. By default,
+		 * the driver will automatically detect the phy_type.
+		 *
+		 * 0 - Full Speed PHY
+		 * 1 - UTMI+ (default)
+		 * 2 - ULPI
+		 */
+	int phy_type;
+#define DWC_PHY_TYPE_PARAM_FS 0
+#define DWC_PHY_TYPE_PARAM_UTMI 1
+#define DWC_PHY_TYPE_PARAM_ULPI 2
+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI
+
+	/**
+	 * Specifies the UTMI+ Data Width.	This parameter is
+	 * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+	 * PHY_TYPE, this parameter indicates the data width between
+	 * the MAC and the ULPI Wrapper.) Also, this parameter is
+	 * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+	 * to "8 and 16 bits", meaning that the core has been
+	 * configured to work at either data path width.
+	 *
+	 * 8 or 16 bits (default 16)
+	 */
+	int phy_utmi_width;
+#define dwc_param_phy_utmi_width_default 16
+
+	/**
+	 * Specifies whether the ULPI operates at double or single
+	 * data rate. This parameter is only applicable if PHY_TYPE is
+	 * ULPI.
+	 *
+	 * 0 - single data rate ULPI interface with 8 bit wide data
+	 * bus (default)
+	 * 1 - double data rate ULPI interface with 4 bit wide data
+	 * bus
+	 */
+	int phy_ulpi_ddr;
+#define dwc_param_phy_ulpi_ddr_default 0
+
+	/**
+	 * Specifies whether to use the internal or external supply to
+	 * drive the vbus with a ULPI phy.
+	 */
+	int phy_ulpi_ext_vbus;
+#define DWC_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1
+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS
+
+	/**
+	 * Specifies whether to use the I2Cinterface for full speed PHY. This
+	 * parameter is only applicable if PHY_TYPE is FS.
+	 * 0 - No (default)
+	 * 1 - Yes
+	 */
+	int i2c_enable;
+#define dwc_param_i2c_enable_default 0
+
+	int ulpi_fs_ls;
+#define dwc_param_ulpi_fs_ls_default 0
+
+	int ts_dline;
+#define dwc_param_ts_dline_default 0
+
+	/**
+	 * Specifies whether dedicated transmit FIFOs are
+	 * enabled for non periodic IN endpoints in device mode
+	 * 0 - No
+	 * 1 - Yes
+	 */
+	 int en_multiple_tx_fifo;
+#define dwc_param_en_multiple_tx_fifo_default 1
+
+	/** Number of 4-byte words in each of the Tx FIFOs in device
+	 * mode when dynamic FIFO sizing is enabled.
+	 * 4 to 768 (default 256)
+	 */
+	u32 dev_tx_fifo_size[MAX_TX_FIFOS];
+#define dwc_param_dev_tx_fifo_size_default 256
+
+	/** Thresholding enable flag-
+	 * bit 0 - enable non-ISO Tx thresholding
+	 * bit 1 - enable ISO Tx thresholding
+	 * bit 2 - enable Rx thresholding
+	 */
+	u32 thr_ctl;
+#define dwc_param_thr_ctl_default 0
+
+	/** Thresholding length for Tx
+	 *	FIFOs in 32 bit DWORDs
+	 */
+	u32 tx_thr_length;
+#define dwc_param_tx_thr_length_default 64
+
+	/** Thresholding length for Rx
+	 *	FIFOs in 32 bit DWORDs
+	 */
+	u32 rx_thr_length;
+#define dwc_param_rx_thr_length_default 64
+	/**
+	 * Specifies whether LPM (Link Power Management) support is enabled
+	 */
+	int lpm_enable;
+
+	/** Per Transfer Interrupt
+	 *	mode enable flag
+	 * 1 - Enabled
+	 * 0 - Disabled
+	 */
+	int pti_enable;
+
+	/** Multi Processor Interrupt
+	 *	mode enable flag
+	 * 1 - Enabled
+	 * 0 - Disabled
+	 */
+	int mpi_enable;
+
+	/** IS_USB Capability
+	 * 1 - Enabled
+	 * 0 - Disabled
+	 */
+	int ic_usb_cap;
+
+	/** AHB Threshold Ratio
+	 * 2'b00 AHB Threshold = 	MAC Threshold
+	 * 2'b01 AHB Threshold = 1/2 	MAC Threshold
+	 * 2'b10 AHB Threshold = 1/4	MAC Threshold
+	 * 2'b11 AHB Threshold = 1/8	MAC Threshold
+	 */
+	int ahb_thr_ratio;
+};
+
+#ifdef DEBUG
+struct dwc_otg_core_if;
+struct hc_xfer_info {
+	struct dwc_otg_core_if	*core_if;
+	struct dwc_hc		*hc;
+};
+#endif
+/*
+ * Device States
+ */
+enum dwc_otg_lx_state {
+	/** On state */
+	DWC_OTG_L0,
+	/** LPM sleep state*/
+	DWC_OTG_L1,
+	/** USB suspend state*/
+	DWC_OTG_L2,
+	/** Off state*/
+	DWC_OTG_L3
+};
+
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+struct dma_xfer_s {
+	u32	*dma_data_buff;
+	void	*dma_data_fifo;
+	u32	dma_count;
+	u32	dma_dir;
+};
+#endif
+
+/**
+ * The <code>dwc_otg_core_if</code> structure contains information needed to
+ * manage the DWC_otg controller acting in either host or device mode. It
+ * represents the programming view of the controller as a whole.
+ */
+struct dwc_otg_core_if {
+	/** Parameters that define how the core should be configured.*/
+	struct dwc_otg_core_params	   *core_params;
+
+	/** Core Global registers starting at offset 000h. */
+	struct dwc_otg_core_global_regs __iomem *core_global_regs;
+
+	/** Device-specific information */
+	struct dwc_otg_dev_if		   *dev_if;
+	/** Host-specific information */
+	struct dwc_otg_host_if		   *host_if;
+	/** Value from SNPSID register */
+	u32 snpsid;
+
+	/*
+	 * Set to 1 if the core PHY interface bits in USBCFG have been
+	 * initialized.
+	 */
+	u8 phy_init_done;
+
+	/*
+	 * SRP Success flag, set by srp success interrupt in FS I2C mode
+	 */
+	u8 srp_success;
+	u8 srp_timer_started;
+
+	/* Common configuration information */
+	/** Power and Clock Gating Control Register */
+	u32 __iomem *pcgcctl;
+#define DWC_OTG_PCGCCTL_OFFSET 0xE00
+
+	/** Push/pop addresses for endpoints or host channels.*/
+	u32 __iomem *data_fifo[MAX_EPS_CHANNELS];
+#define DWC_OTG_DATA_FIFO_OFFSET 0x1000
+#define DWC_OTG_DATA_FIFO_SIZE 0x1000
+
+	/** Total RAM for FIFOs (Bytes) */
+	u16 total_fifo_size;
+	/** Size of Rx FIFO (Bytes) */
+	u16 rx_fifo_size;
+	/** Size of Non-periodic Tx FIFO (Bytes) */
+	u16 nperio_tx_fifo_size;
+
+
+	/** 1 if DMA is enabled, 0 otherwise. */
+	u8 dma_enable;
+	/** 1 if DMA descriptor is enabled, 0 otherwise. */
+	u8 dma_desc_enable;
+
+	/** 1 if PTI Enhancement mode is enabled, 0 otherwise. */
+	u8 pti_enh_enable;
+
+	/** 1 if MPI Enhancement mode is enabled, 0 otherwise. */
+	u8 multiproc_int_enable;
+
+	/** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */
+	u8 en_multiple_tx_fifo;
+
+	/** Set to 1 if multiple packets of a high-bandwidth transfer is in
+	 * process of being queued */
+	u8 queuing_high_bandwidth;
+
+	/** Hardware Configuration -- stored here for convenience.*/
+	union hwcfg1_data hwcfg1;
+	union hwcfg2_data hwcfg2;
+	union hwcfg3_data hwcfg3;
+	union hwcfg4_data hwcfg4;
+
+	/** Host and Device Configuration -- stored here for convenience.*/
+	union hcfg_data hcfg;
+	union dcfg_data dcfg;
+
+	/** The operational state; during transitions
+	 * (a_host=>a_peripheral and b_device=>b_host) this may not
+	 * match the core but allows the software to determine
+	 * transitions.
+	 */
+	u8 op_state;
+
+	/**
+	 * Set to 1 if the HCD needs to be restarted on a session request
+	 * interrupt. This is required if no connector ID status change has
+	 * occurred since the HCD was last disconnected.
+	 */
+	u8 restart_hcd_on_session_req;
+
+	/** Operational states for op_state */
+	/** A-Device is a_host */
+#define A_HOST		(1)
+	/** A-Device is a_suspend */
+#define A_SUSPEND	(2)
+	/** A-Device is a_peripheral */
+#define A_PERIPHERAL	(3)
+	/** B-Device is operating as a Peripheral. */
+#define B_PERIPHERAL	(4)
+	/** B-Device is operating as a Host. */
+#define B_HOST		(5)
+
+	/** HCD callbacks */
+	struct dwc_otg_cil_callbacks *hcd_cb;
+	/** PCD callbacks */
+	struct dwc_otg_cil_callbacks *pcd_cb;
+
+	/** Device mode Periodic Tx FIFO Mask */
+	u32 p_tx_msk;
+	/** Device mode Tx FIFO Mask */
+	u32 tx_msk;
+
+	/** Workqueue object used for handling several interrupts */
+	struct workqueue_struct *wq_otg;
+
+	/** Timer object used for handling "Wakeup Detected" Interrupt */
+	struct timer_list *wkp_timer;
+#ifdef DEBUG
+	u32			start_hcchar_val[MAX_EPS_CHANNELS];
+
+	struct hc_xfer_info	hc_xfer_info[MAX_EPS_CHANNELS];
+	struct timer_list	hc_xfer_timer[MAX_EPS_CHANNELS];
+
+	u32			hfnum_7_samples;
+	u64			hfnum_7_frrem_accum;
+	u32			hfnum_0_samples;
+	u64			hfnum_0_frrem_accum;
+	u32			hfnum_other_samples;
+	u64			hfnum_other_frrem_accum;
+#endif
+	resource_size_t phys_addr;
+	/* Added to support PLB DMA : phys-virt mapping */
+
+#ifdef OTG_PPC_PLB_DMA_TASKLET
+	/* Tasklet to do plbdma */
+	struct tasklet_struct   *plbdma_tasklet;
+#if 1
+	struct dma_xfer_s	dma_xfer;	/* was undeclared typedef dma_xfer_t */
+#else
+	u32 			*dma_data_buff;
+	void 			*dma_data_fifo;
+	u32 			dma_count;
+	u32 			dma_dir;
+#endif
+#endif
+	/** Lx state of device */
+	enum dwc_otg_lx_state lx_state;
+
+	struct dwc_otg_device *otg_dev;
+};
+
+
+/*
+ * The following functions are deferred-work handlers
+ * used while servicing some interrupts.
+ */
+extern void w_wakeup_detected(unsigned long);
+
+/* The following functions support initialization of the CIL driver component
+ * and the DWC_otg controller.
+ */
+extern struct dwc_otg_core_if *dwc_otg_cil_init(
+		u32 __iomem *reg_base_addr,
+		struct dwc_otg_core_params *_core_params,
+		struct dwc_otg_device *dwc_otg_device);
+extern void dwc_otg_cil_remove(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_core_init(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_core_host_init(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_core_dev_init(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_enable_global_interrupts(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_disable_global_interrupts(struct dwc_otg_core_if *core_if);
+
+/* Debug functions */
+
+#if defined(DEBUG) && defined(VERBOSE)
+extern void dwc_otg_dump_msg(const u8 *buf, unsigned int length);
+#else
+static inline void dwc_otg_dump_msg(const u8 *buf, unsigned int length)
+{
+}
+#endif
+
+/** @name Device CIL Functions
+ * The following functions support managing the DWC_otg controller in device
+ * mode.
+ */
+/**@{*/
+extern void
+dwc_otg_wakeup(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_read_setup_packet(struct dwc_otg_core_if *core_if, u32 *dest);
+
+extern u32
+dwc_otg_get_frame_number(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_ep0_activate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_activate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_deactivate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_start_zl_transfer(struct dwc_otg_core_if *core_if,
+					struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep0_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep0_continue_transfer(struct dwc_otg_core_if *core_if,
+					struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_write_packet(struct dwc_otg_core_if *core_if, struct dwc_ep *ep,
+					int dma);
+
+extern void
+dwc_otg_ep_set_stall(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_ep_clear_stall(struct dwc_otg_core_if *core_if, struct dwc_ep *ep);
+
+extern void
+dwc_otg_enable_device_interrupts(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_dump_dev_registers(struct dwc_otg_core_if *core_if);
+
+/**@}*/
+
+/** @name Host CIL Functions
+ * The following functions support managing the DWC_otg controller in host
+ * mode.
+ */
+/**@{*/
+extern void
+dwc_otg_hc_init(struct dwc_otg_core_if *core_if, struct dwc_hc *hc);
+
+extern void
+dwc_otg_hc_halt(struct dwc_otg_core_if *core_if, struct dwc_hc *hc,
+			enum dwc_otg_halt_status _halt_status);
+
+extern void
+dwc_otg_hc_cleanup(struct dwc_otg_core_if *core_if, struct dwc_hc *hc);
+
+extern void
+dwc_otg_hc_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_hc *hc);
+
+extern int
+dwc_otg_hc_continue_transfer(struct dwc_otg_core_if *core_if,
+				struct dwc_hc *hc);
+
+extern void
+dwc_otg_hc_do_ping(struct dwc_otg_core_if *core_if, struct dwc_hc *hc);
+
+extern void
+dwc_otg_hc_write_packet(struct dwc_otg_core_if *core_if, struct dwc_hc *hc);
+
+extern void
+dwc_otg_enable_host_interrupts(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_disable_host_interrupts(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_hc_start_transfer_ddma(struct dwc_otg_core_if *core_if,
+					struct dwc_hc *hc);
+
+/* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_, _intr_) \
+do { \
+	union hcint_data hcint_clear = {.d32 = 0}; \
+	hcint_clear.b._intr_ = 1; \
+	dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \
+} while (0)
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hc_regs_, _intr_) \
+do { \
+	union hcintmsk_data hcintmsk = {.d32 = 0}; \
+	hcintmsk.b._intr_ = 1; \
+	dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \
+} while (0)
+/**
+ * This function reads HPRT0 in preparation for modification. It keeps
+ * the write-clear (WC) bits at 0 so that if they are read as 1, they
+ * won't clear when the value is written back.
+ */
+static inline u32 dwc_otg_read_hprt0(struct dwc_otg_core_if *core_if)
+{
+	union hprt0_data hprt0;
+	hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+	hprt0.b.prtena = 0;
+	hprt0.b.prtconndet = 0;
+	hprt0.b.prtenchng = 0;
+	hprt0.b.prtovrcurrchng = 0;
+	return hprt0.d32;
+}
+
+extern void dwc_otg_dump_host_registers(struct dwc_otg_core_if *core_if);
+/**@}*/
+
+/** @name Common CIL Functions
+ * The following functions support managing the DWC_otg controller in either
+ * device or host mode.
+ */
+/**@{*/
+
+extern void
+dwc_otg_read_packet(struct dwc_otg_core_if *core_if,
+				u8 *dest,
+				u16 bytes);
+
+extern void
+dwc_otg_dump_global_registers(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_flush_tx_fifo(struct dwc_otg_core_if *core_if, const int _num);
+
+extern void
+dwc_otg_flush_rx_fifo(struct dwc_otg_core_if *core_if);
+
+extern void
+dwc_otg_core_reset(struct dwc_otg_core_if *core_if);
+
+#ifdef OTG_PPC_PLB_DMA
+extern void
+ppc4xx_start_plb_dma(struct dwc_otg_core_if *core_if, void *src, void *dst,
+			unsigned int length, unsigned int use_interrupt,
+			unsigned int dma_ch, unsigned int dma_dir);
+#endif
+#define NP_TXFIFO_EMPTY -1
+#define MAX_NP_TXREQUEST_Q_SLOTS 8
+/**
+ * This function returns the endpoint number of the request at
+ * the top of non-periodic TX FIFO, or -1 if the request FIFO is
+ * empty.
+ */
+static inline int dwc_otg_top_nptxfifo_epnum(struct dwc_otg_core_if *core_if)
+{
+	union gnptxsts_data txstatus = {.d32 = 0};
+
+	txstatus.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
+	return (txstatus.b.nptxqspcavail == MAX_NP_TXREQUEST_Q_SLOTS ?
+		-1 : txstatus.b.nptxqtop_chnep);
+}
+
+/**
+ * This function returns the Core Interrupt register.
+ */
+static inline u32 dwc_otg_read_core_intr(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gintsts) &
+		dwc_read_reg32(&core_if->core_global_regs->gintmsk);
+}
+
+/**
+ * This function returns the OTG Interrupt register.
+ */
+static inline u32 dwc_otg_read_otg_intr(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gotgint);
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the IN endpoint interrupt bits.
+ */
+static inline u32
+dwc_otg_read_dev_all_in_ep_intr(struct dwc_otg_core_if *core_if)
+{
+	u32 v;
+	if (core_if->multiproc_int_enable) {
+		v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->
+				   deachint) & dwc_read_reg32(&core_if->dev_if->
+							      dev_global_regs->
+							      deachintmsk);
+	} else {
+		v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) &
+		    dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk);
+	}
+	return v & 0xffff;
+
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the OUT endpoint interrupt bits.
+ */
+static inline u32
+dwc_otg_read_dev_all_out_ep_intr(struct dwc_otg_core_if *core_if)
+{
+	u32 v;
+	if (core_if->multiproc_int_enable) {
+		v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->
+				   deachint) & dwc_read_reg32(&core_if->dev_if->
+							      dev_global_regs->
+							      deachintmsk);
+	} else {
+		v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) &
+		    dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk);
+	}
+	return (v & 0xffff0000) >> 16;
+}
+
+/**
+ * This function returns the Device IN EP Interrupt register
+ */
+static inline
+u32 dwc_otg_read_dev_in_ep_intr(struct dwc_otg_core_if *core_if,
+					struct dwc_ep *ep)
+{
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	u32 v, msk, emp;
+
+	if (core_if->multiproc_int_enable) {
+		msk =
+		    dwc_read_reg32(&dev_if->dev_global_regs->
+				   diepeachintmsk[ep->num]);
+		emp =
+		    dwc_read_reg32(&dev_if->dev_global_regs->
+				   dtknqr4_fifoemptymsk);
+		msk |= ((emp >> ep->num) & 0x1) << 7;
+		v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
+	} else {
+		msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk);
+		emp =
+		    dwc_read_reg32(&dev_if->dev_global_regs->
+				   dtknqr4_fifoemptymsk);
+		msk |= ((emp >> ep->num) & 0x1) << 7;
+		v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
+	}
+/*
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	u32 v;
+	v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) &
+			dwc_read_reg32(&dev_if->dev_global_regs->diepmsk);
+*/
+	return v;
+}
+/**
+ * This function returns the Device OUT EP Interrupt register
+ */
+static inline u32
+dwc_otg_read_dev_out_ep_intr(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	u32 v;
+	union doepint_data msk = {.d32 = 0 };
+
+	if (core_if->multiproc_int_enable) {
+		msk.d32 =
+		    dwc_read_reg32(&dev_if->dev_global_regs->
+				   doepeachintmsk[ep->num]);
+		if (core_if->pti_enh_enable)
+			msk.b.pktdrpsts = 1;
+		v = dwc_read_reg32(&dev_if->out_ep_regs[ep->num]->
+				   doepint) & msk.d32;
+	} else {
+		msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk);
+		if (core_if->pti_enh_enable)
+			msk.b.pktdrpsts = 1;
+		v = dwc_read_reg32(&dev_if->out_ep_regs[ep->num]->
+				   doepint) & msk.d32;
+	}
+	return v;
+}
+
+/**
+ * This function returns the Host All Channel Interrupt register
+ */
+static inline u32
+dwc_otg_read_host_all_channels_intr(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->host_if->host_global_regs->haint);
+}
+
+static inline u32
+dwc_otg_read_host_channel_intr(struct dwc_otg_core_if *core_if,
+		struct dwc_hc *hc)
+{
+	return dwc_read_reg32(&core_if->host_if->hc_regs[hc->hc_num]->hcint);
+}
+
+
+/**
+ * This function returns the mode of the operation, host or device.
+ *
+ * @return 0 - Device Mode, 1 - Host Mode
+ */
+static inline u32 dwc_otg_mode(struct dwc_otg_core_if *core_if)
+{
+	return dwc_read_reg32(&core_if->core_global_regs->gintsts) & 0x1;
+}
+
+static inline u8 dwc_otg_is_device_mode(struct dwc_otg_core_if *core_if)
+{
+	return dwc_otg_mode(core_if) != DWC_HOST_MODE;
+}
+static inline u8 dwc_otg_is_host_mode(struct dwc_otg_core_if *core_if)
+{
+	return dwc_otg_mode(core_if) == DWC_HOST_MODE;
+}
+
+extern int dwc_otg_handle_common_intr(struct dwc_otg_core_if *core_if);
+
+
+/**@}*/
+
+/**
+ * DWC_otg CIL callback structure. This structure allows the HCD and
+ * PCD to register functions used for starting and stopping the PCD
+ * and HCD for a role change on a DRD (dual-role device).
+ */
+struct dwc_otg_cil_callbacks {
+	/** Start function for role change */
+	int (*start) (void *_p);
+	/** Stop Function for role change */
+	int (*stop) (void *_p);
+	/** Disconnect Function for role change */
+	int (*disconnect) (void *_p);
+	/** Resume/Remote wakeup Function */
+	int (*resume_wakeup) (void *_p);
+	/** Suspend function */
+	int (*suspend) (void *_p);
+	/** Session Start (SRP) */
+	int (*session_start) (void *_p);
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	/** Sleep (switch to L0 state) */
+	int (*sleep) (void *_p);
+#endif
+	/** Pointer passed to start() and stop() */
+	void *p;
+};
+
+extern void
+dwc_otg_cil_register_pcd_callbacks(struct dwc_otg_core_if *core_if,
+					struct dwc_otg_cil_callbacks *_cb,
+					void *_p);
+extern void
+dwc_otg_cil_register_hcd_callbacks(struct dwc_otg_core_if *core_if,
+					struct dwc_otg_cil_callbacks *_cb,
+					void *_p);
+#endif
diff --git a/drivers/usb/dwc_otg/dwc_otg_cil_intr.c b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
new file mode 100644
index 0000000..4040f81
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
@@ -0,0 +1,890 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * This file contains the Common Interrupt handlers.
+ */
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+static const char *op_state_str(struct dwc_otg_core_if * core_if)
+{
+	return (core_if->op_state == A_HOST ? "a_host" :
+		(core_if->op_state == A_SUSPEND ? "a_suspend" :
+		(core_if->op_state == A_PERIPHERAL ? "a_peripheral" :
+		(core_if->op_state == B_PERIPHERAL ? "b_peripheral" :
+		(core_if->op_state == B_HOST ? "b_host" : "unknown")))));
+}
+
+/** This function warns about a Mode Mismatch Interrupt and clears it.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static int dwc_otg_handle_mode_mismatch_intr(struct dwc_otg_core_if *core_if)
+{
+	union gintsts_data gintsts;
+	DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
+		  dwc_otg_mode(core_if) ? "Host" : "Device");
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.modemismatch = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+/** Start the HCD.  Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void hcd_start(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->hcd_cb && core_if->hcd_cb->start)
+		core_if->hcd_cb->start(core_if->hcd_cb->p);
+}
+
+/** Disconnect the HCD.  Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void hcd_disconnect(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->hcd_cb && core_if->hcd_cb->disconnect)
+		core_if->hcd_cb->disconnect(core_if->hcd_cb->p);
+}
+
+#ifndef DWC_HOST_ONLY
+/** Inform the HCD that a New Session has begun.  Helper function for
+ * using the HCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void hcd_session_start(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->hcd_cb && core_if->hcd_cb->session_start)
+		core_if->hcd_cb->session_start(core_if->hcd_cb->p);
+}
+#endif
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+/**
+ * Inform the HCD about LPM sleep.
+ * Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void hcd_sleep(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->hcd_cb && core_if->hcd_cb->sleep)
+		core_if->hcd_cb->sleep(core_if->hcd_cb->p);
+}
+#endif
+
+/** Resume the HCD.  Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void hcd_resume(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->hcd_cb && core_if->hcd_cb->resume_wakeup)
+		core_if->hcd_cb->resume_wakeup(core_if->hcd_cb->p);
+}
+/** Start the PCD.  Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void pcd_start(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->pcd_cb && core_if->pcd_cb->start)
+		core_if->pcd_cb->start(core_if->pcd_cb->p);
+}
+
+/** Stop the PCD.  Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void pcd_stop(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->pcd_cb && core_if->pcd_cb->stop)
+		core_if->pcd_cb->stop(core_if->pcd_cb->p);
+}
+
+/** Suspend the PCD.  Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void pcd_suspend(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->pcd_cb && core_if->pcd_cb->suspend)
+		core_if->pcd_cb->suspend(core_if->pcd_cb->p);
+}
+
+/** Resume the PCD.  Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static void pcd_resume(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup)
+		core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+}
+
+/**
+ * This function handles the OTG Interrupts. It reads the OTG
+ * Interrupt Register (GOTGINT) to determine what interrupt has
+ * occurred.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static int dwc_otg_handle_otg_intr(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gotgint_data gotgint;
+	union gotgctl_data gotgctl;
+	union gintmsk_data gintmsk;
+	gotgint.d32 = dwc_read_reg32(&global_regs->gotgint);
+	gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+	DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32,
+		     op_state_str(core_if));
+
+	if (gotgint.b.sesenddet) {
+		DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+			     "Session End Detected++ (%s)\n",
+			     op_state_str(core_if));
+		gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+		if (core_if->op_state == B_HOST) {
+			pcd_start(core_if);
+			core_if->op_state = B_PERIPHERAL;
+		} else {
+			/* If not B_HOST and Device HNP still set. HNP
+			 * Did not succeed!*/
+			if (gotgctl.b.devhnpen) {
+				DWC_DEBUGPL(DBG_ANY, "Session End Detected\n");
+				DWC_ERROR("Device Not Connected/Responding!\n");
+			}
+			/* If Session End Detected the B-Cable has
+			 * been disconnected. */
+			/* Reset PCD and Gadget driver to a
+			 * clean state. */
+			core_if->lx_state = DWC_OTG_L0;
+			pcd_stop(core_if);
+		}
+		gotgctl.d32 = 0;
+		gotgctl.b.devhnpen = 1;
+		dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+	}
+	if (gotgint.b.sesreqsucstschng) {
+		DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+			     "Session Request Success Status Change++\n");
+		gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+		if (gotgctl.b.sesreqscs) {
+			if ((core_if->core_params->phy_type ==
+			      DWC_PHY_TYPE_PARAM_FS)
+			     && (core_if->core_params->i2c_enable)) {
+				core_if->srp_success = 1;
+			} else {
+				pcd_resume(core_if);
+
+				/* Clear Session Request */
+				gotgctl.d32 = 0;
+				gotgctl.b.sesreq = 1;
+				dwc_modify_reg32(&global_regs->gotgctl,
+						  gotgctl.d32, 0);
+			}
+		}
+	}
+	if (gotgint.b.hstnegsucstschng) {
+		/* Print statements during the HNP interrupt handling
+		 * can cause it to fail.*/
+		gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+		if (gotgctl.b.hstnegscs) {
+			if (dwc_otg_is_host_mode(core_if)) {
+				core_if->op_state = B_HOST;
+
+				/*
+				 * Need to disable SOF interrupt immediately.
+				 * When switching from device to host, the PCD
+				 * interrupt handler won't handle the
+				 * interrupt if host mode is already set. The
+				 * HCD interrupt handler won't get called if
+				 * the HCD state is HALT. This means that the
+				 * interrupt does not get handled and Linux
+				 * complains loudly.
+				 */
+				gintmsk.d32 = 0;
+				gintmsk.b.sofintr = 1;
+				dwc_modify_reg32(&global_regs->gintmsk,
+						gintmsk.d32,
+						0);
+
+				pcd_stop(core_if);
+				/*
+				 * Initialize the Core for Host mode.
+				 */
+				hcd_start(core_if);
+				core_if->op_state = B_HOST;
+			}
+		} else {
+			gotgctl.d32 = 0;
+			gotgctl.b.hnpreq = 1;
+			gotgctl.b.devhnpen = 1;
+			dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+			DWC_DEBUGPL(DBG_ANY, "HNP Failed\n");
+			DWC_ERROR("Device Not Connected/Responding\n");
+		}
+	}
+	if (gotgint.b.hstnegdet) {
+
+		/* The disconnect interrupt is set at the same time as
+		 * Host Negotiation Detected.  During the mode
+		 * switch all interrupts are cleared so the disconnect
+		 * interrupt handler will not get executed.
+		 */
+		DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+				"Host Negotiation Detected++ (%s)\n",
+				(dwc_otg_is_host_mode(core_if) ? "Host" :
+				 "Device"));
+		if (dwc_otg_is_device_mode(core_if)) {
+			DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",
+				     core_if->op_state);
+			hcd_disconnect(core_if);
+			pcd_start(core_if);
+			core_if->op_state = A_PERIPHERAL;
+		} else {
+
+			/*
+			 * Need to disable SOF interrupt immediately. When
+			 * switching from device to host, the PCD interrupt
+			 * handler won't handle the interrupt if host mode is
+			 * already set. The HCD interrupt handler won't get
+			 * called if the HCD state is HALT. This means that
+			 * the interrupt does not get handled and Linux
+			 * complains loudly.
+			 */
+			gintmsk.d32 = 0;
+			gintmsk.b.sofintr = 1;
+			dwc_modify_reg32(&global_regs->gintmsk, gintmsk.d32,
+					  0);
+			pcd_stop(core_if);
+			hcd_start(core_if);
+			core_if->op_state = A_HOST;
+		}
+	}
+	if (gotgint.b.adevtoutchng) {
+		DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+			     "A-Device Timeout Change++\n");
+		core_if->otg_dev->soc_disable_vbus();
+	}
+	if (gotgint.b.debdone) {
+		DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+			     "Debounce Done++\n");
+	}
+
+	/* Clear GOTGINT */
+	dwc_write_reg32(&core_if->core_global_regs->gotgint, gotgint.d32);
+	return 1;
+}
+
+struct work_wrapper {
+	struct work_struct work;
+	struct dwc_otg_core_if *core_if;
+};
+
+static void w_conn_id_status_change(struct work_struct *work)
+{
+	struct work_wrapper *wwrap = container_of(work,
+						struct work_wrapper,
+						work);
+
+	struct dwc_otg_core_if *core_if = wwrap->core_if;
+	u32 count = 0;
+	union gotgctl_data gotgctl = {.d32 = 0 };
+
+	gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+	DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
+	DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);
+
+	/* B-Device connector (Device Mode) */
+	if (gotgctl.b.conidsts) {
+		/* Wait for switch to device mode. */
+		while (!dwc_otg_is_device_mode(core_if)) {
+			DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n",
+				   (dwc_otg_is_host_mode(core_if) ? "Host" :
+				    "Peripheral"));
+			mdelay(100);
+			if (++count > 10000)
+				break;
+		}
+		WARN_ON(count > 10000);
+			  /* "Connection id status change timed out");*/
+		core_if->op_state = B_PERIPHERAL;
+		if (core_if->otg_dev->soc_disable_vbus)
+			core_if->otg_dev->soc_disable_vbus();
+		dwc_otg_core_init(core_if);
+		dwc_otg_enable_global_interrupts(core_if);
+		pcd_start(core_if);
+	} else {
+		/* A-Device connector (Host Mode) */
+		while (!dwc_otg_is_host_mode(core_if)) {
+			DWC_PRINT("Waiting for Host Mode, Mode=%s\n",
+				   (dwc_otg_is_host_mode(core_if) ? "Host" :
+				    "Peripheral"));
+			mdelay(100);
+			if (++count > 10000)
+				break;
+		}
+		WARN_ON(count > 10000);
+			   /*"Connection id status change timed out");*/
+		core_if->op_state = A_HOST;
+		/*
+		 * Initialize the Core for Host mode.
+		 */
+		dwc_otg_core_init(core_if);
+		dwc_otg_enable_global_interrupts(core_if);
+		hcd_start(core_if);
+	}
+	kfree(wwrap);
+}
+
+/**
+ * This function handles the Connector ID Status Change Interrupt.  It
+ * reads the OTG Control register (GOTGCTL) to determine whether this
+ * is a Device to Host Mode transition or a Host Mode to Device
+ * Transition.
+ *
+ * This only occurs when the cable is connected/removed from the PHY
+ * connector.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static
+int dwc_otg_handle_conn_id_status_change_intr(struct dwc_otg_core_if *core_if)
+{
+
+	struct work_wrapper *work_wrap;
+	/*
+	 * Need to disable SOF interrupt immediately. If switching from device
+	 * to host, the PCD interrupt handler won't handle the interrupt if
+	 * host mode is already set. The HCD interrupt handler won't get
+	 * called if the HCD state is HALT. This means that the interrupt does
+	 * not get handled and Linux complains loudly.
+	 */
+	union gintmsk_data gintmsk = {.d32 = 0 };
+	union gintsts_data gintsts = {.d32 = 0 };
+
+	gintmsk.b.sofintr = 1;
+	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+
+	DWC_DEBUGPL(DBG_CIL,
+		    " ++Connector ID Status Change Interrupt++  (%s)\n",
+		    (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"));
+
+	/*
+	 * Need to schedule a work, as there are possible DELAY function calls
+	 */
+
+	work_wrap = kzalloc(sizeof(struct work_wrapper), GFP_ATOMIC);
+
+	if (work_wrap) {
+		work_wrap->core_if = core_if;
+		INIT_WORK(&work_wrap->work, w_conn_id_status_change);
+	} else {
+		printk(KERN_ERR"Failed to alloc workqueue wrapper \n");
+		goto err;
+	}
+
+	if (!queue_work(core_if->wq_otg, &work_wrap->work)) {
+		printk(KERN_ERR
+			"Failed to queue status change workqueue helper\n");
+		BUG();
+	}
+
+
+	/* Set flag and clear interrupt */
+	gintsts.b.conidstschng = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+	return 1;
+
+err:
+	return -1;
+}
+
+/**
+ * This interrupt indicates that a device is initiating the Session
+ * Request Protocol to request the host to turn on bus power so a new
+ * session can begin. The handler responds by turning on bus power. If
+ * the DWC_otg controller is in low power mode, the handler brings the
+ * controller out of low power mode before turning on bus power.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static int dwc_otg_handle_session_req_intr(struct dwc_otg_core_if *core_if)
+{
+
+	union gintsts_data gintsts;
+#ifndef DWC_HOST_ONLY
+	union hprt0_data hprt0;
+
+
+	DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");
+	if (dwc_otg_is_device_mode(core_if))
+		DWC_PRINT("SRP: Device mode\n");
+	else {
+		DWC_PRINT("SRP: Host mode\n");
+
+		/* Turn on the port power bit. */
+		hprt0.d32 = dwc_otg_read_hprt0(core_if);
+		hprt0.b.prtpwr = 1;
+		dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+		if (core_if->otg_dev->soc_enable_vbus)
+			core_if->otg_dev->soc_enable_vbus();
+
+		/* Start the Connection timer. So a message can be displayed
+		 * if connect does not occur within 10 seconds. */
+		hcd_session_start(core_if);
+	}
+
+#endif
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.sessreqintr = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+void w_wakeup_detected(unsigned long p)
+{
+	struct dwc_otg_core_if *core_if = (struct dwc_otg_core_if *) p;
+	/*
+	 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+	 * so that OPT tests pass with all PHYs).
+	 */
+	union hprt0_data hprt0 = {.d32 = 0 };
+#if 0
+	union pcgcctl_data pcgcctl = {.d32 = 0 };
+	/* Restart the Phy Clock */
+	pcgcctl.b.stoppclk = 1;
+	dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0);
+	udelay(10);
+#endif
+	hprt0.d32 = dwc_otg_read_hprt0(core_if);
+	DWC_DEBUGPL(DBG_ANY, "Resume: HPRT0=%0x\n", hprt0.d32);
+
+	hprt0.b.prtres = 0;	/* Resume */
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+	DWC_DEBUGPL(DBG_ANY, "Clear Resume: HPRT0=%0x\n",
+		    dwc_read_reg32(core_if->host_if->hprt0));
+
+	hcd_resume(core_if);
+
+	/** Change to L0 state*/
+	core_if->lx_state = DWC_OTG_L0;
+
+}
+/**
+ * This interrupt indicates that the DWC_otg controller has detected a
+ * resume or remote wakeup sequence. If the DWC_otg controller is in
+ * low power mode, the handler must bring the controller out of low
+ * power mode. The controller automatically begins resume
+ * signaling. The handler schedules a time to stop resume signaling.
+ */
+#define WAKEUP_TIMER_MS 71
+static int dwc_otg_handle_wakeup_detected_intr(struct dwc_otg_core_if *core_if)
+{
+	union gintsts_data gintsts;
+	DWC_DEBUGPL(DBG_ANY,
+		    "++Resume and Remote Wakeup Detected Interrupt++\n");
+
+	DWC_PRINT("%s lxstate = %d\n", __func__, core_if->lx_state);
+	if (dwc_otg_is_device_mode(core_if)) {
+		union dctl_data dctl = {.d32 = 0};
+		DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
+			dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)
+			);
+
+		if (core_if->lx_state == DWC_OTG_L2) {
+#ifdef PARTIAL_POWER_DOWN
+			if (core_if->hwcfg4.b.power_optimiz) {
+				union pcgcctl_data power = {.d32 = 0 };
+
+				power.d32 = dwc_read_reg32(core_if->pcgcctl);
+				DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n",
+					    power.d32);
+
+				power.b.stoppclk = 0;
+				dwc_write_reg32(core_if->pcgcctl, power.d32);
+
+				power.b.pwrclmp = 0;
+				dwc_write_reg32(core_if->pcgcctl, power.d32);
+
+				power.b.rstpdwnmodule = 0;
+				dwc_write_reg32(core_if->pcgcctl, power.d32);
+			}
+#endif
+			/* Clear the Remote Wakeup Signalling */
+			dctl.b.rmtwkupsig = 1;
+			dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+					 dctl, dctl.d32, 0);
+
+			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
+							       p);
+			}
+		} else {
+			union glpmcfg_data lpmcfg;
+			lpmcfg.d32 =
+			    dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+			lpmcfg.b.hird_thres &= (~(1 << 4));
+			dwc_write_reg32(&core_if->core_global_regs->glpmcfg,
+					lpmcfg.d32);
+		}
+		/** Change to L0 state*/
+		core_if->lx_state = DWC_OTG_L0;
+	} else {
+		if (core_if->lx_state != DWC_OTG_L1) {
+			union pcgcctl_data pcgcctl = {.d32 = 0 };
+
+			/* Restart the Phy Clock */
+			pcgcctl.b.stoppclk = 1;
+			dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0);
+
+			mod_timer(core_if->wkp_timer,
+				  jiffies + msecs_to_jiffies(WAKEUP_TIMER_MS));
+
+		} else {
+			union glpmcfg_data lpmcfg;
+
+			/*
+			 * Fix for STAR 9000382324:
+			 * When the High Speed device enters LPM state after
+			 * successfully completing LPM transactions in USB, it
+			 * erroneously detects Reset or Resume even though there
+			 * is no Reset or Resume from the Host.
+			 * As a result of this issue, the device core exits L1
+			 * state when the Host is still in L1. This issue occurs
+			 * randomly if the PHY takes more than 2.5us to enable
+			 * FS terminations after entering L1.
+			 */
+
+			lpmcfg.d32 = dwc_read_reg32(&core_if->
+						    core_global_regs->glpmcfg);
+			if (lpmcfg.b.sleep_state_resumeok) {
+				/* Change to L0 state*/
+				core_if->lx_state = DWC_OTG_L0;
+			} else {
+				/*
+				 * perform a soft disconnect as we are out of
+				 * step with the host
+				 */
+				union dctl_data dctl = {.d32 = 0};
+				dctl.b.sftdiscon = 1;
+				dwc_modify_reg32(&core_if->dev_if->
+						 dev_global_regs->dctl,
+						 0,
+						 dctl.d32);
+				wmb();
+				mdelay(1);
+				dwc_modify_reg32(&core_if->dev_if->
+						 dev_global_regs->dctl,
+						 dctl.d32,
+						 0);
+			}
+		}
+	}
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.wkupintr = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+	return 1;
+}
+
+/**
+ * This interrupt indicates that a device has been disconnected from
+ * the root port.
+ */
+static int dwc_otg_handle_disconnect_intr(struct dwc_otg_core_if *core_if)
+{
+	union gintsts_data gintsts;
+	printk(KERN_ERR "  Disconnect Detected Interrupt++ (%s) %s\n",
+		 (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"),
+		 op_state_str(core_if));
+	DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
+		     (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"),
+		     op_state_str(core_if));
+
+/** @todo Consolidate this if statement. */
+#ifndef DWC_HOST_ONLY
+	if (core_if->op_state == B_HOST) {
+
+		/* If in device mode Disconnect and stop the HCD, then
+		 * start the PCD. */
+		hcd_disconnect(core_if);
+		pcd_start(core_if);
+		core_if->op_state = B_PERIPHERAL;
+	} else if (dwc_otg_is_device_mode(core_if)) {
+		union gotgctl_data gotgctl = {.d32 = 0};
+		gotgctl.d32 =
+		    dwc_read_reg32(&core_if->core_global_regs->gotgctl);
+		if (gotgctl.b.hstsethnpen == 1) {
+			/* Do nothing, if HNP in process the OTG
+			 * interrupt "Host Negotiation Detected"
+			 * interrupt will do the mode switch.
+			 */
+		} else if (gotgctl.b.devhnpen == 0) {
+			/* If in device mode Disconnect and stop the HCD, then
+			 * start the PCD. */
+			hcd_disconnect(core_if);
+			pcd_start(core_if);
+			core_if->op_state = B_PERIPHERAL;
+		} else {
+			DWC_DEBUGPL(DBG_ANY, "!a_peripheral && !devhnpen\n");
+		}
+	} else {
+		if (core_if->op_state == A_HOST) {
+			/* A-Cable still connected but device disconnected. */
+			hcd_disconnect(core_if);
+		}
+	}
+
+#endif	/*  */
+	/* Change to L3(OFF) state */
+	core_if->lx_state = DWC_OTG_L3;
+	gintsts.d32 = 0;
+	gintsts.b.disconnect = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+/**
+ * This interrupt indicates that SUSPEND state has been detected on
+ * the USB.
+ *
+ * For HNP the USB Suspend interrupt signals the change from
+ * "a_peripheral" to "a_host".
+ *
+ * When power management is enabled the core will be put in low power
+ * mode.
+ */
+static int dwc_otg_handle_usb_suspend_intr(struct dwc_otg_core_if *core_if)
+{
+	union dsts_data dsts;
+	union gintsts_data gintsts;
+	DWC_DEBUGPL(DBG_ANY, "USB SUSPEND\n");
+
+	if (dwc_otg_is_device_mode(core_if)) {
+		/* Check the Device status register to determine if the Suspend
+		 * state is active. */
+		dsts.d32 =
+		    dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
+		DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
+		DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
+			    "HWCFG4.power Optimize=%d\n",
+			    dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz);
+
+#ifdef PARTIAL_POWER_DOWN
+/** @todo Add a module parameter for power management. */
+		if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) {
+			union pcgcctl_data power = {.d32 = 0};
+			DWC_DEBUGPL(DBG_CIL, "suspend\n");
+			power.b.pwrclmp = 1;
+			dwc_write_reg32(core_if->pcgcctl, power.d32);
+			power.b.rstpdwnmodule = 1;
+			dwc_modify_reg32(core_if->pcgcctl, 0, power.d32);
+			power.b.stoppclk = 1;
+			dwc_modify_reg32(core_if->pcgcctl, 0, power.d32);
+		} else {
+			DWC_DEBUGPL(DBG_ANY, "disconnect?\n");
+		}
+
+#endif	/*  */
+		/* PCD callback for suspend. */
+		pcd_suspend(core_if);
+	} else {
+		if (core_if->op_state == A_PERIPHERAL) {
+			DWC_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
+
+			/* Clear the a_peripheral flag, back to a_host. */
+			pcd_stop(core_if);
+			hcd_start(core_if);
+			core_if->op_state = A_HOST;
+		}
+	}
+
+	/* Change to L2(suspend) state */
+	core_if->lx_state = DWC_OTG_L2;
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.usbsuspend = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+/**
+ * This function handles LPM transaction received interrupt.
+ */
+static int dwc_otg_handle_lpm_intr(struct dwc_otg_core_if *core_if)
+{
+	union glpmcfg_data lpmcfg;
+	union gintsts_data gintsts;
+
+	if (!core_if->core_params->lpm_enable)
+		DWC_PRINT("Unexpected LPM interrupt\n");
+
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	DWC_PRINT("LPM config register = 0x%08x\n", lpmcfg.d32);
+
+	if (dwc_otg_is_host_mode(core_if)) {
+		hcd_sleep(core_if);
+	} else {
+		lpmcfg.b.hird_thres |= (1 << 4);
+		dwc_write_reg32(&core_if->core_global_regs->glpmcfg,
+				lpmcfg.d32);
+	}
+
+	/* Examine prt_sleep_sts after TL1TokenRetry period max (10 us) */
+	udelay(10);
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	if (lpmcfg.b.prt_sleep_sts) {
+		/* Save the current state */
+		core_if->lx_state = DWC_OTG_L1;
+	}
+
+	/* Clear interrupt  */
+	gintsts.d32 = 0;
+	gintsts.b.lpmtranrcvd = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+#endif				/* CONFIG_USB_DWC_OTG_LPM */
+
+/**
+ * This function returns the Core Interrupt register.
+ */
+static u32 dwc_otg_read_common_intr(struct dwc_otg_core_if *core_if)
+{
+	union gintsts_data gintsts;
+	union gintmsk_data gintmsk;
+	union gintmsk_data gintmsk_common = {.d32 = 0};
+	gintmsk_common.b.wkupintr = 1;
+	gintmsk_common.b.sessreqintr = 1;
+	gintmsk_common.b.conidstschng = 1;
+	gintmsk_common.b.otgintr = 1;
+	gintmsk_common.b.modemismatch = 1;
+	gintmsk_common.b.disconnect = 1;
+	gintmsk_common.b.usbsuspend = 1;
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	gintmsk_common.b.lpmtranrcvd = 1;
+#endif
+	/** @todo: The port interrupt occurs while in device
+     * mode. Added code to CIL to clear the interrupt for now!
+     */
+	gintmsk_common.b.portintr = 1;
+	gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts);
+	gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk);
+
+#ifdef DEBUG
+	/* if any common interrupts set */
+	if (gintsts.d32 & gintmsk_common.d32) {
+		DWC_DEBUGPL(DBG_ANY, "gintsts=%08x  gintmsk=%08x\n",
+			     gintsts.d32, gintmsk.d32);
+	}
+
+#endif	/*  */
+	return (gintsts.d32 & gintmsk.d32) & gintmsk_common.d32;
+}
+
+/**
+ * Common interrupt handler.
+ *
+ * The common interrupts are those that occur in both Host and Device mode.
+ * This handler handles the following interrupts:
+ * - Mode Mismatch Interrupt
+ * - Disconnect Interrupt
+ * - OTG Interrupt
+ * - Connector ID Status Change Interrupt
+ * - Session Request Interrupt.
+ * - Resume / Remote Wakeup Detected Interrupt.
+ *
+ */
+int dwc_otg_handle_common_intr(struct dwc_otg_core_if *core_if)
+{
+	int retval = 0;
+	union gintsts_data gintsts;
+	gintsts.d32 = dwc_otg_read_common_intr(core_if);
+	if (gintsts.b.modemismatch)
+		retval |= dwc_otg_handle_mode_mismatch_intr(core_if);
+	if (gintsts.b.otgintr)
+		retval |= dwc_otg_handle_otg_intr(core_if);
+	if (gintsts.b.conidstschng)
+		retval |= dwc_otg_handle_conn_id_status_change_intr(core_if);
+	if (gintsts.b.disconnect)
+		retval |= dwc_otg_handle_disconnect_intr(core_if);
+	if (gintsts.b.sessreqintr)
+		retval |= dwc_otg_handle_session_req_intr(core_if);
+	if (gintsts.b.wkupintr)
+		retval |= dwc_otg_handle_wakeup_detected_intr(core_if);
+	if (gintsts.b.usbsuspend)
+		retval |= dwc_otg_handle_usb_suspend_intr(core_if);
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	if (gintsts.b.lpmtranrcvd)
+		retval |= dwc_otg_handle_lpm_intr(core_if);
+#endif
+	if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) {
+		/* The port interrupt occurs while in device mode with HPRT0
+		 * Port Enable/Disable.
+		 */
+		gintsts.d32 = 0;
+		gintsts.b.portintr = 1;
+		dwc_write_reg32(&core_if->core_global_regs->gintsts,
+				 gintsts.d32);
+		retval |= 1;
+	}
+
+	if (retval)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+
+}
+
diff --git a/drivers/usb/dwc_otg/dwc_otg_core_if.h b/drivers/usb/dwc_otg/dwc_otg_core_if.h
new file mode 100644
index 0000000..9786606
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_core_if.h
@@ -0,0 +1,641 @@
+/*==========================================================================
+ *
+ *Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ *"Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ *otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ *The Software IS NOT an item of Licensed Software or Licensed Product under
+ *any End User Software License Agreement or Agreement for Licensed Product
+ *with Synopsys or any supplement thereto. You are permitted to use and
+ *redistribute this Software in source and binary forms, with or without
+ *modification, provided that redistributions of source code must retain this
+ *notice. You may not view, use, disclose, copy or distribute this file or
+ *any information contained herein except pursuant to this license grant from
+ *Synopsys. If you do not agree with this notice, including the disclaimer
+ *below, then you are not authorized to use the Software.
+ *
+ *THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ *(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ *DAMAGE.
+ *========================================================================== */
+#if !defined(__DWC_CORE_IF_H__)
+#define __DWC_CORE_IF_H__
+
+
+
+
+/**Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/**Maximum number of Tx FIFOs */
+#define MAX_TX_FIFOS 15
+
+/**Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 16
+
+
+
+extern u8 dwc_otg_is_dma_enable(struct dwc_otg_core_if *core_if);
+
+
+/**@name OTG Core Parameters */
+/**@{ */
+
+/**
+ *Specifies the OTG capabilities. The driver will automatically
+ *detect the value for this parameter if none is specified.
+ *0 - HNP and SRP capable (default)
+ *1 - SRP Only capable
+ *2 - No HNP/SRP capable
+ */
+extern int dwc_otg_set_param_otg_cap(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_otg_cap(struct dwc_otg_core_if *core_if);
+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE
+
+extern int dwc_otg_set_param_opt(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_opt(struct dwc_otg_core_if *core_if);
+#define dwc_param_opt_default 1
+
+/**
+ *Specifies whether to use slave or DMA mode for accessing the data
+ *FIFOs. The driver will automatically detect the value for this
+ *parameter if none is specified.
+ *0 - Slave
+ *1 - DMA (default, if available)
+ */
+extern int dwc_otg_set_param_dma_enable(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_dma_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_dma_enable_default 1
+
+/**
+ *When DMA mode is enabled specifies whether to use
+ *address DMA or DMA Descritor mode for accessing the data
+ *FIFOs in device mode. The driver will automatically detect
+ *the value for this parameter if none is specified.
+ *0 - address DMA
+ *1 - DMA Descriptor(default, if available)
+ */
+extern int dwc_otg_set_param_dma_desc_enable(struct dwc_otg_core_if *core_if,
+					     int val);
+extern int dwc_otg_get_param_dma_desc_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_dma_desc_enable_default 1
+
+/**The DMA Burst size (applicable only for External DMA
+ *Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32)
+ */
+extern int dwc_otg_set_param_dma_burst_size(struct dwc_otg_core_if *core_if,
+					    int val);
+extern int dwc_otg_get_param_dma_burst_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_dma_burst_size_default 32
+
+/**
+ *Specifies the maximum speed of operation in host and device mode.
+ *The actual speed depends on the speed of the attached device and
+ *the value of phy_type. The actual speed depends on the speed of the
+ *attached device.
+ *0 - High Speed (default)
+ *1 - Full Speed
+ */
+extern int dwc_otg_set_param_speed(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_speed(struct dwc_otg_core_if *core_if);
+#define dwc_param_speed_default 0
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+
+/**Specifies whether low power mode is supported when attached
+ *	to a Full Speed or Low Speed device in host mode.
+ *0 - Don't support low power mode (default)
+ *1 - Support low power mode
+ */
+extern int
+dwc_otg_set_param_host_support_fs_ls_low_power(struct dwc_otg_core_if *core_if,
+							int val);
+extern int
+dwc_otg_get_param_host_support_fs_ls_low_power(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_support_fs_ls_low_power_default 0
+
+/**Specifies the PHY clock rate in low power mode when connected to a
+ *Low Speed device in host mode. This parameter is applicable only if
+ *HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+ *then defaults to 6 MHZ otherwise 48 MHZ.
+ *
+ *0 - 48 MHz
+ *1 - 6 MHz
+ */
+extern int
+dwc_otg_set_param_host_ls_low_power_phy_clk(struct dwc_otg_core_if *core_if,
+							int val);
+extern int
+dwc_otg_get_param_host_ls_low_power_phy_clk(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_ls_low_power_phy_clk_default 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
+/**
+ *0 - Use cC FIFO size parameters
+ *1 - Allow dynamic FIFO sizing (default)
+ */
+extern int
+dwc_otg_set_param_enable_dynamic_fifo(struct dwc_otg_core_if *core_if, int val);
+extern int
+dwc_otg_get_param_enable_dynamic_fifo(struct dwc_otg_core_if *core_if);
+#define dwc_param_enable_dynamic_fifo_default 1
+
+/**Total number of 4-byte words in the data FIFO memory. This
+ *memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+ *Tx FIFOs.
+ *32 to 32768 (default 8192)
+ *Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ */
+extern int dwc_otg_set_param_data_fifo_size(struct dwc_otg_core_if *core_if,
+						    int val);
+extern int dwc_otg_get_param_data_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_data_fifo_size_default 8192
+
+/**Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ *FIFO sizing is enabled.
+ *16 to 32768 (default 1064)
+ */
+extern int dwc_otg_set_param_dev_rx_fifo_size(struct dwc_otg_core_if *core_if,
+						int val);
+
+extern int dwc_otg_get_param_dev_rx_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_dev_rx_fifo_size_default 1064
+
+/**Number of 4-byte words in the non-periodic Tx FIFO in device mode
+ *when dynamic FIFO sizing is enabled.
+ *16 to 32768 (default 1024)
+ */
+extern int
+dwc_otg_set_param_dev_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						int val);
+extern int
+dwc_otg_get_param_dev_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_dev_nperio_tx_fifo_size_default 1024
+
+/**Number of 4-byte words in each of the periodic Tx FIFOs in device
+ *mode when dynamic FIFO sizing is enabled.
+ *4 to 768 (default 256)
+ */
+extern int
+dwc_otg_set_param_dev_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						int val, int fifo_num);
+extern int
+dwc_otg_get_param_dev_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						int fifo_num);
+#define dwc_param_dev_perio_tx_fifo_size_default 256
+
+/**Number of 4-byte words in the Rx FIFO in host mode when dynamic
+ *FIFO sizing is enabled.
+ *16 to 32768 (default 1024)
+ */
+extern int dwc_otg_set_param_host_rx_fifo_size(struct dwc_otg_core_if *core_if,
+					       int val);
+extern int dwc_otg_get_param_host_rx_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_rx_fifo_size_default 1024
+
+/**Number of 4-byte words in the non-periodic Tx FIFO in host mode
+ *when Dynamic FIFO sizing is enabled in the core.
+ *16 to 32768 (default 1024)
+ */
+extern int
+dwc_otg_set_param_host_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						int val);
+extern int
+dwc_otg_get_param_host_nperio_tx_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_nperio_tx_fifo_size_default 1024
+
+/**Number of 4-byte words in the host periodic Tx FIFO when dynamic
+ *FIFO sizing is enabled.
+ *16 to 32768 (default 1024)
+ */
+extern int
+dwc_otg_set_param_host_perio_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						int val);
+
+extern int
+dwc_otg_get_param_host_perio_tx_fifo_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_perio_tx_fifo_size_default 1024
+
+/**The maximum transfer size supported in bytes.
+ *2047 to 65,535  (default 65,535)
+ */
+extern int
+dwc_otg_set_param_max_transfer_size(struct dwc_otg_core_if *core_if, int val);
+
+extern int dwc_otg_get_param_max_transfer_size(struct dwc_otg_core_if *core_if);
+#define dwc_param_max_transfer_size_default 65535
+
+/**The maximum number of packets in a transfer.
+ *15 to 511  (default 511)
+ */
+extern int
+dwc_otg_set_param_max_packet_count(struct dwc_otg_core_if *core_if, int val);
+
+extern int
+dwc_otg_get_param_max_packet_count(struct dwc_otg_core_if *core_if);
+#define dwc_param_max_packet_count_default 511
+
+/**The number of host channel registers to use.
+ *1 to 16 (default 12)
+ *Note: The FPGA configuration supports a maximum of 12 host channels.
+ */
+extern int
+dwc_otg_set_param_host_channels(struct dwc_otg_core_if *core_if, int val);
+
+extern int
+dwc_otg_get_param_host_channels(struct dwc_otg_core_if *core_if);
+#define dwc_param_host_channels_default 12
+
+/**The number of endpoints in addition to EP0 available for device
+ *mode operations.
+ *1 to 15 (default 6 IN and OUT)
+ *Note: The FPGA configuration supports a maximum of 6 IN and OUT
+ *endpoints in addition to EP0.
+ */
+extern int
+dwc_otg_set_param_dev_endpoints(struct dwc_otg_core_if *core_if, int val);
+
+extern int dwc_otg_get_param_dev_endpoints(struct dwc_otg_core_if *core_if);
+#define dwc_param_dev_endpoints_default 6
+
+/**
+ *Specifies the type of PHY interface to use. By default, the driver
+ *will automatically detect the phy_type.
+ *
+ *0 - Full Speed PHY
+ *1 - UTMI+ (default)
+ *2 - ULPI
+ */
+extern int dwc_otg_set_param_phy_type(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_phy_type(struct dwc_otg_core_if *core_if);
+#define DWC_PHY_TYPE_PARAM_FS 0
+#define DWC_PHY_TYPE_PARAM_UTMI 1
+#define DWC_PHY_TYPE_PARAM_ULPI 2
+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI
+
+/**
+ *Specifies the UTMI+ Data Width.	This parameter is
+ *applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+ *PHY_TYPE, this parameter indicates the data width between
+ *the MAC and the ULPI Wrapper.) Also, this parameter is
+ *applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+ *to "8 and 16 bits", meaning that the core has been
+ *configured to work at either data path width.
+ *
+ *8 or 16 bits (default 16)
+ */
+extern int dwc_otg_set_param_phy_utmi_width(struct dwc_otg_core_if *core_if,
+					    int val);
+extern int dwc_otg_get_param_phy_utmi_width(struct dwc_otg_core_if *core_if);
+#define dwc_param_phy_utmi_width_default 16
+
+/**
+ *Specifies whether the ULPI operates at double or single
+ *data rate. This parameter is only applicable if PHY_TYPE is
+ *ULPI.
+ *
+ *0 - single data rate ULPI interface with 8 bit wide data
+ *bus (default)
+ *1 - double data rate ULPI interface with 4 bit wide data
+ *bus
+ */
+extern int dwc_otg_set_param_phy_ulpi_ddr(struct dwc_otg_core_if *core_if,
+					  int val);
+extern int dwc_otg_get_param_phy_ulpi_ddr(struct dwc_otg_core_if *core_if);
+#define dwc_param_phy_ulpi_ddr_default 0
+
+/**
+ *Specifies whether to use the internal or external supply to
+ *drive the vbus with a ULPI phy.
+ */
+extern int dwc_otg_set_param_phy_ulpi_ext_vbus(struct dwc_otg_core_if *core_if,
+					       int val);
+extern int dwc_otg_get_param_phy_ulpi_ext_vbus(struct dwc_otg_core_if *core_if);
+#define DWC_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1
+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS
+
+/**
+ *Specifies whether to use the I2C interface for full speed PHY. This
+ *parameter is only applicable if PHY_TYPE is FS.
+ *0 - No (default)
+ *1 - Yes
+ */
+extern int dwc_otg_set_param_i2c_enable(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_i2c_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_i2c_enable_default 0
+
+extern int dwc_otg_set_param_ulpi_fs_ls(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_ulpi_fs_ls(struct dwc_otg_core_if *core_if);
+#define dwc_param_ulpi_fs_ls_default 0
+
+extern int dwc_otg_set_param_ts_dline(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_ts_dline(struct dwc_otg_core_if *core_if);
+#define dwc_param_ts_dline_default 0
+
+/**
+ *Specifies whether dedicated transmit FIFOs are
+ *enabled for non periodic IN endpoints in device mode
+ *0 - No
+ *1 - Yes
+ */
+extern int
+dwc_otg_set_param_en_multiple_tx_fifo(struct dwc_otg_core_if *core_if, int val);
+extern int
+dwc_otg_get_param_en_multiple_tx_fifo(struct dwc_otg_core_if *core_if);
+#define dwc_param_en_multiple_tx_fifo_default 1
+
+/**Number of 4-byte words in each of the Tx FIFOs in device
+ *mode when dynamic FIFO sizing is enabled.
+ *4 to 768 (default 256)
+ */
+extern int dwc_otg_set_param_dev_tx_fifo_size(struct dwc_otg_core_if *core_if,
+					      int fifo_num, int val);
+extern int dwc_otg_get_param_dev_tx_fifo_size(struct dwc_otg_core_if *core_if,
+						  int fifo_num);
+#define dwc_param_dev_tx_fifo_size_default 256
+
+/**Thresholding enable flag-
+ *bit 0 - enable non-ISO Tx thresholding
+ *bit 1 - enable ISO Tx thresholding
+ *bit 2 - enable Rx thresholding
+ */
+extern int dwc_otg_set_param_thr_ctl(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_thr_ctl(struct dwc_otg_core_if *core_if, int fifo_num);
+#define dwc_param_thr_ctl_default 0
+
+/**Thresholding length for Tx
+ *FIFOs in 32 bit DWORDs
+ */
+extern int dwc_otg_set_param_tx_thr_length(struct dwc_otg_core_if *core_if,
+					   int val);
+extern int dwc_otg_get_param_tx_thr_length(struct dwc_otg_core_if *core_if);
+#define dwc_param_tx_thr_length_default 64
+
+/**Thresholding length for Rx
+ *	FIFOs in 32 bit DWORDs
+ */
+extern int dwc_otg_set_param_rx_thr_length(struct dwc_otg_core_if *core_if,
+					   int val);
+extern int dwc_otg_get_param_rx_thr_length(struct dwc_otg_core_if *core_if);
+#define dwc_param_rx_thr_length_default 64
+
+/**
+ *Specifies whether LPM (Link Power Management) support is enabled
+ */
+extern int dwc_otg_set_param_lpm_enable(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_lpm_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_lpm_enable_default 1
+
+/**
+ *Specifies whether PTI enhancement is enabled
+ */
+extern int dwc_otg_set_param_pti_enable(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_pti_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_pti_enable_default 0
+
+/**
+ *Specifies whether MPI enhancement is enabled
+ */
+extern int dwc_otg_set_param_mpi_enable(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_mpi_enable(struct dwc_otg_core_if *core_if);
+#define dwc_param_mpi_enable_default 0
+
+/**
+ *Specifies whether IC_USB capability is enabled
+ */
+extern int dwc_otg_set_param_ic_usb_cap(struct dwc_otg_core_if *core_if,
+					int val);
+extern int dwc_otg_get_param_ic_usb_cap(struct dwc_otg_core_if *core_if);
+#define dwc_param_ic_usb_cap_default 0
+
+extern int
+dwc_otg_set_param_ahb_thr_ratio(struct dwc_otg_core_if *core_if, int val);
+extern int dwc_otg_get_param_ahb_thr_ratio(struct dwc_otg_core_if *core_if);
+#define dwc_param_ahb_thr_ratio_default 0
+
+/**@} */
+
+/**@name Access to registers and bit-fields */
+
+/**
+ *Dump core registers and SPRAM
+ */
+extern void dwc_otg_dump_dev_registers(struct dwc_otg_core_if *_core_if);
+extern void dwc_otg_dump_spram(struct dwc_otg_core_if *_core_if);
+extern void dwc_otg_dump_host_registers(struct dwc_otg_core_if *_core_if);
+extern void dwc_otg_dump_global_registers(struct dwc_otg_core_if *_core_if);
+
+/**
+ *Get host negotiation status.
+ */
+extern u32 dwc_otg_get_hnpstatus(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get srp status
+ */
+extern u32 dwc_otg_get_srpstatus(struct dwc_otg_core_if *core_if);
+
+/**
+ *Set hnpreq bit in the GOTGCTL register.
+ */
+extern void dwc_otg_set_hnpreq(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get Content of SNPSID register.
+ */
+extern u32 dwc_otg_get_gsnpsid(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get current mode.
+ *Returns 0 if in device mode, and 1 if in host mode.
+ */
+extern u32 dwc_otg_get_mode(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get value of hnpcapable field in the GUSBCFG register
+ */
+extern u32 dwc_otg_get_hnpcapable(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of hnpcapable field in the GUSBCFG register
+ */
+extern void
+dwc_otg_set_hnpcapable(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of srpcapable field in the GUSBCFG register
+ */
+extern u32 dwc_otg_get_srpcapable(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of srpcapable field in the GUSBCFG register
+ */
+extern void
+dwc_otg_set_srpcapable(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of devspeed field in the DCFG register
+ */
+extern u32 dwc_otg_get_devspeed(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of devspeed field in the DCFG register
+ */
+extern void dwc_otg_set_devspeed(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get the value of busconnected field from the HPRT0 register
+ */
+extern u32 dwc_otg_get_busconnected(struct dwc_otg_core_if *core_if);
+
+/**
+ *Gets the device enumeration Speed.
+ */
+extern u32 dwc_otg_get_enumspeed(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get value of prtpwr field from the HPRT0 register
+ */
+extern u32 dwc_otg_get_prtpower(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of prtpwr field from the HPRT0 register
+ */
+extern void dwc_otg_set_prtpower(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of prtsusp field from the HPRT0 register
+ */
+extern u32 dwc_otg_get_prtsuspend(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of prtsusp field from the HPRT0 register
+ */
+extern void
+dwc_otg_set_prtsuspend(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Set value of prtres field from the HPRT0 register
+ *FIXME Remove?
+ */
+extern void
+dwc_otg_set_prtresume(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of rmtwkupsig bit in DCTL register
+ */
+extern u32 dwc_otg_get_remotewakesig(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get value of prt_sleep_sts field from the GLPMCFG register
+ */
+extern u32
+dwc_otg_get_lpm_portsleepstatus(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get value of rem_wkup_en field from the GLPMCFG register
+ */
+extern u32
+dwc_otg_get_lpm_remotewakeenabled(struct dwc_otg_core_if *core_if);
+
+/**
+ *Get value of appl_resp field from the GLPMCFG register
+ */
+extern u32 dwc_otg_get_lpmresponse(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of appl_resp field from the GLPMCFG register
+ */
+extern void
+dwc_otg_set_lpmresponse(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of hsic_connect field from the GLPMCFG register
+ */
+extern u32 dwc_otg_get_hsic_connect(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of hsic_connect field from the GLPMCFG register
+ */
+extern void
+dwc_otg_set_hsic_connect(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *Get value of inv_sel_hsic field from the GLPMCFG register.
+ */
+extern u32 dwc_otg_get_inv_sel_hsic(struct dwc_otg_core_if *core_if);
+/**
+ *Set value of inv_sel_hsic field from the GLPMCFG register.
+ */
+extern void
+dwc_otg_set_inv_sel_hsic(struct dwc_otg_core_if *core_if, u32 val);
+
+/*
+ *Some functions for accessing registers
+ */
+
+/**
+ * GOTGCTL register
+ */
+extern u32 dwc_otg_get_gotgctl(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_gotgctl(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GUSBCFG register
+ */
+extern u32 dwc_otg_get_gusbcfg(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_gusbcfg(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GRXFSIZ register
+ */
+extern u32 dwc_otg_get_grxfsiz(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_grxfsiz(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GNPTXFSIZ register
+ */
+extern u32 dwc_otg_get_gnptxfsiz(struct dwc_otg_core_if *core_if);
+extern void
+dwc_otg_set_gnptxfsiz(struct dwc_otg_core_if *core_if, u32 val);
+
+extern u32 dwc_otg_get_gpvndctl(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_gpvndctl(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GGPIO register
+ */
+extern u32 dwc_otg_get_ggpio(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_ggpio(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GUID register
+ */
+extern u32 dwc_otg_get_guid(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_guid(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *HPRT0 register
+ */
+extern u32 dwc_otg_get_hprt0(struct dwc_otg_core_if *core_if);
+extern void dwc_otg_set_hprt0(struct dwc_otg_core_if *core_if, u32 val);
+
+/**
+ *GHPTXFSIZE
+ */
+extern u32 dwc_otg_get_hptxfsiz(struct dwc_otg_core_if *core_if);
+
+/**@} */
+
+#endif				/*__DWC_CORE_IF_H__ */
diff --git a/drivers/usb/dwc_otg/dwc_otg_driver.c b/drivers/usb/dwc_otg/dwc_otg_driver.c
new file mode 100644
index 0000000..5823e4a
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_driver.c
@@ -0,0 +1,1347 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ * The dwc_otg_driver module provides the initialization and cleanup entry
+ * points for the DWC_otg driver. This module will be dynamically installed
+ * after Linux is booted using the insmod command. When the module is
+ * installed, the dwc_otg_driver_init function is called. When the module is
+ * removed (using rmmod), the dwc_otg_driver_cleanup function is called.
+ *
+ * This module also defines a data structure for the dwc_otg_driver, which is
+ * used in conjunction with the standard device structure. These
+ * structures allow the OTG driver to comply with the standard Linux driver
+ * model in which devices and drivers are registered with a bus driver. This
+ * has the benefit that Linux can expose attributes of the driver and device
+ * in its special sysfs file system. Users can then read or write files in
+ * this file system to perform diagnostics on the driver components or the
+ * device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h>	 /* permission constants */
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/usb/dwc_otg_platform.h>
+
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+#include "dwc_otg_core_if.h"
+#include "dwc_otg_cil.h"
+#include "dwc_otg_pcd.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+#define DWC_DRIVER_VERSION	"2.90a 23-APR-2009 with Bug Fixes from IMG"
+#define DWC_DRIVER_DESC		"HS OTG USB Controller driver"
+static const char dwc_driver_name[] = "dwc_otg";
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Encapsulate the module parameter settings.
+ *
+ * A value of -1 means "not set by the user": check_parameters() later
+ * replaces every -1 with its compile-time default and/or a value derived
+ * from the core's hardware configuration registers.
+ */
+static struct dwc_otg_core_params dwc_otg_module_params = {
+	.opt = -1,
+	.otg_cap = -1,
+
+	/* DMA mode is chosen at build time via Kconfig. */
+#if defined(CONFIG_USB_DWC_OTG_BUFFER_DMA)
+	.dma_enable = 1,
+	.dma_desc_enable = 0,
+#elif defined(CONFIG_USB_DWC_OTG_DESC_DMA)
+	.dma_enable = 1,
+	.dma_desc_enable = 1,
+#else
+	.dma_enable = 0,
+	.dma_desc_enable = 0,
+#endif
+	.dma_burst_size = -1,
+#if defined(CONFIG_USB_DWC_OTG_SPEED_HIGH)
+	.speed = 0, /* 0 = high speed */
+#elif defined(CONFIG_USB_DWC_OTG_SPEED_FULL)
+	.speed = 1, /* 1 = full speed */
+#else
+	.speed = -1,
+#endif
+	.host_support_fs_ls_low_power = -1,
+	.host_ls_low_power_phy_clk = -1,
+	.enable_dynamic_fifo = -1,
+	.data_fifo_size = -1,
+	.dev_rx_fifo_size = -1,
+	.dev_nperio_tx_fifo_size = -1,
+	.dev_perio_tx_fifo_size = { /* dev_perio_tx_fifo_size_1 */
+	     -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+	     -1, -1
+	}, /* 15 */
+	.host_rx_fifo_size = -1,
+	.host_nperio_tx_fifo_size = -1,
+	.host_perio_tx_fifo_size = -1,
+	.max_transfer_size = -1,
+	.max_packet_count = -1,
+#ifdef DWC_SLAVE /* bug prevents slave mode working with >6 channels */
+	.host_channels = 6,
+#else
+	.host_channels = -1,
+#endif
+	.dev_endpoints = -1,
+	.phy_type = -1,
+	.phy_utmi_width = -1,
+	.phy_ulpi_ddr = -1,
+	.phy_ulpi_ext_vbus = -1,
+	.i2c_enable = -1,
+	.ulpi_fs_ls = -1,
+	.ts_dline = -1,
+	.en_multiple_tx_fifo = -1,
+	.dev_tx_fifo_size = { /* dev_tx_fifo_size */
+	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+	}, /* 15 */
+	.thr_ctl = -1,
+	.tx_thr_length = -1,
+	.rx_thr_length = -1,
+	.pti_enable = -1,
+	.mpi_enable = -1,
+	.lpm_enable = -1,
+	.ic_usb_cap = -1,
+	.ahb_thr_ratio = -1,
+};
+
+
+/**
+ * This function shows the Driver Version.
+ *
+ * sysfs read handler for the driver's "version" attribute.  The bound
+ * handed to snprintf — sizeof of the version literal (which includes its
+ * NUL) plus 2 — covers the appended '\n' with room to spare.
+ */
+static ssize_t version_show(struct device_driver *dev, char *buf)
+{
+	return snprintf(buf, sizeof(DWC_DRIVER_VERSION) + 2, "%s\n",
+			 DWC_DRIVER_VERSION);
+}
+static DRIVER_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * Global Debug Level Mask.
+ *
+ * Zero-initialized (quiet) by default; uncomment the DBG_ANY initializer
+ * or write the "debuglevel" sysfs attribute to enable debug output.
+ */
+u32 g_dbg_lvl; /* = DBG_ANY; */
+
+/**
+ * This function shows the driver Debug Level.
+ *
+ * sysfs read handler: prints the current mask in hexadecimal.
+ */
+static ssize_t dbg_level_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "0x%0x\n", g_dbg_lvl);
+}
+
+
+/**
+ * This function stores the driver Debug Level.
+ *
+ * sysfs write handler: parses the value as hexadecimal with kstrtou32(),
+ * so malformed input is rejected with an error instead of being silently
+ * interpreted as 0 (which the old simple_strtoul() call did).
+ *
+ * Returns the number of bytes consumed, or a negative errno on bad input.
+ */
+static ssize_t dbg_level_store(struct device_driver *drv, const char *buf,
+			       size_t _count)
+{
+	int ret;
+
+	ret = kstrtou32(buf, 16, &g_dbg_lvl);
+	if (ret)
+		return ret;
+	return _count;
+}
+static DRIVER_ATTR(debuglevel, S_IRUGO | S_IWUSR, dbg_level_show,
+		    dbg_level_store);
+
+/**
+ * This function is called during module initialization to verify that
+ * the module parameters are in a valid state.
+ *
+ * Pass 1 range-checks every parameter the user set explicitly; pass 2
+ * fills in defaults for unset (-1) parameters and validates everything
+ * against the core's hardware configuration registers (hwcfg2/3/4 and
+ * the FIFO size registers), clamping where necessary.
+ *
+ * Returns the number of parameters found invalid (0 means all OK).
+ */
+static int check_parameters(struct dwc_otg_core_if *core_if)
+{
+	int i;
+	int retval = 0;
+
+/* Checks if the parameter is outside of its valid range of values */
+#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \
+	    ((dwc_otg_module_params._param_ < (_low_)) || \
+	     (dwc_otg_module_params._param_ > (_high_)))
+/* If the parameter has been set by the user, check that the parameter value is
+ * within the value range of values.  If not, report a module error. */
+#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \
+do { \
+	if (dwc_otg_module_params._param_ != -1) { \
+		if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \
+			DWC_ERROR("`%d' invalid for parameter `%s'\n", \
+			    dwc_otg_module_params._param_, _string_); \
+			dwc_otg_module_params._param_ = \
+				dwc_param_##_param_##_default; \
+			retval++; \
+		} \
+	} \
+} while (0)
+
+	DWC_OTG_PARAM_ERR(opt, 0, 1, "opt");
+	DWC_OTG_PARAM_ERR(otg_cap, 0, 2, "otg_cap");
+	DWC_OTG_PARAM_ERR(dma_enable, 0, 1, "dma_enable");
+	DWC_OTG_PARAM_ERR(speed, 0, 1, "speed");
+	DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power, 0, 1,
+			   "host_support_fs_ls_low_power");
+	DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk, 0, 1,
+			   "host_ls_low_power_phy_clk");
+	DWC_OTG_PARAM_ERR(enable_dynamic_fifo, 0, 1, "enable_dynamic_fifo");
+	DWC_OTG_PARAM_ERR(data_fifo_size, 32, 32768, "data_fifo_size");
+	DWC_OTG_PARAM_ERR(dev_rx_fifo_size, 16, 32768, "dev_rx_fifo_size");
+	DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size, 16, 32768,
+			   "dev_nperio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_rx_fifo_size, 16, 32768, "host_rx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size, 16, 32768,
+			   "host_nperio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size, 16, 32768,
+			   "host_perio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(max_transfer_size, 2047, 524288,
+			   "max_transfer_size");
+	DWC_OTG_PARAM_ERR(max_packet_count, 15, 511, "max_packet_count");
+	DWC_OTG_PARAM_ERR(host_channels, 1, 16, "host_channels");
+	DWC_OTG_PARAM_ERR(dev_endpoints, 1, 15, "dev_endpoints");
+	DWC_OTG_PARAM_ERR(phy_type, 0, 2, "phy_type");
+	DWC_OTG_PARAM_ERR(phy_ulpi_ddr, 0, 1, "phy_ulpi_ddr");
+	DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus, 0, 1, "phy_ulpi_ext_vbus");
+	DWC_OTG_PARAM_ERR(i2c_enable, 0, 1, "i2c_enable");
+	DWC_OTG_PARAM_ERR(ulpi_fs_ls, 0, 1, "ulpi_fs_ls");
+	DWC_OTG_PARAM_ERR(ts_dline, 0, 1, "ts_dline");
+	/* dma_burst_size must be one of 1, 4, 8, 16, 32, 64, 128 or 256 */
+	if (dwc_otg_module_params.dma_burst_size != -1) {
+		if (DWC_OTG_PARAM_TEST(dma_burst_size, 1, 1)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 4, 4)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 8, 8)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 16, 16)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 32, 32)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 64, 64)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 128, 128)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 256, 256)) {
+			DWC_ERROR
+			    ("`%d' invalid for parameter `dma_burst_size'\n",
+			     dwc_otg_module_params.dma_burst_size);
+			dwc_otg_module_params.dma_burst_size = 32;
+			retval++;
+		}
+	}
+	/* phy_utmi_width must be either 8 or 16 bits */
+	if (dwc_otg_module_params.phy_utmi_width != -1) {
+		if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8)
+		     && DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) {
+			DWC_ERROR("`%d'invalid for parameter 'phy_utmi_width'\n"
+			     , dwc_otg_module_params.phy_utmi_width);
+			dwc_otg_module_params.phy_utmi_width = 8; /*fscz 16*/
+			retval++;
+		}
+	}
+	for (i = 0; i < 15; i++) {
+		/** @todo should be like above */
+		    if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
+			if (DWC_OTG_PARAM_TEST
+			     (dev_perio_tx_fifo_size[i], 4, 768)) {
+				DWC_ERROR("`%d' invalid for parameter `%s_%d'\n"
+				     , dwc_otg_module_params.dev_perio_tx_fifo_size[i],
+				     "dev_perio_tx_fifo_size", i);
+				dwc_otg_module_params.
+				    dev_perio_tx_fifo_size[i] =
+				    dwc_param_dev_perio_tx_fifo_size_default;
+				retval++;
+			}
+		}
+	}
+	DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo");
+	for (i = 0; i < 15; i++) {
+		/** @todo should be like above */
+		    if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) {
+			if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) {
+				DWC_ERROR("`%d' invalid for parameter `%s_%d'\n"
+					, dwc_otg_module_params.dev_tx_fifo_size[i],
+				     "dev_tx_fifo_size", i);
+				dwc_otg_module_params.dev_tx_fifo_size[i] =
+				    dwc_param_dev_tx_fifo_size_default;
+				retval++;
+			}
+		}
+	}
+	DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl");
+	DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length");
+	DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length");
+
+	/*
+	 * At this point, all module parameters that have been set by the user
+	 * are valid, and those that have not are left unset.  Now set their
+	 * default values and/or check the parameters against the hardware
+	 * configurations of the OTG core.
+	 */
+
+/* This sets the parameter to the default value if it has not been set by the
+ * user */
+#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \
+({ \
+	int changed = 1; \
+	if (dwc_otg_module_params._param_ == -1) { \
+		changed = 0; \
+		dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
+	} \
+	changed; \
+})
+
+/* This checks the macro against the hardware configuration to see if it is
+ * valid.  It is possible that the default value could be invalid.	In this
+ * case, it will report a module error if the user touched the parameter.
+ * Otherwise it will adjust the value without any error. */
+#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \
+({ \
+	int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \
+	int error = 0; \
+	if (!(_is_valid_)) { \
+		if (changed) { \
+			DWC_ERROR("`%d' invalid for parameter `%s'" \
+				"Check HW configuration.\n", \
+				dwc_otg_module_params._param_, _str_); \
+			error = 1; \
+		} \
+		dwc_otg_module_params._param_ = (_set_valid_); \
+	} \
+	error; \
+})
+
+		    /* OTG Cap */
+	retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap",
+		({
+			int valid;
+			valid = 1;
+			switch (dwc_otg_module_params.otg_cap) {
+
+			case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
+				if (core_if->hwcfg2.b.op_mode !=
+					DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+					valid = 0;
+				break;
+			case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
+				if ((core_if->hwcfg2.b.op_mode !=
+					DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+					&& (core_if->hwcfg2.b.op_mode !=
+					DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+					&& (core_if->hwcfg2.b.op_mode !=
+					DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+					&& (core_if->hwcfg2.b.op_mode !=
+					DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST))
+
+						valid = 0;
+
+				break;
+			case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+						      /* always valid */
+				break;
+			}
+			valid;
+		}),
+
+		(((core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ||
+			(core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) ||
+			(core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
+			(core_if->hwcfg2.b.op_mode ==
+			DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
+				DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
+				DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)) ;
+
+	retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable",
+			((dwc_otg_module_params.dma_enable == 1) &&
+			(core_if->hwcfg2.b.architecture == 0)) ? 0 : 1,
+			0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0);
+	DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size);
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power,
+			"host_support_fs_ls_low_power", 1, 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo,
+			"enable_dynamic_fifo",
+			((dwc_otg_module_params.enable_dynamic_fifo == 0) ||
+			(core_if->hwcfg2.b.dynamic_fifo == 1)), 0);
+	/* FIFO sizes are capped at the depths reported by the hardware. */
+	retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, "data_fifo_size",
+			(dwc_otg_module_params.data_fifo_size <=
+			core_if->hwcfg3.b.dfifo_depth),
+			core_if->hwcfg3.b.dfifo_depth);
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size,
+			"dev_rx_fifo_size",
+			(dwc_otg_module_params.dev_rx_fifo_size <=
+			dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+			dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size,
+			"dev_nperio_tx_fifo_size",
+			(dwc_otg_module_params.dev_nperio_tx_fifo_size <=
+			(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+			(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size,
+			"host_rx_fifo_size",
+			(dwc_otg_module_params.host_rx_fifo_size <=
+			dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+			dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size,
+			"host_nperio_tx_fifo_size",
+			(dwc_otg_module_params.host_nperio_tx_fifo_size <=
+			(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+			(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size,
+			"host_perio_tx_fifo_size",
+			(dwc_otg_module_params.host_perio_tx_fifo_size <=
+			((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))),
+			((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16)));
+	retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size,
+			"max_transfer_size",
+			(dwc_otg_module_params.max_transfer_size <
+			(1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))),
+			((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))
+			- 1));
+	retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count,
+			"max_packet_count",
+			(dwc_otg_module_params.max_packet_count <
+			(1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))),
+			((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))
+			- 1));
+retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, "host_channels",
+			(dwc_otg_module_params.host_channels <=
+			(core_if->hwcfg2.b.num_host_chan + 1)),
+			(core_if->hwcfg2.b.num_host_chan + 1));
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, "dev_endpoints",
+			(dwc_otg_module_params.dev_endpoints <=
+			(core_if->hwcfg2.b.num_dev_ep)),
+			core_if->hwcfg2.b.num_dev_ep);
+
+/*
+ * Define the following to disable the FS PHY Hardware checking.  This is for
+ * internal testing only.
+ *
+ * #define NO_FS_PHY_HW_CHECKS
+ */
+
+#ifdef NO_FS_PHY_HW_CHECKS
+	retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, "phy_type", 1, 0);
+#else
+	retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, "phy_type", ({
+		int valid = 0;
+		if ((dwc_otg_module_params.phy_type ==
+			DWC_PHY_TYPE_PARAM_UTMI) &&
+			((core_if->hwcfg2.b.hs_phy_type == 1) ||
+			(core_if->hwcfg2.b.hs_phy_type == 3))) {
+			valid = 1;
+		} else if ((dwc_otg_module_params.phy_type ==
+			DWC_PHY_TYPE_PARAM_ULPI) &&
+			((core_if->hwcfg2.b.hs_phy_type == 2) ||
+			(core_if->hwcfg2.b.hs_phy_type == 3))) {
+			valid = 1;
+		} else if ((dwc_otg_module_params.phy_type ==
+			DWC_PHY_TYPE_PARAM_FS) &&
+			(core_if->hwcfg2.b.fs_phy_type == 1))  {
+			valid = 1;
+		}
+		valid;
+		}),
+		({
+			int set = DWC_PHY_TYPE_PARAM_FS;
+			if (core_if->hwcfg2.b.hs_phy_type) {
+				if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
+					(core_if->hwcfg2.b.hs_phy_type == 1))
+					set = DWC_PHY_TYPE_PARAM_UTMI;
+				else
+					set = DWC_PHY_TYPE_PARAM_ULPI;
+			}
+			set;
+		})) ;
+
+#endif
+	retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed",
+		(dwc_otg_module_params.speed == 0) &&
+		(dwc_otg_module_params.phy_type ==
+		DWC_PHY_TYPE_PARAM_FS) ? 0 : 1,
+		dwc_otg_module_params.phy_type ==
+		DWC_PHY_TYPE_PARAM_FS ? 1 : 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk,
+		"host_ls_low_power_phy_clk",
+		((dwc_otg_module_params.host_ls_low_power_phy_clk ==
+		DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) &&
+		(dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS)
+		? 0 : 1),
+		((dwc_otg_module_params.phy_type ==
+		DWC_PHY_TYPE_PARAM_FS) ?
+		DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ :
+		DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ));
+	DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr);
+	DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus);
+	DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width);
+	DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls);
+	DWC_OTG_PARAM_SET_DEFAULT(ts_dline);
+
+#ifdef NO_FS_PHY_HW_CHECKS
+	retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0);
+
+#else	/*  */
+	retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable",
+				(dwc_otg_module_params.i2c_enable == 1) &&
+				(core_if->hwcfg3.b.i2c == 0) ? 0 : 1, 0);
+#endif
+
+	for (i = 0; i < 15; i++) {
+		int changed = 1;
+		int error = 0;
+		if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) {
+			changed = 0;
+			dwc_otg_module_params.dev_perio_tx_fifo_size[i] =
+			    dwc_param_dev_perio_tx_fifo_size_default;
+		}
+		if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <=
+			(dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+			if (changed) {
+				DWC_ERROR("`%d' invalid for parameter "
+					"`dev_perio_fifo_size_%d'."
+					"Check HW configuration.\n",
+					dwc_otg_module_params.dev_perio_tx_fifo_size[i],
+					i);
+				error = 1;
+			}
+			dwc_otg_module_params.dev_perio_tx_fifo_size[i] =
+			    dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+		}
+		retval += error;
+	}
+
+	retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo,
+			"en_multiple_tx_fifo",
+			((dwc_otg_module_params.en_multiple_tx_fifo == 1)
+			&& (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1,
+			0);
+
+	for (i = 0; i < 15; i++) {
+		int changed = 1;
+		int error = 0;
+		if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) {
+			changed = 0;
+			dwc_otg_module_params.dev_tx_fifo_size[i] =
+			    dwc_param_dev_tx_fifo_size_default;
+		}
+		if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <=
+		     (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+			if (changed) {
+				/* NOTE(review): the message text says
+				 * dev_perio_fifo_size but this loop checks
+				 * dev_tx_fifo_size; it also lacks the
+				 * opening backquote. */
+				DWC_ERROR("%d' invalid for parameter "
+					"`dev_perio_fifo_size_%d'."
+					"Check HW configuration.\n",
+					dwc_otg_module_params.dev_tx_fifo_size[i],
+					i);
+				error = 1;
+			}
+			dwc_otg_module_params.dev_tx_fifo_size[i] =
+			    dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+		}
+		retval += error;
+	}
+	DWC_OTG_PARAM_SET_DEFAULT(thr_ctl);
+	DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length);
+	DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length);
+	/* Parameters below fall back to hardware-reported capabilities
+	 * rather than compile-time defaults when left unset. */
+	if (dwc_otg_module_params.mpi_enable != -1) {
+		retval +=
+		    dwc_otg_set_param_mpi_enable(core_if,
+						 dwc_otg_module_params.
+						 mpi_enable);
+	} else {
+		core_if->core_params->mpi_enable =
+			core_if->hwcfg2.b.multi_proc_int;
+	}
+
+
+	if (dwc_otg_module_params.pti_enable != -1) {
+		retval +=
+		    dwc_otg_set_param_pti_enable(core_if,
+						 dwc_otg_module_params.
+						 pti_enable);
+	} else
+		DWC_OTG_PARAM_SET_DEFAULT(pti_enable);
+
+
+	if (dwc_otg_module_params.lpm_enable != -1) {
+		retval +=
+		    dwc_otg_set_param_lpm_enable(core_if,
+						 dwc_otg_module_params.
+						 lpm_enable);
+	} else {
+		core_if->core_params->lpm_enable =
+			core_if->hwcfg3.b.otg_lpm_en;
+	}
+	if (dwc_otg_module_params.ic_usb_cap != -1) {
+		retval +=
+		    dwc_otg_set_param_ic_usb_cap(core_if,
+						 dwc_otg_module_params.
+						 ic_usb_cap);
+	} else {
+		core_if->core_params->ic_usb_cap =
+			core_if->hwcfg3.b.otg_enable_ic_usb;
+
+	}
+
+	if (dwc_otg_module_params.ahb_thr_ratio != -1) {
+		retval +=
+		    dwc_otg_set_param_ahb_thr_ratio(core_if,
+						dwc_otg_module_params.
+						ahb_thr_ratio);
+	} else
+		DWC_OTG_PARAM_SET_DEFAULT(ahb_thr_ratio);
+
+
+
+	return retval;
+}
+
+/**
+ * Top level interrupt handler for the common (device and host mode)
+ * interrupts; delegates entirely to the CIL common interrupt handler
+ * and reports whether the interrupt was ours.
+ */
+static irqreturn_t dwc_otg_common_irq(int irq, void *dev)
+{
+	struct dwc_otg_device *otg_dev = dev;
+
+	return IRQ_RETVAL(dwc_otg_handle_common_intr(otg_dev->core_if));
+}
+
+#ifdef OTG_EXT_CHG_PUMP
+/**
+ * This function is the interrupt handler for the OverCurrent condition
+ * from the external charge pump (if enabled).
+ *
+ * In host mode it latches the over-current event for the HCD, clears
+ * PRTPWR (and the rest of HPRT0) to power off the root port, and asks
+ * the board code to switch off VBUS.  The interrupt is not expected in
+ * device mode.
+ */
+static irqreturn_t dwc_otg_externalchgpump_irq(int irq, void *dev)
+{
+	struct dwc_otg_device *otg_dev = dev;
+	int32_t retval = IRQ_NONE;
+	dwc_otg_hcd_t *_dwc_otg_hcd = NULL;
+
+	DWC_DEBUGPL(DBG_OFF, " ++OTG OverCurrent Detected "
+			"(ExtChgPump Interrupt)++ \n");
+
+	if (dwc_otg_is_host_mode(otg_dev->core_if)) {
+		hprt0_data_t hprt0 = {.d32 = 0};
+		_dwc_otg_hcd = otg_dev->hcd;
+		_dwc_otg_hcd->flags.b.port_over_current_change = 1;
+
+		/* Drop port power */
+		hprt0.b.prtpwr = 0;
+		dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,
+				hprt0.d32);
+
+		/* Board-level VBUS switch, when the platform provides one */
+		if (otg_dev->soc_disable_vbus)
+			otg_dev->soc_disable_vbus();
+
+	} else {
+		/* Device mode - This int is n/a for device mode */
+		DWC_ERROR(" DeviceMode: OTG OverCurrent Detected\n");
+	}
+
+	retval |= 1;
+	return IRQ_RETVAL(retval);
+}
+#endif
+
+/**
+ * This function is called when a device is unregistered with the
+ * dwc_otg_driver. This happens, for example, when the rmmod command is
+ * executed. The device may or may not be electrically present. If it is
+ * present, the driver stops device processing. Any resources used on behalf
+ * of this device are freed.
+ *
+ * It also serves as the common error-unwind path for
+ * dwc_otg_driver_probe() (reached via its "fail" label), so each teardown
+ * step below is guarded against components that were never created.
+ *
+ * @param[in] pdev the platform device being removed
+ */
+static int dwc_otg_driver_remove(struct platform_device *pdev)
+{
+	struct dwc_otg_device *otg_dev = platform_get_drvdata(pdev);
+	DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, (void *)&pdev->dev);
+	if (otg_dev == NULL) {
+		/* Memory allocation for the dwc_otg_device failed. */
+		return 0;
+	}
+	/*
+	 * Free the IRQ
+	 */
+	if (otg_dev->common_irq_installed)
+		free_irq(otg_dev->irq, otg_dev);
+
+#ifndef DWC_DEVICE_ONLY
+	if (otg_dev->hcd != NULL)
+		dwc_otg_hcd_remove(&pdev->dev);
+#endif
+
+#ifndef DWC_HOST_ONLY
+	if (otg_dev->pcd != NULL)
+		dwc_otg_pcd_remove(&pdev->dev);
+
+#endif
+	if (otg_dev->core_if != NULL)
+		dwc_otg_cil_remove(otg_dev->core_if);
+
+	/*
+	 * Remove the device attributes
+	 */
+	dwc_otg_attr_remove(&pdev->dev);
+
+	/*
+	 * Return the memory.
+	 */
+	if (otg_dev->base != NULL)
+		iounmap(otg_dev->base);
+
+	/* phys_addr == 0 means the mem region was never requested */
+	if (otg_dev->phys_addr != 0)
+		release_mem_region(otg_dev->phys_addr, otg_dev->base_len);
+
+	kfree(otg_dev);
+
+	/*
+	 * Clear the drvdata pointer.
+	 */
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef OTG_PLB_DMA_TASKLET
+/**
+ * Interrupt handler for PLB DMA completion (OTG_PLB_DMA_TASKLET builds):
+ * acknowledges the DMA channel status, clears the release_later flag and
+ * re-enables the core's global interrupts.
+ */
+static irqreturn_t dwc_otg_plbdma(int irq, void *dev)
+{
+	struct dwc_otg_device *otg_dev = dev;
+
+	ppc4xx_clr_dma_status(0);
+	DWC_DEBUGPL(DBG_SP, "%s reset release_later\n",  __func__);
+	atomic_set(&release_later, 0);
+	dwc_otg_enable_global_interrupts(otg_dev->core_if);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * This function is called when a device is bound to the
+ * dwc_otg_driver. It creates the driver components required to
+ * control the device (CIL, HCD, and PCD) and it initializes the
+ * device. The driver components are stored in a dwc_otg_device
+ * structure. A reference to the dwc_otg_device is saved in the
+ * device. This allows the driver to access the dwc_otg_device
+ * structure on subsequent calls to driver methods for this device.
+ *
+ * All error paths funnel through dwc_otg_driver_remove(), which is why
+ * the drvdata pointer is published as early as possible below.
+ *
+ * @param[in] pdev  platform device definition
+ */
+static int dwc_otg_driver_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	struct dwc_otg_device *dwc_otg_device;
+	struct dwc_otg_board *pdata;
+	int32_t snpsid;
+	struct resource *res;
+
+#if defined(OTG_EXT_CHG_PUMP) || defined(OTG_PLB_DMA_TASKLET)
+	int irq;
+#endif
+
+	dev_dbg(&pdev->dev, "dwc_otg_driver_probe (%p)\n", pdev);
+	/* kzalloc so that all "installed"/pointer fields start out clear;
+	 * dwc_otg_driver_remove() relies on that in the error paths. */
+	dwc_otg_device = kzalloc(sizeof(*dwc_otg_device), GFP_KERNEL);
+	if (!dwc_otg_device) {
+		dev_err(&pdev->dev, "kzalloc of dwc_otg_device failed\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+	dwc_otg_device->reg_offset = 0xFFFFFFFF;
+
+	/*
+	 * Publish the driver data immediately so that the common error
+	 * path (dwc_otg_driver_remove via "fail") can find and undo
+	 * whatever has been set up so far.  Previously drvdata was only
+	 * set after ioremap, so early failures leaked the allocation and
+	 * the memory region.
+	 */
+	platform_set_drvdata(pdev, dwc_otg_device);
+
+	/*
+	 * Retrieve the memory and IRQ resources.
+	 */
+	dwc_otg_device->irq = platform_get_irq(pdev, 0);
+	if (dwc_otg_device->irq <= 0) {
+		/* platform_get_irq() returns a negative errno on failure;
+		 * the old "== 0" test let error values through. */
+		dev_err(&pdev->dev, "no device irq\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	dev_dbg(&pdev->dev, "OTG - device irq: %d\n", dwc_otg_device->irq);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no CSR address\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	dev_dbg(&pdev->dev, "OTG - ioresource_mem start0x%08x: end:0x%08x\n",
+		(unsigned)res->start, (unsigned)res->end);
+	if (request_mem_region(res->start, resource_size(res),
+			       dwc_driver_name) == NULL) {
+		dev_err(&pdev->dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto fail;
+	}
+	/* Record the region only once it is actually held, so the cleanup
+	 * path never releases a region it does not own. */
+	dwc_otg_device->phys_addr = res->start;
+	dwc_otg_device->base_len = resource_size(res);
+
+	/*
+	 * Get the remaining platform data
+	 */
+	pdata = (struct dwc_otg_board *)pdev->dev.platform_data;
+	if (pdata) {
+		dwc_otg_device->soc_enable_vbus = pdata->enable_vbus;
+		dwc_otg_device->soc_disable_vbus = pdata->disable_vbus;
+		dwc_otg_device->soc_vbus_valid = pdata->vbus_valid;
+	}
+
+	/*
+	 * Map the DWC_otg Core memory into virtual address space.
+	 */
+	dwc_otg_device->base = ioremap_nocache(dwc_otg_device->phys_addr,
+					dwc_otg_device->base_len);
+	if (dwc_otg_device->base == NULL) {
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	dev_dbg(&pdev->dev, "mapped base=0x%p\n", dwc_otg_device->base);
+
+	/*
+	 * Attempt to ensure this device is really a DWC_otg Controller.
+	 * Read the SNPSID register; "OT2" cores report 0x4F542XXX, as in
+	 * "OTG version 2.XX".  Only warn on a mismatch, since the IMG
+	 * variant of the core may carry a different ID — TODO confirm.
+	 */
+	snpsid = dwc_read_reg32((u32 __iomem *)
+				((u8 __iomem *) dwc_otg_device->base + 0x40));
+	if (((u32)snpsid & 0xFFFFF000) != 0x4F542000)
+		dev_warn(&pdev->dev, "unexpected SNPSID 0x%08x\n", snpsid);
+
+	dev_dbg(&pdev->dev, "dwc_otg_device=0x%p\n", dwc_otg_device);
+	dwc_otg_device->core_if =
+		dwc_otg_cil_init(dwc_otg_device->base,
+				 &dwc_otg_module_params,
+				 dwc_otg_device);
+	if (!dwc_otg_device->core_if) {
+		dev_err(&pdev->dev, "CIL initialization failed!\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+	/*
+	 * Validate parameter values.
+	 */
+	if (check_parameters(dwc_otg_device->core_if) != 0) {
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	/* Added for PLB DMA phys virt mapping */
+	dwc_otg_device->core_if->phys_addr = dwc_otg_device->phys_addr;
+
+	/*
+	 * Create Device Attributes in sysfs
+	 */
+	dwc_otg_attr_create(&pdev->dev);
+
+	/*
+	 * Disable the global interrupt until all the interrupt
+	 * handlers are installed.
+	 */
+	dwc_otg_disable_global_interrupts(dwc_otg_device->core_if);
+
+	/*
+	* Install the interrupt handler for the common interrupts before
+	* enabling common interrupts in core_init below.
+	*/
+	DWC_DEBUGPL(DBG_CIL, "registering (common) handler for irq%d\n",
+			dwc_otg_device->irq);
+	retval = request_irq(dwc_otg_device->irq,
+			dwc_otg_common_irq,
+			IRQF_SHARED,
+			"dwc_otg",
+			dwc_otg_device);
+
+	if (retval != 0) {
+		DWC_ERROR("request of irq%d failed retval: %d\n",
+				dwc_otg_device->irq, retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		dwc_otg_device->common_irq_installed = 1;
+	}
+
+#ifdef CONFIG_MACH_IPMATE
+	set_irq_type(_lmdev->irq, IRQT_LOW);
+#endif
+	/*
+	 * Initialize the DWC_otg core.
+	 */
+	dwc_otg_core_init(dwc_otg_device->core_if);
+
+#ifdef OTG_EXT_CHG_PUMP
+	/* configure GPIO to use IRQ2, IRQ=58 (IRQ2) */
+	irq = platform_get_irq(pdev, 1);
+	retval = request_irq(irq, dwc_otg_externalchgpump_irq, IRQF_SHARED,
+			     "dwc_otg_ext_chg_pump", dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("request of irq:2(ExtInt) failed retval: %d\n",
+			  retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		printk(KERN_INFO "%s: (ExtChgPump-OverCurrent Detection)"
+		       " IRQ2 registered\n", dwc_driver_name);
+	}
+#endif
+
+#ifndef DWC_HOST_ONLY
+	/*
+	 * Initialize the PCD
+	 */
+	retval = dwc_otg_pcd_init(&pdev->dev);
+
+	if (retval != 0) {
+		DWC_ERROR("dwc_otg_pcd_init failed\n");
+		dwc_otg_device->pcd = NULL;
+		goto fail;
+	}
+
+#endif
+#ifndef DWC_DEVICE_ONLY
+	/*
+	 * Initialize the HCD
+	 */
+	retval = dwc_otg_hcd_init(&pdev->dev, dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("dwc_otg_hcd_init failed\n");
+		dwc_otg_device->hcd = NULL;
+		goto fail;
+	}
+
+#endif
+
+	/*
+	 * Switch VBUS to normal
+	 */
+	if (dwc_otg_device->soc_vbus_valid)
+		dwc_otg_device->soc_vbus_valid(1);
+
+	/*
+	 * Enable the global interrupt after all the interrupt
+	 * handlers are installed.
+	 */
+	dwc_otg_enable_global_interrupts(dwc_otg_device->core_if);
+
+#ifdef OTG_PLB_DMA_TASKLET
+	atomic_set(&release_later, 0);
+	irq = platform_get_irq(pdev, 2);
+	retval = request_irq(irq, dwc_otg_plbdma, IRQF_SHARED,
+			     "dwc_otg_plbdma", dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("Request of irq %d failed retval: %d\n",
+				PLB_DMA_CH_INT, retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		DWC_DEBUGPL(DBG_CIL, "%s Irq %d registered\n", dwc_driver_name,
+				PLB_DMA_CH_INT);
+	}
+#endif
+	return 0;
+
+fail:
+	dwc_otg_driver_remove(pdev);
+	return retval;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc_otg_suspend(struct device *dev)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	int ret;
+
+#ifndef DWC_DEVICE_ONLY
+	ret = dwc_otg_hcd_suspend(otg_dev->hcd);
+	if (ret)
+		return ret;
+#endif
+
+	/*
+	 * Switch VBUS to invalid
+	 */
+	if (otg_dev->soc_vbus_valid)
+		otg_dev->soc_vbus_valid(0);
+
+	return 0;
+}
+
+static int dwc_otg_resume(struct device *dev)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+
+	/*
+	 * Initialize the DWC_otg core.
+	 */
+	dwc_otg_core_init(otg_dev->core_if);
+
+	/*
+	 * Switch VBUS to normal
+	 */
+	if (otg_dev->soc_vbus_valid)
+		otg_dev->soc_vbus_valid(1);
+
+	/*
+	 * Enable the global interrupt after all the interrupt
+	 * handlers are installed.
+	 */
+	dwc_otg_enable_global_interrupts(otg_dev->core_if);
+
+#ifndef DWC_DEVICE_ONLY
+	dwc_otg_hcd_resume(otg_dev->hcd);
+#endif
+
+	return 0;
+}
+#else
+#define dwc_otg_driver_suspend NULL
+#define dwc_otg_driver_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(dwc_otg_pmops, dwc_otg_suspend, dwc_otg_resume);
+
+/**
+ * This structure defines the methods to be called by a bus driver
+ * during the lifecycle of a device on that bus. Both drivers and
+ * devices are registered with a bus driver. The bus driver matches
+ * devices to drivers based on information in the device and driver
+ * structures.
+ *
+ * The probe function is called when the bus driver matches a device
+ * to this driver. The remove function is called when a device is
+ * unregistered with the bus driver.
+ */
+static struct platform_driver dwc_otg_driver = {
+	.probe = dwc_otg_driver_probe,
+	.remove = dwc_otg_driver_remove,
+	.driver = {
+		.name = (char *)dwc_driver_name,
+		.bus = &platform_bus_type,
+		.pm = &dwc_otg_pmops,
+	},
+};
+
+/**
+ * This function is called when the dwc_otg_driver is installed with the
+ * insmod command. It registers the dwc_otg_driver structure with the
+ * appropriate bus driver. This will cause the dwc_otg_driver_probe function
+ * to be called. In addition, the bus driver will automatically expose
+ * attributes defined for the device and driver in the special sysfs file
+ * system.
+ *
+ * @return 0 on success, or a negative error code if driver registration fails.
+ */
+static int  __init dwc_otg_driver_init(void)
+{
+	int retval = 0, ret = 0;
+	printk(KERN_INFO "%s: version %s\n", dwc_driver_name,
+		 DWC_DRIVER_VERSION);
+	retval = platform_driver_register(&dwc_otg_driver);
+	if (retval < 0) {
+		printk(KERN_ERR "%s registration failed. retval=%d\n",
+			dwc_driver_name, retval);
+		return retval;
+	}
+	ret = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version);
+	ret = driver_create_file(&dwc_otg_driver.driver,
+			&driver_attr_debuglevel);
+	return retval;
+}
+
+module_init(dwc_otg_driver_init);
+
+/**
+ * This function is called when the driver is removed from the kernel
+ * with the rmmod command. The driver unregisters itself with its bus
+ * driver.
+ *
+ */
+static void __exit dwc_otg_driver_cleanup(void)
+{
+	printk(KERN_DEBUG "dwc_otg_driver_cleanup()\n");
+	driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel);
+	driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version);
+	platform_driver_unregister(&dwc_otg_driver);
+	printk(KERN_INFO "%s module removed\n", dwc_driver_name);
+} module_exit(dwc_otg_driver_cleanup);
+
+MODULE_DESCRIPTION(DWC_DRIVER_DESC);
+MODULE_AUTHOR("Synopsys Inc.");
+MODULE_LICENSE("GPL");
+
+module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444);
+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None");
+module_param_named(opt, dwc_otg_module_params.opt, int, 0444);
+MODULE_PARM_DESC(opt, "OPT Mode");
+module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444);
+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled");
+module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int,
+		   0444);
+MODULE_PARM_DESC(dma_desc_enable,
+		 "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled");
+module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int,
+		0444);
+MODULE_PARM_DESC(dma_burst_size,
+		"DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256");
+module_param_named(speed, dwc_otg_module_params.speed, int, 0444);
+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
+module_param_named(host_support_fs_ls_low_power,
+	dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444);
+MODULE_PARM_DESC(host_support_fs_ls_low_power,
+		  "Support Low Power w/FS or LS 0=Support 1=Don't Support");
+module_param_named(host_ls_low_power_phy_clk,
+		    dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444);
+MODULE_PARM_DESC(host_ls_low_power_phy_clk,
+		  "Low Speed Low Power Clock 0=48Mhz 1=6Mhz");
+module_param_named(enable_dynamic_fifo,
+		    dwc_otg_module_params.enable_dynamic_fifo, int, 0444);
+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing");
+module_param_named(data_fifo_size,
+	dwc_otg_module_params.data_fifo_size, int, 0444);
+MODULE_PARM_DESC(data_fifo_size,
+		  "Total number of words in the data FIFO memory 32-32768");
+module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size,
+		    int, 0444);
+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(dev_nperio_tx_fifo_size,
+		    dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(dev_nperio_tx_fifo_size,
+		  "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(dev_perio_tx_fifo_size_1,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_2,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_3,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_4,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_5,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_6,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_7,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_8,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_9,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_10,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_11,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[10], int,
+		    0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_12,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[11], int,
+		    0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_13,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[12], int,
+		    0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_14,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[13], int,
+		    0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_15,
+		    dwc_otg_module_params.dev_perio_tx_fifo_size[14], int,
+		    0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15,
+		  "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size,
+		    int, 0444);
+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(host_nperio_tx_fifo_size,
+		    dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_nperio_tx_fifo_size,
+		  "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(host_perio_tx_fifo_size,
+		    dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_perio_tx_fifo_size,
+		  "Number of words in the host periodic Tx FIFO 16-32768");
+module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size,
+		    int, 0444);
+
+/** @todo Set the max to 512K, modify checks */
+MODULE_PARM_DESC(max_transfer_size,
+		 "The maximum transfer size supported in bytes 2047-65535");
+module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count,
+		    int, 0444);
+MODULE_PARM_DESC(max_packet_count,
+		  "The maximum number of packets in a transfer 15-511");
+module_param_named(host_channels, dwc_otg_module_params.host_channels, int,
+		0444);
+MODULE_PARM_DESC(host_channels,
+		  "The number of host channel registers to use 1-16");
+module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int,
+		0444);
+MODULE_PARM_DESC(dev_endpoints,
+		  "The number of endpoints in addition to "
+		  "EP0 available for device mode 1-15");
+module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444);
+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI");
+module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int,
+		    0444);
+MODULE_PARM_DESC(phy_utmi_width,
+		  "Specifies the UTMI+ Data Width 8 or 16 bits");
+module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int,
+		    0444);
+MODULE_PARM_DESC(phy_ulpi_ddr,
+		 "ULPI at double or single data rate 0=Single 1=Double");
+module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus,
+		    int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ext_vbus,
+		  "ULPI PHY using internal or external vbus 0=Internal");
+module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444);
+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface");
+module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444);
+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only");
+module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444);
+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs");
+module_param_named(debug, g_dbg_lvl, int, 0444);
+MODULE_PARM_DESC(debug, "0");
+module_param_named(en_multiple_tx_fifo,
+		     dwc_otg_module_params.en_multiple_tx_fifo, int, 0444);
+MODULE_PARM_DESC(en_multiple_tx_fifo,
+		  "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled");
+module_param_named(dev_tx_fifo_size_1,
+		    dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_2,
+		    dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_3,
+		    dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_4,
+		    dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_5,
+		    dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_6,
+		    dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_7,
+		    dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_8,
+		    dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_9,
+		    dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_10,
+		    dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_11,
+		    dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_12,
+		    dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_13,
+		    dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_14,
+		    dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_15,
+		    dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768");
+module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444);
+MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit"
+		"0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- "
+		"bit 0=disabled 1=enabled");
+module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int,
+		0444);
+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs");
+module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int,
+		0444);
+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs");
+
+module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444);
+module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444);
+module_param_named(lpm_enable, dwc_otg_module_params.lpm_enable, int, 0444);
+MODULE_PARM_DESC(lpm_enable, "LPM Enable 0=LPM Disabled 1=LPM Enabled");
+module_param_named(ic_usb_cap, dwc_otg_module_params.ic_usb_cap, int, 0444);
+MODULE_PARM_DESC(ic_usb_cap,
+		 "IC_USB Capability 0=IC_USB Disabled 1=IC_USB Enabled");
+module_param_named(ahb_thr_ratio, dwc_otg_module_params.ahb_thr_ratio, int,
+		0444);
+MODULE_PARM_DESC(ahb_thr_ratio, "AHB Threshold Ratio");
+
+
diff --git a/drivers/usb/dwc_otg/dwc_otg_driver.h b/drivers/usb/dwc_otg/dwc_otg_driver.h
new file mode 100644
index 0000000..a386ada
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_driver.h
@@ -0,0 +1,189 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_DRIVER_H__)
+#define __DWC_OTG_DRIVER_H__
+
+/** @file
+ * This file contains the interface to the Linux driver.
+ */
+
+#if defined(CONFIG_SOC_TZ1090)
+#include "dwc_otg_tz1090.h"
+#endif
+
+#include <linux/usb/dwc_otg_platform.h>
+#include "dwc_otg_cil.h"
+
+
+/* Type declarations */
+struct dwc_otg_pcd;
+struct dwc_otg_hcd;
+
+/**
+ * This structure is a wrapper that encapsulates the driver components used to
+ * manage a single DWC_otg controller.
+ */
+struct dwc_otg_device {
+	/** Device pointer for convenience */
+	struct device *dev;
+
+	/** Base address returned from ioremap() */
+	void __iomem *base;
+
+	/** Pointer to the core interface structure. */
+	struct dwc_otg_core_if *core_if;
+
+	/** Register offset for Diagnostic API.*/
+	u32 reg_offset;
+
+	/** Pointer to the PCD structure. */
+	struct dwc_otg_pcd *pcd;
+
+	/** Pointer to the HCD structure. */
+	struct dwc_otg_hcd *hcd;
+
+	/** Flag to indicate whether the common IRQ handler is installed. */
+	u8 common_irq_installed;
+
+	/** Interrupt request number. */
+	unsigned int irq;
+
+	/*
+	 * Physical address of Control and Status registers, used by
+	 * release_mem_region().
+	 */
+	resource_size_t phys_addr;
+
+	/** Length of memory region, used by release_mem_region(). */
+	unsigned long base_len;
+
+	/* methods for enabling / disabling Vbus at the SoC Level*/
+	void (*soc_enable_vbus)(void);
+	void (*soc_disable_vbus)(void);
+	void (*soc_vbus_valid)(int normal);
+
+};
+
+
+
+/**
+ * The Debug Level bit-mask variable.
+ */
+extern u32 g_dbg_lvl;
+/**
+ * Set the Debug Level variable.
+ */
+static inline u32 SET_DEBUG_LEVEL(const u32 _new)
+{
+	u32 old = g_dbg_lvl;
+	g_dbg_lvl = _new;
+	return old;
+}
+
+
+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL		(0x2)
+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
+ * messages */
+#define DBG_CILV	(0x20)
+/**  When debug level has the DBG_PCD bit set, display PCD (Device) debug
+ *  messages */
+#define DBG_PCD		(0x4)
+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
+ * messages */
+#define DBG_PCDV	(0x40)
+/** When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD		(0x8)
+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
+ * messages */
+#define DBG_HCDV	(0x80)
+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
+ *  mode. */
+#define DBG_HCD_URB	(0x800)
+
+#define DBG_SP (0x10) /*???*/
+
+/** When debug level has any bit set, display debug messages */
+#define DBG_ANY		(0xFF)
+
+/** All debug messages off */
+#define DBG_OFF		0
+
+/** Prefix string for DWC_DEBUG print macros. */
+#define USB_DWC "dwc_otg: "
+
+/**
+ * Print a debug message when the Global debug level variable contains
+ * the bit defined in lvl.
+ *
+ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
+ * @param[in] x - like printf
+ *
+ *    Example:
+ *      DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
+ * results in:
+ * 	dwc_otg: dwc_otg_cil_init(ca867000)
+ */
+#ifdef DEBUG
+#define DWC_DEBUGPL(lvl, x...) \
+	do { if ((lvl)&g_dbg_lvl)printk(KERN_ERR USB_DWC x); } while (0)
+#define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x)
+
+#define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
+
+#else
+/*
+ * Debugging support vanishes in non-debug builds.
+ */
+#define DWC_DEBUGPL(lvl, x...) do {} while (0)
+#define DWC_DEBUGP(x...)
+
+#define CHK_DEBUG_LEVEL(level) (0)
+
+#endif /*DEBUG*/
+
+/**
+ * Print an Error message.
+ */
+#define DWC_ERROR(x...) printk(KERN_ERR USB_DWC x)
+/**
+ * Print a Warning message.
+ */
+#define DWC_WARN(x...) printk(KERN_WARNING USB_DWC x)
+/**
+ * Print a notice (normal but significant message).
+ */
+#define DWC_NOTICE(x...) printk(KERN_NOTICE USB_DWC x)
+/**
+ *  Basic message printing.
+ */
+#define DWC_PRINT(x...) printk(KERN_INFO USB_DWC x)
+
+#endif
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd.c b/drivers/usb/dwc_otg/dwc_otg_hcd.c
new file mode 100644
index 0000000..c6d1a7a
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd.c
@@ -0,0 +1,3490 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the implementation of the HCD. In Linux, the HCD
+ * implements the hc_driver API.
+ */
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/math64.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/bug.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+/*
+extern atomic_t release_later;
+extern int fscz_debug;
+*/
+#ifdef DEBUG
+static void dump_channel_info(struct dwc_otg_hcd *hcd,  struct dwc_otg_qh *qh);
+static void dump_urb_info(struct urb *urb, char *fn_name);
+#endif
+
+static int dwc_otc_hcd_bus_suspend(struct usb_hcd *hcd);
+static int dwc_otc_hcd_bus_resume(struct usb_hcd *hcd);
+
+static const int LOCKED = 1;
+
+static void
+dwc_otg_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+
+static u64 dma_mask = DMA_BIT_MASK(32);
+
+static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
+static const struct hc_driver dwc_otg_hc_driver = {
+	.description = dwc_otg_hcd_name,
+	.product_desc = "DWC OTG Controller",
+	.hcd_priv_size = sizeof(struct dwc_otg_hcd),
+	.irq = dwc_otg_hcd_irq,
+	.flags = HCD_MEMORY | HCD_USB2,
+	/*.reset =*/
+	.start = dwc_otg_hcd_start,
+	/*.suspend =*/
+	/*.resume =*/
+	.stop = dwc_otg_hcd_stop,
+	.urb_enqueue = dwc_otg_hcd_urb_enqueue,
+	.urb_dequeue = dwc_otg_hcd_urb_dequeue,
+	.endpoint_disable = dwc_otg_hcd_endpoint_disable,
+	.get_frame_number = dwc_otg_hcd_get_frame_number,
+	.hub_status_data = dwc_otg_hcd_hub_status_data,
+	.hub_control = dwc_otg_hcd_hub_control,
+	.bus_suspend = dwc_otc_hcd_bus_suspend,
+	.bus_resume = dwc_otc_hcd_bus_resume,
+	.endpoint_reset = dwc_otg_endpoint_reset,
+};
+
+/*
+ * Synopsys STAR 9000382006
+ *
+ * Title: Software Driver Should Reset Data Toggle Based on Clear Endpoint Halt
+ *        Setup
+ *
+ * Impacted Configuration: Host mode enabled
+ * Description: When the OTG Linux Driver deals with STALL packets, the OTG
+ *              driver resets the data toggle.
+ *              However, when the class driver calls usb_clear_halt() for some
+ *              reason other than STALL packet handling, the linux driver has
+ *              no branch to reset the data toggle, which is a defect.
+ *
+ *              Version(s) affected: 2.91a and all earlier versions
+ *
+ * How discovered: Discovered by customer
+ *
+ * SW code fixed: Implement endpoint reset method (below)
+ */
+
+static void
+dwc_otg_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	unsigned long flags;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct dwc_otg_qh *qh = (struct dwc_otg_qh *)ep->hcpriv;
+	int epnum = usb_endpoint_num(&ep->desc);
+	int is_out = usb_endpoint_dir_out(&ep->desc);
+	int is_control = usb_endpoint_xfer_control(&ep->desc);
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP RESET: Endpoint Num=0x%02d, "
+		    "endpoint=%d\n", (int)ep, epnum);
+
+	if (!qh)
+		return;
+
+	spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+
+	epnum = usb_endpoint_num(&ep->desc);
+	is_out = usb_endpoint_dir_out(&ep->desc);
+	is_control = usb_endpoint_xfer_control(&ep->desc);
+
+	usb_settoggle(qh->dev, epnum, is_out, 0);
+
+	if (is_control)
+		usb_settoggle(qh->dev, epnum, !is_out, 0);
+
+	qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+
+	spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+
+}
+
+
+/**
+ * Connection timeout function.  An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+
+#ifdef DEBUG
+static void dwc_otg_hcd_connect_timeout(unsigned long ptr)
+{
+	DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr);
+	DWC_PRINT("Connect Timeout\n");
+	DWC_ERROR("Device Not Connected/Responding\n");
+}
+#endif
+
+
+/**
+ * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs
+ */
+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	int ret;
+
+	ret = dwc_otg_hcd_handle_intr(dwc_otg_hcd);
+
+	return IRQ_RETVAL(ret);
+}
+
+/**
+ * Work queue function for starting the HCD when A-Cable is connected.
+ * The dwc_otg_hcd_start() must be called in a process context.
+ */
+static void hcd_start_func(struct work_struct *work)
+{
+	struct dwc_otg_hcd *priv =
+		container_of(work, struct dwc_otg_hcd, start_work);
+	struct usb_hcd *usb_hcd = dwc_otg_hcd_to_hcd(priv);
+
+	DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
+	dwc_otg_hcd_start(usb_hcd);
+}
+
+#ifdef DEBUG
+static void del_xfer_timers(struct dwc_otg_hcd *hcd)
+{
+
+	int i;
+	int num_channels = hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++)
+		del_timer(&hcd->core_if->hc_xfer_timer[i]);
+}
+#endif
+static void del_timers(struct dwc_otg_hcd *hcd)
+{
+#ifdef DEBUG
+	del_xfer_timers(hcd);
+	del_timer(&hcd->conn_timer);
+#endif
+}
+
+/**
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(struct dwc_otg_hcd *hcd,
+				 struct list_head *qh_list)
+{
+	struct list_head *qh_item;
+	struct dwc_otg_qh *qh;
+	struct list_head *qtd_item, *list_temp1, *list_temp2;
+	struct dwc_otg_qtd *qtd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+
+	list_for_each_safe(qh_item, list_temp1, qh_list) {
+
+		qh = list_entry(qh_item, struct dwc_otg_qh, qh_list_entry);
+
+re_read_list2:
+		list_for_each_safe(qtd_item, list_temp2, &qh->qtd_list) {
+
+			qtd = list_entry(qtd_item,
+					 struct dwc_otg_qtd,
+					 qtd_list_entry);
+
+			if (qtd->urb != NULL) {
+				dwc_otg_hcd_complete_urb(hcd, qtd->urb,
+							 -ETIMEDOUT);
+				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+
+				/* Calling complete on the URB drops the HCD
+				 * lock and passes control back to the higher
+				 * levels, they may then unlink any current
+				 * URBs, unlinks are synchronous in this driver
+				 * (unlike in EHCI) thus dequeue gets called
+				 * straight away and the associated QTD gets
+				 * removed off the qh list and free'd thus
+				 * list_temp2 can be invalid after this call
+				 * so we must re-read the list from the start.
+				 */
+
+				goto re_read_list2;
+			}
+
+
+		}
+	}
+	spin_unlock_irqrestore(&hcd->lock, flags);
+}
+
+/**
+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(struct dwc_otg_hcd *hcd)
+{
+	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_deferred);
+	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
+	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
+}
+
+/**
+ * Start the connection timer.  An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.  The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void dwc_otg_hcd_start_connect_timer(struct dwc_otg_hcd *hcd)
+{
+#ifdef DEBUG
+	mod_timer(&hcd->conn_timer, jiffies + (HZ * 10));
+#endif
+}
+
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int dwc_otg_hcd_session_start_cb(void *p)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
+	DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
+	dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
+	return 1;
+}
+/**
+ * HCD Callback function for starting the HCD when A-Cable is
+ * connected.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ *
+ * If the core is currently operating as a B-Host (HNP mode switch), the
+ * port is reset immediately and the bus is flagged as is_b_host.  The
+ * actual HCD start is deferred to a workqueue item because it must not
+ * run in interrupt context.
+ */
+static int dwc_otg_hcd_start_cb(void *p)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
+	struct dwc_otg_core_if *core_if = dwc_otg_hcd->core_if;
+	union hprt0_data hprt0;
+	if (core_if->op_state == B_HOST) {
+		/*
+		 * Reset the port.  During a HNP mode switch the reset
+		 * needs to occur within 1ms and have a duration of at
+		 * least 50ms.
+		 */
+		hprt0.d32 = dwc_otg_read_hprt0(core_if);
+		hprt0.b.prtrst = 1;
+		dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+		((struct usb_hcd *)p)->self.is_b_host = 1;
+	} else
+		((struct usb_hcd *)p)->self.is_b_host = 0;
+
+	/* Need to start the HCD in a non-interrupt context. */
+	INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
+	schedule_work(&dwc_otg_hcd->start_work);
+	return 1;
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ *
+ * Flags the port status change for the hub driver, masks further Tx FIFO
+ * empty and host channel interrupts, optionally powers down VBUS if the
+ * core has dropped to device mode, and returns any in-use host channels
+ * to the free list.
+ */
+static int dwc_otg_hcd_disconnect_cb(void *p)
+{
+	union gintsts_data intr;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
+
+	/*
+	 * Set status flags for the hub driver.
+	 */
+	dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+	dwc_otg_hcd->flags.b.port_connect_status = 0;
+
+	/*
+	 * Shutdown any transfers in progress by clearing the Tx FIFO Empty
+	 * interrupt mask and status bits and disabling subsequent host
+	 * channel interrupts.
+	 */
+	intr.d32 = 0;
+	intr.b.nptxfempty = 1;
+	intr.b.ptxfempty = 1;
+	intr.b.hcintr = 1;
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk,
+			  intr.d32, 0);
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts,
+			  intr.d32, 0);
+	del_timers(dwc_otg_hcd);
+
+	/*
+	 * Turn off the vbus power only if the core has transitioned to device
+	 * mode. If still in host mode, need to keep power on to detect a
+	 * reconnection.
+	 */
+	if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
+		if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
+			union hprt0_data hprt0 = {.d32 = 0};
+			DWC_PRINT("Disconnect: PortPower off\n");
+			hprt0.b.prtpwr = 0;
+			dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0,
+					 hprt0.d32);
+
+			/* Board-specific VBUS switch, if the SoC provides one. */
+			if (dwc_otg_hcd->otg_dev->soc_disable_vbus)
+				dwc_otg_hcd->otg_dev->soc_disable_vbus();
+		}
+		dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+	}
+
+	/* Respond with an error status to all URBs in the schedule. */
+
+	/* NJ dont kill all URBs,
+	 * the hub driver will call hcd_flush_endpoint which will remove
+	 * all URBs from a given endpoint in response to the disconnect.
+	 */
+
+	/* kill_all_urbs(dwc_otg_hcd); */
+
+
+	if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+		/* Clean up any host channels that were in use. */
+		int num_channels;
+		int i;
+		struct dwc_hc *channel;
+		struct dwc_otg_hc_regs __iomem *hc_regs;
+		union hcchar_data hcchar;
+		num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+		if (!dwc_otg_hcd->core_if->dma_enable) {
+			/* Flush out any channel requests in slave mode. */
+			for (i = 0; i < num_channels; i++) {
+				channel = dwc_otg_hcd->hc_ptr_array[i];
+				/*
+				 * A channel not on any list is currently in
+				 * use (channels on a list are free).
+				 */
+				if (list_empty(&channel->hc_list_entry)) {
+					hc_regs =
+						dwc_otg_hcd->core_if->host_if->hc_regs[i];
+					hcchar.d32 =
+						dwc_read_reg32(&hc_regs->hcchar);
+					if (hcchar.b.chen) {
+						hcchar.b.chen = 0;
+						hcchar.b.chdis = 1;
+						hcchar.b.epdir = 0;
+						dwc_write_reg32(&hc_regs->hcchar,
+								hcchar.d32);
+					}
+				}
+			}
+		}
+		for (i = 0; i < num_channels; i++) {
+			channel = dwc_otg_hcd->hc_ptr_array[i];
+			if (list_empty(&channel->hc_list_entry)) {
+				hc_regs =
+					dwc_otg_hcd->core_if->host_if->hc_regs[i];
+				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+				if (hcchar.b.chen) {
+					/* Halt the channel. */
+					hcchar.b.chdis = 1;
+					dwc_write_reg32(&hc_regs->hcchar,
+							hcchar.d32);
+				}
+				dwc_otg_hc_cleanup(dwc_otg_hcd->core_if,
+						channel);
+				list_add_tail(&channel->hc_list_entry,
+						&dwc_otg_hcd->free_hc_list);
+				/*
+				 * Added for Descriptor DMA to prevent channel
+				 * double cleanup in release_channel_ddma().
+				 * Which called from ep_disable
+				 * when device disconnect.
+				 */
+				channel->qh = NULL;
+			}
+		}
+	}
+
+	/* A disconnect will end the session so the B-Device is no
+	 * longer a B-host. */
+	((struct usb_hcd *)p)->self.is_b_host = 0;
+	return 1;
+}
+
+
+
+/**
+ * HCD Callback function for stopping the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int dwc_otg_hcd_stop_cb(void *p)
+{
+	DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
+	dwc_otg_hcd_stop((struct usb_hcd *)p);
+	return 1;
+}
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+/**
+ * HCD Callback function for sleep of HCD.  Releases the host channel
+ * held for LPM back to the free pool.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int dwc_otg_hcd_sleep_cb(void *p)
+{
+	struct dwc_otg_hcd *hcd = hcd_to_dwc_otg_hcd(p);
+
+	dwc_otg_hcd_free_hc_from_lpm(hcd);
+
+	return 0;
+}
+#endif
+
+
+/**
+ * HCD Callback function for Remote Wakeup.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ *
+ * If the link is in the DWC_OTG_L2 state (presumably USB suspend — see
+ * cil code), flag a port suspend change and resume the root hub; with
+ * LPM support, any other state is treated as an L1 wakeup instead.
+ */
+static int dwc_otg_hcd_rem_wakeup_cb(void *p)
+{
+	struct usb_hcd *hcd = p;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+
+	if (dwc_otg_hcd->core_if->lx_state == DWC_OTG_L2) {
+		dwc_otg_hcd->flags.b.port_suspend_change = 1;
+		usb_hcd_resume_root_hub(hcd);
+	}
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	else
+		dwc_otg_hcd->flags.b.port_l1_change = 1;
+
+#endif
+	return 0;
+}
+/**
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ *
+ * @param hcd the USB HCD being stopped
+ *
+ * Disables host interrupts and turns off VBUS port power.  The caller
+ * (USB core) must have already disconnected the root hub so that the
+ * QTD/QH lists are empty.
+ */
+void dwc_otg_hcd_stop(struct usb_hcd *hcd)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	union hprt0_data hprt0 = {.d32 = 0};
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
+
+	/* Turn off all host-specific interrupts. */
+	dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+
+	/*
+	 * The root hub should be disconnected before this function is called.
+	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+	 * and the QH lists (via ..._hcd_endpoint_disable).
+	 */
+
+	/* Turn off the vbus power */
+	DWC_PRINT("PortPower off\n");
+	hprt0.b.prtpwr = 0;
+	dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+
+	/* Board-specific VBUS switch, if the SoC provides one. */
+	if (dwc_otg_hcd->otg_dev->soc_disable_vbus)
+		dwc_otg_hcd->otg_dev->soc_disable_vbus();
+
+	/* NOTE(review): short settle delay after power off — confirm need. */
+	mdelay(1);
+}
+
+/**
+ * Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ *
+ * Links the URB to its endpoint, creates and schedules a QTD for it, and,
+ * in descriptor-DMA mode, kicks off transaction selection immediately.
+ *
+ * Returns 0 on success, -ENODEV if the port is no longer connected,
+ * -ENOMEM if the QTD cannot be allocated, or the error from
+ * usb_hcd_link_urb_to_ep()/dwc_otg_hcd_qtd_add().  The HCD lock is held
+ * for the whole operation.
+ */
+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+				struct urb *urb,
+				gfp_t mem_flags)
+{
+	unsigned long flags;
+	int retval;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct dwc_otg_qtd *qtd;
+
+	spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (retval) {
+		goto out;
+	}
+
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB))
+		dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
+#endif	/*  */
+	if (!dwc_otg_hcd->flags.b.port_connect_status) {
+		/* No longer connected. */
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		retval = -ENODEV;
+		goto out;
+	}
+	qtd = dwc_otg_hcd_qtd_create(urb);
+	if (qtd == NULL) {
+		DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
+	if (retval < 0) {
+		DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
+			   "Error status %d\n", retval);
+		dwc_otg_hcd_qtd_free(qtd);
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		goto out;
+	}
+
+	/* Descriptor DMA: start scheduling right away rather than waiting
+	 * for the next SOF interrupt. */
+	if (dwc_otg_hcd->core_if->dma_desc_enable && (retval == 0)) {
+		enum dwc_otg_transaction_type tr_type;
+		if ((qtd->qtd_qh_ptr->ep_type == USB_ENDPOINT_XFER_ISOC) &&
+				!(qtd->urb->transfer_flags & URB_ISO_ASAP)) {
+			/*
+			 * Do not schedule SG transactions until
+			 *  qtd has URB_GIVEBACK_ASAP set
+			 */
+			retval = 0;
+			goto out;
+		}
+		tr_type = __dwc_otg_hcd_select_transactions(dwc_otg_hcd,
+							    LOCKED);
+		if (tr_type != DWC_OTG_TRANSACTION_NONE)
+			dwc_otg_hcd_queue_transactions(dwc_otg_hcd, tr_type);
+	}
+
+
+out:
+	spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+
+	return retval;
+}
+
+/**
+ * Cancels a USB transfer request.  Halts the host channel if the QTD is
+ * currently in process, removes and frees the QTD, deactivates or removes
+ * the QH as appropriate, and gives the URB back to the USB core with the
+ * requested status.
+ *
+ * Returns 0 on success or the error from usb_hcd_check_unlink_urb().
+ */
+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	unsigned long flags;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct dwc_otg_qtd *urb_qtd;
+	struct dwc_otg_qh *qh;
+	int retval;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+
+	spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+
+	BUG_ON(!urb);
+	BUG_ON(!urb->ep);
+
+	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (retval) {
+		printk(KERN_WARNING "URB not unlinkable!! err %d\n", retval);
+		spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+		return retval;
+	}
+
+	urb_qtd = (struct dwc_otg_qtd *) urb->hcpriv;
+	if (urb_qtd == NULL) {
+		/* Use %p for pointers: casting to unsigned truncates on
+		 * 64-bit and is implementation-defined. */
+		printk(KERN_WARNING "urb_qtd is NULL for urb %p\n", urb);
+		goto done;
+	}
+	qh = (struct dwc_otg_qh *) urb_qtd->qtd_qh_ptr;
+	if (qh == NULL) {
+		printk(KERN_WARNING "urb_qtd->qtd_qh_ptr is NULL for urb %p\n",
+		       urb);
+		goto done;
+	}
+
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue");
+		if (urb_qtd == qh->qtd_in_process)
+			dump_channel_info(dwc_otg_hcd, qh);
+	}
+
+#endif
+	if (urb_qtd == qh->qtd_in_process && qh->channel) {
+		/* The QTD is in process (it has been assigned to a channel). */
+		if (dwc_otg_hcd->flags.b.port_connect_status) {
+
+			/*
+			 * If still connected (i.e. in host mode), halt the
+			 * channel so it can be used for other transfers. If
+			 * no longer connected, the host registers can't be
+			 * written to halt the channel since the core is in
+			 * device mode.
+			 */
+			dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel,
+					DWC_OTG_HC_XFER_URB_DEQUEUE);
+		}
+	}
+
+	/*
+	 * Free the QTD and clean up the associated QH. Leave the QH in the
+	 * schedule if it has any remaining QTDs.
+	 */
+	if (!dwc_otg_hcd->core_if->dma_desc_enable) {
+		__dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd, qh);
+		if (urb_qtd == qh->qtd_in_process) {
+			__dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
+			qh->channel = NULL;
+			qh->qtd_in_process = NULL;
+		} else if (list_empty(&qh->qtd_list)) {
+			__dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+		}
+	} else {
+		__dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd, qh);
+		if (list_empty(&qh->qtd_list) &&
+		    dwc_otg_hcd->flags.b.port_connect_status &&
+		    qh->channel) {
+			dwc_otg_hc_halt(dwc_otg_hcd->core_if,
+					qh->channel,
+					DWC_OTG_HC_XFER_URB_DEQUEUE);
+		}
+	}
+
+
+done:
+	/* Give the URB back to the USB core with the requested status. */
+	dwc_otg_hcd_complete_urb(dwc_otg_hcd, urb, status);
+
+	spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+		DWC_PRINT("  urb->status = %d\n", status);
+	}
+	return 0;
+}
+
+/* Maximum number of 5ms waits for the endpoint's QTD list to drain. */
+#define ENDPOINT_DISABLE_RETRY 10
+
+/**
+ * Disables an endpoint: waits (briefly) for its QTD list to drain, then
+ * removes its QH from the schedule and frees it.
+ *
+ * @param hcd the USB HCD
+ * @param ep the host endpoint being disabled
+ */
+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+				struct usb_host_endpoint *ep)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct dwc_otg_qh *qh;
+	int retry = ENDPOINT_DISABLE_RETRY;
+	unsigned long flags;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x,"
+			" endpoint=%d\n", ep->desc.bEndpointAddress,
+			dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
+
+
+	qh = (struct dwc_otg_qh *) (ep->hcpriv);
+
+	if (qh != NULL) {
+
+		spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+
+		/* Drop the lock while sleeping so in-flight QTDs can
+		 * complete; re-check the list each iteration. */
+		while (!list_empty(&qh->qtd_list) && retry) {
+			spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+			retry--;
+			msleep(5);
+			spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+		}
+
+
+		__dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+
+		spin_unlock_irqrestore(&dwc_otg_hcd->lock, flags);
+		/*
+		 * Split dwc_otg_hcd_qh_remove_and_free() into qh_remove
+		 * and qh_free to prevent stack dump on dwc_dma_free() with
+		 * irq_disabled (spinlock_irqsave) in
+		 * dwc_otg_hcd_desc_list_free() and
+		 * dwc_otg_hcd_frame_list_alloc().
+		 */
+		dwc_otg_hcd_qh_free(dwc_otg_hcd, qh, 0);
+
+		ep->hcpriv = NULL;
+	}
+	return;
+}
+
+
+/**
+ * HCD Callback structure for handling mode switching.  Registered with
+ * the core interface layer (CIL) so host-mode events are routed to the
+ * HCD handlers above.
+ */
+static struct dwc_otg_cil_callbacks hcd_cil_callbacks = {
+	.start = dwc_otg_hcd_start_cb,
+	.stop = dwc_otg_hcd_stop_cb,
+	.disconnect = dwc_otg_hcd_disconnect_cb,
+	.session_start = dwc_otg_hcd_session_start_cb,
+	.resume_wakeup = dwc_otg_hcd_rem_wakeup_cb,
+#ifdef CONFIG_USB_DWC_OTG_LPM
+	.sleep = dwc_otg_hcd_sleep_cb,
+#endif
+};
+
+/**
+ * Tasklet that pulses a USB reset on the root port: asserts port reset,
+ * holds it for 60ms, deasserts it, then flags the reset change for the
+ * hub driver.
+ *
+ * @param data the struct dwc_otg_hcd pointer, cast to unsigned long
+ */
+static void reset_tasklet_func(unsigned long data)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = (struct dwc_otg_hcd *) data;
+	struct dwc_otg_core_if *core_if = dwc_otg_hcd->core_if;
+	union hprt0_data hprt0;
+	DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+	hprt0.d32 = dwc_otg_read_hprt0(core_if);
+	hprt0.b.prtrst = 1;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+	/* NOTE(review): busy-waits 60ms in tasklet context. */
+	mdelay(60);
+	hprt0.b.prtrst = 0;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+	dwc_otg_hcd->flags.b.port_reset_change = 1;
+	return;
+}
+static DECLARE_TASKLET(reset_tasklet, reset_tasklet_func, 0);
+
+/**
+ * Kills any remaining URBs on the given QH list, then removes and frees
+ * every QH on it.  A list that was never initialized (next == NULL) is
+ * left untouched.
+ */
+static void qh_list_free(struct dwc_otg_hcd *hcd, struct list_head *qh_list)
+{
+	struct dwc_otg_qh *qh;
+
+	/* The list hasn't been initialized yet. */
+	if (qh_list->next == NULL)
+		return;
+
+	/* Ensure there are no QTDs or URBs left. */
+	kill_urbs_in_qh_list(hcd, qh_list);
+
+	while (!list_empty(qh_list)) {
+		qh = list_entry(qh_list->next, struct dwc_otg_qh,
+				qh_list_entry);
+		dwc_otg_hcd_qh_remove_and_free(hcd, qh, 0);
+	}
+}
+/**
+ * Frees secondary storage associated with the dwc_otg_hcd structure contained
+ * in the struct usb_hcd field.
+ *
+ * Deletes timers, frees all QH/QTD lists, the host channel descriptors,
+ * and the status bit-bucket buffer.  Safe to call from both the normal
+ * remove path and the init error path.
+ */
+void dwc_otg_hcd_free(struct usb_hcd *hcd)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	int i;
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
+	del_timers(dwc_otg_hcd);
+
+	/* Free memory for QH/QTD lists */
+
+	qh_list_free(dwc_otg_hcd,
+	&dwc_otg_hcd->non_periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_deferred);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
+
+	/* Free memory for the host channels. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		struct dwc_hc *hc = dwc_otg_hcd->hc_ptr_array[i];
+		if (hc != NULL) {
+			DWC_DEBUGPL(DBG_HCDV, "HCD Free channel "
+					"#%i, hc=%p\n", i, hc);
+			kfree(hc);
+			/* Clear the slot so a repeated free is harmless. */
+			dwc_otg_hcd->hc_ptr_array[i] = NULL;
+		}
+	}
+	if (dwc_otg_hcd->core_if->dma_enable) {
+		if (dwc_otg_hcd->status_buf_dma) {
+			dma_free_coherent(dwc_otg_hcd->dev,
+					DWC_OTG_HCD_STATUS_BUF_SIZE,
+					dwc_otg_hcd->status_buf,
+					dwc_otg_hcd->status_buf_dma);
+		}
+	} else {
+		/* kfree(NULL) is a no-op, so no NULL check is needed. */
+		kfree(dwc_otg_hcd->status_buf);
+	}
+
+	return;
+}
+
+/**
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ *
+ * @param dev the platform device
+ * @param dwc_otg_device the driver-wide OTG device state
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or the error from
+ * usb_add_hcd().  On failure all partially-acquired resources are
+ * released via the error1/error2/error3 unwind labels.
+ */
+int dwc_otg_hcd_init(struct device *dev, struct dwc_otg_device *dwc_otg_device)
+{
+	struct usb_hcd *hcd = NULL;
+	struct dwc_otg_hcd *dwc_otg_hcd = NULL;
+	int num_channels;
+	int i;
+	struct dwc_hc *channel;
+	int retval = 0;
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
+
+	/*
+	 * Allocate memory for the base HCD plus the DWC OTG HCD.
+	 * Initialize the base HCD.
+	 */
+	hcd = usb_create_hcd(&dwc_otg_hc_driver, dev, dev_name(dev));
+	if (hcd == NULL) {
+		retval = -ENOMEM;
+		goto error1;
+	}
+	dev_set_drvdata(dev, dwc_otg_device); /* fscz restore */
+	hcd->regs = dwc_otg_device->base;
+	hcd->self.otg_port = 1;
+
+	/* Integrate TT in root hub, by default this is disbled. */
+	hcd->has_tt = 1;
+	/* Initialize the DWC OTG HCD. */
+	dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	dwc_otg_hcd->core_if = dwc_otg_device->core_if;
+	dwc_otg_device->hcd = dwc_otg_hcd;
+	dwc_otg_hcd->dev = dev;
+	dwc_otg_hcd->otg_dev = dwc_otg_device;
+
+	spin_lock_init(&dwc_otg_hcd->lock);
+
+	/* Register the HCD CIL Callbacks */
+	dwc_otg_cil_register_hcd_callbacks(dwc_otg_device->core_if,
+				       &hcd_cil_callbacks, hcd);
+
+	/* Initialize the non-periodic schedule. */
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_deferred);
+
+	/* Initialize the periodic schedule. */
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+
+	init_waitqueue_head(&dwc_otg_hcd->idleq);
+
+	/*
+	 * Create a host channel descriptor for each host channel implemented
+	 * in the controller. Initialize the channel descriptor array.
+	 */
+	INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
+	num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		channel = kmalloc(sizeof(struct dwc_hc), GFP_KERNEL);
+		if (channel == NULL) {
+			retval = -ENOMEM;
+			DWC_ERROR("%s: host channel allocation failed\n",
+					__func__);
+			goto error2;
+		}
+		memset(channel, 0, sizeof(struct dwc_hc));
+		channel->hc_num = i;
+		dwc_otg_hcd->hc_ptr_array[i] = channel;
+
+#ifdef DEBUG
+		init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
+#endif
+		DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,
+				channel);
+	}
+
+	/* Initialize the Connection timeout timer. */
+#ifdef DEBUG
+	dwc_otg_hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
+	dwc_otg_hcd->conn_timer.data = (unsigned long)0;
+#endif
+	init_timer(&dwc_otg_hcd->conn_timer);
+
+	/* Initialize reset tasklet. */
+	reset_tasklet.data = (unsigned long)dwc_otg_hcd;
+	dwc_otg_hcd->reset_tasklet = &reset_tasklet;
+
+#ifdef OTG_PLB_DMA_TASKLET
+	/* Initialize plbdma tasklet. */
+	plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if;
+	dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet;
+#endif
+
+	/* Set device flags indicating whether the HCD supports DMA. */
+	if (dwc_otg_device->core_if->dma_enable) {
+		DWC_PRINT("Using DMA mode\n");
+		dev->dma_mask = &dma_mask;
+		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	} else {
+		DWC_PRINT("Using Slave mode\n");
+		dev->dma_mask = (void *)0;
+		dev->coherent_dma_mask = 0;
+	}
+	/*
+	 * Finish generic HCD initialization and start the HCD. This function
+	 * allocates the DMA buffer pool, registers the USB bus, requests the
+	 * IRQ line, and calls dwc_otg_hcd_start method.
+	 */
+	retval = usb_add_hcd(hcd, dwc_otg_device->irq, IRQF_SHARED);
+	if (retval < 0)
+		goto error2;
+
+	/*
+	* Allocate space for storing data on status transactions. Normally no
+	* data is sent, but this space acts as a bit bucket. This must be
+	* done after usb_add_hcd since that function allocates the DMA buffer
+	* pool.
+	*/
+	if (dwc_otg_device->core_if->dma_enable) {
+		dwc_otg_hcd->status_buf =
+			dma_alloc_coherent(dev, DWC_OTG_HCD_STATUS_BUF_SIZE,
+					&dwc_otg_hcd->status_buf_dma,
+					GFP_KERNEL | GFP_DMA);
+	} else {
+		dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE,
+				GFP_KERNEL);
+	}
+	if (dwc_otg_hcd->status_buf == NULL) {
+		retval = -ENOMEM;
+		DWC_ERROR("%s: status_buf allocation failed\n", __func__);
+		goto error3;
+	}
+	DWC_DEBUGPL(DBG_HCD,
+			"DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
+			dev->init_name, hcd->self.busnum);
+	return 0;
+
+	/* Error conditions */
+error3:
+	usb_remove_hcd(hcd);
+error2:
+	dwc_otg_hcd_free(hcd);
+	usb_put_hcd(hcd);
+error1:
+	return retval;
+}
+
+
+/**
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ *
+ * @param dev the platform device whose drvdata holds the OTG device state
+ */
+void dwc_otg_hcd_remove(struct device *dev)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	struct dwc_otg_hcd *dwc_otg_hcd = otg_dev->hcd;
+	struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
+
+	/* Turn off all interrupts */
+	dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
+	/* NOTE(review): clears bit 0 of GAHBCFG (presumably the global
+	 * interrupt enable) — confirm against the register layout. */
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg,
+			1, 0);
+	usb_remove_hcd(hcd);
+	dwc_otg_hcd_free(hcd);
+	usb_put_hcd(hcd);
+	return;
+}
+
+/**
+ * Initializes dynamic portions of the DWC_otg HCD state.  Resets the
+ * status flags and channel counts, rebuilds the free host channel list
+ * from the channel descriptor array, and re-initializes the core for
+ * host mode operation.
+ */
+static void hcd_reinit(struct dwc_otg_hcd *hcd)
+{
+	struct dwc_hc *chan;
+	int chan_count;
+	int idx;
+
+	hcd->flags.d32 = 0;
+	hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
+	hcd->non_periodic_channels = 0;
+	hcd->periodic_channels = 0;
+
+	/*
+	 * Put all channels in the free channel list and clean up channel
+	 * states.
+	 */
+	while (!list_empty(&hcd->free_hc_list))
+		list_del(hcd->free_hc_list.next);
+
+	chan_count = hcd->core_if->core_params->host_channels;
+	for (idx = 0; idx < chan_count; idx++) {
+		chan = hcd->hc_ptr_array[idx];
+		list_add_tail(&chan->hc_list_entry, &hcd->free_hc_list);
+		dwc_otg_hc_cleanup(hcd->core_if, chan);
+	}
+
+	/* Initialize the DWC core for host mode operation. */
+	dwc_otg_core_host_init(hcd->core_if);
+}
+/**
+ * Assigns transactions from a QTD to a free host channel and initializes the
+ * host channel to perform the transactions. The host channel is removed from
+ * the free list.
+ *
+ * @param hcd The HCD state structure.
+ * @param qh Transactions from the first QTD for this QH are selected and
+ * assigned to a free host channel.
+ *
+ * The caller must guarantee the free channel list is non-empty.  When the
+ * transfer buffer is not DWORD aligned in plain DMA mode, a bounce
+ * buffer (qh->dw_align_buf) is allocated and used instead.
+ */
+static void assign_and_init_hc(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	struct dwc_hc *hc;
+	struct dwc_otg_qtd *qtd;
+	struct urb *urb;
+	/* Non-NULL when a non-DWORD-aligned buffer needs bouncing. */
+	void *ptr = NULL;
+
+	DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh);
+	hc = list_entry(hcd->free_hc_list.next, struct dwc_hc, hc_list_entry);
+
+	/* Remove the host channel from the free list. */
+	list_del_init(&hc->hc_list_entry);
+	qtd = list_entry(qh->qtd_list.next, struct dwc_otg_qtd, qtd_list_entry);
+	urb = qtd->urb;
+	qh->channel = hc;
+	qh->qtd_in_process = qtd;
+
+	/*
+	 * Use usb_pipedevice to determine device address. This address is
+	 * 0 before the SET_ADDRESS command and the correct address afterward.
+	 */
+	hc->dev_addr = usb_pipedevice(urb->pipe);
+	hc->ep_num = usb_pipeendpoint(urb->pipe);
+
+	hc->speed = urb->dev->speed;
+
+	hc->max_packet = dwc_max_packet(qh->maxp);
+	hc->xfer_started = 0;
+	hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
+	hc->error_state = (qtd->error_count > 0);
+	hc->halt_on_queue = 0;
+	hc->halt_pending = 0;
+	hc->requests = 0;
+
+	/*
+	 * The following values may be modified in the transfer type section
+	 * below. The xfer_len value may be reduced when the transfer is
+	 * started to accommodate the max widths of the XferSize and PktCnt
+	 * fields in the HCTSIZn register.
+	 */
+	hc->do_ping = qh->ping_state;
+	hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
+	hc->data_pid_start = qh->data_toggle;
+	hc->multi_count = 1;
+
+
+	/* If we dont fill(or empty) a URBs buffer in a single pass (which is
+	 * unlikely) the QTD remains on the front of the qh list and it gets
+	 * rescheduled again and we end up here, this is why everything gets
+	 * adjusted by urb->actual_length which contains the amount of data in
+	 * the urb so far.
+	 */
+
+	if (hcd->core_if->dma_enable) {
+		hc->xfer_buff =
+			(u8 *)(u32)urb->transfer_dma + urb->actual_length;
+		/* For non-dword aligned case */
+		if (((u32)hc->xfer_buff & 0x3)
+				&& !hcd->core_if->dma_desc_enable)
+			ptr = (u8 *) urb->transfer_buffer + urb->actual_length;
+	} else {
+		hc->xfer_buff =
+			(u8 *) urb->transfer_buffer + urb->actual_length;
+	}
+
+	if (urb->actual_length > urb->transfer_buffer_length) {
+		/*
+		 * actual length may exceed transfer_buffer_length under an
+		 * error condition, dont let the calculation go negative.
+		 */
+		hc->xfer_len = urb->transfer_buffer_length;
+		WARN_ON(1);
+	} else {
+		hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
+	}
+
+
+	hc->xfer_count = 0;
+
+	/*
+	 * Set the split attributes
+	 */
+	hc->do_split = 0;
+	if (qh->do_split) {
+		hc->do_split = 1;
+		hc->xact_pos = qtd->isoc_split_pos;
+		hc->complete_split = qtd->complete_split;
+		hc->hub_addr = urb->dev->tt->hub->devnum;
+		hc->port_addr = urb->dev->ttport;
+	}
+	/* Per-transfer-type setup; may override the defaults above. */
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		hc->ep_type = USB_ENDPOINT_XFER_CONTROL;
+		switch (qtd->control_phase) {
+		case DWC_OTG_CONTROL_SETUP:
+			DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction\n");
+			hc->do_ping = 0;
+			hc->ep_is_in = 0;
+			hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
+			if (hcd->core_if->dma_enable)
+				hc->xfer_buff = (u8 *)(u32)urb->setup_dma;
+			else
+				hc->xfer_buff = (u8 *) urb->setup_packet;
+
+			/* Setup packets are always 8 bytes. */
+			hc->xfer_len = 8;
+			ptr = NULL;
+			break;
+		case DWC_OTG_CONTROL_DATA:
+			DWC_DEBUGPL(DBG_HCDV, "  Control data transaction\n");
+			hc->data_pid_start = qtd->data_toggle;
+			break;
+		case DWC_OTG_CONTROL_STATUS:
+
+			/*
+			 * Direction is opposite of data direction or IN if no
+			 * data.
+			 */
+			DWC_DEBUGPL(DBG_HCDV,
+					"  Control status transaction\n");
+			if (urb->transfer_buffer_length == 0)
+				hc->ep_is_in = 1;
+			else
+				hc->ep_is_in =
+					(usb_pipein(urb->pipe) != USB_DIR_IN);
+
+			if (hc->ep_is_in)
+				hc->do_ping = 0;
+
+			hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
+			hc->xfer_len = 0;
+			/* Status data (if any) lands in the bit bucket. */
+			if (hcd->core_if->dma_enable)
+				hc->xfer_buff = (u8 *)(u32)hcd->status_buf_dma;
+			else
+				hc->xfer_buff = (u8 *) hcd->status_buf;
+
+			ptr = NULL;
+			break;
+		}
+		break;
+	case PIPE_BULK:
+		hc->ep_type = USB_ENDPOINT_XFER_BULK;
+		break;
+	case PIPE_INTERRUPT:
+		hc->ep_type = USB_ENDPOINT_XFER_INT;
+		break;
+	case PIPE_ISOCHRONOUS: {
+		struct usb_iso_packet_descriptor *frame_desc;
+
+		hc->ep_type = USB_ENDPOINT_XFER_ISOC;
+		/* Descriptor DMA handles ISOC framing itself. */
+		if (hcd->core_if->dma_desc_enable)
+			break;
+
+		frame_desc =
+			&urb->iso_frame_desc[qtd->isoc_frame_index];
+
+		frame_desc->status = 0;
+		if (hcd->core_if->dma_enable)
+			hc->xfer_buff = (u8 *)(u32)urb->transfer_dma;
+		else
+			hc->xfer_buff = (u8 *) urb->transfer_buffer;
+
+		hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
+		hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+		/* For non-dword aligned buffers */
+		if (((u32)hc->xfer_buff & 0x3) && hcd->core_if->dma_enable)
+			ptr = (u8 *) urb->transfer_buffer +
+					frame_desc->offset +
+					qtd->isoc_split_offset;
+
+		else
+			ptr = NULL;
+		if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+			if (hc->xfer_len <= 188)
+				hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
+			else
+				hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
+		}
+		}
+		break;
+	}
+	/* non DWORD-aligned buffer case */
+	if (ptr) {
+		u32 buf_size;
+		if (hc->ep_type != USB_ENDPOINT_XFER_ISOC)
+			buf_size = hcd->core_if->core_params->max_transfer_size;
+		else
+			buf_size = 4096;
+
+		/* Allocate the bounce buffer once per QH and reuse it. */
+		if (!qh->dw_align_buf) {
+			qh->dw_align_buf = dma_alloc_coherent(hcd->dev,
+							buf_size,
+							&qh->dw_align_buf_dma,
+							GFP_ATOMIC);
+			if (!qh->dw_align_buf) {
+				DWC_ERROR("%s: Failed to allocate memory to "
+						"handle non-dword aligned "
+						"buffer case\n", __func__);
+				return;
+			}
+		}
+		/* OUT transfers: stage the data into the bounce buffer. */
+		if (!hc->ep_is_in)
+			memcpy(qh->dw_align_buf, ptr, hc->xfer_len);
+		hc->align_buff = qh->dw_align_buf_dma;
+	} else
+		hc->align_buff = 0;
+
+	if (hc->ep_type == USB_ENDPOINT_XFER_INT
+			|| hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+		/*
+		 * This value may be modified when the transfer is started to
+		 * reflect the actual transfer length.
+		 */
+		hc->multi_count = dwc_hb_mult(qh->maxp);
+	}
+	if (hcd->core_if->dma_desc_enable)
+		hc->desc_list_addr = qh->desc_list_dma;
+	dwc_otg_hc_init(hcd->core_if, hc);
+	hc->qh = qh;
+}
+/**
+ * This function selects transactions from the HCD transfer schedule and
+ * assigns them to available host channels. It is called from HCD interrupt
+ * handler functions.
+ *
+ * @param hcd The HCD state structure.
+ * @param locked_already non-zero if the caller already holds hcd->lock.
+ *
+ * @return The types of new transactions that were assigned to host channels.
+ */
+enum dwc_otg_transaction_type
+__dwc_otg_hcd_select_transactions(struct dwc_otg_hcd *hcd, int locked_already)
+{
+	struct list_head *qh_ptr;
+	struct dwc_otg_qh *qh;
+	int num_channels;
+	enum dwc_otg_transaction_type ret_val = DWC_OTG_TRANSACTION_NONE;
+	unsigned long flags = 0;
+
+	if (!locked_already)
+		spin_lock_irqsave(&hcd->lock, flags);
+
+#ifdef DEBUG_SOF
+	    DWC_DEBUGPL(DBG_HCD, "  Select Transactions\n");
+#endif	/*  */
+
+	/* Process entries in the periodic ready list. */
+	qh_ptr = hcd->periodic_sched_ready.next;
+	while (qh_ptr != &hcd->periodic_sched_ready
+		&& !list_empty(&hcd->free_hc_list)) {
+
+		qh = list_entry(qh_ptr, struct dwc_otg_qh, qh_list_entry);
+		assign_and_init_hc(hcd, qh);
+		/*
+		 * Move the QH from the periodic ready schedule to the
+		 * periodic assigned schedule.
+		 */
+		qh_ptr = qh_ptr->next;
+		list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned);
+		ret_val = DWC_OTG_TRANSACTION_PERIODIC;
+	}
+	/*
+	 * Process entries in the deferred portion of the non-periodic list.
+	 * A NAK put them here and, at the right time, they need to be
+	 * placed on the sched_inactive list.
+	 */
+	qh_ptr = hcd->non_periodic_sched_deferred.next;
+	while (qh_ptr != &hcd->non_periodic_sched_deferred) {
+		u16 frame_number =
+			dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+
+		/* Advance before list_move so the walk stays valid. */
+		qh = list_entry(qh_ptr, struct dwc_otg_qh, qh_list_entry);
+		qh_ptr = qh_ptr->next;
+
+		if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
+			/*
+			 * Move the QH from the non periodic deferred schedule
+			 * tothe non periodic inactive schedule.
+			 */
+			list_move(&qh->qh_list_entry,
+				  &hcd->non_periodic_sched_inactive);
+		}
+	}
+
+	/*
+	 * Process entries in the inactive portion of the non-periodic
+	 * schedule. Some free host channels may not be used if they are
+	 * reserved for periodic transfers.
+	 */
+	qh_ptr = hcd->non_periodic_sched_inactive.next;
+	num_channels = hcd->core_if->core_params->host_channels;
+	while (qh_ptr != &hcd->non_periodic_sched_inactive &&
+		(hcd->non_periodic_channels <
+		 num_channels - hcd->periodic_channels)
+		&& !list_empty(&hcd->free_hc_list)) {
+
+		qh = list_entry(qh_ptr, struct dwc_otg_qh, qh_list_entry);
+		assign_and_init_hc(hcd, qh);
+		/*
+		 * Move the QH from the non-periodic inactive schedule to the
+		 * non-periodic active schedule.
+		 */
+		qh_ptr = qh_ptr->next;
+		list_move(&qh->qh_list_entry,
+			   &hcd->non_periodic_sched_active);
+		if (ret_val == DWC_OTG_TRANSACTION_NONE)
+			ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
+		else
+			ret_val = DWC_OTG_TRANSACTION_ALL;
+
+		hcd->non_periodic_channels++;
+	}
+
+	if (!locked_already)
+		spin_unlock_irqrestore(&hcd->lock, flags);
+
+	return ret_val;
+}
+/**
+ * Wrapper around __dwc_otg_hcd_select_transactions() for callers that do
+ * NOT already hold hcd->lock (the lock is taken internally).
+ */
+enum dwc_otg_transaction_type
+dwc_otg_hcd_select_transactions_unlocked(struct dwc_otg_hcd *hcd)
+{
+	return __dwc_otg_hcd_select_transactions(hcd, 0);
+}
+
+/**
+ * Attempts to queue a single transaction request for a host channel
+ * associated with either a periodic or non-periodic transfer. This function
+ * assumes that there is space available in the appropriate request queue. For
+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
+ * is available in the appropriate Tx FIFO.
+ *
+ * @param hcd The HCD state structure.
+ * @param hc Host channel descriptor associated with either a periodic or
+ * non-periodic transfer.
+ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
+ * transfers.
+ *
+ * @return 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO.
+ */
+static int queue_transaction(struct dwc_otg_hcd *hcd,
+				struct dwc_hc *hc, u16 fifo_dwords_avail)
+{
+	int retval;
+
+	if (hcd->core_if->dma_enable) {
+		/*
+		 * In DMA mode the controller moves the data itself; just
+		 * (re)start the channel if needed.  No Tx FIFO space check.
+		 */
+		if (hcd->core_if->dma_desc_enable) {
+			if (!hc->xfer_started ||
+			    (hc->ep_type == USB_ENDPOINT_XFER_ISOC)) {
+				dwc_otg_hcd_start_xfer_ddma(hcd, hc->qh);
+				hc->qh->ping_state = 0;
+			}
+		} else if (!hc->xfer_started) {
+			dwc_otg_hc_start_transfer(hcd->core_if, hc);
+			hc->qh->ping_state = 0;
+		}
+		retval = 0;
+	} else if (hc->halt_pending) {
+		/* Don't queue a request if the channel has been halted. */
+		retval = 0;
+	} else if (hc->halt_on_queue) {
+		dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status);
+		retval = 0;
+	} else if (hc->do_ping) {
+		/* PING protocol: no data goes through the Tx FIFO. */
+		if (!hc->xfer_started)
+			dwc_otg_hc_start_transfer(hcd->core_if, hc);
+		retval = 0;
+	} else if (!hc->ep_is_in ||
+		   hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+		/*
+		 * Slave-mode OUT or SETUP: only start/continue if the Tx
+		 * FIFO can hold at least one max-size packet; otherwise
+		 * report insufficient FIFO space.
+		 */
+		if ((fifo_dwords_avail * 4) >= hc->max_packet) {
+			if (!hc->xfer_started) {
+				dwc_otg_hc_start_transfer(hcd->core_if, hc);
+				retval = 1;
+			} else {
+				retval = dwc_otg_hc_continue_transfer(
+						hcd->core_if, hc);
+			}
+		} else {
+			retval = -1;
+		}
+	} else {
+		/* Slave-mode IN transfer: no Tx FIFO space is required. */
+		if (!hc->xfer_started) {
+			dwc_otg_hc_start_transfer(hcd->core_if, hc);
+			retval = 1;
+		} else {
+			retval = dwc_otg_hc_continue_transfer(hcd->core_if,
+							      hc);
+		}
+	}
+
+	return retval;
+}
+
+/**
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ */
+static void process_periodic_channels(struct dwc_otg_hcd *hcd)
+{
+	union hptxsts_data tx_status;
+	struct list_head *qh_ptr;
+	struct dwc_otg_qh *qh;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+	struct dwc_otg_host_global_regs __iomem *host_regs;
+	host_regs = hcd->core_if->host_if->host_global_regs;
+	DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+
+#ifdef DEBUG
+	tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+	DWC_DEBUGPL(DBG_HCDV,
+			"  P Tx Req Queue Space Avail (before queue): %d\n",
+			tx_status.b.ptxqspcavail);
+	DWC_DEBUGPL(DBG_HCDV, "  P Tx FIFO Space Avail (before queue): %d\n",
+			tx_status.b.ptxfspcavail);
+
+#endif
+	/* Walk the QHs that have been assigned a host channel. */
+	qh_ptr = hcd->periodic_sched_assigned.next;
+	while (qh_ptr != &hcd->periodic_sched_assigned) {
+		/* Re-read status each pass; the queue drains as we go. */
+		tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+		if (tx_status.b.ptxqspcavail == 0) {
+			/* Periodic request queue full; retry later. */
+			no_queue_space = 1;
+			break;
+		}
+		qh = list_entry(qh_ptr, struct dwc_otg_qh, qh_list_entry);
+
+		/*
+		 * Set a flag if we're queuing high-bandwidth in slave mode.
+		 * The flag prevents any halts to get into the request queue in
+		 * the middle of multiple high-bandwidth packets getting queued.
+		 */
+		if ((!hcd->core_if->dma_enable) &&
+			(qh->channel->multi_count > 1))
+			hcd->core_if->queuing_high_bandwidth = 1;
+
+		status =
+			queue_transaction(hcd, qh->channel,
+					tx_status.b.ptxfspcavail);
+
+		if (status < 0) {
+			/* Not enough periodic Tx FIFO space; retry later. */
+			no_fifo_space = 1;
+			break;
+		}
+
+		/*
+		 * In Slave mode, stay on the current transfer until there is
+		 * nothing more to do or the high-bandwidth request count is
+		 * reached. In DMA mode, only need to queue one request. The
+		 * controller automatically handles multiple packets for
+		 * high-bandwidth transfers.
+		 */
+		if (hcd->core_if->dma_enable || status == 0 ||
+			qh->channel->requests == qh->channel->multi_count) {
+
+			qh_ptr = qh_ptr->next;
+
+			/*
+			 * Move the QH from the periodic assigned schedule to
+			 * the periodic queued schedule.
+			 */
+			list_move(&qh->qh_list_entry,
+					&hcd->periodic_sched_queued);
+
+			/* done queuing high bandwidth */
+			hcd->core_if->queuing_high_bandwidth = 0;
+		}
+	}
+	/* Slave mode only: manage the periodic Tx FIFO empty interrupt. */
+	if (!hcd->core_if->dma_enable) {
+		struct dwc_otg_core_global_regs __iomem *global_regs;
+		union gintmsk_data intr_mask = {.d32 = 0};
+		global_regs = hcd->core_if->core_global_regs;
+		intr_mask.b.ptxfempty = 1;
+
+#ifdef DEBUG
+		tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+		DWC_DEBUGPL(DBG_HCDV, "  P Tx Req Queue Space Avail "
+				"(after queue): %d\n",
+				tx_status.b.ptxqspcavail);
+		DWC_DEBUGPL(DBG_HCDV, "  P Tx FIFO Space Avail "
+				"(after queue): %d\n",
+				tx_status.b.ptxfspcavail);
+
+#endif	/*  */
+		if (!(list_empty(&hcd->periodic_sched_assigned))
+			|| no_queue_space || no_fifo_space) {
+
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the periodic Tx
+			 * FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			dwc_modify_reg32(&global_regs->gintmsk, 0,
+					intr_mask.d32);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			dwc_modify_reg32(&global_regs->gintmsk,
+					intr_mask.d32, 0);
+		}
+	}
+}
+
+/**
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ */
+static void process_non_periodic_channels(struct dwc_otg_hcd *hcd)
+{
+	union gnptxsts_data tx_status;
+	struct list_head *orig_qh_ptr;
+	struct dwc_otg_qh *qh;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+	int more_to_do = 0;
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		hcd->core_if->core_global_regs;
+	DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
+
+#ifdef DEBUG
+	tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+	DWC_DEBUGPL(DBG_HCDV, "  NP Tx Req Queue Space Avail "
+			"(before queue): %d\n",
+			tx_status.b.nptxqspcavail);
+	DWC_DEBUGPL(DBG_HCDV, "  NP Tx FIFO Space Avail "
+			"(before queue): %d\n",
+			tx_status.b.nptxfspcavail);
+#endif	/*  */
+	/*
+	 * Keep track of the starting point. Skip over the start-of-list
+	 * entry.  non_periodic_qh_ptr persists across calls, giving a
+	 * round-robin rotation through the active QHs for fairness.
+	 */
+	if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active)
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+
+	orig_qh_ptr = hcd->non_periodic_qh_ptr;
+
+	/*
+	 * Process once through the active list or until no more space is
+	 * available in the request queue or the Tx FIFO.
+	 */
+	do {
+
+		tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+		/* The request-queue-full check only applies in Slave mode. */
+		if (!hcd->core_if->dma_enable
+				&& tx_status.b.nptxqspcavail == 0) {
+			no_queue_space = 1;
+			break;
+		}
+		qh = list_entry(hcd->non_periodic_qh_ptr, struct dwc_otg_qh,
+				qh_list_entry);
+		status = queue_transaction(hcd, qh->channel,
+				tx_status.b.nptxfspcavail);
+
+		if (status > 0)
+			more_to_do = 1;
+		else if (status < 0) {
+			/* Insufficient NP Tx FIFO space; stop for now. */
+			no_fifo_space = 1;
+			break;
+		}
+#ifdef OTG_PLB_DMA_TASKLET
+		if (atomic_read(&release_later))
+			break;
+#endif
+
+		/* Advance to next QH, skipping start-of-list entry. */
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+		if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active)
+			hcd->non_periodic_qh_ptr =
+				hcd->non_periodic_qh_ptr->next;
+
+	} while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
+
+	/* Slave mode only: manage the NP Tx FIFO empty interrupt. */
+	if (!hcd->core_if->dma_enable) {
+		union gintmsk_data intr_mask = {.d32 = 0};
+		intr_mask.b.nptxfempty = 1;
+
+#ifndef OTG_PLB_DMA_TASKLET
+#ifdef DEBUG
+		tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+		DWC_DEBUGPL(DBG_HCDV, "  NP Tx Req Queue Space Avail "
+				"(after queue): %d\n",
+				tx_status.b.nptxqspcavail);
+		DWC_DEBUGPL(DBG_HCDV, "  NP Tx FIFO Space Avail "
+				"(after queue): %d\n",
+				tx_status.b.nptxfspcavail);
+#endif	/*  */
+#endif
+
+		if (more_to_do || no_queue_space || no_fifo_space) {
+
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the non-periodic
+			 * Tx FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			dwc_modify_reg32(&global_regs->gintmsk,
+					0, intr_mask.d32);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			dwc_modify_reg32(&global_regs->gintmsk,
+					intr_mask.d32, 0);
+		}
+	}
+}
+/**
+ * Queues transactions for the currently active host channels.  Called from
+ * the HCD interrupt handlers; dispatches to the periodic and/or
+ * non-periodic processing routines according to @_tr_type.
+ *
+ * @param hcd The HCD state structure.
+ * @param _tr_type The type(s) of transactions to queue (non-periodic,
+ * periodic, or both).
+ */
+void dwc_otg_hcd_queue_transactions(struct dwc_otg_hcd *hcd,
+					enum dwc_otg_transaction_type _tr_type)
+{
+	int want_periodic = (_tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
+			     _tr_type == DWC_OTG_TRANSACTION_ALL);
+	int want_non_periodic = (_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
+				 _tr_type == DWC_OTG_TRANSACTION_ALL);
+
+#ifdef DEBUG_SOF
+	DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+
+#endif
+	/* Host channels associated with periodic transfers. */
+	if (want_periodic && !list_empty(&hcd->periodic_sched_assigned))
+		process_periodic_channels(hcd);
+
+	/* Host channels associated with non-periodic transfers. */
+	if (want_non_periodic) {
+		if (!list_empty(&hcd->non_periodic_sched_active)) {
+			process_non_periodic_channels(hcd);
+		} else {
+			/*
+			 * Nothing non-periodic to process, so make sure the
+			 * NP Tx FIFO empty interrupt is masked off.
+			 */
+			union gintmsk_data gintmsk = {.d32 = 0};
+
+			gintmsk.b.nptxfempty = 1;
+			dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk,
+					gintmsk.d32, 0);
+		}
+	}
+}
+
+
+
+
+
+
+
+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
+ * success.  */
+
+
+/** Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued. */
+
+/** Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0. */
+int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+
+	/*
+	 * Bit 0 (the root hub itself) never reports a change; bit 1 covers
+	 * this controller's single root port and is set if any of the port
+	 * change flags are pending.
+	 */
+	buf[0] = 0;
+	buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change
+			|| dwc_otg_hcd->flags.b.port_reset_change
+			|| dwc_otg_hcd->flags.b.port_enable_change
+			|| dwc_otg_hcd->flags.b.port_suspend_change
+			|| dwc_otg_hcd->flags.b.port_over_current_change) << 1;
+
+	return (buf[0] != 0);
+}
+
+
+#ifdef DWC_HS_ELECT_TST
+/*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two function to drive the hardware
+ * directly.
+ */
+/*
+ * Register pointers used by the HS electrical test code below; set up by
+ * elec_test_helper() before do_setup()/do_in_ack() run.
+ *
+ * NOTE(review): these have external linkage but appear to be used only by
+ * this file's test code -- consider making them static.  global_regs and
+ * hc_global_regs also lack the __iomem annotation that hc_regs and
+ * data_fifo carry; confirm against the register accessor requirements.
+ */
+struct dwc_otg_core_global_regs *global_regs;
+struct dwc_otg_host_global_regs *hc_global_regs;
+dwc_otg_hc_regs __iomem *hc_regs;
+u32 __iomem *data_fifo;
+
+/*
+ * Send the Setup stage of a Get Device Descriptor control transfer by
+ * programming the test host channel's registers (hc_regs) directly.
+ * Busy-polls GINTSTS rather than using interrupts, so it is only usable
+ * from the electrical-test path where the caller has masked GINTMSK.
+ */
+static void do_setup(void)
+{
+	union gintsts_data gintsts;
+	union hctsiz_data hctsiz;
+	union hcchar_data hcchar;
+	haint_data_t haint;
+	union hcint_data hcint;
+
+	/* Enable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/*
+	 * Send Setup packet (Get Device Descriptor)
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+
+		/*
+		 * NOTE(review): chdis and chen are both set here -- this
+		 * looks like the core's channel-disable request sequence;
+		 * confirm against the controller programming guide.
+		 */
+		hcchar.b.chdis = 1;
+
+		hcchar.b.chen = 1;
+
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		/* Generous fixed delay for the channel halt to complete. */
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	}
+
+	/* Set HCTSIZ: one 8-byte SETUP packet */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT to EP0, mps 8, and enable the channel */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = USB_ENDPOINT_XFER_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+	/* Fill FIFO with Setup data for Get Device Descriptor */
+	data_fifo = (u32 *) ((char *)global_regs + 0x1000);
+	dwc_write_reg32(data_fifo++, 0x01000680);
+	dwc_write_reg32(data_fifo++, 0x00080000);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt (busy-poll, no timeout) */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	/* Disable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+}
+
+/*
+ * Perform the IN data stage and the zero-length OUT status (ACK) stage of
+ * the Get Device Descriptor transfer started by do_setup().  The received
+ * descriptor bytes are read from the FIFO and discarded.  Busy-polls
+ * GINTSTS/GRXSTS; only usable with interrupts masked by the caller.
+ */
+static void do_in_ack(void)
+{
+	union gintsts_data gintsts;
+	union hctsiz_data hctsiz;
+	union hcchar_data hcchar;
+	haint_data_t haint;
+	union hcint_data hcint;
+	host_grxsts_data_t grxsts;
+
+	/* Enable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/*
+	 * Receive Control In packet
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	if (hcchar.b.chen) {
+
+		/* chdis+chen together request a channel disable. */
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	}
+
+	/* Set HCTSIZ: one 8-byte DATA1 IN packet */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control IN from EP0, mps 8, enable channel */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = USB_ENDPOINT_XFER_CONTROL;
+	hcchar.b.epdir = 1;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Wait for receive status queue interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+
+	/* Read RXSTS (pop) */
+	grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+	switch (grxsts.b.pktsts) {
+	case DWC_GRXSTS_PKTSTS_IN:
+		/* Read the data into the host buffer */
+		if (grxsts.b.bcnt > 0) {
+			int i;
+			int word_count = (grxsts.b.bcnt + 3) / 4;
+			data_fifo = (u32 *) ((char *)global_regs + 0x1000);
+			/* Drain the FIFO; the data itself is discarded. */
+			for (i = 0; i < word_count; i++)
+				(void)dwc_read_reg32(data_fifo++);
+		}
+		break;
+	default:
+		break;
+	}
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+
+	/* Wait for receive status queue interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+	/* Read RXSTS (expecting transfer-complete status) */
+	grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+	switch (grxsts.b.pktsts) {
+	case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+		break;
+	default:
+		break;
+	}
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	mdelay(1);
+
+	/*
+	 * Send handshake packet
+	 */
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	}
+
+	/* Set HCTSIZ: zero-length DATA1 OUT status packet */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 0;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT to EP0, mps 8, enable channel */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = USB_ENDPOINT_XFER_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+
+	/* Disable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+}
+
+
+
+/*
+ * Handle SetPortFeature(USB_PORT_FEAT_TEST).  Test selectors below 6 are
+ * written straight to HPRT0.prttstctl; selectors 6..8 implement the HS
+ * electrical test suspend/resume and single-step Get-Device-Descriptor
+ * sequences by driving the hardware directly with all interrupts masked.
+ * The mdelay(15000) calls busy-wait 15 seconds each, per the test spec.
+ *
+ * NOTE(review): 'core_if' and 'hprt0' are not declared in this function
+ * nor visible at file scope in this chunk -- confirm they exist as
+ * file-scope definitions, otherwise this will not compile.
+ * NOTE(review): hcd, _typeReq, wValue, buf and wLength are unused here.
+ */
+static void elec_test_helper(struct usb_hcd *hcd, u16 _typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	u32 t;
+	union gintmsk_data gintmsk;
+	t = (wIndex >> 8);	/* MSB wIndex USB */
+	DWC_DEBUGPL(DBG_HCD,
+		"DWC OTG HCD HUB CONTROL - "
+		"SetPortFeature - "
+		"USB_PORT_FEAT_TEST %d\n",
+		t);
+	warn("USB_PORT_FEAT_TEST %d\n", t);
+	if (t < 6) {
+		/* Standard test modes: program the port test control field. */
+		hprt0.d32 = dwc_otg_read_hprt0(core_if);
+		hprt0.b.prttstctl = t;
+		dwc_write_reg32(core_if->host_if->hprt0,
+				hprt0.d32);
+	} else {
+		/* Setup global vars with reg addresses
+		 * (quick and dirty hack, should be
+		 * cleaned up)
+		 */
+		global_regs = core_if->core_global_regs;
+		hc_global_regs = core_if->host_if->host_global_regs;
+		hc_regs = (dwc_otg_hc_regs __iomem *)
+				((char __iomem *) global_regs + 0x500);
+		data_fifo = (u32 *) ((char *)global_regs + 0x1000);
+		if (t == 6) {	/* HS_HOST_PORT_SUSPEND_RESUME */
+			/* Save current interrupt mask */
+			gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+			/* Disable all interrupts while we muck with
+			 * the hardware directly
+			 */
+			dwc_write_reg32(&global_regs->gintmsk, 0);
+
+			/* 15 second delay per the test spec */
+			mdelay(15000);
+
+			/* Drive suspend on the root port */
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtsusp = 1;
+			hprt0.b.prtres = 0;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+			/* 15 second delay per the test spec */
+			mdelay(15000);
+
+			/* Drive resume on the root port */
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtsusp = 0;
+			hprt0.b.prtres = 1;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			mdelay(100);
+
+			/* Clear the resume bit */
+			hprt0.b.prtres = 0;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+			/* Restore interrupts */
+			dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+		} else if (t == 7) {
+			/* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+
+			/* Save current interrupt mask */
+			gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+			/* Disable all interrupts while we muck with
+			 * the hardware directly
+			 */
+			dwc_write_reg32(&global_regs->gintmsk, 0);
+
+			/* 15 second delay per the test spec */
+			mdelay(15000);
+
+			/* Send the Setup packet */
+			do_setup();
+
+			/* 15 second delay so nothing else happens for awhile */
+			mdelay(15000);
+
+			/* Restore interrupts */
+			dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+		} else if (t == 8) {
+			/* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+			/* Save current interrupt mask */
+			gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+			/* Disable all interrupts while we muck with
+			 * the hardware directly
+			 */
+			dwc_write_reg32(&global_regs->gintmsk, 0);
+
+			/* Send the Setup packet */
+			do_setup();
+
+			/* 15 second delay so nothing else happens for awhile */
+			mdelay(15000);
+
+			/* Send the In and Ack packets */
+			do_in_ack();
+
+			/* 15 second delay so nothing else happens for awhile */
+			mdelay(15000);
+
+			/* Restore interrupts */
+			dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+		}
+	}
+}
+
+#endif	/* DWC_HS_ELECT_TST */
+
+
+
+
+/** Handles hub class-specific requests.*/
+int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, u16 _typeReq, u16 wValue,
+				u16 wIndex, char *buf, u16 wLength)
+{
+	int retval = 0;
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct dwc_otg_core_if *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if;
+	struct usb_hub_descriptor *desc;
+	union hprt0_data hprt0 = {.d32 = 0};
+	u32 port_status;
+	switch (_typeReq) {
+	case ClearHubFeature:
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"ClearHubFeature 0x%x\n", wValue);
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* Nothing required here */
+			break;
+		default:
+			retval = -EINVAL;
+			DWC_ERROR("DWC OTG HCD - ClearHubFeature request "
+					"%xh unknown\n",
+					wValue);
+		}
+		break;
+	case ClearPortFeature:
+#ifdef CONFIG_USB_DWC_OTG_LPM
+		if (wValue != USB_PORT_FEAT_L1)
+#else
+		if (!wIndex || wIndex > 1)
+#endif
+			goto error;
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
+				"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtena = 1;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_SUSPEND\n");
+			dwc_write_reg32(core_if->pcgcctl, 0);
+			mdelay(5);
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtres = 1;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			hprt0.b.prtsusp = 0;
+			/* Clear Resume bit */
+			mdelay(100);
+			hprt0.b.prtres = 0;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			break;
+#ifdef CONFIG_USB_DWC_OTG_LPM
+		case USB_PORT_FEAT_L1:
+			{
+				union pcgcctl_data pcgcctl = {.d32 = 0 };
+				union glpmcfg_data lpmcfg = {.d32 = 0 };
+
+				lpmcfg.d32 =
+					dwc_read_reg32(&core_if->
+							core_global_regs->
+							glpmcfg);
+
+				lpmcfg.b.en_utmi_sleep = 0;
+				lpmcfg.b.hird_thres &= (~(1 << 4));
+				lpmcfg.b.prt_sleep_sts = 1;
+				dwc_write_reg32(&core_if->core_global_regs->
+						glpmcfg, lpmcfg.d32);
+
+				/* Clear Enbl_L1Gating bit. */
+				pcgcctl.b.enbl_sleep_gating = 1;
+				dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32,
+						 0);
+
+				mdelay(5);
+
+				hprt0.d32 = dwc_otg_read_hprt0(core_if);
+				hprt0.b.prtres = 1;
+				dwc_write_reg32(core_if->host_if->hprt0,
+						hprt0.d32);
+				/*
+				 * This bit will be cleared in
+				 * wakeup interrupt handle
+				 */
+				break;
+			}
+#endif
+		case USB_PORT_FEAT_POWER:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"ClearPortFeature USB_PORT_FEAT_POWER\n");
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtpwr = 0;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+			if (dwc_otg_hcd->otg_dev->soc_disable_vbus)
+				dwc_otg_hcd->otg_dev->soc_disable_vbus();
+
+			break;
+		case USB_PORT_FEAT_INDICATOR:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+
+			/* Port indicator not supported */
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			/* Clears drivers internal connect status change
+			 * flag */
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_C_CONNECTION\n");
+			dwc_otg_hcd->flags.b.port_connect_status_change = 0;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* Clears the driver's internal Port Reset Change
+			 * flag */
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_C_RESET\n");
+			dwc_otg_hcd->flags.b.port_reset_change = 0;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			/* Clears the driver's internal Port
+			 * Enable/Disable Change flag */
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_C_ENABLE\n");
+			dwc_otg_hcd->flags.b.port_enable_change = 0;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			/* Clears the driver's internal Port Suspend
+			 * Change flag, which is set when resume signaling on
+			 * the host port is complete */
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_C_SUSPEND\n");
+			dwc_otg_hcd->flags.b.port_suspend_change = 0;
+			break;
+#ifdef CONFIG_USB_DWC_OTG_LPM
+		case USB_PORT_FEAT_C_PORT_L1:
+			dwc_otg_hcd->flags.b.port_l1_change = 0;
+			break;
+#endif
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"ClearPortFeature "
+					"USB_PORT_FEAT_C_OVER_CURRENT\n");
+			dwc_otg_hcd->flags.b.port_over_current_change = 0;
+			break;
+		default:
+			retval = -EINVAL;
+			DWC_ERROR("DWC OTG HCD - "
+					"ClearPortFeature request %xh "
+					"unknown or unsupported\n", wValue);
+		}
+		break;
+	case GetHubDescriptor:
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+			     "GetHubDescriptor\n");
+		desc = (struct usb_hub_descriptor *)buf;
+		desc->bDescLength = 9;
+		desc->bDescriptorType = 0x29;
+		desc->bNbrPorts = 1;
+		desc->wHubCharacteristics = 0x08;
+		desc->bPwrOn2PwrGood = 1;
+		desc->bHubContrCurrent = 0;
+		desc->u.hs.DeviceRemovable[0] = 0;
+		desc->u.hs.DeviceRemovable[1] = 0xff;
+		break;
+	case GetHubStatus:
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+			     "GetHubStatus\n");
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+			     "GetPortStatus\n");
+		if (!wIndex || wIndex > 1)
+			goto error;
+		port_status = 0;
+		if (dwc_otg_hcd->flags.b.port_connect_status_change)
+			port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+		if (dwc_otg_hcd->flags.b.port_enable_change)
+			port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+		if (dwc_otg_hcd->flags.b.port_suspend_change)
+			port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+		if (dwc_otg_hcd->flags.b.port_l1_change)
+			port_status |= (1 << USB_PORT_FEAT_C_PORT_L1);
+		if (dwc_otg_hcd->flags.b.port_reset_change)
+			port_status |= (1 << USB_PORT_FEAT_C_RESET);
+		if (dwc_otg_hcd->flags.b.port_over_current_change) {
+			DWC_ERROR("Port Over-current status change\n");
+			port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+		}
+		if (!dwc_otg_hcd->flags.b.port_connect_status) {
+			/*
+			 * The port is disconnected, which means the core is
+			 * either in device mode or it soon will be. Just
+			 * return 0's for the remainder of the port status
+			 * since the port register can't be read if the core
+			 * is in device mode.
+			 */
+			*((__le32 *) buf) = cpu_to_le32(port_status);
+			break;
+		}
+		hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+		DWC_DEBUGPL(DBG_HCDV, "  HPRT0: 0x%08x\n", hprt0.d32);
+		if (hprt0.b.prtconnsts)
+			port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+		if (hprt0.b.prtena)
+			port_status |= (1 << USB_PORT_FEAT_ENABLE);
+		if (hprt0.b.prtsusp)
+			port_status |= (1 << USB_PORT_FEAT_SUSPEND);
+		if (hprt0.b.prtovrcurract)
+			port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+		if (hprt0.b.prtrst)
+			port_status |= (1 << USB_PORT_FEAT_RESET);
+		if (hprt0.b.prtpwr)
+			port_status |= (1 << USB_PORT_FEAT_POWER);
+		if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
+			port_status |= USB_PORT_STAT_HIGH_SPEED;
+
+		else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
+			port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+		if (hprt0.b.prttstctl)
+			port_status |= (1 << USB_PORT_FEAT_TEST);
+		if (dwc_otg_get_lpm_portsleepstatus(dwc_otg_hcd->core_if))
+			port_status |= (1 << USB_PORT_FEAT_L1);
+
+		/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
+		*((__le32 *) buf) = cpu_to_le32(port_status);
+		break;
+	case SetHubFeature:
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"SetHubFeature\n");
+
+		/* No HUB features supported */
+		break;
+	case SetPortFeature:
+		if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1))
+			goto error;
+
+		if (!dwc_otg_hcd->flags.b.port_connect_status) {
+			/*
+			 * The port is disconnected, which means the core is
+			 * either in device mode or it soon will be. Just
+			 * return without doing anything since the port
+			 * register can't be written if the core is in device
+			 * mode.
+			 */
+			break;
+		}
+
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+					"SetPortFeature - "
+					"USB_PORT_FEAT_SUSPEND\n");
+			if (hcd->self.otg_port == wIndex
+					&& hcd->self.b_hnp_enable) {
+				union gotgctl_data gotgctl = {.d32 = 0};
+				gotgctl.b.hstsethnpen = 1;
+				dwc_modify_reg32(&core_if->core_global_regs->
+						  gotgctl, 0, gotgctl.d32);
+				core_if->op_state = A_SUSPEND;
+			}
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtsusp = 1;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			{
+				unsigned long flags;
+				/* Update lx_state */
+				spin_lock_irqsave(&dwc_otg_hcd->lock, flags);
+				core_if->lx_state = DWC_OTG_L2;
+				spin_unlock_irqrestore(&dwc_otg_hcd->lock,
+						flags);
+			}
+			/* Suspend the Phy Clock */
+			{
+				union pcgcctl_data pcgcctl = {.d32 = 0};
+				pcgcctl.b.stoppclk = 1;
+				dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
+			}
+
+			/*
+			 * For HNP the bus must be suspended
+			 * for at least 200ms.
+			 */
+			if (hcd->self.b_hnp_enable)
+				mdelay(200);
+
+			break;
+		case USB_PORT_FEAT_POWER:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"SetPortFeature - USB_PORT_FEAT_POWER\n");
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+			hprt0.b.prtpwr = 1;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+			if (dwc_otg_hcd->otg_dev->soc_enable_vbus)
+					dwc_otg_hcd->otg_dev->soc_enable_vbus();
+
+			break;
+		case USB_PORT_FEAT_RESET:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"SetPortFeature - USB_PORT_FEAT_RESET\n");
+			{
+				union pcgcctl_data pcgcctl = {.d32 = 0 };
+				pcgcctl.b.enbl_sleep_gating = 1;
+				pcgcctl.b.stoppclk = 1;
+				dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32,
+						 0);
+				dwc_write_reg32(core_if->pcgcctl, 0);
+			}
+#ifdef CONFIG_USB_DWC_OTG_LPM
+			{
+				union glpmcfg_data lpmcfg;
+				lpmcfg.d32 =
+					dwc_read_reg32(&core_if->
+							core_global_regs->
+							glpmcfg);
+				if (lpmcfg.b.prt_sleep_sts) {
+					lpmcfg.b.en_utmi_sleep = 0;
+					lpmcfg.b.hird_thres &= (~(1 << 4));
+					dwc_write_reg32(&core_if->
+							core_global_regs->
+							glpmcfg, lpmcfg.d32);
+					mdelay(1);
+				}
+			}
+#endif
+			hprt0.d32 = dwc_otg_read_hprt0(core_if);
+
+			/* When B-Host the Port reset bit is set in
+			* the Start HCD Callback function, so that
+			* the reset is started within 1ms of the HNP
+			* success interrupt. */
+			if (!hcd->self.is_b_host) {
+				hprt0.b.prtrst = 1;
+				dwc_write_reg32(core_if->host_if->hprt0,
+						hprt0.d32);
+			}
+
+			/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+			mdelay(60);
+			hprt0.b.prtrst = 0;
+			dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+			core_if->lx_state = DWC_OTG_L0;
+			/* Now back to the on state */
+			break;
+
+#ifdef DWC_HS_ELECT_TST
+		case USB_PORT_FEAT_TEST:
+			elec_test_helper(hcd, _typeReq, wValue, wIndex, buf,
+					wLength);
+
+			break;
+
+
+#endif	/* DWC_HS_ELECT_TST */
+		case USB_PORT_FEAT_INDICATOR:
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+				"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+			/* Not supported */
+			break;
+		default:
+			retval = -EINVAL;
+			DWC_ERROR("DWC OTG HCD - "
+					"SetPortFeature request %xh "
+					"unknown or unsupported\n", wValue);
+			break;
+		}
+		break;
+#if 0 /* there is no Set and Test Port feature defined in the USB standard?? */
+	case SetAndTestPortFeature:
+		if (wValue != USB_PORT_FEAT_L1)
+			goto error;
+
+		{ /*new scope not part of if above */
+			int portnum, hird, devaddr, remwake;
+			union glpmcfg_data lpmcfg;
+			u32 time_usecs;
+			union gintsts_data gintsts;
+			union gintmsk_data gintmsk;
+
+			if (!dwc_otg_get_param_lpm_enable(core_if))
+				goto error;
+
+			if (wValue != USB_PORT_FEAT_L1 || wLength != 1)
+				goto error;
+
+			/* Check if the port currently is in SLEEP state */
+			lpmcfg.d32 =
+			    dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+			if (lpmcfg.b.prt_sleep_sts) {
+				DWC_INFO("Port is already in sleep mode\n");
+				buf[0] = 0;	/* Return success */
+				break;
+			}
+
+			portnum = wIndex & 0xf;
+			hird = (wIndex >> 4) & 0xf;
+			devaddr = (wIndex >> 8) & 0x7f;
+			remwake = (wIndex >> 15);
+
+			if (portnum != 1) {
+				retval = -EINVAL;
+				DWC_WARN("Wrong port number(%d) in "
+						" SetandTestPortFeature "
+						"request\n", portnum);
+				break;
+			}
+
+			DWC_PRINT("SetandTestPortFeature request: portnum = %d,"
+					"hird = %d, devaddr = %d, rewake = %d\n"
+					, portnum, hird, devaddr, remwake);
+			/* Disable LPM interrupt */
+			gintmsk.d32 = 0;
+			gintmsk.b.lpmtranrcvd = 1;
+			dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
+					 gintmsk.d32, 0);
+
+			if (dwc_otg_hcd_send_lpm
+					(dwc_otg_hcd, devaddr, hird, remwake)) {
+				retval = -EINVAL;
+				break;
+			}
+
+			time_usecs = 10 * (lpmcfg.b.retry_count + 1);
+			/*
+			 * We will consider timeout if time_usecs microseconds
+			 * pass,and we don't receive LPM transaction status.
+			 * After receiving non-error responce(ACK/NYET/STALL)
+			 * from device, core will set lpmtranrcvd bit.
+			 */
+			do {
+				gintsts.d32 =
+					dwc_read_reg32(&core_if->
+							core_global_regs->
+							gintsts);
+
+				if (gintsts.b.lpmtranrcvd)
+					break;
+
+				dwc_udelay(1);
+			} while (--time_usecs);
+
+			/* lpm_int bit will be cleared in LPM int handler */
+
+			/* Now fill status
+			 * 0x00 - Success
+			 * 0x10 - NYET
+			 * 0x11 - Timeout
+			 */
+			if (!gintsts.b.lpmtranrcvd) {
+				buf[0] = 0x3;	/* Completion code is Timeout */
+				dwc_otg_hcd_free_hc_from_lpm(dwc_otg_hcd);
+			} else {
+				lpmcfg.d32 =
+					dwc_read_reg32(&core_if->
+							core_global_regs->
+							glpmcfg);
+
+				if (lpmcfg.b.lpm_resp == 0x3) {
+					/* ACK responce from the device */
+					buf[0] = 0x00;	/* Success */
+				} else if (lpmcfg.b.lpm_resp == 0x2) {
+					/* NYET responce from the device */
+					buf[0] = 0x2;
+				} else {
+					/* Otherwise responce with Timeout */
+					buf[0] = 0x3;
+				}
+			}
+
+			DWC_PRINTF("Device responce to LPM trans is %x\n",
+					lpmcfg.b.lpm_resp);
+
+			dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0,
+					 gintmsk.d32);
+
+			break;
+		}
+#endif /* CONFIG_USB_DWC_OTG_LPM */
+	default:
+
+error:
+		retval = -EINVAL;
+		DWC_WARN("DWC OTG HCD - Unknown hub control request type or "
+				"invalid typeReq: %xh wIndex: %xh "
+				"wValue: %xh\n",
+				_typeReq, wIndex, wValue);
+		break;
+	}
+	return retval;
+}
+
+/*
+ * Block until the HCD reports idle (dwc_otg_hcd_idle() true).
+ *
+ * Uses the prepare_to_wait()/finish_wait() pattern on hcd->idleq so a
+ * wakeup arriving between the idle re-check and schedule() is not lost.
+ *
+ * Returns 0 once idle, or -ERESTARTSYS if a signal is pending.
+ */
+static int dwc_otg_hcd_wait_idle(struct dwc_otg_hcd *hcd)
+{
+	while (!dwc_otg_hcd_idle(hcd)) {
+		DEFINE_WAIT(wait);
+
+		prepare_to_wait(&hcd->idleq, &wait, TASK_INTERRUPTIBLE);
+		/* Re-check after queueing on idleq to close the race
+		 * against a concurrent wakeup. */
+		if (!dwc_otg_hcd_idle(hcd))
+			schedule();
+		finish_wait(&hcd->idleq, &wait);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+/* Root-hub bus-suspend callback: no controller-specific action needed,
+ * always reports success. */
+static int dwc_otc_hcd_bus_suspend(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+/* Root-hub bus-resume callback: no controller-specific action needed,
+ * always reports success. */
+static int dwc_otc_hcd_bus_resume(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Quiesce the host side of the controller for system sleep.
+ *
+ * Kills all outstanding URBs, waits for every host channel to drain
+ * (channel counters are reset on resume, so none may remain in flight),
+ * then reports disconnect and stops the HCD.
+ *
+ * Returns 0 on success or -ERESTARTSYS if the idle wait was interrupted,
+ * in which case the controller is re-armed so it keeps operating.
+ *
+ * Note: the original declared this definition `extern`, which is
+ * meaningless on a definition; dropped (linkage is unchanged).
+ */
+int dwc_otg_hcd_suspend(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
+	int err;
+
+	kill_all_urbs(dwc_otg_hcd);
+	/*
+	 * Ensure that no channels are left, as we reset the counters on resume.
+	 */
+	err = dwc_otg_hcd_wait_idle(dwc_otg_hcd);
+	if (err < 0)
+		goto err_idle;
+	dwc_otg_hcd_disconnect_cb(hcd);
+	dwc_otg_hcd_stop(hcd);
+	return 0;
+
+err_idle:
+	/* Interrupted mid-suspend: restart host operation so the
+	 * controller is left in a working state. */
+	if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if))
+		reset_tasklet_func((unsigned long)dwc_otg_hcd);
+	return err;
+}
+
+/*
+ * Restart the host side of the controller after system sleep.
+ *
+ * If the core is in host mode, starts the HCD and runs the reset tasklet
+ * body to reinitialize the root port. Always returns 0.
+ *
+ * Note: `extern` on the definition dropped, as in dwc_otg_hcd_suspend().
+ */
+int dwc_otg_hcd_resume(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
+
+	if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+		dwc_otg_hcd_start(hcd);
+		reset_tasklet_func((unsigned long)dwc_otg_hcd);
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+/** Returns index of host channel to perform LPM transaction.
+ *
+ * Masks host-channel interrupts, programs the chosen channel's HCCHAR
+ * register with the fields the core needs for the LPM token (control
+ * EP0 OUT, 64-byte max packet) and removes the channel from the free
+ * list. Returns -1 if no channel is free.
+ */
+int dwc_otg_hcd_get_hc_for_lpm_tran(struct dwc_otg_hcd *hcd, u8 devaddr)
+{
+	struct dwc_otg_core_if *core_if = hcd->core_if;
+	struct dwc_hc *hc;
+	/* Zero-initialize: only some bitfields are set below, and the
+	 * whole word is written to the hardware register. The original
+	 * left the remaining bits uninitialized. */
+	union hcchar_data hcchar = {.d32 = 0 };
+	union gintmsk_data gintmsk = {.d32 = 0 };
+
+	if (list_empty(&hcd->free_hc_list)) {
+		DWC_PRINT("No free channel to select for LPM transaction\n");
+		return -1;
+	}
+
+	/* Fix: list_entry() takes the node pointer itself. The original
+	 * passed &free_hc_list.next (the address of the head's next
+	 * field), which yields a bogus struct dwc_hc pointer. */
+	hc = list_entry(hcd->free_hc_list.next, struct dwc_hc, hc_list_entry);
+
+	/* Mask host channel interrupts. */
+	gintmsk.b.hcintr = 1;
+	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+
+	/* Fill fields that core needs for LPM transaction */
+	hcchar.b.devaddr = devaddr;
+	hcchar.b.epnum = 0;
+	hcchar.b.eptype = USB_ENDPOINT_XFER_CONTROL;
+	hcchar.b.mps = 64;
+	hcchar.b.lspddev = (hc->speed == USB_SPEED_LOW);
+	hcchar.b.epdir = 0;	/* OUT */
+	dwc_write_reg32(&core_if->host_if->hc_regs[hc->hc_num]->hcchar,
+			hcchar.d32);
+
+	/* Remove the host channel from the free list. */
+	list_del_init(&hc->hc_list_entry);
+
+	DWC_PRINT("hcnum = %d devaddr = %d\n", hc->hc_num, devaddr);
+
+	return hc->hc_num;
+}
+
+/** Release hc after performing LPM transaction.
+ *
+ * The channel number is read back from GLPMCFG.lpm_chan_index (the
+ * field programmed by dwc_otg_hcd_send_lpm()) and the corresponding
+ * channel is returned to the free list.
+ */
+void dwc_otg_hcd_free_hc_from_lpm(struct dwc_otg_hcd *hcd)
+{
+	struct dwc_hc *hc;
+	union glpmcfg_data lpmcfg;
+	u8 hc_num;
+
+	lpmcfg.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->glpmcfg);
+	hc_num = lpmcfg.b.lpm_chan_index;
+
+	hc = hcd->hc_ptr_array[hc_num];
+
+	DWC_PRINT("Freeing channel %d after LPM\n", hc_num);
+	/* Return host channel to free list */
+	list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
+}
+
+/*
+ * Issue an LPM (Link Power Management) transaction to device @devaddr.
+ * @hird:        Host-Initiated Resume Duration field for the LPM token.
+ * @bRemoteWake: remote-wakeup-enable bit for the LPM token.
+ *
+ * Claims a free host channel, enables sleep clock gating, programs the
+ * GLPMCFG transaction fields, then sets send_lpm in a second write to
+ * trigger the transaction (the two-step write order matters to the core).
+ *
+ * Returns 0 on success or a negative value if no channel was available.
+ */
+int dwc_otg_hcd_send_lpm(struct dwc_otg_hcd *hcd, u8 devaddr, u8 hird,
+			 u8 bRemoteWake)
+{
+	union glpmcfg_data lpmcfg;
+	union pcgcctl_data pcgcctl = {.d32 = 0 };
+	int channel;
+
+	channel = dwc_otg_hcd_get_hc_for_lpm_tran(hcd, devaddr);
+	if (channel < 0)
+		return channel;
+
+	pcgcctl.b.enbl_sleep_gating = 1;
+	dwc_modify_reg32(hcd->core_if->pcgcctl, 0, pcgcctl.d32);
+
+	/* Read LPM config register */
+	lpmcfg.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->glpmcfg);
+
+	/* Program LPM transaction fields */
+	lpmcfg.b.rem_wkup_en = bRemoteWake;
+	lpmcfg.b.hird = hird;
+	lpmcfg.b.hird_thres = 0x1c;
+	lpmcfg.b.lpm_chan_index = channel;
+	lpmcfg.b.en_utmi_sleep = 1;
+	/* Program LPM config register */
+	dwc_write_reg32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	/* Send LPM transaction */
+	lpmcfg.b.send_lpm = 1;
+	dwc_write_reg32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	return 0;
+}
+
+#endif	/* CONFIG_USB_DWC_OTG_LPM */
+
+/** Returns the current frame number.
+ *
+ * Reads the frame number field from the HFNUM host global register.
+ */
+int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	union hfnum_data hfnum;
+	hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->host_if->
+					host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n",
+			hfnum.b.frnum);
+#endif
+	return hfnum.b.frnum;
+}
+
+
+/** Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure. */
+int dwc_otg_hcd_start(struct usb_hcd *hcd)
+{
+	struct dwc_otg_hcd *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	struct usb_device *udev;
+	struct usb_bus *bus;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
+	bus = hcd_to_bus(hcd);
+
+	/* Initialize the bus state.  If the core is in Device Mode
+	 * HALT the USB bus and return. */
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Initialize and connect root hub if one is not already attached */
+	if (bus->root_hub) {
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
+		/* Inform the HUB driver to resume. */
+		usb_hcd_resume_root_hub(hcd);
+	} else {
+		udev = usb_alloc_dev(NULL, bus, 0);
+		/* Fix: the original wrote udev->speed before the NULL
+		 * check, dereferencing a potentially NULL pointer. */
+		if (!udev) {
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n");
+			return -ENODEV;
+		}
+		udev->speed = USB_SPEED_HIGH;
+		/* NOTE(review): udev is never registered or released on
+		 * this path — looks like a reference leak; confirm against
+		 * the rest of the driver. */
+	}
+	hcd_reinit(dwc_otg_hcd);
+	return 0;
+}
+/**
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ *
+ * Must be called with hcd->lock held; the lock is dropped around
+ * usb_hcd_giveback_urb() (see the __releases/__acquires annotations)
+ * because give-back must run unlocked with interrupts enabled.
+ *
+ * NJ: TODO this can get called from interrupt (Hard) context, should this be
+ *     defered to a BH handler?
+ */
+void
+dwc_otg_hcd_complete_urb(struct dwc_otg_hcd *hcd, struct urb *urb, int status)
+__releases(hcd->lock)
+__acquires(hcd->lock)
+{
+	unsigned long flags;
+
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
+				__func__, urb, usb_pipedevice(urb->pipe),
+				usb_pipeendpoint(urb->pipe),
+				usb_pipein(urb->pipe) ? "IN" : "OUT", status);
+		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+			int i;
+			for (i = 0; i < urb->number_of_packets; i++) {
+				DWC_PRINT("  ISO Desc %d status: %d\n", i,
+					   urb->iso_frame_desc[i].status);
+			}
+		}
+	}
+#endif	/*  */
+
+	/* NOTE(review): this zeroes benign statuses only when the URB *was*
+	 * unlinked; usbcore's equivalent logic normally applies this to URBs
+	 * that were NOT unlinked. Confirm the condition is intended. */
+	if (likely(urb->unlinked)) {
+		/* report non-error and short read status as zero */
+		if (status == -EINPROGRESS || status == -EREMOTEIO)
+			status = 0;
+	}
+
+	urb->hcpriv = NULL;
+
+	/*
+	 * We must call unlink with our lock held and interrupts off (to
+	 * protect us from khub), it is possible to be here with ints enabled
+	 * so disable ints.
+	 */
+	local_irq_save(flags);
+	usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
+	local_irq_restore(flags);
+
+	/* give back must be called without the lock and with ints enabled */
+	spin_unlock(&hcd->lock);
+	usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
+	spin_lock(&hcd->lock);
+}
+
+
+/*
+ * Map an URB to its Queue Head, stored in the endpoint's hcpriv field.
+ */
+struct dwc_otg_qh *dwc_urb_to_qh(struct urb *urb)
+{
+	struct usb_host_endpoint *ep;
+
+	ep = dwc_urb_to_endpoint(urb);
+	return ep->hcpriv;
+}
+
+
+#ifdef DEBUG
+/*
+ * Pretty-print an 8-byte SETUP packet at debug level DBG_HCD.
+ *
+ * @setup points at the raw little-endian wire bytes. The 16-bit fields
+ * are assembled from individual bytes instead of the original
+ * `*((u16 *)&setup[n])` casts, which were unaligned, endian-dependent
+ * and violated strict aliasing. Also fixes the "Tranfer" typo.
+ */
+void dwc_print_setup_data(u8 *setup)
+{
+	int i;
+	if (CHK_DEBUG_LEVEL(DBG_HCD)) {
+		DWC_PRINT("Setup Data = MSB ");
+		for (i = 7; i >= 0; i--)
+			DWC_PRINT("%02x ", setup[i]);
+		DWC_PRINT("\n");
+		DWC_PRINT("  bmRequestType Transfer = %s\n",
+			   (setup[0] & 0x80) ? "Device-to-Host" :
+			   "Host-to-Device");
+		DWC_PRINT("  bmRequestType Type = ");
+		switch ((setup[0] & 0x60) >> 5) {
+		case 0:
+			DWC_PRINT("Standard\n");
+			break;
+		case 1:
+			DWC_PRINT("Class\n");
+			break;
+		case 2:
+			DWC_PRINT("Vendor\n");
+			break;
+		case 3:
+			DWC_PRINT("Reserved\n");
+			break;
+		}
+		DWC_PRINT("  bmRequestType Recipient = ");
+		switch (setup[0] & 0x1f) {
+		case 0:
+			DWC_PRINT("Device\n");
+			break;
+		case 1:
+			DWC_PRINT("Interface\n");
+			break;
+		case 2:
+			DWC_PRINT("Endpoint\n");
+			break;
+		case 3:
+			DWC_PRINT("Other\n");
+			break;
+		default:
+			DWC_PRINT("Reserved\n");
+			break;
+		}
+		DWC_PRINT("  bRequest = 0x%0x\n", setup[1]);
+		/* wValue/wIndex/wLength are little-endian on the wire. */
+		DWC_PRINT("  wValue = 0x%0x\n", (setup[3] << 8) | setup[2]);
+		DWC_PRINT("  wIndex = 0x%0x\n", (setup[5] << 8) | setup[4]);
+		DWC_PRINT("  wLength = 0x%0x\n\n", (setup[7] << 8) | setup[6]);
+	}
+}
+
+
+#endif	/*  */
+
+/*
+ * Dump the full HCD state to the log: per-channel software state, live
+ * channel registers for started transfers, the in-process URB of each
+ * busy channel, scheduler counters and the Tx queue/FIFO space registers.
+ * Compiled to an empty function unless DEBUG is defined.
+ */
+void dwc_otg_hcd_dump_state(struct dwc_otg_hcd *hcd)
+{
+
+#ifdef DEBUG
+	int num_channels;
+	int i;
+	union gnptxsts_data np_tx_status;
+	union hptxsts_data p_tx_status;
+	num_channels = hcd->core_if->core_params->host_channels;
+	DWC_PRINT("\n");
+	DWC_PRINT
+	    ("************************************************************\n");
+	DWC_PRINT("HCD State:\n");
+	DWC_PRINT("  Num channels: %d\n", num_channels);
+	for (i = 0; i < num_channels; i++) {
+		struct dwc_hc *hc = hcd->hc_ptr_array[i];
+		DWC_PRINT("  Channel %d:\n", i);
+		DWC_PRINT("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			   hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		DWC_PRINT("    speed: %d\n", hc->speed);
+		DWC_PRINT("    ep_type: %d\n", hc->ep_type);
+		DWC_PRINT("    max_packet: %d\n", hc->max_packet);
+		DWC_PRINT("    data_pid_start: %d\n", hc->data_pid_start);
+		DWC_PRINT("    multi_count: %d\n", hc->multi_count);
+		DWC_PRINT("    xfer_started: %d\n", hc->xfer_started);
+		DWC_PRINT("    xfer_buff: %p\n", hc->xfer_buff);
+		DWC_PRINT("    xfer_len: %d\n", hc->xfer_len);
+		DWC_PRINT("    xfer_count: %d\n", hc->xfer_count);
+		DWC_PRINT("    halt_on_queue: %d\n", hc->halt_on_queue);
+		DWC_PRINT("    halt_pending: %d\n", hc->halt_pending);
+		DWC_PRINT("    halt_status: %d\n", hc->halt_status);
+		DWC_PRINT("    do_split: %d\n", hc->do_split);
+		DWC_PRINT("    complete_split: %d\n", hc->complete_split);
+		DWC_PRINT("    hub_addr: %d\n", hc->hub_addr);
+		DWC_PRINT("    port_addr: %d\n", hc->port_addr);
+		DWC_PRINT("    xact_pos: %d\n", hc->xact_pos);
+		DWC_PRINT("    requests: %d\n", hc->requests);
+		DWC_PRINT("    qh: %p\n", hc->qh);
+		/* Only read live channel registers for started transfers */
+		if (hc->xfer_started) {
+			union hfnum_data hfnum;
+			union hcchar_data hcchar;
+			union hctsiz_data hctsiz;
+			union hcint_data hcint;
+			union hcintmsk_data hcintmsk;
+			hfnum.d32 =
+			    dwc_read_reg32(&hcd->core_if->host_if->
+					   host_global_regs->hfnum);
+			hcchar.d32 =
+			    dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->
+					   hcchar);
+			hctsiz.d32 =
+			    dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->
+					   hctsiz);
+			hcint.d32 =
+			    dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->
+					   hcint);
+			hcintmsk.d32 =
+			    dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->
+					   hcintmsk);
+			DWC_PRINT("    hfnum: 0x%08x\n", hfnum.d32);
+			DWC_PRINT("    hcchar: 0x%08x\n", hcchar.d32);
+			DWC_PRINT("    hctsiz: 0x%08x\n", hctsiz.d32);
+			DWC_PRINT("    hcint: 0x%08x\n", hcint.d32);
+			DWC_PRINT("    hcintmsk: 0x%08x\n", hcintmsk.d32);
+		}
+		/* Dump the URB currently being processed on this channel */
+		if (hc->xfer_started && (hc->qh != NULL)
+				&& (hc->qh->qtd_in_process != NULL)) {
+			struct dwc_otg_qtd *qtd;
+			struct urb *urb;
+			qtd = hc->qh->qtd_in_process;
+			urb = qtd->urb;
+			DWC_PRINT("    URB Info:\n");
+			DWC_PRINT("      qtd: %p, urb: %p\n", qtd, urb);
+			if (urb != NULL) {
+				DWC_PRINT("      Dev: %d, EP: %d %s\n",
+					   usb_pipedevice(urb->pipe),
+					   usb_pipeendpoint(urb->pipe),
+					   usb_pipein(urb->
+						       pipe) ? "IN" : "OUT");
+				DWC_PRINT("      Max packet size: %d\n",
+					   usb_maxpacket(urb->dev, urb->pipe,
+							  usb_pipeout(urb->
+								      pipe)));
+				DWC_PRINT("      transfer_buffer: %p\n",
+					   urb->transfer_buffer);
+				DWC_PRINT("      transfer_dma: %p\n",
+					   (void *)urb->transfer_dma);
+				DWC_PRINT("      transfer_buffer_length: %d\n",
+					   urb->transfer_buffer_length);
+				DWC_PRINT("      actual_length: %d\n",
+					   urb->actual_length);
+			}
+		}
+	} DWC_PRINT("  non_periodic_channels: %d\n",
+		      hcd->non_periodic_channels);
+	DWC_PRINT("  periodic_channels: %d\n", hcd->periodic_channels);
+	DWC_PRINT("  periodic_usecs: %d\n", hcd->periodic_usecs);
+	np_tx_status.d32 =
+	    dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
+	DWC_PRINT("  NP Tx Req Queue Space Avail: %d\n",
+		   np_tx_status.b.nptxqspcavail);
+	DWC_PRINT("  NP Tx FIFO Space Avail: %d\n",
+		   np_tx_status.b.nptxfspcavail);
+	p_tx_status.d32 =
+	    dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts);
+	DWC_PRINT("  P Tx Req Queue Space Avail: %d\n",
+		   p_tx_status.b.ptxqspcavail);
+	DWC_PRINT("  P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
+	dwc_otg_hcd_dump_frrem(hcd);
+	dwc_otg_dump_global_registers(hcd->core_if);
+	dwc_otg_dump_host_registers(hcd->core_if);
+	DWC_PRINT
+	    ("************************************************************\n");
+	DWC_PRINT("\n");
+
+#endif	/*  */
+}
+
+#ifdef DEBUG
+/*
+ * Log an URB's addressing, pipe type, speed, buffers and (for
+ * isochronous pipes) the per-packet descriptors. @fn_name tags the
+ * output with the caller's name. DEBUG-only helper.
+ */
+static void dump_urb_info(struct urb *urb, char *fn_name)
+{
+	DWC_PRINT("%s, urb %p\n", fn_name, urb);
+	DWC_PRINT("  Device address: %d\n", usb_pipedevice(urb->pipe));
+	DWC_PRINT("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+		   (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+	/* The ({ ... }) blocks are GCC statement expressions: each
+	 * evaluates to its final expression's value. */
+	DWC_PRINT("  Endpoint type: %s\n", ({
+			char *pipetype;
+			switch (usb_pipetype(urb->pipe)) {
+			case PIPE_CONTROL:
+				pipetype = "CONTROL"; break;
+			case PIPE_BULK:
+				pipetype = "BULK"; break;
+			case PIPE_INTERRUPT:
+				pipetype = "INTERRUPT"; break;
+			case PIPE_ISOCHRONOUS:
+				pipetype = "ISOCHRONOUS"; break;
+			default:
+				pipetype = "UNKNOWN"; break;
+			};
+			pipetype;
+	}));
+	DWC_PRINT("  Speed: %s\n", ({
+			char *speed;
+			switch (urb->dev->speed) {
+			case USB_SPEED_HIGH:
+				speed = "HIGH"; break;
+			case USB_SPEED_FULL:
+				speed = "FULL"; break;
+			case USB_SPEED_LOW:
+				speed = "LOW"; break;
+			default:
+				speed = "UNKNOWN"; break;
+			};
+			speed;
+	}));
+	DWC_PRINT("  Max packet size: %d\n",
+		   usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+	DWC_PRINT("  Data buffer length: %d\n", urb->transfer_buffer_length);
+	DWC_PRINT("  Transfer buffer: %p, Transfer DMA: %p\n",
+		   urb->transfer_buffer, (void *)urb->transfer_dma);
+	DWC_PRINT("  Setup buffer: %p, Setup DMA: %p\n", urb->setup_packet,
+		   (void *)urb->setup_dma);
+	DWC_PRINT("  Interval: %d\n", urb->interval);
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+		for (i = 0; i < urb->number_of_packets; i++) {
+			DWC_PRINT("  ISO Desc %d:\n", i);
+			DWC_PRINT("    offset: %d, length %d\n",
+				   urb->iso_frame_desc[i].offset,
+				   urb->iso_frame_desc[i].length);
+		}
+	}
+}
+/*
+ * Dump the frame-remaining statistics: for each sample point (SOF,
+ * start_transfer, points A and B, broken down by microframe) print the
+ * sample count, the accumulated frame-remaining total and the average
+ * (guarding div_u64 against zero samples).
+ *
+ * NOTE(review): the inner #ifdef DEBUG is redundant — this definition is
+ * already inside an #ifdef DEBUG section; confirm before removing.
+ */
+void dwc_otg_hcd_dump_frrem(struct dwc_otg_hcd *hcd)
+{
+
+
+#ifdef DEBUG
+	DWC_PRINT("Frame remaining at SOF:\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->frrem_samples, hcd->frrem_accum,
+			(hcd->frrem_samples > 0) ?
+			div_u64(hcd->frrem_accum, hcd->frrem_samples) : 0);
+
+	DWC_PRINT("\n");
+	DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->core_if->hfnum_7_samples,
+			hcd->core_if->hfnum_7_frrem_accum,
+			(hcd->core_if->hfnum_7_samples > 0)
+			?
+			div_u64(hcd->core_if->hfnum_7_frrem_accum,
+				hcd->core_if->hfnum_7_samples)
+			:
+			0);
+	DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->core_if->hfnum_0_samples,
+			hcd->core_if->hfnum_0_frrem_accum,
+			(hcd->core_if->hfnum_0_samples > 0)
+			?
+			div_u64(hcd->core_if->hfnum_0_frrem_accum,
+				hcd->core_if->hfnum_0_samples)
+			:
+			0);
+	DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->core_if->hfnum_other_samples,
+			hcd->core_if->hfnum_other_frrem_accum,
+			(hcd->core_if->hfnum_other_samples > 0)
+			?
+			div_u64(hcd->core_if->hfnum_other_frrem_accum,
+				hcd->core_if->hfnum_other_samples)
+			:
+			0);
+
+	DWC_PRINT("\n");
+	DWC_PRINT("Frame remaining at sample point A (uframe 7):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a,
+			(hcd->hfnum_7_samples_a > 0)
+			?
+			div_u64(hcd->hfnum_7_frrem_accum_a,
+				hcd->hfnum_7_samples_a)
+			:
+			0);
+	DWC_PRINT("Frame remaining at sample point A (uframe 0):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a,
+			(hcd->hfnum_0_samples_a > 0) ?
+			div_u64(hcd->hfnum_0_frrem_accum_a,
+				hcd->hfnum_0_samples_a) : 0);
+	DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_other_samples_a,
+			hcd->hfnum_other_frrem_accum_a,
+			(hcd->hfnum_other_samples_a > 0)
+			?
+			div_u64(hcd->hfnum_other_frrem_accum_a,
+				hcd->hfnum_other_samples_a)
+			:
+			0);
+	DWC_PRINT("\n");
+	DWC_PRINT("Frame remaining at sample point B (uframe 7):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b,
+			(hcd->hfnum_7_samples_b > 0) ?
+			div_u64(hcd->hfnum_7_frrem_accum_b,
+				hcd->hfnum_7_samples_b) : 0);
+	DWC_PRINT("Frame remaining at sample point B (uframe 0):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b,
+			(hcd->hfnum_0_samples_b > 0) ?
+			div_u64(hcd->hfnum_0_frrem_accum_b,
+				hcd->hfnum_0_samples_b) : 0);
+	DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n");
+	DWC_PRINT("  samples %u, accum %llu, avg %llu\n",
+			hcd->hfnum_other_samples_b,
+			hcd->hfnum_other_frrem_accum_b,
+			(hcd->hfnum_other_samples_b > 0)
+			?
+			div_u64(hcd->hfnum_other_frrem_accum_b,
+				hcd->hfnum_other_samples_b)
+			:
+			0);
+#endif
+
+}
+
+/*
+ * Log the channel a QH is assigned to (registers + software state), the
+ * three non-periodic schedule lists, and the channel pointer array.
+ * No-op when the QH has no channel. DEBUG-only helper.
+ *
+ * Fixes vs. original: hctsiz was declared as union hcsplt_data (wrong
+ * union type); the "NP active sched" heading iterated the deferred list
+ * and "NP deferred sched" iterated the active list (swapped); the
+ * channel-array loop shadowed the outer `hc`.
+ */
+static void dump_channel_info(struct dwc_otg_hcd *hcd,  struct dwc_otg_qh *qh)
+{
+	if (qh->channel != NULL) {
+		struct dwc_hc *hc = qh->channel;
+		struct list_head *item;
+		struct dwc_otg_qh *qh_item;
+		int num_channels = hcd->core_if->core_params->host_channels;
+		int i;
+		struct dwc_otg_hc_regs __iomem *hc_regs;
+		union hcchar_data hcchar;
+		union hcsplt_data hcsplt;
+		union hctsiz_data hctsiz;
+		u32 hcdma;
+
+		hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		hcdma = dwc_read_reg32(&hc_regs->hcdma);
+		DWC_PRINT("  Assigned to channel %p:\n", hc);
+		DWC_PRINT("    hcchar 0x%08x, hcsplt 0x%08x\n",
+				hcchar.d32, hcsplt.d32);
+		DWC_PRINT("    hctsiz 0x%08x, hcdma 0x%08x\n",
+				hctsiz.d32, hcdma);
+		DWC_PRINT("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			   hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		DWC_PRINT("    ep_type: %d\n", hc->ep_type);
+		DWC_PRINT("    max_packet: %d\n", hc->max_packet);
+		DWC_PRINT("    data_pid_start: %d\n", hc->data_pid_start);
+		DWC_PRINT("    xfer_started: %d\n", hc->xfer_started);
+		DWC_PRINT("    halt_status: %d\n", hc->halt_status);
+		DWC_PRINT("    xfer_buff: %p\n", hc->xfer_buff);
+		DWC_PRINT("    xfer_len: %d\n", hc->xfer_len);
+		DWC_PRINT("    qh: %p\n", hc->qh);
+
+		DWC_PRINT("  NP inactive sched:\n");
+		list_for_each(item, &hcd->non_periodic_sched_inactive) {
+			qh_item = list_entry(item, struct dwc_otg_qh,
+					qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+
+		DWC_PRINT("  NP active sched:\n");
+		list_for_each(item, &hcd->non_periodic_sched_active) {
+			qh_item = list_entry(item, struct dwc_otg_qh,
+					qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+
+		DWC_PRINT("  NP deferred sched:\n");
+		list_for_each(item, &hcd->non_periodic_sched_deferred) {
+			qh_item = list_entry(item, struct dwc_otg_qh,
+					qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+
+		DWC_PRINT("  Channels: \n");
+		for (i = 0; i < num_channels; i++) {
+			struct dwc_hc *ch = hcd->hc_ptr_array[i];
+			DWC_PRINT("    %2d: %p\n", i, ch);
+		}
+	}
+}
+
+#endif	/*  */
+
+#endif	/* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd.h b/drivers/usb/dwc_otg/dwc_otg_hcd.h
new file mode 100644
index 0000000..78362ea
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd.h
@@ -0,0 +1,837 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+#if !defined(__DWC_HCD_H__)
+#define __DWC_HCD_H__
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Host Controller Driver (HCD).
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+/**
+ * Phases for control transfers.
+ */
+enum dwc_otg_control_phase {
+	DWC_OTG_CONTROL_SETUP,
+	DWC_OTG_CONTROL_DATA,
+	DWC_OTG_CONTROL_STATUS
+};
+
+/** Transaction types. */
+enum dwc_otg_transaction_type {
+	DWC_OTG_TRANSACTION_NONE,
+	DWC_OTG_TRANSACTION_PERIODIC,
+	DWC_OTG_TRANSACTION_NON_PERIODIC,
+	DWC_OTG_TRANSACTION_ALL
+};
+
+/**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+
+struct dwc_otg_qh;
+
+struct dwc_otg_qtd {
+	/**
+	 * Determines the PID of the next data packet for the data phase of
+	 * control transfers. Ignored for other transfer types.<br>
+	 * One of the following values:
+	 *	- DWC_OTG_HC_PID_DATA0
+	 *	- DWC_OTG_HC_PID_DATA1
+	 */
+	u8		data_toggle;
+
+	/** Current phase for control transfers (Setup, Data, or Status). */
+	enum dwc_otg_control_phase	control_phase;
+
+	/** Keep track of the current split type
+	 * for FS/LS endpoints on a HS Hub */
+	u8		complete_split;
+
+	/** How many bytes transferred during SSPLIT OUT */
+	u32		ssplit_out_xfer_count;
+
+	/**
+	 * Holds the number of bus errors that have occurred for a transaction
+	 * within this transfer.
+	 */
+	u8 		error_count;
+
+	/**
+	 * Index of the next frame descriptor for an isochronous transfer. A
+	 * frame descriptor describes the buffer position and length of the
+	 * data to be transferred in the next scheduled (micro)frame of an
+	 * isochronous transfer. It also holds status for that transaction.
+	 * The frame index starts at 0.
+	 */
+	int		isoc_frame_index;
+
+	/** Position of the ISOC split on full/low speed */
+	u8		isoc_split_pos;
+
+	/** Position of the ISOC split in the buffer for the current frame */
+	u16		isoc_split_offset;
+
+	/** URB for this transfer */
+	struct urb 	*urb;
+
+	/** Entry in the QH's list of QTDs. */
+	struct list_head qtd_list_entry;
+
+	/* Field to track the qh pointer */
+	struct dwc_otg_qh *qtd_qh_ptr;
+	/** Indicates if this QTD is currently processed by HW. */
+	u8		in_process;
+
+	/** Number of DMA descriptors for this QTD */
+	u8		n_desc;
+
+	/**
+	 * Last activated frame(packet) index.
+	 * Used in Descriptor DMA mode only.
+	 */
+	u16 isoc_frame_index_last;
+
+} ;
+
+/**
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+struct dwc_otg_qh {
+	/**
+	 * Endpoint type.
+	 * One of the following values:
+	 * 	- USB_ENDPOINT_XFER_CONTROL
+	 *	- USB_ENDPOINT_XFER_ISOC
+	 *	- USB_ENDPOINT_XFER_BULK
+	 *	- USB_ENDPOINT_XFER_INT
+	 */
+	u8 		ep_type;
+	u8 		ep_is_in;
+
+	/** wMaxPacketSize Field of Endpoint Descriptor. */
+	u16		maxp;
+
+	/**
+	 * Device speed.
+	 *	USB_SPEED_UNKNOWN = 0,			 enumerating
+	 *	USB_SPEED_LOW, USB_SPEED_FULL,		 usb 1.1
+	 *	USB_SPEED_HIGH,				 usb 2.0
+	 *	USB_SPEED_WIRELESS,			 wireless (usb 2.5)
+	 *	USB_SPEED_SUPER,			 usb 3.0
+	 */
+	u8 		dev_speed;
+
+	/* Determines the PID of the next data packet for non-control
+	 * transfers. Ignored for control transfers.<br>
+	 * One of the following values:
+	 *	- DWC_OTG_HC_PID_DATA0
+	 * 	- DWC_OTG_HC_PID_DATA1
+	 */
+	u8		data_toggle;
+
+	/** Ping state if 1. */
+	u8 		ping_state;
+
+	/**
+	 * List of QTDs for this QH.
+	 */
+	struct list_head 	qtd_list;
+
+	/** Host channel currently processing transfers for this QH. */
+	struct dwc_hc		*channel;
+
+	/** QTD currently assigned to a host channel for this QH. */
+	struct dwc_otg_qtd	*qtd_in_process;
+
+	/** Full/low speed endpoint on high-speed hub requires split. */
+	u8		do_split;
+
+	/** Bandwidth in microseconds per (micro)frame. */
+	u16		usecs;
+
+	/** Interval between transfers in (micro)frames. */
+	u16		interval;
+
+	/**
+	 * (micro)frame to initialize a periodic transfer. The transfer
+	 * executes in the following (micro)frame.
+	 */
+	u16		sched_frame;
+
+	/** (micro)frame at which last start split was initialized. */
+	u16		start_split_frame;
+
+
+	/**
+	 * Used instead of original buffer if
+	 * it(physical address) is not dword-aligned.
+	 */
+	u8 		*dw_align_buf;
+	dma_addr_t 	dw_align_buf_dma;
+
+	/** @name Descriptor DMA support */
+
+	/** Descriptor List. */
+	struct dwc_otg_host_dma_desc	*desc_list;
+
+	/** Descriptor List physical address. */
+	dma_addr_t desc_list_dma;
+
+	/** Entry for QH in either the periodic or non-periodic schedule. */
+	struct list_head qh_list_entry;
+	/**
+	 * Xfer Bytes array.
+	 * Each element corresponds to a descriptor and indicates
+	 * original XferSize size value for the descriptor.
+	 */
+	u32		*n_bytes;
+
+	/** Actual number of transfer descriptors in a list. */
+	u16		ntd;
+
+	/** First activated isochronous transfer descriptor index. */
+	u8		td_first;
+	/** Last activated isochronous transfer descriptor index. */
+	u8		td_last;
+	/* access to toggles and TT */
+	struct usb_device	*dev;
+};
+
+/**
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules.
+ */
+struct dwc_otg_hcd {
+
+	spinlock_t		lock;
+
+	/** DWC OTG Core Interface Layer */
+	struct dwc_otg_core_if	*core_if;
+
+	/** Internal DWC HCD Flags */
+	union dwc_otg_hcd_internal_flags {
+		u32 d32;
+		struct {
+			unsigned port_connect_status_change:1;
+			unsigned port_connect_status:1;
+			unsigned port_reset_change:1;
+			unsigned port_enable_change:1;
+			unsigned port_suspend_change:1;
+			unsigned port_over_current_change:1;
+			unsigned port_l1_change:1;
+			unsigned reserved:26;
+		} b;
+	} flags;
+
+	/**
+	 * Inactive items in the non-periodic schedule. This is a list of
+	 * Queue Heads. Transfers associated with these Queue Heads are not
+	 * currently assigned to a host channel.
+	 */
+	struct list_head 	non_periodic_sched_inactive;
+
+	/**
+	 * Deferred items in the non-periodic schedule. This is a list of
+	 * Queue Heads. Transfers associated with these Queue Heads are not
+	 * currently assigned to a host channel.
+	 * When we get an NAK, the QH goes here.
+	 */
+	struct list_head 	non_periodic_sched_deferred;
+
+	/**
+	 * Active items in the non-periodic schedule. This is a list of
+	 * Queue Heads. Transfers associated with these Queue Heads are
+	 * currently assigned to a host channel.
+	 */
+	struct list_head 	non_periodic_sched_active;
+
+	/**
+	 * Pointer to the next Queue Head to process in the active
+	 * non-periodic schedule.
+	 */
+	struct list_head 	*non_periodic_qh_ptr;
+
+	/**
+	 * Inactive items in the periodic schedule. This is a list of QHs for
+	 * periodic transfers that are _not_ scheduled for the next frame.
+	 * Each QH in the list has an interval counter that determines when it
+	 * needs to be scheduled for execution. This scheduling mechanism
+	 * allows only a simple calculation for periodic bandwidth used (i.e.
+	 * must assume that all periodic transfers may need to execute in the
+	 * same frame). However, it greatly simplifies scheduling and should
+	 * be sufficient for the vast majority of OTG hosts, which need to
+	 * connect to a small number of peripherals at one time.
+	 *
+	 * Items move from this list to periodic_sched_ready when the QH
+	 * interval counter is 0 at SOF.
+	 */
+	struct list_head	periodic_sched_inactive;
+
+	/**
+	 * List of periodic QHs that are ready for execution in the next
+	 * frame, but have not yet been assigned to host channels.
+	 *
+	 * Items move from this list to periodic_sched_assigned as host
+	 * channels become available during the current frame.
+	 */
+	struct list_head	periodic_sched_ready;
+
+	/**
+	 * List of periodic QHs to be executed in the next frame that are
+	 * assigned to host channels.
+	 *
+	 * Items move from this list to periodic_sched_queued as the
+	 * transactions for the QH are queued to the DWC_otg controller.
+	 */
+	struct list_head	periodic_sched_assigned;
+
+	/**
+	 * List of periodic QHs that have been queued for execution.
+	 *
+	 * Items move from this list to either periodic_sched_inactive or
+	 * periodic_sched_ready when the channel associated with the transfer
+	 * is released. If the interval for the QH is 1, the item moves to
+	 * periodic_sched_ready because it must be rescheduled for the next
+	 * frame. Otherwise, the item moves to periodic_sched_inactive.
+	 */
+	struct list_head	periodic_sched_queued;
+
+	/**
+	 * Total bandwidth claimed so far for periodic transfers. This value
+	 * is in microseconds per (micro)frame. The assumption is that all
+	 * periodic transfers may occur in the same (micro)frame.
+	 */
+	u16			periodic_usecs;
+
+	/**
+	 * Frame number read from the core at SOF. The value ranges from 0 to
+	 * DWC_HFNUM_MAX_FRNUM.
+	 */
+	u16			frame_number;
+
+	/**
+	 * Free host channels in the controller. This is a list of
+	 * struct dwc_hc items.
+	 */
+	struct list_head 	free_hc_list;
+
+	/**
+	 * Number of host channels assigned to periodic transfers. Currently
+	 * assuming that there is a dedicated host channel for each periodic
+	 * transaction and at least one host channel available for
+	 * non-periodic transactions.
+	 */
+	int			periodic_channels;
+
+	/**
+	 * Number of host channels assigned to non-periodic transfers.
+	 */
+	int			non_periodic_channels;
+
+	/**
+	 * Wait queue for when all channels are free. This is scheduled when
+	 * periodic_channels and non_periodic_channels both reach 0.
+	 */
+	wait_queue_head_t	idleq;
+
+	/**
+	 * Array of pointers to the host channel descriptors. Allows accessing
+	 * a host channel descriptor given the host channel number. This is
+	 * useful in interrupt handlers.
+	 */
+	struct dwc_hc		*hc_ptr_array[MAX_EPS_CHANNELS];
+
+	/**
+	 * Buffer to use for any data received during the status phase of a
+	 * control transfer. Normally no data is transferred during the status
+	 * phase. This buffer is used as a bit bucket.
+	 */
+	u8			*status_buf;
+
+	/**
+	 * DMA address for status_buf.
+	 */
+	dma_addr_t		status_buf_dma;
+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64
+
+	/**
+	 * Structure to allow starting the HCD in a non-interrupt context
+	 * during an OTG role change.
+	 */
+	struct work_struct	start_work;
+
+	/**
+	 * Connection timer. An OTG host must display a message if the device
+	 * does not connect. Started when the VBus power is turned on via
+	 * sysfs attribute "buspower".
+	 */
+	struct timer_list 	conn_timer;
+
+	/* Tasklet to do a reset */
+	struct tasklet_struct	*reset_tasklet;
+
+	/** Frame List */
+	u32 *frame_list;
+
+	/** Frame List DMA address */
+	dma_addr_t frame_list_dma;
+#ifdef DEBUG
+	u32 		frrem_samples;
+	u64 		frrem_accum;
+
+	u32		hfnum_7_samples_a;
+	u64		hfnum_7_frrem_accum_a;
+	u32		hfnum_0_samples_a;
+	u64		hfnum_0_frrem_accum_a;
+	u32		hfnum_other_samples_a;
+	u64		hfnum_other_frrem_accum_a;
+
+	u32		hfnum_7_samples_b;
+	u64		hfnum_7_frrem_accum_b;
+	u32		hfnum_0_samples_b;
+	u64		hfnum_0_frrem_accum_b;
+	u32		hfnum_other_samples_b;
+	u64		hfnum_other_frrem_accum_b;
+#endif
+
+	struct device 		*dev;
+	struct dwc_otg_device   *otg_dev;
+};
+
+/** Gets the dwc_otg_hcd from a struct usb_hcd */
+static inline struct dwc_otg_hcd *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd)
+{
+	return (struct dwc_otg_hcd *)(hcd->hcd_priv);
+}
+
+/** Gets the struct usb_hcd that contains a struct dwc_otg_hcd. */
+static inline struct
+usb_hcd *dwc_otg_hcd_to_hcd(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv);
+}
+
+/**
+ * Get value of prt_sleep_sts field from the GLPMCFG register
+ */
+extern u32 dwc_otg_get_lpm_portsleepstatus(struct dwc_otg_core_if *core_if);
+
+/** @name HCD Create/Destroy Functions */
+/** @{ */
+extern int
+dwc_otg_hcd_init(struct device *_dev, struct dwc_otg_device * dwc_otg_device);
+extern void dwc_otg_hcd_remove(struct device *_dev);
+extern int dwc_otg_hcd_suspend(struct dwc_otg_hcd * dwc_otg_hcd);
+extern int dwc_otg_hcd_resume(struct dwc_otg_hcd * dwc_otg_hcd);
+/** @} */
+
+/** @name Linux HC Driver API Functions */
+/** @{ */
+
+extern int dwc_otg_hcd_start(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_free(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+				   struct urb *urb,
+				   gfp_t mem_flags);
+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
+/*				   struct usb_host_endpoint *ep,*/
+				   struct urb *urb, int status);
+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+					 struct usb_host_endpoint *ep);
+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd,
+				       char *buf);
+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
+				   u16 typeReq,
+				   u16 wValue,
+				   u16 wIndex,
+				   char *buf,
+				   u16 wLength);
+
+/** @} */
+
+/** @name Transaction Execution Functions */
+/** @{ */
+extern enum dwc_otg_transaction_type
+__dwc_otg_hcd_select_transactions(struct dwc_otg_hcd *hcd, int locked);
+
+extern void
+dwc_otg_hcd_queue_transactions(struct dwc_otg_hcd *hcd,
+			       enum dwc_otg_transaction_type tr_type);
+
+extern void dwc_otg_hcd_complete_urb(struct dwc_otg_hcd *hcd, struct urb *urb,
+				     int status);
+/** @} */
+
+/** @name Interrupt Handler Functions */
+/** @{ */
+extern int
+dwc_otg_hcd_handle_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_sof_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_rx_status_q_level_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_np_tx_fifo_empty_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_incomplete_periodic_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_port_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_conn_id_status_change_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_disconnect_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_hc_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_hc_n_intr(struct dwc_otg_hcd *dwc_otg_hcd, u32 _num);
+extern int
+dwc_otg_hcd_handle_session_req_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+extern int
+dwc_otg_hcd_handle_wakeup_detected_intr(struct dwc_otg_hcd *dwc_otg_hcd);
+/** @} */
+
+
+/** @name Schedule Queue Functions */
+/** @{ */
+
+/* Implemented in dwc_otg_hcd_queue.c */
+extern bool dwc_otg_hcd_idle(struct dwc_otg_hcd *hcd);
+extern struct dwc_otg_qh *
+dwc_otg_hcd_qh_create(struct dwc_otg_hcd *hcd, struct urb *urb);
+extern void
+dwc_otg_hcd_qh_init(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+		struct urb *urb);
+extern void
+dwc_otg_hcd_qh_free(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh, int locked);
+extern int
+dwc_otg_hcd_qh_add(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh);
+extern int
+__dwc_otg_hcd_qh_add(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh);
+extern void
+__dwc_otg_hcd_qh_remove(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh);
+static inline void dwc_otg_hcd_qh_remove(struct dwc_otg_hcd *hcd,
+					 struct dwc_otg_qh *qh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&hcd->lock, flags);
+	__dwc_otg_hcd_qh_remove(hcd, qh);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+}
+extern void
+__dwc_otg_hcd_qh_deactivate(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			    int sched_csplit);
+static inline void
+dwc_otg_hcd_qh_deactivate(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			  int sched_csplit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	__dwc_otg_hcd_qh_deactivate(hcd, qh, sched_csplit);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+}
+extern int
+dwc_otg_hcd_qh_deferr(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+		int delay);
+
+/** Remove and free a QH */
+static inline void dwc_otg_hcd_qh_remove_and_free(struct dwc_otg_hcd *hcd,
+						  struct dwc_otg_qh *qh,
+						  int already_locked)
+{
+	dwc_otg_hcd_qh_remove(hcd, qh);
+	dwc_otg_hcd_qh_free(hcd, qh, already_locked);
+}
+
+/** Allocates memory for a QH structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline struct dwc_otg_qh *dwc_otg_hcd_qh_alloc(void)
+{
+	/* FIXME use in_irq() to decide whether to use ATOMIC
+	 * FIXME check that null result is handled */
+	return kmalloc(sizeof(struct dwc_otg_qh), GFP_ATOMIC);
+}
+
+extern struct dwc_otg_qtd *dwc_otg_hcd_qtd_create(struct urb *urb);
+
+extern void dwc_otg_hcd_qtd_init(struct dwc_otg_qtd *qtd, struct urb *urb);
+
+extern int
+dwc_otg_hcd_qtd_add(struct dwc_otg_qtd *qtd, struct dwc_otg_hcd *dwc_otg_hcd);
+
+/** Allocates memory for a QTD structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline struct dwc_otg_qtd *dwc_otg_hcd_qtd_alloc(void)
+{
+	return kmalloc(sizeof(struct dwc_otg_qtd), GFP_ATOMIC);
+}
+
+/** Frees the memory for a QTD structure.  QTD should already be removed from
+ * list.
+ * @param[in] _qtd QTD to free.*/
+static inline void dwc_otg_hcd_qtd_free(struct dwc_otg_qtd *qtd)
+{
+	kfree(qtd);
+}
+
+/** Removes a QTD from list.
+ * @param[in] _qtd QTD to remove from list. */
+static inline void __dwc_otg_hcd_qtd_remove(struct dwc_otg_hcd *hcd,
+					    struct dwc_otg_qtd *qtd,
+					    struct dwc_otg_qh *qh)
+{
+	list_del(&qtd->qtd_list_entry);
+}
+
+static inline void dwc_otg_hcd_qtd_remove(struct dwc_otg_hcd *hcd,
+					  struct dwc_otg_qtd *qtd,
+					  struct dwc_otg_qh *qh)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&hcd->lock, flags);
+	__dwc_otg_hcd_qtd_remove(hcd, qtd, qh);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+}
+
+/** Remove and free a QTD */
+static inline void __dwc_otg_hcd_qtd_remove_and_free(struct dwc_otg_hcd *hcd,
+						     struct dwc_otg_qtd *qtd,
+						     struct dwc_otg_qh *qh)
+{
+	WARN_ON(list_empty(&qtd->qtd_list_entry));
+
+	__dwc_otg_hcd_qtd_remove(hcd, qtd, qh);
+	dwc_otg_hcd_qtd_free(qtd);
+}
+
+static inline void dwc_otg_hcd_qtd_remove_and_free(struct dwc_otg_hcd *hcd,
+						   struct dwc_otg_qtd *qtd,
+						   struct dwc_otg_qh *qh)
+{
+	WARN_ON(list_empty(&qtd->qtd_list_entry));
+
+	dwc_otg_hcd_qtd_remove(hcd, qtd, qh);
+	dwc_otg_hcd_qtd_free(qtd);
+}
+
+/** @} */
+
+
+/** @name Descriptor DMA Supporting Functions */
+/** @{ */
+
+extern void dwc_otg_hcd_start_xfer_ddma(struct dwc_otg_hcd *hcd,
+		struct dwc_otg_qh *qh);
+extern void
+dwc_otg_hcd_complete_xfer_ddma(struct dwc_otg_hcd *hcd,
+				struct dwc_hc *hc,
+				struct dwc_otg_hc_regs __iomem *hc_regs,
+				enum dwc_otg_halt_status halt_status);
+
+extern int
+dwc_otg_hcd_qh_init_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh);
+extern void
+dwc_otg_hcd_qh_free_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh);
+
+/** @} */
+
+/** @name Internal Functions */
+/** @{ */
+struct dwc_otg_qh *dwc_urb_to_qh(struct urb *urb);
+void dwc_otg_hcd_dump_frrem(struct dwc_otg_hcd *hcd);
+void dwc_otg_hcd_dump_state(struct dwc_otg_hcd *hcd);
+/** @} */
+#ifdef CONFIG_USB_DWC_OTG_LPM
+extern int dwc_otg_hcd_get_hc_for_lpm_tran(struct dwc_otg_hcd *hcd,
+					   u8 devaddr);
+extern void dwc_otg_hcd_free_hc_from_lpm(struct dwc_otg_hcd *hcd);
+#endif
+
+/** Gets the usb_host_endpoint associated with an URB. */
+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb)
+{
+	struct usb_device *dev = urb->dev;
+	int ep_num = usb_pipeendpoint(urb->pipe);
+
+	if (usb_pipein(urb->pipe))
+		return dev->ep_in[ep_num];
+	else
+		return dev->ep_out[ep_num];
+}
+
+/**
+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (possible 32 endpoints per device).
+ */
+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \
+	((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+    ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
+
+/** Gets the QH that contains the list_head */
+#define dwc_list_to_qh(_list_head_ptr_) \
+	(container_of(_list_head_ptr_, struct dwc_otg_qh, qh_list_entry))
+
+/** Gets the QTD that contains the list_head */
+#define dwc_list_to_qtd(_list_head_ptr_) \
+	(container_of(_list_head_ptr_, struct dwc_otg_qtd, qtd_list_entry))
+
+/** Check if QH is non-periodic  */
+#define dwc_qh_is_non_per(_qh_ptr_) \
+	((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \
+	(_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL))
+
+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+/** Packet size for any kind of endpoint descriptor */
+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/**
+ * Returns true if frame1 is less than or equal to frame2. The comparison is
+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_le(u16 frame1, u16 frame2)
+{
+	return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <=
+		(DWC_HFNUM_MAX_FRNUM >> 1);
+}
+
+/**
+ * Returns true if frame1 is greater than frame2. The comparison is done
+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_gt(u16 frame1, u16 frame2)
+{
+	return (frame1 != frame2) &&
+		(((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) <
+		 (DWC_HFNUM_MAX_FRNUM >> 1));
+}
+
+/**
+ * Increments frame by the amount specified by _inc. The addition is done
+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline u16 dwc_frame_num_inc(u16 frame, u16 inc)
+{
+	return (frame + inc) & DWC_HFNUM_MAX_FRNUM;
+}
+
+static inline u16 dwc_full_frame_num(u16 frame)
+{
+	return ((frame) & DWC_HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline u16 dwc_micro_frame_num(u16 frame)
+{
+	return (frame) & 0x7;
+}
+void dwc_otg_hcd_save_data_toggle(struct dwc_hc *hc,
+					struct dwc_otg_hc_regs __iomem *hc_regs,
+					struct dwc_otg_qtd *qtd);
+
+#ifdef DEBUG
+/**
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define dwc_sample_frrem(_hcd, _qh, _letter) \
+{ \
+	hfnum_data_t hfnum; \
+	struct dwc_otg_qtd *qtd; \
+	qtd = list_entry(_qh->qtd_list.next, \
+			struct dwc_otg_qtd, qtd_list_entry); \
+	if (usb_pipeint(qtd->urb->pipe) && \
+			_qh->start_split_frame != 0 && !qtd->complete_split) { \
+		hfnum.d32 = \
+			dwc_read_reg32(&_hcd->core_if->host_if->\
+					host_global_regs->hfnum); \
+		switch (hfnum.b.frnum & 0x7) { \
+		case 7: \
+			_hcd->hfnum_7_samples_##_letter++; \
+			_hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
+			break; \
+		case 0: \
+			_hcd->hfnum_0_samples_##_letter++; \
+			_hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
+			break; \
+		default: \
+			_hcd->hfnum_other_samples_##_letter++; \
+			_hcd->hfnum_other_frrem_accum_##_letter += \
+							hfnum.b.frrem; \
+			break; \
+		} \
+	} \
+}
+#else
+#define dwc_sample_frrem(_hcd, _qh, _letter)
+#endif
+#endif
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd_ddma.c b/drivers/usb/dwc_otg/dwc_otg_hcd_ddma.c
new file mode 100644
index 0000000..e84cdc8
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_ddma.c
@@ -0,0 +1,1282 @@
+/*==========================================================================$
+ *
+ *Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ *"Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ *otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ *The Software IS NOT an item of Licensed Software or Licensed Product under
+ *any End User Software License Agreement or Agreement for Licensed Product
+ *with Synopsys or any supplement thereto. You are permitted to use and
+ *redistribute this Software in source and binary forms, with or without
+ *modification, provided that redistributions of source code must retain this
+ *notice. You may not view, use, disclose, copy or distribute this file or
+ *any information contained herein except pursuant to this license grant from
+ *Synopsys. If you do not agree with this notice, including the disclaimer
+ *below, then you are not authorized to use the Software.
+ *
+ *THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ *(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ *DAMAGE.
+ *========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+
+/**@file
+ *This file contains Descriptor DMA support implementation for host mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+static const int LOCKED = 1;
+
+/* Map a frame number to its Frame List slot (list length is a power of two). */
+static inline u8 frame_list_idx(u16 frame)
+{
+	u16 slot = frame & (MAX_FRLIST_EN_NUM - 1);
+
+	return slot;
+}
+
+/*
+ * Advance a descriptor-list index by @inc, wrapping at the list length;
+ * HS ISOC lists and generic lists have different power-of-two sizes.
+ */
+static inline u16 desclist_idx_inc(u16 idx, u16 inc, u8 speed)
+{
+	u16 mask;
+
+	if (speed == USB_SPEED_HIGH)
+		mask = MAX_DMA_DESC_NUM_HS_ISOC - 1;
+	else
+		mask = MAX_DMA_DESC_NUM_GENERIC - 1;
+
+	return (idx + inc) & mask;
+}
+
+/*
+ * Step a descriptor-list index back by @inc, wrapping at the list length;
+ * HS ISOC lists and generic lists have different power-of-two sizes.
+ */
+static inline u16 desclist_idx_dec(u16 idx, u16 inc, u8 speed)
+{
+	u16 mask;
+
+	if (speed == USB_SPEED_HIGH)
+		mask = MAX_DMA_DESC_NUM_HS_ISOC - 1;
+	else
+		mask = MAX_DMA_DESC_NUM_GENERIC - 1;
+
+	return (idx - inc) & mask;
+}
+
+/* Number of DMA descriptors allocated for a QH: HS ISOC lists are larger. */
+static inline u16 max_desc_num(struct dwc_otg_qh *qh)
+{
+	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
+	    qh->dev_speed == USB_SPEED_HIGH)
+		return MAX_DMA_DESC_NUM_HS_ISOC;
+
+	return MAX_DMA_DESC_NUM_GENERIC;
+}
+/*
+ * Per-slot increment for this QH: HS intervals are in micro-frames, so
+ * round up to whole frames; otherwise the interval is used as-is.
+ */
+static inline u16 frame_incr_val(struct dwc_otg_qh *qh)
+{
+	if (qh->dev_speed == USB_SPEED_HIGH)
+		return (qh->interval + 7) / 8;
+
+	return qh->interval;
+}
+
+/*
+ * Allocate the coherent DMA descriptor list and the per-descriptor
+ * byte-count array for a QH.
+ *
+ * Fix: the original fell through and memset() a NULL desc_list when
+ * dma_alloc_coherent() failed; bail out before touching the buffer.
+ * On n_bytes allocation failure the descriptor list is released again so
+ * the function either fully succeeds or leaves the QH untouched.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int desc_list_alloc(struct dwc_otg_qh *qh, struct device *dev)
+{
+	qh->desc_list = dma_alloc_coherent(dev,
+			sizeof(struct dwc_otg_host_dma_desc) * max_desc_num(qh),
+			&qh->desc_list_dma, GFP_ATOMIC);
+
+	if (!qh->desc_list) {
+		DWC_ERROR("%s: DMA descriptor list allocation failed\n",
+			  __func__);
+		return -ENOMEM;
+	}
+
+	memset(qh->desc_list, 0x00,
+	       sizeof(struct dwc_otg_host_dma_desc) * max_desc_num(qh));
+
+	qh->n_bytes = kmalloc(sizeof(u32) * max_desc_num(qh), GFP_ATOMIC);
+
+	if (!qh->n_bytes) {
+		DWC_ERROR("%s: Failed to allocate descriptor size array\n",
+			  __func__);
+		/* Undo the first allocation so callers can simply fail. */
+		dma_free_coherent(dev,
+				  sizeof(struct dwc_otg_host_dma_desc)
+					  * max_desc_num(qh),
+				  qh->desc_list, qh->desc_list_dma);
+		qh->desc_list = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Free the descriptor list and byte-count array created by
+ * desc_list_alloc().  Safe to call when nothing is allocated.
+ *
+ * Fix: dma_free_coherent() must be passed the same size that was given to
+ * dma_alloc_coherent(); the original passed only the descriptor count,
+ * not the byte size.
+ */
+static void desc_list_free(struct dwc_otg_qh *qh, struct device *dev)
+{
+	if (qh->desc_list) {
+		dma_free_coherent(dev,
+				  sizeof(struct dwc_otg_host_dma_desc)
+					  * max_desc_num(qh),
+				  qh->desc_list, qh->desc_list_dma);
+		qh->desc_list = NULL;
+	}
+
+	kfree(qh->n_bytes);
+	qh->n_bytes = NULL;
+}
+
+/*
+ * Allocate (once) the coherent Frame List used for periodic scheduling;
+ * each of the MAX_FRLIST_EN_NUM slots is 4 bytes wide.  Returns 0 on
+ * success or if a list already exists, -ENOMEM on failure.
+ */
+static int frame_list_alloc(struct dwc_otg_hcd *hcd)
+{
+	if (hcd->frame_list)
+		return 0;
+
+	hcd->frame_list = dma_alloc_coherent(hcd->dev,
+					     4 * MAX_FRLIST_EN_NUM,
+					     &hcd->frame_list_dma,
+					     GFP_ATOMIC);
+	if (!hcd->frame_list) {
+		DWC_ERROR("%s: Frame List allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);
+
+	return 0;
+}
+
+/* Release the periodic Frame List, if one was allocated. */
+static void frame_list_free(struct dwc_otg_hcd *hcd)
+{
+	if (!hcd->frame_list)
+		return;
+
+	dwc_free_frame_list(hcd);
+}
+
+static void dwc_free_frame_list(struct dwc_otg_hcd *hcd)
+{
+	dma_free_coherent(hcd->dev, 4 * MAX_FRLIST_EN_NUM,
+			  hcd->frame_list, hcd->frame_list_dma);
+	hcd->frame_list = NULL;
+}
+
+/*
+ * Program the Frame List base address and length into the host registers
+ * and enable the periodic schedule.  Does nothing if the schedule is
+ * already running.
+ * @fr_list_en: Frame List length; must be one of 8, 16, 32 or 64.
+ */
+static void per_sched_enable(struct dwc_otg_hcd *hcd, u16 fr_list_en)
+{
+	union hcfg_data hcfg;
+
+	hcfg.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hcfg);
+
+	if (hcfg.b.perschedstat)
+		/*already enabled*/
+		return;
+
+	/* Frame List base must be programmed before enabling the schedule. */
+	dwc_write_reg32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
+			hcd->frame_list_dma);
+
+	switch (fr_list_en) {
+	case 64:
+		hcfg.b.frlisten = 3;
+		break;
+	case 32:
+		hcfg.b.frlisten = 2;
+		break;
+	case 16:
+		hcfg.b.frlisten = 1;
+		break;
+	case 8:
+		hcfg.b.frlisten = 0;
+		break;
+	default:
+		/* Unsupported length: frlisten keeps its current value. */
+		break;
+	}
+
+	hcfg.b.perschedena = 1;
+
+	DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
+	dwc_write_reg32(&hcd->core_if->host_if->host_global_regs->hcfg,
+			hcfg.d32);
+}
+
+/* Turn off the host periodic schedule, if it is currently running. */
+static void per_sched_disable(struct dwc_otg_hcd *hcd)
+{
+	union hcfg_data hcfg;
+
+	hcfg.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hcfg);
+
+	/* Nothing to do when the schedule is already stopped. */
+	if (!hcfg.b.perschedstat)
+		return;
+
+	DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
+	hcfg.b.perschedena = 0;
+	dwc_write_reg32(&hcd->core_if->host_if->host_global_regs->hcfg,
+			hcfg.d32);
+}
+
+/*
+ * Activates/Deactivates FrameList entries for the channel
+ * based on endpoint servicing period.
+ */
+static void update_frame_list(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			      u8 enable)
+{
+	u16 i, j, inc;
+	struct dwc_hc *hc = qh->channel;
+
+	inc = frame_incr_val(qh);
+
+	/* ISOC starts at its scheduled slot; INT starts from slot 0. */
+	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
+		i = frame_list_idx(qh->sched_frame);
+	else
+		i = 0;
+
+	/* Toggle this channel's bit in every list slot the QH services. */
+	j = i;
+	do {
+		if (enable)
+			hcd->frame_list[j] |= (1 << hc->hc_num);
+		else
+			hcd->frame_list[j] &= ~(1 << hc->hc_num);
+		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
+	} while (j != i);
+
+	if (!enable)
+		return;
+
+	/*
+	 * NOTE(review): schinfo appears to be a per-micro-frame enable mask
+	 * for the channel — confirm against the core databook.
+	 */
+	hc->schinfo = 0;
+
+/*
+ * Bug Fix from Synopsys:
+ *
+ * Target Fix Date: 2009-07-06
+ *
+ * Title: Software Driver Disconnect Issue When a High-Speed Hub
+ *	  is Connected With OTG
+ *
+ * Impacted Configuration: Enabled Host mode and disabled "point to point
+ * application only" configuration option in the coreConsultant
+ *
+ * Description: When the HS HUB is connected to OTG and the HS device is
+ * connected to the downstream port of the Hub (not SPLIT), then the USB HS
+ * Device is recognized correctly. In addition, data transfer between OTG FPGA
+ * and USB HS Device completes correctly.
+ *
+ * However, if the HS USB Device is disconnected from the HS HUB, OTG does not
+ * detect the disconnect. After disconnection, the HS USB Device is plugged
+ * again, but OTG does not detect the connection.
+ * Software code Fix: There is update_frame_list() function in
+ * dwc_otg_hcd_ddma.c file.
+ *
+ * Note the following code towards the end of this function:
+ *
+ * if (qh->channel->speed == USB_SPEED_HIGH) {
+ *	j = 1;
+ *	for (i = 0 ; i< 8 / qh->interval ; i++){
+ *		hc->schinfo |= j;
+ *		j = j << qh->interval;
+ *	}
+ * }
+ *
+ * This code must be replaced with:
+ *
+ * if (qh->channel->speed == USB_SPEED_HIGH) {
+ *	j = 1;
+ *	inc = (8 + qh->interval - 1) / qh->interval;
+ *	for (i = 0 ; i < inc ; i++) {
+ *		hc->schinfo |= j;
+ *		j = j << qh->interval;
+ *	}
+ * }
+ *
+ */
+	if (qh->channel->speed == USB_SPEED_HIGH) {
+		j = 1;
+		inc = (8 + qh->interval - 1) / qh->interval;
+		for (i = 0 ; i < inc ; i++) {
+			hc->schinfo |= j;
+			j = j << qh->interval;
+		}
+	} else
+		hc->schinfo = 0xff;
+}
+
+/*
+ * Detach the host channel from @qh and return it to the free pool.
+ * Non-periodic: drops the active-channel count and wakes any idle waiter;
+ * periodic: clears the channel's Frame List bits instead.  The QH's
+ * descriptor list is zeroed so stale descriptors cannot be reused.
+ */
+static void release_channel_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	struct dwc_hc *hc = qh->channel;
+	if (dwc_qh_is_non_per(qh)) {
+		hcd->non_periodic_channels--;
+		if (dwc_otg_hcd_idle(hcd))
+			wake_up_interruptible(&hcd->idleq);
+	} else if (hcd->frame_list) {
+		update_frame_list(hcd, qh, 0);
+	}
+
+	/*
+	 * The condition is added to prevent double cleanup try in case of
+	 * device disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb()
+	 */
+	if (hc->qh) {
+		dwc_otg_hc_cleanup(hcd->core_if, hc);
+		list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
+		hc->qh = NULL;
+	}
+
+	qh->channel = NULL;
+	qh->ntd = 0;
+
+	if (qh->desc_list)
+		memset(qh->desc_list, 0x00,
+			    sizeof(struct dwc_otg_host_dma_desc)
+				    * max_desc_num(qh));
+
+}
+
+/**
+ * Initializes a QH structure's Descriptor DMA related members.
+ * Allocates memory for descriptor list.
+ * On first periodic QH, allocates memory for FrameList
+ * and enables periodic scheduling.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh The QH to init.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_init_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	int retval;
+
+	if (qh->do_split) {
+		DWC_ERROR("SPLIT Transfers are not supported "
+				"in Descriptor DMA.\n");
+		return -1;
+	}
+
+	retval = desc_list_alloc(qh, hcd->dev);
+
+	/*
+	 * First periodic QH also triggers Frame List allocation and
+	 * enabling of the periodic schedule.
+	 */
+	if (retval == 0 &&
+	    (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
+	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
+	    !hcd->frame_list) {
+		retval = frame_list_alloc(hcd);
+		if (retval == 0)
+			per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
+	}
+
+	qh->ntd = 0;
+
+	return retval;
+}
+
+/**
+ * Frees descriptor list memory associated with the QH.
+ * If QH is periodic and the last, frees FrameList memory
+ * and disables periodic scheduling.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh The QH to init.
+ */
+void dwc_otg_hcd_qh_free_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	desc_list_free(qh, hcd->dev);
+
+	/*
+	 * Channel still assigned due to some reasons.
+	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
+	 * ChHalted interrupt to release the channel. Afterwards
+	 * when it comes here from endpoint disable routine
+	 * channel remains assigned.
+	 */
+	if (qh->channel)
+		release_channel_ddma(hcd, qh);
+
+	/*
+	 * Last periodic QH gone: stop the periodic schedule and release
+	 * the Frame List.
+	 */
+	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
+		qh->ep_type == USB_ENDPOINT_XFER_INT)
+		&& !hcd->periodic_channels && hcd->frame_list) {
+
+		per_sched_disable(hcd);
+		frame_list_free(hcd);
+	}
+}
+
+/*
+ * Convert a Frame List index into the starting descriptor index for @qh.
+ * NOTE(review): the u8 return assumes descriptor list sizes stay within
+ * 256 entries — confirm against the MAX_DMA_DESC_NUM_* definitions.
+ */
+static u8 frame_to_desc_idx(struct dwc_otg_qh *qh, u16 frame_idx)
+{
+	if (qh->dev_speed == USB_SPEED_HIGH)
+		/*
+		 * Descriptor set(8 descriptors) index
+		 * which is 8-aligned.
+		 */
+		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
+	else
+		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
+}
+
+/*
+ * Determine the starting frame for an Isochronous transfer.  A few frames
+ * are skipped to prevent a race with the HC, which fetches the descriptor
+ * for the frame after the current one.  *skip_frames is set to the number
+ * of (micro)frames skipped so the caller can limit how many descriptors
+ * it activates.
+ *
+ * Fix: frame numbers span 14 bits (and still exceed 8 bits after the
+ * full-frame conversion), so the previous u8 return type truncated the
+ * result; return u16 instead.  Both callers already store it in u16.
+ */
+static u16 calc_starting_frame(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			       u8 *skip_frames)
+{
+	u16 frame = 0;
+
+	hcd->frame_number =
+		dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+
+	/* sched_frame is always frame number(not uFrame) both in FS and HS ! */
+
+	/*
+	 * skip_frames is used to limit activated descriptors number
+	 * to avoid the situation when HC services the last activated
+	 * descriptor firstly.
+	 * Example for FS:
+	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
+	 * the descriptor corresponding to curr_frame+1, the descriptor
+	 * corresponding to frame 2 will be fetched. If the number of
+	 * descriptors is max=64 (or greater) the list will be fully programmed
+	 * with Active descriptors and it is possible case(rare) that the latest
+	 * descriptor(considering rollback) corresponding to frame 2 will be
+	 * serviced first. HS case is more probable because, in fact,
+	 * up to 11 uframes(16 in the code) may be skipped.
+	 */
+	if (qh->dev_speed == USB_SPEED_HIGH) {
+		/*
+		 * Consider uframe counter also, to start xfer asap.
+		 * If half of the frame elapsed skip 2 frames otherwise
+		 * just 1 frame.
+		 * Starting descriptor index must be 8-aligned, so
+		 * if the current frame is near to complete the next one
+		 * is skipped as well.
+		 */
+		if (dwc_micro_frame_num(hcd->frame_number) >= 5)
+			*skip_frames = 2 * 8;
+		else
+			*skip_frames = 1 * 8;
+
+		frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
+		frame = dwc_full_frame_num(frame);
+	} else {
+		/*
+		 * Two frames are skipped for FS - the current and the next.
+		 * But for descriptor programming, 1 frame(descriptor) is enough
+		 * see example above.
+		 */
+		*skip_frames = 1;
+		frame = dwc_frame_num_inc(hcd->frame_number, 2);
+	}
+
+	return frame;
+}
+/*
+ *Calculate initial descriptor index for isochronous transfer
+ *based on scheduled frame.
+ */
+static u8 recalc_initial_desc_idx(struct dwc_otg_hcd *hcd,
+				  struct dwc_otg_qh *qh)
+{
+	u16 frame = 0, fr_idx, fr_idx_tmp;
+	u8 skip_frames = 0 ;
+	/*
+	 * With current ISOC processing algorithm the channel is being
+	 * released when no more QTDs in the list(qh->ntd == 0).
+	 * Thus this function is called only when qh->ntd == 0 and
+	 * qh->channel == 0.
+	 *
+	 * So qh->channel != NULL branch is not used and just not removed from
+	 * the source file. It is required for another possible approach which
+	 * is, do not disable and release the channel when ISOC session
+	 * completed, just move QH to inactive schedule until new QTD arrives.
+	 * On new QTD, the QH moved back to 'ready' schedule,
+	 * starting frame and therefore starting desc_index are recalculated.
+	 * In this case channel is released only on ep_disable.
+	 */
+
+	/*
+	 * Calculate starting descriptor index.
+	 * For INTERRUPT endpoint it is always 0.
+	 */
+	if (qh->channel) {
+		frame = calc_starting_frame(hcd, qh, &skip_frames);
+		/*
+		 * Calculate initial descriptor index based on FrameList
+		 * current bitmap and servicing period.
+		 */
+		fr_idx_tmp = frame_list_idx(frame);
+		fr_idx = (MAX_FRLIST_EN_NUM +
+				frame_list_idx(qh->sched_frame) - fr_idx_tmp)
+				% frame_incr_val(qh);
+		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
+	} else {
+		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
+		fr_idx = frame_list_idx(qh->sched_frame);
+	}
+
+	/* Both descriptor scan pointers start at the computed index. */
+	qh->td_first = qh->td_last =  frame_to_desc_idx(qh, fr_idx);
+
+	/* Returns the number of (micro)frames skipped, not an index. */
+	return skip_frames;
+}
+
+#define	ISOC_URB_GIVEBACK_ASAP
+
+/* Per-descriptor ISOC payload limits: 1023 bytes FS, 3072 bytes HS. */
+#define MAX_ISOC_XFER_SIZE_FS 1023
+#define MAX_ISOC_XFER_SIZE_HS 3072
+#define DESCNUM_THRESHOLD 4
+
+/*
+ * Program ISOC descriptors for every queued QTD frame, starting at
+ * qh->td_last and stepping by the QH's interval.  At most ntd_max
+ * descriptors are kept active at once; IOC is requested on each URB's
+ * last frame (ISOC_URB_GIVEBACK_ASAP) and on the final descriptor when
+ * the list fills up.
+ */
+static void init_isoc_dma_desc(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			       u8 skip_frames)
+{
+	struct usb_iso_packet_descriptor *frame_desc;
+	struct list_head *pos;
+	struct dwc_otg_qtd *qtd;
+	struct dwc_otg_host_dma_desc	*dma_desc;
+	u16 idx, inc, n_desc, ntd_max, max_xfer_size;
+
+	idx = qh->td_last;
+	inc = qh->interval;
+	n_desc = 0;
+
+	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
+	/* Frames already skipped by the start calculation cannot be used. */
+	if (skip_frames && !qh->channel)
+		ntd_max = ntd_max - skip_frames / qh->interval;
+
+	max_xfer_size = (qh->dev_speed == USB_SPEED_HIGH) ?
+				MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
+
+	list_for_each(pos, &qh->qtd_list) {
+		qtd = dwc_list_to_qtd(pos);
+		while ((qh->ntd < ntd_max) &&
+			(qtd->isoc_frame_index_last <
+					qtd->urb->number_of_packets)) {
+
+			dma_desc = &qh->desc_list[idx];
+			memset(dma_desc, 0x00,
+					sizeof(struct dwc_otg_host_dma_desc));
+
+			frame_desc =
+				&qtd->urb->iso_frame_desc[qtd->isoc_frame_index_last];
+
+			/* Clamp each descriptor to the HW payload limit. */
+			if (frame_desc->length > max_xfer_size)
+				qh->n_bytes[idx] = max_xfer_size;
+			else
+				qh->n_bytes[idx] = frame_desc->length;
+			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
+			dma_desc->status.b_isoc.a = 1;
+
+			dma_desc->buf = qtd->urb->transfer_dma
+						+ frame_desc->offset;
+
+			qh->ntd++;
+
+			qtd->isoc_frame_index_last++;
+
+#ifdef	ISOC_URB_GIVEBACK_ASAP
+			/*
+			 * Set IOC for each descriptor corresponding to the
+			 * last frame of the URB.
+			 */
+
+			if (qtd->isoc_frame_index_last ==
+					qtd->urb->number_of_packets)
+				dma_desc->status.b_isoc.ioc = 1;
+
+#endif
+			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
+			n_desc++;
+
+		}
+		qtd->in_process = 1;
+	}
+
+	qh->td_last = idx;
+
+#ifdef	ISOC_URB_GIVEBACK_ASAP
+	/*Set IOC for the last descriptor if descriptor list is full */
+	if (qh->ntd == ntd_max) {
+		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+		qh->desc_list[idx].status.b_isoc.ioc = 1;
+	}
+#else
+	/*
+	 * Set IOC bit only for one descriptor.
+	 */
+
+	if (n_desc > DESCNUM_THRESHOLD) {
+		/*
+		 * Move IOC "up". Required even if there is only one QTD
+		 * in the list, because QTDs might continue to be queued,
+		 * but during the activation it was only one queued.
+		 * Actually more than one QTD might be in the list if this
+		 * function called from XferCompletion - QTDs was queued during
+		 * HW processing of the previous descriptor chunk.
+		 */
+		idx = desclist_idx_dec(idx,
+				inc * ((qh->ntd + 1) / 2),
+				qh->dev_speed);
+	} else {
+		/*
+		 * Set the IOC for the latest descriptor
+		 * if either number of descriptor is not greater than threshold
+		 * or no more new descriptors activated.
+		 */
+		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+	}
+
+	qh->desc_list[idx].status.b_isoc.ioc = 1;
+#endif
+}
+
+/*
+ * Fill the descriptor list for a Control/Bulk/Interrupt transfer from the
+ * QH's QTD list.  Only the first QTD is programmed per activation (see the
+ * IOC workaround note below); IN transfer lengths are rounded up to a
+ * multiple of the endpoint's max packet size as the HW requires.
+ */
+static void init_non_isoc_dma_desc(struct dwc_otg_hcd *hcd,
+				   struct dwc_otg_qh *qh)
+{
+	struct list_head *pos;
+	struct dwc_hc *hc;
+	struct dwc_otg_host_dma_desc	*dma_desc;
+	struct dwc_otg_qtd *qtd;
+	int	num_packets, len, n_desc = 0, i = 0;
+
+	hc =  qh->channel;
+
+	/*
+	 * Start with hc->xfer_buff initialised in
+	 * assign_and_init_hc(), then if SG transfer consists of multiple URBs,
+	 * this pointer re-assigned to the buffer of the currently processed QTD
+	 * For non-SG request there is always one QTD active.
+	 */
+
+	list_for_each(pos, &qh->qtd_list) {
+
+		/*
+		 * Setting the IOC on the end descriptor of each QTD doesn't
+		 * generate an interrupt correctly (HW bug??)
+		 * so for now only process 1 QTD on the list, when this request
+		 * completes any more on the list gets re-submitted
+		 */
+		if (i++ > 0)
+			break;
+
+
+		qtd = dwc_list_to_qtd(pos);
+		if (n_desc) {
+			/* more than 1 QTDs for this request*/
+			hc->xfer_buff =
+				(u8 *)qtd->urb->transfer_dma
+						+ qtd->urb->actual_length;
+			hc->xfer_len =
+				qtd->urb->transfer_buffer_length
+						- qtd->urb->actual_length;
+		}
+
+		WARN_ON(hc->xfer_len > MAX_DMA_DESC_SIZE *
+					MAX_DMA_DESC_NUM_GENERIC);
+
+		qtd->n_desc = 0;
+
+		do {
+			dma_desc = &qh->desc_list[n_desc];
+			len = hc->xfer_len;
+
+
+			/*
+			 * Clamp so the IN round-up below still fits within
+			 * one descriptor.
+			 */
+			if (len > MAX_DMA_DESC_SIZE)
+				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;
+
+			if (hc->ep_is_in) {
+
+				/* Note we must program the IN descriptor size
+				 * to be a multiple of the max packet size
+				 * for the endpoint, but the size of the
+				 * dma buffer (transfer_buffer_len) may not be
+				 * big enough once we have rounded up, so we
+				 * could overflow the buffer in an error
+				 * scenario (when the device sends more data
+				 * than expected), this is bad but there is not
+				 * much we can do about it, we could use a
+				 * bounce buffer but then we would be no
+				 * better than buffer dma mode.
+				 */
+
+				if (len > 0) {
+					num_packets = (len + hc->max_packet - 1)
+							/ hc->max_packet;
+				} else {
+					/*
+					 * Need 1 packet for
+					 * transfer length of 0.
+					 */
+					num_packets = 1;
+				}
+				/*
+				 * Always program an integral #
+				 * of max packets for IN transfers.
+				 */
+				len = num_packets * hc->max_packet;
+			}
+
+			dma_desc->status.b.n_bytes = len;
+
+			qh->n_bytes[n_desc] = len;
+
+
+			if ((qh->ep_type == USB_ENDPOINT_XFER_CONTROL) &&
+			(qtd->control_phase == DWC_OTG_CONTROL_SETUP))
+				dma_desc->status.b.sup = 1; /*Setup Packet */
+
+			dma_desc->status.b.a = 1; /*Active descriptor */
+
+			dma_desc->buf = (u32) hc->xfer_buff;
+
+			/*
+			 * Last descriptor(or single) of IN transfer
+			 * with actual size less than MaxPacket.
+			 */
+			if (len > hc->xfer_len) {
+				/*
+				 * clamp at 0, needed due to rounding up to
+				 * max packet size.
+				 */
+				hc->xfer_len = 0;
+			} else {
+				hc->xfer_buff += len;
+				hc->xfer_len -= len;
+			}
+
+			qtd->n_desc++;
+			n_desc++;
+		} while ((hc->xfer_len > 0) &&
+				(n_desc != MAX_DMA_DESC_NUM_GENERIC));
+
+
+		qtd->in_process = 1;
+
+		if (n_desc == MAX_DMA_DESC_NUM_GENERIC) {
+			WARN_ON(1);
+			break;
+		}
+
+
+		/*
+		 * Request Transfer Complete interrupt for each qtd
+		 * set it on the last descriptor of the qtd.
+		 *
+		 * (This appears to be broken, see comment above)
+		 */
+		if (n_desc)
+			qh->desc_list[n_desc-1].status.b.ioc = 1;
+
+	}
+
+	if (n_desc) {
+		/*End of List indicator */
+		qh->desc_list[n_desc-1].status.b.eol = 1;
+		hc->ntd = n_desc;
+	}
+}
+
+/**
+ * For Control and Bulk endpoints initializes descriptor list
+ * and starts the transfer.
+ *
+ * For Interrupt and Isochronous endpoints initializes descriptor list
+ * then updates FrameList, marking appropriate entries as active.
+ * In case of Isochronous, the starting descriptor index is calculated based
+ * on the scheduled frame, but only on the first transfer descriptor within a
+ * session.Then starts the transfer via enabling the channel.
+ * For Isochronous endpoint the channel is not halted on XferComplete
+ * interrupt so remains assigned to the endpoint(QH) until session is done.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh The QH to init.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+void dwc_otg_hcd_start_xfer_ddma(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	/*Channel is already assigned */
+	struct dwc_hc *hc = qh->channel;
+	u8 skip_frames = 0;
+
+	switch (hc->ep_type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+		init_non_isoc_dma_desc(hcd, qh);
+
+		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		init_non_isoc_dma_desc(hcd, qh);
+
+		/* Mark this channel active in the Frame List slots. */
+		update_frame_list(hcd, qh, 1);
+
+		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+
+		/* New session (no active descriptors): pick a start frame. */
+		if (!qh->ntd)
+			skip_frames = recalc_initial_desc_idx(hcd, qh);
+
+		init_isoc_dma_desc(hcd, qh, skip_frames);
+
+		if (!hc->xfer_started) {
+
+			update_frame_list(hcd, qh, 1);
+
+			/*
+			 * Always set to max, instead of actual size.
+			 * Otherwise ntd will be changed with
+			 * channel being enabled. Not recommended.
+			 *
+			 */
+			hc->ntd = max_desc_num(qh);
+			/* Enable channel only once for ISOC */
+			dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		}
+
+		break;
+	default:
+		/* No other endpoint types exist; nothing to start. */
+		break;
+	}
+}
+
+/*
+ * Scan the ISOC descriptor list for a halted/completed channel, updating
+ * each frame descriptor's status and actual_length and giving back URBs
+ * whose packets are all done.  On URB dequeue the QTDs are only unmarked;
+ * on AHB/Babble errors every queued URB is completed with an error code.
+ * The scan stops at the first IOC-marked descriptor and qh->td_first is
+ * advanced to the stop position.
+ */
+static void complete_isoc_xfer_ddma(struct dwc_otg_hcd *hcd,
+				    struct dwc_hc *hc,
+				    struct dwc_otg_hc_regs __iomem *hc_regs,
+				    enum dwc_otg_halt_status halt_status)
+{
+	struct list_head 			*pos, *list_temp;
+	struct usb_iso_packet_descriptor	*frame_desc;
+	struct dwc_otg_qtd				*qtd;
+	struct dwc_otg_qh				*qh;
+	struct dwc_otg_host_dma_desc			*dma_desc;
+	u16 				idx, remain;
+	u8 				urb_compl;
+
+	qh = hc->qh;
+	idx = qh->td_first;
+
+
+	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
+		list_for_each(pos, &hc->qh->qtd_list) {
+			qtd = dwc_list_to_qtd(pos);
+			qtd->in_process = 0;
+		}
+		return;
+	} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
+			(halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
+		/*
+		 * Channel is halted in these error cases.
+		 * Considered as serious issues.
+		 * Complete all URBs marking all frames as failed,
+		 * irrespective whether some of the descriptors(frames)
+		 * succeeded or not. Pass error code to completion routine as
+		 * well, to update urb->status, some of class drivers might
+		 * use it to stop queueing transfer requests.
+		 */
+		int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
+							? (-EIO)
+							: (-EOVERFLOW);
+
+		list_for_each_safe(pos, list_temp, &hc->qh->qtd_list) {
+			qtd = dwc_list_to_qtd(pos);
+			for (idx = 0; idx < qtd->urb->number_of_packets; idx++) {
+				frame_desc = &qtd->urb->iso_frame_desc[idx];
+				frame_desc->status = err;
+			}
+			dwc_otg_hcd_complete_urb(hcd, qtd->urb, err);
+			__dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+		}
+		return;
+	}
+
+	list_for_each_safe(pos, list_temp, &hc->qh->qtd_list) {
+		qtd = dwc_list_to_qtd(pos);
+
+		/* Descriptors were only programmed for in-process QTDs. */
+		if (!qtd->in_process)
+			break;
+
+		urb_compl = 0;
+
+		do {
+			dma_desc = &qh->desc_list[idx];
+
+			frame_desc =
+				&qtd->urb->iso_frame_desc[qtd->isoc_frame_index];
+			/* For IN, n_bytes holds the bytes NOT transferred. */
+			remain = hc->ep_is_in ?
+					dma_desc->status.b_isoc.n_bytes : 0;
+
+			if (dma_desc->status.b_isoc.sts ==
+				DMA_DESC_STS_PKTERR) {
+				/*
+				 * XactError or, unable to complete all the
+				 * transactions in the scheduled
+				 * micro-frame/frame,both indicated
+				 * by DMA_DESC_STS_PKTERR.
+				 */
+				qtd->urb->error_count++;
+
+				frame_desc->actual_length =
+					qh->n_bytes[idx] - remain;
+				frame_desc->status = -EPROTO;
+			} else {
+				/* Success */
+				frame_desc->actual_length =
+					qh->n_bytes[idx] - remain;
+				frame_desc->status = 0;
+			}
+
+			if (++qtd->isoc_frame_index ==
+				qtd->urb->number_of_packets) {
+				/*
+				 * urb->status is not used for isoc transfers
+				 * here. The individual frame_desc status are
+				 * used instead.
+				 */
+
+				dwc_otg_hcd_complete_urb(hcd, qtd->urb, 0);
+
+				__dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+
+				/*
+				 * This check is necessary because urb_dequeue
+				 * can be called from urb complete callback
+				 * (sound driver example). All pending URBs are
+				 * dequeued there, so no need for further
+				 * processing.
+				 */
+				if (hc->halt_status ==
+						DWC_OTG_HC_XFER_URB_DEQUEUE)
+					return;
+
+				urb_compl = 1;
+			}
+
+			qh->ntd--;
+
+			/* Stop if IOC requested descriptor reached */
+			if (dma_desc->status.b_isoc.ioc) {
+				idx = desclist_idx_inc(idx,
+						qh->interval,
+						hc->speed);
+				goto stop_scan;
+			}
+
+			idx = desclist_idx_inc(idx, qh->interval, hc->speed);
+
+			if (urb_compl)
+				break;
+
+		} while (idx != qh->td_first);
+	}
+stop_scan:
+	qh->td_first = idx;
+}
+
+/*
+ * Update urb->status/actual_length from one completed non-ISOC descriptor.
+ * *xfer_done is set when the URB finished a stage (or the whole transfer).
+ * Returns 1 when the caller must stop scanning this QTD (fatal channel
+ * error, descriptor packet error or buffer overflow), 0 otherwise.
+ */
+static u8 update_non_isoc_urb_state_ddma(struct dwc_otg_hcd *hcd,
+					 struct dwc_hc *hc,
+					 struct dwc_otg_qtd *qtd,
+					 struct dwc_otg_host_dma_desc *dma_desc,
+					 enum dwc_otg_halt_status halt_status,
+					 u32 n_bytes,
+					 u8 *xfer_done)
+{
+	/* For IN, n_bytes holds the bytes NOT transferred by the HW. */
+	u16 remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
+	struct urb *urb = qtd->urb;
+
+	if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+		urb->status = -EIO;
+		return 1;
+	}
+	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
+		switch (halt_status) {
+		case DWC_OTG_HC_XFER_STALL:
+			urb->status = -EPIPE;
+			break;
+		case DWC_OTG_HC_XFER_BABBLE_ERR:
+			urb->status = -EOVERFLOW;
+			break;
+		case DWC_OTG_HC_XFER_XACT_ERR:
+			urb->status = -EPROTO;
+			break;
+		default:
+			DWC_ERROR("%s: Unhandled descriptor error "
+					"status (%d)\n", __func__,
+					halt_status);
+			break;
+		}
+		return 1;
+	}
+
+	if (dma_desc->status.b.a == 1) {
+		DWC_DEBUGPL(DBG_HCDV, "Active descriptor encountered "
+				"on channel %d\n", hc->hc_num);
+		return 0;
+	}
+
+	if (hc->ep_type == USB_ENDPOINT_XFER_CONTROL) {
+		if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
+			urb->actual_length += n_bytes - remain;
+			if (urb->actual_length > urb->transfer_buffer_length) {
+				urb->actual_length = urb->transfer_buffer_length;
+				urb->status = -EOVERFLOW;
+				*xfer_done = 1;
+				return 1;
+			} else if (remain || urb->actual_length
+					     == urb->transfer_buffer_length) {
+				/*
+				 * For Control Data stage do not set
+				 * urb->status=0 to prevent URB callback. Set it
+				 * when Status phase done. See below.
+				 */
+				*xfer_done = 1;
+			}
+
+		} else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
+			urb->status = 0;
+			*xfer_done = 1;
+		}
+		/* No handling for SETUP stage */
+	} else {
+		/* BULK and INTR */
+		urb->actual_length += n_bytes - remain;
+		if (urb->actual_length > urb->transfer_buffer_length) {
+			/*
+			 * if the actual_length > transfer_buffer_length
+			 * we have an issue, this can occur because: for
+			 * in descriptors we must set the length to max packet
+			 * size (or a multiple of that). Although it only
+			 * occurs in a error case, so set -EOVERFLOW
+			 */
+			urb->actual_length = urb->transfer_buffer_length;
+			urb->status = -EOVERFLOW;
+			*xfer_done = 1;
+			return 1;
+		} else if (remain || urb->actual_length ==
+					urb->transfer_buffer_length) {
+			urb->status = 0;
+			*xfer_done = 1;
+		}
+	}
+
+	return 0;
+}
+
+
+
+/*
+ * Advance a control transfer's phase (SETUP -> DATA/STATUS -> STATUS)
+ * after descriptor @i of the QTD completed.  When the DATA stage is not
+ * finished and its last queued descriptor was just processed, the data
+ * toggle is saved for the next descriptor chunk.
+ */
+static void update_control_phase(struct dwc_otg_qtd *qtd,
+				 struct urb *urb,
+				 struct dwc_hc *hc,
+				 struct dwc_otg_hc_regs __iomem *hc_regs,
+				 u8 xfer_done,
+				 u32 i)
+{
+	switch (qtd->control_phase) {
+	case DWC_OTG_CONTROL_SETUP:
+		/* Zero-length requests skip straight to the Status stage. */
+		qtd->control_phase = (urb->transfer_buffer_length > 0) ?
+				DWC_OTG_CONTROL_DATA : DWC_OTG_CONTROL_STATUS;
+		DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
+		break;
+	case DWC_OTG_CONTROL_DATA:
+		if (xfer_done) {
+			qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+			DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
+		} else if (i + 1 == qtd->n_desc) {
+			/*
+			 * Last descriptor for Control
+			 * data stage which is
+			 * not completed yet.
+			 */
+			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+		}
+		break;
+	default:
+		/* Status phase: nothing further to advance. */
+		break;
+	}
+}
+
+/*
+ * Complete non-ISOC (Control/Bulk/Interrupt) transfers for a halted
+ * channel: walk each QTD's descriptors, update URB state, give back
+ * finished URBs, then fix up the endpoint's data toggle / PING state.
+ * Giving back an URB can re-enter the driver (synchronous dequeue), which
+ * is why the QTD list scan restarts from the top after each completion.
+ */
+static void complete_non_isoc_xfer_ddma(struct dwc_otg_hcd *hcd,
+					struct dwc_hc *hc,
+					struct dwc_otg_hc_regs __iomem *hc_regs,
+					enum dwc_otg_halt_status halt_status)
+{
+	struct list_head 		*pos, *list_temp;
+	struct urb			*urb = NULL;
+	struct dwc_otg_qtd		*qtd = NULL;
+	struct dwc_otg_qh		*qh;
+	struct dwc_otg_host_dma_desc	*dma_desc;
+	u32 				n_bytes, n_desc, i, qtd_n_desc;
+	u8				failed = 0, xfer_done;
+
+	n_desc = 0;
+
+	qh = hc->qh;
+
+	/* Dequeue: just unmark the QTDs, the caller handles the URBs. */
+	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
+		list_for_each(pos, &hc->qh->qtd_list) {
+			qtd = dwc_list_to_qtd(pos);
+			qtd->in_process = 0;
+		}
+		return;
+	}
+
+re_read_list:
+	list_for_each_safe(pos, list_temp, &qh->qtd_list) {
+		qtd = dwc_list_to_qtd(pos);
+		urb = qtd->urb;
+		n_bytes = 0;
+		xfer_done = 0;
+		qtd_n_desc = qtd->n_desc;
+
+		for (i = 0; i < qtd_n_desc; i++) {
+			dma_desc = &qh->desc_list[n_desc];
+
+			n_bytes = qh->n_bytes[n_desc];
+
+			failed = update_non_isoc_urb_state_ddma(hcd,
+								hc,
+								qtd,
+								dma_desc,
+								halt_status,
+								n_bytes,
+								&xfer_done);
+
+			if (failed || (xfer_done && (urb->status != -EINPROGRESS))) {
+
+				dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
+				__dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+
+				/* Calling complete on the URB drops the HCD
+				 * lock and passes control back to the higher
+				 * levels, they may then unlink any current
+				 * URBs, unlinks are synchronous in this driver
+				 * (unlike in EHCI) thus dequeue gets called
+				 * straight away and the associated QTD gets
+				 * removed off the qh list and free'd thus
+				 * list_temp can be invalid after this call
+				 * so we must re-read the list from the start.
+				 */
+
+				if (failed)
+					goto stop_scan;
+				else
+					goto re_read_list;
+
+			} else if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
+				update_control_phase(qtd, urb, hc, hc_regs,
+						     xfer_done, i);
+			}
+
+			n_desc++;
+		}
+
+	}
+
+stop_scan:
+
+	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+		/*
+		 * Resetting the data toggle for bulk
+		 * and interrupt endpoints in case of stall.
+		 * See handle_hc_stall_intr()
+		 */
+		if (halt_status == DWC_OTG_HC_XFER_STALL)
+			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+		else
+			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+	}
+
+	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+		union hcint_data hcint;
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+		if (hcint.b.nyet) {
+			/*
+			 * Got a NYET on the last transaction of the transfer.
+			 * It means that the endpoint should be in the PING
+			 * state at the beginning of the next transfer.
+			 */
+			qh->ping_state = 1;
+			clear_hc_int(hc_regs, nyet);
+		}
+
+	}
+}
+
+/**
+ * This function is called from interrupt handlers.
+ * Scans the descriptor list, updates URB's status and
+ * calls completion routine for the URB if it's done.
+ * Releases the channel to be used by other transfers.
+ * In case of Isochronous endpoint the channel is not halted until
+ * the end of the session, i.e. QTD list is empty.
+ * If periodic channel released the FrameList is updated accordingly.
+ *
+ * Calls transaction selection routines to activate pending transfers.
+ *
+ * Should be called with hcd->lock held.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param hc Host channel, the transfer is completed on.
+ * @param hc_regs Host channel registers.
+ * @param halt_status Reason the channel is being halted,
+ * 	      or just XferComplete for isochronous transfer
+ */
+void dwc_otg_hcd_complete_xfer_ddma(struct dwc_otg_hcd *hcd,
+			    struct dwc_hc *hc,
+			    struct dwc_otg_hc_regs __iomem *hc_regs,
+			    enum dwc_otg_halt_status halt_status)
+{
+	u8 continue_isoc_xfer = 0;
+	enum dwc_otg_transaction_type tr_type;
+	struct dwc_otg_qh *qh = hc->qh;
+
+	if (hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+
+		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
+
+		/* Release the channel if halted or session completed */
+		if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
+				list_empty(&qh->qtd_list)) {
+
+			/* Halt the channel if session completed */
+			if (halt_status == DWC_OTG_HC_XFER_COMPLETE)
+				dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
+
+			release_channel_ddma(hcd, qh);
+			__dwc_otg_hcd_qh_remove(hcd, qh);
+		} else {
+			/*Keep in assigned schedule to continue transfer */
+			list_move(&qh->qh_list_entry,
+					&hcd->periodic_sched_assigned);
+			continue_isoc_xfer = 1;
+
+		}
+		/** @todo Consider the case when period exceeds FrameList size.
+		 *  Frame Rollover interrupt should be used.
+		 */
+	} else {
+		/* Scan descriptor list to complete the URB(s),
+		 * then release the channel */
+
+		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
+
+		release_channel_ddma(hcd, qh);
+
+		__dwc_otg_hcd_qh_remove(hcd, qh);
+
+		if (!list_empty(&qh->qtd_list))
+			/* Add back to inactive non-periodic
+			 * schedule on normal completion */
+			__dwc_otg_hcd_qh_add(hcd, qh);
+
+	}
+	/* Kick the scheduler to start any transactions made ready above. */
+	tr_type = __dwc_otg_hcd_select_transactions(hcd, LOCKED);
+	if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
+		if (continue_isoc_xfer) {
+			if (tr_type == DWC_OTG_TRANSACTION_NONE)
+				tr_type = DWC_OTG_TRANSACTION_PERIODIC;
+			else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC)
+				tr_type = DWC_OTG_TRANSACTION_ALL;
+		}
+		dwc_otg_hcd_queue_transactions(hcd, tr_type);
+	}
+}
+
+#endif	/* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
new file mode 100644
index 0000000..775eeaf
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
@@ -0,0 +1,2067 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_DEVICE_ONLY
+
+#include <linux/moduleparam.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+static const int deferral_on = 1;
+static int nak_deferral_delay = 20;
+module_param(nak_deferral_delay, int, 0644);
+static const int nyet_deferral_delay = 1;
+static const int LOCKED = 1;
+
+/** @file
+ * This file contains the implementation of the HCD Interrupt handlers.
+ */
+
+/**
+ * Top-level interrupt handler for the HCD.
+ *
+ * Only acts when the core is currently in host mode: reads the masked
+ * core interrupt status and dispatches to the individual handlers for
+ * SOF, Rx status queue level, non-periodic Tx FIFO empty, port, host
+ * channel, and periodic Tx FIFO empty interrupts.
+ *
+ * Returns the OR of the handler return values: non-zero if any
+ * interrupt was serviced, 0 if nothing was pending or the core is in
+ * device mode.
+ */
+int dwc_otg_hcd_handle_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	int retval = 0;
+	struct dwc_otg_core_if *core_if = dwc_otg_hcd->core_if;
+	union gintsts_data gintsts;
+
+#ifdef DEBUG
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+
+#endif	/*  */
+
+	/* Check if HOST Mode */
+	if (dwc_otg_is_host_mode(core_if)) {
+		gintsts.d32 = dwc_otg_read_core_intr(core_if);
+		/* No pending, unmasked interrupts -- nothing to service */
+		if (!gintsts.d32)
+			return 0;
+
+#ifdef DEBUG
+	    /* Don't print debug message in the interrupt handler on SOF */
+#ifndef DEBUG_SOF
+		if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+			DWC_DEBUGPL(DBG_HCD, "\n");
+#endif	/*  */
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+		if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected "
+					"gintsts&gintmsk=0x%08x\n",
+					gintsts.d32);
+#endif	/*  */
+		if (gintsts.b.sofintr)
+			retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
+		/*
+		 * NOTE(review): with OTG_PLB_DMA_TASKLET, the Rx status and
+		 * non-periodic Tx FIFO interrupts are skipped while
+		 * release_later is set -- presumably the tasklet owns the
+		 * FIFOs then; confirm against the tasklet implementation.
+		 */
+#ifdef OTG_PLB_DMA_TASKLET
+		if (!atomic_read(&release_later) && gintsts.b.rxstsqlvl)
+#else
+		if (gintsts.b.rxstsqlvl)
+#endif
+			retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(
+					dwc_otg_hcd);
+#ifdef OTG_PLB_DMA_TASKLET
+		if (!atomic_read(&release_later) && gintsts.b.nptxfempty)
+#else
+		if (gintsts.b.nptxfempty)
+#endif
+			retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(
+					dwc_otg_hcd);
+
+#if 0
+		if (gintsts.b.i2cintr)
+			;
+			/** @todo Implement i2cintr handler. */
+#endif
+
+		if (gintsts.b.portintr)
+			retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
+
+		if (gintsts.b.hcintr)
+			retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
+
+		if (gintsts.b.ptxfempty)
+			retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(
+					dwc_otg_hcd);
+
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+	if (gintsts.d32 != DWC_SOF_INTR_MASK) {
+#endif
+			DWC_DEBUGPL(DBG_HCD,
+				     "DWC OTG HCD Finished "
+				     "Servicing Interrupts\n");
+			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
+				     dwc_read_reg32(&global_regs->gintsts));
+			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
+				     dwc_read_reg32(&global_regs->gintmsk));
+
+#ifndef DEBUG_SOF
+	}
+
+	if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+		DWC_DEBUGPL(DBG_HCD, "\n");
+#endif
+	}
+	return retval;
+}
+
+
+#ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+/**
+ * This function is for debug only.
+ *
+ * Records (current, previous) frame number pairs whenever the frame
+ * counter did not advance by exactly one (i.e. an SOF was missed),
+ * then dumps the whole table once it fills up. All state is static,
+ * so the dump happens at most once per boot.
+ */
+static inline void track_missed_sofs(u16 curr_frame_number)
+{
+	static u16 frame_num_array[FRAME_NUM_ARRAY_SIZE];
+	static u16 last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+	static int frame_num_idx;
+	static u16 last_frame_num = DWC_HFNUM_MAX_FRNUM;
+	static int dumped_frame_num_array;
+	if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+		/* Frame counter wraps at DWC_HFNUM_MAX_FRNUM, hence the mask */
+		if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
+		      curr_frame_number)) {
+			frame_num_array[frame_num_idx] = curr_frame_number;
+			last_frame_num_array[frame_num_idx++] = last_frame_num;
+		}
+	} else if (!dumped_frame_num_array) {
+		int i;
+		printk(KERN_EMERG USB_DWC "Frame     Last Frame\n");
+		printk(KERN_EMERG USB_DWC "-----     ----------\n");
+		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+			printk(KERN_EMERG USB_DWC "0x%04x    0x%04x\n",
+				frame_num_array[i], last_frame_num_array[i]);
+		}
+		dumped_frame_num_array = 1;
+	}
+	last_frame_num = curr_frame_number;
+}
+#endif
+
+/**
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ *
+ * Updates hcd->frame_number from HFNUM, moves any inactive periodic
+ * QHs whose scheduled frame has arrived onto the ready list, queues
+ * transactions, and finally clears the SOF interrupt by writing the
+ * sofintr bit back to GINTSTS (write-1-to-clear). Always returns 1.
+ */
+int dwc_otg_hcd_handle_sof_intr(struct dwc_otg_hcd *hcd)
+{
+	union hfnum_data hfnum;
+	struct list_head *qh_entry;
+	struct dwc_otg_qh *qh;
+	enum dwc_otg_transaction_type tr_type;
+	union gintsts_data gintsts = {.d32 = 0};
+	hfnum.d32 =
+		dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+	DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif	/*  */
+	hcd->frame_number = hfnum.b.frnum;
+#ifdef DEBUG
+	/* Accumulate the frame-remaining counter for average statistics */
+	hcd->frrem_accum += hfnum.b.frrem;
+	hcd->frrem_samples++;
+#endif	/*  */
+
+#ifdef DWC_TRACK_MISSED_SOFS
+	track_missed_sofs(hcd->frame_number);
+#endif
+
+	/* Determine whether any periodic QHs should be executed. */
+	qh_entry = hcd->periodic_sched_inactive.next;
+	while (qh_entry != &hcd->periodic_sched_inactive) {
+		/* Advance before list_move may unlink the current entry */
+		qh = list_entry(qh_entry, struct dwc_otg_qh, qh_list_entry);
+		qh_entry = qh_entry->next;
+		if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
+			/*
+			 * Move QH to the ready list to be executed next
+			 * (micro)frame.
+			 */
+			list_move(&qh->qh_list_entry,
+					&hcd->periodic_sched_ready);
+		}
+	}
+	tr_type = __dwc_otg_hcd_select_transactions(hcd, LOCKED);
+	if (tr_type != DWC_OTG_TRANSACTION_NONE)
+		dwc_otg_hcd_queue_transactions(hcd, tr_type);
+
+	/* Clear interrupt */
+	gintsts.b.sofintr = 1;
+	dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+/* Handles the Rx Status Queue Level Interrupt, which indicates that there is at
+ * least one packet in the Rx FIFO.  The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode.
+ *
+ * Returns 1 on success, or -1 if the channel number reported in GRXSTSP
+ * has no corresponding host channel in hc_ptr_array.
+ */
+int dwc_otg_hcd_handle_rx_status_q_level_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	union host_grxsts_data grxsts;
+	struct dwc_hc *hc = NULL;
+	DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+	/* Reading GRXSTSP pops the top entry off the Rx status queue */
+	grxsts.d32 =
+		dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);
+	hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+
+	if (hc) {
+		/* Packet Status */
+		DWC_DEBUGPL(DBG_HCDV, "    Ch num = %d\n", grxsts.b.chnum);
+		DWC_DEBUGPL(DBG_HCDV, "    Count = %d\n", grxsts.b.bcnt);
+		DWC_DEBUGPL(DBG_HCDV, "    DPID = %d, hc.dpid = %d\n",
+				grxsts.b.dpid, hc->data_pid_start);
+
+		DWC_DEBUGPL(DBG_HCDV, "    PStatus = %d\n", grxsts.b.pktsts);
+
+		switch (grxsts.b.pktsts) {
+		case DWC_GRXSTS_PKTSTS_IN:
+			/* Read the data into the host buffer. */
+			if (grxsts.b.bcnt > 0 && hc->xfer_buff) {
+				dwc_otg_read_packet(dwc_otg_hcd->core_if,
+						hc->xfer_buff, grxsts.b.bcnt);
+
+				/*
+				 * Update the HC fields for the
+				 * next packet received.
+				 */
+				hc->xfer_count += grxsts.b.bcnt;
+				hc->xfer_buff += grxsts.b.bcnt;
+			}
+			/* FALLTHROUGH -- nothing more to do after the read */
+		case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+		case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+		case DWC_GRXSTS_PKTSTS_CH_HALTED:
+			/* Handled in interrupt, just ignore data */
+			break;
+		default:
+			DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
+				   grxsts.b.pktsts);
+			break;
+		}
+		return 1;
+	}
+	return -1;
+}
+
+
+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode.
+ *
+ * Simply kicks the non-periodic transaction queue; always returns 1.
+ */
+int dwc_otg_hcd_handle_np_tx_fifo_empty_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+	dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
+					DWC_OTG_TRANSACTION_NON_PERIODIC);
+	return 1;
+}
+
+
+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode.
+ *
+ * Simply kicks the periodic transaction queue; always returns 1.
+ */
+int dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
+	dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
+					DWC_OTG_TRANSACTION_PERIODIC);
+	return 1;
+}
+
+
+
+/**
+ * Handles a "port enabled" change reported by the port interrupt.
+ *
+ * If FS/LS low-power support is configured, selects the PHY low-power
+ * clock (GUSBCFG.phylpwrclksel) and the FS/LS clock rate (HCFG.fslspclksel,
+ * 6 MHz or 48 MHz) to match the speed of the attached device, and
+ * schedules a port reset via the reset tasklet whenever any of those
+ * settings actually changed. If no reset is needed, only the
+ * port_reset_change flag is set for the hub driver to pick up.
+ */
+static void dwc_otg_handle_port_change(struct dwc_otg_hcd *dwc_otg_hcd,
+					union hprt0_data *hprt0)
+{
+	int do_reset = 0;
+	struct dwc_otg_core_params *params =
+		dwc_otg_hcd->core_if->core_params;
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		dwc_otg_hcd->core_if->core_global_regs;
+	struct dwc_otg_host_if *host_if =
+		dwc_otg_hcd->core_if->host_if;
+
+	/* Check if we need to adjust the PHY clock speed for
+	* low power and adjust it */
+	if (params->host_support_fs_ls_low_power) {
+		union gusbcfg_data usbcfg;
+		usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+		if ((hprt0->b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ||
+		(hprt0->b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED)) {
+			/*
+			 * Low power
+			 */
+			union hcfg_data hcfg;
+			if (usbcfg.b.phylpwrclksel == 0) {
+				/*
+				 * Set PHY low power clock select
+				 * for FS/LS devices
+				 */
+				usbcfg.b.phylpwrclksel = 1;
+				dwc_write_reg32(
+					&global_regs->gusbcfg, usbcfg.d32);
+				do_reset = 1;
+			}
+
+			hcfg.d32 =
+				dwc_read_reg32(&host_if->host_global_regs->hcfg);
+
+			/* 6 MHz only applies to LS devices when configured */
+			if ((hprt0->b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) &&
+			(params->host_ls_low_power_phy_clk ==
+			DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+				/* 6 MHZ */
+				DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG "
+						"to 6 MHz (Low Power)\n");
+				if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
+					hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
+					dwc_write_reg32(
+						&host_if->host_global_regs->hcfg
+						, hcfg.d32
+						);
+					do_reset = 1;
+				}
+			} else {
+				/* 48 MHZ */
+				DWC_DEBUGPL(DBG_CIL, "FS_PHY programming "
+						"HCFG to 48 MHz ()\n");
+
+				if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
+					hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
+					dwc_write_reg32(
+						&host_if->host_global_regs->hcfg
+						, hcfg.d32
+						);
+					do_reset = 1;
+				}
+			}
+		} else {
+			/*
+			 * Not low power
+			 */
+			if (usbcfg.b.phylpwrclksel == 1) {
+				usbcfg.b.phylpwrclksel = 0;
+				dwc_write_reg32(&global_regs->gusbcfg,
+						usbcfg.d32);
+				do_reset = 1;
+			}
+		}
+		if (do_reset)
+			tasklet_schedule(dwc_otg_hcd->reset_tasklet);
+	}
+	if (!do_reset) {
+		/* Port has been enabled set the reset change flag */
+		dwc_otg_hcd->flags.b.port_reset_change = 1;
+	}
+}
+
+
+
+/** There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately.
+ *
+ * HPRT0's change bits are write-1-to-clear, and prtena is cleared by
+ * writing 1 as well, so hprt0_modify starts with all of those bits
+ * zeroed and only the handled change bits are set back before the
+ * final write. Returns non-zero if any port condition was handled.
+ */
+int dwc_otg_hcd_handle_port_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	int retval = 0;
+	union hprt0_data hprt0;
+	union hprt0_data hprt0_modify;
+	hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);
+	hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);
+
+	/*
+	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
+	 * GINTSTS
+	 */
+	hprt0_modify.b.prtena = 0;
+	hprt0_modify.b.prtconndet = 0;
+	hprt0_modify.b.prtenchng = 0;
+	hprt0_modify.b.prtovrcurrchng = 0;
+
+	/* Port Connect Detected
+	* Set flag and clear if detected */
+	if (hprt0.b.prtconndet) {
+		DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+			     "Port Connect Detected--\n", hprt0.d32);
+		dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+		dwc_otg_hcd->flags.b.port_connect_status = 1;
+		hprt0_modify.b.prtconndet = 1;
+
+		/* B-Device has connected, Delete the connection timer.  */
+		del_timer(&dwc_otg_hcd->conn_timer);
+
+		/* The Hub driver asserts a reset when it sees port connect
+		 * status change flag
+		 */
+		retval |= 1;
+	}
+
+	/*
+	 * Port Enable Changed
+	 * Clear if detected - Set internal flag if disabled
+	 */
+	if (hprt0.b.prtenchng) {
+		DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
+			     "Port Enable Changed--\n", hprt0.d32);
+		hprt0_modify.b.prtenchng = 1;
+		if (hprt0.b.prtena == 1)
+			dwc_otg_handle_port_change(dwc_otg_hcd, &hprt0);
+		else
+			dwc_otg_hcd->flags.b.port_enable_change = 1;
+		retval |= 1;
+	}
+
+	/** Overcurrent Change Interrupt */
+	if (hprt0.b.prtovrcurrchng) {
+		DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
+			     "Port Overcurrent Changed--\n", hprt0.d32);
+		dwc_otg_hcd->flags.b.port_over_current_change = 1;
+		hprt0_modify.b.prtovrcurrchng = 1;
+		retval |= 1;
+	}
+
+	/* Clear Port Interrupts */
+	dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
+	return retval;
+}
+
+/** This interrupt indicates that one or more host channels has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately.
+ *
+ * HAINT has one bit per channel; each set bit is dispatched to
+ * dwc_otg_hcd_handle_hc_n_intr. Returns the OR of the per-channel
+ * handler return values.
+ */
+int dwc_otg_hcd_handle_hc_intr(struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	int i;
+	int retval = 0;
+	union haint_data haint;
+
+	/* Clear appropriate bits in HCINTn to clear the interrupt bit in
+	 * GINTSTS */
+	haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);
+	for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
+		if (haint.b2.chint & (1 << i))
+			retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
+	}
+	return retval;
+}
+
+/**
+ * Gets the actual length of a transfer after the transfer halts. halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
+ * *short_read is set to 1 upon return if less than the requested
+ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
+ * return. short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static u32 get_actual_xfer_length(struct dwc_hc *hc,
+				  struct dwc_otg_hc_regs __iomem *hc_regs,
+				  struct dwc_otg_qtd *qtd,
+				  enum dwc_otg_halt_status halt_status,
+				  int *short_read)
+{
+	union hctsiz_data hctsiz;
+	u32 length;
+	if (short_read != NULL)
+		*short_read = 0;
+
+	hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+		if (hc->ep_is_in) {
+			/* IN: remaining xfersize tells how much was received */
+			length = hc->xfer_len - hctsiz.b.xfersize;
+			if (short_read != NULL)
+				*short_read = (hctsiz.b.xfersize != 0);
+		} else if (hc->qh->do_split)
+			/* Split OUT: count accumulated by the SSPLIT logic */
+			length = qtd->ssplit_out_xfer_count;
+		else
+			length = hc->xfer_len;
+	} else {
+	    /*
+	     * Must use the hctsiz.pktcnt field to determine how much data
+	     * has been transferred. This field reflects the number of
+	     * packets that have been transferred via the USB. This is
+	     * always an integral number of packets if the transfer was
+	     * halted before its normal completion. (Can't use the
+	     * hctsiz.xfersize field because that reflects the number of
+	     * bytes transferred via the AHB, not the USB).
+	     */
+	    length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
+	}
+	return length;
+}
+
+/**
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(struct dwc_hc *hc,
+				      struct dwc_otg_hc_regs __iomem *hc_regs,
+				      struct urb *urb,
+				      struct dwc_otg_qtd *qtd, int *status)
+{
+	int xfer_done = 0;
+	int short_read = 0;
+	int xfer_length;
+
+	xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
+						DWC_OTG_HC_XFER_COMPLETE,
+						&short_read);
+
+	/* non DWORD-aligned buffer case handling. */
+	if (hc->align_buff && xfer_length && hc->ep_is_in) {
+		/* Data arrived in the bounce buffer; copy it back to the URB */
+		memcpy(urb->transfer_buffer + urb->actual_length,
+				hc->qh->dw_align_buf, xfer_length);
+	}
+
+	urb->actual_length += xfer_length;
+
+	/*
+	 * A bulk OUT with URB_ZERO_PACKET that ended exactly on a
+	 * max-packet boundary still owes a zero-length packet, so the
+	 * transfer is not done yet.
+	 */
+	if (xfer_length && (hc->ep_type == USB_ENDPOINT_XFER_BULK)
+			&& (urb->transfer_flags & URB_ZERO_PACKET)
+			&& (urb->actual_length == urb->transfer_buffer_length)
+			&& !(urb->transfer_buffer_length % hc->max_packet)) {
+		xfer_done = 0;
+	} else if (short_read ||
+			urb->actual_length == urb->transfer_buffer_length) {
+		xfer_done = 1;
+		/* Short reads are an error only if the URB asked for that */
+		if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK))
+			*status = -EREMOTEIO;
+		else
+			*status = 0;
+	}
+
+
+#ifdef DEBUG
+	{
+		union hctsiz_data hctsiz;
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+			     __func__, (hc->ep_is_in ? "IN" : "OUT"),
+			     hc->hc_num);
+		DWC_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
+		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n",
+				hctsiz.b.xfersize);
+		DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
+			     urb->transfer_buffer_length);
+		DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
+			     urb->actual_length);
+		DWC_DEBUGPL(DBG_HCDV, "  short_read %d, xfer_done %d\n",
+			     short_read, xfer_done);
+	}
+#endif
+	return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ *
+ * The current toggle is taken from the PID field of HCTSIZ: DATA0 maps
+ * to DWC_OTG_HC_PID_DATA0, anything else to DWC_OTG_HC_PID_DATA1.
+ */
+void dwc_otg_hcd_save_data_toggle(struct dwc_hc *hc,
+	struct dwc_otg_hc_regs __iomem *hc_regs, struct dwc_otg_qtd *qtd)
+{
+	union hctsiz_data hctsiz;
+	hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+	if (hc->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+		struct dwc_otg_qh *qh = hc->qh;
+		if (hctsiz.b.pid == DWC_HCTSIZ_DATA0)
+			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+		else
+			qh->data_toggle = DWC_OTG_HC_PID_DATA1;
+	} else {
+		if (hctsiz.b.pid == DWC_HCTSIZ_DATA0)
+			qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
+		else
+			qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+	}
+}
+
+
+/**
+ * Updates the state of an Isochronous URB when the transfer is stopped for
+ * any reason. The fields of the current entry in the frame descriptor array
+ * are set based on the transfer state and the input halt_status. Completes
+ * the Isochronous URB if all the URB frames have been completed.
+ *
+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
+ */
+static enum dwc_otg_halt_status update_isoc_urb_state(struct dwc_otg_hcd *hcd,
+	  struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	  struct dwc_otg_qtd *qtd, enum dwc_otg_halt_status halt_status)
+{
+	struct urb *urb = qtd->urb;
+	enum dwc_otg_halt_status ret_val = halt_status;
+	struct usb_iso_packet_descriptor *frame_desc;
+	frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
+
+	switch (halt_status) {
+
+	case DWC_OTG_HC_XFER_COMPLETE:
+		frame_desc->status = 0;
+		frame_desc->actual_length =
+			get_actual_xfer_length(hc,
+					hc_regs,
+					qtd,
+					halt_status,
+					NULL);
+
+		/* non DWORD-aligned buffer case handling. */
+		if (hc->align_buff &&
+				frame_desc->actual_length && hc->ep_is_in) {
+			/* Copy received data from bounce buffer into URB */
+			memcpy(urb->transfer_buffer +
+					frame_desc->offset +
+					qtd->isoc_split_offset,
+				   hc->qh->dw_align_buf,
+				   frame_desc->actual_length);
+		}
+		break;
+
+	case DWC_OTG_HC_XFER_FRAME_OVERRUN:
+		urb->error_count++;
+		/* Per USB convention: -ENOSR for IN, -ECOMM for OUT overrun */
+		if (hc->ep_is_in)
+			frame_desc->status = -ENOSR;
+		else
+			frame_desc->status = -ECOMM;
+
+		frame_desc->actual_length = 0;
+		break;
+
+	case DWC_OTG_HC_XFER_BABBLE_ERR:
+		urb->error_count++;
+		frame_desc->status = -EOVERFLOW;
+
+		/* Don't need to update actual_length in this case. */
+		break;
+
+	case DWC_OTG_HC_XFER_XACT_ERR:
+		urb->error_count++;
+		frame_desc->status = -EPROTO;
+		frame_desc->actual_length =
+		    get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
+
+		/* non DWORD-aligned buffer case handling. */
+		if (hc->align_buff &&
+				frame_desc->actual_length && hc->ep_is_in) {
+			memcpy(urb->transfer_buffer +
+					frame_desc->offset +
+					qtd->isoc_split_offset,
+					hc->qh->dw_align_buf,
+					frame_desc->actual_length);
+		}
+		/* Skip whole frame */
+		if (hc->qh->do_split && (hc->ep_type == USB_ENDPOINT_XFER_ISOC) &&
+				hc->ep_is_in && hcd->core_if->dma_enable) {
+			qtd->complete_split = 0;
+			qtd->isoc_split_offset = 0;
+		}
+
+		break;
+	default:
+		DWC_ERROR("%s: Unhandled halt_status (%d)\n",
+				__func__, halt_status);
+		BUG();
+		break;
+	}
+	if (++qtd->isoc_frame_index == urb->number_of_packets) {
+		/*
+		 * urb->status is not used for isoc transfers.
+		 * The individual frame_desc statuses are used instead.
+		 */
+		dwc_otg_hcd_complete_urb(hcd, urb, 0);
+		ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
+	} else
+		ret_val = DWC_OTG_HC_XFER_COMPLETE;
+
+	return ret_val;
+}
+/**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ *
+ * continue_split is set when the head QTD is mid-way through a
+ * split transaction (complete_split pending, or isoc split position
+ * MID/END), so the QH deactivation can keep the split going.
+ */
+static void deactivate_qh(struct dwc_otg_hcd *hcd,
+			  struct dwc_otg_qh *qh,  int free_qtd)
+{
+	int continue_split = 0;
+	struct dwc_otg_qtd *qtd;
+	DWC_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh,
+		      free_qtd);
+	qtd = list_entry(qh->qtd_list.next, struct dwc_otg_qtd, qtd_list_entry);
+	if (qtd->complete_split)
+		continue_split = 1;
+	else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) ||
+			(qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END))
+		continue_split = 1;
+
+	/*
+	 * All calls come through dwc_otg_hcd_handle_hc_n_intr which takes the
+	 * hcd lock, therefore we use the unlocked internal versions of
+	 * qtd_remove and qh_deactivate to avoid recursive locking.
+	 */
+	if (free_qtd) {
+		/*
+		 * Note that this was previously a call to
+		 * dwc_otg_hcd_qtd_remove_and_free(qtd), which frees the qtd.
+		 * However, that call frees the qtd memory, and we continue in
+		 * the interrupt logic to access it many more times, including
+		 * writing to it.  With slub debugging on, it is clear that we
+		 * were writing to memory we had freed.
+		 * Call this instead, and now I have moved the freeing of the
+		 * memory to the end of processing this interrupt.
+		 */
+		__dwc_otg_hcd_qtd_remove(hcd, qtd, qh);
+
+		continue_split = 0;
+	}
+	qh->channel = NULL;
+	qh->qtd_in_process = NULL;
+	__dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
+}
+
+/**
+ * Releases a host channel for use by other transfers. Attempts to select and
+ * queue more transactions since at least one host channel is available.
+ *
+ * @param hcd The HCD state structure.
+ * @param hc The host channel to release.
+ * @param qtd The QTD associated with the host channel. This QTD may be freed
+ * if the transfer is complete or an error has occurred.
+ * @param halt_status Reason the channel is being released. This status
+ * determines the actions taken by this function.
+ * @param must_free Out parameter: set to 1 when the caller must free the
+ * QTD after interrupt processing finishes (the QTD is only unlinked here,
+ * not freed -- see the note in deactivate_qh).
+ */
+
+static void release_channel(struct dwc_otg_hcd *hcd,
+		struct dwc_hc *hc, struct dwc_otg_qtd *qtd,
+		enum dwc_otg_halt_status halt_status, int *must_free) {
+
+	enum dwc_otg_transaction_type tr_type;
+	int free_qtd;
+	struct dwc_otg_qh *qh;
+	int deact = 1;
+	int retry_delay = 1;
+
+	WARN_ON(!in_interrupt());
+
+	DWC_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d\n", __func__,
+		      hc->hc_num, halt_status);
+
+	switch (halt_status) {
+	case DWC_OTG_HC_XFER_NYET:
+	case DWC_OTG_HC_XFER_NAK:
+		/* NYET retries come back faster than NAK retries */
+		if (halt_status == DWC_OTG_HC_XFER_NYET)
+			retry_delay = nyet_deferral_delay;
+		else
+			retry_delay = nak_deferral_delay;
+
+		free_qtd = 0;
+		/* Defer split retries instead of retrying immediately */
+		if (deferral_on && hc->do_split) {
+			qh = hc->qh;
+			if (qh)
+				deact = dwc_otg_hcd_qh_deferr(hcd,
+						qh, retry_delay);
+		}
+		break;
+
+	case DWC_OTG_HC_XFER_URB_COMPLETE:
+		free_qtd = 1;
+		break;
+	case DWC_OTG_HC_XFER_AHB_ERR:
+	case DWC_OTG_HC_XFER_STALL:
+	case DWC_OTG_HC_XFER_BABBLE_ERR:
+		free_qtd = 1;
+		break;
+	case DWC_OTG_HC_XFER_XACT_ERR:
+		/* Give up after three consecutive transaction errors */
+		if (qtd->error_count >= 3) {
+			DWC_DEBUGPL(DBG_HCDV, "  Complete URB with "
+					"transaction error\n");
+			free_qtd = 1;
+			dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO);
+		} else
+			free_qtd = 0;
+
+		break;
+	case DWC_OTG_HC_XFER_URB_DEQUEUE:
+		/*
+		 * The QTD has already been removed and the QH has been
+		 * deactivated. Don't want to do anything except release the
+		 * host channel and try to queue more transfers.
+		 */
+		goto cleanup;
+	case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+		DWC_ERROR("%s: No halt_status, channel %d\n", __func__,
+			   hc->hc_num);
+		free_qtd = 0;
+		break;
+	default:
+		free_qtd = 0;
+		break;
+	}
+	*must_free = free_qtd;
+	if (deact)
+		deactivate_qh(hcd, hc->qh, free_qtd);
+
+cleanup:
+	/*
+	 * Release the host channel for use by other transfers. The cleanup
+	 * function clears the channel interrupt enables and conditions, so
+	 * there's no need to clear the Channel Halted interrupt separately.
+	 */
+	dwc_otg_hc_cleanup(hcd->core_if, hc);
+	list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
+	switch (hc->ep_type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+		hcd->non_periodic_channels--;
+		/* Wake anyone waiting for the controller to go idle */
+		if (dwc_otg_hcd_idle(hcd))
+			wake_up_interruptible(&hcd->idleq);
+		break;
+	default:
+		/*
+		 * Don't release reservations for periodic channels here.
+		 * That's done when a periodic transfer is descheduled (i.e.
+		 * when the QH is removed from the periodic schedule).
+		 */
+		break;
+	}
+
+	/* Try to queue more transfers now that there's a free channel */
+	tr_type = __dwc_otg_hcd_select_transactions(hcd, LOCKED);
+	if (tr_type != DWC_OTG_TRANSACTION_NONE)
+		dwc_otg_hcd_queue_transactions(hcd, tr_type);
+}
+
+/**
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+		struct dwc_otg_qtd *qtd, enum dwc_otg_halt_status halt_status,
+		int *must_free)
+{
+	/* DMA mode: the core halts the channel itself, just release it */
+	if (hcd->core_if->dma_enable) {
+		release_channel(hcd, hc, qtd, halt_status, must_free);
+		return;
+	}
+
+	/* Slave mode processing... */
+	dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
+	if (hc->halt_on_queue) {
+		union gintmsk_data gintmsk = {.d32 = 0};
+		struct dwc_otg_core_global_regs __iomem *global_regs;
+		global_regs = hcd->core_if->core_global_regs;
+		if (hc->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+			hc->ep_type == USB_ENDPOINT_XFER_BULK) {
+			/*
+			 * Make sure the Non-periodic Tx FIFO empty interrupt
+			 * is enabled so that the non-periodic schedule will
+			 * be processed.
+			 */
+			gintmsk.b.nptxfempty = 1;
+			dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+		} else {
+			/*
+			 * Move the QH from the periodic queued schedule to
+			 * the periodic assigned schedule. This allows the
+			 * halt to be queued when the periodic schedule is
+			 * processed.
+			 */
+			list_move(&hc->qh->qh_list_entry,
+					&hcd->periodic_sched_assigned);
+
+			/*
+			 * Make sure the Periodic Tx FIFO Empty interrupt is
+			 * enabled so that the periodic schedule will be
+			 * processed.
+			 */
+			gintmsk.b.ptxfempty = 1;
+			dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+		}
+	}
+}
+
+/**
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ *
+ * Resets the QTD error count, records a trailing NYET in the QH ping
+ * state, then halts (IN) or releases (OUT) the channel.
+ */
+static void complete_non_periodic_xfer(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, enum dwc_otg_halt_status halt_status,
+	int *must_free)
+{
+	union hcint_data hcint;
+	qtd->error_count = 0;
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+	if (hcint.b.nyet) {
+		/*
+		* Got a NYET on the last transaction of the transfer. This
+		* means that the endpoint should be in the PING state at the
+		* beginning of the next transfer.
+		*/
+		hc->qh->ping_state = 1;
+		clear_hc_int(hc_regs, nyet);
+	}
+
+	/*
+	 * Always halt and release the host channel to make it available for
+	 * more transfers. There may still be more phases for a control
+	 * transfer or more data packets for a bulk transfer at this point,
+	 * but the host channel is still halted. A channel will be reassigned
+	 * to the transfer when the non-periodic schedule is processed after
+	 * the channel is released. This allows transactions to be queued
+	 * properly via dwc_otg_hcd_queue_transactions, which also enables the
+	 * Tx FIFO Empty interrupt if necessary.
+	 */
+
+	if (hc->ep_is_in) {
+		/*
+		* IN transfers in Slave mode require an explicit disable to
+		* halt the channel. (In DMA mode, this call simply releases
+		* the channel.)
+		*/
+		halt_channel(hcd, hc, qtd, halt_status, must_free);
+	} else {
+		/*
+		* The channel is automatically disabled by the core for OUT
+		* transfers in Slave mode.
+		*/
+		release_channel(hcd, hc, qtd, halt_status, must_free);
+	}
+}
+
+/**
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ *
+ * Resets the QTD error count, then either releases the channel (OUT, or
+ * IN with all packets done -- the core halts the channel itself in those
+ * cases) or halts it to flush outstanding Tx queue requests.
+ */
+static void complete_periodic_xfer(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, enum dwc_otg_halt_status halt_status,
+	int *must_free)
+{
+	union hctsiz_data hctsiz;
+	qtd->error_count = 0;
+	hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+	if (!hc->ep_is_in || hctsiz.b.pktcnt == 0)
+		/* Core halts channel in these cases. */
+		release_channel(hcd, hc, qtd, halt_status, must_free);
+	else
+		/* Flush any outstanding requests from the Tx queue. */
+		halt_channel(hcd, hc, qtd, halt_status, must_free);
+}
+
+static int handle_xfercomp_isoc_split_in(
+				struct dwc_otg_hcd *hcd,
+				struct dwc_hc *hc,
+				struct dwc_otg_hc_regs __iomem *hc_regs,
+				struct dwc_otg_qtd *qtd,
+				int *must_free)
+{
+	u32 len;
+	struct usb_iso_packet_descriptor *frame_desc;
+	frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index];
+
+	len = get_actual_xfer_length(hc, hc_regs, qtd,
+					DWC_OTG_HC_XFER_COMPLETE,
+					NULL);
+
+	if (!len) {
+		qtd->complete_split = 0;
+		qtd->isoc_split_offset = 0;
+		return 0;
+	}
+	frame_desc->actual_length += len;
+
+	if (hc->align_buff && len)
+		memcpy(qtd->urb->transfer_buffer + frame_desc->offset +
+				qtd->isoc_split_offset,
+				hc->qh->dw_align_buf,
+				len);
+
+	qtd->isoc_split_offset += len;
+
+	if (frame_desc->length == frame_desc->actual_length) {
+		frame_desc->status = 0;
+		qtd->isoc_frame_index++;
+		qtd->complete_split = 0;
+		qtd->isoc_split_offset = 0;
+	}
+
+	if (qtd->isoc_frame_index == qtd->urb->number_of_packets) {
+		dwc_otg_hcd_complete_urb(hcd, qtd->urb, 0);
+		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE,
+				must_free);
+	} else
+		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS,
+				must_free);
+
+	return 1; /* Indicates that channel released */
+}
+/**
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_xfercomp_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	int urb_xfer_done;
+	enum dwc_otg_halt_status halt_status = DWC_OTG_HC_XFER_COMPLETE;
+	struct urb *urb = qtd->urb;
+	int pipe_type = usb_pipetype(urb->pipe);
+	int status = -EINPROGRESS;
+
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		      "Transfer Complete--\n", hc->hc_num);
+
+	if (hcd->core_if->dma_desc_enable) {
+		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
+		if (pipe_type == PIPE_ISOCHRONOUS) {
+			/* Do not disable the interrupt, just clear it */
+			clear_hc_int(hc_regs, xfercomp);
+			return 1;
+		}
+		goto handle_xfercomp_done;
+	}
+	/*
+	 * Handle xfer complete on CSPLIT.
+	 */
+	if (hc->qh->do_split) {
+		if ((hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+				&& hc->ep_is_in && hcd->core_if->dma_enable) {
+			if (qtd->complete_split &&
+				handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
+						qtd, must_free))
+
+				goto handle_xfercomp_done;
+		} else
+			qtd->complete_split = 0;
+	}
+
+	/* Update the QTD and URB states. */
+	switch (pipe_type) {
+
+	case PIPE_CONTROL:
+		switch (qtd->control_phase) {
+
+		case DWC_OTG_CONTROL_SETUP:
+			if (urb->transfer_buffer_length > 0)
+				qtd->control_phase = DWC_OTG_CONTROL_DATA;
+			else
+				qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+
+			DWC_DEBUGPL(DBG_HCDV,
+				     "  Control setup transaction done\n");
+			halt_status = DWC_OTG_HC_XFER_COMPLETE;
+			break;
+
+		case DWC_OTG_CONTROL_DATA:
+			urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs,
+					urb, qtd, &status);
+			if (urb_xfer_done) {
+				qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+				DWC_DEBUGPL(DBG_HCDV, "  Control data "
+						"transfer done\n");
+			} else
+				dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+
+			halt_status = DWC_OTG_HC_XFER_COMPLETE;
+			break;
+
+		case DWC_OTG_CONTROL_STATUS:
+			DWC_DEBUGPL(DBG_HCDV, "  Control transfer complete\n");
+			if (status == -EINPROGRESS)
+				status = 0;
+
+			dwc_otg_hcd_complete_urb(hcd, urb, status);
+			halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+			break;
+		}
+		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd,
+					     halt_status, must_free);
+		break;
+
+	case PIPE_BULK:
+		DWC_DEBUGPL(DBG_HCDV, "  Bulk transfer complete\n");
+		urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb,
+				qtd, &status);
+		if (urb_xfer_done) {
+			dwc_otg_hcd_complete_urb(hcd, urb, status);
+			halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+		} else
+			halt_status = DWC_OTG_HC_XFER_COMPLETE;
+
+		dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd,
+				halt_status, must_free);
+		break;
+
+	case PIPE_INTERRUPT:
+		DWC_DEBUGPL(DBG_HCDV, "  Interrupt transfer complete\n");
+		update_urb_state_xfer_comp(hc, hc_regs, urb, qtd, &status);
+		/*
+		 * Interrupt URB is done on the first transfer complete
+		 * interrupt.
+		 */
+		dwc_otg_hcd_complete_urb(hcd, urb, status);
+		dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+		complete_periodic_xfer(hcd, hc, hc_regs,
+				qtd, DWC_OTG_HC_XFER_URB_COMPLETE, must_free);
+		break;
+
+	case PIPE_ISOCHRONOUS:
+		DWC_DEBUGPL(DBG_HCDV, "  Isochronous transfer complete\n");
+		if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL)
+			halt_status = update_isoc_urb_state(hcd, hc, hc_regs,
+					qtd, DWC_OTG_HC_XFER_COMPLETE);
+		complete_periodic_xfer(hcd, hc, hc_regs, qtd,
+					halt_status, must_free);
+		break;
+	}
+
+handle_xfercomp_done:
+	disable_hc_int(hc_regs, xfercompl);
+	return 1;
+}
+
+/**
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int handle_hc_stall_intr(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+		struct dwc_otg_hc_regs __iomem *hc_regs,
+		struct dwc_otg_qtd *qtd, int *must_free)
+{
+	struct urb *urb = qtd->urb;
+	int pipe_type = usb_pipetype(urb->pipe);
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		      "STALL Received--\n", hc->hc_num);
+	if (hcd->core_if->dma_desc_enable) {
+		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+				DWC_OTG_HC_XFER_STALL);
+		goto handle_stall_done;
+	}
+	if (pipe_type == PIPE_CONTROL)
+		dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPIPE);
+
+	if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
+
+		dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPIPE);
+		/*
+		 * USB protocol requires resetting the data toggle for bulk
+		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+		 * setup command is issued to the endpoint. Anticipate the
+		 * CLEAR_FEATURE command since a STALL has occurred and reset
+		 * the data toggle now.
+		 */
+		hc->qh->data_toggle = 0;
+	}
+	halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL, must_free);
+
+handle_stall_done:
+	disable_hc_int(hc_regs, stall);
+	return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void update_urb_state_xfer_intr(struct dwc_hc *hc,
+		struct dwc_otg_hc_regs __iomem *hc_regs, struct urb *urb,
+		struct dwc_otg_qtd *qtd, enum dwc_otg_halt_status halt_status)
+{
+	u32 bytes_transferred =
+		get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
+	/* non DWORD-aligned buffer case handling. */
+	if (hc->align_buff && bytes_transferred && hc->ep_is_in)
+		memcpy(urb->transfer_buffer + urb->actual_length,
+				hc->qh->dw_align_buf,
+				bytes_transferred);
+
+	urb->actual_length += bytes_transferred;
+
+#ifdef DEBUG
+	{
+		union hctsiz_data hctsiz;
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+				__func__, (hc->ep_is_in ? "IN" : "OUT"),
+				hc->hc_num);
+		DWC_DEBUGPL(DBG_HCDV, "  hc->start_pkt_count %d\n",
+				hc->start_pkt_count);
+		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
+		DWC_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
+		DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n",
+				bytes_transferred);
+		DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
+				urb->actual_length);
+		DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
+				urb->transfer_buffer_length);
+	}
+#endif
+}
+
+/**
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static int handle_hc_nak_intr(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+			      struct dwc_otg_hc_regs __iomem *hc_regs,
+			      struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+			"NAK Received--\n", hc->hc_num);
+	/*
+	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+	 * interrupt.  Re-start the SSPLIT transfer.
+	 */
+	if (hc->do_split) {
+		if (hc->complete_split)
+			qtd->error_count = 0;
+
+		qtd->complete_split = 0;
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		goto handle_nak_done;
+	}
+
+	switch (usb_pipetype(qtd->urb->pipe)) {
+
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		if (hcd->core_if->dma_enable && hc->ep_is_in) {
+			/*
+			 * NAK interrupts are enabled on bulk/control IN
+			 * transfers in DMA mode for the sole purpose of
+			 * resetting the error count after a transaction error
+			 * occurs. The core will continue transferring data.
+			 */
+			qtd->error_count = 0;
+			goto handle_nak_done;
+		}
+
+		/*
+		 * NAK interrupts normally occur during OUT transfers in DMA
+		 * or Slave mode. For IN transfers, more requests will be
+		 * queued as request queue space is available.
+		 */
+
+		qtd->error_count = 0;
+		if (!hc->qh->ping_state) {
+			update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
+						    qtd, DWC_OTG_HC_XFER_NAK);
+			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+			if (qtd->urb->dev->speed == USB_SPEED_HIGH)
+				hc->qh->ping_state = 1;
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will
+		 * start/continue.
+		 */
+
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		break;
+	case PIPE_INTERRUPT:
+		qtd->error_count = 0;
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		break;
+	case PIPE_ISOCHRONOUS:
+		/* Should never get called for isochronous transfers. */
+		BUG();
+		break;
+	}
+
+handle_nak_done:
+	disable_hc_int(hc_regs, nak);
+
+	return 1;
+}
+
+/**
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static int handle_hc_ack_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "ACK Received--\n", hc->hc_num);
+	if (hc->do_split) {
+		/*
+		 * Handle ACK on SSPLIT.
+		 * ACK should not occur in CSPLIT.
+		 */
+		if ((!hc->ep_is_in) &&
+				(hc->data_pid_start != DWC_OTG_HC_PID_SETUP))
+			qtd->ssplit_out_xfer_count = hc->xfer_len;
+
+		if (!(hc->ep_type == USB_ENDPOINT_XFER_ISOC && !hc->ep_is_in))
+			/* Don't need complete for isochronous out transfer */
+			qtd->complete_split = 1;
+
+		/* ISOC OUT */
+		if ((hc->ep_type == USB_ENDPOINT_XFER_ISOC) && !hc->ep_is_in) {
+			switch (hc->xact_pos) {
+
+			case DWC_HCSPLIT_XACTPOS_ALL:
+				break;
+			case DWC_HCSPLIT_XACTPOS_END:
+				qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+				qtd->isoc_split_offset = 0;
+				break;
+			case DWC_HCSPLIT_XACTPOS_BEGIN:
+			case DWC_HCSPLIT_XACTPOS_MID: {
+				/*
+				 * For BEGIN or MID, calculate the length for
+				 * the next microframe to determine the correct
+				 * SSPLIT token, either MID or END.
+				 */
+				struct usb_iso_packet_descriptor *frame_desc;
+				frame_desc =
+					&qtd->urb->iso_frame_desc[qtd->isoc_frame_index];
+
+				qtd->isoc_split_offset += 188;
+				if ((frame_desc->length -
+						qtd->isoc_split_offset) <= 188)
+					qtd->isoc_split_pos =
+						DWC_HCSPLIT_XACTPOS_END;
+				else
+					qtd->isoc_split_pos =
+						DWC_HCSPLIT_XACTPOS_MID;
+
+				}
+				break;
+			}
+		} else
+			halt_channel(hcd, hc, qtd,
+					DWC_OTG_HC_XFER_ACK, must_free);
+
+	} else {
+		qtd->error_count = 0;
+		if (hc->qh->ping_state) {
+			hc->qh->ping_state = 0;
+			/*
+			 * Halt the channel so the transfer can be re-started
+			 * from the appropriate point. This only happens in
+			 * Slave mode. In DMA mode, the ping_state is cleared
+			 * when the transfer is started because the core
+			 * automatically executes the PING, then the transfer.
+			 */
+			halt_channel(hcd, hc, qtd,
+					DWC_OTG_HC_XFER_ACK, must_free);
+		}
+	}
+
+	/*
+	 * If the ACK occurred when _not_ in the PING state, let the channel
+	 * continue transferring data after clearing the error count.
+	 */
+
+	disable_hc_int(hc_regs, ack);
+
+	return 1;
+}
+
+/**
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_nyet_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "NYET Received--\n", hc->hc_num);
+
+	/*
+	 * NYET on CSPLIT
+	 * re-do the CSPLIT immediately on non-periodic
+	 */
+	if (hc->do_split && hc->complete_split) {
+		if (hc->ep_is_in && (hc->ep_type == USB_ENDPOINT_XFER_ISOC)
+				&& hcd->core_if->dma_enable) {
+			qtd->complete_split = 0;
+			qtd->isoc_split_offset = 0;
+			if (++qtd->isoc_frame_index ==
+					qtd->urb->number_of_packets) {
+				dwc_otg_hcd_complete_urb(hcd, qtd->urb, 0);
+				release_channel(hcd, hc, qtd,
+						DWC_OTG_HC_XFER_URB_COMPLETE,
+						must_free);
+			} else
+				release_channel(hcd, hc, qtd,
+						DWC_OTG_HC_XFER_NO_HALT_STATUS,
+						must_free);
+			goto handle_nyet_done;
+		}
+		if ((hc->ep_type == USB_ENDPOINT_XFER_INT) ||
+				(hc->ep_type == USB_ENDPOINT_XFER_ISOC)) {
+			int frnum =
+				dwc_otg_hcd_get_frame_number(
+						dwc_otg_hcd_to_hcd(hcd)
+						);
+
+			if (dwc_full_frame_num(frnum) !=
+				dwc_full_frame_num(hc->qh->sched_frame)) {
+
+				/*
+				 * No longer in the same full speed frame.
+				 * Treat this as a transaction error.
+				 */
+#if 0
+				/** @todo Fix system performance so this can
+				 * be treated as an error. Right now complete
+				 * splits cannot be scheduled precisely enough
+				 * due to other system activity, so this error
+				 * occurs regularly in Slave mode.
+				 */
+				qtd->error_count++;
+
+#endif
+				qtd->complete_split = 0;
+				halt_channel(hcd, hc, qtd,
+						DWC_OTG_HC_XFER_XACT_ERR,
+						must_free);
+
+				/** @todo add support for isoc release */
+				goto handle_nyet_done;
+			}
+		}
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET, must_free);
+		goto handle_nyet_done;
+	}
+	hc->qh->ping_state = 1;
+	qtd->error_count = 0;
+	update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
+					DWC_OTG_HC_XFER_NYET);
+	dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+
+	/*
+	 * Halt the channel and re-start the transfer so the PING
+	 * protocol will start.
+	 */
+	halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET, must_free);
+
+handle_nyet_done:
+	disable_hc_int(hc_regs, nyet);
+
+	return 1;
+}
+
+/**
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int handle_hc_babble_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+			"Babble Error--\n", hc->hc_num);
+
+	if (hcd->core_if->dma_desc_enable) {
+		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+				DWC_OTG_HC_XFER_BABBLE_ERR);
+		goto handle_babble_done;
+	}
+	if (hc->ep_type != USB_ENDPOINT_XFER_ISOC) {
+		dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW);
+		halt_channel(hcd, hc, qtd,
+				DWC_OTG_HC_XFER_BABBLE_ERR, must_free);
+	} else {
+		enum dwc_otg_halt_status halt_status;
+		halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+					  DWC_OTG_HC_XFER_BABBLE_ERR);
+		halt_channel(hcd, hc, qtd, halt_status, must_free);
+	}
+
+handle_babble_done:
+	disable_hc_int(hc_regs, bblerr);
+	return 1;
+}
+
+/**
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int handle_hc_ahberr_intr(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+				 struct dwc_otg_hc_regs __iomem *hc_regs,
+				 struct dwc_otg_qtd *qtd)
+{
+	union hcchar_data hcchar;
+	union hcsplt_data hcsplt;
+	union hctsiz_data hctsiz;
+	u32 hcdma;
+	struct urb *urb = qtd->urb;
+
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+			"AHB Error--\n", hc->hc_num);
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+	hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+	hcdma = dwc_read_reg32(&hc_regs->hcdma);
+	DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
+	DWC_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+	DWC_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
+	DWC_ERROR("  Device address: %d\n", usb_pipedevice(urb->pipe));
+	DWC_ERROR("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+			(usb_pipein(urb->pipe) ? "IN" : "OUT"));
+	DWC_ERROR("  Endpoint type: %s\n", ({
+			char *pipetype;
+			switch (usb_pipetype(urb->pipe)) {
+			case PIPE_CONTROL:
+				pipetype = "CONTROL"; break;
+			case PIPE_BULK:
+				pipetype = "BULK"; break;
+			case PIPE_INTERRUPT:
+				pipetype = "INTERRUPT"; break;
+			case PIPE_ISOCHRONOUS:
+				pipetype = "ISOCHRONOUS"; break;
+			default:
+				pipetype = "UNKNOWN"; break;
+			};
+			pipetype;
+	})) ;
+	DWC_ERROR("  Speed: %s\n", ({
+			char *speed;
+			switch (urb->dev->speed) {
+			case USB_SPEED_HIGH:
+				speed = "HIGH"; break;
+			case USB_SPEED_FULL:
+				speed = "FULL"; break;
+			case USB_SPEED_LOW:
+				speed = "LOW"; break;
+			default:
+				speed = "UNKNOWN"; break;
+			};
+			speed;
+	})) ;
+	DWC_ERROR("  Max packet size: %d\n",
+		usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+	DWC_ERROR("  Data buffer length: %d\n", urb->transfer_buffer_length);
+	DWC_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
+		urb->transfer_buffer, (void *)(u32)urb->transfer_dma);
+	DWC_ERROR("  Setup buffer: %p, Setup DMA: %p\n", urb->setup_packet,
+		(void *)(u32)urb->setup_dma);
+	DWC_ERROR("  Interval: %d\n", urb->interval);
+	/* Core halts the channel for Descriptor DMA mode */
+	if (hcd->core_if->dma_desc_enable) {
+		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+				DWC_OTG_HC_XFER_AHB_ERR);
+		goto handle_ahberr_done;
+	}
+	dwc_otg_hcd_complete_urb(hcd, urb, -EIO);
+
+	/*
+	 * Force a channel halt. Don't call halt_channel because that won't
+	 * write to the HCCHARn register in DMA mode to force the halt.
+	 */
+
+	dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);
+
+handle_ahberr_done:
+	disable_hc_int(hc_regs, ahberr);
+	return 1;
+}
+
+/**
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_xacterr_intr(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+				  struct dwc_otg_hc_regs __iomem *hc_regs,
+				  struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "Transaction Error--\n", hc->hc_num);
+	if (hcd->core_if->dma_desc_enable) {
+		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+				DWC_OTG_HC_XFER_XACT_ERR);
+		goto handle_xacterr_done;
+	}
+	switch (usb_pipetype(qtd->urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		qtd->error_count++;
+		if (!hc->qh->ping_state) {
+			update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
+				    qtd, DWC_OTG_HC_XFER_XACT_ERR);
+			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+			if (!hc->ep_is_in && qtd->urb->dev->speed ==
+					USB_SPEED_HIGH)
+				hc->qh->ping_state = 1;
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will start.
+		 */
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+		break;
+
+	case PIPE_INTERRUPT:
+		qtd->error_count++;
+		if ((hc->do_split) && (hc->complete_split))
+			qtd->complete_split = 0;
+
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+		break;
+	case PIPE_ISOCHRONOUS: {
+		enum dwc_otg_halt_status halt_status;
+		halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+					  DWC_OTG_HC_XFER_XACT_ERR);
+		halt_channel(hcd, hc, qtd, halt_status, must_free);
+		break;
+	}
+	}
+
+handle_xacterr_done:
+	disable_hc_int(hc_regs, xacterr);
+	return 1;
+}
+
+/**
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static int handle_hc_frmovrun_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "Frame Overrun--\n", hc->hc_num);
+	switch (usb_pipetype(qtd->urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		break;
+	case PIPE_INTERRUPT:
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN,
+				must_free);
+		break;
+	case PIPE_ISOCHRONOUS: {
+		enum dwc_otg_halt_status halt_status;
+		halt_status = update_isoc_urb_state(hcd, hc, hc_regs,
+				qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
+		halt_channel(hcd, hc, qtd, halt_status, must_free);
+
+		break;
+	}
+	}
+	disable_hc_int(hc_regs, frmovrun);
+	return 1;
+}
+
+/**
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_datatglerr_intr(struct dwc_otg_hcd *hcd,
+	struct dwc_hc *hc, struct dwc_otg_hc_regs __iomem *hc_regs,
+	struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "Data Toggle Error--\n", hc->hc_num);
+	if (hc->ep_is_in)
+		qtd->error_count = 0;
+	else
+		DWC_ERROR("Data Toggle Error on OUT transfer,"
+			   "channel %d\n", hc->hc_num);
+
+	disable_hc_int(hc_regs, datatglerr);
+	return 1;
+}
+
+#ifdef DEBUG
+/**
+ * This function is for debug only. It checks that a valid halt status is set
+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise.
+ */
+static inline int halt_status_ok(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+				 struct dwc_otg_hc_regs __iomem *hc_regs,
+				 struct dwc_otg_qtd *qtd)
+{
+	union hcchar_data hcchar;
+	union hctsiz_data hctsiz;
+	union hcint_data hcint;
+	union hcintmsk_data hcintmsk;
+	union hcsplt_data hcsplt;
+
+	if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
+		/*
+		 * This code is here only as a check. This condition should
+		 * never happen. Ignore the halt if it does occur.
+		 */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+		hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		DWC_WARN("%s: hc->halt_status == "
+				"DWC_OTG_HC_XFER_NO_HALT_STATUS, "
+				"channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+				"hcint 0x%08x, hcintmsk 0x%08x, "
+				"hcsplt 0x%08x, qtd->complete_split %d\n",
+				__func__, hc->hc_num, hcchar.d32, hctsiz.d32,
+				hcint.d32, hcintmsk.d32, hcsplt.d32,
+				qtd->complete_split);
+		DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
+				__func__, hc->hc_num);
+		DWC_WARN("\n");
+		clear_hc_int(hc_regs, chhltd);
+		return 0;
+	}
+
+	/*
+	 * This code is here only as a check. hcchar.chdis should
+	 * never be set when the halt interrupt occurs. Halt the
+	 * channel again if it does occur.
+	 */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	if (hcchar.b.chdis) {
+		DWC_WARN("%s: hcchar.chdis set unexpectedly, "
+			"hcchar 0x%08x, trying to halt again\n", __func__,
+			hcchar.d32);
+		clear_hc_int(hc_regs, chhltd);
+		hc->halt_pending = 0;
+		halt_channel(hcd, hc, qtd, hc->halt_status, NULL);
+		return 0;
+	}
+	return 1;
+}
+#endif	/*  */
+
+/**
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void
+handle_hc_chhltd_intr_dma(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+			  struct dwc_otg_hc_regs __iomem *hc_regs,
+			  struct dwc_otg_qtd *qtd,
+		int *must_free)
+{
+	union hcint_data hcint;
+	union hcintmsk_data hcintmsk;
+	int out_nak_enh = 0;
+
+	/* For core with OUT NAK enhancement, the flow for high-
+	 * speed CONTROL/BULK OUT is handled a little differently.
+	 */
+	if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
+		if (hc->speed == USB_SPEED_HIGH && !hc->ep_is_in &&
+				(hc->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+				hc->ep_type == USB_ENDPOINT_XFER_BULK)) {
+			out_nak_enh = 1;
+		}
+	}
+	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE
+			|| (hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR
+			&& !hcd->core_if->dma_desc_enable)) {
+
+		/*
+		 * Just release the channel. A dequeue can happen on a
+		 * transfer timeout. In the case of an AHB Error, the channel
+		 * was forced to halt because there's no way to gracefully
+		 * recover.
+		 */
+		if (hcd->core_if->dma_desc_enable)
+			dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+					hc->halt_status);
+		else
+			release_channel(hcd, hc, qtd, hc->halt_status,
+					must_free);
+		return;
+	}
+
+	/* Read the HCINTn register to determine the cause for the halt. */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+	hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+	if (hcint.b.xfercomp) {
+
+		/** @todo This is here because of a possible hardware bug. Spec
+		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+		 * interrupt w/ACK bit set should occur, but I only see the
+		 * XFERCOMP bit, even with it masked out.  This is a workaround
+		 * for that behavior.  Should fix this when hardware is fixed.
+		 */
+		if ((hc->ep_type == USB_ENDPOINT_XFER_ISOC) && (!hc->ep_is_in))
+			handle_hc_ack_intr(hcd, hc, hc_regs, qtd, must_free);
+
+		handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd, must_free);
+	} else if (hcint.b.stall) {
+		handle_hc_stall_intr(hcd, hc, hc_regs, qtd, must_free);
+	} else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
+		if (out_nak_enh) {
+			if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
+				DWC_PRINT("XactErr with NYET/NAK/ACK\n");
+				qtd->error_count = 0;
+			} else
+				DWC_PRINT("XactErr without NYET/NAK/ACK\n");
+		}
+		/*
+		 * Must handle xacterr before nak or ack. Could get a xacterr
+		 * at the same time as either of these on a BULK/CONTROL OUT
+		 * that started with a PING. The xacterr takes precedence.
+		 */
+		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd, must_free);
+	} else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
+		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd, must_free);
+	} else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
+		handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
+	} else if (hcint.b.bblerr) {
+		handle_hc_babble_intr(hcd, hc, hc_regs, qtd , must_free);
+	} else if (hcint.b.frmovrun) {
+		handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd , must_free);
+	} else if (!out_nak_enh) {
+		if (hcint.b.nyet) {
+			/*
+			 * Must handle nyet before nak or ack. Could get a nyet
+			 * at the same time as either of those on a BULK/CONTROL
+			 * OUT that started with a PING. The nyet takes
+			 * precedence.
+			 */
+			handle_hc_nyet_intr(hcd, hc, hc_regs, qtd, must_free);
+		} else if (hcint.b.nak && !hcintmsk.b.nak) {
+			/*
+			 * If nak is not masked, it's because a non-split IN
+			 * transfer is in an error state. In that case, the nak
+			 * is handled by the nak interrupt handler, not here.
+			 * Handle nak here for BULK/CONTROL OUT transfers,
+			 * which halt on a NAK to allow rewinding the buffer
+			 * pointer.
+			 */
+			handle_hc_nak_intr(hcd, hc, hc_regs, qtd, must_free);
+		} else if (hcint.b.ack && !hcintmsk.b.ack) {
+			/*
+			 * If ack is not masked, it's because a non-split IN
+			 * transfer is in an error state. In that case, the ack
+			 * is handled by the ack interrupt handler, not here.
+			 * Handle ack here for split transfers. Start splits
+			 * halt on ACK.
+			 */
+			handle_hc_ack_intr(hcd, hc, hc_regs, qtd, must_free);
+		} else {
+			if (hc->ep_type == USB_ENDPOINT_XFER_INT ||
+				hc->ep_type == USB_ENDPOINT_XFER_ISOC) {
+				/*
+				 * A periodic transfer halted with no other
+				 * channel interrupts set. Assume it was halted
+				 * by the core because it could not be completed
+				 * in its scheduled (micro)frame.
+				 */
+#ifdef DEBUG
+				DWC_PRINT("%s: Halt channel %d (assume "
+						"incomplete periodic "
+						"transfer)\n",
+						__func__, hc->hc_num);
+#endif
+				halt_channel(hcd, hc, qtd,
+					DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE,
+					must_free);
+			} else {
+				DWC_ERROR("%s: Channel %d, DMA Mode -- "
+					"ChHltd set, but reason "
+					"for halting is unknown, nyet %d, "
+					"hcint 0x%08x, intsts 0x%08x\n",
+					__func__,
+					hc->hc_num,
+					hcint.b.nyet,
+					hcint.d32,
+					dwc_read_reg32(&hcd->
+							core_if->
+							core_global_regs->
+							gintsts));
+			}
+		}
+	} else {
+		DWC_PRINT("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
+			   hcint.d32);
+	}
+}
+
+/**
+ * Handles a host channel Channel Halted interrupt.
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static int handle_hc_chhltd_intr(struct dwc_otg_hcd *hcd, struct dwc_hc *hc,
+				 struct dwc_otg_hc_regs __iomem *hc_regs,
+				 struct dwc_otg_qtd *qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		     "Channel Halted--\n", hc->hc_num);
+	if (hcd->core_if->dma_enable)
+		handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd, must_free);
+	else {
+#ifdef DEBUG
+		if (!halt_status_ok(hcd, hc, hc_regs, qtd))
+			return 1;
+#endif
+		release_channel(hcd, hc, qtd, hc->halt_status, must_free);
+	}
+
+	return 1;
+}
+
+/** Handles interrupt for a specific Host Channel */
+int dwc_otg_hcd_handle_hc_n_intr(struct dwc_otg_hcd *dwc_otg_hcd, u32 num)
+{
+	int must_free = 0;
+	int retval = 0;
+	union hcint_data hcint;
+	union hcintmsk_data hcintmsk;
+	struct dwc_hc *hc;
+	struct dwc_otg_hc_regs __iomem *hc_regs;
+	struct dwc_otg_qtd *qtd;
+
+	DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
+
+	spin_lock(&dwc_otg_hcd->lock);
+
+	hc = dwc_otg_hcd->hc_ptr_array[num];
+	hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+	hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+
+	DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, "
+			"hcint&hcintmsk 0x%08x\n",
+		     hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+
+	hcint.d32 = hcint.d32 & hcintmsk.d32;
+
+	/*
+	 * The qtd_list can be empty, i.e. after a dequeue. In this case, lets
+	 * ensure qtd is NULL instead of somewhere around qh to catch potential
+	 * bugs, and WARN if any interrupts are active other than ChHltd.
+	 */
+	qtd = list_entry(hc->qh->qtd_list.next,
+			 struct dwc_otg_qtd, qtd_list_entry);
+	if (unlikely(list_empty(&hc->qh->qtd_list))) {
+		qtd = NULL;
+		WARN(hcint.d32 != 0x2,
+		     "non ChHltd irq active while qtd_list empty");
+	}
+
+	if (!dwc_otg_hcd->core_if->dma_enable) {
+		if ((hcint.b.chhltd) && (hcint.d32 != 0x2))
+			hcint.b.chhltd = 0;
+	}
+	if (hcint.b.xfercomp) {
+		retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc,
+				hc_regs, qtd, &must_free);
+		/*
+		 * If NYET occurred at same time as Xfer Complete, the NYET is
+		 * handled by the Xfer Complete interrupt handler. Don't want
+		 * to call the NYET interrupt handler in this case.
+		 */
+		hcint.b.nyet = 0;
+	}
+	if (hcint.b.chhltd)
+		retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.ahberr)
+		retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd);
+	if (hcint.b.stall)
+		retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.nak)
+		retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.ack)
+		retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.nyet)
+		retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.xacterr)
+		retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.bblerr)
+		retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.frmovrun)
+		retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	if (hcint.b.datatglerr)
+		retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs,
+				qtd, &must_free);
+	/*
+	 * Logic to free the qtd here, at the end of the hc intr
+	 * processing, if the handling of this interrupt determined
+	 * that it needs to be freed.
+	 */
+	if (must_free) {
+		/* Free the qtd here now that we are done using it. */
+		dwc_otg_hcd_qtd_free(qtd);
+	}
+
+	spin_unlock(&dwc_otg_hcd->lock);
+
+	return retval;
+}
+
+#endif	/* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c b/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c
new file mode 100644
index 0000000..e55daaa
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c
@@ -0,0 +1,855 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors.
+ */
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+static const int LOCKED = 1;
+
+/**
+ * Find whether the HCD is idle, with no non periodic or periodic channels.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @return true if no channels are in use.
+ */
+bool dwc_otg_hcd_idle(struct dwc_otg_hcd *hcd)
+{
+	return !hcd->non_periodic_channels && !hcd->periodic_channels;
+}
+
+/** Free each QTD in the QH's QTD-list then free the QH.  QH should already be
+ * removed from a list.  QTD list should already be empty if called from URB
+ * Dequeue.
+ *
+ * @param[in] qh The QH to free.
+ */
+void dwc_otg_hcd_qh_free(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			 int locked_already)
+{
+	struct dwc_otg_qtd *qtd;
+	struct list_head *pos, *list_temp;
+	unsigned long flags = 0;
+
+	/* Free each QTD in the QTD list */
+	if (!locked_already)
+		spin_lock_irqsave(&hcd->lock, flags);
+
+	list_for_each_safe(pos, list_temp, &qh->qtd_list) {
+		qtd = dwc_list_to_qtd(pos);
+		list_del(pos);
+		dwc_otg_hcd_qtd_free(qtd);
+	}
+	if (!locked_already)
+		spin_unlock_irqrestore(&hcd->lock, flags);
+
+	if (hcd->core_if->dma_desc_enable)
+		dwc_otg_hcd_qh_free_ddma(hcd, qh);
+	else if (qh->dw_align_buf) {
+		u32 buf_size;
+		if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
+			buf_size = 4096;
+		else
+			buf_size = hcd->core_if->core_params->max_transfer_size;
+		dma_free_coherent(hcd->dev, buf_size,
+				qh->dw_align_buf, qh->dw_align_buf_dma);
+	}
+
+	kfree(qh);
+	return;
+}
+#define BitStuffTime(bytecount)  ((8 * 7 * bytecount) / 6)
+#define HS_HOST_DELAY		5	/* nanoseconds */
+#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
+#define HUB_LS_SETUP		333	/* nanoseconds */
+
+#if 0
+static u32 calc_bus_time(int speed, int is_in, int is_isoc,
+					  int bytecount)
+{
+	unsigned long retval;
+
+	switch (speed) {
+	case USB_SPEED_HIGH:
+		if (is_isoc) {
+			retval =
+				((38 * 8 * 2083) +
+				(2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
+				HS_HOST_DELAY;
+		} else {
+			retval =
+				((55 * 8 * 2083) +
+				(2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
+				HS_HOST_DELAY;
+		}
+		break;
+	case USB_SPEED_FULL:
+		if (is_isoc) {
+			retval =
+				(8354 * (31 + 10 * BitStuffTime(bytecount))) /
+				1000;
+			if (is_in)
+				retval = 7268 + FS_LS_HOST_DELAY + retval;
+			else
+				retval = 6265 + FS_LS_HOST_DELAY + retval;
+		} else {
+			retval =
+				(8354 * (31 + 10 * BitStuffTime(bytecount))) /
+				1000;
+			retval = 9107 + FS_LS_HOST_DELAY + retval;
+		}
+		break;
+	case USB_SPEED_LOW:
+		if (is_in) {
+			retval =
+				(67667 * (31 + 10 * BitStuffTime(bytecount))) /
+				1000;
+			retval =
+				64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+				retval;
+		} else {
+			retval =
+				(66700 * (31 + 10 * BitStuffTime(bytecount))) /
+				1000;
+			retval =
+				64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+				retval;
+		}
+		break;
+	default:
+		DWC_WARN("Unknown device speed\n");
+		retval = -1;
+	}
+
+	return NS_TO_US(retval);
+}
+#endif
+/** Initializes a QH structure.
+ *
+ * @param[in] hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] qh The QH to init.
+ * @param[in] urb Holds the information about the device/endpoint that we need
+ * to initialize the QH. */
+#define SCHEDULE_SLOP 10
+void dwc_otg_hcd_qh_init(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+			 struct urb *urb)
+{
+	memset(qh, 0, sizeof(struct dwc_otg_qh));
+
+	/* Initialize QH */
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
+		break;
+	case PIPE_BULK:
+		qh->ep_type = USB_ENDPOINT_XFER_BULK;
+		break;
+	case PIPE_ISOCHRONOUS:
+		qh->ep_type = USB_ENDPOINT_XFER_ISOC;
+		break;
+	case PIPE_INTERRUPT:
+		qh->ep_type = USB_ENDPOINT_XFER_INT;
+		break;
+	}
+	qh->ep_is_in = usb_pipein(urb->pipe) ? 1 : 0;
+	qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+	qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe)));
+	INIT_LIST_HEAD(&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->qh_list_entry);
+	qh->channel = NULL;
+
+	/* FS/LS Endpoint on HS Hub
+	 * NOT virtual root hub */
+	qh->dev_speed = urb->dev->speed;
+	qh->do_split = 0;
+	if (((urb->dev->speed == USB_SPEED_LOW)
+		|| (urb->dev->speed == USB_SPEED_FULL))
+		&& (urb->dev->tt) && (urb->dev->tt->hub)
+		&& (urb->dev->tt->hub->devnum != 1)) {
+			DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub "
+					"addr %d, for port %d\n",
+					usb_pipeendpoint(urb->pipe),
+					urb->dev->tt->hub->devnum,
+					urb->dev->ttport);
+		qh->do_split = 1;
+	}
+
+	/* gives access to toggles */
+	qh->dev = urb->dev;
+
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT
+		|| qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+
+		/* Compute scheduling parameters once and save them. */
+		union hprt0_data hprt;
+
+		/** @todo Account for split transfers in the bus time. */
+		int bytecount =
+			dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp);
+		qh->usecs =
+			NS_TO_US(usb_calc_bus_time(urb->dev->speed,
+					usb_pipein(urb->pipe),
+					(qh->ep_type == USB_ENDPOINT_XFER_ISOC),
+					bytecount));
+
+		/* Start in a slightly future (micro)frame. */
+		qh->sched_frame =
+			dwc_frame_num_inc(hcd->frame_number, SCHEDULE_SLOP);
+		qh->interval = urb->interval;
+
+#if 0
+		/* Increase interrupt polling rate for debugging. */
+		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
+			qh->interval = 8;
+
+#endif	/*  */
+		hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0);
+		if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) &&
+			((urb->dev->speed == USB_SPEED_LOW) ||
+			(urb->dev->speed == USB_SPEED_FULL))) {
+			qh->interval *= 8;
+			qh->sched_frame |= 0x7;
+			qh->start_split_frame = qh->sched_frame;
+		}
+	}
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH  - qh = %p\n", qh);
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH  - Device Address = %d\n",
+			urb->dev->devnum);
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH  - Endpoint %d, %s\n",
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH  - Speed = %s\n", ({
+			char *speed;
+			switch (urb->dev->speed) {
+			case USB_SPEED_LOW:
+				speed = "low"; break;
+			case USB_SPEED_FULL:
+				speed = "full"; break;
+			case USB_SPEED_HIGH:
+				speed = "high"; break;
+			default:
+				speed = "?";
+				break;
+			};
+			speed;
+	}));
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH  - Type = %s\n", ({
+			char *type;
+			switch (qh->ep_type) {
+			case USB_ENDPOINT_XFER_ISOC:
+				type = "isochronous"; break;
+			case USB_ENDPOINT_XFER_INT:
+				type = "interrupt"; break;
+			case USB_ENDPOINT_XFER_CONTROL:
+				type = "control"; break;
+			case USB_ENDPOINT_XFER_BULK:
+				type = "bulk"; break;
+			default:
+				type = "?"; break;
+			};
+			type;
+	})) ;
+
+#ifdef DEBUG
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n",
+				qh->usecs);
+		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n",
+				qh->interval);
+	}
+
+#endif	/*  */
+	return;
+}
+/**
+ * This function allocates and initializes a QH.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ *
+ * @return Returns pointer to the newly allocated QH, or NULL on error. */
+struct dwc_otg_qh *dwc_otg_hcd_qh_create(struct dwc_otg_hcd *hcd,
+					 struct urb *urb)
+{
+	struct dwc_otg_qh *qh;
+
+	/* Allocate memory */
+	/** @todo add memflags argument */
+	qh = dwc_otg_hcd_qh_alloc();
+	if (qh == NULL)
+		return NULL;
+
+	dwc_otg_hcd_qh_init(hcd, qh, urb);
+
+	if (hcd->core_if->dma_desc_enable &&
+		(dwc_otg_hcd_qh_init_ddma(hcd, qh) < 0)) {
+		dwc_otg_hcd_qh_free(hcd, qh, LOCKED);
+		return NULL;
+	}
+
+	return qh;
+}
+
+
+
+/**
+ * Checks that a channel is available for a periodic transfer.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int periodic_channel_available(struct dwc_otg_hcd *hcd)
+{
+	/*
+	 * Currently assuming that there is a dedicated host channel for each
+	 * periodic transaction plus at least one host channel for
+	 * non-periodic transactions.
+	 */
+	int status;
+	int num_channels;
+	num_channels = hcd->core_if->core_params->host_channels;
+	if ((hcd->periodic_channels + hcd->non_periodic_channels <
+	      num_channels) && (hcd->periodic_channels < num_channels - 1))
+		status = 0;
+	else {
+		DWC_NOTICE("%s: Total channels: %d,"
+			"Periodic: %d, Non-periodic: %d\n",
+			__func__, num_channels, hcd->periodic_channels,
+			hcd->non_periodic_channels);
+
+		status = -ENOSPC;
+	}
+	return status;
+}
+
+/**
+ * Checks that there is sufficient bandwidth for the specified QH in the
+ * periodic schedule. For simplicity, this calculation assumes that all the
+ * transfers in the periodic schedule may occur in the same (micro)frame.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh QH containing periodic bandwidth required.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int
+check_periodic_bandwidth(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	int status;
+	u16 max_claimed_usecs;
+	status = 0;
+	if (qh->dev_speed == USB_SPEED_HIGH
+			|| qh->do_split) {
+		/*
+		 * High speed mode.
+		 * Max periodic usecs is 80% x 125 usec = 100 usec.
+		 */
+		max_claimed_usecs = 100 - qh->usecs;
+	} else {
+		/*
+		 * Full speed mode.
+		 * Max periodic usecs is 90% x 1000 usec = 900 usec.
+		 */
+		max_claimed_usecs = 900 - qh->usecs;
+	}
+	if (hcd->periodic_usecs > max_claimed_usecs) {
+#undef USB_DWC_OTG_IGNORE_BANDWIDTH
+#ifndef USB_DWC_OTG_IGNORE_BANDWIDTH
+		DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n",
+			    __func__, hcd->periodic_usecs, qh->usecs);
+		status = -ENOSPC;
+#else
+		status = 0;
+#endif
+	}
+	return status;
+}
+
+/**
+ * Checks that the max transfer size allowed in a host channel is large enough
+ * to handle the maximum data transfer in a single (micro)frame for a periodic
+ * transfer.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh QH for a periodic endpoint.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_max_xfer_size(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	int status;
+	u32 max_xfer_size;
+	u32 max_channel_xfer_size;
+	status = 0;
+	max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp);
+	max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size;
+	if (max_xfer_size > max_channel_xfer_size) {
+		DWC_NOTICE("%s: Periodic xfer length %d > "
+				"max xfer length for channel %d\n", __func__,
+				max_xfer_size, max_channel_xfer_size);
+		status = -ENOSPC;
+	}
+	return status;
+}
+
+/**
+ * Schedules an interrupt or isochronous transfer in the periodic schedule.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh QH for the periodic transfer. The QH should already contain the
+ * scheduling information.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int schedule_periodic(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	int status = 0;
+	struct usb_hcd *usb_hcd;
+	status = periodic_channel_available(hcd);
+	if (status) {
+		DWC_NOTICE("%s: No host channel available for periodic "
+			    "transfer.\n", __func__);
+		return status;
+	}
+	status = check_periodic_bandwidth(hcd, qh);
+	if (status) {
+		DWC_NOTICE("%s: Insufficient periodic bandwidth for "
+			    "periodic transfer.\n", __func__);
+		return status;
+	}
+	status = check_max_xfer_size(hcd, qh);
+	if (status) {
+		DWC_NOTICE("%s: Channel max transfer size too small "
+			    "for periodic transfer.\n", __func__);
+		return status;
+	}
+	usb_hcd = dwc_otg_hcd_to_hcd(hcd);
+	if (HC_IS_SUSPENDED(usb_hcd->state))
+		return -EBUSY;
+
+	if (hcd->core_if->dma_desc_enable)
+		/* Don't rely on SOF and start in ready schedule */
+		list_add_tail(&hcd->periodic_sched_ready, &qh->qh_list_entry);
+	else
+		/* Always start in the inactive schedule. */
+		list_add_tail(&qh->qh_list_entry,
+				&hcd->periodic_sched_inactive);
+
+
+	/* Reserve the periodic channel. */
+	hcd->periodic_channels++;
+
+	/* Update claimed usecs per (micro)frame. */
+	hcd->periodic_usecs += qh->usecs;
+
+	/*
+	 * Update average periodic bandwidth claimed
+	 * and # periodic reqs for usbfs.
+	 */
+	hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated +=
+						qh->usecs / qh->interval;
+
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+		hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++;
+		DWC_DEBUGPL(DBG_HCD,
+				"Scheduled intr: qh %p, usecs %d, period %d\n",
+				qh, qh->usecs, qh->interval);
+	} else {
+		hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++;
+		DWC_DEBUGPL(DBG_HCD,
+				"Scheduled isoc: qh %p, usecs %d, period %d\n",
+				qh, qh->usecs, qh->interval);
+	}
+	return status;
+}
+
+
+int dwc_otg_hcd_qh_add(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	unsigned long flags;
+	int ret_val;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	ret_val = __dwc_otg_hcd_qh_add(hcd, qh);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	return ret_val;
+}
+
+
+/**
+ * This function adds a QH to either the non periodic or periodic schedule if
+ * it is not already in the schedule. If the QH is already in the schedule, no
+ * action is taken.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ *
+ * Caller must hold hcd->lock.
+ */
+int __dwc_otg_hcd_qh_add(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	int status = 0;
+
+	if (!list_empty(&qh->qh_list_entry))
+		/* QH already in a schedule. */
+		goto done;
+
+	/* Add the new QH to the appropriate schedule */
+	if (dwc_qh_is_non_per(qh)) {
+		/* Always start in the inactive schedule. */
+		list_add_tail(&qh->qh_list_entry,
+				&hcd->non_periodic_sched_inactive);
+	} else
+		status = schedule_periodic(hcd, qh);
+
+done:
+
+	return status;
+}
+/**
+ * This function adds a QH to the non periodic deferred schedule.
+ *
+ * hcd->lock must be acquired.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int __dwc_otg_hcd_qh_add_deferred(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	if (!list_empty(&qh->qh_list_entry)) {
+		/* QH already in a schedule. */
+		goto done;
+	}
+
+	/* Add the new QH to the non periodic deferred schedule */
+	if (dwc_qh_is_non_per(qh)) {
+		list_add_tail(&qh->qh_list_entry,
+				&hcd->non_periodic_sched_deferred);
+	}
+done:
+	return 0;
+}
+
+/**
+ * Removes an interrupt or isochronous transfer from the periodic schedule.
+ *
+ * @param hcd The HCD state structure for the DWC OTG controller.
+ * @param qh QH for the periodic transfer.
+ */
+static void deschedule_periodic(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	/* increments and decrements of periodic_channels must match */
+	BUG_ON(!hcd->periodic_channels);
+
+	list_del_init(&qh->qh_list_entry);
+
+	/* Release the periodic channel reservation. */
+	hcd->periodic_channels--;
+	if (dwc_otg_hcd_idle(hcd))
+		wake_up_interruptible(&hcd->idleq);
+
+	/* Update claimed usecs per (micro)frame. */
+	hcd->periodic_usecs -= qh->usecs;
+
+	/*
+	 * Update average periodic bandwidth claimed
+	 * and # periodic reqs for usbfs.
+	 */
+	hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -=
+					qh->usecs / qh->interval;
+
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+		hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--;
+		DWC_DEBUGPL(DBG_HCD,
+			     "Descheduled intr: qh %p, usecs %d, period %d\n",
+			     qh, qh->usecs, qh->interval);
+	} else {
+		hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--;
+		DWC_DEBUGPL(DBG_HCD,
+			     "Descheduled isoc: qh %p, usecs %d, period %d\n",
+			     qh, qh->usecs, qh->interval);
+	}
+}
+
+
+/**
+ * Removes a QH from either the non-periodic or periodic schedule.  Memory is
+ * not freed.
+ *
+ * @param[in] hcd The HCD state structure.
+ * @param[in] qh QH to remove from schedule.
+ *
+ * hcd->lock must be held.
+ */
+void
+__dwc_otg_hcd_qh_remove(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh)
+{
+	if (list_empty(&qh->qh_list_entry))
+		/* QH is not in a schedule. */
+		goto done;
+
+	if (dwc_qh_is_non_per(qh)) {
+		if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) {
+			hcd->non_periodic_qh_ptr =
+				hcd->non_periodic_qh_ptr->next;
+		}
+		list_del_init(&qh->qh_list_entry);
+	} else
+		deschedule_periodic(hcd, qh);
+
+done:
+	return;
+}
+
+/**
+ * Defers a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the deferred non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ */
+int
+dwc_otg_hcd_qh_deferr(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh, int delay)
+{
+	int deact = 1;
+	unsigned long flags;
+	spin_lock_irqsave(&hcd->lock, flags);
+
+	if (dwc_qh_is_non_per(qh)) {
+		qh->sched_frame =
+			dwc_frame_num_inc(hcd->frame_number, delay);
+		qh->channel = NULL;
+		qh->qtd_in_process = NULL;
+		deact = 0;
+		__dwc_otg_hcd_qh_remove(hcd, qh);
+		if (!list_empty(&qh->qtd_list)) {
+			/* Add back to deferred non-periodic schedule. */
+			__dwc_otg_hcd_qh_add_deferred(hcd, qh);
+		}
+	}
+
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	return deact;
+}
+/**
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void __dwc_otg_hcd_qh_deactivate(struct dwc_otg_hcd *hcd, struct dwc_otg_qh *qh,
+				int sched_next_periodic_split)
+{
+	if (dwc_qh_is_non_per(qh)) {
+		__dwc_otg_hcd_qh_remove(hcd, qh);
+		if (!list_empty(&qh->qtd_list)) {
+			/* Add back to inactive non-periodic schedule. */
+			__dwc_otg_hcd_qh_add(hcd, qh);
+		}
+	} else {
+		u16 frame_number =
+			dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+		if (qh->do_split) {
+			/*
+			 * Schedule the next continuing
+			 * periodic split transfer
+			 * */
+			if (sched_next_periodic_split) {
+				qh->sched_frame = frame_number;
+				if (dwc_frame_num_le(frame_number,
+					dwc_frame_num_inc(qh->start_split_frame,
+							1))) {
+					/*
+					 * Allow one frame to elapse after start
+					 * split microframe before scheduling
+					 * complete split, but DONT if we are
+					 * doing the next start split in the
+					 * same frame for an ISOC out.
+					 */
+					if ((qh->ep_type
+						!= USB_ENDPOINT_XFER_ISOC)
+						|| (qh->ep_is_in != 0)) {
+						qh->sched_frame =
+							dwc_frame_num_inc(qh->sched_frame, 1);
+					}
+				}
+			} else {
+				qh->sched_frame =
+					dwc_frame_num_inc(qh->start_split_frame,
+							qh->interval);
+				if (dwc_frame_num_le(qh->sched_frame,
+						frame_number))
+					qh->sched_frame = frame_number;
+
+				qh->sched_frame |= 0x7;
+				qh->start_split_frame = qh->sched_frame;
+			}
+		} else {
+			qh->sched_frame =
+				dwc_frame_num_inc(qh->sched_frame,
+						qh->interval);
+			if (dwc_frame_num_le(qh->sched_frame, frame_number))
+				qh->sched_frame = frame_number;
+		}
+		if (list_empty(&qh->qtd_list)) {
+			__dwc_otg_hcd_qh_remove(hcd, qh);
+		} else {
+			/*
+			 * Remove from periodic_sched_queued and move to
+			 * appropriate queue.
+			 */
+			if (qh->sched_frame == frame_number) {
+				list_move(&qh->qh_list_entry,
+						&hcd->periodic_sched_ready);
+			} else {
+				list_move(&qh->qh_list_entry,
+						&hcd->periodic_sched_inactive);
+			}
+		}
+	}
+}
+
+/**
+ * This function allocates and initializes a QTD.
+ *
+ * @param[in] urb The URB to create a QTD from.  Each URB-QTD pair will end up
+ * pointing to each other so each pair should have a unique correlation.
+ *
+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */
+struct dwc_otg_qtd *dwc_otg_hcd_qtd_create(struct urb *urb)
+{
+	struct dwc_otg_qtd *qtd;
+	qtd = dwc_otg_hcd_qtd_alloc();
+	if (qtd == NULL)
+		return NULL;
+
+	dwc_otg_hcd_qtd_init(qtd, urb);
+	return qtd;
+}
+
+/**
+ * Initializes a QTD structure.
+ *
+ * @param[in] qtd The QTD to initialize.
+ * @param[in] urb The URB to use for initialization.  */
+void dwc_otg_hcd_qtd_init(struct dwc_otg_qtd *qtd, struct urb *urb)
+{
+	memset(qtd, 0, sizeof(struct dwc_otg_qtd));
+	qtd->urb = urb;
+	if (usb_pipecontrol(urb->pipe)) {
+		/*
+		 * The only time the QTD data toggle is used is on the data
+		 * phase of control transfers. This phase always starts with
+		 * DATA1.
+		 */
+		qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+		qtd->control_phase = DWC_OTG_CONTROL_SETUP;
+	}
+
+	/* start split */
+	qtd->complete_split = 0;
+	qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+	qtd->isoc_split_offset = 0;
+	qtd->in_process = 0;
+
+	/* Store the qtd ptr in the urb to reference what QTD. */
+	urb->hcpriv = qtd;
+	return;
+}
+
+/**
+ * This function adds a QTD to the QTD-list of a QH.  It will find the correct
+ * QH to place the QTD into.  If it does not find a QH, then it will create a
+ * new QH. If the QH to which the QTD is added is not currently scheduled, it
+ * is placed into the proper schedule based on its EP type.
+ *
+ * The dwc_otg_hcd lock must be held.
+ *
+ * @param[in] qtd The QTD to add
+ * @param[in] _dwc_otg_hcd The DWC HCD structure
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int
+dwc_otg_hcd_qtd_add(struct dwc_otg_qtd *qtd, struct dwc_otg_hcd *dwc_otg_hcd)
+{
+	struct usb_host_endpoint *ep;
+	struct dwc_otg_qh  *qh;
+	int retval = 0;
+	struct urb *urb = qtd->urb;
+
+	/*
+	 * Get the QH which holds the QTD-list to insert to. Create QH if it
+	 * doesn't exist.
+	 */
+	ep = dwc_urb_to_endpoint(urb);
+	qh = (struct dwc_otg_qh *) ep->hcpriv;
+	if (qh == NULL) {
+		qh = dwc_otg_hcd_qh_create(dwc_otg_hcd, urb);
+		if (qh == NULL) {
+			retval = -1;
+			goto done;
+		}
+		ep->hcpriv = qh;
+	}
+	qtd->qtd_qh_ptr = qh;
+	retval = __dwc_otg_hcd_qh_add(dwc_otg_hcd, qh);
+	if (retval == 0)
+		list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
+
+done:
+	return retval;
+}
+
+
+#endif	/* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_pcd.c b/drivers/usb/dwc_otg/dwc_otg_pcd.c
new file mode 100644
index 0000000..cd764ff
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_pcd.c
@@ -0,0 +1,2709 @@
+ /* ==========================================================================
+  * The Software IS NOT an item of Licensed Software or Licensed Product under
+  * any End User Software License Agreement or Agreement for Licensed Product
+  * with Synopsys or any supplement thereto. You are permitted to use and
+  * redistribute this Software in source and binary forms, with or without
+  * modification, provided that redistributions of source code must retain this
+  * notice. You may not view, use, disclose, copy or distribute this file or
+  * any information contained herein except pursuant to this license grant from
+  * Synopsys. If you do not agree with this notice, including the disclaimer
+  * below, then you are not authorized to use the Software.
+  *
+  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+  * DAMAGE.
+  * ==========================================================================*/
+
+#ifndef DWC_HOST_ONLY
+
+/** @file
+ * This file implements the Peripheral Controller Driver.
+ *
+ * The Peripheral Controller Driver (PCD) is responsible for
+ * translating requests from the Function Driver into the appropriate
+ * actions on the DWC_otg controller. It isolates the Function Driver
+ * from the specifics of the controller by providing an API to the
+ * Function Driver.
+ *
+ * The Peripheral Controller Driver for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used.
+ * (Gadget Driver is the Linux terminology for a Function Driver.)
+ *
+ * The Linux Gadget API is defined in the header file
+ * <linux/usb/gadget.h>.  The USB EP operations API is
+ * defined in the structure usb_ep_ops and the USB
+ * Controller API is defined in the structure
+ * usb_gadget_ops.
+ *
+ * An important function of the PCD is managing interrupts generated
+ * by the DWC_otg controller. The implementation of the DWC_otg device
+ * mode interrupt service routines is in dwc_otg_pcd_intr.c.
+ *
+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
+ * @todo Does it work when the request size is greater than DEPTSIZ
+ * transfer size
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+
+
+/**
+ * Returns non-zero if the SRP timer was started and therefore needs to be
+ * stopped.  The one exception is a full-speed PHY with I2C enabled
+ * (phy_type == DWC_PHY_TYPE_PARAM_FS && i2c_enable), for which this always
+ * returns 0 even if the timer is running.
+ */
+static int need_stop_srp_timer(struct dwc_otg_core_if *core_if)
+{
+	if (core_if->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS ||
+	    !core_if->core_params->i2c_enable)
+		return core_if->srp_timer_started ? 1 : 0;
+	else
+		return 0;
+}
+/**
+ * Tests if the module is set to FS or if the PHY_TYPE is FS. If so, then the
+ * gadget should not report as high-speed capable.
+ */
+static enum usb_device_speed dwc_otg_pcd_max_speed(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+
+	if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
+	    ((core_if->hwcfg2.b.hs_phy_type == 2) &&
+	     (core_if->hwcfg2.b.fs_phy_type == 1) &&
+	     (core_if->core_params->ulpi_fs_ls))) {
+		return USB_SPEED_FULL;
+	}
+
+	return USB_SPEED_HIGH;
+}
+
+/**
+ * Tests if driver is OTG capable.
+ */
+static u32 dwc_otg_pcd_is_otg(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union gusbcfg_data usbcfg = {.d32 = 0 };
+
+	usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
+	if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
+		return 0;
+
+	return 1;
+}
+
+/**
+ * This function completes a request.  It call's the request call back.
+ */
+void
+dwc_otg_request_done(struct dwc_otg_pcd_ep *ep, struct dwc_otg_pcd_request *req,
+		     int status, unsigned long *irq_flags)
+__releases(ep->pcd->lock)
+__acquires(ep->pcd->lock)
+{
+	unsigned stopped = ep->stopped;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep);
+
+	/*
+	 * The DMA engine cannot do unaligned write accesses to memory
+	 * so we must use an aligned bounce buffer for these :-(
+	 */
+	if (req->use_bounce_buffer) {
+		/* copy data out of bounce buffer */
+		memcpy(req->req.buf, ep->bounce_buffer, req->req.length);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+		req->use_bounce_buffer = 0;
+	} else {
+
+		if (req->mapped) {
+			dma_unmap_single(ep->pcd->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				ep->dwc_ep.is_in
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+			req->mapped = 0;
+		} else
+			dma_sync_single_for_cpu(ep->pcd->gadget.dev.parent,
+				req->req.dma, req->req.length,
+				ep->dwc_ep.is_in
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+	}
+
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+	if (in_interrupt()) {
+		spin_unlock(&ep->pcd->lock);
+		req->req.complete(&ep->ep, &req->req);
+		spin_lock(&ep->pcd->lock);
+
+	} else {
+		spin_unlock_irqrestore(&ep->pcd->lock, *irq_flags);
+		req->req.complete(&ep->ep, &req->req);
+		spin_lock_irqsave(&ep->pcd->lock, *irq_flags);
+	}
+	if (ep->dwc_ep.num == 0) {
+		if (ep->pcd->ep0_request_pending > 0)
+			--ep->pcd->ep0_request_pending;
+	} else {
+		if (ep->request_pending > 0)
+			--ep->request_pending;
+	}
+	ep->stopped = stopped;
+
+#ifdef CONFIG_405EZ
+	/*
+	 * Added-sr: 2007-07-26
+	 *
+	 * Finally, when the current request is done, mark this endpoint
+	 * as not active, so that new requests can be processed.
+	 */
+	ep->dwc_ep.active = 0;
+#endif
+}
+
+/**
+ * This function terminates all the requsts in the EP request queue.
+ */
+void dwc_otg_request_nuke(struct dwc_otg_pcd_ep *ep, unsigned long *irq_flags)
+{
+	struct dwc_otg_pcd_request *req;
+	ep->stopped = 1;
+
+	/* called with irqs blocked ? - NJ: yes we now pass the flags in*/
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct dwc_otg_pcd_request,
+				queue);
+		dwc_otg_request_done(ep, req, -ESHUTDOWN, irq_flags);
+	}
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static u32 assign_periodic_tx_fifo(struct dwc_otg_core_if *core_if)
+{
+	u32 mask = 1;
+	u32 i;
+
+	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
+		if (!(mask & core_if->p_tx_msk)) {
+			core_if->p_tx_msk |= mask;
+			return i + 1;
+		}
+		mask <<= 1;
+	}
+	return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_periodic_tx_fifo(struct dwc_otg_core_if *core_if,
+				  u32 fifo_num)
+{
+	core_if->p_tx_msk =
+		(core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static u32 assign_tx_fifo(struct dwc_otg_core_if *core_if)
+{
+	u32 mask = 1;
+	u32 i;
+
+	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
+		if (!(mask & core_if->tx_msk)) {
+			core_if->tx_msk |= mask;
+			return i + 1;
+		}
+		mask <<= 1;
+	}
+	return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_tx_fifo(struct dwc_otg_core_if *core_if, u32 fifo_num)
+{
+	core_if->tx_msk =
+		(core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
+}
+
+/**
+ *This function activates an EP.  The Device EP control register for
+ *the EP is configured as defined in the ep structure.	 Note: This
+ *function is not used for EP0.
+ */
+void dwc_otg_ep_activate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union depctl_data depctl;
+	u32 __iomem *addr;
+	union daint_data daintmsk = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
+		      (ep->is_in ? "IN" : "OUT"));
+
+	/*Read DEPCTLn register */
+	if (ep->is_in == 1) {
+		addr = &dev_if->in_ep_regs[ep->num]->diepctl;
+		daintmsk.ep.in = 1 << ep->num;
+	} else {
+		addr = &dev_if->out_ep_regs[ep->num]->doepctl;
+		daintmsk.ep.out = 1 << ep->num;
+	}
+
+	/*
+	 * If the EP is already active don't change the EP Control
+	 * register.
+	 */
+	depctl.d32 = dwc_read_reg32(addr);
+	if (!depctl.b.usbactep) {
+		depctl.b.mps = ep->maxpacket;
+		depctl.b.eptype = ep->type;
+		depctl.b.txfnum = ep->tx_fifo_num;
+		depctl.b.setd0pid = 1;
+		depctl.b.usbactep = 1;
+		dwc_write_reg32(addr, depctl.d32);
+		DWC_DEBUGPL(DBG_PCDV, "DEPCTL=%08x\n", dwc_read_reg32(addr));
+	}
+
+	/*Enable the Interrupt for this EP */
+	if (core_if->multiproc_int_enable) {
+		if (ep->is_in == 1) {
+			union diepint_data diepmsk = {.d32 = 0 };
+			diepmsk.b.xfercompl = 1;
+			diepmsk.b.timeout = 1;
+			diepmsk.b.epdisabled = 1;
+			diepmsk.b.ahberr = 1;
+			diepmsk.b.intknepmis = 1;
+			diepmsk.b.txfifoundrn = 1;
+
+			if (core_if->dma_desc_enable)
+				diepmsk.b.bna = 1;
+#if 0
+			if (core_if->dma_enable)
+				doepmsk.b.nak = 1;
+#endif
+			dwc_write_reg32(&dev_if->dev_global_regs->
+					diepeachintmsk[ep->num], diepmsk.d32);
+
+		} else {
+			union doepint_data doepmsk = {.d32 = 0 };
+			doepmsk.b.xfercompl = 1;
+			doepmsk.b.ahberr = 1;
+			doepmsk.b.epdisabled = 1;
+
+			if (core_if->dma_desc_enable)
+				doepmsk.b.bna = 1;
+#if 0
+			doepmsk.b.babble = 1;
+			doepmsk.b.nyet = 1;
+			doepmsk.b.nak = 1;
+#endif
+			dwc_write_reg32(&dev_if->dev_global_regs->
+					doepeachintmsk[ep->num], doepmsk.d32);
+		}
+		dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk,
+				 0, daintmsk.d32);
+	} else {
+		dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
+				 0, daintmsk.d32);
+	}
+	DWC_DEBUGPL(DBG_PCDV, "DAINTMSK=%0x\n",
+		      dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
+	ep->stall_clear_flag = 0;
+	return;
+}
+
+/**
+ * Gadget API ep_ops.enable callback: activates an endpoint according to the
+ * given endpoint descriptor and assigns it a Tx FIFO (IN EPs only).
+ *
+ * @return 0 on success; -EINVAL for a bad EP/descriptor or EP0; -ERANGE for
+ * a zero wMaxPacketSize; -ESHUTDOWN when no gadget driver is bound or the
+ * connection speed is unknown.
+ */
+static int dwc_otg_pcd_ep_enable(struct usb_ep *_ep,
+				 const struct usb_endpoint_descriptor *_desc)
+{
+	struct dwc_otg_pcd_ep *ep = NULL;
+	struct dwc_otg_pcd *pcd = NULL;
+	unsigned long flags;
+	int retval = 0;
+
+	/* NOTE(review): 'ep' is still NULL here, so this traces (nil);
+	 * logging _ep instead would be more useful — confirm intent. */
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, ep, _desc);
+
+	/* NOTE(review): computed before the NULL check below; safe only
+	 * because ep is not dereferenced when !_ep (short-circuit). */
+	ep = container_of(_ep, struct dwc_otg_pcd_ep, ep);
+
+	if (!_ep || !_desc || ep->desc
+	     || _desc->bDescriptorType != USB_DT_ENDPOINT) {
+		DWC_WARN("%s, bad ep or descriptor\n", __func__);
+		return -EINVAL;
+	}
+	if (ep == &ep->pcd->ep0) {
+		DWC_WARN("%s, bad ep(0)\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check FIFO size? */
+	if (!_desc->wMaxPacketSize) {
+		DWC_WARN("%s, bad %s maxpacket\n", __func__, _ep->name);
+		return -ERANGE;
+	}
+	pcd = ep->pcd;
+	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+		DWC_WARN("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	spin_lock_irqsave(&pcd->lock, flags);
+
+	/*
+	 * Activate the EP
+	 */
+	ep->desc = _desc;
+	ep->ep.maxpacket = le16_to_cpu(_desc->wMaxPacketSize);
+	ep->stopped = 0;
+	ep->dwc_ep.is_in = (USB_DIR_IN & _desc->bEndpointAddress) != 0;
+	ep->dwc_ep.maxpacket = ep->ep.maxpacket;
+	ep->dwc_ep.type = _desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	if (ep->dwc_ep.is_in) {
+		if (!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
+			ep->dwc_ep.tx_fifo_num = 0;
+			if ((_desc->bmAttributes &
+					USB_ENDPOINT_XFERTYPE_MASK) ==
+						USB_ENDPOINT_XFER_ISOC) {
+				/*
+				 * if ISOC EP then assign a Periodic Tx FIFO.
+				 */
+				ep->dwc_ep.tx_fifo_num =
+					assign_periodic_tx_fifo(pcd->
+								otg_dev->
+								core_if);
+			}
+		} else {
+			/*
+			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
+			 */
+			ep->dwc_ep.tx_fifo_num =
+				assign_tx_fifo(pcd->otg_dev->core_if);
+		}
+	}
+
+	/* Set initial data PID. */
+	if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+			USB_ENDPOINT_XFER_BULK)
+		ep->dwc_ep.data_pid_start = 0;
+
+
+	DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
+			ep->ep.name, (ep->dwc_ep.is_in ? "IN" : "OUT"),
+			ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
+	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
+	spin_unlock_irqrestore(&pcd->lock, flags);
+
+	return retval;
+}
+
+/**
+ *This function deactivates an EP. This is done by clearing the USB Active
+ *EP bit in the Device EP control register.  Note: This function is not used
+ *for EP0. EP0 cannot be deactivated.
+ *
+ */
+void dwc_otg_ep_deactivate(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	union depctl_data depctl = {.d32 = 0};
+	u32 __iomem *addr;
+	union daint_data daintmsk = {.d32 = 0};
+
+	/*Read DEPCTLn register */
+	if (ep->is_in) {
+		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
+		/*don't mask out interrupt if not disabling channel*/
+		if (dwc_otg_can_disable_channel(core_if, ep))
+			daintmsk.ep.in = 1 << ep->num;
+	} else {
+		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
+		daintmsk.ep.out = 1 << ep->num;
+	}
+
+
+	/*Disable the Interrupt for this EP */
+	if (core_if->multiproc_int_enable) {
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk,
+				 daintmsk.d32, 0);
+
+		if (ep->is_in == 1) {
+			/*don't mask out interrupt if not disabling channel*/
+			if (dwc_otg_can_disable_channel(core_if, ep)) {
+				dwc_write_reg32(&core_if->dev_if->
+						dev_global_regs->
+						diepeachintmsk[ep->num], 0);
+			}
+		} else {
+			dwc_write_reg32(&core_if->dev_if->dev_global_regs->
+					doepeachintmsk[ep->num], 0);
+		}
+	} else
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk,
+				 daintmsk.d32, 0);
+
+	depctl.d32 = dwc_read_reg32(addr);
+
+	depctl.b.usbactep = 0;
+
+	if (core_if->dma_desc_enable &&
+	    dwc_otg_can_disable_channel(core_if, ep))
+		depctl.b.epdis = 1;
+
+	dwc_write_reg32(addr, depctl.d32);
+}
+
+/**
+ * This function is called when an EP is disabled due to disconnect or
+ * change in configuration. Any pending requests will terminate with a
+ * status of -ESHUTDOWN.
+ *
+ * This function modifies the dwc_otg_ep_t data structure for this EP,
+ * and then calls dwc_otg_ep_deactivate.
+ */
+static int dwc_otg_pcd_ep_disable(struct usb_ep *_ep)
+{
+	struct dwc_otg_pcd_ep *ep;
+	unsigned long flags;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
+
+	ep = container_of(_ep, struct dwc_otg_pcd_ep, ep);
+
+	if (!_ep || !ep->desc) {
+		DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
+			     _ep ? ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ep->pcd->lock, flags);
+
+	dwc_otg_request_nuke(ep, &flags);
+	dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
+	ep->desc = NULL;
+	ep->stopped = 1;
+	if (ep->dwc_ep.is_in) {
+		dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd),
+				ep->dwc_ep.tx_fifo_num);
+		release_periodic_tx_fifo(GET_CORE_IF(ep->pcd),
+				ep->dwc_ep.tx_fifo_num);
+		release_tx_fifo(GET_CORE_IF(ep->pcd),
+				ep->dwc_ep.tx_fifo_num);
+	}
+
+	spin_unlock_irqrestore(&ep->pcd->lock, flags);
+
+	DWC_DEBUGPL(DBG_PCD, "%s disabled\n", _ep->name);
+
+	return 0;
+}
+
+/**
+ * This function allocates a request object to use with the specified
+ * endpoint.
+ */
+static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep,
+						     gfp_t gfp_flags)
+{
+	struct dwc_otg_pcd_request *req;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%d)\n", __func__, ep, gfp_flags);
+	if (!ep) {
+		DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n");
+		return NULL;
+	}
+	req = kzalloc(sizeof(struct dwc_otg_pcd_request), gfp_flags);
+	if (!req) {
+		DWC_WARN("%s() %s\n", __func__, "request allocation failed!\n");
+		return NULL;
+	}
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+	return &req->req;
+}
+
+/**
+ * This function frees a request object.
+ */
+static void dwc_otg_pcd_free_request(struct usb_ep *_ep,
+				     struct usb_request *_req)
+{
+	struct dwc_otg_pcd_request *req;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+	if (!_ep || !_req) {
+		DWC_WARN("%s() %s\n", __func__,
+				"Invalid ep or req argument!\n");
+		return;
+	}
+	req = container_of(_req, struct dwc_otg_pcd_request, req);
+	kfree(req);
+}
+
+/**
+ *This function initializes dma descriptor chain.
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param ep The EP to start the transfer on.
+ */
+static void
+init_dma_desc_chain(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	struct dwc_otg_dev_dma_desc *dma_desc;
+	u32 offset;
+	u32 xfer_est;
+	int i;
+
+	ep->desc_cnt = (ep->total_len / ep->maxxfer) +
+		((ep->total_len % ep->maxxfer) ? 1 : 0);
+	if (!ep->desc_cnt)
+		ep->desc_cnt = 1;
+
+	dma_desc = ep->desc_addr;
+	xfer_est = ep->total_len;
+	offset = 0;
+	for (i = 0; i < ep->desc_cnt; ++i) {
+		/**DMA Descriptor Setup */
+		if (xfer_est > ep->maxxfer) {
+			dma_desc->status.b.bs = BS_HOST_BUSY;
+			dma_desc->status.b.l = 0;
+			dma_desc->status.b.ioc = 0;
+			dma_desc->status.b.sp = 0;
+			dma_desc->status.b.bytes = ep->maxxfer;
+			dma_desc->buf = ep->dma_addr + offset;
+			dma_desc->status.b.bs = BS_HOST_READY;
+
+			xfer_est -= ep->maxxfer;
+			offset += ep->maxxfer;
+		} else {
+			dma_desc->status.b.bs = BS_HOST_BUSY;
+			dma_desc->status.b.l = 1;
+			dma_desc->status.b.ioc = 1;
+			if (ep->is_in) {
+				dma_desc->status.b.sp =
+				    (xfer_est %
+				     ep->maxpacket) ? 1 : ((ep->
+							    sent_zlp) ? 1 : 0);
+				dma_desc->status.b.bytes = xfer_est;
+			} else {
+				dma_desc->status.b.bytes =
+				    xfer_est + ((4 - (xfer_est & 0x3)) & 0x3);
+			}
+
+			dma_desc->buf = ep->dma_addr + offset;
+			dma_desc->status.b.bs = BS_HOST_READY;
+		}
+		BUG_ON((dma_desc->buf & 0x3) && !ep->is_in);
+		dma_desc++;
+	}
+	wmb();
+
+}
+
+/**
+ *This function does the setup for a data transfer for an EP and
+ *starts the transfer.	 For an IN transfer, the packets will be
+ *loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ *the packets are unloaded from the Rx FIFO in the ISR.  the ISR.
+ *
+ */
+void
+dwc_otg_ep_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	union depctl_data depctl;
+	union deptsiz_data deptsiz;
+	union gintmsk_data intr_mask = {.d32 = 0};
+
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+	DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+		      "xfer_buff=%p start_xfer_buff=%p\n", ep->num,
+		      (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
+		      ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
+
+	if (core_if->dma_desc_enable) {
+		union doepint_data doepmsk = {.d32 = 0};
+		doepmsk.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->doepmsk);
+		doepmsk.b.bna = 1;
+		dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepmsk, doepmsk.d32);
+	}
+
+	/*IN endpoint */
+	if (ep->is_in) {
+		struct dwc_otg_dev_in_ep_regs __iomem *in_regs =
+			core_if->dev_if->in_ep_regs[ep->num];
+		union gnptxsts_data gtxstatus;
+		gtxstatus.d32 =
+			dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
+		if (core_if->en_multiple_tx_fifo == 0 &&
+			gtxstatus.b.nptxqspcavail == 0) {
+#ifdef DEBUG
+			DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
+#endif	/* */
+		    return;
+		}
+		depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
+		deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+
+		ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
+				ep->maxxfer : (ep->total_len - ep->xfer_len);
+		/*Zero Length Packet? */
+		if ((ep->xfer_len - ep->xfer_count) == 0) {
+			deptsiz.b.xfersize = 0;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			/*Program the transfer size and packet count
+			 *     as follows: xfersize = N *maxpacket +
+			 *     short_packet pktcnt = N + (short_packet
+			 *     exist ? 1 : 0)
+			 */
+			deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
+			deptsiz.b.pktcnt =
+				(ep->xfer_len - ep->xfer_count - 1 +
+				ep->maxpacket) / ep->maxpacket;
+		}
+#ifdef CONFIG_405EZ
+		/*
+		 *Added-sr: 2007-07-26
+		 *
+		 *Since the 405EZ (Ultra) only support 2047 bytes as
+		 *max transfer size, we have to split up bigger transfers
+		 *into multiple transfers of 1024 bytes sized messages.
+		 *I happens often, that transfers of 4096 bytes are
+		 *required (zero-gadget, file_storage-gadget).
+		 */
+		if (ep->xfer_len > MAX_XFER_LEN) {
+			ep->bytes_pending = ep->xfer_len - MAX_XFER_LEN;
+			ep->xfer_len = MAX_XFER_LEN;
+		}
+#endif
+
+		/*Write the DMA register */
+		if (core_if->dma_enable) {
+			if (core_if->dma_desc_enable == 0) {
+				dwc_write_reg32(&in_regs->dieptsiz,
+						deptsiz.d32);
+				dwc_write_reg32(&(in_regs->diepdma),
+						(u32) ep->dma_addr);
+			} else {
+				init_dma_desc_chain(core_if, ep);
+				/**DIEPDMAn Register write */
+				dwc_write_reg32(&in_regs->diepdma,
+						ep->dma_desc_addr);
+			}
+		} else {
+			dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+			if (ep->type != USB_ENDPOINT_XFER_ISOC) {
+				/**
+				 * Enable the Non-Periodic Tx FIFO empty
+				 * interrupt, or the Tx FIFO empty
+				 * interrupt in dedicated Tx FIFO mode,
+				 * the data will be written into the fifo
+				 * by the ISR.
+				 */
+				if (core_if->en_multiple_tx_fifo == 0) {
+					intr_mask.b.nptxfempty = 1;
+					dwc_modify_reg32(&core_if->
+							 core_global_regs->
+							 gintmsk, intr_mask.d32,
+							 intr_mask.d32);
+				} else {
+				    /*
+				     * Enable the Tx FIFO Empty Interrupt
+				     * for this EP
+				     */
+				    if (ep->xfer_len > 0) {
+						u32 fifoemptymsk = 0;
+						fifoemptymsk = 1 << ep->num;
+						dwc_modify_reg32(&core_if->
+								 dev_if->
+								 dev_global_regs->
+								 dtknqr4_fifoemptymsk,
+								 0,
+								 fifoemptymsk);
+					}
+				}
+			}
+		}
+
+		/*EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		wmb();
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		depctl.d32 =
+			dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+		depctl.b.nextep = ep->num;
+
+		wmb();
+		dwc_write_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl,
+				depctl.d32);
+
+	} else {
+		/*OUT endpoint */
+		struct dwc_otg_dev_out_ep_regs __iomem *out_regs =
+			core_if->dev_if->out_ep_regs[ep->num];
+		depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
+		deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
+
+		ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
+				ep->maxxfer : (ep->total_len - ep->xfer_len);
+		/*
+		 * Program the transfer size and packet count as follows:
+		 *
+		 *     pktcnt = N
+		 *     xfersize = N *maxpacket
+		 */
+		if ((ep->xfer_len - ep->xfer_count) == 0) {
+			/*Zero Length Packet */
+			deptsiz.b.xfersize = ep->maxpacket;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			deptsiz.b.pktcnt =
+				(ep->xfer_len - ep->xfer_count +
+				(ep->maxpacket - 1)) / ep->maxpacket;
+			ep->xfer_len =
+				deptsiz.b.pktcnt * ep->maxpacket
+				+ ep->xfer_count;
+
+			deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
+		}
+
+		DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
+			      ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+		if (core_if->dma_enable) {
+			if (!core_if->dma_desc_enable) {
+				dwc_write_reg32(&out_regs->doeptsiz,
+						deptsiz.d32);
+
+				dwc_write_reg32(&(out_regs->doepdma),
+						(u32) ep->dma_addr);
+			} else {
+				init_dma_desc_chain(core_if, ep);
+
+				/**DOEPDMAn Register write */
+				dwc_write_reg32(&out_regs->doepdma,
+						ep->dma_desc_addr);
+
+			}
+		} else
+			dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+
+		if (ep->type == USB_ENDPOINT_XFER_ISOC) {
+			/**@todo NGS: dpid is read-only. Use setd0pid
+			 *or setd1pid. */
+			if (ep->even_odd_frame)
+				depctl.b.setd1pid = 1;
+			else
+				depctl.b.setd0pid = 1;
+		}
+
+		/*EP enable */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+
+		wmb();
+		dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+		DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+			dwc_read_reg32(&out_regs->doepctl),
+			dwc_read_reg32(&out_regs->doeptsiz));
+		DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+			dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
+			dwc_read_reg32(&core_if->core_global_regs->gintmsk));
+	}
+}
+
+/**
+ *This function setup a zero length transfer in Buffer DMA and
+ *Slave modes for usb requests with zero field set
+ *
+ *@param core_if Programming view of DWC_otg controller.
+ *@param ep The EP to start the transfer on.
+ *
+ */
+void dwc_otg_ep_start_zl_transfer(struct dwc_otg_core_if *core_if,
+		struct dwc_ep *ep)
+{
+
+	union depctl_data depctl;
+	union deptsiz_data deptsiz;
+	union gintmsk_data intr_mask = {.d32 = 0 };
+
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+	DWC_PRINT("zero length transfer is called\n");
+
+	/*IN endpoint */
+	if (ep->is_in == 1) {
+		struct dwc_otg_dev_in_ep_regs __iomem *in_regs =
+		    core_if->dev_if->in_ep_regs[ep->num];
+
+		depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
+		deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+
+		deptsiz.b.xfersize = 0;
+		deptsiz.b.pktcnt = 1;
+
+		/*Write the DMA register */
+		if (core_if->dma_enable) {
+			if (core_if->dma_desc_enable == 0) {
+				dwc_write_reg32(&in_regs->dieptsiz,
+						deptsiz.d32);
+				dwc_write_reg32(&(in_regs->diepdma),
+						(u32) ep->dma_addr);
+			}
+		} else {
+			dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+			/**
+			 * Enable the Non-Periodic Tx FIFO empty interrupt,
+			 * or the Tx FIFO epmty interrupt in dedicated Tx FIFO
+			 * mode, the data will be written into the fifo by the
+			 * ISR.
+			 */
+			if (core_if->en_multiple_tx_fifo == 0) {
+				intr_mask.b.nptxfempty = 1;
+				dwc_modify_reg32(&core_if->core_global_regs->
+						 gintmsk, intr_mask.d32,
+						 intr_mask.d32);
+			} else {
+				/*
+				 * Enable the Tx FIFO Empty Interrupt
+				 * for this EP
+				 */
+				if (ep->xfer_len > 0) {
+					u32 fifoemptymsk = 0;
+					fifoemptymsk = 1 << ep->num;
+					dwc_modify_reg32(&core_if->dev_if->
+							 dev_global_regs->
+							 dtknqr4_fifoemptymsk,
+							 0, fifoemptymsk);
+				}
+			}
+		}
+
+		/*EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		wmb();
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		depctl.d32 =
+		    dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+		depctl.b.nextep = ep->num;
+		wmb();
+		dwc_write_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl,
+				depctl.d32);
+
+	} else {
+		/*OUT endpoint */
+		struct dwc_otg_dev_out_ep_regs __iomem *out_regs =
+		    core_if->dev_if->out_ep_regs[ep->num];
+
+		depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
+		deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
+
+		/*Zero Length Packet */
+		deptsiz.b.xfersize = ep->maxpacket;
+		deptsiz.b.pktcnt = 1;
+
+		if (core_if->dma_enable) {
+			if (!core_if->dma_desc_enable) {
+				dwc_write_reg32(&out_regs->doeptsiz,
+						deptsiz.d32);
+
+				dwc_write_reg32(&(out_regs->doepdma),
+						(u32) ep->dma_addr);
+			}
+		} else {
+			dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+		}
+
+		/*EP enable */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+
+		wmb();
+		dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+
+	}
+}
+
+/**
+ *This function does the setup for a data transfer for EP0 and starts
+ *the transfer.  For an IN transfer, the packets will be loaded into
+ *the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
+ *unloaded from the Rx FIFO in the ISR.
+ */
+void
+dwc_otg_ep0_start_transfer(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	union depctl_data depctl;
+	union deptsiz0_data deptsiz;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	struct dwc_otg_dev_dma_desc *dma_desc;
+	DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+		      "xfer_buff=%p start_xfer_buff=%p total_len=%d\n",
+		      ep->num, (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
+		      ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,
+		      ep->total_len);
+	ep->total_len = ep->xfer_len;
+
+	/*IN endpoint */
+	if (ep->is_in) {
+		struct dwc_otg_dev_in_ep_regs __iomem *in_regs =
+			core_if->dev_if->in_ep_regs[0];
+		union gnptxsts_data gtxstatus;
+		gtxstatus.d32 =
+			dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
+		if (core_if->en_multiple_tx_fifo == 0 &&
+			gtxstatus.b.nptxqspcavail == 0) {
+#ifdef DEBUG
+			deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+			DWC_DEBUGPL(DBG_PCD, "DIEPCTL0=%0x\n",
+				     dwc_read_reg32(&in_regs->diepctl));
+			DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
+				     deptsiz.d32, deptsiz.b.xfersize,
+				     deptsiz.b.pktcnt);
+			DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n",
+					gtxstatus.d32);
+#endif	/* */
+			printk(KERN_DEBUG"TX Queue or FIFO Full!!!!\n");
+			return;
+		}
+		depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+		deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+		/*Zero Length Packet? */
+		if (ep->xfer_len == 0) {
+			deptsiz.b.xfersize = 0;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			/*Program the transfer size and packet count
+			*     as follows: xfersize = N *maxpacket +
+			*     short_packet pktcnt = N + (short_packet
+			*     exist ? 1 : 0)
+			*/
+			if (ep->xfer_len > ep->maxpacket) {
+				ep->xfer_len = ep->maxpacket;
+				deptsiz.b.xfersize = ep->maxpacket;
+			} else
+				deptsiz.b.xfersize = ep->xfer_len;
+
+			deptsiz.b.pktcnt = 1;
+		}
+		DWC_DEBUGPL(DBG_PCDV,
+			    "IN len=%d  xfersize=%d pktcnt=%d [%08x]\n",
+			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
+			    deptsiz.d32);
+
+		/*Write the DMA register */
+		if (core_if->dma_enable) {
+			if (core_if->dma_desc_enable == 0) {
+				dwc_write_reg32(&in_regs->dieptsiz,
+						deptsiz.d32);
+
+				dwc_write_reg32(&(in_regs->diepdma),
+						(u32) ep->dma_addr);
+			} else {
+				dma_desc = core_if->dev_if->in_desc_addr;
+
+				/**DMA Descriptor Setup */
+				dma_desc->status.b.bs = BS_HOST_BUSY;
+				dma_desc->status.b.l = 1;
+				dma_desc->status.b.ioc = 1;
+				dma_desc->status.b.sp =
+				    (ep->xfer_len == ep->maxpacket) ? 0 : 1;
+				dma_desc->status.b.bytes = ep->xfer_len;
+				dma_desc->buf = ep->dma_addr;
+				dma_desc->status.b.bs = BS_HOST_READY;
+
+				wmb();
+
+				/**DIEPDMA0 Register write */
+				dwc_write_reg32(&in_regs->diepdma,
+						core_if->dev_if->
+						dma_in_desc_addr);
+			}
+		} else {
+			dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+		}
+
+		/*EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		wmb();
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		/**
+		 *Enable the Non-Periodic Tx FIFO empty interrupt, the
+		 *data will be written into the fifo by the ISR.
+		 */
+		if (!core_if->dma_enable) {
+			if (core_if->en_multiple_tx_fifo == 0) {
+				intr_mask.b.nptxfempty = 1;
+				dwc_modify_reg32(&core_if->core_global_regs->
+						 gintmsk, intr_mask.d32,
+						 intr_mask.d32);
+			} else {
+				/*Enable the Tx FIFO Empty Int for this EP */
+				if (ep->xfer_len > 0) {
+					u32 fifoemptymsk = 0;
+					fifoemptymsk |= 1 << ep->num;
+					dwc_modify_reg32(&core_if->dev_if->
+							 dev_global_regs->
+							 dtknqr4_fifoemptymsk,
+						 0, fifoemptymsk);
+				}
+			}
+		}
+	} else {
+		/*OUT endpoint */
+		struct dwc_otg_dev_out_ep_regs __iomem *out_regs =
+			core_if->dev_if->out_ep_regs[0];
+		depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+		deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
+
+		/*
+		 * Program the transfer size and packet count as follows:
+		 *     xfersize = N *(maxpacket + 4 - (maxpacket % 4))
+		 *     pktcnt = N
+		 */
+
+		/*Zero Length Packet */
+		deptsiz.b.xfersize = ep->maxpacket;
+		deptsiz.b.pktcnt = 1;
+		DWC_DEBUGPL(DBG_PCDV, "len=%d  xfersize=%d pktcnt=%d\n",
+			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+
+		if (core_if->dma_enable) {
+			if (!core_if->dma_desc_enable) {
+				dwc_write_reg32(&out_regs->doeptsiz,
+						deptsiz.d32);
+
+				dwc_write_reg32(&(out_regs->doepdma),
+						(u32) ep->dma_addr);
+			} else {
+				dma_desc = core_if->dev_if->out_desc_addr;
+
+				/**DMA Descriptor Setup */
+				dma_desc->status.b.bs = BS_HOST_BUSY;
+				dma_desc->status.b.l = 1;
+				dma_desc->status.b.ioc = 1;
+				dma_desc->status.b.bytes = ep->maxpacket;
+				dma_desc->buf = ep->dma_addr;
+				dma_desc->status.b.bs = BS_HOST_READY;
+
+				wmb();
+
+				dwc_write_reg32(&out_regs->doepdma,
+						core_if->dev_if->
+						dma_out_desc_addr);
+			}
+		} else
+			dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+
+		/*EP enable */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		wmb();
+		dwc_write_reg32(&(out_regs->doepctl), depctl.d32);
+	}
+}
+
+/**
+ * This function continues control IN transfers started by
+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
+ * single packet.  NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
+ * bit for the packet count, so each continuation programs exactly one
+ * more packet.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param ep The EP0 data.
+ */
+void dwc_otg_ep0_continue_transfer(struct dwc_otg_core_if *core_if,
+				   struct dwc_ep *ep)
+{
+	union depctl_data depctl;
+	union deptsiz0_data deptsiz;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	struct dwc_otg_dev_dma_desc *dma_desc;
+	if (ep->is_in == 1) {
+		struct dwc_otg_dev_in_ep_regs __iomem *in_regs =
+			core_if->dev_if->in_ep_regs[0];
+		union gnptxsts_data tx_status = {.d32 = 0};
+		tx_status.d32 =
+			dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
+
+		/*
+		 * @todo Should there be check for room in the Tx
+		 * Status Queue.  If not remove the code above this comment.
+		 */
+		depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+		deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+		/*
+		 * Program the transfer size and packet count
+		 * as follows: xfersize = N *maxpacket +
+		 * short_packet pktcnt = N + (short_packet
+		 * exist ? 1 : 0)
+		 */
+		if (core_if->dma_desc_enable == 0) {
+			deptsiz.b.xfersize =
+				(ep->total_len - ep->xfer_count) > ep->maxpacket
+					?
+					ep->maxpacket
+					:
+					(ep->total_len - ep->xfer_count);
+
+			deptsiz.b.pktcnt = 1;
+			if (core_if->dma_enable == 0)
+				ep->xfer_len += deptsiz.b.xfersize;
+			else
+				ep->xfer_len = deptsiz.b.xfersize;
+
+			dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+		} else {
+			ep->xfer_len =
+				(ep->total_len - ep->xfer_count) > ep->maxpacket
+					?
+					ep->maxpacket
+					:
+					(ep->total_len - ep->xfer_count);
+
+			dma_desc = core_if->dev_if->in_desc_addr;
+
+			/* DMA Descriptor Setup */
+			dma_desc->status.b.bs = BS_HOST_BUSY;
+			dma_desc->status.b.l = 1;
+			dma_desc->status.b.ioc = 1;
+			dma_desc->status.b.sp =
+			    (ep->xfer_len == ep->maxpacket) ? 0 : 1;
+			dma_desc->status.b.bytes = ep->xfer_len;
+			dma_desc->buf = ep->dma_addr;
+			dma_desc->status.b.bs = BS_HOST_READY;
+
+			/* Descriptor must be fully written before the
+			 * controller is pointed at it. */
+			wmb();
+
+			/* DIEPDMA0 Register write */
+			dwc_write_reg32(&in_regs->diepdma,
+					core_if->dev_if->dma_in_desc_addr);
+		}
+		DWC_DEBUGPL(DBG_PCDV, "IN len=%d  xfersize=%d "
+				"pktcnt=%d [%08x]\n",
+				ep->xfer_len, deptsiz.b.xfersize,
+				deptsiz.b.pktcnt, deptsiz.d32);
+
+		/* Write the DMA register */
+		if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
+			if (core_if->dma_desc_enable == 0)
+				dwc_write_reg32(&(in_regs->diepdma),
+						(u32) ep->dma_addr);
+		}
+
+		/* EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		wmb();
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		/*
+		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
+		 * data will be written into the fifo by the ISR.
+		 */
+		if (!core_if->dma_enable) {
+			if (core_if->en_multiple_tx_fifo == 0) {
+				/* First clear it from GINTSTS */
+				intr_mask.b.nptxfempty = 1;
+				dwc_modify_reg32(&core_if->core_global_regs->
+						 gintmsk, intr_mask.d32,
+						 intr_mask.d32);
+
+			} else {
+				/* Enable the Tx FIFO Empty Int for this EP */
+				if (ep->xfer_len > 0) {
+					u32 fifoemptymsk = 0;
+					fifoemptymsk |= 1 << ep->num;
+					dwc_modify_reg32(&core_if->dev_if->
+							 dev_global_regs->
+							 dtknqr4_fifoemptymsk,
+							 0, fifoemptymsk);
+				}
+			}
+		}
+	} else {
+		struct dwc_otg_dev_out_ep_regs __iomem *out_regs =
+			core_if->dev_if->out_ep_regs[0];
+
+		depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+		deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
+
+		/* Program the transfer size and packet count
+		 *     as follows: xfersize = N *maxpacket +
+		 *     short_packet pktcnt = N + (short_packet
+		 *     exist ? 1 : 0)
+		 */
+		deptsiz.b.xfersize = ep->maxpacket;
+		deptsiz.b.pktcnt = 1;
+
+		if (core_if->dma_desc_enable == 0)
+			dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+		else {
+			dma_desc = core_if->dev_if->out_desc_addr;
+
+			/* DMA Descriptor Setup */
+			dma_desc->status.b.bs = BS_HOST_BUSY;
+			dma_desc->status.b.l = 1;
+			dma_desc->status.b.ioc = 1;
+			dma_desc->status.b.bytes = ep->maxpacket;
+			dma_desc->buf = ep->dma_addr;
+			dma_desc->status.b.bs = BS_HOST_READY;
+
+			wmb();
+
+			/* DOEPDMA0 Register write */
+			dwc_write_reg32(&out_regs->doepdma,
+					core_if->dev_if->dma_out_desc_addr);
+		}
+
+		/* Fix: this is the OUT branch; the message previously
+		 * said "IN", copy-pasted from the branch above. */
+		DWC_DEBUGPL(DBG_PCDV,
+			    "OUT len=%d  xfersize=%d pktcnt=%d [%08x]\n",
+			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
+			    deptsiz.d32);
+
+		/* Write the DMA register */
+		if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
+			if (core_if->dma_desc_enable == 0)
+				dwc_write_reg32(&(out_regs->doepdma),
+						(u32) ep->dma_addr);
+		}
+
+		/* EP enable, ready to receive OUT data */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+
+		wmb();
+		dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+
+	}
+}
+
+/**
+ * Allocate a coherent DMA descriptor chain of @count descriptors for
+ * an endpoint transfer; the bus address of the chain is returned
+ * through @dma_desc_addr, the CPU address as the return value (NULL
+ * on allocation failure).
+ */
+static struct dwc_otg_dev_dma_desc *
+dwc_otg_ep_alloc_desc_chain(struct device *dev, dma_addr_t *dma_desc_addr,
+			    u32 count)
+{
+	size_t chain_size = count * sizeof(struct dwc_otg_dev_dma_desc);
+
+	return dma_alloc_coherent(dev, chain_size, dma_desc_addr, GFP_KERNEL);
+}
+
+/**
+ * Release a DMA descriptor chain previously obtained from
+ * dwc_otg_ep_alloc_desc_chain(); @count must match the allocation.
+ */
+static void
+dwc_otg_ep_free_desc_chain(struct device *dev,
+			   struct dwc_otg_dev_dma_desc *desc_addr,
+			   dma_addr_t dma_desc_addr, u32 count)
+{
+	size_t chain_size = count * sizeof(struct dwc_otg_dev_dma_desc);
+
+	dma_free_coherent(dev, chain_size, desc_addr, dma_desc_addr);
+}
+
+/**
+ * This function is used to submit an I/O Request to an EP.
+ *
+ *	- When the request completes the request's completion callback
+ *	  is called to return the request to the driver.
+ *	- An EP, except control EPs, may have multiple requests
+ *	  pending.
+ *	- Once submitted the request cannot be examined or modified.
+ *	- Each request is turned into one or more packets.
+ *	- A BULK EP can queue any amount of data; the transfer is
+ *	  packetized.
+ *	- Zero length Packets are specified with the request 'zero'
+ *	  flag.
+ */
+static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+				gfp_t _gfp_flags)
+{
+	int start_needed = 0;
+	struct dwc_otg_pcd_request *req;
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_otg_pcd *pcd;
+	unsigned long flags = 0;
+	uint32_t max_transfer;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n", __func__, _ep, _req,
+		      _gfp_flags);
+	/*
+	 * container_of() is pure pointer arithmetic, so computing req
+	 * before validating _req is safe; req is only dereferenced
+	 * behind the short-circuiting !_req test below.
+	 */
+	req = container_of(_req, struct dwc_otg_pcd_request, req);
+	if (!_req || !_req->complete || !_req->buf
+	     || !list_empty(&req->queue)) {
+		DWC_WARN("%s, bad params\n", __func__);
+		return -EINVAL;
+	}
+	ep = container_of(_ep, struct dwc_otg_pcd_ep, ep);
+	if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) {
+		DWC_WARN("%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	pcd = ep->pcd;
+	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+		DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
+		DWC_WARN("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+	DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", _ep->name,
+		       _req, _req->length, _req->buf);
+	if (!GET_CORE_IF(pcd)->core_params->opt) {
+		if (ep->dwc_ep.num != 0) {
+			DWC_ERROR("%s queue req %p, len %d buf %p\n",
+				   _ep->name, _req, _req->length, _req->buf);
+		}
+	}
+	spin_lock_irqsave(&ep->pcd->lock, flags);
+
+	dwc_otg_dump_msg(_req->buf, _req->length);
+
+	/* Mark the request in flight before any hardware setup. */
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+
+	/*
+	 * For EP0 IN without premature status, zlp is required?
+	 */
+	if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in)
+		DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name);
+
+	/*
+	 * The DMA engine cannot do unaligned write accesses to memory
+	 * so we must use an aligned bounce buffer for these :-(
+	 *
+	 * NOTE(review): both arms of the test below check !is_in (OUT
+	 * direction) - one for buffer alignment, one for length
+	 * alignment.  Confirm IN requests are intentionally exempt
+	 * from the bounce-buffer workaround.
+	 */
+	if (GET_CORE_IF(pcd)->dma_desc_enable &&
+		(((!ep->dwc_ep.is_in && ((u32)_req->buf & 3)) ||
+			(!ep->dwc_ep.is_in && (_req->length & 3))))) {
+
+		/*
+		 * our bounce buffer is only PAGE_SIZE
+		 * TODO split request if bigger than PAGE_SIZE (v.unlikely)
+		 */
+		BUG_ON(_req->length > PAGE_SIZE);
+
+		_req->dma = ep->bounce_buffer_dma;
+		req->use_bounce_buffer = 1;
+		req->mapped = 1;
+
+	} else {
+
+		/* map virtual address to hardware */
+		if (_req->dma == DMA_ADDR_INVALID && _req->length) {
+			_req->dma = dma_map_single(ep->pcd->gadget.dev.parent,
+						  _req->buf,
+						  _req->length,
+						  ep->dwc_ep.is_in
+						  ? DMA_TO_DEVICE :
+						  DMA_FROM_DEVICE);
+			req->mapped = 1;
+		} else {
+			/* Caller supplied a mapping; just sync it. */
+			dma_sync_single_for_device(ep->pcd->gadget.dev.parent,
+						   _req->dma, _req->length,
+						   ep->dwc_ep.is_in
+						   ? DMA_TO_DEVICE :
+						   DMA_FROM_DEVICE);
+			req->mapped = 0;
+		}
+	}
+
+	/* Start the transfer */
+	if (list_empty(&ep->queue) && !ep->stopped) {
+		/* EP0 Transfer? */
+		if (ep->dwc_ep.num == 0) {
+			switch (pcd->ep0state) {
+			case EP0_IN_DATA_PHASE:
+				DWC_DEBUGPL(DBG_PCD, "%s ep0: "
+						"EP0_IN_DATA_PHASE\n",
+						__func__);
+				break;
+			case EP0_OUT_DATA_PHASE:
+				DWC_DEBUGPL(DBG_PCD, "%s ep0: "
+						"EP0_OUT_DATA_PHASE\n",
+						__func__);
+				if (pcd->request_config) {
+					/* Complete STATUS PHASE */
+					ep->dwc_ep.is_in = 1;
+					pcd->ep0state = EP0_IN_STATUS_PHASE;
+				}
+				break;
+			case EP0_IN_STATUS_PHASE:
+				DWC_DEBUGPL(DBG_PCD,
+					    "%s ep0: EP0_IN_STATUS_PHASE\n",
+					    __func__);
+				break;
+			default:
+				DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
+						pcd->ep0state);
+				spin_unlock_irqrestore(&pcd->lock, flags);
+				return -EL2HLT;
+			}
+			ep->dwc_ep.dma_addr = _req->dma;
+			ep->dwc_ep.start_xfer_buff = _req->buf;
+			ep->dwc_ep.xfer_buff = _req->buf;
+			ep->dwc_ep.xfer_len = _req->length;
+			ep->dwc_ep.xfer_count = 0;
+			ep->dwc_ep.sent_zlp = 0;
+			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+			/*
+			 * delay start till after putting request on queue
+			 * to avoid a race.
+			 */
+			start_needed = 1;
+		} else {
+				max_transfer =
+				    GET_CORE_IF(ep->pcd)->core_params->
+				    max_transfer_size;
+			/* Setup and start the Transfer */
+			ep->dwc_ep.dma_addr = _req->dma;
+			ep->dwc_ep.start_xfer_buff = _req->buf;
+			ep->dwc_ep.xfer_buff = _req->buf;
+			ep->dwc_ep.xfer_len = _req->length;
+			ep->dwc_ep.xfer_count = 0;
+			ep->dwc_ep.sent_zlp = 0;
+			ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+			ep->dwc_ep.maxxfer = max_transfer;
+			/* Descriptor DMA caps the per-descriptor transfer;
+			 * OUT additionally rounds down to a multiple of 4. */
+			if (GET_CORE_IF(pcd)->dma_desc_enable) {
+					uint32_t out_max_xfer =
+					    DDMA_MAX_TRANSFER_SIZE -
+					    (DDMA_MAX_TRANSFER_SIZE % 4);
+				if (ep->dwc_ep.is_in) {
+					if (ep->dwc_ep.maxxfer >
+					    DDMA_MAX_TRANSFER_SIZE) {
+						ep->dwc_ep.maxxfer =
+						    DDMA_MAX_TRANSFER_SIZE;
+					}
+				} else {
+						if (ep->dwc_ep.maxxfer >
+						    out_max_xfer) {
+						ep->dwc_ep.maxxfer =
+						    out_max_xfer;
+					}
+				}
+			}
+			/* Keep maxxfer a whole number of max packets when
+			 * the transfer must be split. */
+			if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
+				ep->dwc_ep.maxxfer -= (ep->dwc_ep.maxxfer %
+							ep->dwc_ep.maxpacket);
+			}
+
+			/*
+			 * delay start till after putting request on queue
+			 * to avoid a race.
+			 */
+			start_needed = 1;
+		}
+	}
+
+	/* NOTE(review): req is always non-NULL here (validated above). */
+	if (req) {
+		if (ep->dwc_ep.num == 0)
+			++pcd->ep0_request_pending;
+		else
+			++ep->request_pending;
+
+		list_add_tail(&req->queue, &ep->queue);
+		if (ep->dwc_ep.is_in && ep->stopped
+				&& !(GET_CORE_IF(pcd)->dma_enable)) {
+			/** @todo NGS Create a function for this. */
+			union diepint_data diepmsk = {.d32 = 0};
+			diepmsk.b.intktxfemp = 1;
+			if (GET_CORE_IF(pcd)->multiproc_int_enable) {
+				dwc_modify_reg32(&GET_CORE_IF(pcd)->
+						dev_if->
+						dev_global_regs->
+						 diepeachintmsk[ep->dwc_ep.num],
+						0,
+						diepmsk.d32);
+			} else {
+				dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->
+						 dev_global_regs->diepmsk, 0,
+						 diepmsk.d32);
+			}
+		}
+		if (start_needed) {
+			if (ep->dwc_ep.num == 0) {
+				dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
+							   &ep->dwc_ep);
+			} else {
+				dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
+							  &ep->dwc_ep);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&pcd->lock, flags);
+	return 0;
+}
+
+/**
+ * This function cancels an I/O request from an EP.
+ *
+ * Returns 0 on success, -EINVAL for bad arguments or a request not
+ * queued on this endpoint, -ESHUTDOWN if the gadget is not bound, and
+ * -EOPNOTSUPP if the request was found but its queue node was empty.
+ */
+static int dwc_otg_pcd_ep_dequeue(struct usb_ep *_ep,
+				  struct usb_request *_req)
+{
+	struct dwc_otg_pcd_request *req;
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_otg_pcd *pcd;
+	unsigned long flags;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+	ep = container_of(_ep, struct dwc_otg_pcd_ep, ep);
+	if (!_ep || !_req || (!ep->desc && ep->dwc_ep.num != 0)) {
+		DWC_WARN("%s, bad argument\n", __func__);
+		return -EINVAL;
+	}
+	pcd = ep->pcd;
+	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+		DWC_WARN("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+	spin_lock_irqsave(&pcd->lock, flags);
+	DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, _ep->name,
+		     ep->dwc_ep.is_in ? "IN" : "OUT", _req);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	/*
+	 * Standard kernel idiom: if the loop ran to completion, req
+	 * points at the bogus head container and this test fails it.
+	 */
+	if (&req->req != _req) {
+		spin_unlock_irqrestore(&pcd->lock, flags);
+		return -EINVAL;
+	}
+	/* request_done() is called with the lock held; it receives
+	 * &flags so it can drop/retake the lock around the callback. */
+	if (!list_empty(&req->queue))
+		dwc_otg_request_done(ep, req, -ECONNRESET, &flags);
+	else
+		req = NULL;
+
+	spin_unlock_irqrestore(&pcd->lock, flags);
+	return req ? 0 : -EOPNOTSUPP;
+}
+
+/**
+ * Set the EP STALL.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param ep The EP to set the stall on.
+ */
+void dwc_otg_ep_set_stall(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	u32 __iomem *ctl_reg;
+	union depctl_data ctl;
+
+	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
+		      (ep->is_in ? "IN" : "OUT"));
+
+	if (ep->is_in == 1) {
+		ctl_reg = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
+		ctl.d32 = dwc_read_reg32(ctl_reg);
+
+		/* disable the endpoint (when allowed) and stall it */
+		if (ctl.b.epena && dwc_otg_can_disable_channel(core_if, ep))
+			ctl.b.epdis = 1;
+		ctl.b.stall = 1;
+	} else {
+		ctl_reg = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
+		ctl.d32 = dwc_read_reg32(ctl_reg);
+
+		/* OUT endpoints only need the stall bit */
+		ctl.b.stall = 1;
+	}
+	dwc_write_reg32(ctl_reg, ctl.d32);
+
+	DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(ctl_reg));
+}
+
+
+/**
+ * Clear the EP STALL.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ * @param ep The EP to clear stall from.
+ */
+void dwc_otg_ep_clear_stall(struct dwc_otg_core_if *core_if, struct dwc_ep *ep)
+{
+	u32 __iomem *ctl_reg;
+	union depctl_data ctl;
+
+	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
+		      (ep->is_in ? "IN" : "OUT"));
+
+	ctl_reg = (ep->is_in == 1)
+		? &(core_if->dev_if->in_ep_regs[ep->num]->diepctl)
+		: &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
+
+	ctl.d32 = dwc_read_reg32(ctl_reg);
+
+	/* drop the stall bit */
+	ctl.b.stall = 0;
+
+	/*
+	 * USB Spec 9.4.5: For endpoints using data toggle, regardless
+	 * of whether an endpoint has the Halt feature set, a
+	 * ClearFeature(ENDPOINT_HALT) request always results in the
+	 * data toggle being reinitialised to DATA0.
+	 */
+	if (ep->type == USB_ENDPOINT_XFER_INT ||
+	    ep->type == USB_ENDPOINT_XFER_BULK)
+		ctl.b.setd0pid = 1;	/* DATA0 */
+
+	dwc_write_reg32(ctl_reg, ctl.d32);
+	DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(ctl_reg));
+}
+
+/**
+ * usb_ep_set_halt stalls an endpoint.
+ *
+ * usb_ep_clear_halt clears an endpoint halt and resets its data
+ * toggle.
+ *
+ * Both of these functions are implemented with the same underlying
+ * function. The behavior depends on the value argument.
+ *	- 0 means clear_halt.
+ *	- 1 means set_halt,
+ *	- 2 means clear stall lock flag.
+ *	- 3 means set  stall lock flag.
+ */
+static int dwc_otg_pcd_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	int retval = 0;
+	unsigned long flags;
+	struct dwc_otg_pcd_ep *ep = NULL;
+	DWC_DEBUGPL(DBG_PCD, "HALT %s %d\n", _ep->name, value);
+	ep = container_of(_ep, struct dwc_otg_pcd_ep, ep);
+	/*
+	 * Fix: guard ep->desc before dereferencing it.  ep0 never has a
+	 * descriptor, so the original unconditional
+	 * ep->desc->bmAttributes access could dereference NULL.
+	 * Halting isochronous endpoints is not supported.
+	 */
+	if (!ep || (!ep->desc && ep != &ep->pcd->ep0) ||
+	    (ep->desc &&
+	     ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
+		DWC_WARN("%s, bad ep\n", __func__);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ep->pcd->lock, flags);
+	if (!list_empty(&ep->queue)) {
+		/* cannot halt while a transfer is in progress */
+		DWC_DEBUGPL(DBG_PCD, "%s() %s XFer In process\n",
+			    __func__, _ep->name);
+		retval = -EAGAIN;
+	} else if (value == 0)
+		dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, &ep->dwc_ep);
+	else if (value == 1) {
+		if (ep->dwc_ep.is_in == 1 &&
+				GET_CORE_IF(ep->pcd)->dma_desc_enable) {
+			union dtxfsts_data txstatus;
+			union fifosize_data txfifosize;
+
+			/* refuse to stall while data sits in the Tx FIFO */
+			txfifosize.d32 =
+			    dwc_read_reg32(&GET_CORE_IF(ep->pcd)->
+					   core_global_regs->
+					   dptxfsiz_dieptxf[ep->dwc_ep.
+							    tx_fifo_num]);
+			txstatus.d32 =
+			    dwc_read_reg32(&GET_CORE_IF(ep->pcd)->dev_if->
+					   in_ep_regs[ep->dwc_ep.num]->dtxfsts);
+
+			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
+				DWC_DEBUGPL(DBG_PCD, "%s() Data In Tx Fifo\n",
+					    __func__);
+				retval = -EAGAIN;
+			} else {
+				if (ep->dwc_ep.num == 0)
+					ep->pcd->ep0state = EP0_STALL;
+				ep->stopped = 1;
+				dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if,
+						&ep->dwc_ep);
+			}
+		} else {
+			if (ep->dwc_ep.num == 0)
+				ep->pcd->ep0state = EP0_STALL;
+
+			ep->stopped = 1;
+			dwc_otg_ep_set_stall(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
+		}
+	} else if (value == 2)
+		ep->dwc_ep.stall_clear_flag = 0;
+	else if (value == 3)
+		ep->dwc_ep.stall_clear_flag = 1;
+
+	spin_unlock_irqrestore(&ep->pcd->lock, flags);
+	return retval;
+}
+
+/* Endpoint operations handed to the gadget core via usb_ep.ops
+ * (a const pointer there, so the table itself can be const). */
+static const struct usb_ep_ops dwc_otg_pcd_ep_ops = {
+	.enable = dwc_otg_pcd_ep_enable,
+	.disable = dwc_otg_pcd_ep_disable,
+	.alloc_request = dwc_otg_pcd_alloc_request,
+	.free_request = dwc_otg_pcd_free_request,
+	.queue = dwc_otg_pcd_ep_queue,
+	.dequeue = dwc_otg_pcd_ep_dequeue,
+	.set_halt = dwc_otg_pcd_ep_set_halt,
+};
+
+/**
+ * Gets the current USB frame number. This is the frame number from the
+ * last SOF packet, read from the SOFFN field of the DSTS register.
+ */
+u32 dwc_otg_get_frame_number(struct dwc_otg_core_if *core_if)
+{
+	union dsts_data status;
+
+	status.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
+	return status.b.soffn;
+}
+
+/**
+ * Gets the USB Frame number of the last SOF.
+ *
+ * Fixes two bugs in the original: the NULL check was inverted
+ * (it returned -ENODEV for a VALID gadget pointer), and the frame
+ * number was computed but never returned, as the usb_gadget_ops
+ * get_frame contract requires.
+ */
+static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget)
+{
+	struct dwc_otg_pcd *pcd;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
+	if (!gadget)
+		return -ENODEV;
+
+	pcd = container_of(gadget, struct dwc_otg_pcd, gadget);
+	return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
+}
+
+/**
+ * This function is called when the SRP timer expires.	The SRP should
+ * complete within 6 seconds.
+ *
+ * Runs in timer (softirq) context; _ptr carries the core interface
+ * pointer installed when the timer was started.
+ */
+static void srp_timeout(unsigned long _ptr)
+{
+	union gotgctl_data gotgctl;
+	struct dwc_otg_core_if *core_if = (struct dwc_otg_core_if *) _ptr;
+	u32 __iomem *addr = &core_if->core_global_regs->gotgctl;
+	gotgctl.d32 = dwc_read_reg32(addr);
+	core_if->srp_timer_started = 0;
+	/* Full-speed PHY with I2C: SRP success is reported by software. */
+	if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
+		(core_if->core_params->i2c_enable)) {
+		DWC_PRINT("SRP Timeout\n");
+		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
+			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup)
+				core_if->pcd_cb->resume_wakeup(core_if->
+								pcd_cb->p);
+
+			/* Clear Session Request */
+			gotgctl.d32 = 0;
+			gotgctl.b.sesreq = 1;
+			dwc_modify_reg32(&core_if->
+					core_global_regs->
+					gotgctl,
+					gotgctl.d32,
+					0);
+
+			core_if->srp_success = 0;
+		} else {
+			DWC_ERROR("Device not connected/responding\n");
+			gotgctl.b.sesreq = 0;
+			dwc_write_reg32(addr, gotgctl.d32);
+		}
+	} else if (gotgctl.b.sesreq) {
+		/* SRP still pending in hardware: the host never answered. */
+		DWC_PRINT("SRP Timeout\n");
+		DWC_ERROR("Device not connected/responding\n");
+		gotgctl.b.sesreq = 0;
+		dwc_write_reg32(addr, gotgctl.d32);
+	} else
+		DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32);
+}
+
+/**
+ * Start the SRP timer to detect when the SRP does not complete within
+ * 6 seconds.
+ */
+void dwc_otg_pcd_start_srp_timer(struct dwc_otg_pcd *pcd)
+{
+	struct timer_list *srp_timer = &pcd->srp_timer;
+
+	GET_CORE_IF(pcd)->srp_timer_started = 1;
+	setup_timer(srp_timer, srp_timeout,
+		    (unsigned long)GET_CORE_IF(pcd));
+	/*
+	 * mod_timer() is safe on an already-pending timer, unlike the
+	 * original add_timer() which must never be called on one
+	 * (e.g. if SRP is re-initiated before the first timeout fires).
+	 */
+	mod_timer(srp_timer, jiffies + (HZ * 6));
+}
+
+
+/**
+ * Initiate the Session Request Protocol: set GOTGCTL.SesReq and start
+ * the 6-second SRP timeout timer.  Does nothing (with an error log)
+ * if a session request is already active.
+ */
+void dwc_otg_pcd_initiate_srp(struct dwc_otg_pcd *pcd)
+{
+	u32 __iomem *addr = &(GET_CORE_IF(pcd)->core_global_regs->gotgctl);
+	union gotgctl_data val;
+
+	val.d32 = dwc_read_reg32(addr);
+	if (val.b.sesreq) {
+		DWC_ERROR("Session Request Already active!\n");
+		return;
+	}
+
+	/* Fix: log message typo "Initated"; also reuse the value read
+	 * above instead of reading GOTGCTL a second time. */
+	DWC_NOTICE("Session Request Initiated\n");
+	val.b.sesreq = 1;
+	dwc_write_reg32(addr, val.d32);
+
+	/* Start the SRP timer */
+	dwc_otg_pcd_start_srp_timer(pcd);
+}
+
+/**
+ * This function initiates remote wakeup of the host from suspend state.
+ * Signals DCTL.RmtWkUpSig for ~2ms when @set is non-zero and the host
+ * has enabled DEVICE_REMOTE_WAKEUP.
+ */
+static void dwc_otg_pcd_rem_wkup_from_suspend(struct dwc_otg_pcd *pcd, int set)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union dctl_data dctl = { 0 };
+	union dsts_data dsts;
+
+	dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
+	if (!dsts.b.suspsts)
+		DWC_WARN("Remote wakeup while is not in suspend state\n");
+
+	/* Check if DEVICE_REMOTE_WAKEUP feature enabled */
+	if (!pcd->remote_wakeup_enable) {
+		DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
+		return;
+	}
+	if (!set)
+		return;
+
+	/* Pulse the remote wakeup signalling bit for ~2ms. */
+	dctl.b.rmtwkupsig = 1;
+	dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl,
+			 0, dctl.d32);
+	DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+	mdelay(2);
+	dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl,
+			 dctl.d32, 0);
+	DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
+}
+
+#ifdef CONFIG_USB_DWC_OTG_LPM
+/**
+ * This function initiates remote wakeup of the host from L1 sleep state.
+ */
+void dwc_otg_pcd_rem_wkup_from_sleep(struct dwc_otg_pcd *pcd, int set)
+{
+	union glpmcfg_data lpmcfg;
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+
+	/* Check if we are in L1 state */
+	if (!lpmcfg.b.prt_sleep_sts) {
+		DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
+		return;
+	}
+
+	/* Check if host allows remote wakeup */
+	if (!lpmcfg.b.rem_wkup_en) {
+		DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
+		return;
+	}
+
+	/* Check if Resume OK */
+	if (!lpmcfg.b.sleep_state_resumeok) {
+		DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
+		return;
+	}
+
+	lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+	lpmcfg.b.en_utmi_sleep = 0;
+	lpmcfg.b.hird_thres &= (~(1 << 4));
+	dwc_write_reg32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	if (set) {
+		/* Fix: the type was misspelled "union dctl_data_"; the
+		 * union is named dctl_data everywhere else in this file,
+		 * so the LPM build could not compile. */
+		union dctl_data dctl = {.d32 = 0 };
+		dctl.b.rmtwkupsig = 1;
+		/* Set RmtWkUpSig bit to start remote wakup signaling.
+		 * Hardware will automatically clear this bit.
+		 */
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl,
+				 0, dctl.d32);
+		DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+	}
+}
+#endif
+/**
+ * Signal remote wakeup to the host, choosing the L1-sleep or the
+ * suspend variant depending on the current link state.  Only acts in
+ * device mode.
+ *
+ * Fix: the LPM branch referenced a local "core_if" that was never
+ * declared in this function, breaking the CONFIG_USB_DWC_OTG_LPM
+ * build; use GET_CORE_IF(pcd) directly instead.
+ */
+void dwc_otg_pcd_remote_wakeup(struct dwc_otg_pcd *pcd, int set)
+{
+	if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
+#ifdef CONFIG_USB_DWC_OTG_LPM
+		if (GET_CORE_IF(pcd)->lx_state == DWC_OTG_L1) {
+			dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
+		} else {
+#endif
+			dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
+#ifdef CONFIG_USB_DWC_OTG_LPM
+		}
+#endif
+	}
+}
+
+/**
+ * Initiates Session Request Protocol (SRP) to wakeup the host if no
+ * session is in progress. If a session is already in progress, but
+ * the device is suspended, remote wakeup signaling is started.
+ */
+static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget)
+{
+	unsigned long flags;
+	struct dwc_otg_pcd *pcd;
+	union dsts_data dsts;
+	union gotgctl_data gotgctl;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
+
+	if (!gadget)
+		return -ENODEV;
+	pcd = container_of(gadget, struct dwc_otg_pcd, gadget);
+
+	spin_lock_irqsave(&pcd->lock, flags);
+
+	/* Check if valid session */
+	gotgctl.d32 =
+		dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
+	if (!gotgctl.b.bsesvld) {
+		/* No session in progress: start SRP. */
+		dwc_otg_pcd_initiate_srp(pcd);
+	} else {
+		/* Session valid: signal remote wakeup if suspended. */
+		dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->
+					dev_if->
+					dev_global_regs->
+					dsts));
+		if (dsts.b.suspsts)
+			dwc_otg_pcd_remote_wakeup(pcd, 1);
+	}
+
+	spin_unlock_irqrestore(&pcd->lock, flags);
+	return 0;
+}
+
+/**
+ * Connect/disconnect the D+ pull-up by clearing/setting the soft
+ * disconnect bit (DCTL.SftDiscon).
+ */
+static int dwc_otg_pullup(struct usb_gadget *gadget, int is_on)
+{
+	union dctl_data dctl = {.d32 = 0 };
+	struct dwc_otg_pcd *pcd;
+	u32 __iomem *dctl_reg;
+	unsigned long flags;
+
+	if (!gadget)
+		return -ENODEV;
+	pcd = container_of(gadget, struct dwc_otg_pcd, gadget);
+
+	dctl.b.sftdiscon = 1;
+	dctl_reg = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl);
+
+	spin_lock_irqsave(&pcd->lock, flags);
+	if (is_on)
+		dwc_modify_reg32(dctl_reg, dctl.d32, 0);
+	else
+		dwc_modify_reg32(dctl_reg, 0, dctl.d32);
+	spin_unlock_irqrestore(&pcd->lock, flags);
+
+	return 0;
+}
+
+static int dwc_otg_pcd_gadget_start(struct usb_gadget *g,
+				    struct usb_gadget_driver *driver);
+static int dwc_otg_pcd_gadget_stop(struct usb_gadget *g,
+				   struct usb_gadget_driver *driver);
+
+/* usb_gadget_ops vtable registered with the gadget core. */
+static const struct usb_gadget_ops dwc_otg_pcd_ops = {
+	.get_frame = dwc_otg_pcd_get_frame,
+	.wakeup = dwc_otg_pcd_wakeup,
+	.pullup = dwc_otg_pullup,
+	.udc_start = dwc_otg_pcd_gadget_start,
+	.udc_stop = dwc_otg_pcd_gadget_stop,
+};
+
+/**
+ * This function updates the otg values in the gadget structure,
+ * optionally clearing the HNP negotiation state first (on bus reset).
+ */
+void dwc_otg_pcd_update_otg(struct dwc_otg_pcd *pcd, const unsigned reset)
+{
+	if (!pcd->gadget.is_otg)
+		return;
+
+	if (reset) {
+		/* A bus reset invalidates any negotiated HNP state. */
+		pcd->b_hnp_enable = 0;
+		pcd->a_hnp_support = 0;
+		pcd->a_alt_hnp_support = 0;
+	}
+
+	/* Mirror the PCD state into the gadget structure. */
+	pcd->gadget.b_hnp_enable = pcd->b_hnp_enable;
+	pcd->gadget.a_hnp_support = pcd->a_hnp_support;
+	pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support;
+}
+
+/**
+ * This function is the top level PCD interrupt handler; it delegates
+ * to dwc_otg_pcd_handle_intr() and translates its result.
+ */
+static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev)
+{
+	struct dwc_otg_pcd *pcd = dev;
+
+	return IRQ_RETVAL(dwc_otg_pcd_handle_intr(pcd));
+}
+
+/**
+ * PCD Callback function for initializing the PCD when switching to
+ * device mode.
+ */
+static int dwc_otg_pcd_start_cb(void *_p)
+{
+	struct dwc_otg_pcd *pcd = _p;
+
+	/* Initialize the core for device mode. */
+	if (dwc_otg_is_device_mode(GET_CORE_IF(pcd)))
+		dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+	return 1;
+}
+
+/**
+ * PCD Callback function for stopping the PCD when switching to Host
+ * mode.
+ */
+static int dwc_otg_pcd_stop_cb(void *_p)
+{
+	struct dwc_otg_pcd *pcd = _p;
+
+	dwc_otg_pcd_stop(pcd);
+	return 1;
+}
+
+/**
+ * PCD Callback function for notifying the PCD that the bus has been
+ * suspended; forwards the event to the gadget driver's suspend op.
+ * (The original header wrongly said "resuming from suspend".)
+ *
+ * Do not call with lock held, currently this cb is only called from
+ * the common interrupt handler which takes no locks.
+ */
+static int dwc_otg_pcd_suspend_cb(void *_p)
+{
+	struct dwc_otg_pcd *pcd = (struct dwc_otg_pcd *) _p;
+	if (pcd->driver && pcd->driver->suspend)
+		pcd->driver->suspend(&pcd->gadget);
+
+	return 1;
+}
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ */
+static int dwc_otg_pcd_resume_cb(void *_p)
+{
+	struct dwc_otg_pcd *pcd = (struct dwc_otg_pcd *) _p;
+	if (pcd->driver && pcd->driver->resume) {
+		/* Documents the expectation that this cb runs from the
+		 * (common) interrupt handler. */
+		WARN_ON(!in_interrupt());
+		pcd->driver->resume(&pcd->gadget);
+	}
+
+	/* Maybe stop the SRP timeout timer. */
+	if (need_stop_srp_timer(GET_CORE_IF(pcd)))  {
+		GET_CORE_IF(pcd)->srp_timer_started = 0;
+		/* NOTE(review): del_timer_sync() must not be called from
+		 * interrupt context, yet the WARN_ON above asserts we ARE
+		 * in one - confirm this path cannot deadlock. */
+		del_timer_sync(&pcd->srp_timer);
+	}
+	return 1;
+}
+
+/**
+ * PCD Callback structure for handling mode switching, registered with
+ * the core interface layer (CIL).
+ */
+static struct dwc_otg_cil_callbacks pcd_callbacks = {
+	.start = dwc_otg_pcd_start_cb,
+	.stop = dwc_otg_pcd_stop_cb,
+	.suspend = dwc_otg_pcd_suspend_cb,
+	.resume_wakeup = dwc_otg_pcd_resume_cb,
+	/* p set at registration */
+};
+
+/**
+ * Tasklet that (re)starts transfers on endpoints flagged with
+ * queue_sof: EP0 first, then every IN endpoint.
+ */
+static void start_xfer_tasklet_func(unsigned long data)
+{
+	struct dwc_otg_pcd *pcd = (struct dwc_otg_pcd *) data;
+	struct dwc_otg_core_if *core_if = pcd->otg_dev->core_if;
+	int i;
+	union depctl_data diepctl;
+	DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
+	/* NOTE(review): this DIEPCTL0 read result is never used. */
+	diepctl.d32 =
+		dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+	if (pcd->ep0.queue_sof) {
+		pcd->ep0.queue_sof = 0;
+		start_next_request(&pcd->ep0);
+	}
+	for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+		/* NOTE(review): shadows the outer diepctl, and this read
+		 * is also unused - both look like dead code. */
+		union depctl_data diepctl;
+		diepctl.d32 =
+		    dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl);
+		if (pcd->in_ep[i].queue_sof) {
+			pcd->in_ep[i].queue_sof = 0;
+			start_next_request(&pcd->in_ep[i]);
+		}
+	}
+}
+
+static DECLARE_TASKLET(start_xfer_tasklet, start_xfer_tasklet_func, 0);
+
+/**
+ * Initialise one PCD endpoint structure to its default (control,
+ * stopped) state and, when descriptor DMA is enabled, pre-allocate
+ * its DMA descriptor chain and bounce buffer.
+ *
+ * Returns 0 on success or -ESHUTDOWN if a DMA allocation fails.
+ */
+static int dwc_otg_pcd_init_ep(struct dwc_otg_pcd *pcd,
+				struct dwc_otg_pcd_ep *pcd_ep,
+				u32 is_in, u32 ep_num)
+{
+	int retval = 0;
+
+	/* Init EP structure */
+	pcd_ep->desc = NULL;
+	pcd_ep->pcd = pcd;
+	pcd_ep->stopped = 1;
+	pcd_ep->queue_sof = 0;
+
+	/* Init DWC ep structure */
+	pcd_ep->dwc_ep.is_in = is_in;
+	pcd_ep->dwc_ep.num = ep_num;
+	pcd_ep->dwc_ep.active = 0;
+	pcd_ep->dwc_ep.tx_fifo_num = 0;
+	/* Control until ep is activated */
+	pcd_ep->dwc_ep.type = USB_ENDPOINT_XFER_CONTROL;
+	pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+	pcd_ep->dwc_ep.dma_addr = 0;
+	pcd_ep->dwc_ep.start_xfer_buff = NULL;
+	pcd_ep->dwc_ep.xfer_buff = NULL;
+	pcd_ep->dwc_ep.xfer_len = 0;
+	pcd_ep->dwc_ep.xfer_count = 0;
+	pcd_ep->dwc_ep.sent_zlp = 0;
+	pcd_ep->dwc_ep.total_len = 0;
+	pcd_ep->dwc_ep.desc_addr = NULL;
+	pcd_ep->dwc_ep.dma_desc_addr = 0;
+
+	/*
+	 * pre-allocate all DMA buffers instead of allocing and freeing them
+	 * all the time
+	 */
+	if (GET_CORE_IF(pcd)->dma_desc_enable) {
+		pcd_ep->dwc_ep.desc_addr =
+		    dwc_otg_ep_alloc_desc_chain(pcd->dev,
+						&pcd_ep->dwc_ep.dma_desc_addr,
+						MAX_DMA_DESC_CNT);
+		if (!pcd_ep->dwc_ep.desc_addr) {
+			DWC_WARN("%s, can't allocate DMA descriptor\n",
+				 __func__);
+			retval = -ESHUTDOWN;
+			goto out;
+		}
+		pcd_ep->bounce_buffer = dma_alloc_coherent(pcd->dev,
+						       PAGE_SIZE,
+						       &pcd_ep->bounce_buffer_dma,
+						       GFP_KERNEL);
+		if (!pcd_ep->bounce_buffer) {
+			DWC_WARN("%s, can't allocate DMA bounce buffer\n",
+				 __func__);
+			/*
+			 * Fix: free the descriptor chain allocated just
+			 * above, which the original code leaked on this
+			 * error path.
+			 */
+			dwc_otg_ep_free_desc_chain(pcd->dev,
+						   pcd_ep->dwc_ep.desc_addr,
+						   pcd_ep->dwc_ep.dma_desc_addr,
+						   MAX_DMA_DESC_CNT);
+			pcd_ep->dwc_ep.desc_addr = NULL;
+			pcd_ep->dwc_ep.dma_desc_addr = 0;
+			retval = -ESHUTDOWN;
+			goto out;
+		}
+	}
+out:
+	return retval;
+}
+
+/**
+ * Free the per-EP DMA descriptor chains and bounce buffers that
+ * dwc_otg_pcd_init_ep() pre-allocated.  EP0's buffers are allocated
+ * and freed separately (see dwc_otg_pcd_init()/dwc_otg_pcd_remove()).
+ *
+ * Endpoints whose pointers are still NULL (never initialized, or
+ * already freed) are skipped, so this is safe to call on a partially
+ * initialized, zero-filled PCD.
+ */
+static void dwc_otg_free_channel_dma(struct dwc_otg_pcd *pcd)
+{
+	int i;
+	u32 num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
+	u32 num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
+
+	/* in_ep[0] is EP1 IN; iterate every valid IN EP slot. */
+	for (i = 0; i < num_in_eps; i++) {
+		struct dwc_otg_pcd_ep *ep = &pcd->in_ep[i];
+
+		if (ep->dwc_ep.desc_addr)
+			dwc_otg_ep_free_desc_chain(ep->pcd->dev,
+						   ep->dwc_ep.desc_addr,
+						   ep->dwc_ep.dma_desc_addr,
+						   MAX_DMA_DESC_CNT);
+
+		if (ep->bounce_buffer)
+			dma_free_coherent(ep->pcd->dev, PAGE_SIZE,
+					  ep->bounce_buffer,
+					  ep->bounce_buffer_dma);
+	}
+
+	for (i = 0; i < num_out_eps; i++) {
+		struct dwc_otg_pcd_ep *ep = &pcd->out_ep[i];
+
+		if (ep->dwc_ep.desc_addr)
+			dwc_otg_ep_free_desc_chain(ep->pcd->dev,
+						   ep->dwc_ep.desc_addr,
+						   ep->dwc_ep.dma_desc_addr,
+						   MAX_DMA_DESC_CNT);
+
+		if (ep->bounce_buffer)
+			dma_free_coherent(ep->pcd->dev, PAGE_SIZE,
+					  ep->bounce_buffer,
+					  ep->bounce_buffer_dma);
+	}
+}
+
+/**
+ * This function initializes the pcd ep structures to their default
+ * state.
+ *
+ * @param pcd the pcd structure.
+ *
+ * EP0 is set up first, then every IN and OUT endpoint that HWCFG1
+ * reports as present.  On a per-EP init failure the DMA memory of
+ * already-initialized endpoints is released via
+ * dwc_otg_free_channel_dma() before returning.
+ */
+static int dwc_otg_pcd_reinit(struct dwc_otg_pcd *pcd)
+{
+	int retval = 0;
+	int i;
+	int in_ep_cntr, out_ep_cntr;
+	u32 hwcfg1;
+	u32 num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
+	u32 num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
+	struct dwc_otg_pcd_ep *ep;
+	/* names[0] is EP0, names[1..15] IN EPs, names[16..30] OUT EPs. */
+	static const char *names[] = {
+		"ep0", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
+		"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
+		"ep12in", "ep13in", "ep14in", "ep15in", "ep1out", "ep2out",
+		"ep3out", "ep4out", "ep5out", "ep6out", "ep7out", "ep8out",
+		"ep9out", "ep10out", "ep11out", "ep12out",
+		"ep13out", "ep14out", "ep15out"
+	};
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
+	INIT_LIST_HEAD(&pcd->gadget.ep_list);
+	pcd->gadget.ep0 = &pcd->ep0.ep;
+	pcd->gadget.speed = USB_SPEED_UNKNOWN;
+	INIT_LIST_HEAD(&pcd->gadget.ep0->ep_list);
+
+	/**
+	 * Initialize the EP0 structure.
+	 */
+	ep = &pcd->ep0;
+	retval = dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
+	if (retval)
+		goto out;
+
+	ep->ep.name = names[0];
+	ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+	ep->ep.maxpacket = MAX_PACKET_SIZE;
+	list_add_tail(&ep->ep.ep_list, &pcd->gadget.ep_list);
+
+	INIT_LIST_HEAD(&ep->queue);
+
+	in_ep_cntr = 0;
+	/*
+	 * HWCFG1 presumably holds a 2-bit per-endpoint direction field;
+	 * the shift by 3 (IN) / 2 (OUT) selects the relevant bit of EP1
+	 * and the loop steps 2 bits per endpoint, a clear bit meaning the
+	 * direction is available -- TODO confirm against the DWC_otg
+	 * databook.
+	 */
+	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
+	for (i = 1; in_ep_cntr < num_in_eps; i++) {
+		if (!(hwcfg1 & 0x1)) {
+			struct dwc_otg_pcd_ep *ep = &pcd->in_ep[in_ep_cntr];
+			in_ep_cntr++;
+			/**
+			 * @todo NGS: Add direction to EP, based on contents
+			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
+			 */
+			retval = dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
+			if (retval) {
+				dwc_otg_free_channel_dma(pcd);
+				goto out;
+			}
+
+			ep->ep.name = names[i];
+			ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+			ep->ep.maxpacket = MAX_PACKET_SIZE;
+			list_add_tail(&ep->ep.ep_list, &pcd->gadget.ep_list);
+
+			INIT_LIST_HEAD(&ep->queue);
+		}
+		hwcfg1 >>= 2;
+	}
+	out_ep_cntr = 0;
+	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
+	for (i = 1; out_ep_cntr < num_out_eps; i++) {
+		if (!(hwcfg1 & 0x1)) {
+			struct dwc_otg_pcd_ep *ep = &pcd->out_ep[out_ep_cntr];
+			out_ep_cntr++;
+			/**
+			 * @todo NGS: Add direction to EP, based on contents
+			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
+			 */
+			retval = dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
+			if (retval) {
+				dwc_otg_free_channel_dma(pcd);
+				goto out;
+			}
+
+
+			ep->ep.name = names[15 + i];
+			ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+			ep->ep.maxpacket = MAX_PACKET_SIZE;
+			list_add_tail(&ep->ep.ep_list, &pcd->gadget.ep_list);
+
+			INIT_LIST_HEAD(&ep->queue);
+		}
+		hwcfg1 >>= 2;
+	}
+
+	/* EP0 is not part of the selectable ep_list; fix up its limits. */
+	list_del_init(&pcd->ep0.ep.ep_list);
+	pcd->ep0state = EP0_DISCONNECT;
+	pcd->ep0.ep.maxpacket = MAX_EP0_SIZE;
+	pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
+	pcd->ep0.dwc_ep.type = USB_ENDPOINT_XFER_CONTROL;
+out:
+	return retval;
+}
+
+/**
+ * This function releases the Gadget device.
+ * required by device_unregister().
+ *
+ * @todo Should this do something?	Should it free the PCD?
+ *
+ * Currently a no-op apart from the debug trace; the PCD itself is
+ * freed in dwc_otg_pcd_remove().
+ */
+static void dwc_otg_pcd_gadget_release(struct device *dev)
+{
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev);
+}
+
+/**
+ * This function initializes the PCD portion of the driver.
+ *
+ * @param dev the device; its drvdata must already point at the
+ *            struct dwc_otg_device set up by the core driver.
+ *
+ * Returns 0 on success or a negative errno.  On failure everything
+ * acquired here is released in reverse order via the goto chain at
+ * the bottom of the function.
+ */
+int __init dwc_otg_pcd_init(struct device *dev)
+{
+	static char pcd_name[] = "dwc_otg_pcd";
+	struct dwc_otg_pcd *pcd;
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	struct dwc_otg_dev_if *dev_if;
+	int retval = 0;
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev);
+
+	/*
+	 * Allocate PCD structure (zeroed, so all EP pointers start NULL)
+	 */
+	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+	if (!pcd)
+		return -ENOMEM;
+
+
+	spin_lock_init(&pcd->lock);
+	otg_dev->pcd = pcd;
+	pcd->dev = dev;
+	pcd->gadget.name = pcd_name;
+	pcd->otg_dev = dev_get_drvdata(dev);
+	pcd->gadget.dev.parent = dev;
+	pcd->gadget.dev.release = dwc_otg_pcd_gadget_release;
+	pcd->gadget.ops = &dwc_otg_pcd_ops;
+	if (GET_CORE_IF(pcd)->hwcfg4.b.ded_fifo_en)
+		DWC_PRINT("Dedicated Tx FIFOs mode\n");
+	else
+		DWC_PRINT("Shared Tx FIFO mode\n");
+
+	pcd->gadget.max_speed = dwc_otg_pcd_max_speed(pcd);
+	pcd->gadget.is_otg = dwc_otg_pcd_is_otg(pcd);
+
+	pcd->driver = NULL;
+
+	retval = usb_add_gadget_udc(dev, &pcd->gadget);
+	if (retval) {
+		DWC_ERROR("failed to add gadget udc\n");
+		goto err_free;
+	}
+
+	/*
+	 * Initialize the Core for Device mode.
+	 */
+	if (dwc_otg_is_device_mode(GET_CORE_IF(pcd)))
+		dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+
+	/*
+	 * Initialize EP structures
+	 */
+	retval = dwc_otg_pcd_reinit(pcd);
+	if (retval != 0) {
+		DWC_ERROR("failed to setup EPs\n");
+		goto err_gadget;
+	}
+
+	/*
+	 * Register the PCD Callbacks.
+	 */
+	dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if,
+			&pcd_callbacks, pcd);
+
+	/*
+	 * Setup interrupt handler
+	 */
+	DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq);
+	retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, IRQF_SHARED,
+			     pcd->gadget.name, pcd);
+	if (retval != 0) {
+		DWC_ERROR("request of irq%d failed\n", otg_dev->irq);
+		retval = -EBUSY;
+		goto err_eps;
+	}
+
+	/*
+	 * Initialize the DMA buffer for SETUP packets.
+	 * Every allocation failure from here on reports -ENOMEM.
+	 */
+	retval = -ENOMEM;
+	if (GET_CORE_IF(pcd)->dma_enable) {
+		pcd->setup_pkt = dma_alloc_coherent(dev,
+					sizeof(*pcd->setup_pkt) * 5,
+					&pcd->setup_pkt_dma_handle, 0);
+		if (!pcd->setup_pkt)
+			goto err_irq;
+		pcd->status_buf = dma_alloc_coherent(dev,
+					sizeof(uint16_t),
+					&pcd->status_buf_dma_handle, 0);
+		if (!pcd->status_buf)
+			goto err_free_setup_pkt;
+
+		if (GET_CORE_IF(pcd)->dma_desc_enable) {
+			dev_if = otg_dev->core_if->dev_if;
+
+			/* Two single-entry setup descriptors (ping-pong). */
+			dev_if->setup_desc_addr[0] =
+			    dwc_otg_ep_alloc_desc_chain(dev, &dev_if->
+							dma_setup_desc_addr[0],
+							1);
+			if (!dev_if->setup_desc_addr[0])
+				goto err_free_status_buf;
+
+			dev_if->setup_desc_addr[1] =
+			    dwc_otg_ep_alloc_desc_chain(dev, &dev_if->
+							dma_setup_desc_addr[1],
+							1);
+			if (!dev_if->setup_desc_addr[1])
+				goto err_free_setup_desc_0;
+
+			dev_if->in_desc_addr =
+			    dwc_otg_ep_alloc_desc_chain(dev, &dev_if->
+							dma_in_desc_addr, 1);
+			if (!dev_if->in_desc_addr)
+				goto err_free_setup_desc_1;
+
+			dev_if->out_desc_addr =
+			    dwc_otg_ep_alloc_desc_chain(dev, &dev_if->
+							dma_out_desc_addr, 1);
+			if (!dev_if->out_desc_addr)
+				goto err_free_in_desc;
+
+			/* EP0's bounce buffer is allocated here, not in
+			 * dwc_otg_pcd_init_ep() via dwc_otg_pcd_reinit(). */
+			pcd->ep0.bounce_buffer = dma_alloc_coherent(dev,
+							PAGE_SIZE,
+							&pcd->ep0.bounce_buffer_dma,
+							GFP_KERNEL);
+			if (!pcd->ep0.bounce_buffer)
+				goto err_free_out_desc;
+		}
+	} else {
+		pcd->setup_pkt =  kmalloc(sizeof(*pcd->setup_pkt) * 5,
+					GFP_KERNEL);
+		if (!pcd->setup_pkt)
+			goto err_irq;
+		pcd->status_buf = kmalloc(sizeof(uint16_t),
+					GFP_KERNEL);
+		if (!pcd->status_buf)
+			goto err_free_setup_pkt_nodma;
+	}
+
+	/* Initialize tasklet */
+	start_xfer_tasklet.data = (unsigned long)pcd;
+	pcd->start_xfer_tasklet = &start_xfer_tasklet;
+
+	return 0;
+
+	/*
+	 * Error unwinding: labels run in reverse order of acquisition.
+	 * dev_if is only dereferenced by labels reachable from within
+	 * the dma_desc_enable branch, where it has been assigned.
+	 */
+	/* DMA enable */
+err_free_out_desc:
+	dwc_otg_ep_free_desc_chain(dev, dev_if->out_desc_addr,
+				   dev_if->dma_out_desc_addr, 1);
+err_free_in_desc:
+	dwc_otg_ep_free_desc_chain(dev, dev_if->in_desc_addr,
+				   dev_if->dma_in_desc_addr, 1);
+err_free_setup_desc_1:
+	dwc_otg_ep_free_desc_chain(dev, dev_if->setup_desc_addr[1],
+				   dev_if->dma_setup_desc_addr[1], 1);
+err_free_setup_desc_0:
+	dwc_otg_ep_free_desc_chain(dev, dev_if->setup_desc_addr[0],
+				   dev_if->dma_setup_desc_addr[0], 1);
+err_free_status_buf:
+	/* sizeof(*pcd->status_buf) == sizeof(uint16_t) used at alloc time */
+	dma_free_coherent(dev, sizeof(*pcd->status_buf), pcd->status_buf,
+			  pcd->status_buf_dma_handle);
+err_free_setup_pkt:
+	dma_free_coherent(dev, sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
+			  pcd->setup_pkt_dma_handle);
+	goto err_irq;
+
+	/* DMA disable */
+err_free_setup_pkt_nodma:
+	kfree(pcd->setup_pkt);
+
+	/* common error handling */
+err_irq:
+	free_irq(otg_dev->irq, pcd);
+err_eps:
+	dwc_otg_free_channel_dma(pcd);
+err_gadget:
+	usb_del_gadget_udc(&pcd->gadget);
+err_free:
+	kfree(pcd);
+	return retval;
+}
+
+/**
+ * Cleanup the PCD.
+ *
+ * @param dev the device previously passed to dwc_otg_pcd_init().
+ *
+ * Releases the IRQ, unregisters the UDC, then frees all DMA/heap
+ * memory in roughly the reverse order of dwc_otg_pcd_init().
+ */
+void dwc_otg_pcd_remove(struct device *dev)
+{
+	struct dwc_otg_device *otg_dev = dev_get_drvdata(dev);
+	struct dwc_otg_pcd *pcd = otg_dev->pcd;
+	struct dwc_otg_dev_if *dev_if = GET_CORE_IF(pcd)->dev_if;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev);
+
+	/*
+	 * Free the IRQ
+	 */
+	free_irq(otg_dev->irq, pcd);
+
+	usb_del_gadget_udc(&pcd->gadget);
+
+	/* start with the driver above us */
+	if (pcd->driver) {
+
+		/* should have been done already by driver model core */
+		DWC_WARN("driver '%s' is still registered\n",
+				pcd->driver->driver.name);
+		usb_gadget_unregister_driver(pcd->driver);
+	}
+	if (GET_CORE_IF(pcd)->dma_enable) {
+
+		/* per-EP descriptor chains and bounce buffers (not EP0's) */
+		dwc_otg_free_channel_dma(pcd);
+
+		dma_free_coherent(dev, sizeof(*pcd->setup_pkt) * 5,
+				   pcd->setup_pkt, pcd->setup_pkt_dma_handle);
+		dma_free_coherent(dev, sizeof(uint16_t), pcd->status_buf,
+				   pcd->status_buf_dma_handle);
+		if (GET_CORE_IF(pcd)->dma_desc_enable) {
+			dwc_otg_ep_free_desc_chain(dev,
+						   dev_if->setup_desc_addr[0],
+						   dev_if->
+						   dma_setup_desc_addr[0], 1);
+			dwc_otg_ep_free_desc_chain(dev,
+						   dev_if->setup_desc_addr[1],
+						   dev_if->
+						   dma_setup_desc_addr[1], 1);
+			dwc_otg_ep_free_desc_chain(dev,
+						   dev_if->in_desc_addr,
+						   dev_if->dma_in_desc_addr, 1);
+			dwc_otg_ep_free_desc_chain(dev,
+						   dev_if->out_desc_addr,
+						   dev_if->dma_out_desc_addr,
+						   1);
+
+			/* EP0's bounce buffer was allocated in pcd_init() */
+			dma_free_coherent(dev, PAGE_SIZE,
+					  pcd->ep0.bounce_buffer,
+					  pcd->ep0.bounce_buffer_dma);
+		}
+	} else {
+		kfree(pcd->setup_pkt);
+		kfree(pcd->status_buf);
+	}
+
+	kfree(pcd);
+	otg_dev->pcd = NULL;
+}
+
+/**
+ * This function registers a gadget driver with the PCD.
+ *
+ * When a driver is successfully registered, it will receive control
+ * requests including set_configuration(), which enables non-control
+ * requests.  then usb traffic follows until a disconnect is reported.
+ * then a host may connect again, or the driver might get unbound.
+ *
+ * Always returns 0.  NOTE(review): nothing here rejects a second bind
+ * while pcd->driver is already set; the UDC core is presumably the
+ * one preventing that -- verify.
+ */
+static int dwc_otg_pcd_gadget_start(struct usb_gadget *g,
+				    struct usb_gadget_driver *driver)
+{
+	struct dwc_otg_pcd *pcd = to_dwc_otg_pcd(g);
+	DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
+		      driver->driver.name);
+
+	/* hook up the driver */
+	pcd->driver = driver;
+	pcd->gadget.dev.driver = &driver->driver;
+
+	return 0;
+}
+
+/**
+ * This function unregisters a gadget driver: it stops the PCD
+ * hardware side via dwc_otg_pcd_stop() and then detaches the driver
+ * pointers.  Always returns 0.
+ */
+static int dwc_otg_pcd_gadget_stop(struct usb_gadget *g,
+				   struct usb_gadget_driver *driver)
+{
+	struct dwc_otg_pcd *pcd = to_dwc_otg_pcd(g);
+
+	dwc_otg_pcd_stop(pcd);
+
+	pcd->gadget.dev.driver = NULL;
+	pcd->driver = NULL;
+
+	return 0;
+}
+
+#endif	/* DWC_HOST_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_pcd.h b/drivers/usb/dwc_otg/dwc_otg_pcd.h
new file mode 100644
index 0000000..778fd38
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_pcd.h
@@ -0,0 +1,279 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_HOST_ONLY
+#if !defined(__DWC_PCD_H__)
+#define __DWC_PCD_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+
+/**
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Peripheral Controller Driver (PCD).
+ *
+ * The Peripheral Controller Driver (PCD) for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used.	 For
+ * the Mass Storage Function driver the File-backed USB Storage Gadget
+ * (FBS) driver will be used.  The FBS driver supports the
+ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only
+ * transports.
+ *
+ */
+
+/** Invalid DMA Address */
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+/** Maxpacket size for EP0 */
+#define MAX_EP0_SIZE	64
+/** Maxpacket size for any EP */
+#define MAX_PACKET_SIZE 1024
+/** Max Transfer size for any EP */
+#define DDMA_MAX_TRANSFER_SIZE 65535
+/** Max DMA Descriptor count for any EP */
+#define MAX_DMA_DESC_CNT 64
+
+
+/**
+ * Get the pointer to the core_if from the pcd pointer.
+ * The argument is parenthesized so the macro is safe to use with any
+ * pointer-valued expression, not just a plain identifier.
+ */
+#define GET_CORE_IF(_pcd) ((_pcd)->otg_dev->core_if)
+
+/**
+ * States of EP0, tracking the stage of the current control transfer.
+ */
+enum ep0_state {
+	EP0_DISCONNECT,		/* no host */
+	EP0_IDLE,		/* no control transfer in progress */
+	EP0_IN_DATA_PHASE,	/* IN data stage */
+	EP0_OUT_DATA_PHASE,	/* OUT data stage */
+	EP0_IN_STATUS_PHASE,	/* IN status stage */
+	EP0_OUT_STATUS_PHASE,	/* OUT status stage */
+	EP0_STALL,		/* EP0 stalled */
+};
+
+/** Forward declaration. */
+struct dwc_otg_pcd;
+
+/**
+ * PCD EP structure.
+ * This structure describes an EP, there is an array of EPs in the PCD
+ * structure.
+ */
+struct dwc_otg_pcd_ep {
+	/** USB EP data */
+	struct usb_ep		ep;
+	/** USB EP Descriptor */
+	const struct usb_endpoint_descriptor	*desc;
+
+	/** queue of dwc_otg_pcd_requests. */
+	struct list_head	queue;
+	/* set to 1 at init; presumably cleared while transferring -- verify */
+	unsigned stopped:1;
+	/* EP is in the process of being disabled (name-inferred) */
+	unsigned disabling:1;
+	/* DMA in use for this EP (name-inferred) */
+	unsigned dma:1;
+	/* defer starting the next request to the start-xfer tasklet */
+	unsigned queue_sof:1;
+
+	/** Count of pending Requests */
+	unsigned request_pending;
+
+	/** bounce buffer for unaligned accesses (PAGE_SIZE, coherent DMA) */
+	void *bounce_buffer;
+	dma_addr_t bounce_buffer_dma;
+
+	/** DWC_otg ep data. */
+	struct dwc_ep dwc_ep;
+
+	/** Pointer to PCD */
+	struct dwc_otg_pcd *pcd;
+
+	/* opaque per-EP private data for upper layers */
+	void *priv;
+};
+
+
+
+/** DWC_otg PCD Structure.
+ * This structure encapsulates the data for the dwc_otg PCD.
+ */
+struct dwc_otg_pcd {
+	/** USB gadget */
+	struct usb_gadget gadget;
+	/** USB gadget driver pointer*/
+	struct usb_gadget_driver *driver;
+	/** The DWC otg device pointer. */
+	struct dwc_otg_device *otg_dev;
+
+	/** State of EP0 */
+	enum ep0_state	ep0state;
+	/** EP0 Request is pending */
+	unsigned	ep0_pending:1;
+	/** Indicates when SET CONFIGURATION Request is in process */
+	unsigned	request_config:1;
+	/** The state of the Remote Wakeup Enable. */
+	unsigned	remote_wakeup_enable:1;
+	/** The state of the B-Device HNP Enable. */
+	unsigned	b_hnp_enable:1;
+	/** The state of A-Device HNP Support. */
+	unsigned	a_hnp_support:1;
+	/** The state of the A-Device Alt HNP support. */
+	unsigned	a_alt_hnp_support:1;
+	/** Count of pending Requests */
+	unsigned	ep0_request_pending;
+
+	/** SETUP packet for EP0.
+	 * This structure is allocated as a DMA buffer on PCD
+	 * initialization; dwc_otg_pcd_init() sizes it for 5 setup
+	 * packets (sizeof(*setup_pkt) * 5).
+	 */
+	union {
+			struct usb_ctrlrequest	req;
+			u32	d32[2];
+	} *setup_pkt;
+
+	dma_addr_t setup_pkt_dma_handle;
+
+	/** 2-byte dma buffer used to return status from GET_STATUS */
+	u16 *status_buf;
+	dma_addr_t status_buf_dma_handle;
+
+	/** EP0 (kept separate from the IN/OUT arrays). */
+	struct dwc_otg_pcd_ep ep0;
+	/** Array of IN EPs (in_ep[0] is EP1 IN). */
+	struct dwc_otg_pcd_ep in_ep[MAX_EPS_CHANNELS - 1];
+	/** Array of OUT EPs (out_ep[0] is EP1 OUT). */
+	struct dwc_otg_pcd_ep out_ep[MAX_EPS_CHANNELS - 1];
+
+	/** Lock protecting PCD state. */
+	spinlock_t	lock;
+	/** Timer for SRP.	If it expires before SRP is successful
+	 * clear the SRP. */
+	struct timer_list srp_timer;
+
+	/** Tasklet to defer starting of TEST mode transmissions until
+	 *	Status Phase has been completed.
+	 */
+	struct tasklet_struct test_mode_tasklet;
+
+	/** Tasklet to delay starting of xfer in DMA mode */
+	struct tasklet_struct *start_xfer_tasklet;
+
+	/** The test mode to enter when the tasklet is executed. */
+	unsigned test_mode;
+
+	/** Underlying device; used for all DMA allocations. */
+	struct device *dev;
+
+};
+/** Convert a usb_gadget pointer to the enclosing dwc_otg_pcd. */
+#define to_dwc_otg_pcd(g)	(container_of((g), struct dwc_otg_pcd, gadget))
+
+
+/** DWC_otg request structure.
+ * This structure is a list of requests.
+ */
+struct dwc_otg_pcd_request {
+	struct usb_request	req; /**< USB Request. */
+	struct list_head	queue;	/**< queue of these requests. */
+	/* buffer was dma_map_single()'d by the PCD and must be unmapped */
+	unsigned mapped:1;
+	/* transfer presumably staged through the EP bounce buffer -- verify */
+	unsigned use_bounce_buffer:1;
+};
+
+
+extern int __init dwc_otg_pcd_init(struct device *_dev);
+
+extern void dwc_otg_pcd_stop(struct dwc_otg_pcd *_pcd);
+
+/* Start the next queued request on an EP (defined in dwc_otg_pcd_intr.c) */
+extern void start_next_request(struct dwc_otg_pcd_ep *ep);
+extern void dwc_otg_pcd_remove(struct device *_dev);
+extern int dwc_otg_pcd_handle_intr(struct dwc_otg_pcd *_pcd);
+extern void dwc_otg_pcd_start_srp_timer(struct dwc_otg_pcd *_pcd);
+extern void dwc_otg_pcd_initiate_srp(struct dwc_otg_pcd *_pcd);
+extern void dwc_otg_pcd_remote_wakeup(struct dwc_otg_pcd *_pcd, int set);
+/* request functions defined in "dwc_otg_pcd.c" */
+extern void dwc_otg_request_done(struct dwc_otg_pcd_ep *_ep,
+				 struct dwc_otg_pcd_request *_req,
+				 int _status,
+				 unsigned long *irq_flags);
+extern void dwc_otg_request_nuke(struct dwc_otg_pcd_ep *_ep,
+				 unsigned long *irq_flags);
+extern void dwc_otg_pcd_update_otg(struct dwc_otg_pcd *_pcd,
+				   const unsigned _reset);
+
+/**
+ * Helper used for workaround of STAR 9000364833:
+ * Title: Core Does not Respond When IN EndPoints are Randomly Disabled in
+ *        Scatter/Gather DMA Device Mode
+ * Impacted Configuration: Configurations with Scatter/Gather Device DMA
+ *                         functionality
+ * Nature of Defect: The core stops responding for IN EPs after a random EP
+ *                   disable is programmed by the application for IN EPs.
+ * Consequence of Defect: The core NAKs for all the IN tokens received on USB.
+ *
+ * Description:
+ * This defect manifests as follows:
+ * 1. The core is operating in Device Scatter/Gather DMA mode.
+ * 2. USB reset is driven by the USB host at an arbitrary time.
+ * 3. The core interrupts its application with GINTSTS.USBRst bit set.
+ * 4. In response to the USB reset interrupt, the core's application programs EP
+ *    disable (DIOEPCTLn[31:30] = 2'b11) for all the enabled endpoints to stop
+ *    transfers on these endpoints (because USB reset is an indication that the
+ *    current session is no longer valid).
+ * 5. The core starts disabling the endpoints sequentially and generates
+ *    corresponding EP disabled interrupt (DIOEPINTn.EPDisbld) after every EP
+ *    is disabled.
+ * 6. Because of this defect, the FSM of the DWC_otg_aiu_dsch.v module in the
+ *    core is unable to service any IN EP after the enumeration is done
+ *    (unable to proceed with IN transfers of the control transfer).
+ *    OUT endpoints are serviced because they are not affected by this defect.
+ *
+ * Versions Affected 2.90a, 2.81a, 2.80a, 2.72a, 2.71a and 2.70a only
+ *
+ * Workaround:  Don't disable endpoints once enabled on effected cores, this
+ * is a little nasty but testing has proved this to work. Without this change
+ * the Ch9 test in the USBCV tool fail.
+ */
+static inline int
+dwc_otg_can_disable_channel(struct dwc_otg_core_if *core_if,
+			    struct dwc_ep *ep) {
+	/*
+	 * NOTE(review): per the workaround text above, affected cores
+	 * (snpsid < 2.91a) must NOT disable enabled IN endpoints, yet
+	 * this returns true exactly for that case.  Either the function
+	 * name or the condition appears inverted -- verify against the
+	 * call sites before changing anything.
+	 */
+	if (core_if->snpsid < OTG_CORE_REV_2_91a && ep->is_in)
+		return true;
+	else
+		return false;
+}
+
+#endif
+#endif /* DWC_HOST_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_pcd_intr.c b/drivers/usb/dwc_otg/dwc_otg_pcd_intr.c
new file mode 100644
index 0000000..02cf2fe
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_pcd_intr.c
@@ -0,0 +1,3115 @@
+/* ==========================================================================
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_HOST_ONLY
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+
+#define DEBUG_EP0
+
+
+/** @file
+ * This file contains the implementation of the PCD Interrupt handlers.
+ *
+ * The PCD handles the device interrupts.  Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ * All interrupt registers are processed from LSB to MSB.
+ */
+
+/**
+ * This function prints the ep0 state for debug purposes.
+ * Compiles to a no-op unless DEBUG is defined.
+ *
+ * Uses a pointer to a string literal instead of copying into a local
+ * buffer: no strcpy, no fixed-size array to outgrow.
+ */
+static void print_ep0_state(struct dwc_otg_pcd *pcd)
+{
+#ifdef DEBUG
+	const char *str;
+
+	switch (pcd->ep0state) {
+	case EP0_DISCONNECT:
+		str = "EP0_DISCONNECT";
+		break;
+	case EP0_IDLE:
+		str = "EP0_IDLE";
+		break;
+	case EP0_IN_DATA_PHASE:
+		str = "EP0_IN_DATA_PHASE";
+		break;
+	case EP0_OUT_DATA_PHASE:
+		str = "EP0_OUT_DATA_PHASE";
+		break;
+	case EP0_IN_STATUS_PHASE:
+		str = "EP0_IN_STATUS_PHASE";
+		break;
+	case EP0_OUT_STATUS_PHASE:
+		str = "EP0_OUT_STATUS_PHASE";
+		break;
+	case EP0_STALL:
+		str = "EP0_STALL";
+		break;
+	default:
+		str = "EP0_INVALID";
+	}
+	DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
+#endif	/*  */
+}
+
+/**
+ * Look up the IN EP structure with endpoint number ep_num.
+ * Returns &pcd->ep0 for ep_num 0, NULL when no matching IN EP exists.
+ */
+static struct dwc_otg_pcd_ep *get_in_ep(struct dwc_otg_pcd *pcd,
+					u32 ep_num)
+{
+	int idx;
+	int count = GET_CORE_IF(pcd)->dev_if->num_in_eps;
+
+	if (ep_num == 0)
+		return &pcd->ep0;
+
+	for (idx = 0; idx < count; ++idx) {
+		if (pcd->in_ep[idx].dwc_ep.num == ep_num)
+			return &pcd->in_ep[idx];
+	}
+
+	return NULL;
+}
+
+/**
+ * Look up the OUT EP structure with endpoint number ep_num.
+ * Returns &pcd->ep0 for ep_num 0, NULL when no matching OUT EP exists.
+ */
+static struct dwc_otg_pcd_ep *get_out_ep(struct dwc_otg_pcd *pcd,
+						u32 ep_num)
+{
+	int idx;
+	int count = GET_CORE_IF(pcd)->dev_if->num_out_eps;
+
+	if (ep_num == 0)
+		return &pcd->ep0;
+
+	for (idx = 0; idx < count; ++idx) {
+		if (pcd->out_ep[idx].dwc_ep.num == ep_num)
+			return &pcd->out_ep[idx];
+	}
+
+	return NULL;
+}
+
+/**
+ * This functions gets a pointer to an EP from the wIndex address
+ * value of the control request.
+ *
+ * wIndex comes from the (untrusted) host.  USB_ENDPOINT_NUMBER_MASK
+ * bounds ep_num to 0..15, matching the array sizes, but nothing here
+ * checks ep_num against num_in_eps/num_out_eps -- a request naming a
+ * non-configured endpoint returns a pointer to an uninitialized slot.
+ * NOTE(review): callers must tolerate that; verify.
+ */
+static struct dwc_otg_pcd_ep *get_ep_by_addr(struct dwc_otg_pcd *pcd,
+						     u16 wIndex)
+{
+	struct dwc_otg_pcd_ep *ep;
+	u32 ep_num = (wIndex & USB_ENDPOINT_NUMBER_MASK);
+
+	if (ep_num == 0) {
+		ep = &pcd->ep0;
+	} else if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { /* in ep */
+		ep = &pcd->in_ep[ep_num - 1];
+	} else {
+		ep = &pcd->out_ep[ep_num - 1];
+	}
+
+	return ep;
+}
+
+
+/**
+ * This function checks the EP request queue, if the queue is not
+ * empty the next request is started.
+ *
+ * The head request's buffer is DMA-mapped (or synced, when the caller
+ * already supplied a DMA address), the dwc_ep transfer bookkeeping is
+ * reset, the per-transfer size cap (maxxfer) is computed, and the
+ * transfer is handed to dwc_otg_ep_start_transfer().
+ */
+void start_next_request(struct dwc_otg_pcd_ep *ep)
+{
+	struct dwc_otg_pcd_request *req = NULL;
+	u32 max_transfer =
+	    GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
+
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next,
+				struct dwc_otg_pcd_request, queue);
+
+		/* map virtual address to hardware */
+		if (req->req.dma == DMA_ADDR_INVALID && req->req.length) {
+			/* req->mapped records that WE own the mapping and
+			 * must dma_unmap_single() it on completion. */
+			req->req.dma = dma_map_single(ep->pcd->gadget.dev.parent,
+						  req->req.buf,
+						  req->req.length,
+						  ep->dwc_ep.is_in
+						  ? DMA_TO_DEVICE :
+						  DMA_FROM_DEVICE);
+			req->mapped = 1;
+		} else {
+			dma_sync_single_for_device(ep->pcd->gadget.dev.parent,
+						   req->req.dma, req->req.length,
+						   ep->dwc_ep.is_in
+						   ? DMA_TO_DEVICE :
+						   DMA_FROM_DEVICE);
+			req->mapped = 0;
+		}
+
+
+
+		/* Setup and start the Transfer */
+		ep->dwc_ep.dma_addr = req->req.dma;
+		ep->dwc_ep.start_xfer_buff = req->req.buf;
+		ep->dwc_ep.xfer_buff = req->req.buf;
+		ep->dwc_ep.xfer_len = req->req.length;
+		ep->dwc_ep.xfer_count = 0;
+		ep->dwc_ep.sent_zlp = 0;
+		ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+
+
+		/*
+		 * Cap the per-programming transfer size.  With descriptor
+		 * DMA the cap is DDMA_MAX_TRANSFER_SIZE, rounded down to a
+		 * multiple of 4 for OUT transfers.
+		 */
+		ep->dwc_ep.maxxfer = max_transfer;
+		if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
+			u32 out_max_xfer = DDMA_MAX_TRANSFER_SIZE
+			    - (DDMA_MAX_TRANSFER_SIZE % 4);
+			if (ep->dwc_ep.is_in) {
+					if (ep->dwc_ep.maxxfer >
+					    DDMA_MAX_TRANSFER_SIZE) {
+						ep->dwc_ep.maxxfer =
+							DDMA_MAX_TRANSFER_SIZE;
+					}
+			} else {
+				if (ep->dwc_ep.maxxfer > out_max_xfer) {
+						ep->dwc_ep.maxxfer =
+						    out_max_xfer;
+				}
+			}
+		}
+		/* For multi-programming transfers, keep each chunk a whole
+		 * number of max packets. */
+		if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
+			ep->dwc_ep.maxxfer -=
+			    (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket);
+		}
+
+		/* Queue a trailing zero-length packet when requested and
+		 * the transfer ends exactly on a packet boundary. */
+		if (req->req.zero) {
+				if ((ep->dwc_ep.total_len %
+				     ep->dwc_ep.maxpacket == 0)
+				    && (ep->dwc_ep.total_len != 0)) {
+						ep->dwc_ep.sent_zlp = 1;
+				}
+		}
+
+
+#ifdef CONFIG_405EZ
+		/*
+		 * Added-sr: 2007-07-26
+		 *
+		 * When a new transfer will be started, mark this
+		 * endpoint as active. This way it will be blocked
+		 * for further transfers, until the current transfer
+		 * is finished.
+		 */
+		ep->dwc_ep.active = 1;
+#endif
+		dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
+	}
+}
+
+/**
+ * SOF interrupt handler.  SOF handling proper is disabled at this
+ * time, so the interrupt is simply acknowledged.  Returns 1 (handled).
+ */
+static int dwc_otg_pcd_handle_sof_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union gintsts_data ack = { .d32 = 0 };
+
+	/* Acknowledge: write the SOF bit back to GINTSTS to clear it. */
+	ack.b.sofintr = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, ack.d32);
+	return 1;
+}
+
+/**
+ * This function handles the Rx Status Queue Level Interrupt, which
+ * indicates that there is a least one packet in the Rx FIFO.  The
+ * packets are moved from the FIFO to memory, where they will be
+ * processed when the Endpoint Interrupt Register indicates Transfer
+ * Complete or SETUP Phase Done.
+ *
+ * Repeat the following until the Rx Status Queue is empty:
+ *	 -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
+ *		info
+ *	 -# If Receive FIFO is empty then skip to step Clear the interrupt
+ *		and exit
+ *	 -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
+ *		SETUP data to the buffer
+ *	 -# If OUT Data Packet call dwc_otg_read_packet to copy the data
+ *		to the destination buffer
+ *
+ * Returns 1 (handled).  The rxstsqlvl interrupt is masked here and
+ * presumably re-enabled elsewhere once the packet has been consumed
+ * -- verify against the EP interrupt handlers.
+ */
+static int dwc_otg_pcd_handle_rx_status_q_level_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gintmsk_data gintmask = {.d32 = 0};
+	union device_grxsts_data status;
+	struct dwc_otg_pcd_ep *ep;
+
+#ifdef DEBUG
+	/* Only declared under DEBUG; the DWC_DEBUGPL below presumably
+	 * compiles away when DEBUG is not defined -- verify. */
+	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
+
+#endif	/*  */
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
+
+	/* Disable the Rx Status Queue Level interrupt */
+	gintmask.b.rxstsqlvl = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0);
+
+	/* Get the Status from the top of the FIFO (GRXSTSP pops it) */
+	status.d32 = dwc_read_reg32(&global_regs->grxstsp);
+	DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
+		"pktsts:%x Frame:%d(0x%0x)\n", status.b.epnum,
+		status.b.bcnt, dpid_str[status.b.dpid], status.b.pktsts,
+		status.b.fn, status.b.fn);
+
+	/*
+	 * Get pointer to EP structure.  NOTE(review): get_out_ep() can
+	 * return NULL for an unknown epnum, and ep is dereferenced
+	 * unchecked in the DATA_UPDT case below -- confirm the hardware
+	 * cannot report an unconfigured EP here.
+	 */
+	ep = get_out_ep(pcd, status.b.epnum);
+
+	switch (status.b.pktsts) {
+	case DWC_DSTS_GOUT_NAK:
+		DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
+		break;
+	case DWC_STS_DATA_UPDT:
+		DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
+		if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
+			dwc_otg_read_packet(core_if, ep->dwc_ep.xfer_buff,
+						status.b.bcnt);
+			ep->dwc_ep.xfer_count += status.b.bcnt;
+			ep->dwc_ep.xfer_buff += status.b.bcnt;
+		}
+		break;
+	case DWC_STS_XFER_COMP:
+		DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
+		break;
+	case DWC_DSTS_SETUP_COMP:
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
+#endif	/*  */
+		break;
+	case DWC_DSTS_SETUP_UPDT:
+		dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCD,
+				"SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
+				pcd->setup_pkt->req.bRequestType,
+				pcd->setup_pkt->req.bRequest,
+				__le16_to_cpu(pcd->setup_pkt->req.wValue),
+				__le16_to_cpu(pcd->setup_pkt->req.wIndex),
+				__le16_to_cpu(pcd->setup_pkt->req.wLength));
+
+#endif	/*  */
+
+		ep->dwc_ep.xfer_count += status.b.bcnt;
+		break;
+	default:
+		DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
+				status.b.pktsts);
+		break;
+	}
+
+	return 1;
+}
+
+/**
+ * This function examines the Device IN Token Learning Queue to
+ * determine the EP number of the last IN token received.  This
+ * implementation is for the Mass Storage device where there are only
+ * 2 IN EPs (Control-IN and BULK-IN).
+ *
+ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
+ * are 8 EP Numbers in each of the other possible DTKNQ Registers.
+ *
+ */
+static int get_ep_of_last_in_token(struct dwc_otg_core_if *core_if)
+{
+	struct dwc_otg_dev_if __iomem *dev_global_regs;
+	const u32 TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
+
+	/* Number of Token Queue Registers */
+	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
+	union dtknq1_data dtknqr1;
+	u32 in_tkn_epnums[4];
+	int ndx = 0;
+	int i = 0;
+	u32 __iomem *addr = &dev_global_regs->dtknqr1;
+	int epnum = 0;
+
+
+	/* Read the DTKNQ Registers */
+	for (i = 0; i < DTKNQ_REG_CNT; i++) {
+		in_tkn_epnums[i] = dwc_read_reg32(addr);
+		DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
+				in_tkn_epnums[i]);
+		/* DTKNQR1/DTKNQR2 are adjacent, but DVBUSDIS/DVBUSPULSE sit
+		 * between DTKNQR2 and DTKNQR3 in the register map, hence the
+		 * address jump.  NOTE(review): the comparison runs *after*
+		 * the read, so for DTKNQ_REG_CNT > 2 the third read appears
+		 * to hit DVBUSDIS rather than DTKNQR3 -- confirm against the
+		 * databook register map. */
+		if (addr == &dev_global_regs->dvbusdis)
+			addr = &dev_global_regs->dtknqr3_dthrctl;
+		else
+			++addr;
+	}
+
+	/* Copy the DTKNQR1 data to the bit field. */
+	dtknqr1.d32 = in_tkn_epnums[0];
+
+	/* Get the EP numbers */
+	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
+	/* intknwptr points one past the most recent entry. */
+	ndx = dtknqr1.b.intknwptr - 1;
+
+	if (ndx == -1) {
+		/* Write pointer has wrapped: the last token written is the
+		 * final slot of the queue. */
+		/** @todo Find a simpler way to calculate the max
+		 * queue position.*/
+		int cnt = TOKEN_Q_DEPTH;
+		if (TOKEN_Q_DEPTH <= 6)
+			cnt = TOKEN_Q_DEPTH - 1;
+		else if (TOKEN_Q_DEPTH <= 14)
+			cnt = TOKEN_Q_DEPTH - 7;
+		else if (TOKEN_Q_DEPTH <= 22)
+			cnt = TOKEN_Q_DEPTH - 15;
+		else
+			cnt = TOKEN_Q_DEPTH - 23;
+
+		epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
+	} else {
+		/* Each EP number is a 4-bit nibble; slots 0-5 live in
+		 * DTKNQR1, then 8 slots per subsequent register. */
+		if (ndx <= 5)
+			epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
+		else if (ndx <= 13) {
+			ndx -= 6;
+			epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
+		} else if (ndx <= 21) {
+			ndx -= 14;
+			epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
+		} else if (ndx <= 29) {
+			ndx -= 22;
+			epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
+		}
+	}
+
+	return epnum;
+}
+
+/**
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
+ * The active request is checked for the next packet to be loaded into
+ * the non-periodic Tx FIFO.
+ */
+static int dwc_otg_pcd_handle_np_tx_fifo_empty_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	union gnptxsts_data txstatus = {.d32 = 0 };
+	/*
+	 * gintsts was previously declared under "#ifndef OTG_PLB_DMA" while
+	 * being used unconditionally below to clear the interrupt, which
+	 * broke the build whenever OTG_PLB_DMA was defined.  Declare it
+	 * unconditionally.  (The unused ep_regs local was dropped, too.)
+	 */
+	union gintsts_data gintsts;
+	int epnum = 0;
+	struct dwc_otg_pcd_ep *ep = NULL;
+	u32 len = 0;
+	int dwords;
+
+	/* Get the epnum from the IN Token Learning Queue. */
+	epnum = get_ep_of_last_in_token(core_if);
+	ep = get_in_ep(pcd, epnum);
+
+	DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum);
+	/* Size of the next packet: remaining bytes, capped at maxpacket. */
+	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+	if (len > ep->dwc_ep.maxpacket)
+		len = ep->dwc_ep.maxpacket;
+
+	dwords = (len + 3) / 4;
+
+	/* While there is space in the queue and space in the FIFO and
+	 * More data to tranfer, Write packets to the Tx FIFO
+	 */
+	txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+	DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
+	while (txstatus.b.nptxqspcavail > 0
+		&& txstatus.b.nptxfspcavail > dwords
+		&& ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
+
+		/* Write the FIFO */
+#ifdef CONFIG_405EZ
+		/*
+		 * Added-sr: 2007-07-26
+		 *
+		 * When a new transfer will be started, mark this
+		 * endpoint as active. This way it will be blocked
+		 * for further transfers, until the current transfer
+		 * is finished.
+		 */
+		ep->dwc_ep.active = 1;
+#endif
+		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
+		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+		if (len > ep->dwc_ep.maxpacket)
+			len = ep->dwc_ep.maxpacket;
+
+		dwords = (len + 3) / 4;
+		txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+		DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
+	}
+	DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
+			dwc_read_reg32(&global_regs->gnptxsts));
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.nptxfempty = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	return 1;
+}
+
+/**
+ * This function is called when dedicated Tx FIFO Empty interrupt occurs.
+ * The active request is checked for the next packet to be loaded into
+ * appropriate Tx FIFO.
+ */
+static int write_empty_tx_fifo(struct dwc_otg_pcd *pcd, u32 epnum)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union dtxfsts_data txstatus = {.d32 = 0};
+	struct dwc_otg_pcd_ep *ep = NULL;
+	u32 len = 0;
+	int dwords;
+	ep = get_in_ep(pcd, epnum);
+
+	DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n",
+			ep->ep.name, epnum);
+	/* Size of the next packet: remaining bytes, capped at maxpacket. */
+	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+	if (len > ep->dwc_ep.maxpacket)
+		len = ep->dwc_ep.maxpacket;
+
+	dwords = (len + 3) / 4;
+
+	/* While there is space in the queue and space in the FIFO and
+	 * More data to transfer, Write packets to the Tx FIFO */
+	txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
+	while (txstatus.b.txfspcavail > dwords
+		 && ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len
+		 && ep->dwc_ep.xfer_len != 0) {
+
+		/* Write the FIFO */
+		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
+		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+		if (len > ep->dwc_ep.maxpacket)
+			len = ep->dwc_ep.maxpacket;
+
+		dwords = (len + 3) / 4;
+		txstatus.d32 =
+			dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+		DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
+				txstatus.d32);
+	}
+	/* Post-loop status; the previous "b4" (before) tag was misleading
+	 * here, so log it without that prefix.  The unused ep_regs local
+	 * was also removed. */
+	DWC_DEBUGPL(DBG_PCDV, "after dtxfsts[%d]=0x%08x\n", epnum,
+			dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts));
+
+	return 1;
+}
+
+/**
+ * This function is called when the Device is disconnected. It stops
+ * any active requests and informs the Gadget driver of the
+ * disconnect.
+ */
+void dwc_otg_pcd_stop(struct dwc_otg_pcd *pcd)
+{
+	int i, num_in_eps, num_out_eps;
+	struct dwc_otg_pcd_ep *ep;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcd->lock, flags);
+
+	num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
+	num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
+	DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
+
+	/* don't disconnect drivers more than once */
+	if (pcd->ep0state == EP0_DISCONNECT) {
+		DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
+		spin_unlock_irqrestore(&pcd->lock, flags);
+		return;
+	}
+	pcd->ep0state = EP0_DISCONNECT;
+
+	/* Reset the OTG state. */
+	dwc_otg_pcd_update_otg(pcd, 1);
+
+	/* Disable the NP Tx Fifo Empty Interrupt. */
+	intr_mask.b.nptxfempty = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Flush the FIFOs */
+	dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0);
+	dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
+
+	/* prevent new request submissions, kill any outstanding requests.
+	 * NOTE(review): dwc_otg_request_nuke takes &flags, suggesting it may
+	 * drop and re-take pcd->lock internally -- confirm at its
+	 * definition. */
+	ep = &pcd->ep0;
+	dwc_otg_request_nuke(ep, &flags);
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 0; i < num_in_eps; i++) {
+		/* (shadows the outer "ep" deliberately; scoped to the loop) */
+		struct dwc_otg_pcd_ep *ep = &pcd->in_ep[i];
+		dwc_otg_request_nuke(ep, &flags);
+	}
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 0; i < num_out_eps; i++) {
+		struct dwc_otg_pcd_ep *ep = &pcd->out_ep[i];
+		dwc_otg_request_nuke(ep, &flags);
+	}
+
+	/* report disconnect; the driver is already quiesced.  The lock is
+	 * released around the callback since the gadget driver runs outside
+	 * this driver and may itself call back into the PCD. */
+	if (pcd->driver && pcd->driver->disconnect) {
+		spin_unlock_irqrestore(&pcd->lock, flags);
+		pcd->driver->disconnect(&pcd->gadget);
+		spin_lock_irqsave(&pcd->lock, flags);
+	}
+
+
+	spin_unlock_irqrestore(&pcd->lock, flags);
+}
+
+/**
+ * Handles the I2C interrupt (GINTSTS.I2CINT).  No real handler is
+ * implemented: the interrupt is masked off so it cannot fire again,
+ * and the status bit is cleared.
+ */
+static int dwc_otg_pcd_handle_i2c_intr(struct dwc_otg_pcd *pcd)
+{
+	union gintmsk_data intr_mask = {.d32 = 0};
+	union gintsts_data gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr");
+	/* Mask the interrupt so the unimplemented handler is not re-entered. */
+	intr_mask.b.i2cintr = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.i2cintr = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * Handles the Early Suspend interrupt: optionally logs it (VERBOSE
+ * builds only) and clears GINTSTS.ErlySusp.  No other action is taken.
+ */
+static int dwc_otg_pcd_handle_early_suspend_intr(struct dwc_otg_pcd *pcd)
+{
+	union gintsts_data gintsts;
+
+#if defined(VERBOSE)
+	DWC_PRINT("Early Suspend Detected\n");
+#endif	/*  */
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.erlysuspend = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This function configures EPO to receive SETUP packets.
+ *
+ * Program the following fields in the endpoint specific registers for Control
+ * OUT EP 0, in order to receive a setup packet:
+ *
+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back setup packets)
+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back to back setup
+ *   packets)
+ * In DMA mode, DOEPDMA0 Register with a memory address to store any setup
+ * packets received
+ *
+ */
+static  void ep0_out_start(struct dwc_otg_core_if *core_if,
+				 struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union deptsiz0_data doeptsize0 = {.d32 = 0};
+	struct dwc_otg_dev_dma_desc *dma_desc;
+	union depctl_data doepctl = {.d32 = 0 };
+
+#ifdef VERBOSE
+	    DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
+			dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+
+#endif	/*  */
+	/* 3 setup packets of 8 bytes each (24 bytes total). */
+	doeptsize0.b.supcnt = 3;
+	doeptsize0.b.pktcnt = 1;
+	doeptsize0.b.xfersize = 8 * 3;
+
+	if (core_if->dma_enable) {
+		if (!core_if->dma_desc_enable) {
+			/**
+			 * put here as for Hermes mode
+			 * deptisz register should not be written
+			 */
+			dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
+					doeptsize0.d32);
+
+			/**
+			 * @todo dma needs to handle multiple
+			 * setup packets (up to 3)
+			 */
+			dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
+				    pcd->setup_pkt_dma_handle);
+		} else {
+			/* Descriptor DMA: alternate between the two
+			 * pre-allocated setup descriptors. */
+			dev_if->setup_desc_index =
+			    (dev_if->setup_desc_index + 1) & 1;
+			dma_desc =
+			    dev_if->setup_desc_addr[dev_if->setup_desc_index];
+
+			/** DMA Descriptor Setup: mark BUSY while filling the
+			 * fields, then flip to READY so the controller may
+			 * consume it. */
+			dma_desc->status.b.bs = BS_HOST_BUSY;
+			dma_desc->status.b.l = 1;
+			dma_desc->status.b.ioc = 1;
+			dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
+			dma_desc->buf = pcd->setup_pkt_dma_handle;
+			dma_desc->status.b.bs = BS_HOST_READY;
+
+			/* Order the descriptor writes before handing the
+			 * descriptor address to the controller below. */
+			wmb();
+
+			/** DOEPDMA0 Register write */
+			dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
+					dev_if->dma_setup_desc_addr[dev_if->
+								    setup_desc_index]);
+		}
+
+	} else {
+		/**
+		 * put here as for Hermes mode deptisz
+		 * register should not be written
+		 */
+		dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
+				doeptsize0.d32);
+	}
+
+	/** DOEPCTL0 Register write: enable the endpoint and clear NAK so the
+	 * SETUP can be received. */
+	doepctl.b.epena = 1;
+	doepctl.b.cnak = 1;
+	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+			dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+		     dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+#endif
+}
+
+/**
+ * This interrupt occurs when a USB Reset is detected.  When the USB Reset
+ * Interrupt occurs the device state is set to DEFAULT and the EP0 state is set
+ * to IDLE.
+ *
+ * Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
+ *
+ * Unmask the following interrupt bits:
+ *  - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
+ *  - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
+ *  - DOEPMSK.SETUP = 1
+ *  - DOEPMSK.XferCompl = 1
+ *  - DIEPMSK.XferCompl = 1
+ *  - DIEPMSK.TimeOut = 1
+ *
+ * Program the following fields in the endpoint specific registers for Control
+ * OUT EP 0, in order to receive a setup packet
+ *  - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back setup packets)
+ *  - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back to back setup
+ *    packets)
+ *
+ *  - In DMA mode, DOEPDMA0 Register with a memory address to store any setup
+ *    packets received
+ *
+ * At this point, all the required initialization, except for enabling
+ * the control 0 OUT endpoint is done, for receiving SETUP packets.
+ */
+static int dwc_otg_pcd_handle_usb_reset_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union depctl_data doepctl = {.d32 = 0};
+	union daint_data daintmsk = {.d32 = 0};
+	union doepint_data doepmsk = {.d32 = 0};
+	union diepint_data diepmsk = {.d32 = 0};
+	union dcfg_data dcfg = {.d32 = 0};
+	union grstctl_data resetctl = {.d32 = 0};
+	union dctl_data dctl = {.d32 = 0};
+	int i = 0;
+	union gintsts_data gintsts = {.d32 = 0 };
+	union pcgcctl_data power = {.d32 = 0 };
+
+	/*
+	 * Fix for STAR 9000382324:
+	 * When the High Speed device enters LPM state after
+	 * successfully completing LPM transactions in USB, it
+	 * erroneously detects Reset or Resume even though there
+	 * is no Reset or Resume from the Host.
+	 * As a result of this issue, the device core exits L1
+	 * state when the Host is still in L1. This issue occurs
+	 * randomly if the PHY takes more than 2.5us to enable
+	 * FS terminations after entering L1.
+	 */
+	if (core_if->lx_state == DWC_OTG_L1) {
+		union glpmcfg_data lpmcfg;
+		lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
+		if (!lpmcfg.b.sleep_state_resumeok) {
+			/* perform a soft disconnect as we are out of
+			 * step with the host.
+			 * (This dctl intentionally shadows the outer one;
+			 * it is only used for the sftdiscon pulse.)
+			 */
+			union dctl_data dctl = {.d32 = 0};
+			dctl.b.sftdiscon = 1;
+			/* Assert soft disconnect, hold it for 1ms, then
+			 * release it again; skip the normal reset handling. */
+			dwc_modify_reg32(&core_if->dev_if->
+					 dev_global_regs->dctl,
+					 0,
+					 dctl.d32);
+			wmb();
+			mdelay(1);
+			dwc_modify_reg32(&core_if->dev_if->
+					 dev_global_regs->dctl,
+					 dctl.d32,
+					 0);
+			goto out;
+		}
+	}
+
+
+	/* If the PHY clock was gated, ungate it and release power clamps
+	 * before touching the core. */
+	power.d32 = dwc_read_reg32(core_if->pcgcctl);
+	if (power.b.stoppclk) {
+		power.d32 = 0;
+		power.b.stoppclk = 1;
+		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
+
+		power.b.pwrclmp = 1;
+		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
+
+		power.b.rstpdwnmodule = 1;
+		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
+	}
+
+	core_if->lx_state = DWC_OTG_L0;
+	DWC_PRINT("USB RESET\n");
+
+	/* reset the HNP settings */
+	dwc_otg_pcd_update_otg(pcd, 1);
+
+	/* Clear the Remote Wakeup Signalling */
+	dctl.b.rmtwkupsig = 1;
+	dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
+
+	/* Set NAK for all OUT EPs.  "<=" so EP0 is covered in addition to
+	 * the data endpoints (assuming num_out_eps excludes EP0 -- confirm
+	 * against dev_if initialisation). */
+	doepctl.b.snak = 1;
+	for (i = 0; i <= dev_if->num_out_eps; i++) {
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl,
+				 doepctl.d32);
+	}
+
+	/* Flush the NP Tx FIFO (0x10 presumably selects all Tx FIFOs per
+	 * the GRSTCTL TxFNum encoding -- confirm against the databook). */
+	dwc_otg_flush_tx_fifo(core_if, 0x10);
+
+	/* Flush the Learning Queue */
+	resetctl.b.intknqflsh = 1;
+	dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
+	/* Unmask EP0 interrupts; the per-EP mask registers differ depending
+	 * on whether the core uses the multiprocessor interrupt scheme. */
+	if (core_if->multiproc_int_enable) {
+		daintmsk.b.inep0 = 1;
+		daintmsk.b.outep0 = 1;
+		dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk,
+				daintmsk.d32);
+
+		doepmsk.b.setup = 1;
+		doepmsk.b.xfercompl = 1;
+		doepmsk.b.ahberr = 1;
+		doepmsk.b.epdisabled = 1;
+
+		if (core_if->dma_desc_enable) {
+			doepmsk.b.stsphsercvd = 1;
+			doepmsk.b.bna = 1;
+		}
+
+		dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0],
+				doepmsk.d32);
+
+		diepmsk.b.xfercompl = 1;
+		diepmsk.b.timeout = 1;
+		diepmsk.b.epdisabled = 1;
+		diepmsk.b.ahberr = 1;
+		diepmsk.b.intknepmis = 1;
+
+		if (core_if->dma_desc_enable)
+			diepmsk.b.bna = 1;
+
+
+		dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0],
+				diepmsk.d32);
+	} else {
+		daintmsk.b.inep0 = 1;
+		daintmsk.b.outep0 = 1;
+		dwc_write_reg32(&dev_if->dev_global_regs->daintmsk,
+				daintmsk.d32);
+		doepmsk.b.setup = 1;
+		doepmsk.b.xfercompl = 1;
+		doepmsk.b.ahberr = 1;
+		doepmsk.b.epdisabled = 0;
+		if (core_if->dma_desc_enable) {
+			doepmsk.b.stsphsercvd = 1;
+
+			/*doepmsk.b.bna = 1;*/
+
+			/*
+			 * NJ dont enable BNA int until we get
+			 * first request.
+			 */
+		}
+		dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
+		diepmsk.b.xfercompl = 1;
+		diepmsk.b.timeout = 1;
+		diepmsk.b.epdisabled = 0;
+		diepmsk.b.ahberr = 1;
+		diepmsk.b.intknepmis = 1;
+
+		if (core_if->dma_desc_enable)
+			diepmsk.b.bna = 1;
+
+		/*enable NAK effective interrupts */
+		diepmsk.b.inepnakeff = 1;
+
+
+		dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
+	}
+
+	/* Reset Device Address */
+	dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
+	dcfg.b.devaddr = 0;
+
+	/* disable nzstsouthshk bit as well */
+	dcfg.b.nzstsouthshk = 0;
+
+	dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+	/* setup EP0 to receive SETUP packets */
+	ep0_out_start(core_if, pcd);
+
+out:
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.usbreset = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+/**
+ * Get the device speed from the device status register and convert it
+ * to USB speed constant.
+ *
+ * @param core_if Programming view of DWC_otg controller.
+ */
+static int get_device_speed(struct dwc_otg_core_if *core_if)
+{
+	union dsts_data dsts;
+
+	dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
+
+	/* Map the DSTS enumerated-speed field onto the generic USB speed
+	 * constants; anything unrecognised is reported as unknown. */
+	if (dsts.b.enumspd == DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ)
+		return USB_SPEED_HIGH;
+	if (dsts.b.enumspd == DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ ||
+	    dsts.b.enumspd == DWC_DSTS_ENUMSPD_FS_PHY_48MHZ)
+		return USB_SPEED_FULL;
+	if (dsts.b.enumspd == DWC_DSTS_ENUMSPD_LS_PHY_6MHZ)
+		return USB_SPEED_LOW;
+
+	return USB_SPEED_UNKNOWN;
+}
+
+/**
+ * Read the device status register and set the device speed in the
+ * data structure.
+ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
+ */
+static int dwc_otg_pcd_handle_enum_done_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	union gintsts_data gintsts;
+	union gusbcfg_data gusbcfg;
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+	    GET_CORE_IF(pcd)->core_global_regs;
+	uint8_t utmi16b, utmi8b;
+
+	DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
+
+	/* UTMI turnaround-time values depend on the core revision
+	 * (2.60a and later use different constants). */
+	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) {
+		utmi16b = 6;
+		utmi8b = 9;
+	} else {
+		utmi16b = 4;
+		utmi8b = 8;
+	}
+	dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
+
+#ifdef DEBUG_EP0
+	print_ep0_state(pcd);
+#endif	/*  */
+	pcd->ep0state = EP0_IDLE;
+	ep0->stopped = 0;
+	pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd));
+
+	/* Set USB turnaround time based on device speed and PHY interface. */
+	gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+	if (pcd->gadget.speed == USB_SPEED_HIGH) {
+		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+				DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
+			/* ULPI interface */
+			gusbcfg.b.usbtrdtim = 9;
+		}
+		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+				DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
+
+			/* UTMI+ interface: pick the 8-bit or 16-bit value,
+			 * preferring the synthesised hwcfg4 data width and
+			 * falling back to the driver parameter. */
+			if (GET_CORE_IF(pcd)->hwcfg4.b.
+				utmi_phy_data_width == 0)
+
+				gusbcfg.b.usbtrdtim = utmi8b;
+
+			else if (GET_CORE_IF(pcd)->hwcfg4.b.
+				    utmi_phy_data_width == 1)
+
+				gusbcfg.b.usbtrdtim = utmi16b;
+
+			else if (GET_CORE_IF(pcd)->core_params->
+				    phy_utmi_width == 8)
+
+				gusbcfg.b.usbtrdtim = utmi8b;
+
+			else
+				gusbcfg.b.usbtrdtim = utmi16b;
+
+		}
+		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+			DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
+			/* UTMI+  OR  ULPI interface */
+			if (gusbcfg.b.ulpi_utmi_sel == 1) {
+				/* ULPI interface */
+				gusbcfg.b.usbtrdtim = 9;
+			} else {
+				/* UTMI+ interface */
+				if (GET_CORE_IF(pcd)->core_params->
+					phy_utmi_width == 16) {
+					gusbcfg.b.usbtrdtim = utmi16b;
+				} else
+					gusbcfg.b.usbtrdtim = utmi8b;
+			}
+		}
+	} else {
+		/* Full or low speed */
+		gusbcfg.b.usbtrdtim = 9;
+	}
+	dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.enumdone = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This interrupt indicates that the ISO OUT Packet was dropped due to
+ * Rx FIFO full or Rx Status Queue Full.  If this interrupt occurs
+ * read all the data from the Rx FIFO.
+ *
+ * Not implemented here: the interrupt is masked off and its status
+ * bit cleared.
+ */
+static int
+dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(struct dwc_otg_pcd *pcd)
+{
+	union gintmsk_data intr_mask = {.d32 = 0};
+	union gintsts_data gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		    "ISOC Out Dropped");
+	/* Mask the interrupt so the unimplemented handler is not re-entered. */
+	intr_mask.b.isooutdrop = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.isooutdrop = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This interrupt indicates the end of the portion of the micro-frame
+ * for periodic transactions.  If there is a periodic transaction for
+ * the next frame, load the packets into the EP periodic Tx FIFO.
+ *
+ * Not implemented here: the interrupt is masked off and its status
+ * bit cleared.
+ */
+static int dwc_otg_pcd_handle_end_periodic_frame_intr(struct dwc_otg_pcd *pcd)
+{
+	union gintmsk_data intr_mask = {.d32 = 0};
+	union gintsts_data gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "End of Periodic Portion of Micro-Frame Interrupt");
+	/* Mask the interrupt so the unimplemented handler is not re-entered. */
+	intr_mask.b.eopframe = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.eopframe = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This interrupt indicates that EP of the packet on the top of the
+ * non-periodic Tx FIFO does not match EP of the IN Token received.
+ *
+ * The "Device IN Token Queue" Registers are read to determine the
+ * order the IN Tokens have been received.	The non-periodic Tx FIFO
+ * is flushed, so it can be reloaded in the order seen in the IN Token
+ * Queue.
+ */
+static int dwc_otg_pcd_handle_ep_mismatch_intr(struct dwc_otg_core_if *core_if)
+{
+	union gintsts_data ack = {.d32 = 0};
+
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
+
+	/* Acknowledge the EP mismatch interrupt. */
+	ack.b.epmismatch = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, ack.d32);
+
+	return 1;
+}
+
+/**
+ * This function stalls EP0 in the IN direction, resets the EP0 state
+ * machine to IDLE, and re-arms EP0 for the next SETUP packet.
+ *
+ * @param err_val error code to report in the warning (diagnostic only).
+ */
+static  void ep0_do_stall(struct dwc_otg_pcd *pcd, const int err_val)
+{
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req;
+	DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ctrl->bRequestType,
+		  ctrl->bRequest, err_val);
+	ep0->dwc_ep.is_in = 1;
+	dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep);
+	pcd->ep0.stopped = 1;
+	pcd->ep0state = EP0_IDLE;
+	/* Re-arm EP0 so the host's next SETUP can be received. */
+	ep0_out_start(GET_CORE_IF(pcd), pcd);
+}
+
+/**
+ * This functions delegates the setup command to the gadget driver.
+ * The PCD lock is dropped around the callback (hence the sparse
+ * __releases/__acquires annotations); a negative return from the
+ * gadget stalls EP0.
+ */
+static void do_gadget_setup(struct dwc_otg_pcd *pcd,
+				   struct usb_ctrlrequest *_ctrl)
+__releases(ep->pcd->lock)
+__acquires(ep->pcd->lock)
+{
+	int ret = 0;
+	if (pcd->driver && pcd->driver->setup) {
+		WARN_ON(!in_interrupt());
+
+		spin_unlock(&pcd->lock);
+		ret = pcd->driver->setup(&pcd->gadget, _ctrl);
+		spin_lock(&pcd->lock);
+
+		if (ret < 0)
+			ep0_do_stall(pcd, ret);
+
+		/** @todo This is a g_file_storage gadget driver specific
+		 * workaround: a DELAYED_STATUS result from the fsg_setup
+		 * routine will result in the gadget queueing a EP0 IN status
+		 * phase for a two-stage control transfer. Exactly the same as
+		 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
+		 * specific request.  Need a generic way to know when the gadget
+		 * driver will queue the status phase.	Can we assume when we
+		 * call the gadget driver setup() function that it will always
+		 * queue and require the following flag?  Need to look into
+		 * this.
+		 */
+		/* 256 + 999 matches file_storage's DELAYED_STATUS magic
+		 * value (see the workaround note above). */
+		if (ret == 256 + 999)
+			pcd->request_config = 1;
+	}
+}
+
+/**
+ * This function starts the Zero-Length Packet for the IN status phase
+ * of a 2 stage control transfer.  No-op if EP0 is currently stalled.
+ */
+static void do_setup_in_status_phase(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	if (pcd->ep0state == EP0_STALL)
+		return;
+
+	pcd->ep0state = EP0_IN_STATUS_PHASE;
+
+	/* Prepare for more SETUP Packets */
+	DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
+	/* Zero-length IN transfer; the DMA address is reused from the setup
+	 * packet buffer since no data will actually move. */
+	ep0->dwc_ep.xfer_len = 0;
+	ep0->dwc_ep.xfer_count = 0;
+	ep0->dwc_ep.is_in = 1;
+	ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
+	dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
+
+}
+
+/**
+ * This function starts the Zero-Length Packet for the OUT status phase
+ * of a 2 stage control transfer.  No-op if EP0 is currently stalled.
+ */
+static void do_setup_out_status_phase(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	if (pcd->ep0state == EP0_STALL) {
+		DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
+		return;
+	}
+	pcd->ep0state = EP0_OUT_STATUS_PHASE;
+
+	/* Prepare for more SETUP Packets */
+
+	DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
+	/* Zero-length OUT transfer; the DMA address is reused from the setup
+	 * packet buffer since no data will actually move. */
+	ep0->dwc_ep.xfer_len = 0;
+	ep0->dwc_ep.xfer_count = 0;
+	ep0->dwc_ep.is_in = 0;
+
+	ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
+	dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
+
+	/* Prepare for more SETUP Packets: in slave (non-DMA) mode EP0 must
+	 * be explicitly re-armed here. */
+	if (GET_CORE_IF(pcd)->dma_enable == 0)
+		ep0_out_start(GET_CORE_IF(pcd), pcd);
+
+}
+
+/**
+ * Clear the EP halt (STALL) and if pending requests start the
+ * transfer.  Finishes by starting the control status phase.
+ */
+static void pcd_clear_halt(struct dwc_otg_pcd *pcd,
+				  struct dwc_otg_pcd_ep *ep)
+{
+	/* Only clear the hardware stall if no deferred clear is pending. */
+	if (ep->dwc_ep.stall_clear_flag == 0)
+		dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
+
+	/* Reactivate the EP */
+	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
+	if (ep->stopped) {
+		ep->stopped = 0;
+
+		/* If there is a request in the EP queue start it */
+
+		/* a tasklet to calls start_next_request(), outside of interrupt
+		 * context at some time after the current time, after a
+		 * clear-halt setup packet. Still need to implement ep mismatch
+		 * in the future if a gadget ever uses more than one endpoint
+		 * at once
+		 */
+		if (GET_CORE_IF(pcd)->dma_enable) {
+			ep->queue_sof = 1;
+			tasklet_schedule(pcd->start_xfer_tasklet);
+		} else {
+#ifdef CONFIG_405EZ
+			/*
+			 * Added-sr: 2007-07-26
+			 *
+			 * To re-enable this endpoint it's important to
+			 * set this next_ep number. Otherwise the endpoint
+			 * will not get active again after stalling.
+			 */
+
+			start_next_request(ep);
+#endif
+		}
+	}
+
+	/* Start Control Status Phase */
+	do_setup_in_status_phase(pcd);
+}
+
+/**
+ * This function is called when the SET_FEATURE TEST_MODE Setup packet
+ * is sent from the host.  The Device Control register is written with
+ * the Test Mode bits set to the specified Test Mode.  This is done as
+ * a tasklet so that the "Status" phase of the control transfer
+ * completes before transmitting the TEST packets.
+ *
+ * @todo This has not been tested since the tasklet struct was put
+ * into the PCD struct!
+ *
+ * @param _data the struct dwc_otg_pcd pointer, cast through the
+ *              tasklet's unsigned long data argument.
+ */
+static void do_test_mode(unsigned long _data)
+{
+	union dctl_data dctl;
+	struct dwc_otg_pcd *pcd = (struct dwc_otg_pcd *) _data;
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	int test_mode = pcd->test_mode;
+
+	dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
+	/* The test-mode selector values map 1:1 onto DCTL.TstCtl;
+	 * unrecognised selectors leave the register unchanged. */
+	switch (test_mode) {
+	case 1:		/*TEST_J*/
+		dctl.b.tstctl = 1;
+		break;
+	case 2:		/* TEST_K*/
+		dctl.b.tstctl = 2;
+		break;
+	case 3:		/* TEST_SE0_NAK*/
+		dctl.b.tstctl = 3;
+		break;
+	case 4:		/* TEST_PACKET*/
+		dctl.b.tstctl = 4;
+		break;
+	case 5:		/* TEST_FORCE_ENABLE*/
+		dctl.b.tstctl = 5;
+		break;
+	}
+	dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+}
+
+/**
+ * This function process the GET_STATUS Setup Commands.  The two status
+ * bytes are written into pcd->status_buf and sent back to the host via
+ * a 2-byte EP0 IN transfer.
+ */
+static void do_get_status(struct dwc_otg_pcd  *pcd)
+{
+	struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	u16 *status = pcd->status_buf;
+
+#ifdef DEBUG_EP0
+	DWC_DEBUGPL(DBG_PCD,
+		    "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
+		    ctrl.bRequestType, ctrl.bRequest,
+		    __le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex),
+		    __le16_to_cpu(ctrl.wLength));
+#endif
+
+	/* NOTE(review): *status is a raw CPU-endian u16 with no cpu_to_le16
+	 * conversion before it goes on the (little-endian) wire -- confirm
+	 * whether big-endian targets are affected. */
+	switch (ctrl.bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		*status = 0x1;	/* Self powered */
+		/* bit 1 = remote wakeup enabled */
+		*status |= pcd->remote_wakeup_enable << 1;
+		break;
+
+	case USB_RECIP_INTERFACE:
+		*status = 0;
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex));
+		if (!ep || __le16_to_cpu(ctrl.wLength) > 2) {
+			ep0_do_stall(pcd, -EOPNOTSUPP);
+			return;
+		}
+		/** @todo check for EP stall */
+		*status = ep->stopped;
+		break;
+	}
+	/* Queue the 2-byte status value for the EP0 IN data phase. */
+	pcd->ep0_pending = 1;
+	ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
+	ep0->dwc_ep.xfer_buff = (uint8_t *) status;
+	ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
+	ep0->dwc_ep.xfer_len = 2;
+	ep0->dwc_ep.xfer_count = 0;
+	ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
+	dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
+}
+/**
+ * This function process the SET_FEATURE Setup Commands.  Device-recipient
+ * features (remote wakeup, test mode, OTG HNP flags) are handled locally;
+ * interface-recipient requests are delegated to the gadget driver and
+ * endpoint halt is applied directly to the endpoint.
+ */
+static void do_set_feature(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+	struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
+	struct dwc_otg_pcd_ep *ep = NULL;
+	int otg_cap_param = core_if->core_params->otg_cap;
+	union gotgctl_data gotgctl = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+			ctrl.bRequestType, ctrl.bRequest,
+			__le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex),
+			__le16_to_cpu(ctrl.wLength));
+
+	DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
+	switch (ctrl.bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		switch (__le16_to_cpu(ctrl.wValue)) {
+		case USB_DEVICE_REMOTE_WAKEUP:
+			pcd->remote_wakeup_enable = 1;
+			break;
+		case USB_DEVICE_TEST_MODE:
+
+			/* Setup the Test Mode tasklet to do the Test
+			 * Packet generation after the SETUP Status
+			 * phase has completed. */
+
+			/** @todo This has not been tested since the
+			 * tasklet struct was put into the PCD
+			 * struct! */
+			/* Open-coded tasklet initialisation (equivalent to
+			 * tasklet_init). */
+			pcd->test_mode_tasklet.next = NULL;
+			pcd->test_mode_tasklet.state = 0;
+			atomic_set(&pcd->test_mode_tasklet.count, 0);
+			pcd->test_mode_tasklet.func = do_test_mode;
+			pcd->test_mode_tasklet.data = (unsigned long)pcd;
+			/* Test selector is carried in the high byte of
+			 * wIndex. */
+			pcd->test_mode = __le16_to_cpu(ctrl.wIndex) >> 8;
+			tasklet_schedule(&pcd->test_mode_tasklet);
+			break;
+		case USB_DEVICE_B_HNP_ENABLE:
+			DWC_DEBUGPL(DBG_PCDV,
+				     "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
+
+			/* dev may initiate HNP */
+			if (otg_cap_param ==
+				DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+				pcd->b_hnp_enable = 1;
+				dwc_otg_pcd_update_otg(pcd, 0);
+				DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
+
+				/**@todo Is the gotgctl.devhnpen cleared
+				 * by a USB Reset? */
+				gotgctl.b.devhnpen = 1;
+				gotgctl.b.hnpreq = 1;
+				dwc_write_reg32(&global_regs->gotgctl,
+						gotgctl.d32);
+			} else {
+				ep0_do_stall(pcd, -EOPNOTSUPP);
+			}
+			break;
+		case USB_DEVICE_A_HNP_SUPPORT:
+			/* RH port supports HNP */
+			DWC_DEBUGPL(DBG_PCDV,
+					"SET_FEATURE:"
+					" USB_DEVICE_A_HNP_SUPPORT\n");
+			if (otg_cap_param ==
+				DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+				pcd->a_hnp_support = 1;
+				dwc_otg_pcd_update_otg(pcd, 0);
+			} else {
+				ep0_do_stall(pcd, -EOPNOTSUPP);
+			}
+			break;
+		case USB_DEVICE_A_ALT_HNP_SUPPORT:
+			/* other RH port does */
+			DWC_DEBUGPL(DBG_PCDV,
+					"SET_FEATURE: "
+					"USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+			if (otg_cap_param ==
+				DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+				pcd->a_alt_hnp_support = 1;
+				dwc_otg_pcd_update_otg(pcd, 0);
+			} else {
+				ep0_do_stall(pcd, -EOPNOTSUPP);
+			}
+			break;
+		}
+		do_setup_in_status_phase(pcd);
+		break;
+	case USB_RECIP_INTERFACE:
+		do_gadget_setup(pcd, &ctrl);
+		break;
+	case USB_RECIP_ENDPOINT:
+		if (__le16_to_cpu(ctrl.wValue) == USB_ENDPOINT_HALT) {
+			ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex));
+			if (!ep) {
+				ep0_do_stall(pcd, -EOPNOTSUPP);
+				return;
+			}
+			ep->stopped = 1;
+			dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
+		}
+		do_setup_in_status_phase(pcd);
+		break;
+	}
+}
+
+/**
+ * Handle a CLEAR_FEATURE control request from the host.
+ *
+ * Device-recipient requests clear the remote-wakeup enable flag
+ * (clearing TEST modes is still a todo) and then start the IN status
+ * phase.  Endpoint-recipient requests clear the halt condition on the
+ * addressed endpoint, stalling ep0 when the endpoint address is not
+ * recognised.  Other recipients are ignored.
+ */
+static void do_clear_feature(struct dwc_otg_pcd *pcd)
+{
+	struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
+	struct dwc_otg_pcd_ep *target_ep;
+
+	DWC_DEBUGPL(DBG_PCD, "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+			ctrl.bRequestType, ctrl.bRequest,
+			__le16_to_cpu(ctrl.wValue),
+			__le16_to_cpu(ctrl.wIndex),
+			__le16_to_cpu(ctrl.wLength));
+
+	switch (ctrl.bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		if (__le16_to_cpu(ctrl.wValue) == USB_DEVICE_REMOTE_WAKEUP)
+			pcd->remote_wakeup_enable = 0;
+		/** @todo Add CLEAR_FEATURE for TEST modes. */
+		do_setup_in_status_phase(pcd);
+		break;
+	case USB_RECIP_ENDPOINT:
+		target_ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex));
+		if (!target_ep) {
+			ep0_do_stall(pcd, -EOPNOTSUPP);
+			return;
+		}
+		pcd_clear_halt(pcd, target_ep);
+		DWC_DEBUGPL(DBG_PCD, "%s halt cleared by host\n",
+			      target_ep->ep.name);
+		break;
+	}
+}
+
+/**
+ * Handle a SET_ADDRESS control request from the host.
+ *
+ * Programs the device address field of DCFG with the address supplied
+ * in wValue, then starts the IN status phase.  Requests whose
+ * recipient is not the device are ignored.
+ */
+static void do_set_address(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_dev_if *dev_if = GET_CORE_IF(pcd)->dev_if;
+	struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
+	union dcfg_data dcfg = {.d32 = 0 };
+
+	if (ctrl.bRequestType != USB_RECIP_DEVICE)
+		return;
+
+#ifdef DEBUG_EP0
+	DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
+#endif
+	/* Only set (OR in) the address bits; other DCFG bits are kept. */
+	dcfg.b.devaddr = __le16_to_cpu(ctrl.wValue);
+	dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
+	do_setup_in_status_phase(pcd);
+}
+
+/**
+ * This function processes SETUP commands.  In Linux, USB command
+ * processing is split between two places - the PCD (here) for
+ * standard requests, and the gadget driver (for example the
+ * File-Backed Storage gadget) for everything else.
+ *
+ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
+ * processed by pcd_setup. Calling the Function Driver's setup function from
+ * pcd_setup processes the gadget SETUP commands.
+ */
+static void pcd_setup(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	union deptsiz0_data doeptsize0 = {.d32 = 0};
+
+#ifdef DEBUG_EP0
+	DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+		ctrl.bRequestType, ctrl.bRequest, __le16_to_cpu(ctrl.wValue),
+		__le16_to_cpu(ctrl.wIndex), __le16_to_cpu(ctrl.wLength));
+
+#endif	/*  */
+	/* OUT EP0 transfer size register holds the remaining SETUP
+	 * packet count (supcnt) in buffer-DMA mode. */
+	doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz);
+
+	/** @todo handle > 1 setup packet , assert error for now */
+	if (core_if->dma_enable && core_if->dma_desc_enable == 0
+	    && (doeptsize0.b.supcnt < 2)) {
+		DWC_ERROR("\n\n	 CANNOT handle > 1 setup"
+				" packet in DMA mode\n\n");
+	}
+
+	/* Clean up the request queue */
+	dwc_otg_request_nuke(ep0, NULL);
+	ep0->stopped = 0;
+	/* The data phase direction follows bit 7 of bmRequestType. */
+	if (ctrl.bRequestType & USB_DIR_IN) {
+		ep0->dwc_ep.is_in = 1;
+		pcd->ep0state = EP0_IN_DATA_PHASE;
+	} else {
+		ep0->dwc_ep.is_in = 0;
+		pcd->ep0state = EP0_OUT_DATA_PHASE;
+	}
+	/* No data stage: the status stage is always IN direction. */
+	if (__le16_to_cpu(ctrl.wLength) == 0) {
+		ep0->dwc_ep.is_in = 1;
+		pcd->ep0state = EP0_IN_STATUS_PHASE;
+	}
+
+	if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+		/*
+		 * handle non-standard (class/vendor)
+		 * requests in the gadget driver
+		 */
+		do_gadget_setup(pcd, &ctrl);
+		return;
+	}
+
+	/** @todo NGS: Handle bad setup packet? */
+	switch (ctrl.bRequest) {
+	case USB_REQ_GET_STATUS:
+#ifdef DEBUG_EP0
+		    DWC_DEBUGPL(DBG_PCD,
+				"GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
+				ctrl.bRequestType, ctrl.bRequest,
+				__le16_to_cpu(ctrl.wValue),
+				__le16_to_cpu(ctrl.wIndex),
+				__le16_to_cpu(ctrl.wLength));
+
+#endif	/*  */
+		do_get_status(pcd);
+		break;
+
+	case USB_REQ_CLEAR_FEATURE:
+		do_clear_feature(pcd);
+		break;
+	case USB_REQ_SET_FEATURE:
+		do_set_feature(pcd);
+		break;
+	case USB_REQ_SET_ADDRESS:
+		do_set_address(pcd);
+		break;
+	/* Remaining standard requests are delegated to the gadget
+	 * driver's setup callback. */
+	case USB_REQ_SET_INTERFACE:
+	case USB_REQ_SET_CONFIGURATION:
+		do_gadget_setup(pcd, &ctrl);
+		break;
+	case USB_REQ_SYNCH_FRAME:
+		do_gadget_setup(pcd, &ctrl);
+		break;
+	default:
+		/* Call the Gadget Driver's setup functions */
+		do_gadget_setup(pcd, &ctrl);
+		break;
+	}
+}
+
+/**
+ * This function completes the ep0 control transfer.
+ *
+ * If a setup is pending with no request queued, the status phase is
+ * started immediately in the direction opposite to the data phase.
+ * Otherwise the head request on the ep0 queue is examined: during a
+ * status phase it is completed; during the IN data phase completion is
+ * detected from DIEPTSIZ (or the DMA descriptor status) and the OUT
+ * status phase is started; during the OUT data phase the received byte
+ * count is recorded and the IN status phase is started (non-desc-DMA
+ * only).
+ *
+ * Returns 1 if a request was completed or a status phase was started,
+ * 0 otherwise.
+ */
+static int ep0_complete_request(struct dwc_otg_pcd_ep *ep)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(ep->pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	struct dwc_otg_dev_in_ep_regs __iomem *in_ep_regs =
+		dev_if->in_ep_regs[ep->dwc_ep.num];
+#ifdef DEBUG_EP0
+	struct dwc_otg_dev_out_ep_regs __iomem *out_ep_regs =
+		dev_if->out_ep_regs[ep->dwc_ep.num];
+#endif
+	union deptsiz0_data deptsiz;
+	union dev_dma_desc_sts desc_sts;
+	struct dwc_otg_pcd_request *req;
+	int is_last = 0;
+	struct dwc_otg_pcd *pcd = ep->pcd;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, ep->ep.name);
+
+	/*
+	 * A pending setup with nothing queued means the data phase needs
+	 * no request; go straight to the status phase, which runs in the
+	 * opposite direction to the data phase.
+	 */
+	if (pcd->ep0_pending && list_empty(&ep->queue)) {
+		if (ep->dwc_ep.is_in) {
+#ifdef DEBUG_EP0
+			DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
+#endif
+			do_setup_out_status_phase(pcd);
+		} else {
+#ifdef DEBUG_EP0
+			DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
+#endif
+			do_setup_in_status_phase(pcd);
+		}
+		pcd->ep0_pending = 0;
+		return 1;
+	}
+
+	if (list_empty(&ep->queue))
+		return 0;
+
+	req = list_entry(ep->queue.next, struct dwc_otg_pcd_request, queue);
+
+	if (pcd->ep0state == EP0_OUT_STATUS_PHASE
+	    || pcd->ep0state == EP0_IN_STATUS_PHASE) {
+		is_last = 1;
+	} else if (ep->dwc_ep.is_in) {
+		/* IN data phase: transfer done when xfersize (slave /
+		 * buffer DMA) or the descriptor byte count (desc DMA)
+		 * has drained to zero. */
+		deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
+		if (core_if->dma_desc_enable != 0)
+			desc_sts = dev_if->in_desc_addr->status;
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCDV, "%s len=%d  xfersize=%d pktcnt=%d\n",
+				ep->ep.name, ep->dwc_ep.xfer_len,
+				deptsiz.b.xfersize, deptsiz.b.pktcnt);
+#endif
+		if (((core_if->dma_desc_enable == 0)
+		     && (deptsiz.b.xfersize == 0))
+		    || ((core_if->dma_desc_enable != 0)
+			&& (desc_sts.b.bytes == 0))) {
+			req->req.actual = ep->dwc_ep.xfer_count;
+			/* Is a Zero Len Packet needed? */
+			if (req->req.zero) {
+#ifdef DEBUG_EP0
+				DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
+#endif
+				req->req.zero = 0;
+			}
+			do_setup_out_status_phase(pcd);
+		}
+	} else {
+		/* ep0-OUT */
+#ifdef DEBUG_EP0
+		deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
+		DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n",
+				ep->ep.name, ep->dwc_ep.xfer_len,
+				deptsiz.b.xfersize,
+				deptsiz.b.pktcnt);
+#endif
+		req->req.actual = ep->dwc_ep.xfer_count;
+
+		/* Is a Zero Len Packet needed? */
+		if (req->req.zero) {
+#ifdef DEBUG_EP0
+			DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
+#endif
+			req->req.zero = 0;
+		}
+		if (core_if->dma_desc_enable == 0)
+			do_setup_in_status_phase(pcd);
+	}
+
+	/* Complete the request */
+	if (is_last) {
+		dwc_otg_request_done(ep, req, 0, NULL);
+		ep->dwc_ep.start_xfer_buff = NULL;
+		ep->dwc_ep.xfer_buff = NULL;
+		ep->dwc_ep.xfer_len = 0;
+		/* If there is a request in the queue start it. */
+		if (ep->pcd->ep0_request_pending)
+			start_next_request(ep);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * This function completes the request for the EP. If there are
+ * additional requests for the EP in the queue they will be started.
+ *
+ * For IN endpoints completion is detected from DIEPTSIZ (slave and
+ * buffer-DMA modes) or by summing the residual byte counts of the DMA
+ * descriptors (descriptor-DMA mode).  For OUT endpoints the received
+ * byte count is derived the same way.  If the overall transfer is not
+ * yet finished the next portion (or a trailing zero-length packet) is
+ * started instead of completing the request.
+ */
+static void complete_ep(struct dwc_otg_pcd_ep *ep)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(ep->pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	struct dwc_otg_dev_in_ep_regs __iomem *in_ep_regs =
+		dev_if->in_ep_regs[ep->dwc_ep.num];
+	union deptsiz_data deptsiz;
+	union dev_dma_desc_sts desc_sts;
+	struct dwc_otg_pcd_request *req = NULL;
+	struct dwc_otg_dev_dma_desc *dma_desc;
+	int is_last = 0;
+	u32 byte_count = 0;
+
+	DWC_DEBUGPL(DBG_PCDV, "%s() %s-%s\n", __func__, ep->ep.name,
+			(ep->dwc_ep.is_in ? "IN" : "OUT"));
+
+	/* Get any pending requests */
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct dwc_otg_pcd_request,
+				 queue);
+		if (!req) {
+			DWC_PRINT("complete_ep 0x%p, req = NULL!\n", ep);
+			return;
+		}
+	} else {
+		DWC_PRINT("complete_ep 0x%p, ep->queue empty!\n", ep);
+		return;
+	}
+
+	/* IN endpoint */
+	if (ep->dwc_ep.is_in) {
+		deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
+		if (core_if->dma_enable) {
+			if (core_if->dma_desc_enable == 0) {
+				/* Buffer DMA: done when both the transfer
+				 * size and packet count have drained. */
+				if (deptsiz.b.xfersize == 0
+				    && deptsiz.b.pktcnt == 0) {
+					byte_count =
+					    ep->dwc_ep.xfer_len -
+					    ep->dwc_ep.xfer_count;
+
+					ep->dwc_ep.xfer_buff += byte_count;
+					ep->dwc_ep.dma_addr += byte_count;
+					ep->dwc_ep.xfer_count += byte_count;
+
+					DWC_DEBUGPL(DBG_PCDV,
+						    "%d-%s len=%d  xfersize=%d "
+						    "pktcnt=%d\n",
+						    ep->dwc_ep.num,
+						    (ep->dwc_ep.
+						     is_in ? "IN" : "OUT"),
+						    ep->dwc_ep.xfer_len,
+						    deptsiz.b.xfersize,
+						    deptsiz.b.pktcnt);
+
+					/* More of the overall transfer left:
+					 * start the next portion. */
+					if (ep->dwc_ep.xfer_len <
+					    ep->dwc_ep.total_len) {
+						dwc_otg_ep_start_transfer
+						    (core_if, &ep->dwc_ep);
+					} else if (ep->dwc_ep.sent_zlp) {
+					/*
+					 * This fragment of code should initiate
+					 * 0 length transfer in case if it is
+					 * queued a transfer with size divisible
+					 * to EPs max packet size and with
+					 * usb_request zero field is set, which
+					 * means that after data is transfered,
+					 * it is also should be transfered a 0
+					 * length packet at the end. For Slave
+					 * and Buffer DMA modes in this case
+					 * SW has to initiate 2 transfers one
+					 * with transfer size, and the second
+					 * with 0 size. For Descriptor DMA mode
+					 * SW is able to initiate a transfer,
+					 * which will handle all the packets
+					 * including the last 0 length.
+					 */
+						ep->dwc_ep.sent_zlp = 0;
+						dwc_otg_ep_start_zl_transfer(core_if,
+								&ep->dwc_ep);
+					} else
+						is_last = 1;
+				} else {
+					DWC_WARN
+					    ("Incomplete transfer (%d - %s "
+					     "[siz=%d pkt=%d])\n",
+					     ep->dwc_ep.num,
+					     (ep->dwc_ep.is_in ? "IN" : "OUT"),
+					     deptsiz.b.xfersize,
+					     deptsiz.b.pktcnt);
+				}
+			} else { /*not buffer dma, desc dma*/
+				int i;
+				dma_desc = ep->dwc_ep.desc_addr;
+				byte_count = 0;
+				ep->dwc_ep.sent_zlp = 0;
+
+				/* Sum the residual byte counts over the
+				 * descriptor chain; zero means the whole
+				 * programmed transfer completed. */
+				for (i = 0; i < ep->dwc_ep.desc_cnt; ++i) {
+					desc_sts = dma_desc->status;
+					byte_count += desc_sts.b.bytes;
+					dma_desc++;
+				}
+
+				if (byte_count == 0) {
+					ep->dwc_ep.xfer_count =
+					    ep->dwc_ep.total_len;
+					is_last = 1;
+				} else
+					DWC_WARN("Incomplete transfer\n");
+			}
+		} else { /*no dma*/
+			if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
+				DWC_DEBUGPL(DBG_PCDV,
+					    "%d-%s len=%d  xfersize=%d "
+					    "pktcnt=%d\n",
+					    ep->dwc_ep.num,
+					    ep->dwc_ep.is_in ? "IN" : "OUT",
+					    ep->dwc_ep.xfer_len,
+					    deptsiz.b.xfersize,
+					    deptsiz.b.pktcnt);
+
+				/*
+				 * Check if the whole transfer was completed,
+				 * if no, setup transfer for next portion of
+				 * data
+				 */
+				if (ep->dwc_ep.xfer_len <
+						ep->dwc_ep.total_len) {
+					dwc_otg_ep_start_transfer(core_if,
+								  &ep->dwc_ep);
+				} else if (ep->dwc_ep.sent_zlp) {
+					/*
+					 * This fragment of code should initiate
+					 * 0 length transfer in case if it is
+					 * queued a transfer with size divisible
+					 * to EPs max packet size and with
+					 * usb_request zero field is set, which
+					 * means that after data is transfered,
+					 * it is also should be transfered a 0
+					 * length packet at the end. For Slave
+					 * and Buffer DMA modes in this case
+					 * SW has to initiate 2 transfers one
+					 * with transfer size, and the second
+					 * with 0 size. For Descriptor DMA mode
+					 * SW is able to initiate a transfer,
+					 * which will handle all the packets
+					 * including the last 0 length.
+					 */
+					ep->dwc_ep.sent_zlp = 0;
+					dwc_otg_ep_start_zl_transfer(core_if,
+								     &ep->
+								     dwc_ep);
+				} else
+					is_last = 1;
+			} else {
+				DWC_WARN
+				    ("Incomplete transfer (%d-%s "
+				     "[siz=%d pkt=%d])\n",
+				     ep->dwc_ep.num,
+				     (ep->dwc_ep.is_in ? "IN" : "OUT"),
+				     deptsiz.b.xfersize, deptsiz.b.pktcnt);
+			}
+		}
+	} else { /*Out Endpoint */
+		struct dwc_otg_dev_out_ep_regs __iomem *out_ep_regs =
+		    dev_if->out_ep_regs[ep->dwc_ep.num];
+		desc_sts.d32 = 0;
+		if (core_if->dma_enable) {
+			if (core_if->dma_desc_enable) {
+				int i;
+				dma_desc = ep->dwc_ep.desc_addr;
+				byte_count = 0;
+				ep->dwc_ep.sent_zlp = 0;
+
+				for (i = 0; i < ep->dwc_ep.desc_cnt; ++i) {
+					desc_sts = dma_desc->status;
+					byte_count += desc_sts.b.bytes;
+					dma_desc++;
+				}
+
+				/* NOTE(review): the trailing term looks like a
+				 * correction for the word-aligned (rounded-up)
+				 * programmed length — confirm against the
+				 * descriptor setup code. */
+				ep->dwc_ep.xfer_count = ep->dwc_ep.total_len
+				    - byte_count +
+				    ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3);
+				is_last = 1;
+			} else {
+				deptsiz.d32 = 0;
+				deptsiz.d32 =
+				    dwc_read_reg32(&out_ep_regs->doeptsiz);
+
+				byte_count = (ep->dwc_ep.xfer_len -
+					      ep->dwc_ep.xfer_count -
+					      deptsiz.b.xfersize);
+				ep->dwc_ep.xfer_buff += byte_count;
+				ep->dwc_ep.dma_addr += byte_count;
+				ep->dwc_ep.xfer_count += byte_count;
+
+				/*
+				 * Check if the whole transfer was completed,
+				 * if no, setup transfer for next portion of
+				 * data
+				 */
+
+				if (ep->dwc_ep.xfer_len <
+						ep->dwc_ep.total_len) {
+					dwc_otg_ep_start_transfer(core_if,
+								  &ep->dwc_ep);
+				} else if (ep->dwc_ep.sent_zlp) {
+					/*
+					 * This fragment of code should initiate
+					 * 0 length transfer in case if it is
+					 * queued a transfer with size divisible
+					 * to EPs max packet size and with
+					 * usb_request zero field is set, which
+					 * means that after data is transfered,
+					 * it is also should be transfered a 0
+					 * length packet at the end. For Slave
+					 * and Buffer DMA modes in this case
+					 * SW has to initiate 2 transfers one
+					 * with transfer size, and the second
+					 * with 0 size. For Descriptor DMA mode
+					 * SW is able to initiate a transfer,
+					 * which will handle all the packets
+					 * including the last 0 length.
+					 */
+					ep->dwc_ep.sent_zlp = 0;
+					dwc_otg_ep_start_zl_transfer(core_if,
+								     &ep->
+								     dwc_ep);
+				} else
+					is_last = 1;
+			}
+		} else {
+			/*      Check if the whole transfer was completed,
+			 *      if no, setup transfer for next portion of data
+			 */
+			if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len)
+				dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
+			else if (ep->dwc_ep.sent_zlp) {
+				ep->dwc_ep.sent_zlp = 0;
+				dwc_otg_ep_start_zl_transfer(core_if,
+							     &ep->dwc_ep);
+			} else
+				is_last = 1;
+		}
+
+		DWC_DEBUGPL(DBG_PCDV,
+			    "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
+			    &out_ep_regs->doeptsiz, ep->dwc_ep.num,
+			    ep->dwc_ep.is_in ? "IN" : "OUT",
+			    ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count,
+			    deptsiz.b.xfersize, deptsiz.b.pktcnt);
+	}
+
+	/* Complete the request */
+	if (is_last) {
+		req->req.actual = ep->dwc_ep.xfer_count;
+		dwc_otg_request_done(ep, req, 0, NULL);
+		ep->dwc_ep.start_xfer_buff = NULL;
+		ep->dwc_ep.xfer_buff = NULL;
+		ep->dwc_ep.xfer_len = 0;
+
+		/* If there is a request in the queue start it. */
+		if (ep->request_pending)
+			start_next_request(ep);
+
+	}
+}
+
+
+/**
+ * This function handles EP0 Control transfers.
+ *
+ * The state of the control transfers is tracked in pcd->ep0state:
+ * IDLE decodes a new SETUP packet; the IN/OUT data-phase states
+ * advance or complete the data stage; the status-phase states finish
+ * the transfer and re-arm EP0 for the next SETUP.
+ */
+static void handle_ep0(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_pcd_ep *ep0 = &pcd->ep0;
+	union dev_dma_desc_sts desc_sts;
+	union deptsiz0_data deptsiz;
+	u32 byte_count;
+
+#ifdef DEBUG_EP0
+	DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+	print_ep0_state(pcd);
+
+#endif	/*  */
+	switch (pcd->ep0state) {
+	case EP0_DISCONNECT:
+		break;
+	case EP0_IDLE:
+		/* A new SETUP packet has arrived; decode it. */
+		pcd->request_config = 0;
+		pcd_setup(pcd);
+		break;
+	case EP0_IN_DATA_PHASE:
+
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
+				ep0->dwc_ep.num,
+				(ep0->dwc_ep.is_in ? "IN" : "OUT"),
+				ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
+
+#endif	/*  */
+		if (core_if->dma_enable != 0) {
+			/*
+			 * For EP0 we can only program 1 packet at a time so we
+			 * need to do the make calculations after each complete.
+			 * Call write_packet to make the calculations, as in
+			 * slave mode, and use those values to determine if we
+			 * can complete.
+			 */
+			if (core_if->dma_desc_enable == 0) {
+				deptsiz.d32 =
+				    dwc_read_reg32(&core_if->dev_if->
+						   in_ep_regs[0]->dieptsiz);
+				byte_count =
+				    ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
+			} else {
+				desc_sts =
+				    core_if->dev_if->in_desc_addr->status;
+				byte_count =
+				    ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
+			}
+			/* Advance the transfer bookkeeping by what the
+			 * hardware actually sent. */
+			ep0->dwc_ep.xfer_count += byte_count;
+			ep0->dwc_ep.xfer_buff += byte_count;
+			ep0->dwc_ep.dma_addr += byte_count;
+		}
+		if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
+			dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						      &ep0->dwc_ep);
+			DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else if (ep0->dwc_ep.sent_zlp) {
+			/* All data sent, but a trailing zero-length packet
+			 * is still owed. */
+			dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						      &ep0->dwc_ep);
+			ep0->dwc_ep.sent_zlp = 0;
+			DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else {
+			ep0_complete_request(ep0);
+			DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
+		}
+		break;
+	case EP0_OUT_DATA_PHASE:
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
+			    ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
+			    ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
+#endif
+		if (core_if->dma_enable != 0) {
+			if (core_if->dma_desc_enable == 0) {
+				deptsiz.d32 =
+				    dwc_read_reg32(&core_if->dev_if->
+						   out_ep_regs[0]->doeptsiz);
+				/* OUT EP0 receives at most one max-packet
+				 * per programming. */
+				byte_count =
+				    ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
+			} else {
+				desc_sts =
+				    core_if->dev_if->out_desc_addr->status;
+				byte_count =
+				    ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
+			}
+			ep0->dwc_ep.xfer_count += byte_count;
+			ep0->dwc_ep.xfer_buff += byte_count;
+			ep0->dwc_ep.dma_addr += byte_count;
+		}
+		if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
+			dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						      &ep0->dwc_ep);
+			DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else if (ep0->dwc_ep.sent_zlp) {
+			dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						      &ep0->dwc_ep);
+			ep0->dwc_ep.sent_zlp = 0;
+			DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else {
+			ep0_complete_request(ep0);
+			DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
+		}
+		break;
+
+	case EP0_IN_STATUS_PHASE:
+	case EP0_OUT_STATUS_PHASE:
+		DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
+		ep0_complete_request(ep0);
+		pcd->ep0state = EP0_IDLE;
+		ep0->stopped = 1;
+		ep0->dwc_ep.is_in = 0;	/* OUT for next SETUP */
+
+		/* Prepare for more SETUP Packets */
+		if (core_if->dma_enable)
+			ep0_out_start(core_if, pcd);
+
+		break;
+	case EP0_STALL:
+		DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
+		break;
+	}
+
+#ifdef DEBUG_EP0
+	    print_ep0_state(pcd);
+#endif	/*  */
+}
+
+/**
+ * Restart transfer
+ *
+ * Re-queues the last (partially NAKed) packet on the given IN endpoint:
+ * if DIEPTSIZ shows packets still pending but no bytes left, the
+ * transfer bookkeeping is rewound by one max-packet (or to the start of
+ * the buffer for single-packet transfers) and the transfer is started
+ * again.
+ */
+static void restart_transfer(struct dwc_otg_pcd *pcd, const u32 _epnum)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union deptsiz_data dieptsiz = {.d32 = 0};
+
+	struct dwc_otg_pcd_ep *ep;
+	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+	ep = get_in_ep(pcd, _epnum);
+
+	DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
+			" stopped=%d\n", ep->dwc_ep.xfer_buff,
+			ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
+			ep->stopped);
+
+	/*
+	 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
+	 */
+	if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0
+		&& ep->dwc_ep.start_xfer_buff) {
+		if (ep->dwc_ep.xfer_len <= ep->dwc_ep.maxpacket) {
+			/* Single-packet transfer: restart from the very
+			 * beginning of the buffer. */
+			ep->dwc_ep.xfer_count = 0;
+			ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
+		} else {
+			ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
+
+			/* Rewind the buffer pointer by one max-packet so
+			 * the last packet is sent again.  (NOTE(review):
+			 * the old "convert packet size to dwords" comment
+			 * did not match this code.) */
+			ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
+		}
+		ep->stopped = 0;
+		DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
+			     "xfer_len=%0x stopped=%d\n", ep->dwc_ep.xfer_buff,
+			     ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
+			     ep->stopped);
+		/* EP0 uses its own (single-packet) start routine. */
+		if (_epnum == 0)
+			dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
+		else
+			dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
+	}
+}
+
+/**
+ * handle the IN EP disable interrupt.
+ *
+ * For isochronous endpoints the Tx FIFO is simply flushed.  For other
+ * endpoint types: if the endpoint was stopped, the FIFO is flushed,
+ * the Global IN NP NAK is cleared and any partially-finished transfer
+ * is restarted; otherwise only the restart is attempted.
+ */
+static void handle_in_ep_disable_intr(struct dwc_otg_pcd *pcd,
+					     const u32 _epnum)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union deptsiz_data dieptsiz = {.d32 = 0};
+	union dctl_data dctl = {.d32 = 0};
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_ep *dwc_ep;
+	ep = get_in_ep(pcd, _epnum);
+	dwc_ep = &ep->dwc_ep;
+
+	/* Isochronous endpoints: just discard whatever is in the FIFO. */
+	if (dwc_ep->type == USB_ENDPOINT_XFER_ISOC) {
+		dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
+		return;
+	}
+
+	DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", _epnum,
+			dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->diepctl));
+	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+	DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt,
+		      dieptsiz.b.xfersize);
+	if (ep->stopped) {
+
+		/* Flush the Tx FIFO */
+		/** @todo NGS: This is not the correct FIFO */
+		dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
+
+		/* Clear the Global IN NP NAK */
+		dctl.d32 = 0;
+		dctl.b.cgnpinnak = 1;
+		dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);
+
+		/* Restart the transaction */
+		if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0)
+			restart_transfer(pcd, _epnum);
+	} else {
+
+		/* Restart the transaction */
+		if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0)
+			restart_transfer(pcd, _epnum);
+		/* NOTE(review): this "STOPPED!!!" debug fires in the
+		 * NOT-stopped branch — looks like it belongs in the
+		 * branch above; confirm before relying on the log. */
+		DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
+	}
+}
+
+/**
+ * Handler for the IN EP timeout handshake interrupt.
+ *
+ * Disables the NP Tx FIFO empty interrupt (slave mode only), enables
+ * the Global IN NAK Effective interrupt, sets the Global Non-Periodic
+ * IN NAK in DCTL and marks the endpoint stopped.  The subsequent
+ * "IN NAK effective" interrupt continues the recovery.
+ */
+static void handle_in_ep_timeout_intr(struct dwc_otg_pcd *pcd,
+					     const u32 _epnum)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+
+#ifdef DEBUG
+	union deptsiz_data dieptsiz = {.d32 = 0};
+
+#endif	/*  */
+	union dctl_data dctl = {.d32 = 0};
+	struct dwc_otg_pcd_ep *ep;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	ep = get_in_ep(pcd, _epnum);
+
+	/* Disable the NP Tx Fifo Empty Interrrupt */
+	if (!core_if->dma_enable) {
+		intr_mask.b.nptxfempty = 1;
+		dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
+				  intr_mask.d32, 0);
+	}
+
+	/** @todo NGS Check EP type.
+	 * Implement for Periodic EPs */
+	    /*
+	     * Non-periodic EP
+	     */
+	/* Enable the Global IN NAK Effective Interrupt */
+	intr_mask.b.ginnakeff = 1;
+	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0,
+			  intr_mask.d32);
+
+	/* Set Global IN NAK */
+	dctl.b.sgnpinnak = 1;
+	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+	ep->stopped = 1;
+
+#ifdef DEBUG
+	/* Dump the transfer-size register of the endpoint that timed
+	 * out (the old code read in_ep_regs[0] regardless of _epnum). */
+	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+	DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt,
+		     dieptsiz.b.xfersize);
+
+#endif	/*  */
+
+#ifdef DISABLE_PERIODIC_EP
+	/*
+	 * Set the NAK bit for this EP to
+	 * start the disable process.
+	 * (diepctl is scoped here; the old code referenced an
+	 * undeclared variable and would not have compiled.)
+	 */
+	{
+		union depctl_data diepctl = {.d32 = 0};
+
+		diepctl.b.snak = 1;
+		dwc_modify_reg32(&dev_if->in_ep_regs[_epnum]->diepctl,
+				  diepctl.d32, diepctl.d32);
+		ep->disabling = 1;
+		ep->stopped = 1;
+	}
+#endif	/*  */
+}
+
+/**
+ * Handler for the IN EP NAK interrupt.
+ *
+ * No real handling is implemented yet: the event is logged and the NAK
+ * interrupt is masked off so it stops firing (per-endpoint mask when
+ * multiprocessor interrupt mode is enabled, shared device-wide mask
+ * otherwise).
+ */
+static int handle_in_ep_nak_intr(struct dwc_otg_pcd *pcd,
+					    const u32 epnum)
+{
+	/** @todo implement ISR */
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union diepint_data mask = {.d32 = 0 };
+
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
+	mask.b.nak = 1;
+
+	if (core_if->multiproc_int_enable)
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+				 diepeachintmsk[epnum], mask.d32, 0);
+	else
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk,
+				 mask.d32, 0);
+
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP Babble interrupt.
+ *
+ * No real handling is implemented yet: the event is logged and the
+ * babble interrupt is masked off so it stops firing (per-endpoint mask
+ * when multiprocessor interrupt mode is enabled, shared device-wide
+ * mask otherwise).
+ */
+static int handle_out_ep_babble_intr(struct dwc_otg_pcd *pcd,
+						const u32 epnum)
+{
+	/** @todo implement ISR */
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union doepint_data mask = {.d32 = 0 };
+
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "OUT EP Babble");
+	mask.b.babble = 1;
+
+	if (core_if->multiproc_int_enable)
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+				 doepeachintmsk[epnum], mask.d32, 0);
+	else
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
+				 mask.d32, 0);
+
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP NAK interrupt.
+ *
+ * No real handling is implemented yet: the event is logged and the NAK
+ * interrupt is masked off so it stops firing (per-endpoint mask when
+ * multiprocessor interrupt mode is enabled, shared device-wide mask
+ * otherwise).
+ */
+static int handle_out_ep_nak_intr(struct dwc_otg_pcd *pcd,
+					     const u32 epnum)
+{
+	/** @todo implement ISR */
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union doepint_data mask = {.d32 = 0 };
+
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
+	mask.b.nak = 1;
+
+	if (core_if->multiproc_int_enable)
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+				 doepeachintmsk[epnum], mask.d32, 0);
+	else
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
+				 mask.d32, 0);
+
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP NYET interrupt.
+ *
+ * No real handling is implemented yet: the event is logged and the
+ * NYET interrupt is masked off so it stops firing (per-endpoint mask
+ * when multiprocessor interrupt mode is enabled, shared device-wide
+ * mask otherwise).
+ */
+static int handle_out_ep_nyet_intr(struct dwc_otg_pcd *pcd,
+					      const u32 epnum)
+{
+	/** @todo implement ISR */
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	union doepint_data mask = {.d32 = 0 };
+
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
+	mask.b.nyet = 1;
+
+	if (core_if->multiproc_int_enable)
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+				 doepeachintmsk[epnum], mask.d32, 0);
+	else
+		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
+				 mask.d32, 0);
+
+	return 1;
+}
+
+/**
+ * This interrupt indicates that an IN EP has a pending Interrupt.
+ * The sequence for handling the IN EP interrupt is shown below:
+ * -#	Read the Device All Endpoint Interrupt register
+ * -#	Repeat the following for each IN EP interrupt bit set (from
+ *		LSB to MSB).
+ * -#	Read the Device Endpoint Interrupt (DIEPINTn) register
+ * -#	If "Transfer Complete" call the request complete function
+ * -#	If "Endpoint Disabled" complete the EP disable procedure.
+ * -#	If "AHB Error Interrupt" log error
+ * -#	If "Time-out Handshake" log error
+ * -#	If "IN Token Received when TxFIFO Empty" write packet to Tx
+ *		FIFO.
+ * -#	If "IN Token EP Mismatch" (disable, this is handled by EP
+ *		Mismatch Interrupt)
+ */
+static int dwc_otg_pcd_handle_in_ep_intr(struct dwc_otg_pcd *pcd)
+{
+
+#define CLEAR_IN_EP_INTR(__core_if, __epnum, __intr) \
+    do { \
+	union diepint_data diepint = {.d32 = 0}; \
+	diepint.b.__intr = 1; \
+	dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
+	diepint.d32); \
+} while (0)
+
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	union diepint_data diepint = {.d32 = 0};
+	union dctl_data dctl = {.d32 = 0 };
+	union depctl_data diepctl = {.d32 = 0};
+	u32 ep_intr;
+	u32 epnum = 0;
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_ep *dwc_ep;
+	u32 _empty_msk, _diepctl;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
+
+	/* Read in the device interrupt bits */
+	ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
+
+	/* Service the Device IN interrupts for each endpoint */
+	while (ep_intr) {
+		if (ep_intr & 0x1) {
+			/* Get EP pointer */
+			ep = get_in_ep(pcd, epnum);
+			dwc_ep = &ep->dwc_ep;
+			_diepctl =
+				dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
+			_empty_msk =
+				dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
+			DWC_DEBUGPL(DBG_PCDV,
+				      "IN EP INTERRUPT - %d\nepmty_msk "
+				      "- %8x  diepctl - %8x\n",
+				      epnum, _empty_msk, _diepctl);
+			DWC_DEBUGPL(DBG_PCD, "EP%d-%s: type=%d, mps=%d\n",
+				      dwc_ep->num,
+				      (dwc_ep->is_in ? "IN" : "OUT"),
+				      dwc_ep->type,
+				      dwc_ep->maxpacket);
+			diepint.d32 =
+				dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
+			DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt "
+					"Register - 0x%x\n",
+				      epnum, diepint.d32);
+
+		    /* Transfer complete */
+		    if (diepint.b.xfercompl) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d IN Xfer Complete\n",
+						epnum);
+
+				/* Disable the NP Tx FIFO Empty
+				 * Interrrupt */
+				if (core_if->en_multiple_tx_fifo == 0) {
+					intr_mask.b.nptxfempty = 1;
+					dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
+							  intr_mask.d32, 0);
+				} else {
+					/*
+					 * Disable the Tx FIFO Empty
+					 * Interrupt for this EP
+					 */
+					u32 fifoemptymsk = 0x1 << dwc_ep->num;
+					dwc_modify_reg32(&core_if->dev_if->
+							dev_global_regs->
+							dtknqr4_fifoemptymsk,
+							fifoemptymsk,
+							0);
+				}
+
+				/*
+				 * Clear the bit in DIEPINTn
+				 * for this interrupt *
+				 */
+				CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
+
+				/* Complete the transfer */
+				if (epnum == 0)
+					handle_ep0(pcd);
+				else
+					complete_ep(ep);
+			}
+
+			/* Endpoint disable      */
+			if (diepint.b.epdisabled) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
+						epnum);
+				handle_in_ep_disable_intr(pcd, epnum);
+
+				/*
+				 * Clear the bit in DIEPINTn
+				 * for this interrupt
+				 */
+				CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
+			}
+
+			/* AHB Error */
+			if (diepint.b.ahberr) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN AHB Error\n",
+						epnum);
+
+				/*
+				 * Clear the bit in DIEPINTn
+				 * for this interrupt
+				 */
+				CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
+			}
+
+			/* TimeOUT Handshake (non-ISOC IN EPs) */
+			if (diepint.b.timeout) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN Time-out\n",
+						epnum);
+				handle_in_ep_timeout_intr(pcd, epnum);
+				CLEAR_IN_EP_INTR(core_if, epnum, timeout);
+			}
+
+			/** IN Token received with TxF Empty */
+			if (diepint.b.intktxfemp) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN TKN "
+						"TxFifo Empty\n", epnum);
+				if (!ep->stopped && epnum != 0) {
+					union diepint_data diepmsk = {.d32 = 0};
+					diepmsk.b.intktxfemp = 1;
+					if (core_if->multiproc_int_enable) {
+						dwc_modify_reg32(&dev_if->
+							dev_global_regs->
+								 diepeachintmsk
+								 [epnum],
+								 diepmsk.d32,
+								 0);
+					} else {
+						dwc_modify_reg32(&dev_if->
+								 dev_global_regs->
+								 diepmsk,
+								 diepmsk.d32,
+								 0);
+#ifdef CONFIG_405EZ
+						/*
+						 * Added-sr: 2007-07-26
+						 *
+						 * Only start the next transfer,
+						 * when currently no other
+						 * transfer is
+						 * active on this endpoint.
+						 */
+						if (dwc_ep->active == 0)
+							start_next_request(ep);
+					}
+#else
+					}
+				} else if (core_if->dma_desc_enable
+					   && epnum == 0
+					   && pcd->ep0state ==
+					   EP0_OUT_STATUS_PHASE) {
+
+					diepctl.d32 =
+					    dwc_read_reg32(&dev_if->
+							   in_ep_regs[epnum]->
+							   diepctl);
+
+					/* set the disable and stall bits */
+					if (diepctl.b.epena) {
+						if (dwc_otg_can_disable_channel(
+						    core_if, dwc_ep))
+							diepctl.b.epdis = 1;
+					}
+
+					diepctl.b.stall = 1;
+					dwc_write_reg32(&dev_if->
+							in_ep_regs[epnum]->
+							diepctl, diepctl.d32);
+#endif
+				}
+				CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
+			}
+
+			/** IN Token Received with EP mismatch */
+			if (diepint.b.intknepmis) {
+				DWC_DEBUGPL(DBG_ANY,
+					"EP%d IN TKN EP Mismatch\n",
+					epnum);
+				CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
+			}
+
+			/** IN Endpoint NAK Effective */
+			if (diepint.b.inepnakeff) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN EP NAK "
+						"Effective\n", epnum);
+
+				/* Periodic EP */
+				if (ep->disabling) {
+					diepctl.d32 = 0;
+					diepctl.b.snak = 1;
+					if (dwc_otg_can_disable_channel(core_if,
+									dwc_ep))
+						diepctl.b.epdis = 1;
+					dwc_modify_reg32(&dev_if->
+							in_ep_regs[epnum]->
+							diepctl,
+							diepctl.d32,
+							diepctl.d32);
+				}
+				CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
+			}
+
+			/** IN EP Tx FIFO Empty Intr */
+			if (diepint.b.emptyintr) {
+				DWC_DEBUGPL(DBG_ANY,
+						"EP%d Tx FIFO Empty Intr \n",
+						epnum);
+
+				write_empty_tx_fifo(pcd, epnum);
+				CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
+			}
+			/** IN EP BNA Intr */
+			if (diepint.b.bna) {
+				CLEAR_IN_EP_INTR(core_if, epnum, bna);
+				if (core_if->dma_desc_enable) {
+					dctl.d32 =
+					    dwc_read_reg32(&dev_if->
+							   dev_global_regs->
+							   dctl);
+
+					/*
+					 * If Global Continue on BNA is
+					 * disabled - disable EP
+					 */
+					if (!dctl.b.gcontbna) {
+						diepctl.d32 = 0;
+						diepctl.b.snak = 1;
+						if (dwc_otg_can_disable_channel(
+								core_if, dwc_ep))
+							diepctl.b.epdis = 1;
+						dwc_modify_reg32(&dev_if->
+								 in_ep_regs[epnum]->
+								 diepctl,
+								 diepctl.d32,
+								 diepctl.d32);
+					} else
+						start_next_request(ep);
+				}
+			}
+			/* NAK Interrutp */
+			if (diepint.b.nak) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
+					    epnum);
+				handle_in_ep_nak_intr(pcd, epnum);
+
+				CLEAR_IN_EP_INTR(core_if, epnum, nak);
+			}
+		}
+		epnum++;
+		ep_intr >>= 1;
+	}
+	return 1;
+#undef CLEAR_IN_EP_INTR
+}
+
+/**
+ * This interrupt indicates that an OUT EP has a pending Interrupt.
+ * The sequence for handling the OUT EP interrupt is shown below:
+ * -#	Read the Device All Endpoint Interrupt register
+ * -#	Repeat the following for each OUT EP interrupt bit set (from
+ *		LSB to MSB).
+ * -#	Read the Device Endpoint Interrupt (DOEPINTn) register
+ * -#	If "Transfer Complete" call the request complete function
+ * -#	If "Endpoint Disabled" complete the EP disable procedure.
+ * -#	If "AHB Error Interrupt" log error
+ * -#	If "Setup Phase Done" process Setup Packet (See Standard USB
+ *		Command Processing)
+ */
+static int dwc_otg_pcd_handle_out_ep_intr(struct dwc_otg_pcd *pcd)
+{
+
+/* Acknowledge a single interrupt by writing its one-bit mask to DOEPINTn. */
+#define CLEAR_OUT_EP_INTR(__core_if, __epnum, __intr) \
+do { \
+	union doepint_data doepint = { .d32 = 0}; \
+	doepint.b.__intr = 1; \
+	dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
+	doepint.d32); \
+} while (0)
+
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
+	u32 ep_intr;
+	union doepint_data doepint = {.d32 = 0};
+	union dctl_data dctl = {.d32 = 0 };
+	union depctl_data doepctl = {.d32 = 0 };
+	u32 epnum = 0;
+	struct dwc_otg_pcd_ep *ep;
+	struct dwc_ep *dwc_ep;
+	DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+
+	/* Read in the device interrupt bits */
+	ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
+	while (ep_intr) {
+		if (ep_intr & 0x1) {
+			/* Get EP pointer */
+			ep = get_out_ep(pcd, epnum);
+			dwc_ep = &ep->dwc_ep;
+#ifdef VERBOSE
+			DWC_DEBUGPL(DBG_PCDV, "EP%d-%s: type=%d, mps=%d\n",
+					dwc_ep->num,
+					(dwc_ep->is_in ? "IN" : "OUT"),
+					dwc_ep->type, dwc_ep->maxpacket);
+
+#endif	/*  */
+			doepint.d32 =
+				dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
+
+			/* Transfer complete */
+			if (doepint.b.xfercompl) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d OUT Xfer Complete\n",
+						epnum);
+
+				if (epnum == 0) {
+					/*
+					 * Clear the bit in DOEPINTn
+					 * for this interrupt
+					 */
+					CLEAR_OUT_EP_INTR(core_if, epnum,
+							  xfercompl);
+					if (core_if->dma_desc_enable == 0
+					    || pcd->ep0state != EP0_IDLE)
+						handle_ep0(pcd);
+				} else {
+					/*
+					 * Clear the bit in DOEPINTn
+					 * for this interrupt
+					 */
+					CLEAR_OUT_EP_INTR(core_if, epnum,
+							  xfercompl);
+					complete_ep(ep);
+				}
+			}
+
+			/* Endpoint disable      */
+			if (doepint.b.epdisabled) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d OUT disabled\n",
+						epnum);
+
+				/*
+				 * Clear the bit in DOEPINTn
+				 * for this interrupt
+				 */
+				CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
+			}
+
+			/* AHB Error */
+			if (doepint.b.ahberr) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d OUT AHB Error\n",
+					    epnum);
+				DWC_DEBUGPL(DBG_PCD, "EP DMA REG\t %d\n",
+					    dwc_read_reg32(&core_if->dev_if->
+						out_ep_regs[epnum]->doepdma));
+				CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
+			}
+
+			/* Setup Phase Done (control EPs) */
+			if (doepint.b.setup) {
+#ifdef DEBUG_EP0
+				DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
+						epnum);
+
+#endif	/*  */
+				CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+				handle_ep0(pcd);
+			}
+			/** OUT EP BNA Intr */
+			if (doepint.b.bna) {
+				CLEAR_OUT_EP_INTR(core_if, epnum, bna);
+				if (core_if->dma_desc_enable) {
+					dctl.d32 = dwc_read_reg32(&dev_if->
+								  dev_global_regs->
+								  dctl);
+
+					/*
+					 * If Global Continue on BNA is disabled
+					 * - disable EP
+					 */
+					if (!dctl.b.gcontbna) {
+						doepctl.d32 = 0;
+						doepctl.b.snak = 1;
+						doepctl.b.epdis = 1;
+						dwc_modify_reg32(&dev_if->
+								 out_ep_regs[epnum]->
+								 doepctl,
+								 doepctl.d32,
+								 doepctl.d32);
+					} else
+						start_next_request(ep);
+
+				}
+			}
+			if (doepint.b.stsphsercvd) {
+				CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
+				if (core_if->dma_desc_enable)
+					do_setup_in_status_phase(pcd);
+			}
+			/* Babble Interrupt */
+			if (doepint.b.babble) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
+					    epnum);
+				handle_out_ep_babble_intr(pcd, epnum);
+
+				CLEAR_OUT_EP_INTR(core_if, epnum, babble);
+			}
+			/* NAK Interrupt */
+			if (doepint.b.nak) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
+				handle_out_ep_nak_intr(pcd, epnum);
+
+				CLEAR_OUT_EP_INTR(core_if, epnum, nak);
+			}
+			/* NYET Interrupt */
+			if (doepint.b.nyet) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
+				handle_out_ep_nyet_intr(pcd, epnum);
+				CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
+			}
+		}
+		epnum++;
+		ep_intr >>= 1;
+	}
+
+	return 1;
+
+#undef CLEAR_OUT_EP_INTR
+}
+
+/**
+ * Incomplete ISO IN Transfer Interrupt.
+ *
+ * Not implemented: this stub only masks the interrupt (so it cannot
+ * re-assert) and acknowledges the pending status bit in GINTSTS.
+ */
+static int dwc_otg_pcd_handle_incomplete_isoc_in_intr(struct dwc_otg_pcd *pcd)
+{
+	union gintsts_data gintsts;
+	union gintmsk_data intr_mask = {.d32 = 0};
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "IN ISOC Incomplete");
+
+	/* Mask the interrupt so it does not fire again */
+	intr_mask.b.incomplisoin = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			 intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.incomplisoin = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * Incomplete ISO OUT Transfer Interrupt.
+ *
+ * Not implemented: this stub only masks the interrupt (so it cannot
+ * re-assert) and acknowledges the pending status bit in GINTSTS.
+ */
+static int dwc_otg_pcd_handle_incomplete_isoc_out_intr(struct dwc_otg_pcd *pcd)
+{
+
+	union gintsts_data gintsts;
+	union gintmsk_data intr_mask = {.d32 = 0 };
+
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "OUT ISOC Incomplete");
+
+	/* Mask the interrupt so it does not fire again */
+	intr_mask.b.incomplisoout = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			 intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.incomplisoout = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This function handles the Global IN NAK Effective interrupt.
+ *
+ * Once IN NAK is effective on the bus, every enabled IN endpoint is
+ * disabled: SNAK is set (plus EPDis where the channel may be disabled),
+ * written only to EPs whose DIEPCTL shows EPEna.  The Global IN NAK
+ * Effective interrupt is then masked and its status bit acknowledged.
+ */
+static int dwc_otg_pcd_handle_in_nak_effective(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_dev_if *dev_if = GET_CORE_IF(pcd)->dev_if;
+	struct dwc_otg_pcd_ep *ep;
+	union depctl_data diepctl = {.d32 = 0};
+	union depctl_data diepctl_rd = {.d32 = 0};
+	union gintmsk_data intr_mask = {.d32 = 0};
+	union gintsts_data gintsts;
+	int i;
+	DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
+
+	/* Disable all active IN EPs */
+	diepctl.b.snak = 1;
+	for (i = 0; i <= dev_if->num_in_eps; i++) {
+		ep = get_in_ep(pcd, i);
+		if (dwc_otg_can_disable_channel(GET_CORE_IF(pcd), &ep->dwc_ep))
+			diepctl.b.epdis = 1;
+		else
+			diepctl.b.epdis = 0;
+
+		/* Only touch endpoints that are currently enabled */
+		diepctl_rd.d32 =
+			dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+		if (diepctl_rd.b.epena)
+			dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl,
+					diepctl.d32);
+	}
+
+	/* Disable the Global IN NAK Effective Interrupt */
+	intr_mask.b.ginnakeff = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.ginnakeff = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * OUT NAK Effective.
+ *
+ * Not implemented: this stub only masks the Global OUT NAK Effective
+ * interrupt (so it cannot re-assert) and acknowledges the pending
+ * status bit in GINTSTS.
+ */
+static int dwc_otg_pcd_handle_out_nak_effective(struct dwc_otg_pcd *pcd)
+{
+	union gintmsk_data intr_mask = {.d32 = 0};
+	union gintsts_data gintsts;
+	/*
+	 * Fixed: this message previously read "Global IN NAK Effective\n"
+	 * (wrong direction, and a stray newline doubling the format's \n).
+	 */
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		    "Global OUT NAK Effective");
+
+	/* Disable the Global OUT NAK Effective Interrupt */
+	intr_mask.b.goutnakeff = 1;
+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.goutnakeff = 1;
+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * PCD interrupt handler.
+ *
+ * The PCD handles the device interrupts.  Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ *
+ * All interrupt registers are processed from LSB to MSB.
+ *
+ * Returns non-zero if any sub-handler ran; 0 if no masked interrupt
+ * was pending (or the core is not in device mode).
+ */
+int dwc_otg_pcd_handle_intr(struct dwc_otg_pcd *pcd)
+{
+	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
+
+#ifdef VERBOSE
+	struct dwc_otg_core_global_regs __iomem *global_regs =
+		core_if->core_global_regs;
+#endif	/*  */
+	union gintsts_data gintr_status;
+	int retval = 0;
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x	 gintmsk=%08x\n",
+		    __func__,
+		    dwc_read_reg32(&global_regs->gintsts),
+		    dwc_read_reg32(&global_regs->gintmsk));
+#endif
+	if (dwc_otg_is_device_mode(core_if)) {
+		WARN_ON(!in_interrupt());
+		spin_lock(&pcd->lock);
+
+#ifdef VERBOSE
+		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x  gintmsk=%08x\n",
+			    __func__,
+			    dwc_read_reg32(&global_regs->gintsts),
+			    dwc_read_reg32(&global_regs->gintmsk));
+
+#endif	/*  */
+		/* gintr_status is GINTSTS already masked with GINTMSK */
+		gintr_status.d32 = dwc_otg_read_core_intr(core_if);
+		if (!gintr_status.d32) {
+			spin_unlock(&pcd->lock);
+			return 0;
+		}
+
+		DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", __func__,
+			    gintr_status.d32);
+		if (gintr_status.b.sofintr) {
+			retval |=
+				dwc_otg_pcd_handle_sof_intr(pcd);
+		}
+#ifndef OTG_PLB_DMA_TASKLET
+		if (gintr_status.b.rxstsqlvl) {
+			retval |=
+				dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
+		}
+		if (gintr_status.b.nptxfempty) {
+			retval |=
+				dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
+		}
+#endif
+		if (gintr_status.b.ginnakeff) {
+			retval |=
+				dwc_otg_pcd_handle_in_nak_effective(pcd);
+		}
+		if (gintr_status.b.goutnakeff) {
+			retval |=
+				dwc_otg_pcd_handle_out_nak_effective(pcd);
+		}
+		if (gintr_status.b.i2cintr) {
+			retval |=
+				dwc_otg_pcd_handle_i2c_intr(pcd);
+		}
+		if (gintr_status.b.erlysuspend) {
+			retval |=
+				dwc_otg_pcd_handle_early_suspend_intr(pcd);
+		}
+		if (gintr_status.b.usbreset) {
+			retval |=
+				dwc_otg_pcd_handle_usb_reset_intr(pcd);
+		}
+		if (gintr_status.b.enumdone) {
+			retval |=
+				dwc_otg_pcd_handle_enum_done_intr(pcd);
+		}
+		if (gintr_status.b.isooutdrop) {
+			retval |=
+				dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd);
+		}
+		if (gintr_status.b.eopframe) {
+			retval |=
+				dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
+		}
+		if (gintr_status.b.epmismatch) {
+			retval |=
+				dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
+		}
+		if (gintr_status.b.inepint) {
+			if (!core_if->multiproc_int_enable)
+				retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
+		}
+		if (gintr_status.b.outepintr) {
+			if (!core_if->multiproc_int_enable)
+				retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
+		}
+		if (gintr_status.b.incomplisoin) {
+			retval |=
+				dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
+		}
+		if (gintr_status.b.incomplisoout) {
+			retval |=
+				dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
+		}
+		/* In MPI mode Device Endpoint interrupts are asserted
+		 * without the outepintr and inepint bits being set, so
+		 * these interrupt handlers are called without checking
+		 * those bit-fields
+		 */
+		if (core_if->multiproc_int_enable) {
+			retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
+			retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
+		}
+#ifdef VERBOSE
+		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
+				dwc_read_reg32(&global_regs->gintsts));
+
+#endif	/*  */
+#ifdef OTG_PLB_DMA_TASKLET
+		if (gintr_status.b.rxstsqlvl) {
+			retval |=
+				dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
+		}
+		if (!atomic_read(&release_later) &&
+				gintr_status.b.nptxfempty) {
+			retval |=
+				dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
+		}
+#endif
+		spin_unlock(&pcd->lock);
+	}
+	return retval;
+}
+
+
+#endif	/* DWC_HOST_ONLY */
diff --git a/drivers/usb/dwc_otg/dwc_otg_regs.h b/drivers/usb/dwc_otg/dwc_otg_regs.h
new file mode 100644
index 0000000..9d6ef5e
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_regs.h
@@ -0,0 +1,2189 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __DWC_OTG_REGS_H__
+#define __DWC_OTG_REGS_H__
+
+
+/** Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/** Maximum number of Tx FIFOs */
+#define MAX_TX_FIFOS 15
+
+/** Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 16
+/**
+ * @file
+ *
+ * This file contains the data structures for accessing the DWC_otg core
+ * registers.
+ *
+ * The application interfaces with the HS OTG core by reading from and
+ * writing to the Control and Status Register (CSR) space through the
+ * AHB Slave interface. These registers are 32 bits wide, and the
+ * addresses are 32-bit-block aligned.
+ * CSRs are classified as follows:
+ * - Core Global Registers
+ * - Device Mode Registers
+ * - Device Global Registers
+ * - Device Endpoint Specific Registers
+ * - Host Mode Registers
+ * - Host Global Registers
+ * - Host Port CSRs
+ * - Host Channel Specific Registers
+ *
+ * Only the Core Global registers can be accessed in both Device and
+ * Host modes. When the HS OTG core is operating in one mode, either
+ * Device or Host, the application must not access registers from the
+ * other mode. When the core switches from one mode to another, the
+ * registers in the new mode of operation must be reprogrammed as they
+ * would be after a power-on reset.
+ */
+
+/****************************************************************************/
+/** DWC_otg Core registers .
+ * The dwc_otg_core_global_regs structure defines the size
+ * and relative field offsets for the Core Global registers.
+ */
+struct dwc_otg_core_global_regs {
+	/** OTG Control and Status Register. Offset: 000h */
+	u32 gotgctl;
+	/** OTG Interrupt Register. Offset: 004h */
+	u32 gotgint;
+	/**Core AHB Configuration Register. Offset: 008h */
+	u32 gahbcfg;
+
+#define DWC_GLBINTRMASK		0x0001
+#define DWC_DMAENABLE		0x0020
+#define DWC_NPTXEMPTYLVL_EMPTY	0x0080
+#define DWC_NPTXEMPTYLVL_HALFEMPTY	0x0000
+#define DWC_PTXEMPTYLVL_EMPTY	0x0100
+#define DWC_PTXEMPTYLVL_HALFEMPTY	0x0000
+
+	/**Core USB Configuration Register. Offset: 00Ch */
+	u32 gusbcfg;
+	/**Core Reset Register.	 Offset: 010h */
+	u32 grstctl;
+	/**Core Interrupt Register. Offset: 014h */
+	u32 gintsts;
+	/**Core Interrupt Mask Register. Offset: 018h */
+	u32 gintmsk;
+	/**Receive Status Queue Read Register (Read Only).Offset: 01Ch */
+	u32 grxstsr;
+	/**Receive Status Queue Read & POP Register (Read Only).Offset: 020h*/
+	u32 grxstsp;
+	/**Receive FIFO Size Register. Offset: 024h */
+	u32 grxfsiz;
+	/**Non Periodic Transmit FIFO Size Register. Offset: 028h */
+	u32 gnptxfsiz;
+	/**Non Periodic Transmit FIFO/Queue Status Register (Read
+	 * Only). Offset: 02Ch */
+	u32 gnptxsts;
+	/**I2C Access Register.	 Offset: 030h */
+	u32 gi2cctl;
+	/**PHY Vendor Control Register.	 Offset: 034h */
+	u32 gpvndctl;
+	/**General Purpose Input/Output Register.  Offset: 038h */
+	u32 ggpio;
+	/**User ID Register.  Offset: 03Ch */
+	u32 guid;
+	/**Synopsys ID Register (Read Only).  Offset: 040h */
+	u32 gsnpsid;
+	/**User HW Config1 Register (Read Only).  Offset: 044h */
+	u32 ghwcfg1;
+	/**User HW Config2 Register (Read Only).  Offset: 048h */
+	u32 ghwcfg2;
+#define DWC_SLAVE_ONLY_ARCH 0
+#define DWC_EXT_DMA_ARCH 1
+#define DWC_INT_DMA_ARCH 2
+
+#define DWC_MODE_HNP_SRP_CAPABLE	0
+#define DWC_MODE_SRP_ONLY_CAPABLE	1
+#define DWC_MODE_NO_HNP_SRP_CAPABLE		2
+#define DWC_MODE_SRP_CAPABLE_DEVICE		3
+#define DWC_MODE_NO_SRP_CAPABLE_DEVICE	4
+#define DWC_MODE_SRP_CAPABLE_HOST	5
+#define DWC_MODE_NO_SRP_CAPABLE_HOST	6
+
+	/**User HW Config3 Register (Read Only).  Offset: 04Ch */
+	u32 ghwcfg3;
+	/**User HW Config4 Register (Read Only).  Offset: 050h*/
+	u32 ghwcfg4;
+	/** Core LPM Configuration Register.  Offset: 054h */
+	u32 glpmcfg;
+	/** Reserved  Offset: 058h-0FFh */
+	u32 reserved[42];
+	/** Host Periodic Transmit FIFO Size Register. Offset: 100h */
+	u32 hptxfsiz;
+	/**
+	 * Device Periodic Transmit FIFO#n Register if dedicated
+	 * fifos are disabled,otherwise Device Transmit FIFO#n Register.
+	 * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15
+	 *  (1<=n<=15).
+	 */
+	u32 dptxfsiz_dieptxf[15];
+};
+
+/**
+ * This union represents the bit fields of the Core OTG Control
+ * and Status Register (GOTGCTL).  Set the bits using the bit
+ * fields then write the d32 value to the register.
+ */
+union gotgctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Session Request Success */
+		unsigned sesreqscs:1;
+		/** Session Request */
+		unsigned sesreq:1;
+		unsigned reserved2_7:6;
+		/** Host Negotiation Success */
+		unsigned hstnegscs:1;
+		/** HNP Request */
+		unsigned hnpreq:1;
+		/** Host Set HNP Enable */
+		unsigned hstsethnpen:1;
+		/** Device HNP Enabled */
+		unsigned devhnpen:1;
+		unsigned reserved12_15:4;
+		/** Connector ID Status */
+		unsigned conidsts:1;
+		unsigned reserved17:1;
+		/** A-Session Valid */
+		unsigned asesvld:1;
+		/** B-Session Valid */
+		unsigned bsesvld:1;
+		/** Current Mode of operation */
+		unsigned currmod:1;
+		unsigned reserved21_31:11;
+	} b;
+};
+
+/**
+ * This union represents the bit fields of the Core OTG Interrupt Register
+ * (GOTGINT).  Set/clear the bits using the bit fields then write the d32
+ * value to the register.
+ */
+union gotgint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Reserved (bits 0-1) */
+		unsigned reserved0_1:2;
+
+		/** Session End Detected */
+		unsigned sesenddet:1;
+
+		unsigned reserved3_7:5;
+
+		/** Session Request Success Status Change */
+		unsigned sesreqsucstschng:1;
+		/** Host Negotiation Success Status Change */
+		unsigned hstnegsucstschng:1;
+
+		/* NOTE(review): identifier typo ("reserver") kept as-is */
+		unsigned reserver10_16:7;
+
+		/** Host Negotiation Detected */
+		unsigned hstnegdet:1;
+		/** A-Device Timeout Change */
+		unsigned adevtoutchng:1;
+		/** Debounce Done */
+		unsigned debdone:1;
+
+		/** Reserved (bits 20-31) */
+		unsigned reserved31_20:12;
+
+	} b;
+};
+
+
+/**
+ * This union represents the bit fields of the Core AHB Configuration
+ * Register (GAHBCFG).	Set/clear the bits using the bit fields then
+ * write the d32 value to the register.
+ */
+union gahbcfg_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Global Interrupt Mask */
+		unsigned glblintrmsk:1;
+#define DWC_GAHBCFG_GLBINT_ENABLE		1
+
+		/** Burst Length/Type */
+		unsigned hburstlen:4;
+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE	0
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR		1
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4		3
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8		5
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16	7
+
+		/** DMA Enable */
+		unsigned dmaenable:1;
+#define DWC_GAHBCFG_DMAENABLE			1
+		unsigned reserved:1;
+		/** Non-Periodic TxFIFO Empty Level */
+		unsigned nptxfemplvl_txfemplvl:1;
+		/** Periodic TxFIFO Empty Level */
+		unsigned ptxfemplvl:1;
+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY		1
+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY	0
+		unsigned reserved9_31:23;
+	} b;
+};
+
+/**
+ * This union represents the bit fields of the Core USB Configuration
+ * Register (GUSBCFG).	Set the bits using the bit fields then write
+ * the d32 value to the register.
+ */
+union gusbcfg_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** HS/FS Timeout Calibration */
+		unsigned toutcal:3;
+		/** PHY Interface select */
+		unsigned phyif:1;
+		/** ULPI or UTMI+ Select */
+		unsigned ulpi_utmi_sel:1;
+		/** Full-Speed Serial Interface Select */
+		unsigned fsintf:1;
+		unsigned physel:1;
+		unsigned ddrsel:1;
+		/** SRP-Capable */
+		unsigned srpcap:1;
+		/** HNP-Capable */
+		unsigned hnpcap:1;
+		/** USB Turnaround Time */
+		unsigned usbtrdtim:4;
+		unsigned nptxfrwnden:1;
+		/** PHY Low-Power Clock Select */
+		unsigned phylpwrclksel:1;
+		unsigned otgutmifssel:1;
+		unsigned ulpi_fsls:1;
+		unsigned ulpi_auto_res:1;
+		unsigned ulpi_clk_sus_m:1;
+		/** ULPI External VBUS Drive */
+		unsigned ulpi_ext_vbus_drv:1;
+		unsigned ulpi_int_vbus_indicator:1;
+		unsigned term_sel_dl_pulse:1;
+		unsigned reserved23_25:3;
+		unsigned ic_usb_cap:1;
+		unsigned ic_traffic_pull_remove:1;
+		unsigned tx_end_delay:1;
+		/** Force Host Mode */
+		unsigned force_host_mode:1;
+		/** Force Device Mode */
+		unsigned force_device_mode:1;
+		unsigned corrupt_tx_packet:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields of the Core LPM Configuration
+ * Register (GLPMCFG). Set the bits using bit fields then write
+ * the d32 value to the register.
+ */
+union glpmcfg_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** LPM-Capable (LPMCap) (Device and Host)
+		 * The application uses this bit to control
+		 * the DWC_otg core LPM capabilities.
+		 */
+		unsigned lpm_cap_en:1;
+		/** LPM response programmed by application (AppL1Res) (Device)
+		 * Handshake response to LPM token pre-programmed
+		 * by device application software.
+		 */
+		unsigned appl_resp:1;
+		/** Host Initiated Resume Duration (HIRD) (Device and Host)
+		 * In Host mode this field indicates the value of HIRD
+		 * to be sent in an LPM transaction.
+		 * In Device mode this field is updated with the
+		 * Received LPM Token HIRD bmAttribute
+		 * when an ACK/NYET/STALL response is sent
+		 * to an LPM transaction.
+		 */
+		unsigned hird:4;
+		/** RemoteWakeEnable (bRemoteWake) (Device and Host)
+		 * In Host mode this bit indicates the value of remote
+		 * wake up to be sent in wIndex field of LPM transaction.
+		 * In Device mode this field is updated with the
+		 * Received LPM Token bRemoteWake bmAttribute
+		 * when an ACK/NYET/STALL response is sent
+		 * to an LPM transaction.
+		 */
+		unsigned rem_wkup_en:1;
+		/** Enable utmi_sleep_n (EnblSlpM) (Device and Host)
+		 * The application uses this bit to control
+		 * the utmi_sleep_n assertion to the PHY when in L1 state.
+		 */
+		unsigned en_utmi_sleep:1;
+		/** HIRD Threshold (HIRD_Thres) (Device and Host)
+		 */
+		unsigned hird_thres:5;
+		/** LPM Response (CoreL1Res) (Device and Host)
+		 * In Host mode this bit contains handshake response to
+		 * LPM transaction.
+		 * In Device mode the response of the core to
+		 * LPM transaction received is reflected in these two bits.
+			- 0x0:ERROR (No handshake response)
+			- 0x1:STALL
+			- 0x2:NYET
+			- 0x3:ACK
+		 */
+		unsigned lpm_resp:2;
+		/** Port Sleep Status (SlpSts) (Device and Host)
+		 * This bit is set as long as a Sleep condition
+		 * is present on the USB bus.
+		 */
+		unsigned prt_sleep_sts:1;
+		/** Sleep State Resume OK (L1ResumeOK) (Device and Host)
+		 * Indicates that the application or host
+		 * can start resume from Sleep state.
+		 */
+		unsigned sleep_state_resumeok:1;
+		/** LPM channel Index (LPM_Chnl_Indx) (Host)
+		 * The channel number on which the LPM transaction
+		 * has to be applied while sending
+		 * an LPM transaction to the local device.
+		 */
+		unsigned lpm_chan_index:4;
+		/** LPM Retry Count (LPM_Retry_Cnt) (Host)
+		 * Number host retries that would be performed
+		 * if the device response was not valid response.
+		 */
+		unsigned retry_count:3;
+		/** Send LPM Transaction (SndLPM) (Host)
+		 * When set by application software,
+		 * an LPM transaction containing two tokens
+		 * is sent.
+		 */
+		unsigned send_lpm:1;
+		/** LPM Retry status (LPM_RetryCnt_Sts) (Host)
+		 * Number of LPM Host Retries still remaining
+		 * to be transmitted for the current LPM sequence
+		 */
+		unsigned retry_count_sts:3;
+		unsigned reserved28_29:2;
+		/** In host mode once this bit is set, the host
+		 * configures to drive the HSIC Idle state on the bus.
+		 * It then waits for the device to initiate the Connect
+		 * sequence. In device mode once this bit is set, the device
+		 * waits for the HSIC Idle line state on the bus. Upon
+		 * receiving the Idle line state, it initiates the HSIC Connect
+		 * sequence.
+		 */
+		unsigned hsic_connect:1;
+		/** This bit overrides and functionally inverts
+		 * the if_select_hsic input port signal.
+		 */
+		unsigned inv_sel_hsic:1;
+	} b;
+};
+/**
+ * This union represents the bit fields of the Core Reset Register
+ * (GRSTCTL).  Set/clear the bits using the bit fields then write the
+ * d32 value to the register.
+ */
+union grstctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Core Soft Reset (CSftRst) (Device and Host)
+		 *
+		 * The application can flush the control logic in the
+		 * entire core using this bit. This bit resets the
+		 * pipelines in the AHB Clock domain as well as the
+		 * PHY Clock domain.
+		 *
+		 * The state machines are reset to an IDLE state, the
+		 * control bits in the CSRs are cleared, all the
+		 * transmit FIFOs and the receive FIFO are flushed.
+		 *
+		 * The status mask bits that control the generation of
+		 * the interrupt, are cleared, to clear the
+		 * interrupt. The interrupt status bits are not
+		 * cleared, so the application can get the status of
+		 * any events that occurred in the core after it has
+		 * set this bit.
+		 *
+		 * Any transactions on the AHB are terminated as soon
+		 * as possible following the protocol. Any
+		 * transactions on the USB are terminated immediately.
+		 *
+		 * The configuration settings in the CSRs are
+		 * unchanged, so the software doesn't have to
+		 * reprogram these registers (Device
+		 * Configuration/Host Configuration/Core System
+		 * Configuration/Core PHY Configuration).
+		 *
+		 * The application can write to this bit, any time it
+		 * wants to reset the core. This is a self clearing
+		 * bit and the core clears this bit after all the
+		 * necessary logic is reset in the core, which may
+		 * take several clocks, depending on the current state
+		 * of the core.
+		 */
+		unsigned csftrst:1;
+		/** Hclk Soft Reset
+		 *
+		 * The application uses this bit to reset the control logic in
+		 * the AHB clock domain. Only AHB clock domain pipelines are
+		 * reset.
+		 */
+		unsigned hsftrst:1;
+		/** Host Frame Counter Reset (Host Only)<br>
+		 *
+		 * The application can reset the (micro)frame number
+		 * counter inside the core, using this bit. When the
+		 * (micro)frame counter is reset, the subsequent SOF
+		 * sent out by the core, will have a (micro)frame
+		 * number of 0.
+		 */
+		unsigned hstfrm:1;
+		/** In Token Sequence Learning Queue Flush
+		 * (INTknQFlsh) (Device Only)
+		 */
+		unsigned intknqflsh:1;
+		/** RxFIFO Flush (RxFFlsh) (Device and Host)
+		 *
+		 * The application can flush the entire Receive FIFO
+		 * using this bit.	<p>The application must first
+		 * ensure that the core is not in the middle of a
+		 * transaction.	 <p>The application should write into
+		 * this bit, only after making sure that neither the
+		 * DMA engine is reading from the RxFIFO nor the MAC
+		 * is writing the data in to the FIFO.	<p>The
+		 * application should wait until the bit is cleared
+		 * before performing any other operations. This bit
+		 * will takes 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+		 */
+		unsigned rxfflsh:1;
+		/** TxFIFO Flush (TxFFlsh) (Device and Host).
+		 *
+		 * This bit is used to selectively flush a single or
+		 * all transmit FIFOs.	The application must first
+		 * ensure that the core is not in the middle of a
+		 * transaction.	 <p>The application should write into
+		 * this bit, only after making sure that neither the
+		 * DMA engine is writing into the TxFIFO nor the MAC
+		 * is reading the data out of the FIFO.	 <p>The
+		 * application should wait until the core clears this
+		 * bit, before performing any operations. This bit
+		 * will take 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+		 */
+		unsigned txfflsh:1;
+
+		/** TxFIFO Number (TxFNum) (Device and Host).
+		 *
+		 * This is the FIFO number which needs to be flushed,
+		 * using the TxFIFO Flush bit. This field should not
+		 * be changed until the TxFIFO Flush bit is cleared by
+		 * the core.
+		 *	 - 0x0:Non Periodic TxFIFO Flush
+		 *	 - 0x1:Periodic TxFIFO #1 Flush in device mode
+		 *	   or Periodic TxFIFO in host mode
+		 *	 - 0x2:Periodic TxFIFO #2 Flush in device mode.
+		 *	 - ...
+		 *	 - 0xF:Periodic TxFIFO #15 Flush in device mode
+		 *	 - 0x10: Flush all the Transmit NonPeriodic and
+		 *	   Transmit Periodic FIFOs in the core
+		 */
+		unsigned txfnum:5;
+		/** Reserved */
+		unsigned reserved11_29:19;
+		/** DMA Request Signal.	 Indicates a DMA request is in
+		 * progress.  Used for debug purpose. */
+		unsigned dmareq:1;
+		/** AHB Master Idle.  Indicates the AHB Master State
+		 * Machine is in IDLE condition. */
+		unsigned ahbidle:1;
+	} b;
+};
+
+
+/**
+ * This union represents the bit fields of the Core Interrupt Mask
+ * Register (GINTMSK).	Set/clear the bits using the bit fields then
+ * write the d32 value to the register.
+ */
+union gintmsk_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned reserved0:1;
+		unsigned modemismatch:1;
+		unsigned otgintr:1;
+		unsigned sofintr:1;
+		unsigned rxstsqlvl:1;
+		unsigned nptxfempty:1;
+		unsigned ginnakeff:1;
+		unsigned goutnakeff:1;
+		unsigned reserved8:1;
+		unsigned i2cintr:1;
+		unsigned erlysuspend:1;
+		unsigned usbsuspend:1;
+		unsigned usbreset:1;
+		unsigned enumdone:1;
+		unsigned isooutdrop:1;
+		unsigned eopframe:1;
+		unsigned reserved16:1;
+		unsigned epmismatch:1;
+		unsigned inepintr:1;
+		unsigned outepintr:1;
+		unsigned incomplisoin:1;
+		unsigned incomplisoout:1;
+		unsigned reserved22_23:2;
+		unsigned portintr:1;
+		unsigned hcintr:1;
+		unsigned ptxfempty:1;
+		unsigned lpmtranrcvd:1;
+		unsigned conidstschng:1;
+		unsigned disconnect:1;
+		unsigned sessreqintr:1;
+		unsigned wkupintr:1;
+	} b;
+};
+/**
+ * This union represents the bit fields of the Core Interrupt Register
+ * (GINTSTS).  Set/clear the bits using the bit fields then write the
+ * d32 value to the register.
+ */
+union gintsts_data {
+	/** raw register data */
+	u32 d32;
+#define DWC_SOF_INTR_MASK 0x0008
+	/** register bits */
+	struct {
+#define DWC_HOST_MODE 1
+		unsigned curmode:1;
+		unsigned modemismatch:1;
+		unsigned otgintr:1;
+		unsigned sofintr:1;
+		unsigned rxstsqlvl:1;
+		unsigned nptxfempty:1;
+		unsigned ginnakeff:1;
+		unsigned goutnakeff:1;
+		unsigned reserved8:1;
+		unsigned i2cintr:1;
+		unsigned erlysuspend:1;
+		unsigned usbsuspend:1;
+		unsigned usbreset:1;
+		unsigned enumdone:1;
+		unsigned isooutdrop:1;
+		unsigned eopframe:1;
+		unsigned intokenrx:1;
+		unsigned epmismatch:1;
+		unsigned inepint:1;
+		unsigned outepintr:1;
+		unsigned incomplisoin:1;
+		unsigned incomplisoout:1;
+		unsigned reserved22_23:2;
+		unsigned portintr:1;
+		unsigned hcintr:1;
+		unsigned ptxfempty:1;
+		unsigned lpmtranrcvd:1;
+		unsigned conidstschng:1;
+		unsigned disconnect:1;
+		unsigned sessreqintr:1;
+		unsigned wkupintr:1;
+	} b;
+};
+
+
+/**
+ * This union represents the bit fields in the Device Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32
+ * element then read out the bits using the bit elements.
+ */
+union device_grxsts_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned epnum:4;
+		unsigned bcnt:11;
+		unsigned dpid:2;
+
+#define DWC_STS_DATA_UPDT		0x2	/* OUT Data Packet */
+#define DWC_STS_XFER_COMP		0x3	/* OUT Data Transfer Complete */
+
+#define DWC_DSTS_GOUT_NAK		0x1	/* Global OUT NAK */
+#define DWC_DSTS_SETUP_COMP		0x4	/* Setup Phase Complete */
+#define DWC_DSTS_SETUP_UPDT 0x6			/* SETUP Packet */
+		unsigned pktsts:4;
+		unsigned fn:4;
+		unsigned reserved:7;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32
+ * element then read out the bits using the bit elements.
+ */
+union host_grxsts_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned chnum:4;
+		unsigned bcnt:11;
+		unsigned dpid:2;
+
+		unsigned pktsts:4;
+#define DWC_GRXSTS_PKTSTS_IN			  0x2
+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP	  0x3
+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
+#define DWC_GRXSTS_PKTSTS_CH_HALTED		  0x7
+
+		unsigned reserved:11;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
+ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then
+ * read out the bits using the bit elements.
+ */
+union fifosize_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned startaddr:16;
+		unsigned depth:16;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Non-Periodic Transmit
+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
+ * d32 element then read out the bits using the bit
+ * elements.
+ */
+union gnptxsts_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned nptxfspcavail:16;
+		unsigned nptxqspcavail:8;
+		/** Top of the Non-Periodic Transmit Request Queue
+		 *	- bit 24 - Terminate (Last entry for the selected
+		 *	  channel/EP)
+		 *	- bits 26:25 - Token Type
+		 *	  - 2'b00 - IN/OUT
+		 *	  - 2'b01 - Zero Length OUT
+		 *	  - 2'b10 - PING/Complete Split
+		 *	  - 2'b11 - Channel Halt
+		 *	- bits 30:27 - Channel/EP Number
+		 */
+		unsigned nptxqtop_terminate:1;
+		unsigned nptxqtop_token:2;
+		unsigned nptxqtop_chnep:4;
+		unsigned reserved:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Transmit
+ * FIFO Status Register (DTXFSTS). Read the register into the
+ * d32 element then read out the bits using the bit
+ * elements.
+ */
+union dtxfsts_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned txfspcavail:16;
+		unsigned reserved:16;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the I2C Control Register
+ * (I2CCTL). Read the register into the d32 element then read out the
+ * bits using the bit elements.
+ */
+union gi2cctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned rwdata:8;
+		unsigned regaddr:8;
+		unsigned addr:7;
+		unsigned i2cen:1;
+		unsigned ack:1;
+		unsigned i2csuspctl:1;
+		unsigned i2cdevaddr:2;
+		unsigned reserved:2;
+		unsigned rw:1;
+		unsigned bsydne:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the User HW Config1
+ * Register.  Read the register into the d32 element then read
+ * out the bits using the bit elements.
+ */
+union hwcfg1_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned ep_dir0:2;
+		unsigned ep_dir1:2;
+		unsigned ep_dir2:2;
+		unsigned ep_dir3:2;
+		unsigned ep_dir4:2;
+		unsigned ep_dir5:2;
+		unsigned ep_dir6:2;
+		unsigned ep_dir7:2;
+		unsigned ep_dir8:2;
+		unsigned ep_dir9:2;
+		unsigned ep_dir10:2;
+		unsigned ep_dir11:2;
+		unsigned ep_dir12:2;
+		unsigned ep_dir13:2;
+		unsigned ep_dir14:2;
+		unsigned ep_dir15:2;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the User HW Config2
+ * Register.  Read the register into the d32 element then read
+ * out the bits using the bit elements.
+ */
+union hwcfg2_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/* GHWCFG2 */
+		unsigned op_mode:3;
+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+
+		unsigned architecture:2;
+		unsigned point2point:1;
+		unsigned hs_phy_type:2;
+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1
+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+
+		unsigned fs_phy_type:2;
+		unsigned num_dev_ep:4;
+		unsigned num_host_chan:4;
+		unsigned perio_ep_supported:1;
+		unsigned dynamic_fifo:1;
+		unsigned multi_proc_int:1;
+		unsigned reserved21:1;
+		unsigned nonperio_tx_q_depth:2;
+		unsigned host_perio_tx_q_depth:2;
+		unsigned dev_token_q_depth:5;
+		unsigned reserved31:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the User HW Config3
+ * Register.  Read the register into the d32 element then read
+ * out the bits using the bit elements.
+ */
+union hwcfg3_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/* GHWCFG3 */
+		unsigned xfer_size_cntr_width:4;
+		unsigned packet_size_cntr_width:3;
+		unsigned otg_func:1;
+		unsigned i2c:1;
+		unsigned vendor_ctrl_if:1;
+		unsigned optional_features:1;
+		unsigned synch_reset_type:1;
+		unsigned otg_enable_ic_usb:1;
+		unsigned otg_enable_hsic:1;
+		unsigned reserved14:1;
+		unsigned otg_lpm_en:1;
+		unsigned dfifo_depth:16;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the User HW Config4
+ * Register.  Read the register into the d32 element then read
+ * out the bits using the bit elements.
+ */
+union hwcfg4_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned num_dev_perio_in_ep:4;
+		unsigned power_optimiz:1;
+		unsigned min_ahb_freq:9;
+		unsigned utmi_phy_data_width:2;
+		unsigned num_dev_mode_ctrl_ep:4;
+		unsigned iddig_filt_en:1;
+		unsigned vbus_valid_filt_en:1;
+		unsigned a_valid_filt_en:1;
+		unsigned b_valid_filt_en:1;
+		unsigned session_end_filt_en:1;
+		unsigned ded_fifo_en:1;
+		unsigned num_in_eps:4;
+		unsigned desc_dma:1;
+		unsigned desc_dma_dyn:1;
+	} b;
+};
+
+
+/* ***********************Device Registers*************************************/
+
+/**
+ * Device Global Registers. Offsets 800h-BFFh
+ *
+ * The following structures define the size and relative field offsets
+ * for the Device Mode Registers.
+ *
+ * These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.
+ */
+struct dwc_otg_dev_global_regs {
+	/** Device Configuration Register. Offset 800h */
+	u32 dcfg;
+	/** Device Control Register. Offset: 804h */
+	u32 dctl;
+	/** Device Status Register (Read Only). Offset: 808h */
+	u32 dsts;
+	/** Reserved. Offset: 80Ch */
+	u32 unused;
+	/** Device IN Endpoint Common Interrupt Mask
+	 * Register. Offset: 810h */
+	u32 diepmsk;
+	/** Device OUT Endpoint Common Interrupt Mask
+	 * Register. Offset: 814h */
+	u32 doepmsk;
+	/** Device All Endpoints Interrupt Register.  Offset: 818h */
+	u32 daint;
+	/** Device All Endpoints Interrupt Mask Register.  Offset:
+	 * 81Ch */
+	u32 daintmsk;
+	/** Device IN Token Queue Read Register-1 (Read Only).
+	 * Offset: 820h */
+	u32 dtknqr1;
+	/** Device IN Token Queue Read Register-2 (Read Only).
+	 * Offset: 824h */
+	u32 dtknqr2;
+	/** Device VBUS	 discharge Register.  Offset: 828h */
+	u32 dvbusdis;
+	/** Device VBUS Pulse Register.	 Offset: 82Ch */
+	u32 dvbuspulse;
+	/** Device IN Token Queue Read Register-3 (Read Only). /
+	 *	Device Thresholding control register (Read/Write)
+	 * Offset: 830h */
+	u32 dtknqr3_dthrctl;
+	/** Device IN Token Queue Read Register-4 (Read Only). /
+	 *	Device IN EPs empty Intr. Mask Register (Read/Write)
+	 * Offset: 834h */
+	u32 dtknqr4_fifoemptymsk;
+	u32 deachint;
+	/** Device Each Endpoint Interrupt mask Register (Read/Write). /
+	 * Offset: 83Ch */
+	u32 deachintmsk;
+	/** Device Each In Endpoint Interrupt mask Register (Read/Write). /
+	 * Offset: 840h */
+	u32 diepeachintmsk[MAX_EPS_CHANNELS];
+	/** Device Each Out Endpoint Interrupt mask Register (Read/Write). /
+	 * Offset: 880h */
+	u32 doepeachintmsk[MAX_EPS_CHANNELS];
+};
+
+/**
+ * This union represents the bit fields in the Device Configuration
+ * Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.  Write the
+ * d32 member to the dcfg register.
+ */
+union dcfg_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Device Speed */
+		unsigned devspd:2;
+		/** Non Zero Length Status OUT Handshake */
+		unsigned nzstsouthshk:1;
+#define DWC_DCFG_SEND_STALL 1
+
+		unsigned reserved3:1;
+		/** Device Addresses */
+		unsigned devaddr:7;
+		/** Periodic Frame Interval */
+		unsigned perfrint:2;
+#define DWC_DCFG_FRAME_INTERVAL_80 0
+#define DWC_DCFG_FRAME_INTERVAL_85 1
+#define DWC_DCFG_FRAME_INTERVAL_90 2
+#define DWC_DCFG_FRAME_INTERVAL_95 3
+
+		unsigned reserved13_17:5;
+		/** In Endpoint Mis-match count */
+		unsigned epmscnt:5;
+		/** Enable Descriptor DMA in Device mode */
+		unsigned descdma:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Device Control
+ * Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union dctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Remote Wakeup */
+		unsigned rmtwkupsig:1;
+		/** Soft Disconnect */
+		unsigned sftdiscon:1;
+		/** Global Non-Periodic IN NAK Status */
+		unsigned gnpinnaksts:1;
+		/** Global OUT NAK Status */
+		unsigned goutnaksts:1;
+		/** Test Control */
+		unsigned tstctl:3;
+		/** Set Global Non-Periodic IN NAK */
+		unsigned sgnpinnak:1;
+		/** Clear Global Non-Periodic IN NAK */
+		unsigned cgnpinnak:1;
+		/** Set Global OUT NAK */
+		unsigned sgoutnak:1;
+		/** Clear Global OUT NAK */
+		unsigned cgoutnak:1;
+
+		/** Power-On Programming Done */
+		unsigned pwronprgdone:1;
+		/** Global Continue on BNA */
+		unsigned gcontbna:1;
+		/** Global Multi Count */
+		unsigned gmc:2;
+		/** Ignore Frame Number for ISOC EPs */
+		unsigned ifrmnum:1;
+		/** NAK on Babble */
+		unsigned nakonbble:1;
+
+		unsigned reserved17_31:15;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Device Status
+ * Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union dsts_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Suspend Status */
+		unsigned suspsts:1;
+		/** Enumerated Speed */
+		unsigned enumspd:2;
+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ		   2
+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ		   3
+		/** Erratic Error */
+		unsigned errticerr:1;
+		unsigned reserved4_7:4;
+		/** Frame or Microframe Number of the received SOF */
+		unsigned soffn:14;
+		unsigned reserved22_31:10;
+	} b;
+};
+
+
+/**
+ * This union represents the bit fields in the Device IN EP Interrupt
+ * Register and the Device IN EP Common Mask Register.
+ *
+ * - Read the register into the d32 member then set/clear the
+ *	 bits using the bit elements.
+ */
+union diepint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Transfer complete mask */
+		unsigned xfercompl:1;
+		/** Endpoint disable mask */
+		unsigned epdisabled:1;
+		/** AHB Error mask */
+		unsigned ahberr:1;
+		/** TimeOUT Handshake mask (non-ISOC EPs) */
+		unsigned timeout:1;
+		/** IN Token received with TxF Empty mask */
+		unsigned intktxfemp:1;
+		/** IN Token Received with EP mismatch mask */
+		unsigned intknepmis:1;
+		/** IN Endpoint NAK Effective mask */
+		unsigned inepnakeff:1;
+		/** Transmit FIFO Empty mask */
+		unsigned emptyintr:1;
+
+		unsigned txfifoundrn:1;
+
+		/** BNA Interrupt mask */
+		unsigned bna:1;
+
+		unsigned reserved10_12:3;
+		/** NAK Interrupt mask */
+		unsigned nak:1;
+
+		unsigned reserved14_31:18;
+		} b;
+};
+/**
+ * This union represents the bit fields in the Device IN EP Common
+ * Interrupt Mask Register.
+ */
+/*typedef union diepint_data diepmsk_data_t;*/
+
+/**
+ * This union represents the bit fields in the Device OUT EP Interrupt
+ * Register and Device OUT EP Common Interrupt Mask Register.
+ *
+ * - Read the register into the d32 member then set/clear the
+ *	 bits using the bit elements.
+ */
+union doepint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Transfer complete */
+		unsigned xfercompl:1;
+		/** Endpoint disable  */
+		unsigned epdisabled:1;
+		/** AHB Error */
+		unsigned ahberr:1;
+		/** Setup Phase Done (control EPs) */
+		unsigned setup:1;
+		unsigned outtknepdis:1;
+
+		unsigned stsphsercvd:1;
+		/** Back-to-Back SETUP Packets Received */
+		unsigned back2backsetup:1;
+
+		unsigned reserved7:1;
+		/** OUT packet Error */
+		unsigned outpkterr:1;
+		/** BNA Interrupt */
+		unsigned bna:1;
+
+		unsigned reserved10:1;
+		/** Packet Drop Status */
+		unsigned pktdrpsts:1;
+		/** Babble Interrupt */
+		unsigned babble:1;
+		/** NAK Interrupt */
+		unsigned nak:1;
+		/** NYET Interrupt */
+		unsigned nyet:1;
+
+		unsigned reserved15_31:17;
+	} b;
+};
+/**
+ * This union represents the bit fields in the Device OUT EP Common
+ * Interrupt Mask Register.
+ */
+/*typedef union doepint_data doepmsk_data_t;*/
+
+
+/**
+ * This union represents the bit fields in the Device All EP Interrupt
+ * and Mask Registers.
+ * - Read the register into the d32 member then set/clear the
+ *	 bits using the bit elements.
+ */
+union daint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** IN Endpoint bits */
+		unsigned in:16;
+		/** OUT Endpoint bits */
+		unsigned out:16;
+	} ep;
+	struct {
+		/** IN Endpoint bits */
+		unsigned inep0:1;
+		unsigned inep1:1;
+		unsigned inep2:1;
+		unsigned inep3:1;
+		unsigned inep4:1;
+		unsigned inep5:1;
+		unsigned inep6:1;
+		unsigned inep7:1;
+		unsigned inep8:1;
+		unsigned inep9:1;
+		unsigned inep10:1;
+		unsigned inep11:1;
+		unsigned inep12:1;
+		unsigned inep13:1;
+		unsigned inep14:1;
+		unsigned inep15:1;
+		/** OUT Endpoint bits */
+		unsigned outep0:1;
+		unsigned outep1:1;
+		unsigned outep2:1;
+		unsigned outep3:1;
+		unsigned outep4:1;
+		unsigned outep5:1;
+		unsigned outep6:1;
+		unsigned outep7:1;
+		unsigned outep8:1;
+		unsigned outep9:1;
+		unsigned outep10:1;
+		unsigned outep11:1;
+		unsigned outep12:1;
+		unsigned outep13:1;
+		unsigned outep14:1;
+		unsigned outep15:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Device IN Token Queue
+ * Read Registers.
+ * - Read the register into the d32 member.
+ * - READ-ONLY Register
+ */
+union dtknq1_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** In Token Queue Write Pointer */
+		unsigned intknwptr:5;
+		/** Reserved */
+		unsigned reserved05_06:2;
+		/** write pointer has wrapped. */
+		unsigned wrap_bit:1;
+		/** EP Numbers of IN Tokens 0 ... 5 */
+		unsigned epnums0_5:24;
+	} b;
+};
+
+/**
+ * This union represents Threshold control Register
+ * - Read and write the register into the d32 member.
+ * - READ-WRITABLE Register
+ */
+union dthrctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** non ISO Tx Thr. Enable */
+		unsigned non_iso_thr_en:1;
+		/** ISO Tx Thr. Enable */
+		unsigned iso_thr_en:1;
+		/** Tx Thr. Length */
+		unsigned tx_thr_len:9;
+		unsigned ahb_thr_ratio:2;
+		/** Reserved */
+		unsigned reserved13_15:3;
+		/** Rx Thr. Enable */
+		unsigned rx_thr_en:1;
+		/** Rx Thr. Length */
+		unsigned rx_thr_len:9;
+		/** Reserved */
+		unsigned reserved26_31:6;
+	} b;
+};
+
+
+/**
+ * Device Logical IN Endpoint-Specific Registers. Offsets
+ * 900h-AFCh
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.
+ */
+struct dwc_otg_dev_in_ep_regs {
+	/** Device IN Endpoint Control Register. Offset:900h +
+	 * (ep_num * 20h) + 00h */
+	u32 diepctl;
+	/** Reserved. Offset:900h + (ep_num * 20h) + 04h */
+	u32 reserved04;
+	/** Device IN Endpoint Interrupt Register. Offset:900h +
+	 * (ep_num * 20h) + 08h */
+	u32 diepint;
+	/** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */
+	u32 reserved0C;
+	/** Device IN Endpoint Transfer Size
+	 * Register. Offset:900h + (ep_num * 20h) + 10h */
+	u32 dieptsiz;
+	/** Device IN Endpoint DMA Address Register. Offset:900h +
+	 * (ep_num * 20h) + 14h */
+	u32 diepdma;
+	/** Device IN Endpoint Transmit FIFO Status Register. Offset:900h +
+	 * (ep_num * 20h) + 18h */
+	u32 dtxfsts;
+	/** Reserved. Offset:900h + (ep_num * 20h) + 1Ch - 900h +
+	 * (ep_num * 20h) + 1Ch*/
+	u32 diepdmab;
+};
+
+/**
+ * Device Logical OUT Endpoint-Specific Registers. Offsets:
+ * B00h-CFCh
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.
+ */
+struct dwc_otg_dev_out_ep_regs {
+	/** Device OUT Endpoint Control Register. Offset:B00h +
+	 * (ep_num * 20h) + 00h */
+	u32 doepctl;
+	/** Device OUT Endpoint Frame number Register.	Offset:
+	 * B00h + (ep_num * 20h) + 04h */
+	u32 doepfn;
+	/** Device OUT Endpoint Interrupt Register. Offset:B00h +
+	 * (ep_num * 20h) + 08h */
+	u32 doepint;
+	/** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */
+	u32 reserved0C;
+	/** Device OUT Endpoint Transfer Size Register. Offset:
+	 * B00h + (ep_num * 20h) + 10h */
+	u32 doeptsiz;
+	/** Device OUT Endpoint DMA Address Register. Offset:B00h
+	 * + (ep_num * 20h) + 14h */
+	u32 doepdma;
+	/** Reserved. Offset:B00h + (ep_num * 20h) + 18h */
+	u32 unused;
+	/** Device OUT Endpoint DMA Buffer Register. Offset:B00h
+	 * + (ep_num * 20h) + 1Ch */
+	u32 doepdmab;
+};
+
+/**
+ * This union represents the bit fields in the Device EP Control
+ * Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union depctl_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Maximum Packet Size
+		 * IN/OUT EPn
+		 * IN/OUT EP0 - 2 bits
+		 *	 2'b00: 64 Bytes
+		 *	 2'b01: 32
+		 *	 2'b10: 16
+		 *	 2'b11: 8 */
+		unsigned mps:11;
+#define DWC_DEP0CTL_MPS_64	 0
+#define DWC_DEP0CTL_MPS_32	 1
+#define DWC_DEP0CTL_MPS_16	 2
+#define DWC_DEP0CTL_MPS_8	 3
+
+		/** Next Endpoint
+		 * IN EPn/IN EP0
+		 * OUT EPn/OUT EP0 - reserved */
+		unsigned nextep:4;
+
+		/** USB Active Endpoint */
+		unsigned usbactep:1;
+
+		/** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
+		 * This field contains the PID of the packet going to
+		 * be received or transmitted on this endpoint. The
+		 * application should program the PID of the first
+		 * packet going to be received or transmitted on this
+		 * endpoint , after the endpoint is
+		 * activated. Application use the SetD1PID and
+		 * SetD0PID fields of this register to program either
+		 * D0 or D1 PID.
+		 *
+		 * The encoding for this field is
+		 *	 - 0: D0
+		 *	 - 1: D1
+		 */
+		unsigned dpid:1;
+
+		/** NAK Status */
+		unsigned naksts:1;
+
+		/** Endpoint Type
+		 *	2'b00: Control
+		 *	2'b01: Isochronous
+		 *	2'b10: Bulk
+		 *	2'b11: Interrupt */
+		unsigned eptype:2;
+
+		/** Snoop Mode
+		 * OUT EPn/OUT EP0
+		 * IN EPn/IN EP0 - reserved */
+		unsigned snp:1;
+
+		/** Stall Handshake */
+		unsigned stall:1;
+
+		/** Tx Fifo Number
+		 * IN EPn/IN EP0
+		 * OUT EPn/OUT EP0 - reserved */
+		unsigned txfnum:4;
+
+		/** Clear NAK */
+		unsigned cnak:1;
+		/** Set NAK */
+		unsigned snak:1;
+		/** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
+		 * Writing to this field sets the Endpoint DPID (DPID)
+		 * field in this register to DATA0. Set Even
+		 * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
+		 * Writing to this field sets the Even/Odd
+		 * (micro)frame (EO_FrNum) field to even (micro)
+		 * frame.
+		 */
+		unsigned setd0pid:1;
+		/** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
+		 * Writing to this field sets the Endpoint DPID (DPID)
+		 * field in this register to DATA1 Set Odd
+		 * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
+		 * Writing to this field sets the Even/Odd
+		 * (micro)frame (EO_FrNum) field to odd (micro) frame.
+		 */
+		unsigned setd1pid:1;
+
+		/** Endpoint Disable */
+		unsigned epdis:1;
+		/** Endpoint Enable */
+		unsigned epena:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Device EP Transfer
+ * Size Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union deptsiz_data {
+		/** raw register data */
+		u32 d32;
+		/** register bits */
+		struct {
+			/** Transfer size */
+			unsigned xfersize:19;
+			/** Packet Count */
+			unsigned pktcnt:10;
+			/** Multi Count - Periodic IN endpoints */
+			unsigned mc:2;
+			unsigned reserved:1;
+		} b;
+};
+
+/**
+ * This union represents the bit fields in the Device EP 0 Transfer
+ * Size Register.  Read the register into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union deptsiz0_data {
+		/** raw register data */
+		u32 d32;
+		/** register bits */
+		struct {
+			/** Transfer size */
+			unsigned xfersize:7;
+			/** Reserved */
+			unsigned reserved7_18:12;
+			/** Packet Count */
+			unsigned pktcnt:1;
+			/** Reserved */
+			unsigned reserved20_28:9;
+			/** Setup Packet Count (DOEPTSIZ0 Only) */
+			unsigned supcnt:2;
+			unsigned reserved31:1;
+		} b;
+};
+
+
+
+/*****************DMA Descriptor Specific Structures***************************/
+
+
+/** Buffer status definitions */
+
+#define BS_HOST_READY	0x0
+#define BS_DMA_BUSY		0x1
+#define BS_DMA_DONE		0x2
+#define BS_HOST_BUSY	0x3
+
+/** Receive/Transmit status definitions */
+
+#define RTS_SUCCESS		0x0
+#define RTS_BUFFLUSH	0x1
+#define RTS_RESERVED	0x2
+#define RTS_BUFERR		0x3
+union dev_dma_desc_sts {
+		/** raw register data */
+	u32 d32;
+		/** quadlet bits */
+	struct {
+		/** Received number of bytes */
+		unsigned bytes:16;
+
+		unsigned reserved16_22:7;
+		/** Multiple Transfer - only for OUT EPs */
+		unsigned mtrf:1;
+		/** Setup Packet received - only for OUT EPs */
+		unsigned sr:1;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		/** Short Packet */
+		unsigned sp:1;
+		/** Last */
+		unsigned l:1;
+		/** Receive Status */
+		unsigned sts:2;
+		/** Buffer Status */
+		unsigned bs:2;
+	} b;
+};
+/**
+ * DMA Descriptor structure
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+struct dwc_otg_dev_dma_desc {
+	/** DMA Descriptor status quadlet */
+	union dev_dma_desc_sts status;
+	/** DMA Descriptor data buffer pointer */
+	u32 buf;
+};
+
+
+
+/****************** Host Mode Register Structures******************************/
+
+/**
+ * The Host Global Registers structure defines the size and relative
+ * field offsets for the Host Mode Global Registers.  Host Global
+ * Registers offsets 400h-7FFh.
+*/
+struct dwc_otg_host_global_regs {
+	/** Host Configuration Register.   Offset: 400h */
+	u32 hcfg;
+	/** Host Frame Interval Register.	Offset: 404h */
+	u32 hfir;
+	/** Host Frame Number / Frame Remaining Register. Offset: 408h */
+	u32 hfnum;
+	/** Reserved.	Offset: 40Ch */
+	u32 reserved40C;
+	/** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */
+	u32 hptxsts;
+	/** Host All Channels Interrupt Register. Offset: 414h */
+	u32 haint;
+	/** Host All Channels Interrupt Mask Register. Offset: 418h */
+	u32 haintmsk;
+	/** Host Frame List Base Address Register . Offset: 41Ch */
+	u32 hflbaddr;
+};
+
+/**
+ * This union represents the bit fields in the Host Configuration Register.
+ * Read the register into the d32 member then set/clear the bits using
+ * the bit elements. Write the d32 member to the hcfg register.
+ */
+union hcfg_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		/** FS/LS Phy Clock Select */
+		unsigned fslspclksel:2;
+#define DWC_HCFG_30_60_MHZ 0
+#define DWC_HCFG_48_MHZ	   1
+#define DWC_HCFG_6_MHZ	   2
+
+		/** FS/LS Only Support */
+		unsigned fslssupp:1;
+		unsigned reserved3_22:20;
+		/** Enable Scatter/gather DMA in Host mode */
+		unsigned descdma:1;
+		/** Frame List Entries */
+		unsigned frlisten:2;
+		/** Enable Periodic Scheduling */
+		unsigned perschedena:1;
+		/** Periodic Scheduling Enabled Status */
+		unsigned perschedstat:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Frame Remaining/Number
+ * Register.
+ */
+union hfir_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		unsigned frint:16;
+		unsigned reserved:16;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Frame Remaining/Number
+ * Register.
+ */
+union hfnum_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		unsigned frnum:16;
+#define DWC_HFNUM_MAX_FRNUM 0x3FFF
+		unsigned frrem:16;
+	} b;
+};
+
+union hptxsts_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		unsigned ptxfspcavail:16;
+		unsigned ptxqspcavail:8;
+		/** Top of the Periodic Transmit Request Queue
+		 *	- bit 24 -Terminate(last entry for the selected channel)
+		 *	- bits 26:25 - Token Type
+		 *	  - 2'b00 - Zero length
+		 *	  - 2'b01 - Ping
+		 *	  - 2'b10 - Disable
+		 *	- bits 30:27 - Channel Number
+		 *	- bit 31 - Odd/even microframe
+		 */
+		unsigned ptxqtop_terminate:1;
+		unsigned ptxqtop_token:2;
+		unsigned ptxqtop_chnum:4;
+		unsigned ptxqtop_odd:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Port Control and Status
+ * Register. Read the register into the d32 member then set/clear the
+ * bits using the bit elements. Write the d32 member to the
+ * hprt0 register.
+ */
+union hprt0_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned prtconnsts:1;
+		unsigned prtconndet:1;
+		unsigned prtena:1;
+		unsigned prtenchng:1;
+		unsigned prtovrcurract:1;
+		unsigned prtovrcurrchng:1;
+		unsigned prtres:1;
+		unsigned prtsusp:1;
+		unsigned prtrst:1;
+		unsigned reserved9:1;
+		unsigned prtlnsts:2;
+		unsigned prtpwr:1;
+		unsigned prttstctl:4;
+		unsigned prtspd:2;
+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0
+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1
+#define DWC_HPRT0_PRTSPD_LOW_SPEED	2
+		unsigned reserved19_31:13;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host All Interrupt
+ * Register.
+ */
+union haint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned ch0:1;
+		unsigned ch1:1;
+		unsigned ch2:1;
+		unsigned ch3:1;
+		unsigned ch4:1;
+		unsigned ch5:1;
+		unsigned ch6:1;
+		unsigned ch7:1;
+		unsigned ch8:1;
+		unsigned ch9:1;
+		unsigned ch10:1;
+		unsigned ch11:1;
+		unsigned ch12:1;
+		unsigned ch13:1;
+		unsigned ch14:1;
+		unsigned ch15:1;
+		unsigned reserved:16;
+	} b;
+
+	struct {
+		unsigned chint:16;
+		unsigned reserved:16;
+	} b2;
+};
+
+/**
+ * This union represents the bit fields in the Host All Channels Interrupt
+ * Mask Register.  Bit N masks the interrupt of host channel N.
+ */
+union haintmsk_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits, one mask bit per host channel */
+	struct {
+		unsigned ch0:1;
+		unsigned ch1:1;
+		unsigned ch2:1;
+		unsigned ch3:1;
+		unsigned ch4:1;
+		unsigned ch5:1;
+		unsigned ch6:1;
+		unsigned ch7:1;
+		unsigned ch8:1;
+		unsigned ch9:1;
+		unsigned ch10:1;
+		unsigned ch11:1;
+		unsigned ch12:1;
+		unsigned ch13:1;
+		unsigned ch14:1;
+		unsigned ch15:1;
+		unsigned reserved:16;
+	} b;
+
+	/** the same 16 per-channel mask bits viewed as one field */
+	struct {
+		unsigned chint:16;
+		unsigned reserved:16;
+	} b2;
+};
+
+/**
+ * Host Channel Specific Registers. 500h-5FCh
+ *
+ * One instance of this layout exists per host channel;
+ * chan_num in the offsets below is the channel index.
+ */
+struct dwc_otg_hc_regs {
+	/** Host Channel Characteristic Register.
+	 * Offset: 500h + (chan_num * 20h) + 00h */
+	u32 hcchar;
+	/** Host Channel Split Control Register.
+	 * Offset: 500h + (chan_num * 20h) + 04h */
+	u32 hcsplt;
+	/** Host Channel Interrupt Register.
+	 * Offset: 500h + (chan_num * 20h) + 08h */
+	u32 hcint;
+	/** Host Channel Interrupt Mask Register.
+	 * Offset: 500h + (chan_num * 20h) + 0Ch */
+	u32 hcintmsk;
+	/** Host Channel Transfer Size Register.
+	 * Offset: 500h + (chan_num * 20h) + 10h */
+	u32 hctsiz;
+	/** Host Channel DMA Address Register.
+	 * Offset: 500h + (chan_num * 20h) + 14h */
+	u32 hcdma;
+	u32 reserved;
+	/** Host Channel DMA Buffer Address Register.
+	 *  Offset: 500h + (chan_num * 20h) + 1Ch */
+	u32 hcdmab;
+};
+
+/**
+ * This union represents the bit fields in the Host Channel Characteristics
+ * Register. Read the register into the d32 member then set/clear the
+ * bits using the bit elements. Write the d32 member to the
+ * hcchar register.
+ */
+union hcchar_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits (mps occupies bits [10:0], chen is bit 31) */
+	struct {
+		/** Maximum packet size in bytes */
+		unsigned mps:11;
+
+		/** Endpoint number */
+		unsigned epnum:4;
+
+		/** 0: OUT, 1: IN */
+		unsigned epdir:1;
+
+		unsigned reserved:1;
+
+		/** 0: Full/high speed device, 1: Low speed device */
+		unsigned lspddev:1;
+
+		/** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+		unsigned eptype:2;
+
+		/** Packets per frame for periodic transfers. 0 is reserved. */
+		unsigned multicnt:2;
+
+		/** Device address */
+		unsigned devaddr:7;
+
+		/**
+		 * Frame to transmit periodic transaction.
+		 * 0: even, 1: odd
+		 */
+		unsigned oddfrm:1;
+
+		/** Channel disable */
+		unsigned chdis:1;
+
+		/** Channel enable */
+		unsigned chen:1;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Channel Split Control
+ * Register (hcsplt).
+ */
+union hcsplt_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		/** Port Address */
+		unsigned prtaddr:7;
+
+		/** Hub Address */
+		unsigned hubaddr:7;
+
+		/** Transaction Position (one of DWC_HCSPLIT_XACTPOS_*) */
+		unsigned xactpos:2;
+#define DWC_HCSPLIT_XACTPOS_MID 0
+#define DWC_HCSPLIT_XACTPOS_END 1
+#define DWC_HCSPLIT_XACTPOS_BEGIN 2
+#define DWC_HCSPLIT_XACTPOS_ALL 3
+
+		/** Do Complete Split */
+		unsigned compsplt:1;
+
+		/** Reserved */
+		unsigned reserved:14;
+
+		/** Split Enable */
+		unsigned spltena:1;
+	} b;
+};
+
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt
+ * Register (hcint).
+ */
+union hcint_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		/** Transfer Complete */
+		unsigned xfercomp:1;
+		/** Channel Halted */
+		unsigned chhltd:1;
+		/** AHB Error */
+		unsigned ahberr:1;
+		/** STALL Response Received */
+		unsigned stall:1;
+		/** NAK Response Received */
+		unsigned nak:1;
+		/** ACK Response Received */
+		unsigned ack:1;
+		/** NYET Response Received */
+		unsigned nyet:1;
+		/** Transaction Err */
+		unsigned xacterr:1;
+		/** Babble Error */
+		unsigned bblerr:1;
+		/** Frame Overrun */
+		unsigned frmovrun:1;
+		/** Data Toggle Error */
+		unsigned datatglerr:1;
+		/** Buffer Not Available (only for DDMA mode) */
+		unsigned bna:1;
+		/** Excessive transaction error (only for DDMA mode) */
+		unsigned xcs_xact:1;
+		/** Frame List Rollover interrupt */
+		unsigned frm_list_roll:1;
+		/** Reserved */
+		unsigned reserved14_31:18;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt Mask
+ * Register. Read the register into the d32 member then set/clear the
+ * bits using the bit elements. Write the d32 member to the
+ * hcintmsk register.
+ *
+ * The mask bits correspond one-to-one with the interrupt bits in
+ * union hcint_data (note the spelling difference: xfercompl here
+ * vs. xfercomp there).
+ */
+union hcintmsk_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		unsigned xfercompl:1;
+		unsigned chhltd:1;
+		unsigned ahberr:1;
+		unsigned stall:1;
+		unsigned nak:1;
+		unsigned ack:1;
+		unsigned nyet:1;
+		unsigned xacterr:1;
+		unsigned bblerr:1;
+		unsigned frmovrun:1;
+		unsigned datatglerr:1;
+		unsigned bna:1;
+		unsigned xcs_xact:1;
+		unsigned frm_list_roll:1;
+		unsigned reserved14_31:18;
+	} b;
+};
+/**
+ * This union represents the bit fields in the Host Channel Transfer Size
+ * Register. Read the register into the d32 member then set/clear the
+ * bits using the bit elements. Write the d32 member to the
+ * hctsiz register.
+ */
+union hctsiz_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits (buffer DMA / slave mode view) */
+	struct {
+		/** Total transfer size in bytes */
+		unsigned xfersize:19;
+
+		/** Data packets to transfer */
+		unsigned pktcnt:10;
+
+		/**
+		 * Packet ID for next data packet
+		 * 0: DATA0
+		 * 1: DATA2
+		 * 2: DATA1
+		 * 3: MDATA (non-Control), SETUP (Control)
+		 */
+		unsigned pid:2;
+#define DWC_HCTSIZ_DATA0 0
+#define DWC_HCTSIZ_DATA1 2
+#define DWC_HCTSIZ_DATA2 1
+#define DWC_HCTSIZ_MDATA 3
+#define DWC_HCTSIZ_SETUP 3
+
+		/** Do PING protocol when 1 */
+		unsigned dopng:1;
+	} b;
+	/** register bits (descriptor DMA mode view) */
+	struct {
+		/** Scheduling information */
+		unsigned schinfo:8;
+
+		/** Number of transfer descriptors.
+		 * Max value:
+		 * 64 in general,
+		 * 256 only for HS isochronous endpoint.
+		 */
+		unsigned ntd:8;
+
+		/** Reserved */
+		unsigned reserved16_28:13;
+
+		/**
+		 * Packet ID for next data packet
+		 * 0: DATA0
+		 * 1: DATA2
+		 * 2: DATA1
+		 * 3: MDATA (non-Control)
+		 */
+		unsigned pid:2;
+
+		/** Do PING protocol when 1 */
+		unsigned dopng:1;
+	} b_ddma;
+};
+
+/**
+ * This union represents the bit fields in the Host DMA Address
+ * Register used in Descriptor DMA mode.
+ */
+union hcdma_data {
+	/** raw register data */
+	u32 d32;
+	/** register bits */
+	struct {
+		unsigned reserved0_2:3;
+		/** Current Transfer Descriptor. Not used for ISOC */
+		unsigned ctd:8;
+		/** Start Address of Descriptor List */
+		unsigned dma_addr:21;
+	} b;
+};
+
+/**
+ * This union represents the bit fields in the DMA Descriptor
+ * status quadlet for host mode. Read the quadlet into the d32 member then
+ * set/clear the bits using the bit elements.
+ */
+union host_dma_desc_sts {
+	/** raw register data */
+	u32 d32;
+	/** quadlet bits — two views: @b for non-isochronous endpoints,
+	 *  @b_isoc for isochronous endpoints */
+
+	/* for non-isochronous */
+	struct {
+		/** Number of bytes */
+		unsigned n_bytes:17;
+		/** QTD offset to jump when Short Packet received -
+		 * only for IN EPs */
+		unsigned qtd_offset:6;
+		/**
+		 * Set to request the core to jump to alternate QTD if
+		 * Short Packet received - only for IN EPs
+		 */
+		unsigned a_qtd:1;
+		 /**
+		  * Setup Packet bit. When set indicates that buffer contains
+		  * setup packet.
+		  */
+		unsigned sup:1;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		/** End of List */
+		unsigned eol:1;
+		unsigned reserved27:1;
+		/** Rx/Tx Status */
+		unsigned sts:2;
+	#define DMA_DESC_STS_PKTERR	1
+		unsigned reserved30:1;
+		/** Active Bit */
+		unsigned a:1;
+	} b;
+	/* for isochronous */
+	struct {
+		/** Number of bytes */
+		unsigned n_bytes:12;
+		unsigned reserved12_24:13;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		unsigned reserved26_27:2;
+		/** Rx/Tx Status */
+		unsigned sts:2;
+		unsigned reserved30:1;
+		/** Active Bit */
+		unsigned a:1;
+	} b_isoc;
+};
+
+/* Maximum value of the non-isoc n_bytes descriptor field (2^17 - 1). */
+#define	MAX_DMA_DESC_SIZE		131071
+/* Descriptor-count limits; see the ntd field of union hctsiz_data. */
+#define MAX_DMA_DESC_NUM_GENERIC	64
+#define MAX_DMA_DESC_NUM_HS_ISOC	256
+#define MAX_FRLIST_EN_NUM		64
+/**
+ * Host-mode DMA Descriptor structure
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+struct dwc_otg_host_dma_desc {
+	/** DMA Descriptor status quadlet */
+	union host_dma_desc_sts	status;
+	/** DMA Descriptor data buffer pointer */
+	u32	buf;
+};
+
+/** OTG Host Interface Structure.
+ *
+ * The OTG Host Interface Structure structure contains information
+ * needed to manage the DWC_otg controller acting in host mode. It
+ * represents the programming view of the host-specific aspects of the
+ * controller.
+ */
+struct dwc_otg_host_if {
+	/** Host Global Registers starting at offset 400h.*/
+	struct dwc_otg_host_global_regs __iomem *host_global_regs;
+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400
+
+	/** Host Port 0 Control and Status Register */
+	u32 __iomem *hprt0;
+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440
+
+
+	/** Host Channel Specific Registers at offsets 500h-5FCh. */
+	struct dwc_otg_hc_regs __iomem *hc_regs[MAX_EPS_CHANNELS];
+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500
+/* stride between successive per-channel register blocks */
+#define DWC_OTG_CHAN_REGS_OFFSET 0x20
+
+
+	/* Host configuration information */
+	/** Number of Host Channels (range: 1-16) */
+	u8 num_host_channels;
+	/** Periodic EPs supported (0: no, 1: yes) */
+	u8 perio_eps_supported;
+	/** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
+	u16 perio_tx_fifo_size;
+
+};
+
+/**
+ * This union represents the bit fields in the Power and Clock Gating Control
+ * (PCGCCTL) Register. Read the register into the d32 member then set/clear
+ * the bits using the bit elements.
+ */
+union pcgcctl_data {
+	/** raw register data */
+	u32 d32;
+
+	/** register bits */
+	struct {
+		/** Stop Pclk */
+		unsigned stoppclk:1;
+		/** Gate Hclk */
+		unsigned gatehclk:1;
+		/** Power Clamp */
+		unsigned pwrclmp:1;
+		/** Reset Power Down Modules */
+		unsigned rstpdwnmodule:1;
+		/** PHY Suspended */
+		unsigned physuspended:1;
+		/** Enable Sleep Clock Gating (Enbl_L1Gating) */
+		unsigned enbl_sleep_gating:1;
+		/** PHY In Sleep (PhySleep) */
+		unsigned phy_in_sleep:1;
+		/** Deep Sleep*/
+		unsigned deep_sleep:1;
+
+		/** Reserved */
+		unsigned reserved31_8:24;
+	} b;
+};
+
+/**
+ * The dwc_otg_dev_if structure contains information needed to manage
+ * the DWC_otg controller acting in device mode. It represents the
+ * programming view of the device-specific aspects of the controller.
+ */
+struct dwc_otg_dev_if {
+	/** Pointer to device Global registers.
+	 * Device Global Registers starting at offset 800h
+	 */
+	struct dwc_otg_dev_global_regs __iomem *dev_global_regs;
+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800
+
+	/**
+	 * Device Logical IN Endpoint-Specific Registers 900h-AFCh
+	 */
+	struct dwc_otg_dev_in_ep_regs __iomem *in_ep_regs[MAX_EPS_CHANNELS];
+#define DWC_DEV_IN_EP_REG_OFFSET 0x900
+#define DWC_EP_REG_OFFSET 0x20
+
+	/** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+	struct dwc_otg_dev_out_ep_regs __iomem *out_ep_regs[MAX_EPS_CHANNELS];
+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00
+
+	/* Device configuration information */
+	u8	 speed;		/* Device Speed 0: Unknown, 1: LS, 2: FS, 3: HS */
+	u8	 num_in_eps;	/* Number # of Tx EP range: 0-15 except ep0 */
+	u8	 num_out_eps;	/* Number # of Rx EP range: 0-15 except ep 0 */
+
+	/** Size of periodic FIFOs (Bytes) */
+	u16 perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+	/** Size of Tx FIFOs (Bytes) */
+	u16 tx_fifo_size[MAX_TX_FIFOS];
+
+	/** Thresholding enable flags and length variables */
+	u16 rx_thr_en;
+	u16 iso_tx_thr_en;
+	u16 non_iso_tx_thr_en;
+
+	u16 rx_thr_length;
+	u16 tx_thr_length;
+
+	/** 2 descriptors for SETUP packets */
+	dma_addr_t dma_setup_desc_addr[2];
+	struct dwc_otg_dev_dma_desc *setup_desc_addr[2];
+
+	/** Pointer to Descriptor with latest SETUP packet */
+	struct dwc_otg_dev_dma_desc *psetup;
+
+	/** Index of current SETUP handler descriptor */
+	u32 setup_desc_index;
+
+	/** Descriptor for Data In or Status In phases */
+	dma_addr_t dma_in_desc_addr;
+	struct dwc_otg_dev_dma_desc *in_desc_addr;
+
+	/** Descriptor for Data Out or Status Out phases */
+	dma_addr_t dma_out_desc_addr;
+	struct dwc_otg_dev_dma_desc *out_desc_addr;
+
+	/** Setup Packet Detected - if set clear NAK when queueing */
+	u32 spd;
+};
+
+
+#endif
diff --git a/drivers/usb/dwc_otg/dwc_otg_tz1090.h b/drivers/usb/dwc_otg/dwc_otg_tz1090.h
new file mode 100644
index 0000000..fca272f
--- /dev/null
+++ b/drivers/usb/dwc_otg/dwc_otg_tz1090.h
@@ -0,0 +1,82 @@
+/*
+ *  dwc_otg_tz1090.h
+ *
+ *  @file
+ *
+ *  This file contains the Platform Specific constants, interfaces
+ * (functions and macros) for Comet SoC.
+ *
+ *  Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ */
+
+#if !defined(__DWC_OTG_TZ1090_H__)
+#define __DWC_OTG_TZ1090_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#if !defined(CONFIG_SOC_TZ1090)
+#error "The contents of this file are Comet processor specific!!!"
+#endif
+
+
+/**
+ * Reads the contents of a 32-bit memory-mapped register.
+ *
+ * @param reg address of the register to read.
+ * @return contents of the register.
+ */
+static inline
+u32 dwc_read_reg32(u32 __iomem *reg)
+{
+	/* ioread32 takes void __iomem *; no cast needed */
+	return ioread32(reg);
+}
+
+/**
+ * Writes a 32-bit value to a memory-mapped register.
+ *
+ * @param reg address of the register to write.
+ * @param _value value to write to @reg.
+ */
+static inline void
+dwc_write_reg32(u32 __iomem *reg, const u32 _value)
+{
+	iowrite32(_value, reg);
+}
+
+/**
+ * Modifies bit values in a register using the algorithm:
+ * (reg_contents & ~clear_mask) | set_mask.
+ *
+ * Note: this is a non-atomic read-modify-write of the register.
+ *
+ * @param reg address of the register to modify.
+ * @param _clear_mask bit mask to be cleared.
+ * @param _set_mask bit mask to be set.
+ */
+static inline void
+dwc_modify_reg32(u32 __iomem *reg, const u32 _clear_mask,
+		const u32 _set_mask)
+{
+	iowrite32((ioread32(reg) & ~_clear_mask) | _set_mask, reg);
+}
+
+/**
+ * Writes a 32-bit value to a data FIFO register.
+ *
+ * @param reg address of the FIFO register.
+ * @param _value value to push into the FIFO.
+ */
+static inline void
+dwc_write_datafifo32(u32 __iomem *reg, const u32 _value)
+{
+	iowrite32(_value, reg);
+}
+
+/**
+ * Reads a 32-bit value from a data FIFO register.
+ *
+ * @param reg address of the FIFO register.
+ * @return value popped from the FIFO.
+ */
+static inline u32
+dwc_read_datafifo32(u32 __iomem *reg)
+{
+	return ioread32(reg);
+}
+
+#endif
diff --git a/drivers/usb/dwc_otg/ppc4xx_dma.c b/drivers/usb/dwc_otg/ppc4xx_dma.c
new file mode 100644
index 0000000..51f5196
--- /dev/null
+++ b/drivers/usb/dwc_otg/ppc4xx_dma.c
@@ -0,0 +1,746 @@
+/*
+ * IBM PPC4xx DMA engine core library
+ *
+ * Copyright 2000-2004 MontaVista Software Inc.
+ *
+ * Cleaned up and converted to new DCR access
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the  GNU General Public License along
+ * with this program; if not, write  to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/dma.h>
+#include "ppc4xx_dma.h"
+
+/* Per-channel software state, indexed by DMA channel number. */
+ppc_dma_ch_t dma_channels[MAX_PPC4xx_DMA_CHANNELS];
+EXPORT_SYMBOL(dma_channels);
+/* Returns the raw contents of the DMA status register (DMASR). */
+int
+ppc4xx_get_dma_status(void)
+{
+	return mfdcr(DCRN_DMASR);
+}
+EXPORT_SYMBOL(ppc4xx_get_dma_status);
+
+/*
+ * Programs the source address DCR(s) for channel @dmanr.
+ */
+void
+ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr)
+{
+	/* dmanr is signed here, so also reject negative channel numbers */
+	if (dmanr < 0 || dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "set_src_addr: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCRN_DMASAH0 + dmanr*8, src_addr >> 32);
+#endif
+	mtdcr(DCRN_DMASA0 + dmanr*8, (u32)src_addr);
+}
+EXPORT_SYMBOL(ppc4xx_set_src_addr);
+
+/*
+ * Programs the destination address DCR(s) for channel @dmanr.
+ */
+void
+ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr)
+{
+	/* dmanr is signed here, so also reject negative channel numbers */
+	if (dmanr < 0 || dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "set_dst_addr: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCRN_DMADAH0 + dmanr*8, dst_addr >> 32);
+#endif
+	mtdcr(DCRN_DMADA0 + dmanr*8, (u32)dst_addr);
+}
+EXPORT_SYMBOL(ppc4xx_set_dst_addr);
+
+/*
+ * Enables DMA channel @dmanr.  For DMA_MODE_READ/DMA_MODE_WRITE the
+ * previously saved address is programmed first; the mode bits are set,
+ * stale status bits cleared, and finally the channel is enabled.  The
+ * ordering of these steps matters (see the comments below).
+ */
+void
+ppc4xx_enable_dma(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+	static const unsigned int status_bits[] = {
+		DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
+		DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
+		DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
+		DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
+
+	/* Validate the channel number BEFORE indexing dma_channels[]
+	 * (the original order read ->in_use out of bounds first). */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "enable_dma: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+	if (p_dma_ch->in_use) {
+		printk(KERN_WARNING "enable_dma: channel %d in use\n", dmanr);
+		return;
+	}
+
+	if (p_dma_ch->mode == DMA_MODE_READ) {
+		/* peripheral to memory */
+		ppc4xx_set_src_addr(dmanr, 0);
+		ppc4xx_set_dst_addr(dmanr, p_dma_ch->addr);
+	} else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+		/* memory to peripheral */
+		ppc4xx_set_src_addr(dmanr, p_dma_ch->addr);
+		ppc4xx_set_dst_addr(dmanr, 0);
+	}
+
+	/* for other xfer modes, the addresses are already set */
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	control &= ~(DMA_TM_MASK | DMA_TD);	/* clear all mode bits */
+	if (p_dma_ch->mode == DMA_MODE_MM) {
+		/* software initiated memory to memory */
+		control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+#if defined(CONFIG_405EX) || defined(CONFIG_405EXr)
+		control |= DMA_MODE_MM;
+		if (p_dma_ch->dai)
+			control |= DMA_DAI;
+
+		if (p_dma_ch->sai)
+			control |= DMA_SAI;
+#endif
+#if defined(CONFIG_460EX) || defined(CONFIG_460GT)
+		control |= DMA_MODE_MM | DMA_DAI | DMA_SAI;
+#endif
+	}
+
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	/*
+	 * Clear the CS, TS, RI bits for the channel from DMASR.  This
+	 * has been observed to happen correctly only after the mode and
+	 * ETD/DCE bits in DMACRx are set above.  Must do this before
+	 * enabling the channel.
+	 */
+	mtdcr(DCRN_DMASR, status_bits[dmanr]);
+
+	/*
+	 * For device-paced transfers, Terminal Count Enable apparently
+	 * must be on, and this must be turned on after the mode, etc.
+	 * bits are cleared above (at least on Redwood-6).
+	 */
+	if ((p_dma_ch->mode == DMA_MODE_MM_DEVATDST) ||
+	    (p_dma_ch->mode == DMA_MODE_MM_DEVATSRC))
+		control |= DMA_TCE_ENABLE;
+
+	/* Now enable the channel. */
+	control |= (p_dma_ch->mode | DMA_CE_ENABLE);
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	p_dma_ch->in_use = 1;
+}
+EXPORT_SYMBOL(ppc4xx_enable_dma);
+
+/*
+ * Disables DMA channel @dmanr by clearing its channel-enable bit and
+ * marking the software state as free.
+ */
+void
+ppc4xx_disable_dma(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+
+	/* Validate the channel number BEFORE indexing dma_channels[]. */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "disable_dma: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+	if (!p_dma_ch->in_use) {
+		printk(KERN_WARNING "disable_dma: channel %d not in use\n",
+				dmanr);
+		return;
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control &= ~DMA_CE_ENABLE;
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	p_dma_ch->in_use = 0;
+}
+EXPORT_SYMBOL(ppc4xx_disable_dma);
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later.  This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ          peripheral to memory
+ * DMA_MODE_WRITE         memory to peripheral
+ * DMA_MODE_MM            memory to memory
+ * DMA_MODE_MM_DEVATSRC   device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST   device-paced memory to memory, device at dst
+ */
+int
+ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "set_dma_mode: bad channel 0x%x\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	/* only saved here; enable_dma() programs the hardware from it */
+	p_dma_ch = &dma_channels[dmanr];
+	p_dma_ch->mode = mode;
+
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_set_dma_mode);
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width.  Thus, count
+ * MUST be a multiple of the bus width.
+ */
+void
+ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	ppc_dma_ch_t *p_dma_ch;
+
+	/* Validate the channel number before indexing dma_channels[]
+	 * and writing any DCR (the original version had no check at
+	 * all, unlike every other function in this file). */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "set_dma_count: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_4xxDMA
+	{
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (count & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if (count & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if (count & 0x7)
+				error = 1;
+			break;
+		default:
+			printk(KERN_WARNING "set_dma_count: invalid "
+					"bus width: 0x%x\n",
+					p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk(KERN_WARNING
+			    "Warning: set_dma_count count 0x%x bus width %d\n",
+			     count, p_dma_ch->pwidth);
+	}
+#endif
+
+	/* convert bytes to transfers; shift was derived from pwidth */
+	count = count >> p_dma_ch->shift;
+
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), count);
+}
+EXPORT_SYMBOL(ppc4xx_set_dma_count);
+
+/*
+ *   Returns the number of bytes left to be transfered.
+ *   After a DMA transfer, this should return zero.
+ *   Reading this while a DMA transfer is still in progress will return
+ *   unpredictable results.
+ */
+int
+ppc4xx_get_dma_residue(unsigned int dmanr)
+{
+	unsigned int count;
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_get_dma_residue: "
+				"bad channel 0x%x\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+	count = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
+#if defined(CONFIG_405EX) || defined(CONFIG_405EXr) || \
+    defined(CONFIG_460EX) || defined(CONFIG_460GT)
+	/* these variants keep extra control bits in the count register */
+	count &= DMA_CTC_TC_MASK;
+#endif
+
+	/* convert transfers back to bytes */
+	return count << p_dma_ch->shift;
+}
+EXPORT_SYMBOL(ppc4xx_get_dma_residue);
+
+/*
+ * Sets the DMA address for a memory to peripheral or peripheral
+ * to memory transfer.  The address is just saved in the channel
+ * structure for now and used later in enable_dma().
+ */
+void
+ppc4xx_set_dma_addr(unsigned int dmanr, phys_addr_t addr)
+{
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_set_dma_addr: bad channel: %d\n",
+				dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_4xxDMA
+	{
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if ((unsigned) addr & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if ((unsigned) addr & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if ((unsigned) addr & 0x7)
+				error = 1;
+			break;
+		default:
+			printk(KERN_WARNING "ppc4xx_set_dma_addr: "
+					"invalid bus width: 0x%x\n",
+					p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			/* phys_addr_t may be 64-bit: print via %llx */
+			printk(KERN_WARNING "Warning: ppc4xx_set_dma_addr addr"
+					" 0x%llx bus width %d\n",
+					(unsigned long long)addr,
+					p_dma_ch->pwidth);
+	}
+#endif
+
+	/* save dma address and program it later after we know the xfer mode */
+	p_dma_ch->addr = addr;
+}
+EXPORT_SYMBOL(ppc4xx_set_dma_addr);
+
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.
+ */
+void
+ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+		     phys_addr_t dst_dma_addr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_set_dma_addr2: bad channel: %d\n",
+				dmanr);
+		return;
+	}
+
+#ifdef DEBUG_4xxDMA
+	{
+		ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (((unsigned) src_dma_addr & 0x1) ||
+					((unsigned) dst_dma_addr & 0x1))
+				error = 1;
+			break;
+		case PW_32:
+			if (((unsigned) src_dma_addr & 0x3) ||
+					((unsigned) dst_dma_addr & 0x3))
+				error = 1;
+			break;
+		case PW_64:
+			if (((unsigned) src_dma_addr & 0x7) ||
+					((unsigned) dst_dma_addr & 0x7))
+				error = 1;
+			break;
+		default:
+			printk(KERN_WARNING "ppc4xx_set_dma_addr2: "
+					"invalid bus width: 0x%x\n",
+					p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			/* phys_addr_t may be 64-bit: print via %llx */
+			printk(KERN_WARNING "Warning: ppc4xx_set_dma_addr2 src "
+					"0x%llx dst 0x%llx bus width %d\n",
+					(unsigned long long)src_dma_addr,
+					(unsigned long long)dst_dma_addr,
+					p_dma_ch->pwidth);
+	}
+#endif
+
+	ppc4xx_set_src_addr(dmanr, src_dma_addr);
+	ppc4xx_set_dst_addr(dmanr, dst_dma_addr);
+}
+EXPORT_SYMBOL(ppc4xx_set_dma_addr2);
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+int
+ppc4xx_enable_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_enable_dma_interrupt: "
+				"bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+	p_dma_ch->int_enable = 1;
+
+#if defined(CONFIG_405EX) || defined(CONFIG_405EXr) || \
+    defined(CONFIG_460EX) || defined(CONFIG_460GT)
+	/* these variants carry extra interrupt enables in the count reg */
+	control = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
+	control |= DMA_CTC_TCIE | DMA_CTC_ETIE | DMA_CTC_EIE;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), control);
+#endif
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control |= DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_enable_dma_interrupt);
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+int
+ppc4xx_disable_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_disable_dma_interrupt: "
+				"bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+	p_dma_ch->int_enable = 0;
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control &= ~DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
+
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc.  This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code.  The driver should not need to
+ * call this function.
+ */
+int
+ppc4xx_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t *p_init)
+{
+	unsigned int polarity;
+	uint32_t control = 0;
+	ppc_dma_ch_t *p_dma_ch;
+
+	DMA_MODE_READ = (unsigned long) DMA_TD;	/* Peripheral to Memory */
+	DMA_MODE_WRITE = 0;	/* Memory to Peripheral */
+
+	if (!p_init) {
+		printk(KERN_WARNING "ppc4xx_init_dma_channel: NULL p_init\n");
+		return DMA_STATUS_NULL_POINTER;
+	}
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_init_dma_channel: "
+				"bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+#if DCRN_POL > 0
+	polarity = mfdcr(DCRN_POL);
+#else
+	polarity = 0;
+#endif
+
+	/* Setup the control register based on the values passed to
+	 * us in p_init.  Then, over-write the control register with this
+	 * new value.
+	 */
+	control |= SET_DMA_CONTROL;
+
+	/* clear all polarity signals and then "or" in new signal levels */
+	polarity &= ~GET_DMA_POLARITY(dmanr);
+	polarity |= p_init->polarity;
+#if DCRN_POL > 0
+	mtdcr(DCRN_POL, polarity);
+#endif
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	/* save these values in our dma channel structure */
+	memcpy(p_dma_ch, p_init, sizeof(ppc_dma_ch_t));
+
+	/*
+	 * The peripheral width values written in the control register are:
+	 *   PW_8                 0
+	 *   PW_16                1
+	 *   PW_32                2
+	 *   PW_64                3
+	 *
+	 *   Since the DMA count register takes the number of "transfers",
+	 *   we need to divide the count sent to us in certain
+	 *   functions by the appropriate number.  It so happens that our
+	 *   right shift value is equal to the peripheral width value.
+	 */
+	p_dma_ch->shift = p_init->pwidth;
+
+	/* Save the control word for easy access. */
+	p_dma_ch->control = control;
+	mtdcr(DCRN_DMASR, 0xffffffff);	/* clear status register */
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_init_dma_channel);
+
+/*
+ * This function returns the channel configuration.
+ */
+int
+ppc4xx_get_channel_config(unsigned int dmanr, ppc_dma_ch_t *p_dma_ch)
+{
+	unsigned int polarity;
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_get_channel_config: "
+				"bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	/* start from the saved software state, then refresh the fields
+	 * that can be decoded from the live control register */
+	memcpy(p_dma_ch, &dma_channels[dmanr], sizeof(ppc_dma_ch_t));
+
+#if DCRN_POL > 0
+	polarity = mfdcr(DCRN_POL);
+#else
+	polarity = 0;
+#endif
+
+	p_dma_ch->polarity = polarity & GET_DMA_POLARITY(dmanr);
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	p_dma_ch->cp = GET_DMA_PRIORITY(control);
+	p_dma_ch->pwidth = GET_DMA_PW(control);
+	p_dma_ch->psc = GET_DMA_PSC(control);
+	p_dma_ch->pwc = GET_DMA_PWC(control);
+	p_dma_ch->phc = GET_DMA_PHC(control);
+	p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+	p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+	p_dma_ch->shift = GET_DMA_PW(control);
+
+#ifdef CONFIG_PPC4xx_EDMA
+	p_dma_ch->pf = GET_DMA_PREFETCH(control);
+#else
+	p_dma_ch->ch_enable = GET_DMA_CH(control);
+	p_dma_ch->ece_enable = GET_DMA_ECE(control);
+	p_dma_ch->tcd_disable = GET_DMA_TCD(control);
+#endif
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_get_channel_config);
+
+/*
+ * Sets the priority for the DMA channel dmanr.
+ * Since this is setup by the hardware init function, this function
+ * can be used to dynamically change the priority of a channel.
+ *
+ * Acceptable priorities:
+ *
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ *
+ */
+int
+ppc4xx_set_channel_priority(unsigned int dmanr, unsigned int priority)
+{
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_set_channel_priority: "
+				"bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	if ((priority != PRIORITY_LOW) &&
+	    (priority != PRIORITY_MID_LOW) &&
+	    (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+		/* warn but still program the value, preserving the
+		 * historical behaviour of this function */
+		printk(KERN_WARNING "ppc4xx_set_channel_priority:"
+				" bad priority: 0x%x\n", priority);
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control |= SET_DMA_PRIORITY(priority);
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_set_channel_priority);
+/*
+ * Returns the width of the peripheral attached to this channel. This assumes
+ * that someone who knows the hardware configuration, boot code or some other
+ * init code, already set the width.
+ *
+ * The return value is one of:
+ *   PW_8
+ *   PW_16
+ *   PW_32
+ *   PW_64
+ *
+ *   The function returns 0 on error.
+ */
+unsigned int
+ppc4xx_get_peripheral_width(unsigned int dmanr)
+{
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_WARNING "ppc4xx_get_peripheral_width: "
+				"bad channel %d\n", dmanr);
+		/* NOTE(review): the function's header comment says it
+		 * returns 0 on error, but it historically returns
+		 * DMA_STATUS_BAD_CHANNEL converted to unsigned —
+		 * verify callers before changing this. */
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	return GET_DMA_PW(control);
+}
+EXPORT_SYMBOL(ppc4xx_get_peripheral_width);
+
+/*
+ * Clears the channel status bits
+ */
+int
+ppc4xx_clr_dma_status(unsigned int dmanr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		/* was "KERN_WARNKERN_ERR" — a botched token paste that
+		 * cannot compile; KERN_ERR restored */
+		printk(KERN_ERR "ppc4xx_clr_dma_status: "
+				"bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	mtdcr(DCRN_DMASR, ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >>
+			dmanr);
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_clr_dma_status);
+
+#ifdef CONFIG_PPC4xx_EDMA
+/*
+ * Enables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_enable_burst(unsigned int dmanr)
+{
+	unsigned int ctc;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		/* "KERN_WARNKERN_ERR" could not compile; KERN_ERR restored */
+		printk(KERN_ERR "ppc4xx_enable_burst:"
+				" bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) | DMA_CTC_BTEN;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_enable_burst);
+/*
+ * Disables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_disable_burst(unsigned int dmanr)
+{
+	unsigned int ctc;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		/* "KERN_WARNKERN_ERR" could not compile; KERN_ERR restored */
+		printk(KERN_ERR "ppc4xx_disable_burst: "
+				"bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) & ~DMA_CTC_BTEN;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_disable_burst);
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register))
+ * must be one of:
+ *    DMA_CTC_BSIZ_2
+ *    DMA_CTC_BSIZ_4
+ *    DMA_CTC_BSIZ_8
+ *    DMA_CTC_BSIZ_16
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_set_burst_size(unsigned int dmanr, unsigned int bsize)
+{
+	unsigned int ctc;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		/* "KERN_WARNKERN_ERR" could not compile; KERN_ERR restored */
+		printk(KERN_ERR "ppc4xx_set_burst_size: "
+				"bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) & ~DMA_CTC_BSIZ_MSK;
+	ctc |= (bsize & DMA_CTC_BSIZ_MSK);
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+EXPORT_SYMBOL(ppc4xx_set_burst_size);
+
+#endif /* CONFIG_PPC4xx_EDMA */
+
diff --git a/drivers/usb/dwc_otg/ppc4xx_dma.h b/drivers/usb/dwc_otg/ppc4xx_dma.h
new file mode 100644
index 0000000..4ed3170
--- /dev/null
+++ b/drivers/usb/dwc_otg/ppc4xx_dma.h
@@ -0,0 +1,638 @@
+/*
+ * include/asm-ppc/ppc4xx_dma.h
+ *
+ * IBM PPC4xx DMA engine library
+ *
+ * Copyright 2000-2004 MontaVista Software Inc.
+ *
+ * Cleaned up a bit more, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the  GNU General Public License along
+ * with this program; if not, write  to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASMPPC_PPC4xx_DMA_H
+#define __ASMPPC_PPC4xx_DMA_H
+
+#include <linux/types.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_405EX
+#define DCRN_DMA0_BASE               0x100
+#define DCRN_DMASR_BASE              0x120
+#endif
+
+#ifdef CONFIG_460EX
+#define DCRN_DMA0_BASE               0x200
+#define DCRN_DMASR_BASE              0x220
+#endif
+
+#ifndef DCRN_DMA0_BASE
+#error DMA register not defined for this PPC4xx variant!
+#endif
+
+#define DCRN_DMACR0     (DCRN_DMA0_BASE + 0x0)  /* DMA Channel Control 0 */
+#define DCRN_DMACT0     (DCRN_DMA0_BASE + 0x1)  /* DMA Count 0 */
+#define DCRN_DMASAH0 (DCRN_DMA0_BASE + 0x2)     /* DMA Src Addr High 0 */
+#define DCRN_DMASA0     (DCRN_DMA0_BASE + 0x3)  /* DMA Src Addr Low 0 */
+#define DCRN_DMADAH0 (DCRN_DMA0_BASE + 0x4)     /* DMA Dest Addr High 0 */
+#define DCRN_DMADA0     (DCRN_DMA0_BASE + 0x5)  /* DMA Dest Addr Low 0 */
+#define DCRN_ASGH0      (DCRN_DMA0_BASE + 0x6)  /* DMA SG Desc Addr High 0 */
+#define DCRN_ASG0       (DCRN_DMA0_BASE + 0x7)  /* DMA SG Desc Addr Low 0 */
+
+#define DCRN_DMASR      (DCRN_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCRN_ASGC       (DCRN_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCRN_SLP        (DCRN_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCRN_POL        (DCRN_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+#undef DEBUG_4xxDMA
+
+#define MAX_PPC4xx_DMA_CHANNELS		4
+
+#define DMA_CH0     0
+#define DMA_CH1     1
+#define DMA_CH2     2
+#define DMA_CH3     3
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD			0
+#define DMA_STATUS_BAD_CHANNEL		1
+#define DMA_STATUS_BAD_HANDLE		2
+#define DMA_STATUS_BAD_MODE		3
+#define DMA_STATUS_NULL_POINTER		4
+#define DMA_STATUS_OUT_OF_MEMORY	5
+#define DMA_STATUS_SGL_LIST_EMPTY	6
+#define DMA_STATUS_GENERAL_ERROR	7
+#define DMA_STATUS_CHANNEL_NOTFREE	8
+
+#define DMA_CHANNEL_BUSY		0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR	0
+#define DMA_STATUS_CS		1	/* Count Status        */
+#define DMA_STATUS_TS		2	/* Transfer Status     */
+#define DMA_STATUS_DMA_ERROR	3	/* DMA Error Occurred  */
+#define DMA_STATUS_DMA_BUSY	4	/* The channel is busy */
+
+
+/*
+ * DMA Channel Control Registers
+ */
+
+/* The 44x devices have 64bit DMA controllers, where the 405EX/r have 32bit */
+#if defined(CONFIG_44x)
+#define	PPC4xx_DMA_64BIT
+#endif
+
+/* The 44x and 405EX/r come up big-endian with last bit reserved */
+#if defined(CONFIG_44x) || defined(CONFIG_405EX) || defined(CONFIG_405EXr)
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE        (1<<31)	/* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE        (1<<30)	/* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD                (1<<29)
+#define SET_DMA_TD(x)         (((x)&0x1)<<29)
+#define GET_DMA_TD(x)         (((x)&DMA_TD)>>29)
+
+#define DMA_PL                (1<<28)	/* Peripheral Location */
+#define SET_DMA_PL(x)         (((x)&0x1)<<28)
+#define GET_DMA_PL(x)         (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL    0
+#define INTERNAL_PERIPHERAL    1
+
+#define SET_DMA_PW(x)     (((x)&0x3)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK       SET_DMA_PW(3)
+#define   PW_8                 0
+#define   PW_16                1
+#define   PW_32                2
+#define   PW_64                3
+/* FIXME: Add PW_128 support for 440GP DMA block */
+#define GET_DMA_PW(x)     (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+/* Destination Address Increment */
+#define DMA_DAI           (1<<(25-DMA_CR_OFFSET))
+#define SET_DMA_DAI(x)    (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+/* Source Address Increment */
+#define DMA_SAI           (1<<(24-DMA_CR_OFFSET))
+#define SET_DMA_SAI(x)    (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN           (1<<(23-DMA_CR_OFFSET))	/* Buffer Enable */
+#define SET_DMA_BEN(x)    (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x)     (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK       SET_DMA_TM(3)
+#define   TM_PERIPHERAL        0	/* Peripheral */
+#define   TM_RESERVED          1	/* Reserved */
+#define   TM_S_MM              2	/* Memory to Memory */
+#define   TM_D_MM              3	/* Device Paced Memory to Memory */
+#define GET_DMA_TM(x)     (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+/* Peripheral Setup Cycles */
+#define SET_DMA_PSC(x)    (((x)&0x3)<<(19-DMA_CR_OFFSET))
+#define DMA_PSC_MASK      SET_DMA_PSC(3)
+#define GET_DMA_PSC(x)    (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+/* Peripheral Wait Cycles */
+#define SET_DMA_PWC(x)    (((x)&0x3F)<<(13-DMA_CR_OFFSET))
+#define DMA_PWC_MASK      SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x)    (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+/* Peripheral Hold Cycles */
+#define SET_DMA_PHC(x)    (((x)&0x7)<<(10-DMA_CR_OFFSET))
+#define DMA_PHC_MASK      SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x)    (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+/* EOT pin is a TC output */
+#define DMA_ETD_OUTPUT     (1<<(9-DMA_CR_OFFSET))
+#define SET_DMA_ETD(x)     (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE     (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x)     (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC            (1<<(2))	/* Address Decrement */
+#define SET_DMA_DEC(x)     (((x)&0x1)<<2)
+#define GET_DMA_DEC(x)     (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM		(SET_DMA_TM(TM_S_MM))	/* memory to memory */
+
+				/* Device-paced memory to memory, */
+				/* device is at source address    */
+#define DMA_MODE_MM_DEVATSRC	(DMA_TD | SET_DMA_TM(TM_D_MM))
+
+				/* Device-paced memory to memory,      */
+				/* device is at destination address    */
+#define DMA_MODE_MM_DEVATDST	(SET_DMA_TM(TM_D_MM))
+
+/* 405gp/440gp */
+
+/* Memory Read Prefetch */
+#define SET_DMA_PREFETCH(x)   (((x)&0x3)<<(4-DMA_CR_OFFSET))
+#define DMA_PREFETCH_MASK      SET_DMA_PREFETCH(3)
+#define   PREFETCH_1           0	/* Prefetch 1 Double Word */
+#define   PREFETCH_2           1
+#define   PREFETCH_4           2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE            (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x)     (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x)     (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/* stb3x */
+
+#define DMA_ECE_ENABLE (1<<5)
+#define SET_DMA_ECE(x) (((x)&0x1)<<5)
+#define GET_DMA_ECE(x) (((x)&DMA_ECE_ENABLE)>>5)
+
+#define DMA_TCD_DISABLE	(1<<4)
+#define SET_DMA_TCD(x) (((x)&0x1)<<4)
+#define GET_DMA_TCD(x) (((x)&DMA_TCD_DISABLE)>>4)
+
+typedef uint32_t sgl_handle_t;
+
+#ifdef CONFIG_PPC4xx_EDMA
+
+#define SGL_LIST_SIZE 4096
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+/* DMA Channel Priority */
+#define SET_DMA_PRIORITY(x)   (((x)&0x3)<<(6-DMA_CR_OFFSET))
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW           0
+#define PRIORITY_MID_LOW       1
+#define PRIORITY_MID_HIGH      2
+#define PRIORITY_HIGH          3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan)    (1<<(29-(chan*3)))	/* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0           (1<<31)	/* Terminal Count has been reached */
+#define DMA_CS1           (1<<30)
+#define DMA_CS2           (1<<29)
+#define DMA_CS3           (1<<28)
+
+#define DMA_TS0           (1<<27)	/* End of Transfer has been requested */
+#define DMA_TS1           (1<<26)
+#define DMA_TS2           (1<<25)
+#define DMA_TS3           (1<<24)
+
+#define DMA_CH0_ERR       (1<<23)	/* DMA Channel 0 Error */
+#define DMA_CH1_ERR       (1<<22)
+#define DMA_CH2_ERR       (1<<21)
+#define DMA_CH3_ERR       (1<<20)
+
+#define DMA_IN_DMA_REQ0   (1<<19)	/* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1   (1<<18)
+#define DMA_IN_DMA_REQ2   (1<<17)
+#define DMA_IN_DMA_REQ3   (1<<16)
+
+#define DMA_EXT_DMA_REQ0  (1<<15)	/* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1  (1<<14)
+#define DMA_EXT_DMA_REQ2  (1<<13)
+#define DMA_EXT_DMA_REQ3  (1<<12)
+
+#define DMA_CH0_BUSY      (1<<11)	/* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY      (1<<10)
+#define DMA_CH2_BUSY       (1<<9)
+#define DMA_CH3_BUSY       (1<<8)
+
+#define DMA_SG0            (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1            (1<<6)
+#define DMA_SG2            (1<<5)
+#define DMA_SG3            (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE	 (1<<29)	/* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE     (1<<28)	/* EOT Interrupt Enable */
+#define DMA_CTC_EIE		 (1<<27)	/* Error Interrupt Enable */
+#define DMA_CTC_BTEN     (1<<23)    /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21)    /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2   (0)
+#define DMA_CTC_BSIZ_4   (1<<21)
+#define DMA_CTC_BSIZ_8   (2<<21)
+#define DMA_CTC_BSIZ_16  (3<<21)
+#define DMA_CTC_TC_MASK  0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan)   	(1<<(31-chan))	/* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan)	(1<<(15-chan))	/* Enable writing to SSG0 bit */
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK            (1<<31)	/* Link */
+#define SG_TCI_ENABLE      (1<<29)	/* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE      (1<<28)	/* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE      (1<<27)	/* Enable Error Interrupt */
+#define SG_COUNT_MASK       0xFFFF	/* Count Field */
+
+#define SET_DMA_CONTROL \
+	(SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+	SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+	SET_DMA_ETD(p_init->etd_output)    | /* end of transfer pin*/ \
+	SET_DMA_TCE(p_init->tce_enable)    | /* terminal count enable*/ \
+	SET_DMA_PL(p_init->pl)             | /* peripheral location*/ \
+	SET_DMA_DAI(p_init->dai)           | /* dest addr increment*/ \
+	SET_DMA_SAI(p_init->sai)           | /* src addr increment*/ \
+	SET_DMA_PRIORITY(p_init->cp)       | /* channel priority*/ \
+	SET_DMA_PW(p_init->pwidth)         | /* peripheral/bus width*/ \
+	SET_DMA_PSC(p_init->psc)           | /* peripheral setup cycles*/ \
+	SET_DMA_PWC(p_init->pwc)           | /* peripheral wait cycles*/ \
+	SET_DMA_PHC(p_init->phc)           | /* peripheral hold cycles*/ \
+	SET_DMA_PREFETCH(p_init->pf)       /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) \
+				| DMAAck_ActiveLow(chan) \
+				| EOT_ActiveLow(chan))
+
+#elif defined(CONFIG_STB03xxx)		/* stb03xxx */
+
+#define DMA_PPC4xx_SIZE	4096
+
+/*
+ * DMA Status Register
+ */
+
+#define SET_DMA_PRIORITY(x)   (((x)&0x00800001)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK	0x00800001
+#define   PRIORITY_LOW         	0x00000000
+#define   PRIORITY_MID_LOW     	0x00000001
+#define   PRIORITY_MID_HIGH    	0x00800000
+#define   PRIORITY_HIGH        	0x00800001
+#define GET_DMA_PRIORITY(x) (((((x) & DMA_PRIORITY_MASK) & 0x00800000) >> 22) \
+		| (((x) & DMA_PRIORITY_MASK) & 0x00000001))
+
+#define DMA_CS0           (1<<31)	/* Terminal Count has been reached */
+#define DMA_CS1           (1<<30)
+#define DMA_CS2           (1<<29)
+#define DMA_CS3           (1<<28)
+
+#define DMA_TS0           (1<<27)	/* End of Transfer has been requested */
+#define DMA_TS1           (1<<26)
+#define DMA_TS2           (1<<25)
+#define DMA_TS3           (1<<24)
+
+#define DMA_CH0_ERR       (1<<23)	/* DMA Channel 0 Error */
+#define DMA_CH1_ERR       (1<<22)
+#define DMA_CH2_ERR       (1<<21)
+#define DMA_CH3_ERR       (1<<20)
+
+#define DMA_CT0		  (1<<19)	/* Chained transfer */
+
+#define DMA_IN_DMA_REQ0   (1<<18)	/* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1   (1<<17)
+#define DMA_IN_DMA_REQ2   (1<<16)
+#define DMA_IN_DMA_REQ3   (1<<15)
+
+#define DMA_EXT_DMA_REQ0  (1<<14)	/* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1  (1<<13)
+#define DMA_EXT_DMA_REQ2  (1<<12)
+#define DMA_EXT_DMA_REQ3  (1<<11)
+
+#define DMA_CH0_BUSY      (1<<10)	/* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY      (1<<9)
+#define DMA_CH2_BUSY       (1<<8)
+#define DMA_CH3_BUSY       (1<<7)
+
+#define DMA_CT1            (1<<6)	/* Chained transfer */
+#define DMA_CT2            (1<<5)
+#define DMA_CT3            (1<<4)
+
+#define DMA_CH_ENABLE (1<<7)
+#define SET_DMA_CH(x) (((x)&0x1)<<7)
+#define GET_DMA_CH(x) (((x)&DMA_CH_ENABLE)>>7)
+
+/* STBx25xxx dma unique */
+/* enable device port on a dma channel
+ * example ext 0 on dma 1
+ */
+
+#define	SSP0_RECV	15
+#define	SSP0_XMIT	14
+#define EXT_DMA_0	12
+#define	SC1_XMIT	11
+#define SC1_RECV	10
+#define EXT_DMA_2	9
+#define	EXT_DMA_3	8
+#define SERIAL2_XMIT	7
+#define SERIAL2_RECV	6
+#define SC0_XMIT 	5
+#define	SC0_RECV	4
+#define	SERIAL1_XMIT	3
+#define SERIAL1_RECV	2
+#define	SERIAL0_XMIT	1
+#define SERIAL0_RECV	0
+
+#define DMA_CHAN_0	1
+#define DMA_CHAN_1	2
+#define DMA_CHAN_2	3
+#define DMA_CHAN_3	4
+
+/* end STBx25xx */
+
+/*
+ * Bit 30 must be one for Redwoods, otherwise transfers may receive errors.
+ */
+#define DMA_CR_MB0 0x2
+
+#define SET_DMA_CONTROL \
+	(SET_DMA_CIE_ENABLE(p_init->int_enable) |  /* interrupt enable      */ \
+	SET_DMA_ETD(p_init->etd_output)        |  /* end of transfer pin    */ \
+	SET_DMA_TCE(p_init->tce_enable)        |  /* terminal count enable  */ \
+	SET_DMA_PL(p_init->pl)                 |  /* peripheral location    */ \
+	SET_DMA_DAI(p_init->dai)               |  /* dest addr increment    */ \
+	SET_DMA_SAI(p_init->sai)               |  /* src addr increment     */ \
+	SET_DMA_PRIORITY(p_init->cp)           |  /* channel priority       */ \
+	SET_DMA_PW(p_init->pwidth)             |  /* peripheral/bus width   */ \
+	SET_DMA_PSC(p_init->psc)               |  /* peripheral setup cycles*/ \
+	SET_DMA_PWC(p_init->pwc)               |  /* peripheral wait cycles */ \
+	SET_DMA_PHC(p_init->phc)               |  /* peripheral hold cycles */ \
+	SET_DMA_TCD(p_init->tcd_disable)       |  /* TC chain mode disable  */ \
+	SET_DMA_ECE(p_init->ece_enable)	  |  /* ECE chain mode enable  */ \
+	SET_DMA_CH(p_init->ch_enable)	|    /* Chain enable 	        */ \
+	DMA_CR_MB0				/* must be one */)
+
+#define GET_DMA_POLARITY(chan) chan
+
+#endif
+
+typedef struct {
+	unsigned short in_use;	/* set when channel is being used, clr when
+				 * available.
+				 */
+	/*
+	 * Valid polarity settings:
+	 *   DMAReq_ActiveLow(n)
+	 *   DMAAck_ActiveLow(n)
+	 *   EOT_ActiveLow(n)
+	 *
+	 *   n is 0 to max dma chans
+	 */
+	unsigned int polarity;
+
+	char buffer_enable;	/* Boolean: buffer enable            */
+	char tce_enable;	/* Boolean: terminal count enable    */
+	char etd_output;	/* Boolean: eot pin is a tc output   */
+	char pce;		/* Boolean: parity check enable      */
+
+	/*
+	 * Peripheral location:
+	 * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+	 * EXTERNAL_PERIPHERAL
+	 */
+	char pl;		/* internal/external peripheral      */
+
+	/*
+	 * Valid pwidth settings:
+	 *   PW_8
+	 *   PW_16
+	 *   PW_32
+	 *   PW_64
+	 */
+	unsigned int pwidth;
+
+	char dai;		/* Boolean: dst address increment   */
+	char sai;		/* Boolean: src address increment   */
+
+	/*
+	 * Valid psc settings: 0-3
+	 */
+	unsigned int psc;	/* Peripheral Setup Cycles         */
+
+	/*
+	 * Valid pwc settings:
+	 * 0-63
+	 */
+	unsigned int pwc;	/* Peripheral Wait Cycles          */
+
+	/*
+	 * Valid phc settings:
+	 * 0-7
+	 */
+	unsigned int phc;	/* Peripheral Hold Cycles          */
+
+	/*
+	 * Valid cp (channel priority) settings:
+	 *   PRIORITY_LOW
+	 *   PRIORITY_MID_LOW
+	 *   PRIORITY_MID_HIGH
+	 *   PRIORITY_HIGH
+	 */
+	unsigned int cp;	/* channel priority                */
+
+	/*
+	 * Valid pf (memory read prefetch) settings:
+	 *
+	 *   PREFETCH_1
+	 *   PREFETCH_2
+	 *   PREFETCH_4
+	 */
+	unsigned int pf;	/* memory read prefetch            */
+
+	/*
+	 * Boolean: channel interrupt enable
+	 * NOTE: for sgl transfers, only the last descriptor will be setup to
+	 * interrupt.
+	 */
+	char int_enable;
+
+	char shift;		/* easy access to byte_count shift, based on */
+	/* the width of the channel                  */
+
+	uint32_t control;	/* channel control word                      */
+
+	/* These variabled are used ONLY in single dma transfers              */
+	unsigned int mode;	/* transfer mode                     */
+	phys_addr_t addr;
+	char ce;		/* channel enable */
+#ifdef CONFIG_STB03xxx
+	char ch_enable;
+	char tcd_disable;
+	char ece_enable;
+	char td;		/* transfer direction */
+#endif
+
+	char int_on_final_sg;/* for scatter/gather -only interrupt on last sg */
+} ppc_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout.  Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+typedef struct {
+	uint32_t control;
+	uint32_t control_count;
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	phys_addr_t next;
+} ppc_sgl_t;
+#else
+typedef struct {
+	uint32_t control;
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	uint32_t control_count;
+	uint32_t next;
+} ppc_sgl_t;
+#endif
+
+typedef struct {
+	unsigned int dmanr;
+	uint32_t control;    /* channel ctrl word; loaded from each descrptr */
+	uint32_t sgl_control;/* LK, TCI, ETI, and ERI bits in sgl descriptor */
+	dma_addr_t dma_addr; /* dma (physical) address of this list          */
+	ppc_sgl_t *phead;
+	dma_addr_t phead_dma;
+	ppc_sgl_t *ptail;
+	dma_addr_t ptail_dma;
+} sgl_list_info_t;
+
+typedef struct {
+	phys_addr_t *src_addr;
+	phys_addr_t *dst_addr;
+	phys_addr_t dma_src_addr;
+	phys_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+extern ppc_dma_ch_t dma_channels[];
+
+/*
+ * The DMA API are in ppc4xx_dma.c and ppc4xx_sgdma.c
+ */
+extern int ppc4xx_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_get_channel_config(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_set_channel_priority(unsigned int, unsigned int);
+extern unsigned int ppc4xx_get_peripheral_width(unsigned int);
+extern void ppc4xx_set_sg_addr(int, phys_addr_t);
+extern int
+ppc4xx_add_dma_sgl(sgl_handle_t, phys_addr_t, phys_addr_t, unsigned int);
+extern void ppc4xx_enable_dma_sgl(sgl_handle_t);
+extern void ppc4xx_disable_dma_sgl(sgl_handle_t);
+extern int
+ppc4xx_get_dma_sgl_residue(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int
+ppc4xx_delete_dma_sgl_element(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int ppc4xx_alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+extern void ppc4xx_free_dma_handle(sgl_handle_t);
+extern int ppc4xx_get_dma_status(void);
+extern int ppc4xx_enable_burst(unsigned int);
+extern int ppc4xx_disable_burst(unsigned int);
+extern int ppc4xx_set_burst_size(unsigned int, unsigned int);
+extern void ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr);
+extern void ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr);
+extern void ppc4xx_enable_dma(unsigned int dmanr);
+extern void ppc4xx_disable_dma(unsigned int dmanr);
+extern void ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count);
+extern int ppc4xx_get_dma_residue(unsigned int dmanr);
+extern void ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+				 phys_addr_t dst_dma_addr);
+extern int ppc4xx_enable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_disable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_clr_dma_status(unsigned int dmanr);
+extern int
+ppc4xx_map_dma_port(unsigned int dmanr, unsigned int ocp_dma, short dma_chan);
+extern int
+ppc4xx_disable_dma_port(unsigned int dmanr, unsigned int ocp_dma,
+		short dma_chan);
+extern int ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode);
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+#endif
+#endif				/* __KERNEL__ */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f41aa0d..2373883 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -171,9 +171,16 @@
 	   Say "y" to link the driver statically, or "m" to build a
 	   dynamically linked module called "bcm63xx_udc".
 
+config USB_GADGET_DWC_OTG
+	tristate "Synopsys DWC OTG Controller"
+	depends on (USB_DWC_OTG_OTG || USB_DWC_OTG_DEVICE_ONLY) && (4XX_SOC || CPU_CAVIUM_OCTEON || SOC_TZ1090)
+	help
+	  This driver provides USB Host and Device Controller support for the
+	  Synopsys Designware USB OTG Core.
+
 config USB_FSL_USB2
 	tristate "Freescale Highspeed USB DR Peripheral Controller"
-	depends on FSL_SOC || ARCH_MXC
+	depends on FSL_SOC || ARCH_MXC || SOC_CHORUS2
 	select USB_FSL_MPH_DR_OF if OF
 	help
 	   Some of Freescale PowerPC and i.MX processors have a High Speed
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 1e4cfb0..4cf9ba0 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -169,8 +169,6 @@
 	int i, count;
 	rndis_query_cmplt_type *resp;
 	struct net_device *net;
-	struct rtnl_link_stats64 temp;
-	const struct rtnl_link_stats64 *stats;
 
 	if (!r) return -ENOMEM;
 	resp = (rndis_query_cmplt_type *)r->buf;
@@ -193,7 +191,6 @@
 	resp->InformationBufferOffset = cpu_to_le32(16);
 
 	net = rndis_per_dev_params[configNr].dev;
-	stats = dev_get_stats(net, &temp);
 
 	switch (OID) {
 
@@ -240,9 +237,8 @@
 	/* mandatory */
 	case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
 		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
-		if (rndis_per_dev_params[configNr].dev) {
-			*outbuf = cpu_to_le32(
-				rndis_per_dev_params[configNr].dev->mtu);
+		if (net) {
+			*outbuf = cpu_to_le32(net->mtu);
 			retval = 0;
 		}
 		break;
@@ -263,9 +259,8 @@
 	/* mandatory */
 	case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
 		pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
-		if (rndis_per_dev_params[configNr].dev) {
-			*outbuf = cpu_to_le32(
-				rndis_per_dev_params[configNr].dev->mtu);
+		if (net) {
+			*outbuf = cpu_to_le32(net->mtu);
 			retval = 0;
 		}
 		break;
@@ -273,9 +268,8 @@
 	/* mandatory */
 	case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
 		pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
-		if (rndis_per_dev_params[configNr].dev) {
-			*outbuf = cpu_to_le32(
-				rndis_per_dev_params[configNr].dev->mtu);
+		if (net) {
+			*outbuf = cpu_to_le32(net->mtu);
 			retval = 0;
 		}
 		break;
@@ -357,9 +351,9 @@
 	case RNDIS_OID_GEN_XMIT_OK:
 		if (rndis_debug > 1)
 			pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->tx_packets
-				- stats->tx_errors - stats->tx_dropped);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.tx_packets
+				- net->stats.tx_errors - net->stats.tx_dropped);
 			retval = 0;
 		}
 		break;
@@ -368,9 +362,9 @@
 	case RNDIS_OID_GEN_RCV_OK:
 		if (rndis_debug > 1)
 			pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->rx_packets
-				- stats->rx_errors - stats->rx_dropped);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.rx_packets
+				- net->stats.rx_errors - net->stats.rx_dropped);
 			retval = 0;
 		}
 		break;
@@ -379,8 +373,8 @@
 	case RNDIS_OID_GEN_XMIT_ERROR:
 		if (rndis_debug > 1)
 			pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->tx_errors);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.tx_errors);
 			retval = 0;
 		}
 		break;
@@ -389,8 +383,8 @@
 	case RNDIS_OID_GEN_RCV_ERROR:
 		if (rndis_debug > 1)
 			pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->rx_errors);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.rx_errors);
 			retval = 0;
 		}
 		break;
@@ -398,8 +392,8 @@
 	/* mandatory */
 	case RNDIS_OID_GEN_RCV_NO_BUFFER:
 		pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->rx_dropped);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.rx_dropped);
 			retval = 0;
 		}
 		break;
@@ -409,7 +403,7 @@
 	/* mandatory */
 	case RNDIS_OID_802_3_PERMANENT_ADDRESS:
 		pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
-		if (rndis_per_dev_params[configNr].dev) {
+		if (net) {
 			length = ETH_ALEN;
 			memcpy(outbuf,
 				rndis_per_dev_params[configNr].host_mac,
@@ -421,7 +415,7 @@
 	/* mandatory */
 	case RNDIS_OID_802_3_CURRENT_ADDRESS:
 		pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
-		if (rndis_per_dev_params[configNr].dev) {
+		if (net) {
 			length = ETH_ALEN;
 			memcpy(outbuf,
 				rndis_per_dev_params [configNr].host_mac,
@@ -457,8 +451,8 @@
 	/* mandatory */
 	case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
 		pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
-		if (stats) {
-			*outbuf = cpu_to_le32(stats->rx_frame_errors);
+		if (net) {
+			*outbuf = cpu_to_le32(net->stats.rx_frame_errors);
 			retval = 0;
 		}
 		break;
diff --git a/drivers/usb/host/ehci-chorus2.c b/drivers/usb/host/ehci-chorus2.c
new file mode 100644
index 0000000..0508fab
--- /dev/null
+++ b/drivers/usb/host/ehci-chorus2.c
@@ -0,0 +1,194 @@
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2007 Imagination Technologies Ltd.
+ *
+ * Bus Glue for Chorus2 On-Chip EHCI driver
+ *
+ * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/platform_device.h>
+
+#define USBMODE_REG             0x200c1a8
+
+/* called during probe() after chip reset completes */
+static int ehci_chorus2_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	int		retval;
+
+	/* ehci_halt seems to always fail for this HCD. */
+	writel(0, &ehci->regs->command);
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	ehci->sbrn = 0x20;
+	retval = ehci_reset(ehci);
+	if (retval)
+		return retval;
+
+	writel(3, USBMODE_REG);
+
+	return 0;
+}
+
+/**
+ * usb_ehci_chorus2_probe() - initialize Chorus2 SoC-based HCDs
+ * @driver:	Host controller driver structure
+ * @dev:	Platform device
+ *
+ * Allocates basic resources for this USB host controller, and then invokes the
+ * start() method for the HCD associated with it through the hotplug entry's
+ * driver_data.
+ */
+static int usb_ehci_chorus2_probe(const struct hc_driver *driver,
+				  struct platform_device *dev)
+{
+	int retval;
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+
+	if (dev->resource[1].flags != IORESOURCE_IRQ) {
+		pr_debug("resource[1] is not IORESOURCE_IRQ");
+		return -ENOMEM;
+	}
+	hcd = usb_create_hcd(driver, &dev->dev, "Chorus2 EHCI");
+	if (!hcd)
+		return -ENOMEM;
+	hcd->rsrc_start = dev->resource[0].start;
+	hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		pr_debug("request_mem_region failed");
+		retval = -EBUSY;
+		goto err1;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		pr_debug("ioremap failed");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	hcd->has_tt = 1;
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = hcd->regs;
+	ehci->regs = hcd->regs +
+		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
+	if (retval == 0)
+		return retval;
+
+	iounmap(hcd->regs);
+err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * usb_ehci_hcd_chorus2_remove() - shutdown processing for Chorus2 based HCDs
+ * @hcd:	Host controller driver
+ * @dev:	Platform device being removed
+ *
+ * Reverses the effect of @usb_ehci_chorus2_probe, first invoking the HCD's
+ * stop() method. It is always called from a thread context, normally "rmmod",
+ * "apmd", or something similar.
+ */
+static void usb_ehci_chorus2_remove(struct usb_hcd *hcd,
+				    struct platform_device *dev)
+{
+	usb_remove_hcd(hcd);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_chorus2_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "Chorus2 EHCI",
+	.hcd_priv_size = sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq = ehci_irq,
+	.flags = HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = ehci_chorus2_setup,
+	.start = ehci_run,
+	.stop = ehci_stop,
+	.shutdown = ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue = ehci_urb_enqueue,
+	.urb_dequeue = ehci_urb_dequeue,
+	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data = ehci_hub_status_data,
+	.hub_control = ehci_hub_control,
+
+	.relinquish_port = ehci_relinquish_port,
+	.port_handed_over = ehci_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_chorus2_drv_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	ret = usb_ehci_chorus2_probe(&ehci_chorus2_hc_driver, pdev);
+	return ret;
+}
+
+static int ehci_hcd_chorus2_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_ehci_chorus2_remove(hcd, pdev);
+	return 0;
+}
+
+MODULE_ALIAS("chorus2-ehci");
+static struct platform_driver ehci_chorus2_driver = {
+	.probe = ehci_hcd_chorus2_drv_probe,
+	.remove = ehci_hcd_chorus2_drv_remove,
+	.shutdown = usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "chorus2-ehci",
+		.bus = &platform_bus_type
+	}
+};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 246e124..446d94c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1251,6 +1251,11 @@
 #define	PLATFORM_DRIVER		ehci_hcd_w90x900_driver
 #endif
 
+#if defined(CONFIG_SOC_CHORUS2)
+#include "ehci-chorus2.c"
+#define	PLATFORM_DRIVER		ehci_chorus2_driver
+#endif
+
 #ifdef CONFIG_USB_OCTEON_EHCI
 #include "ehci-octeon.c"
 #define PLATFORM_DRIVER		ehci_octeon_driver
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2e937bd..4251a05 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2237,6 +2237,78 @@
 	  found on DA8xx/OMAP-L1xx SoCs.
 	  If unsure, say N.
 
+config FB_PDP
+	bool "PDP framebuffer support"
+	depends on FB && HAVE_CLK && ( SOC_CHORUS2 || SOC_TZ1090 )
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default n
+	help
+	  Frame buffer driver for the on-chip PDP of Chorus2 and TZ1090 SoCs.
+
+config FB_PDP_GFX_VIDEOMEM
+	int "Graphics framebuffer memory"
+	depends on FB_PDP
+	default 1048576
+	help
+	  Default amount of video memory in bytes for the graphics framebuffer.
+
+config FB_PDP_GFX_FIX_NATIVE_RES
+	bool "Graphics framebuffer fixed to native resolution"
+	depends on FB_PDP
+	default y
+	help
+	  Forces the graphics framebuffer to always be at the native screen
+	  resolution.
+
+config FB_PDP_QUEUE_CLUT
+	depends on FB_PDP
+	def_bool n
+
+config FB_PDP_VID
+	bool "Video plane framebuffer"
+	depends on FB_PDP
+	default y
+	help
+	  Frame buffer driver for the video plane of the PDP.
+
+config FB_PDP_VID_VIDEOMEM
+	int "Video framebuffer memory"
+	depends on FB_PDP_VID
+	default 1048576
+	help
+	  Default amount of video memory in bytes for the video framebuffer.
+
+config FB_PDP_USERMEM
+	bool "User-provided memory support"
+	depends on FB_PDP
+	help
+	  Provides an ioctl which allows userland code to provide memory for
+	  use as framebuffers. See Documentation/fb/pdpfb.txt for details.
+
+config FB_PDP_PDUMP
+	bool "Generate Pdump trace to kernel log"
+	depends on FB_PDP
+	help
+	  Generates a Pdump trace of PDP accesses to the kernel log which can be
+	  extracted by a script to run on a simulator.
+
+	  If unsure, say N.
+
+choice
+	prompt "Pdump format"
+	depends on FB_PDP_PDUMP
+	default FB_PDP_PDUMP_V1
+
+config FB_PDP_PDUMP_V1
+	bool "Version 1"
+
+config FB_PDP_PDUMP_V2
+	bool "Version 2"
+
+endchoice
+
 config FB_VIRTUAL
 	tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
 	depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index e8bae8d..b004501 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -150,6 +150,9 @@
 obj-$(CONFIG_FB_JZ4740)		  += jz4740_fb.o
 obj-$(CONFIG_FB_PUV3_UNIGFX)      += fb-puv3.o
 obj-$(CONFIG_FB_HYPERV)		  += hyperv_fb.o
+obj-$(CONFIG_FB_PDP)		  += pdpfb.o
+obj-$(CONFIG_FB_PDP)		  += pdpfb_gfx.o
+obj-$(CONFIG_FB_PDP_VID)	  += pdpfb_vid.o
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_UVESA)            += uvesafb.o
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index d5ab658..3568fbe 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -148,6 +148,12 @@
 	  If you have a HX-8357 LCD panel, say Y to enable its LCD control
 	  driver.
 
+config LCD_IMGPDI
+	tristate "Imagination Technologies PDI LCD Driver"
+	help
+	  If you have an SoC with an Imagination Technologies Panel Display
+	  Interface (PDI) block, say Y to enable its control driver.
+
 endif # LCD_CLASS_DEVICE
 
 #
@@ -425,6 +431,14 @@
 	  If you have an Austrian Microsystems AS3711 say Y to enable the
 	  backlight driver.
 
+config BACKLIGHT_TZ1090_AUXDAC
+	tristate "TZ1090 Aux-DAC based LCD Backlight"
+	depends on SOC_TZ1090
+	help
+	  This driver controls the level and power of an LCD backlight using the
+	  auxiliary DAC of the TZ1090 SoC. Say Y if your board uses this
+	  hardware to control backlight level.
+
 endif # BACKLIGHT_CLASS_DEVICE
 
 endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 92711fe..d4d5ce2 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_LCD_HX8357)		+= hx8357.o
 obj-$(CONFIG_LCD_ILI922X)		+= ili922x.o
 obj-$(CONFIG_LCD_ILI9320)		+= ili9320.o
+obj-$(CONFIG_LCD_IMGPDI)		+= imgpdi_lcd.o
 obj-$(CONFIG_LCD_L4F00242T03)		+= l4f00242t03.o
 obj-$(CONFIG_LCD_LD9040)		+= ld9040.o
 obj-$(CONFIG_LCD_LMS283GF05)		+= lms283gf05.o
@@ -49,4 +50,5 @@
 obj-$(CONFIG_BACKLIGHT_SAHARA)		+= kb3886_bl.o
 obj-$(CONFIG_BACKLIGHT_TOSA)		+= tosa_bl.o
 obj-$(CONFIG_BACKLIGHT_TPS65217)	+= tps65217_bl.o
+obj-$(CONFIG_BACKLIGHT_TZ1090_AUXDAC)	+= tz1090_auxdac_bl.o
 obj-$(CONFIG_BACKLIGHT_WM831X)		+= wm831x_bl.o
diff --git a/drivers/video/backlight/imgpdi_lcd.c b/drivers/video/backlight/imgpdi_lcd.c
new file mode 100644
index 0000000..5efa093
--- /dev/null
+++ b/drivers/video/backlight/imgpdi_lcd.c
@@ -0,0 +1,416 @@
+/*
+ * Imagination Technologies Panel Display Interface (PDI).
+ *
+ * Copyright (C) 2012 Imagination Technologies
+ *
+ * Based on platform_lcd.c:
+ * Copyright 2008 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <video/imgpdi_lcd.h>
+
+/* Register offsets from base address */
+
+#define PDI_SETUP		0x0000
+#define PDI_TIMING0		0x0004
+#define PDI_TIMING1		0x0008
+#define PDI_COREID		0x0040
+#define PDI_COREREVISION	0x0050
+
+/* Register field descriptions */
+
+#define PDI_SETUP_BLNKLVL	(1 << 6)	/* panel clock level during blanking */
+#define PDI_SETUP_BLNK		(1 << 5)	/* clock blanking enable */
+#define PDI_SETUP_PWR		(1 << 4)	/* panel power enable */
+#define PDI_SETUP_EN		(1 << 3)	/* panel enable */
+#define PDI_SETUP_GDEN		(1 << 2)	/* GD enable (active mode) */
+#define PDI_SETUP_NFEN		(1 << 1)	/* NF enable (active mode) */
+#define PDI_SETUP_CR		(1 << 0)	/* conversion mode active */
+
+#define PDI_TIMING0_PWRSVGD	(0xf << 24)	/* delay in PCLKs-1 HSYNC to PWRSV,GD */
+#define PDI_TIMING0_PWRSVGD_S	24
+#define PDI_TIMING0_LSDEL	(0x7f << 16)	/* delay in PCLKs-1 HSYNC to LS */
+#define PDI_TIMING0_LSDEL_S	16
+#define PDI_TIMING0_PWRSV2GD2	(0x3ff << 0)	/* delay in PCLKs-1 LS to end of PWRSV,GD */
+#define PDI_TIMING0_PWRSV2GD2_S	0
+
+#define PDI_TIMING1_NLDEL	(0xf << 16)	/* delay in PCLKs-1 HSYNC to NL */
+#define PDI_TIMING1_NLDEL_S	16
+#define PDI_TIMING1_ACBDEL	(0x3ff << 0)	/* delay in PCLKs-1 NL to end of ACB */
+#define PDI_TIMING1_ACBDEL_S	0
+
+#define PDI_COREID_GROUPID	(0xff << 24)	/* identifies IMG IP family group */
+#define PDI_COREID_GROUPID_S	24
+#define PDI_COREID_COREID	(0xff << 16)	/* identifies member of IP group */
+#define PDI_COREID_COREID_S	16
+#define PDI_COREID_CONFIG	(0xffff << 0)	/* configuration of core */
+#define PDI_COREID_CONFIG_S	0
+
+#define PDI_COREREV_MAJOR	(0xff << 16)	/* family major release revision */
+#define PDI_COREREV_MAJOR_S	16
+#define PDI_COREREV_MINOR	(0xff << 8)	/* core minor release revision */
+#define PDI_COREREV_MINOR_S	8
+#define PDI_COREREV_MAINT	(0xff << 0)	/* maintenance release revision */
+#define PDI_COREREV_MAINT_S	0
+
+struct imgpdi_lcd {
+	struct device		*us;
+	struct lcd_device	*lcd;
+	struct imgpdi_lcd_pdata	*pdata;
+	void __iomem		*base;
+	struct clk		*clk;
+
+	unsigned int		 power;
+	unsigned int		 suspended:1;
+};
+
+static struct imgpdi_lcd *to_our_lcd(struct lcd_device *lcd)
+{
+	return lcd_get_data(lcd);
+}
+
+static void imgpdi_write(struct imgpdi_lcd *plcd,
+			 unsigned int reg_offs, unsigned int data)
+{
+	iowrite32(data, plcd->base + reg_offs);
+}
+
+static unsigned int imgpdi_read(struct imgpdi_lcd *plcd,
+				unsigned int reg_offs)
+{
+	return ioread32(plcd->base + reg_offs);
+}
+
+static void imgpdi_configure_en(struct imgpdi_lcd *plcd)
+{
+	struct imgpdi_lcd_pdata *pdata = plcd->pdata;
+	struct imgpdi_lcd_timings *active = pdata->active;
+	unsigned int pdi_setup, pdi_timing0, pdi_timing1;
+
+	dev_dbg(plcd->us, "enable\n");
+
+	/* activate pdi */
+	if (active)
+		pdi_setup = PDI_SETUP_CR; /* active */
+	else
+		pdi_setup = 0; /* bypass */
+	imgpdi_write(plcd, PDI_SETUP, pdi_setup);
+
+	if (active) {
+		/* set up signal delays if in active mode */
+		pdi_timing0 = (active->pwrsvgd  - 1) << PDI_TIMING0_PWRSVGD_S |
+			      (active->ls       - 1) << PDI_TIMING0_LSDEL_S   |
+			      (active->pwrsvgd2 - 1) << PDI_TIMING0_PWRSV2GD2_S;
+		pdi_timing1 = (active->nl       - 1) << PDI_TIMING1_NLDEL_S   |
+			      (active->acb      - 1) << PDI_TIMING1_ACBDEL_S;
+		imgpdi_write(plcd, PDI_TIMING0, pdi_timing0);
+		imgpdi_write(plcd, PDI_TIMING1, pdi_timing1);
+
+		if (active->gatedriver_en)
+			pdi_setup |= PDI_SETUP_GDEN;
+		if (active->newframe_en)
+			pdi_setup |= PDI_SETUP_NFEN;
+		if (active->blanking_en) {
+			pdi_setup |= PDI_SETUP_BLNK;
+			if (active->blanking_level)
+				pdi_setup |= PDI_SETUP_BLNKLVL;
+		}
+		imgpdi_write(plcd, PDI_SETUP, pdi_setup);
+	} else {
+		/* enable panel */
+		pdi_setup |= PDI_SETUP_EN;
+		imgpdi_write(plcd, PDI_SETUP, pdi_setup);
+	}
+}
+
+static void imgpdi_configure_dis(struct imgpdi_lcd *plcd)
+{
+	dev_dbg(plcd->us, "disable\n");
+
+	/* disable panel */
+	imgpdi_write(plcd, PDI_SETUP, 0);
+}
+
+static void imgpdi_configure_pwr_en(struct imgpdi_lcd *plcd)
+{
+	struct imgpdi_lcd_pdata *pdata = plcd->pdata;
+	unsigned int pdi_setup;
+
+	if (!pdata->active) {
+		dev_dbg(plcd->us, "power enable\n");
+
+		/* after 20ms enable power */
+		msleep(20);
+		pdi_setup = imgpdi_read(plcd, PDI_SETUP);
+		pdi_setup |= PDI_SETUP_PWR;
+		imgpdi_write(plcd, PDI_SETUP, pdi_setup);
+	}
+}
+
+static void imgpdi_configure_pwr_dis(struct imgpdi_lcd *plcd)
+{
+	struct imgpdi_lcd_pdata *pdata = plcd->pdata;
+	unsigned int pdi_setup;
+
+	if (!pdata->active) {
+		dev_dbg(plcd->us, "power disable\n");
+
+		pdi_setup = imgpdi_read(plcd, PDI_SETUP);
+		pdi_setup &= ~PDI_SETUP_PWR;
+		imgpdi_write(plcd, PDI_SETUP, pdi_setup);
+		/* wait 20ms before disabling panel */
+		msleep(20);
+	}
+}
+
+static int imgpdi_lcd_get_power(struct lcd_device *lcd)
+{
+	struct imgpdi_lcd *plcd = to_our_lcd(lcd);
+
+	return plcd->power;
+}
+
+static int imgpdi_lcd_set_power(struct lcd_device *lcd, int power)
+{
+	struct imgpdi_lcd *plcd = to_our_lcd(lcd);
+
+	if (plcd->suspended)
+		power = FB_BLANK_POWERDOWN;
+
+	/*
+	 * We use the following blank state mapping:
+	 * 0:	FB_BLANK_UNBLANK:		en + power
+	 * 1:	FB_BLANK_NORMAL:		en + power
+	 * 2:	FB_BLANK_VSYNC_SUSPEND:		en
+	 * 3:	FB_BLANK_HSYNC_SUSPEND:		en
+	 * 4:	FB_BLANK_POWERDOWN:		(off)
+	 */
+	if (power < plcd->power) {
+		/* power up */
+		switch (plcd->power) {
+		case FB_BLANK_POWERDOWN:
+			if (power >= FB_BLANK_POWERDOWN)
+				break;
+			imgpdi_configure_en(plcd);
+			/* fall through */
+		case FB_BLANK_HSYNC_SUSPEND:
+		case FB_BLANK_VSYNC_SUSPEND:
+			if (power >= FB_BLANK_VSYNC_SUSPEND)
+				break;
+			imgpdi_configure_pwr_en(plcd);
+		default:
+			break;
+		}
+	} else {
+		/* power down */
+		switch (plcd->power) {
+		case FB_BLANK_UNBLANK:
+		case FB_BLANK_NORMAL:
+			if (power <= FB_BLANK_NORMAL)
+				break;
+			imgpdi_configure_pwr_dis(plcd);
+			/* fall through */
+		case FB_BLANK_VSYNC_SUSPEND:
+		case FB_BLANK_HSYNC_SUSPEND:
+			if (power <= FB_BLANK_HSYNC_SUSPEND)
+				break;
+			imgpdi_configure_dis(plcd);
+		default:
+			break;
+		}
+	}
+	plcd->power = power;
+
+	return 0;
+}
+
+static int imgpdi_lcd_match(struct lcd_device *lcd, struct fb_info *info)
+{
+	struct imgpdi_lcd *plcd = to_our_lcd(lcd);
+	struct imgpdi_lcd_pdata *pdata = plcd->pdata;
+
+	if (pdata->match_fb)
+		return pdata->match_fb(pdata, info);
+
+	return plcd->us->parent == info->device;
+}
+
+static struct lcd_ops imgpdi_lcd_ops = {
+	.get_power	= imgpdi_lcd_get_power,
+	.set_power	= imgpdi_lcd_set_power,
+	.check_fb	= imgpdi_lcd_match,
+};
+
+static void imgpdi_detect_state(struct imgpdi_lcd *plcd)
+{
+	unsigned int pdi_setup;
+
+	pdi_setup = imgpdi_read(plcd, PDI_SETUP);
+
+	/* see imgpdi_lcd_set_power() for how blanking states map to PWR, EN */
+	if (pdi_setup & PDI_SETUP_PWR)
+		plcd->power = FB_BLANK_UNBLANK;
+	else if (pdi_setup & PDI_SETUP_EN)
+		plcd->power = FB_BLANK_HSYNC_SUSPEND;
+	else
+		plcd->power = FB_BLANK_POWERDOWN;
+
+	dev_dbg(plcd->us, "initial power = %d\n", plcd->power);
+}
+
+static void imgpdi_show_id(struct imgpdi_lcd *plcd)
+{
+	unsigned int coreid, corerev;
+
+	coreid = imgpdi_read(plcd, PDI_COREID);
+	corerev = imgpdi_read(plcd, PDI_COREREVISION);
+
+	dev_info(plcd->us, "IMG PDI (id: %02x:%02x:%04x, revision: %u.%u.%u) probed successfully\n",
+		 (coreid  & PDI_COREID_GROUPID) >> PDI_COREID_GROUPID_S,
+		 (coreid  & PDI_COREID_COREID)  >> PDI_COREID_COREID_S,
+		 (coreid  & PDI_COREID_CONFIG)  >> PDI_COREID_CONFIG_S,
+		 (corerev & PDI_COREREV_MAJOR)  >> PDI_COREREV_MAJOR_S,
+		 (corerev & PDI_COREREV_MINOR)  >> PDI_COREREV_MINOR_S,
+		 (corerev & PDI_COREREV_MAINT)  >> PDI_COREREV_MAINT_S);
+}
+
+static int imgpdi_lcd_probe(struct platform_device *pdev)
+{
+	struct imgpdi_lcd_pdata *pdata;
+	struct imgpdi_lcd *plcd;
+	struct device *dev = &pdev->dev;
+	struct resource *regs;
+	int err;
+
+	/* get register memory */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(dev, "no register memory resource\n");
+		return -EINVAL;
+	}
+
+	/* get platform data */
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(dev, "no platform data supplied\n");
+		return -EINVAL;
+	}
+
+	/* create private data */
+	plcd = devm_kzalloc(&pdev->dev, sizeof(struct imgpdi_lcd),
+			    GFP_KERNEL);
+	if (!plcd) {
+		dev_err(dev, "no memory for state\n");
+		return -ENOMEM;
+	}
+	plcd->us = dev;
+	plcd->pdata = pdata;
+
+	/* ioremap register memory */
+	plcd->base = devm_ioremap(dev, regs->start, resource_size(regs));
+	if (!plcd->base) {
+		dev_err(dev, "could not ioremap register memory\n");
+		return -ENOMEM;
+	}
+
+	/* get the clock */
+	plcd->clk = clk_get(dev, "pdi");
+	if (IS_ERR(plcd->clk)) {
+		dev_err(&pdev->dev, "could not get pdi clock resource\n");
+		return PTR_ERR(plcd->clk);
+	}
+	clk_prepare_enable(plcd->clk);
+
+	/* detect initial state */
+	imgpdi_detect_state(plcd);
+
+	/* register lcd device */
+	plcd->lcd = lcd_device_register(dev_name(dev), dev,
+					plcd, &imgpdi_lcd_ops);
+	if (IS_ERR(plcd->lcd)) {
+		dev_err(dev, "cannot register lcd device\n");
+		err = PTR_ERR(plcd->lcd);
+		goto err;
+	}
+
+	platform_set_drvdata(pdev, plcd);
+
+	imgpdi_show_id(plcd);
+
+	return 0;
+
+ err:
+	clk_disable_unprepare(plcd->clk);
+	clk_put(plcd->clk);
+	return err;
+}
+
+static int imgpdi_lcd_remove(struct platform_device *pdev)
+{
+	struct imgpdi_lcd *plcd = platform_get_drvdata(pdev);
+
+	lcd_device_unregister(plcd->lcd);
+	clk_disable_unprepare(plcd->clk);
+	clk_put(plcd->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int imgpdi_lcd_suspend(struct platform_device *pdev, pm_message_t st)
+{
+	struct imgpdi_lcd *plcd = platform_get_drvdata(pdev);
+
+	plcd->suspended = 1;
+	imgpdi_lcd_set_power(plcd->lcd, plcd->power);
+	clk_disable_unprepare(plcd->clk);
+
+	return 0;
+}
+
+static int imgpdi_lcd_resume(struct platform_device *pdev)
+{
+	struct imgpdi_lcd *plcd = platform_get_drvdata(pdev);
+
+	plcd->suspended = 0;
+	clk_prepare_enable(plcd->clk);
+	imgpdi_lcd_set_power(plcd->lcd, plcd->power);
+
+	return 0;
+}
+#else
+#define imgpdi_lcd_suspend NULL
+#define imgpdi_lcd_resume NULL
+#endif
+
+static struct platform_driver imgpdi_lcd_driver = {
+	.driver		= {
+		.name	= "imgpdi-lcd",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= imgpdi_lcd_probe,
+	.remove		= imgpdi_lcd_remove,
+	.suspend        = imgpdi_lcd_suspend,
+	.resume         = imgpdi_lcd_resume,
+};
+
+module_platform_driver(imgpdi_lcd_driver);
+
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("ImgTec Panel Display Interface (PDI) LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/tz1090_auxdac_bl.c b/drivers/video/backlight/tz1090_auxdac_bl.c
new file mode 100644
index 0000000..1a83781
--- /dev/null
+++ b/drivers/video/backlight/tz1090_auxdac_bl.c
@@ -0,0 +1,163 @@
+/*
+ * TZ1090 Aux-DAC based Backlight Driver
+ *
+ * Copyright (C) 2012 Imagination Technologies
+ *
+ * Based on generic_bl.c:
+ * Copyright (c) 2004-2008 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <video/tz1090_auxdac_bl.h>
+#include <asm/soc-tz1090/afe.h>
+
+static int tz1090_auxdac_bl_intensity;
+static struct backlight_device *tz1090_auxdac_bl_dev;
+static struct tz1090_auxdac_bl_pdata *bl_machinfo;
+
+static int tz1090_auxdac_bl_send_intensity(struct backlight_device *bd)
+{
+	int intensity = bd->props.brightness;
+	int blank = bd->props.fb_blank;
+
+	/* allow sysfs to override blank of framebuffer */
+	if (bd->props.power > blank)
+		blank = bd->props.power;
+
+	if (blank != FB_BLANK_UNBLANK)
+		intensity = 0;
+	if (bd->props.state & BL_CORE_FBBLANK)
+		intensity = 0;
+	if (bd->props.state & BL_CORE_SUSPENDED)
+		intensity = 0;
+
+	dev_dbg(&bd->dev, "intensity=%d, blank=%d\n",
+		intensity, blank);
+
+	/* adjust Aux-DAC value */
+	comet_afe_auxdac_set_value(intensity);
+	comet_afe_auxdac_set_standby(blank > FB_BLANK_NORMAL);
+	comet_afe_auxdac_set_power(blank != FB_BLANK_POWERDOWN);
+
+	/* optional platform hook to further control backlight power */
+	if (bl_machinfo->set_bl_power)
+		bl_machinfo->set_bl_power(blank);
+	tz1090_auxdac_bl_intensity = intensity;
+
+	return 0;
+}
+
+static int tz1090_auxdac_bl_get_intensity(struct backlight_device *bd)
+{
+	return tz1090_auxdac_bl_intensity;
+}
+
+static int tz1090_auxdac_bl_check_fb(struct backlight_device *bd,
+				    struct fb_info *info)
+{
+	if (bl_machinfo->match_fb)
+		return bl_machinfo->match_fb(bl_machinfo, info);
+
+	return 0;
+}
+
+static const struct backlight_ops tz1090_auxdac_bl_ops = {
+	.options	= BL_CORE_SUSPENDRESUME,
+	.update_status	= tz1090_auxdac_bl_send_intensity,
+	.get_brightness	= tz1090_auxdac_bl_get_intensity,
+	.check_fb	= tz1090_auxdac_bl_check_fb,
+};
+
+static int tz1090_auxdac_bl_probe(struct platform_device *pdev)
+{
+	struct backlight_properties props;
+	struct tz1090_auxdac_bl_pdata *machinfo = pdev->dev.platform_data;
+	const char *name = "tz1090-auxdac-bl";
+	struct backlight_device *bd;
+	int err;
+
+	/* get control of the Aux DAC which controls the backlight intensity */
+	err = comet_afe_auxdac_get();
+	if (err) {
+		dev_err(&pdev->dev, "Could not get Comet Aux-DAC\n");
+		return err;
+	}
+
+	bl_machinfo = machinfo;
+	if (machinfo->name)
+		name = machinfo->name;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = 0xff;
+	bd = backlight_device_register(name, &pdev->dev, NULL,
+				       &tz1090_auxdac_bl_ops, &props);
+	if (IS_ERR(bd)) {
+		err = PTR_ERR(bd);
+		dev_err(&pdev->dev, "Could not register backlight device (err=%d)\n",
+			err);
+		goto err;
+	}
+
+	tz1090_auxdac_bl_dev = bd;
+	platform_set_drvdata(pdev, bd);
+
+	bd->props.power = FB_BLANK_UNBLANK;
+	/* detect the current settings, which may have been set by bootloader */
+	bd->props.fb_blank = FB_BLANK_POWERDOWN;
+	bd->props.brightness = machinfo->default_intensity;
+	if (comet_afe_auxdac_get_power()) {
+		if (comet_afe_auxdac_get_standby()) {
+			bd->props.fb_blank = FB_BLANK_NORMAL;
+		} else {
+			bd->props.fb_blank = FB_BLANK_UNBLANK;
+			bd->props.brightness = comet_afe_auxdac_get_value();
+		}
+	}
+	backlight_update_status(bd);
+
+	dev_info(&pdev->dev, "TZ1090 Aux-DAC backlight driver initialized\n");
+	return 0;
+err:
+	comet_afe_auxdac_put();
+	return err;
+}
+
+static int tz1090_auxdac_bl_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bd = platform_get_drvdata(pdev);
+
+	bd->props.power = 0;
+	bd->props.brightness = 0;
+	backlight_update_status(bd);
+
+	backlight_device_unregister(bd);
+
+	comet_afe_auxdac_put();
+	return 0;
+}
+
+static struct platform_driver tz1090_auxdac_bl_driver = {
+	.probe		= tz1090_auxdac_bl_probe,
+	.remove		= tz1090_auxdac_bl_remove,
+	.driver		= {
+		.name	= "tz1090-auxdac-bl",
+	},
+};
+
+module_platform_driver(tz1090_auxdac_bl_driver);
+
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("TZ1090 Aux-DAC Backlight Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index bc922c4..6bbaed3 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -6,7 +6,7 @@
 
 config VGA_CONSOLE
 	bool "VGA text console" if EXPERT || !X86
-	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
+	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && !METAG
 	default y
 	help
 	  Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/pdpfb.c b/drivers/video/pdpfb.c
new file mode 100644
index 0000000..cef8145
--- /dev/null
+++ b/drivers/video/pdpfb.c
@@ -0,0 +1,2447 @@
+/*
+ * PDP Framebuffer
+ *
+ * Copyright (c) 2008-2012 Imagination Technologies Ltd.
+ * Parts Copyright (C) 2009 Nokia Corporation
+ *	(custom ISR code derived from drivers/video/omap2/dss/dispc.c)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <video/pdpfb.h>
+#include "pdpfb.h"
+#include "pdpfb_regs.h"
+#include "pdpfb_gfx.h"
+#include "pdpfb_vid.h"
+
+/* Physical address and length of video memory */
+static ulong videomem_base;
+module_param(videomem_base, ulong, 0);
+static ulong videomem_len;
+module_param(videomem_len, ulong, 0);
+
+/* Physical address and length of graphics plane framebuffer */
+#ifndef PDP_SHARED_BASE
+static ulong gfx_videomem_base;
+module_param(gfx_videomem_base, ulong, 0);
+#endif
+static ulong gfx_videomem_len = CONFIG_FB_PDP_GFX_VIDEOMEM;
+module_param(gfx_videomem_len, ulong, 0);
+
+/* Physical address and length of video plane framebuffer */
+#ifdef CONFIG_FB_PDP_VID
+#ifndef PDP_SHARED_BASE
+static ulong vid_videomem_base;
+module_param(vid_videomem_base, ulong, 0);
+#endif
+static ulong vid_videomem_len = CONFIG_FB_PDP_VID_VIDEOMEM;
+module_param(vid_videomem_len, ulong, 0);
+#endif
+
+/* report number of VBLANKs taken to update LUT */
+/*#define LUT_UPD_REPORT_PERF*/
+
+/* Groups of registers that may need updating on VEVENT */
+#define PDP_UPDATE_SYNC			0x00000001
+#define PDP_UPDATE_CLUT			0x00000002
+
+struct pdpfb_lut {
+	spinlock_t lock;
+	u32 palette[PDP_PALETTE_NR];	/* hardware specific format */
+#ifdef CONFIG_FB_PDP_QUEUE_CLUT
+	u16 count;			/* number of items in queue */
+	u8 position;			/* next index to update */
+	u8 queue[PDP_PALETTE_NR];	/* circular queue */
+	u8 rqueue[PDP_PALETTE_NR];	/* reverse of queue */
+#else
+	u16 min, max;			/* first and last item to update */
+#endif
+};
+
+struct pdpfb_update_sync {
+	u32 hsync1, hsync2, hsync3;
+	u32 hde_ctrl;
+	u32 str1blend2;
+};
+
+struct pdpfb_updates {
+	spinlock_t lock;
+	u32 updates;			/* PDP_UPDATE_* */
+	struct pdpfb_update_sync sync;	/* PDP_UPDATE_SYNC */
+};
+
+struct pdpfb_isr_data {
+	pdpfb_isr_t	isr;
+	void		*arg;
+	u32		mask;
+};
+
+#define PDPFB_MAX_NR_ISRS 8
+
+
+#ifdef CONFIG_FB_PDP_VID
+#define PDPFB_STREAM_NR 2
+#else
+#define PDPFB_STREAM_NR 1
+#endif
+
+struct pdpfb_rgb_fmt {
+	struct fb_bitfield r, g, b;
+};
+
+#define PDPFB_MEMPOOLF_KERNEL	0x1	/* Kernel allocated memory */
+
+struct pdpfb_mem_pool {
+	unsigned long phys_base;
+	void *base;
+	unsigned long size;
+	unsigned int flags;
+	unsigned long allocated;
+};
+
+static char *pdpfb_mem_pool_names[] = {
+	[PDPFB_MEMPOOL_MEM]	= "videomem",
+	[PDPFB_MEMPOOL_GFXMEM]	= "gfx videomem",
+	[PDPFB_MEMPOOL_VIDMEM]	= "vid videomem",
+	[PDPFB_MEMPOOL_USER]	= "usermem",
+};
+
+enum pdpfb_power_state {
+	PDPFB_POWER_NA			= -1,
+	PDPFB_POWER_BACKLIT		= FB_BLANK_UNBLANK,
+	PDPFB_POWER_PANEL_POWERED	= FB_BLANK_NORMAL,
+	PDPFB_POWER_SYNC_ENABLED	= FB_BLANK_VSYNC_SUSPEND,
+	PDPFB_POWER_PANEL_ENABLED	= FB_BLANK_HSYNC_SUSPEND,
+	PDPFB_POWER_FULLY_OFF		= FB_BLANK_POWERDOWN,
+	PDPFB_POWER_MAX
+};
+
+struct pdpfb_priv {
+	void __iomem *base;
+	struct pdpfb_mem_pool pools[PDPFB_MEMPOOL_NR_POOLS];
+	unsigned int irq;
+	struct clk *pdp_clk;
+	struct clk *pixel_clk;
+	int pdp_clk_en;
+	int pixel_clk_en;
+	struct device *device;
+	struct fb_videomode mode;
+	u32 pseudo_palette[PDP_PALETTE_NR];
+	enum pdpfb_power_state power;
+	struct pdpfb_updates upd;
+	struct pdpfb_lut lut;
+	struct pdpfb_rgb_fmt palette_fmt;
+	u32 bgnd_col;
+	struct pdpfb_stream *streams[PDPFB_STREAM_NR];
+
+	spinlock_t irq_lock;
+	int isr_disable;
+	struct pdpfb_isr_data registered_isr[PDPFB_MAX_NR_ISRS];
+
+	unsigned int vsync_count;
+
+	int final_blank;
+	unsigned int reconfiguring:1;
+	unsigned int no_blank_emit:1;
+};
+
+/* The one and only */
+static struct pdpfb_priv *pdpfb_priv;
+
+static int pdpfb_init_mempool(struct pdpfb_priv *priv, int type,
+			      unsigned long phys, unsigned long len);
+
+static void pdpfb_pdp_clk_enable(struct pdpfb_priv *priv)
+{
+	if (!priv->pdp_clk_en) {
+		clk_enable(priv->pdp_clk);
+		priv->pdp_clk_en = 1;
+	}
+}
+
+static void pdpfb_pdp_clk_disable(struct pdpfb_priv *priv)
+{
+	if (priv->pdp_clk_en) {
+		clk_disable(priv->pdp_clk);
+		priv->pdp_clk_en = 0;
+	}
+}
+
+static void pdpfb_pixel_clk_enable(struct pdpfb_priv *priv)
+{
+	if (!priv->pixel_clk_en) {
+		clk_prepare_enable(priv->pixel_clk);
+		priv->pixel_clk_en = 1;
+	}
+}
+
+static void pdpfb_pixel_clk_disable(struct pdpfb_priv *priv)
+{
+	if (priv->pixel_clk_en) {
+		clk_disable_unprepare(priv->pixel_clk);
+		priv->pixel_clk_en = 0;
+	}
+}
+
+/* Register access */
+
+#define PDUMP_TAG "pdpfb pdump: "
+
+#ifdef CONFIG_FB_PDP_PDUMP_V1
+#define PDUMP_WR_FMT "WRW :REG:%08x %08x\n"
+#define PDUMP_RD_FMT "RDW :REG:%08x\n"
+#endif
+
+#ifdef CONFIG_FB_PDP_PDUMP_V2
+#define PDUMP_WR_FMT "WRW :REGMET_PDP:%#x %#x\n"
+#define PDUMP_RD_FMT "RDW :REGMET_PDP:%#x\n"
+#endif
+
+void pdpfb_write(struct pdpfb_priv *priv,
+		      unsigned int reg_offs, unsigned int data)
+{
+#ifdef CONFIG_FB_PDP_PDUMP
+	printk(KERN_INFO PDUMP_TAG PDUMP_WR_FMT,
+	       reg_offs, data);
+#endif
+	iowrite32(data, priv->base + reg_offs);
+}
+
+unsigned int pdpfb_read(struct pdpfb_priv *priv,
+			     unsigned int reg_offs)
+{
+#ifdef CONFIG_FB_PDP_PDUMP
+	printk(KERN_INFO PDUMP_TAG PDUMP_RD_FMT,
+	       reg_offs);
+#endif
+	return ioread32(priv->base + reg_offs);
+}
+
+/* LUT queue */
+
+static void pdpfb_lut_init(struct pdpfb_priv *priv)
+{
+	spin_lock_init(&priv->lut.lock);
+#ifdef CONFIG_FB_PDP_QUEUE_CLUT
+	priv->lut.count = 0;
+#else
+	priv->lut.min = PDP_PALETTE_NR;
+	priv->lut.max = 0;
+#endif
+}
+
+/*
+ * lut_num_entries: Get number of LUT entries to write.
+ * lut_upd_entries: Write LUT updates. priv->lut.lock must be held.
+ */
+#ifdef CONFIG_FB_PDP_QUEUE_CLUT
+
+static inline int _pdpfb_lut_num_entries(struct pdpfb_priv *priv)
+{
+	return priv->lut.count;
+}
+
+static inline int _pdpfb_lut_upd(struct pdpfb_priv *priv)
+{
+	int i = priv->lut.count;
+	u8 pos = priv->lut.position;
+	/* pos intentionally wraps around */
+	for (; i; --i, ++pos) {
+		u8 id = priv->lut.queue[pos];
+		u32 val = priv->lut.palette[id];
+		pdpfb_write(priv, PDP_PALETTE1,
+				id << PDP_PALETTE1_LUTADDR_OFFSET);
+		pdpfb_write(priv, PDP_PALETTE2, val);
+		if (unlikely(val != pdpfb_read(priv, PDP_PALETTE2)))
+			break;
+	}
+	priv->lut.count = i;
+	priv->lut.position = pos;
+	return i;
+}
+
+static inline void _pdpfb_queue_lut_upd(struct pdpfb_priv *priv,
+					u8 id, u32 palette_rgb)
+{
+	u8 position = priv->lut.position;
+	u16 count = priv->lut.count;
+	u8 index = priv->lut.rqueue[id];
+	u8 relindex = index - position;
+
+	priv->lut.palette[id] = palette_rgb;
+	/* last queue index not pending, or referring to different id */
+	if (likely(relindex >= count || priv->lut.queue[index] != id)) {
+		priv->lut.rqueue[id] = index = position + count;
+		priv->lut.queue[index] = id;
+		priv->lut.count = count+1;
+	}
+}
+
+#else /*CONFIG_FB_PDP_QUEUE_CLUT*/
+
+static inline int _pdpfb_lut_num_entries(struct pdpfb_priv *priv)
+{
+	if (priv->lut.max < priv->lut.min)
+		return 0;
+	return 1 + priv->lut.max - priv->lut.min;
+}
+
+static inline int _pdpfb_lut_upd(struct pdpfb_priv *priv)
+{
+	int i;
+	for (i = priv->lut.min; i <= priv->lut.max; ++i) {
+		pdpfb_write(priv, PDP_PALETTE1,
+				i << PDP_PALETTE1_LUTADDR_OFFSET);
+		pdpfb_write(priv, PDP_PALETTE2, priv->lut.palette[i]);
+	}
+	priv->lut.min = PDP_PALETTE_NR;
+	priv->lut.max = 0;
+	/* read any PDP register, without this we get flickering artifacts */
+	pdpfb_read(priv, PDP_SIGNAT);
+	return 0;
+}
+
+static inline void _pdpfb_queue_lut_upd(struct pdpfb_priv *priv,
+					u8 id, u32 palette_rgb)
+{
+	priv->lut.palette[id] = palette_rgb;
+	if (id < priv->lut.min)
+		priv->lut.min = id;
+	if (id > priv->lut.max)
+		priv->lut.max = id;
+}
+
+#endif /*CONFIG_FB_PDP_QUEUE_CLUT*/
+
+/*
+ * Write any queued LUT updates (for use in VBLANK).
+ * Must not hold priv->upd.lock going into this function.
+ */
+static void pdpfb_lut_upd(struct pdpfb_priv *priv)
+{
+	int new_count;
+#ifdef LUT_UPD_REPORT_PERF
+	int init_count;
+#endif
+
+	/* Don't bother updating LUT unless it's switched on */
+	if (!GET_FIELD(pdpfb_read(priv, PDP_STR1SURF), PDP_STR1SURF_USELUT))
+		return;
+
+	spin_lock(&priv->lut.lock);
+#ifdef LUT_UPD_REPORT_PERF
+	init_count = _pdpfb_lut_num_entries(priv);
+#endif
+
+	new_count = _pdpfb_lut_upd(priv);
+	if (!new_count) {
+		/* Updating CLUT finished */
+		spin_lock(&priv->upd.lock);
+		priv->upd.updates &= ~PDP_UPDATE_CLUT;
+		spin_unlock(&priv->upd.lock);
+	}
+
+	spin_unlock(&priv->lut.lock);
+
+#ifdef LUT_UPD_REPORT_PERF
+	if (new_count != init_count) {
+		static unsigned int vevent_count;
+		static unsigned int luti_count;
+		++vevent_count;
+		luti_count += init_count-new_count;
+		if (!new_count) {
+			dev_dbg(priv->device,
+				"updated %u LUTs in %u VEVENTs (%u each)\n",
+				luti_count,
+				vevent_count,
+				luti_count / vevent_count);
+			vevent_count = 0;
+			luti_count = 0;
+		}
+	}
+#endif
+}
+
+static void pdpfb_sync_upd(struct pdpfb_priv *priv)
+{
+	pdpfb_write(priv, PDP_HSYNC1, priv->upd.sync.hsync1);
+	pdpfb_write(priv, PDP_HSYNC2, priv->upd.sync.hsync2);
+	pdpfb_write(priv, PDP_HSYNC3, priv->upd.sync.hsync3);
+
+	pdpfb_write(priv, PDP_HDECTRL, priv->upd.sync.hde_ctrl);
+
+	priv->upd.updates &= ~PDP_UPDATE_SYNC;
+}
+
+/* triggered on vevent0 in interrupt context */
+static void pdpfb_interrupt_upd(void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = (struct pdpfb_priv *)arg;
+
+	/* Update as many colour lookup table entries as possible */
+	if (priv->upd.updates & PDP_UPDATE_CLUT)
+		pdpfb_lut_upd(priv);
+	spin_lock(&priv->upd.lock);
+	/* Update fields that would cause artifacts elsewhere */
+	if (priv->upd.updates & PDP_UPDATE_SYNC)
+		pdpfb_sync_upd(priv);
+	/* Disable interrupt if no more updates */
+	if (!priv->upd.updates)
+		pdpfb_unregister_isr(pdpfb_interrupt_upd, arg,
+					PDPFB_IRQ_VEVENT0);
+	spin_unlock(&priv->upd.lock);
+}
+
+static inline void _pdpfb_start_queue_lut_upd(struct pdpfb_priv *priv,
+					      unsigned long *flags)
+{
+	spin_lock_irqsave(&priv->lut.lock, *flags);
+}
+
+/* must correspond to _pdpfb_start_queue_lut_upd */
+/*
+ * Finish queueing LUT entries: if the stream is currently in LUT mode,
+ * flag the CLUT update and arm the VEVENT0 handler to apply it; if not,
+ * the update stays queued until pdpfb_enable_palette() re-arms it.
+ */
+static inline void _pdpfb_end_queue_lut_upd(struct pdpfb_priv *priv,
+					    unsigned long *flags)
+{
+	/* Start updating CLUT */
+	spin_lock(&priv->upd.lock); /* interrupts are already disabled */
+	/* If not in lut mode, delay update until it's enabled */
+	if (GET_FIELD(pdpfb_read(priv, PDP_STR1SURF), PDP_STR1SURF_USELUT)) {
+		priv->upd.updates |= PDP_UPDATE_CLUT;
+		pdpfb_register_isr(pdpfb_interrupt_upd, priv,
+				   PDPFB_IRQ_VEVENT0);
+	}
+	spin_unlock(&priv->upd.lock);
+
+	spin_unlock_irqrestore(&priv->lut.lock, *flags);
+}
+
+/*
+ * Re-arm any palette updates that were queued while LUT mode was
+ * disabled, so they are applied on the next VEVENT0.
+ */
+void pdpfb_enable_palette(struct pdpfb_priv *priv)
+{
+	unsigned long flags;
+
+	/* restart any pending palette updates */
+	spin_lock_irqsave(&priv->lut.lock, flags);
+	if (_pdpfb_lut_num_entries(priv)) {
+		spin_lock(&priv->upd.lock);
+		priv->upd.updates |= PDP_UPDATE_CLUT;
+		pdpfb_register_isr(pdpfb_interrupt_upd, priv,
+				   PDPFB_IRQ_VEVENT0);
+		spin_unlock(&priv->upd.lock);
+	}
+	spin_unlock_irqrestore(&priv->lut.lock, flags);
+}
+
+/*
+ * Cancel in-flight palette updates (queued entries are retained and can
+ * be restarted by pdpfb_enable_palette()); drop the VEVENT0 handler if
+ * nothing else is pending.
+ */
+void pdpfb_disable_palette(struct pdpfb_priv *priv)
+{
+	unsigned long flags;
+
+	/* stop any pending palette updates */
+	spin_lock_irqsave(&priv->lut.lock, flags);
+	if (_pdpfb_lut_num_entries(priv)) {
+		spin_lock(&priv->upd.lock);
+		priv->upd.updates &= ~PDP_UPDATE_CLUT;
+		if (!priv->upd.updates)
+			pdpfb_unregister_isr(pdpfb_interrupt_upd, priv,
+					     PDPFB_IRQ_VEVENT0);
+		spin_unlock(&priv->upd.lock);
+	}
+	spin_unlock_irqrestore(&priv->lut.lock, flags);
+}
+
+/*
+ * Read and log the PDP core ID and its revision, decoded from the
+ * major.minor.maint byte fields of PDP_CORE_REV.
+ */
+static void pdpfb_ident(struct pdpfb_priv *priv)
+{
+	unsigned int coreid;
+	unsigned int corerev;
+
+	coreid = pdpfb_read(priv, PDP_CORE_ID);
+	corerev = pdpfb_read(priv, PDP_CORE_REV);
+
+	dev_info(priv->device,
+		 "PDP id: %#x revision: %d.%d.%d\n", coreid,
+		 (corerev >> 16) & 0xff,
+		 (corerev >> 8) & 0xff,
+		 corerev & 0xff);
+}
+
+/* Handler for all PDP interrupts */
+/*
+ * Top-level IRQ handler: ack all pending status bits, then dispatch to
+ * every registered sub-handler whose mask matches.  The ISR table is
+ * copied under irq_lock so sub-handlers may safely unregister themselves
+ * (or others) while we iterate.
+ */
+static irqreturn_t pdpfb_interrupt(int irq, void *dev_id)
+{
+	struct pdpfb_priv *priv = dev_id;
+	u32 int_stat;
+	int i;
+	struct pdpfb_isr_data *isr_data;
+	struct pdpfb_isr_data registered_isr[PDPFB_MAX_NR_ISRS];
+
+	int_stat = pdpfb_read(priv, PDP_INTSTAT);
+	pdpfb_write(priv, PDP_INTCLR, int_stat);
+
+	spin_lock(&priv->irq_lock);
+
+	if (priv->isr_disable) {
+		spin_unlock(&priv->irq_lock);
+		return IRQ_HANDLED;
+	}
+
+	/* make a copy and unlock, so that isrs can unregister themselves */
+	memcpy(registered_isr, priv->registered_isr,
+			sizeof(registered_isr));
+
+	spin_unlock(&priv->irq_lock);
+
+	for (i = 0; i < PDPFB_MAX_NR_ISRS; i++) {
+		isr_data = &registered_isr[i];
+
+		if (!isr_data->isr)
+			continue;
+
+		if (isr_data->mask & int_stat)
+			isr_data->isr(isr_data->arg, int_stat);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* priv->irq_lock has to be locked by the caller */
+/*
+ * Recompute the hardware interrupt-enable mask as the union of all
+ * registered handlers' masks (or 0 when ISRs are globally disabled),
+ * clearing stale status for any newly enabled bits.  The PDP clock is
+ * enabled around the register accesses.
+ */
+static void _pdpfb_set_irqs(struct pdpfb_priv *priv)
+{
+	u32 mask;
+	u32 old_mask;
+	int i;
+	struct pdpfb_isr_data *isr_data;
+
+	if (priv->isr_disable) {
+		pdpfb_write(priv, PDP_INTENAB, 0);
+		return;
+	}
+
+	mask = 0;
+	for (i = 0; i < PDPFB_MAX_NR_ISRS; i++) {
+		isr_data = &priv->registered_isr[i];
+
+		if (isr_data->isr == NULL)
+			continue;
+
+		mask |= isr_data->mask;
+	}
+
+	clk_enable(priv->pdp_clk);
+
+	old_mask = pdpfb_read(priv, PDP_INTENAB);
+	/* clear the irqstatus for newly enabled irqs */
+	pdpfb_write(priv, PDP_INTCLR, (mask ^ old_mask) & mask);
+
+	pdpfb_write(priv, PDP_INTENAB, mask);
+
+	clk_disable(priv->pdp_clk);
+}
+
+/* Globally mask PDP interrupts (handlers stay registered). */
+static void pdpfb_disable_isr(struct pdpfb_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	priv->isr_disable = 1;
+	_pdpfb_set_irqs(priv);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+/* Re-enable PDP interrupts for all currently registered handlers. */
+static void pdpfb_enable_isr(struct pdpfb_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	priv->isr_disable = 0;
+	_pdpfb_set_irqs(priv);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+/*
+ * pdpfb_register_isr() - register a handler for PDP interrupt bits.
+ * @isr:  callback invoked from pdpfb_interrupt() when @mask bits fire
+ * @arg:  opaque pointer passed back to @isr
+ * @mask: PDP interrupt status bits the handler is interested in
+ *
+ * Returns 0 on success, -ENODEV if the driver is not probed, -EINVAL
+ * for a NULL handler or an exact duplicate registration, or -EBUSY if
+ * all PDPFB_MAX_NR_ISRS slots are in use.
+ */
+int pdpfb_register_isr(pdpfb_isr_t isr, void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = pdpfb_priv;
+	int i;
+	int ret;
+	unsigned long flags;
+	struct pdpfb_isr_data *isr_data;
+
+	if (!priv)
+		return -ENODEV;
+
+	if (isr == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+
+	/* check for duplicate entry */
+	for (i = 0; i < PDPFB_MAX_NR_ISRS; i++) {
+		isr_data = &priv->registered_isr[i];
+		if (isr_data->isr == isr && isr_data->arg == arg &&
+				isr_data->mask == mask) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	isr_data = NULL;
+	ret = -EBUSY;
+
+	for (i = 0; i < PDPFB_MAX_NR_ISRS; i++) {
+		isr_data = &priv->registered_isr[i];
+
+		if (isr_data->isr != NULL)
+			continue;
+
+		isr_data->isr = isr;
+		isr_data->arg = arg;
+		isr_data->mask = mask;
+		ret = 0;
+
+		break;
+	}
+
+	/* only reprogram the hardware mask if a slot was claimed */
+	if (ret == 0)
+		_pdpfb_set_irqs(priv);
+
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+
+	/*
+	 * Propagate -EBUSY when the table is full; previously this path
+	 * returned 0 unconditionally, hiding the failure from callers.
+	 */
+	return ret;
+err:
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(pdpfb_register_isr);
+
+/*
+ * pdpfb_unregister_isr() - remove a previously registered handler.
+ * The (isr, arg, mask) triple must match the registration exactly.
+ * Returns 0 on success, -EINVAL if no matching entry exists, or
+ * -ENODEV if the driver is not probed.
+ */
+int pdpfb_unregister_isr(pdpfb_isr_t isr, void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = pdpfb_priv;
+	int i;
+	unsigned long flags;
+	int ret = -EINVAL;
+	struct pdpfb_isr_data *isr_data;
+
+	if (!priv)
+		return -ENODEV;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+
+	for (i = 0; i < PDPFB_MAX_NR_ISRS; i++) {
+		isr_data = &priv->registered_isr[i];
+		if (isr_data->isr != isr || isr_data->arg != arg ||
+				isr_data->mask != mask)
+			continue;
+
+		/* found the correct isr */
+
+		isr_data->isr = NULL;
+		isr_data->arg = NULL;
+		isr_data->mask = 0;
+
+		ret = 0;
+		break;
+	}
+
+	if (ret == 0)
+		_pdpfb_set_irqs(priv);
+
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(pdpfb_unregister_isr);
+
+/* ISR callback for the wait helpers: signal the on-stack completion. */
+static void pdpfb_irq_wait_handler(void *data, u32 mask)
+{
+	complete((struct completion *)data);
+}
+
+/*
+ * pdpfb_wait_for_irq_timeout() - block until one of @irqmask fires.
+ * @irqmask: PDP interrupt bits to wait for
+ * @timeout: maximum wait, in jiffies
+ *
+ * Uninterruptible wait.  Returns 0 when the interrupt arrived,
+ * -ETIMEDOUT on timeout, or the pdpfb_register_isr() error.
+ */
+int pdpfb_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+{
+	int r;
+	DECLARE_COMPLETION_ONSTACK(completion);
+
+	r = pdpfb_register_isr(pdpfb_irq_wait_handler, &completion, irqmask);
+
+	if (r)
+		return r;
+
+	timeout = wait_for_completion_timeout(&completion, timeout);
+
+	pdpfb_unregister_isr(pdpfb_irq_wait_handler, &completion, irqmask);
+
+	/*
+	 * wait_for_completion_timeout() returns the remaining jiffies
+	 * (>= 0) and never -ERESTARTSYS, so only the timeout case needs
+	 * handling; the previous -ERESTARTSYS check was dead code.
+	 */
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * pdpfb_wait_for_irq_interruptible_timeout() - interruptible wait for
+ * any of @irqmask (jiffies @timeout).  Returns 0 on success,
+ * -ETIMEDOUT on timeout, -ERESTARTSYS if interrupted by a signal, or
+ * the pdpfb_register_isr() error.
+ */
+int pdpfb_wait_for_irq_interruptible_timeout(u32 irqmask,
+						unsigned long timeout)
+{
+	int r;
+	DECLARE_COMPLETION_ONSTACK(completion);
+
+	r = pdpfb_register_isr(pdpfb_irq_wait_handler, &completion, irqmask);
+
+	if (r)
+		return r;
+
+	timeout = wait_for_completion_interruptible_timeout(&completion,
+			timeout);
+
+	pdpfb_unregister_isr(pdpfb_irq_wait_handler, &completion, irqmask);
+
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	/*
+	 * NOTE(review): timeout is unsigned long, so this compares against
+	 * (unsigned long)-ERESTARTSYS; the conversion makes it behave as
+	 * intended, but a signed local would be clearer.
+	 */
+	if (timeout == -ERESTARTSYS)
+		return -ERESTARTSYS;
+
+	return 0;
+}
+
+/* Wait (interruptibly, up to 500ms) for the next VEVENT0 vsync event. */
+int pdpfb_wait_vsync(void)
+{
+	unsigned long timeout = msecs_to_jiffies(500);
+	u32 irq = PDPFB_IRQ_VEVENT0;
+
+	return pdpfb_wait_for_irq_interruptible_timeout(irq, timeout);
+}
+EXPORT_SYMBOL(pdpfb_wait_vsync);
+
+#ifndef CONFIG_FB_PDP_PDUMP
+/* Per-vsync ISR: count vsyncs for FBIOGET_VBLANK reporting. */
+static void pdpfb_vsync_isr(void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = (struct pdpfb_priv *)arg;
+	++priv->vsync_count;
+}
+#endif
+
+/* Accessor for the board-supplied struct pdp_info platform data. */
+struct pdp_info *pdpfb_get_platform_data(struct pdpfb_priv *priv)
+{
+	return priv->device->platform_data;
+}
+
+/* Accessor for the fbcon pseudo palette array. */
+u32 *pdpfb_get_pseudo_palette(struct pdpfb_priv *priv)
+{
+	return priv->pseudo_palette;
+}
+
+/* Display power modes passed to pdpfb_set_display_enabled(). */
+enum pdpfb_display_mode {
+	PDP_FULLY_POWERED_DOWN,
+	PDP_PARTIALLY_POWERED_DOWN,
+	PDP_ENABLED
+};
+
+/*
+ * Transition the display hardware between power modes: gate syncs,
+ * the POWERDN bit, the pixel clock, screen power and the interrupt
+ * enables.  If the board config forces vsyncs, full power-down is
+ * demoted to partial so syncs keep running.
+ */
+static void pdpfb_set_display_enabled(struct pdpfb_priv *priv,
+					enum pdpfb_display_mode mode)
+{
+	struct pdp_info *pdata = priv->device->platform_data;
+	u32 sync_ctrl;
+	u32 op_mask;
+	int powerdn = 0;
+
+	if (mode == PDP_FULLY_POWERED_DOWN && pdata->sync_cfg.force_vsyncs)
+		mode = PDP_PARTIALLY_POWERED_DOWN;
+
+	switch (mode) {
+	case PDP_FULLY_POWERED_DOWN:
+		sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+		SET_FIELD(sync_ctrl, PDP_SYNCCTRL_SYNCACTIVE, 0);
+		SET_FIELD(sync_ctrl, PDP_SYNCCTRL_POWERDN, 1);
+		pdpfb_write(priv, PDP_SYNCCTRL, sync_ctrl);
+
+		pdpfb_disable_isr(priv);
+
+		if (pdata->hwops.set_screen_power)
+			pdata->hwops.set_screen_power(0);
+		pdpfb_pixel_clk_disable(priv);
+
+		break;
+	case PDP_PARTIALLY_POWERED_DOWN:
+		powerdn = 1;
+		/* fallthrough - partial power-down shares the enable path */
+	case PDP_ENABLED:
+		op_mask = pdpfb_read(priv, PDP_OPMASK);
+		SET_FIELD(op_mask, PDP_OPMASK_MASKR, 0x00);
+		SET_FIELD(op_mask, PDP_OPMASK_MASKG, 0x00);
+		SET_FIELD(op_mask, PDP_OPMASK_MASKB, 0x00);
+		SET_FIELD(op_mask, PDP_OPMASK_MASKLEVEL, 0);
+		pdpfb_write(priv, PDP_OPMASK, op_mask);
+
+		sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+		SET_FIELD(sync_ctrl, PDP_SYNCCTRL_UPDCTRL, 0);
+		SET_FIELD(sync_ctrl, PDP_SYNCCTRL_SYNCACTIVE, 1);
+		SET_FIELD(sync_ctrl, PDP_SYNCCTRL_POWERDN, powerdn);
+		pdpfb_write(priv, PDP_SYNCCTRL, sync_ctrl);
+
+		pdpfb_pixel_clk_enable(priv);
+		if (pdata->hwops.set_screen_power)
+			pdata->hwops.set_screen_power(!powerdn);
+
+		pdpfb_enable_isr(priv);
+
+		break;
+	default:
+		dev_err(priv->device,
+			"Unrecognised pdpfb_set_display_enabled parameter %d\n",
+			mode);
+	}
+}
+
+/*
+ * Recompute a stream's fb_var timing/size fields from the display mode
+ * and the plane's geometry, scaling each value into units of plane
+ * pixels.  A zero geometry width/height means "same as the plane's
+ * resolution".
+ */
+void pdpfb_update_margins(struct pdpfb_priv *priv,
+			struct pdpfb_stream *stream)
+{
+	struct pdp_info *pdata = priv->device->platform_data;
+	struct fb_info *info = &stream->info;
+	int xres, yres, w, h;
+	int xgap[2];
+	int ygap[2];
+
+	xres = info->var.xres;
+	yres = info->var.yres;
+	w = (stream->geom.w ? stream->geom.w : xres);
+	h = (stream->geom.h ? stream->geom.h : yres);
+	xgap[0] = stream->geom.x;
+	ygap[0] = stream->geom.y;
+	/* we can't really handle the case of a plane positioned offscreen */
+	xgap[1] = max(0, (int)priv->mode.xres - w - xgap[0]);
+	ygap[1] = max(0, (int)priv->mode.yres - h - ygap[0]);
+
+	/* physical size of plane */
+	info->var.width = pdata->lcd_size_cfg.width * w / priv->mode.xres;
+	info->var.height = pdata->lcd_size_cfg.height * h / priv->mode.yres;
+
+	/* measurements in units of plane pixels */
+	info->var.pixclock = priv->mode.pixclock * w / xres * h / yres;
+	info->var.hsync_len = priv->mode.hsync_len * xres / w;
+	info->var.vsync_len = priv->mode.vsync_len * yres / h;
+	info->var.left_margin  = (priv->mode.left_margin  + xgap[0])
+					* xres / w;
+	info->var.upper_margin = (priv->mode.upper_margin + ygap[0])
+					* yres / h;
+	info->var.right_margin = (priv->mode.right_margin + xgap[1])
+					* xres / w;
+	info->var.lower_margin = (priv->mode.lower_margin + ygap[1])
+					* yres / h;
+}
+
+/* Recompute margins for every probed stream. */
+static void pdpfb_update_all_margins(struct pdpfb_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		if (!stream || !stream->probed)
+			continue;
+		pdpfb_update_margins(priv, stream);
+	}
+}
+
+/* update variables but not registers */
+/*
+ * Derive the target pixel frequency either directly from pixclock or
+ * from the full mode timings and refresh rate, program the pixel clock,
+ * then read back the actual achieved rate into mode.pixclock and
+ * propagate it to every stream's margins.
+ */
+static void pdpfb_update_refresh_rate(struct pdpfb_priv *priv)
+{
+	u32 ht, vt;
+	unsigned long pix_freq;
+
+	if (priv->mode.pixclock) {
+		pix_freq = 1000*PICOS2KHZ(priv->mode.pixclock);
+	} else {
+		ht = priv->mode.hsync_len
+			+ priv->mode.left_margin
+			+ priv->mode.xres
+			+ priv->mode.right_margin;
+		vt = priv->mode.vsync_len
+			+ priv->mode.upper_margin
+			+ priv->mode.yres
+			+ priv->mode.lower_margin;
+		pix_freq = ht*vt*priv->mode.refresh;
+	}
+	clk_set_rate(priv->pixel_clk, pix_freq);
+
+	/* clock must be prepared+enabled for get_rate to be meaningful */
+	clk_prepare_enable(priv->pixel_clk);
+	pix_freq = clk_get_rate(priv->pixel_clk);
+	clk_disable_unprepare(priv->pixel_clk);
+	priv->mode.pixclock = KHZ2PICOS(pix_freq/1000);
+
+	pdpfb_update_all_margins(priv);
+}
+
+/*
+ * Updates pixel clock if it's changed (pixclock is pS).
+ * Returns 1 if changed.
+ */
+int pdpfb_update_pixclock(struct pdpfb_priv *priv, unsigned int pixclock)
+{
+	if (pixclock != priv->mode.pixclock) {
+		priv->mode.pixclock = pixclock;
+		pdpfb_update_refresh_rate(priv);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Compute the framebuffer line length in bytes for a virtual x
+ * resolution and bits-per-pixel, rounded up to the hardware's
+ * 16-byte stride alignment.
+ */
+unsigned long pdpfb_get_line_length(int xres_virtual, int bpp)
+{
+#define LINELEN_ALIGNMENT_BYTES	16	/* in bytes, must be power of 2 */
+#define LINELEN_ALIGNMENT		(LINELEN_ALIGNMENT_BYTES << 3)
+	u_long length;
+
+	/* round up to line length alignment */
+	length = xres_virtual * bpp;
+	length = (length + (LINELEN_ALIGNMENT-1)) & -LINELEN_ALIGNMENT;
+	length >>= 3;
+	return length;
+}
+
+/*
+ * Pulse the display-reset bit and disable all streams, normalising
+ * their blend positions to their slot index.
+ */
+static void pdpfb_soft_reset(struct pdpfb_priv *priv)
+{
+	/*
+	 * We intentionally include the video plane even if support for it is
+	 * not compiled in. This is because it may already have been enabled by
+	 * the bootloader and we don't want a green plane on top of the
+	 * graphics plane.
+	 */
+	static const unsigned int strctrls[] = {
+		PDP_STR1CTRL,
+		PDP_STR2CTRL,
+	};
+	int i;
+	u32 sync_ctrl, ctrl;
+
+	sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_DISPRST, 1);
+	pdpfb_write(priv, PDP_SYNCCTRL, sync_ctrl);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_DISPRST, 0);
+	pdpfb_write(priv, PDP_SYNCCTRL, sync_ctrl);
+
+	/* disable all the streams */
+	for (i = 0; i < ARRAY_SIZE(strctrls); ++i) {
+		ctrl = pdpfb_read(priv, strctrls[i]);
+		SET_FIELD(ctrl, PDP_STRXCTRL_STREAMEN, 0);
+		SET_FIELD(ctrl, PDP_STRXCTRL_BLENDPOS, i);
+		pdpfb_write(priv, strctrls[i], ctrl);
+	}
+}
+
+/*
+ * Program sync polarities/enables from the board config and the fb sync
+ * flags (PDP polarity bits are active-low relative to FB_SYNC_*), and
+ * enable memory refresh on both field periods.
+ */
+static void pdpfb_configure_display(struct pdpfb_priv *priv)
+{
+	struct pdp_info *pdata = priv->device->platform_data;
+	u32 sync_ctrl;
+	u32 mem_ctrl;
+
+	sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_CLKPOL, pdata->sync_cfg.clock_pol);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_VSPOL,
+			0 == (priv->mode.sync & FB_SYNC_VERT_HIGH_ACT));
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_HSPOL,
+			0 == (priv->mode.sync & FB_SYNC_HOR_HIGH_ACT));
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_BLNKPOL, pdata->sync_cfg.blank_pol);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_VSDIS, pdata->sync_cfg.vsync_dis);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_HSDIS, pdata->sync_cfg.hsync_dis);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_BLNKDIS, pdata->sync_cfg.blank_dis);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_VSSLAVE, pdata->sync_cfg.sync_slave);
+	SET_FIELD(sync_ctrl, PDP_SYNCCTRL_HSSLAVE, pdata->sync_cfg.sync_slave);
+	pdpfb_write(priv, PDP_SYNCCTRL, sync_ctrl);
+
+	mem_ctrl = pdpfb_read(priv, PDP_MEMCTRL);
+	SET_FIELD(mem_ctrl, PDP_MEMCTRL_MEMREFRESH,
+			PDP_MEMCTRL_MEMREFRESH_BOTH);
+	pdpfb_write(priv, PDP_MEMCTRL, mem_ctrl);
+}
+
+#ifdef PDP_CKEY_BY_BLENDPOS
+/*
+ * On this hardware variant the colour-key registers follow the blend
+ * position rather than the stream, so CKEY_REGS() indexes this table by
+ * blend position.  Note: "static const", not "const static" - the
+ * storage-class specifier must come first (checkpatch warns on this).
+ */
+static const struct pdpfb_stream_regs pdpfb_ckey_regs[] = {
+	{
+		.blend  = PDP_STR1BLEND,
+		.blend2 = PDP_STR1BLEND2,
+		.ctrl   = PDP_STR1CTRL,
+	},
+	{
+		.blend  = PDP_STR2BLEND,
+		.blend2 = PDP_STR2BLEND2,
+		.ctrl   = PDP_STR2CTRL,
+	},
+};
+#define CKEY_REGS(STREAM, BLENDPOS)	(pdpfb_ckey_regs[(BLENDPOS)])
+#else
+/* Colour-key registers live with the stream itself. */
+#define CKEY_REGS(STREAM, BLENDPOS)	((STREAM)->regs)
+#endif
+
+/*
+ * Reprogram every stream: run its configure hooks, then write its
+ * on-screen position, enable/blend settings and colour-key registers.
+ * A stream is enabled only if it is probed, its configure hook
+ * succeeds, and its enable flag is set.
+ */
+static void pdpfb_configure_streams(struct pdpfb_priv *priv)
+{
+	u32 ctrl, posn, blend, blend2;
+	u32 en, i;
+
+	pdpfb_write(priv, PDP_BGNDCOL, priv->bgnd_col);
+
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		const struct pdpfb_stream_regs *ckey_regs;
+		if (!stream)
+			continue;
+		if (stream->probed && stream->ops.configure) {
+			en = !stream->ops.configure(priv) && stream->enable;
+			if (en && stream->ops.configure_addr)
+				stream->ops.configure_addr(priv);
+		} else {
+			en = 0;
+		}
+
+		if (en) {
+			/* stream position on screen */
+			posn = pdpfb_read(priv, stream->regs.posn);
+			SET_FIELD(posn, PDP_STRXPOSN_XSTART, stream->geom.x);
+			SET_FIELD(posn, PDP_STRXPOSN_YSTART, stream->geom.y);
+			pdpfb_write(priv, stream->regs.posn, posn);
+		}
+
+		/* set up blending */
+
+		ctrl = pdpfb_read(priv, stream->regs.ctrl);
+		SET_FIELD(ctrl, PDP_STRXCTRL_STREAMEN, en);
+		SET_FIELD(ctrl, PDP_STRXCTRL_BLENDMODE, stream->blend_mode);
+		SET_FIELD(ctrl, PDP_STRXCTRL_BLENDPOS, i);
+		pdpfb_write(priv, stream->regs.ctrl, ctrl);
+
+		blend = pdpfb_read(priv, stream->regs.blend);
+		SET_FIELD(blend, PDP_STRXBLEND_GLOBALALPHA,
+				stream->global_alpha);
+		pdpfb_write(priv, stream->regs.blend, blend);
+
+		/* set up colour keying */
+
+		/* may resolve by blend position or stream (CKEY_REGS) */
+		ckey_regs = &CKEY_REGS(stream, i);
+
+		ctrl = pdpfb_read(priv, ckey_regs->ctrl);
+		SET_FIELD(ctrl, PDP_STRXCTRL_CKEYEN, stream->ckey_en);
+		SET_FIELD(ctrl, PDP_STRXCTRL_CKEYSRC, stream->ckey_src);
+		pdpfb_write(priv, ckey_regs->ctrl, ctrl);
+
+		blend = pdpfb_read(priv, ckey_regs->blend);
+		SET_FIELD(blend, PDP_STRXBLEND_COLKEY, stream->ckey.ckey);
+		pdpfb_write(priv, ckey_regs->blend, blend);
+
+		blend2 = pdpfb_read(priv, ckey_regs->blend2);
+		SET_FIELD(blend2, PDP_STRXBLEND2_COLKEYMASK, stream->ckey.mask);
+		pdpfb_write(priv, ckey_regs->blend2, blend2);
+	}
+}
+
+/* Extract the current sync setup from the hardware */
+/*
+ * Reconstruct fb_videomode timings (sync lengths, margins, resolution)
+ * from the sync registers, e.g. as left programmed by the bootloader.
+ * Returns -EINVAL if the register values are not monotonically ordered
+ * (i.e. no sane mode is programmed).
+ */
+int pdpfb_extract_sync(struct pdpfb_priv *priv)
+{
+	u32 hsync1, hde_ctrl;
+	u32 vsync1, vde_ctrl;
+	u16 vbps, vt, vdes, vdef;
+	u16 hbps, ht, hdes, hdef;
+
+	/* read vsync */
+	vsync1	= pdpfb_read(priv, PDP_VSYNC1);
+	vbps	= GET_FIELD(vsync1, PDP_VSYNC1_VBPS);
+	vt	= GET_FIELD(vsync1, PDP_VSYNC1_VT);
+
+	vde_ctrl = pdpfb_read(priv, PDP_VDECTRL);
+	vdes	= GET_FIELD(vde_ctrl, PDP_VDECTRL_VDES);
+	vdef	= GET_FIELD(vde_ctrl, PDP_VDECTRL_VDEF);
+
+	if (!vbps || vdes <= vbps || vdef <= vdes || vt <= vdef)
+		return -EINVAL;
+
+	/* read hsync */
+	hsync1	= pdpfb_read(priv, PDP_HSYNC1);
+	hbps	= GET_FIELD(hsync1, PDP_HSYNC1_HBPS);
+	ht	= GET_FIELD(hsync1, PDP_HSYNC1_HT);
+
+	hde_ctrl = pdpfb_read(priv, PDP_HDECTRL);
+	hdes	= GET_FIELD(hde_ctrl, PDP_HDECTRL_HDES);
+	hdef	= GET_FIELD(hde_ctrl, PDP_HDECTRL_HDEF);
+
+	if (!hbps || hdes <= hbps || hdef <= hdes || ht <= hdef)
+		return -EINVAL;
+
+	/* calculate timings */
+	priv->mode.vsync_len = vbps;
+	priv->mode.upper_margin = vdes - vbps;
+	priv->mode.yres = vdef - vdes;
+	priv->mode.lower_margin = vt - vdef;
+
+	priv->mode.hsync_len = hbps;
+	priv->mode.left_margin = hdes - hbps;
+	priv->mode.xres = hdef - hdes;
+	priv->mode.right_margin = ht - hdef;
+
+	return 0;
+}
+
+/* Set up sync registers to position the frame buffers on the screen */
+/*
+ * Compute horizontal and vertical sync/border/data-enable positions
+ * from the current mode.  Vertical registers are written immediately;
+ * horizontal ones are queued in priv->upd.sync and, when syncs are
+ * active, applied at the next VEVENT0 to avoid visible artifacts
+ * (otherwise applied immediately).  Always returns 0.
+ */
+int pdpfb_configure_sync(struct pdpfb_priv *priv)
+{
+	struct pdpfb_update_sync *sync = &priv->upd.sync;
+	unsigned long flags;
+
+	u32 sync_ctrl;
+	u32 vsync1, vsync2, vsync3;
+	u32 vevent;
+	u32 vde_ctrl;
+
+	u16 hbps, ht, hfps, hrbs, has, hlbs;
+	u16 vbps, vt, vtbs, vfps, vas, vbbs;
+	u16 vevent_vevent, vevent_vfetch;
+	u16 hdes, hdef;
+	u16 vdes, vdef;
+
+	/* 3-pixel borders on each edge, both axes */
+	int border[2][2] = { {3, 3}, {3, 3} };
+
+	hbps = priv->mode.hsync_len;
+	hdes = hbps + priv->mode.left_margin;
+	has = hdes;
+	hlbs = has - border[0][0];
+	hdef = hdes + priv->mode.xres;
+	hrbs = hdef;
+	hfps = hrbs + border[0][1];
+	ht = hrbs + priv->mode.right_margin;
+
+	vbps = priv->mode.vsync_len;
+	vdes = vbps + priv->mode.upper_margin;
+	vas = vdes;
+	vtbs = vas - border[1][0];
+	vdef = vdes + priv->mode.yres;
+	vbbs = vdef;
+	vfps = vbbs + border[1][1];
+	vt = vbbs + priv->mode.lower_margin;
+
+	vevent_vevent = 0;
+	vevent_vfetch = vbps;
+
+	/* write registers that shouldn't cause artifacts */
+
+	vsync1	= PLACE_FIELD(PDP_VSYNC1_VBPS, vbps)
+		| PLACE_FIELD(PDP_VSYNC1_VT, vt);
+	vsync2	= PLACE_FIELD(PDP_VSYNC2_VAS, vas)
+		| PLACE_FIELD(PDP_VSYNC2_VTBS, vtbs);
+	vsync3	= PLACE_FIELD(PDP_VSYNC3_VFPS, vfps)
+		| PLACE_FIELD(PDP_VSYNC3_VBBS, vbbs);
+
+	pdpfb_write(priv, PDP_VSYNC1, vsync1);
+	pdpfb_write(priv, PDP_VSYNC2, vsync2);
+	pdpfb_write(priv, PDP_VSYNC3, vsync3);
+
+	vevent		= PLACE_FIELD(PDP_VEVENT_VEVENT, vevent_vevent)
+			| PLACE_FIELD(PDP_VEVENT_VFETCH, vevent_vfetch);
+	vde_ctrl	= PLACE_FIELD(PDP_VDECTRL_VDES, vdes)
+			| PLACE_FIELD(PDP_VDECTRL_VDEF, vdef);
+
+	pdpfb_write(priv, PDP_VEVENT, vevent);
+	pdpfb_write(priv, PDP_VDECTRL, vde_ctrl);
+
+	/* write registers that would cause artifacts on next VBLANK */
+
+	spin_lock_irqsave(&priv->upd.lock, flags);
+
+	sync->hsync1	= PLACE_FIELD(PDP_HSYNC1_HBPS, hbps)
+			| PLACE_FIELD(PDP_HSYNC1_HT, ht);
+	sync->hsync2	= PLACE_FIELD(PDP_HSYNC2_HAS, has)
+			| PLACE_FIELD(PDP_HSYNC2_HLBS, hlbs);
+	sync->hsync3	= PLACE_FIELD(PDP_HSYNC3_HFPS, hfps)
+			| PLACE_FIELD(PDP_HSYNC3_HRBS, hrbs);
+
+	sync->hde_ctrl	= PLACE_FIELD(PDP_HDECTRL_HDES, hdes)
+			| PLACE_FIELD(PDP_HDECTRL_HDEF, hdef);
+
+	sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+	if (GET_FIELD(sync_ctrl, PDP_SYNCCTRL_SYNCACTIVE)) {
+		/* sync enabled, apply on next VSYNC */
+		priv->upd.updates |= PDP_UPDATE_SYNC;
+		pdpfb_register_isr(pdpfb_interrupt_upd, priv,
+				   PDPFB_IRQ_VEVENT0);
+	} else {
+		/* sync disabled, apply immediately */
+		pdpfb_sync_upd(priv);
+	}
+
+	spin_unlock_irqrestore(&priv->upd.lock, flags);
+
+	return 0;
+}
+
+/*
+ * Broadcast an FB_EVENT_BLANK for the current power level to every
+ * stream's fb_info, unless blank emission is suppressed or this level
+ * matches the final blank that pdpfb_blank() will report anyway.
+ */
+static void pdpfb_emit_blank_event(struct pdpfb_priv *priv)
+{
+	struct fb_event event;
+	int blank = priv->power;
+	int i;
+
+	/* skip blank events if no_blank_emit flag is set */
+	if (priv->no_blank_emit)
+		return;
+
+	/* skip the final blank if we know it'll be emitted anyway */
+	if (priv->final_blank == blank)
+		return;
+
+	event.data = &blank;
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		int flags;
+		if (!stream)
+			continue;
+		event.info = &stream->info;
+
+		/* prevent fbcon responding badly to the blank events */
+		flags = event.info->flags;
+		event.info->flags |= FBINFO_MISC_USEREVENT;
+		fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+		event.info->flags = flags;
+	}
+}
+
+/* FULLY_OFF -> PANEL_ENABLED */
+/* Reset the display block, clear stale interrupt state, notify. */
+static void pdpfb_power_panel_en(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "panel_en\n");
+
+	pdpfb_soft_reset(priv);
+	pdpfb_write(priv, PDP_INTCLR, 0xFFFFFFFF);
+
+	pdpfb_emit_blank_event(priv);
+}
+/* PANEL_ENABLED -> FULLY_OFF */
+/* No hardware work needed; just emit the blank notification. */
+static void pdpfb_power_panel_dis(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "panel_dis\n");
+
+	pdpfb_emit_blank_event(priv);
+}
+
+/* PANEL_ENABLED -> SYNC_ENABLED */
+/* Program clocks, display config and sync timing, start syncs. */
+static void pdpfb_power_sync_en(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "sync_en\n");
+
+	pdpfb_update_refresh_rate(priv);
+	pdpfb_configure_display(priv);
+	pdpfb_configure_sync(priv);
+	pdpfb_set_display_enabled(priv, PDP_PARTIALLY_POWERED_DOWN);
+
+	pdpfb_emit_blank_event(priv);
+}
+/* SYNC_ENABLED -> PANEL_ENABLED */
+/* Stop syncs and power the display path down fully. */
+static void pdpfb_power_sync_dis(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "sync_dis\n");
+
+	pdpfb_set_display_enabled(priv, PDP_FULLY_POWERED_DOWN);
+
+	pdpfb_emit_blank_event(priv);
+}
+
+/* SYNC_ENABLED -> PANEL_POWERED */
+/* Notify, then (re)configure all streams with the panel powered. */
+static void pdpfb_power_panelpwr_en(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "panelpwr_en\n");
+
+	pdpfb_emit_blank_event(priv);
+
+	pdpfb_configure_streams(priv);
+}
+/* PANEL_POWERED -> SYNC_ENABLED */
+/* No hardware work needed; just emit the blank notification. */
+static void pdpfb_power_panelpwr_dis(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "panelpwr_dis\n");
+
+	pdpfb_emit_blank_event(priv);
+}
+
+/* PANEL_POWERED -> BACKLIT */
+/* Fully enable the display (backlight on), then notify. */
+static void pdpfb_power_bl_en(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "bl_en\n");
+
+	pdpfb_set_display_enabled(priv, PDP_ENABLED);
+	pdpfb_emit_blank_event(priv);
+}
+/* BACKLIT -> PANEL_POWERED */
+/* Notify, wait for vsync, then drop to partial power-down. */
+static void pdpfb_power_bl_dis(struct pdpfb_priv *priv)
+{
+	dev_dbg(priv->device, "bl_dis\n");
+
+	pdpfb_emit_blank_event(priv);
+	/* enable blanking after vsync to avoid tearing */
+	pdpfb_wait_vsync();
+	pdpfb_set_display_enabled(priv, PDP_PARTIALLY_POWERED_DOWN);
+}
+
+/* power transition functions */
+/*
+ * Table indexed by target power state: [0] = enable (moving to this
+ * state from the next-lower-power one), [1] = disable (leaving it).
+ */
+typedef void (*pdpfb_power_change)(struct pdpfb_priv *priv);
+static const pdpfb_power_change pdpfb_power_fn[PDPFB_POWER_MAX][2] = {
+	[PDPFB_POWER_BACKLIT]
+		= { pdpfb_power_bl_en,		pdpfb_power_bl_dis },
+	[PDPFB_POWER_PANEL_POWERED]
+		= { pdpfb_power_panelpwr_en,	pdpfb_power_panelpwr_dis },
+	[PDPFB_POWER_SYNC_ENABLED]
+		= { pdpfb_power_sync_en,	pdpfb_power_sync_dis },
+	[PDPFB_POWER_PANEL_ENABLED]
+		= { pdpfb_power_panel_en,	pdpfb_power_panel_dis },
+};
+
+/* move to prestate (lower power) then to state (higher power) */
+/*
+ * Walk the power state machine: step down to @prestate (calling each
+ * state's disable function), then step up to @state (calling enable
+ * functions).  Numerically larger priv->power values are lower-power
+ * states, mirroring fbdev blank levels (see pdpfb_blank()).  The
+ * reconfiguring flag guards against re-entry from our own blank events.
+ */
+static int pdpfb_reconfigure(struct pdpfb_priv *priv,
+			     enum pdpfb_power_state prestate,
+			     enum pdpfb_power_state state)
+{
+	WARN_CONSOLE_UNLOCKED();
+
+	if (prestate >= PDPFB_POWER_MAX || state <= PDPFB_POWER_NA)
+		return -EINVAL;
+
+	/* ignore blank events that we emitted ourselves */
+	if (priv->reconfiguring)
+		return -EBUSY;
+	priv->reconfiguring = 1;
+
+	while (priv->power < prestate) {
+		++priv->power;
+		pdpfb_power_fn[priv->power - 1][1](priv);
+	}
+	while (priv->power > state) {
+		--priv->power;
+		pdpfb_power_fn[priv->power][0](priv);
+	}
+
+	priv->reconfiguring = 0;
+	return 0;
+}
+
+/* move to a different power state */
+/* Convenience wrapper: transition directly to @state. */
+static int pdpfb_configure(struct pdpfb_priv *priv,
+			   enum pdpfb_power_state state)
+{
+	return pdpfb_reconfigure(priv, state, state);
+}
+
+/*
+ * Adopt a new video mode from @var.  A forced activation, or an actual
+ * mode change, bounces the hardware down to PANEL_ENABLED and back up;
+ * an unchanged mode only reconfigures the streams.  Blank events are
+ * suppressed for the duration.
+ */
+void pdpfb_set_mode(struct pdpfb_priv *priv, struct fb_var_screeninfo *var)
+{
+	/*
+	 * Blanking events confuse fbcon, and aren't strictly necessary when
+	 * we're just changing mode.
+	 */
+	priv->no_blank_emit = 1;
+	if (var->activate & FB_ACTIVATE_FORCE) {
+		fb_var_to_videomode(&priv->mode, var);
+		pdpfb_reconfigure(priv, PDPFB_POWER_PANEL_ENABLED, priv->power);
+	} else {
+		struct fb_videomode old_mode = priv->mode;
+		fb_var_to_videomode(&priv->mode, var);
+		if (!fb_mode_is_equal(&priv->mode, &old_mode))
+			pdpfb_reconfigure(priv, PDPFB_POWER_PANEL_ENABLED,
+					  priv->power);
+		else
+			pdpfb_configure_streams(priv);
+	}
+	priv->no_blank_emit = 0;
+}
+
+/*
+ * Pack 16-bit red/green/blue components into a single pixel value,
+ * placing each in the offset/length described by its fb_bitfield.
+ */
+static inline u32 _pdpfb_convert_palette(struct fb_bitfield *red_bits,
+					 struct fb_bitfield *green_bits,
+					 struct fb_bitfield *blue_bits,
+					 u_int red, u_int green, u_int blue)
+{
+	u32 rgb;
+	rgb  = MOVE_FIELD(red, 0, 16,
+			  red_bits->offset,
+			  red_bits->length);
+	rgb |= MOVE_FIELD(green, 0, 16,
+			  green_bits->offset,
+			  green_bits->length);
+	rgb |= MOVE_FIELD(blue, 0, 16,
+			  blue_bits->offset,
+			  blue_bits->length);
+	return rgb;
+}
+
+/*
+ * Store entry @regno of the fbcon pseudo palette in the framebuffer's
+ * own pixel format (@trans is accepted but unused).
+ */
+static inline void _pdpfb_set_pseudo_palette(struct pdpfb_priv *priv,
+					     struct fb_info *info, u_int regno,
+					     u_int red, u_int green,
+					     u_int blue, u_int trans)
+{
+	priv->pseudo_palette[regno] = _pdpfb_convert_palette(
+			&info->var.red, &info->var.green, &info->var.blue,
+			red, green, blue);
+}
+
+/*
+ * Pack an RGB triple in the hardware LUT entry format described by
+ * priv->palette_fmt (@regno and @trans are accepted but unused).
+ */
+static inline u32 _pdpfb_hw_palette(struct pdpfb_priv *priv, u_int regno,
+				    u_int red, u_int green,
+				    u_int blue, u_int trans)
+{
+	return _pdpfb_convert_palette(&priv->palette_fmt.r,
+				      &priv->palette_fmt.g,
+				      &priv->palette_fmt.b,
+				      red, green, blue);
+}
+
+/*
+ * fbdev setcolreg hook: update one pseudo-palette entry for fbcon and
+ * queue the matching hardware LUT write for the next vertical event.
+ * Returns -EINVAL for registers above 255.
+ */
+int pdpfb_setcolreg(u_int regno,
+		 u_int red, u_int green, u_int blue,
+		 u_int trans, struct fb_info *info)
+{
+	struct pdpfb_priv *priv;
+	u32 palette_rgb;
+	unsigned long flags;
+
+	if (regno > 255)
+		return -EINVAL;
+
+	priv = dev_get_drvdata(info->device);
+
+	/* update pseudo palette used by fbcon */
+	_pdpfb_set_pseudo_palette(priv, info, regno, red, green, blue, trans);
+	/* queue hardware palette update */
+	palette_rgb = _pdpfb_hw_palette(priv, regno, red, green, blue, trans);
+	_pdpfb_start_queue_lut_upd(priv, &flags);
+	_pdpfb_queue_lut_upd(priv, regno, palette_rgb);
+	_pdpfb_end_queue_lut_upd(priv, &flags);
+
+	return 0;
+}
+
+/*
+ * fbdev setcmap hook: load a colour map into the pseudo palette and
+ * queue the corresponding hardware LUT updates for the next vertical
+ * event.  Returns 0 on success or -EINVAL if the map would exceed the
+ * hardware palette.
+ */
+int pdpfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct pdpfb_priv *priv;
+	int i, start;
+	u16 *red, *green, *blue, *transp;
+	u_int hred, hgreen, hblue, htransp = 0xffff;
+	unsigned long flags;
+	u32 palette_rgb;
+
+	priv = dev_get_drvdata(info->device);
+
+	red = cmap->red;
+	green = cmap->green;
+	blue = cmap->blue;
+	transp = cmap->transp;
+	start = cmap->start;
+	if (start + cmap->len > PDP_PALETTE_NR)
+		return -EINVAL;
+
+	_pdpfb_start_queue_lut_upd(priv, &flags);
+	for (i = 0; i < cmap->len; i++) {
+		hred = *red++;
+		hgreen = *green++;
+		hblue = *blue++;
+		if (transp)
+			htransp = *transp++;
+
+		/*
+		 * Entries describe registers [start, start + len), so
+		 * offset by cmap->start.  Previously the offset was
+		 * bounds-checked but ignored, overwriting entries
+		 * [0, len) instead.
+		 */
+		/* update pseudo palette used by fbcon */
+		_pdpfb_set_pseudo_palette(priv, info, start + i,
+				hred, hgreen, hblue, htransp);
+		/* queue hardware palette update */
+		palette_rgb = _pdpfb_hw_palette(priv, start + i,
+				hred, hgreen, hblue, htransp);
+		_pdpfb_queue_lut_upd(priv, start + i, palette_rgb);
+	}
+	_pdpfb_end_queue_lut_upd(priv, &flags);
+	return 0;
+}
+
+/*
+ * fbdev blank hook: map the FB blank level directly onto our power
+ * state.  final_blank is set so pdpfb_emit_blank_event() can skip the
+ * notification fbdev will deliver itself, and restored afterwards.
+ */
+int pdpfb_blank(int blank, struct fb_info *info)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+	int err, prev_final_blank;
+
+	prev_final_blank = priv->final_blank;
+	priv->final_blank = blank;
+	err = pdpfb_configure(priv, blank);
+	priv->final_blank = prev_final_blank;
+	return err;
+}
+
+/*
+ * Fill in FBIOGET_VBLANK info for a stream from the current scanline:
+ * vblanking outside the plane's vertical extent, vsyncing during the
+ * vsync pulse, plus the vsync count and a scaled vertical count.
+ */
+static void pdpfb_str_get_vblank(struct pdpfb_priv *priv,
+				 struct pdpfb_stream *stream,
+				 struct fb_vblank *vblank)
+{
+	int line_no, geomy, geomh;
+
+	line_no = pdpfb_read(priv, PDP_LINESTAT);
+	line_no = GET_FIELD(line_no, PDP_LINESTAT_LINENO_STAT);
+
+	geomy = priv->mode.vsync_len + priv->mode.upper_margin + stream->geom.y;
+	geomh = stream->geom.h;
+	if (!geomh)
+		geomh = stream->info.var.yres;
+
+	memset(vblank, 0, sizeof(*vblank));
+
+	vblank->flags = FB_VBLANK_HAVE_VBLANK	|
+			FB_VBLANK_HAVE_COUNT	|
+			FB_VBLANK_HAVE_VCOUNT	|
+			FB_VBLANK_HAVE_VSYNC;
+	if (line_no < geomy ||
+	    line_no >= geomy + geomh)
+		vblank->flags |= FB_VBLANK_VBLANKING;
+	if (line_no < priv->mode.vsync_len)
+		vblank->flags |= FB_VBLANK_VSYNCING;
+	vblank->count = priv->vsync_count;
+	/*
+	 * NOTE(review): vcount scales the raw line number without first
+	 * subtracting geomy - confirm whether vcount is meant to be
+	 * plane-relative.
+	 */
+	vblank->vcount = line_no * stream->info.var.yres / geomh;
+}
+
+/*
+ * Return the index (blend position) of @stream in priv->streams[],
+ * or -EINVAL if it is not present.
+ */
+static int pdpfb_str_get_plane_pos(struct pdpfb_priv *priv,
+		struct pdpfb_stream *stream)
+{
+	int i;
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		if (priv->streams[i] == stream)
+			return i;
+	}
+	return -EINVAL;
+}
+
+/* returns new position, or negative on error */
+/*
+ * Move @stream to plane position @pos, shifting the streams in between
+ * by one slot, then reprogram the hardware blend order.
+ */
+static int pdpfb_str_set_plane_pos(struct pdpfb_priv *priv,
+		struct pdpfb_stream *stream,
+		int pos)
+{
+	int old_pos = pdpfb_str_get_plane_pos(priv, stream);
+	if (unlikely(old_pos < 0))
+		return old_pos;
+	if (unlikely(pos < 0 || pos >= PDPFB_STREAM_NR))
+		return -EINVAL;
+	if (pos == old_pos)
+		return pos;
+
+	if (pos > old_pos) {
+		do {
+			priv->streams[old_pos] = priv->streams[old_pos+1];
+			++old_pos;
+		} while (pos > old_pos);
+	} else if (pos < old_pos) {
+		do {
+			priv->streams[old_pos] = priv->streams[old_pos-1];
+			--old_pos;
+		} while (pos < old_pos);
+	}
+	priv->streams[pos] = stream;
+	pdpfb_configure_streams(priv);
+	return pos;
+}
+
+int pdpfb_str_ioctl(struct pdpfb_priv *priv, struct pdpfb_stream *stream,
+		unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct pdpfb_geom geom;
+	struct pdpfb_ckey ckey;
+	struct fb_vblank vblank;
+	int plane_pos;
+	int error;
+	u32 val;
+#ifdef CONFIG_FB_PDP_USERMEM
+	struct pdpfb_usermem usermem;
+	struct pdp_info *pdata;
+	int i, ret;
+#endif
+
+	switch (cmd) {
+	/* get blanking info */
+	case FBIOGET_VBLANK:
+		pdpfb_str_get_vblank(priv, stream, &vblank);
+		if (copy_to_user(argp, &vblank, sizeof(vblank)))
+			return -EFAULT;
+		return 0;
+	/* wait for vsync */
+	case FBIO_WAITFORVSYNC:
+		if (get_user(val, (u32 __user *)arg))
+			return -EFAULT;
+		if (val != 0)
+			return -ENODEV;
+		return pdpfb_wait_vsync();
+	/* get background colour */
+	case PDPIO_GETBGND:
+		if (copy_to_user(argp, &priv->bgnd_col, sizeof(priv->bgnd_col)))
+			return -EFAULT;
+		return 0;
+	/* set background colour */
+	case PDPIO_SETBGND:
+		if (copy_from_user(&val, argp, sizeof(val)))
+			return -EFAULT;
+		priv->bgnd_col = val & 0xFFFFFF;
+		pdpfb_configure_streams(priv);
+		return 0;
+	/* get screen geometry */
+	case PDPIO_GETSCRGEOM:
+		geom.x = geom.y = 0;
+		geom.w = priv->mode.xres;
+		geom.h = priv->mode.yres;
+		if (copy_to_user(argp, &geom, sizeof(geom)))
+			return -EFAULT;
+		return 0;
+
+	/* get stream enable */
+	case PDPIO_GETEN:
+		val = stream->enable;
+		if (copy_to_user(argp, &val, sizeof(val)))
+			return -EFAULT;
+		return 0;
+	/* set stream enable */
+	case PDPIO_SETEN:
+		if (copy_from_user(&val, argp, sizeof(val)))
+			return -EFAULT;
+		stream->enable = (0 != val);
+		pdpfb_configure_streams(priv);
+		return 0;
+	/* get stream plane position */
+	case PDPIO_GETPLANEPOS:
+		plane_pos = pdpfb_str_get_plane_pos(priv, stream);
+		if (unlikely(plane_pos < 0))
+			return -EINVAL;
+		if (copy_to_user(argp, &plane_pos, sizeof(plane_pos)))
+			return -EFAULT;
+		return 0;
+	/* set stream plane position */
+	case PDPIO_SETPLANEPOS:
+		if (copy_from_user(&plane_pos, argp, sizeof(plane_pos)))
+			return -EFAULT;
+
+		plane_pos = pdpfb_str_set_plane_pos(priv, stream, plane_pos);
+		if (plane_pos < 0)
+			return plane_pos;
+
+		if (copy_to_user(argp, &plane_pos, sizeof(plane_pos)))
+			return -EFAULT;
+		return 0;
+	/* get stream geometry */
+	case PDPIO_GETGEOM:
+		geom = stream->geom;
+		if (copy_to_user(argp, &geom, sizeof(geom)))
+			return -EFAULT;
+		return 0;
+	/* set stream geometry */
+	case PDPIO_SETGEOM:
+		if (copy_from_user(&geom, argp, sizeof(geom)))
+			return -EFAULT;
+		if (geom.x > 2047 || geom.y > 2047)
+			return -EINVAL;
+
+		if (stream->ops.check_geom) {
+			error = stream->ops.check_geom(priv, &geom);
+			if (error)
+				return -EINVAL;
+		} else {
+			geom.w = 0;
+			geom.h = 0;
+		}
+		stream->geom = geom;
+		if (stream->ops.set_geom)
+			stream->ops.set_geom(priv);
+
+		pdpfb_configure_streams(priv);
+		pdpfb_update_margins(priv, stream);
+
+		if (copy_to_user(argp, &geom, sizeof(geom)))
+			return -EFAULT;
+		return 0;
+	/* get stream colour key mode */
+	case PDPIO_GETCKEYMODE:
+		if (!stream->ckey_en)
+			val = PDP_CKEYMODE_DISABLE;
+		else if (stream->ckey_src == PDP_STRXCTRL_CKEYSRC_PREV)
+			val = PDP_CKEYMODE_PREVIOUS;
+		else if (stream->ckey_src == PDP_STRXCTRL_CKEYSRC_CUR)
+			val = PDP_CKEYMODE_CURRENT;
+		if (copy_to_user(argp, &val, sizeof(val)))
+			return -EFAULT;
+		return 0;
+	/* set stream colour key mode */
+	case PDPIO_SETCKEYMODE:
+		if (copy_from_user(&val, argp, sizeof(val)))
+			return -EFAULT;
+		switch (val) {
+		case PDP_CKEYMODE_DISABLE:
+			stream->ckey_en = 0;
+			break;
+		case PDP_CKEYMODE_PREVIOUS:
+			stream->ckey_en = 1;
+			stream->ckey_src = PDP_STRXCTRL_CKEYSRC_PREV;
+			break;
+		case PDP_CKEYMODE_CURRENT:
+			stream->ckey_en = 1;
+			stream->ckey_src = PDP_STRXCTRL_CKEYSRC_CUR;
+			break;
+		default:
+			return -EINVAL;
+		}
+		pdpfb_configure_streams(priv);
+		return 0;
+	/* get stream colour key */
+	case PDPIO_GETCKEY:
+		if (copy_to_user(argp, &stream->ckey, sizeof(stream->ckey)))
+			return -EFAULT;
+		return 0;
+	/* set stream colour key */
+	case PDPIO_SETCKEY:
+		if (copy_from_user(&ckey, argp, sizeof(ckey)))
+			return -EFAULT;
+		ckey.mask = ckey.mask & 0xFFFFFF;
+		ckey.ckey = ckey.ckey & ckey.mask;
+		stream->ckey = ckey;
+		if (stream->ckey_en)
+			pdpfb_configure_streams(priv);
+		if (copy_to_user(argp, &stream->ckey, sizeof(stream->ckey)))
+			return -EFAULT;
+		return 0;
+	/* get stream blending mode */
+	case PDPIO_GETBLENDMODE:
+		val = stream->blend_mode;
+		if (copy_to_user(argp, &val, sizeof(val)))
+			return -EFAULT;
+		return 0;
+	/* set stream blending mode */
+	case PDPIO_SETBLENDMODE:
+		if (copy_from_user(&val, argp, sizeof(val)))
+			return -EFAULT;
+		if (val > PDP_BLENDMODE_PIXEL)
+			return -EINVAL;
+		stream->blend_mode = val;
+		pdpfb_configure_streams(priv);
+		return 0;
+	/* get stream global alpha */
+	case PDPIO_GETGALPHA:
+		val = stream->global_alpha;
+		if (copy_to_user(argp, &val, sizeof(val)))
+			return -EFAULT;
+		return 0;
+	/* set stream global alpha */
+	case PDPIO_SETGALPHA:
+		if (copy_from_user(&val, argp, sizeof(val)))
+			return -EFAULT;
+		val &= 0xFF;
+		stream->global_alpha = val;
+		if (stream->blend_mode != PDP_BLENDMODE_NOALPHA)
+			pdpfb_configure_streams(priv);
+		if (copy_to_user(argp, &val, sizeof(val)))
+			return -EFAULT;
+		return 0;
+	/* set user provided memory */
+	case PDPIO_SETUSERMEM:
+#ifdef CONFIG_FB_PDP_USERMEM
+		if (copy_from_user(&usermem, argp, sizeof(usermem)))
+			return -EFAULT;
+
+		if (usermem.flags & PDP_USERMEM_ALLPLANES) {
+			pdpfb_init_mempool(priv, PDPFB_MEMPOOL_USER,
+					   usermem.phys, usermem.len);
+
+#ifdef PDP_SHARED_BASE
+			pdata = pdpfb_get_platform_data(priv);
+			pdata->hwops.set_shared_base(usermem.phys);
+#endif
+			ret = 0;
+			for (i = PDPFB_STREAM_NR - 1; i >= 0; i--) {
+				struct pdpfb_stream *stream = priv->streams[i];
+				if (!stream || !stream->probed)
+					continue;
+				pdpfb_str_videomem_free(stream);
+			}
+			for (i = 0; i < PDPFB_STREAM_NR; i++) {
+				struct pdpfb_stream *stream = priv->streams[i];
+				if (!stream || !stream->probed)
+					continue;
+				error = pdpfb_str_videomem_alloc(priv, stream);
+				if (error && !ret)
+					ret = error;
+			}
+
+#ifdef PDP_SHARED_BASE
+			if (!ret)
+				return 0;
+
+			/*
+			 * Allocation from usermem failed, the best we can do
+			 * is to reset the usermem pool and attempt allocations
+			 * again.
+			 */
+
+			for (i = PDPFB_STREAM_NR - 1; i >= 0; i--) {
+				struct pdpfb_stream *stream = priv->streams[i];
+				if (!stream || !stream->probed)
+					continue;
+				pdpfb_str_videomem_free(stream);
+			}
+
+			priv->pools[PDPFB_MEMPOOL_USER].size = 0;
+			priv->pools[PDPFB_MEMPOOL_USER].flags = 0;
+			priv->pools[PDPFB_MEMPOOL_USER].phys_base = 0;
+			priv->pools[PDPFB_MEMPOOL_USER].base = NULL;
+
+			pdata->hwops.set_shared_base(
+				priv->pools[PDPFB_MEMPOOL_MEM].phys_base);
+
+			for (i = 0; i < PDPFB_STREAM_NR; i++) {
+				struct pdpfb_stream *stream = priv->streams[i];
+				if (!stream || !stream->probed)
+					continue;
+				pdpfb_str_videomem_alloc(priv, stream);
+			}
+#endif
+
+			return -ENOMEM;
+		}
+
+#ifdef PDP_SHARED_BASE
+		/* with a shared base we can only affect all planes */
+		return -EINVAL;
+#else
+		stream->videomem = (void *)usermem.phys;
+		stream->videomem_len = usermem.len;
+		stream->mem_pool = PDPFB_MEMPOOL_USERPLANE;
+
+		stream->info.screen_base = (char __force __iomem *)stream->videomem;
+		stream->info.fix.smem_start = usermem.phys;
+		stream->info.fix.smem_len = usermem.len;
+
+		return 0;
+#endif
+
+#else /* !CONFIG_FB_PDP_USERMEM */
+		return -EINVAL;
+#endif /* !CONFIG_FB_PDP_USERMEM */
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+void pdpfb_str_videomem_free(struct pdpfb_stream *stream)
+{
+	struct fb_info *info = &stream->info;
+	struct pdpfb_mem_pool *pool;
+
+	if (stream->mem_pool == PDPFB_MEMPOOL_KERNEL) {
+		kfree(stream->videomem);
+	} else if (stream->mem_pool < PDPFB_MEMPOOL_NR_POOLS) {
+		pool = &pdpfb_priv->pools[stream->mem_pool];
+
+		if ((unsigned long)pool->base + pool->allocated ==
+		    (unsigned long)stream->videomem + stream->videomem_len) {
+			pool->allocated -= stream->videomem_len;
+		} else {
+			dev_warn(pdpfb_priv->device,
+				"free losing memory in pool '%s'",
+				pdpfb_mem_pool_names[stream->mem_pool]);
+		}
+	}
+
+	stream->videomem = NULL;
+	info->screen_base = NULL;
+	info->fix.smem_start = 0;
+	info->fix.smem_len = 0;
+	stream->mem_pool = PDPFB_MEMPOOL_NONE;
+}
+
+static int pdpfb_pool_alloc(struct pdpfb_mem_pool *pool,
+				struct pdpfb_stream *stream,
+				unsigned long len)
+{
+	struct fb_info *info = &stream->info;
+	unsigned long offset;
+	unsigned long len_aligned = (len + PAGE_SIZE - 1) & PAGE_MASK;
+
+	offset = pool->allocated;
+	if (offset + len > pool->size)
+		return -ENOMEM;
+	pool->allocated += len_aligned;
+	stream->videomem = (void *)((unsigned long)pool->base + offset);
+#ifdef PDP_SHARED_BASE
+	stream->videomem_offset = offset;
+#endif
+	stream->videomem_len = len_aligned;
+
+	info->screen_base = (char __force __iomem *)stream->videomem;
+	info->fix.smem_start = pool->phys_base + offset;
+	info->fix.smem_len = len;
+
+	return 0;
+}
+
+int pdpfb_str_videomem_alloc(struct pdpfb_priv *priv,
+				struct pdpfb_stream *stream)
+{
+	struct fb_info *info = &stream->info;
+	void *videomem = NULL;
+	unsigned long len;
+
+#ifdef CONFIG_FB_PDP_VID
+	if (stream->mem_pool == PDPFB_MEMPOOL_GFXMEM)
+		len = gfx_videomem_len;
+	else
+		len = vid_videomem_len;
+#else
+	len = gfx_videomem_len;
+#endif
+	if (!len)
+		return 0;
+
+	/* Try user memory pool */
+	if (priv->pools[PDPFB_MEMPOOL_USER].base) {
+		if (!pdpfb_pool_alloc(&priv->pools[PDPFB_MEMPOOL_USER],
+				      stream, len)) {
+			stream->mem_pool = PDPFB_MEMPOOL_USER;
+			return 0;
+		}
+#ifdef PDP_SHARED_BASE
+		goto err_nomem;
+#endif
+	}
+	/* Try default memory pool (depends on stream type) */
+	if (stream->mem_pool < PDPFB_MEMPOOL_NR_POOLS &&
+	    priv->pools[stream->mem_pool].base) {
+		if (pdpfb_pool_alloc(&priv->pools[stream->mem_pool], stream,
+				     priv->pools[stream->mem_pool].size))
+			goto err_nomem;
+		return 0;
+	}
+	/* Try combined memory pool */
+	if (priv->pools[PDPFB_MEMPOOL_MEM].base) {
+		if (pdpfb_pool_alloc(&priv->pools[PDPFB_MEMPOOL_MEM],
+				     stream, len))
+			goto err_nomem;
+		stream->mem_pool = PDPFB_MEMPOOL_MEM;
+		return 0;
+	}
+	/* Finally try kernel allocated memory */
+	videomem = kzalloc(len, GFP_KERNEL);
+	if (!videomem)
+		goto err_nomem;
+	stream->mem_pool = PDPFB_MEMPOOL_KERNEL;
+	stream->videomem = videomem;
+	stream->videomem_len = len;
+	info->screen_base = (char __force __iomem *)videomem;
+	info->fix.smem_start = __pa(videomem);
+	info->fix.smem_len = stream->videomem_len;
+	return 0;
+
+err_nomem:
+	dev_err(priv->device, "unable to allocate video memory\n");
+	return -ENOMEM;
+}
+
+static int pdpfb_probe_streams(struct pdpfb_priv *priv,
+			       struct platform_device *pdev)
+{
+	int i;
+	int error;
+
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		if (!stream)
+			continue;
+		stream->probed = 1;
+		error = stream->ops.probe(priv, pdev, &priv->mode);
+		if (error)
+			stream->probed = 0;
+		if (error == -ENOMEM)
+			dev_err(priv->device,
+				"unable to initialise fb %d, "
+				"not enough video memory\n",
+				i);
+	}
+
+	return 0;
+}
+
+static void pdpfb_remove_streams(struct pdpfb_priv *priv,
+				struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		if (stream && stream->probed)
+			stream->ops.remove(priv, pdev);
+	}
+}
+
+static int pdpfb_stop(struct pdpfb_priv *priv)
+{
+	pdpfb_configure(priv, PDPFB_POWER_FULLY_OFF);
+
+	return 0;
+}
+
+static void pdpfb_probe_caps(struct pdpfb_priv *priv)
+{
+	u32 palette_bpc = ((PDP_REV >= 0x010001) ? 8 : 6);
+
+	priv->palette_fmt.r.length
+		= priv->palette_fmt.g.length
+		= priv->palette_fmt.b.length
+		= palette_bpc;
+	priv->palette_fmt.b.offset = 0;
+	priv->palette_fmt.g.offset = palette_bpc;
+	priv->palette_fmt.r.offset = palette_bpc*2;
+}
+
+#ifdef PDP_SHARED_BASE
+static int pdpfb_init_mem(struct pdpfb_priv *priv,
+			  struct pdp_info *pdata)
+{
+	struct pdpfb_mem_pool *pool;
+	void *videomem;
+	unsigned long len;
+
+	if (unlikely(!pdata->hwops.set_shared_base)) {
+		dev_err(priv->device, "no set_shared_base in platform data\n");
+		return -EINVAL;
+	}
+	/* If fixed memory pool already exists, don't alloc from kernel mem. */
+	pool = &priv->pools[PDPFB_MEMPOOL_MEM];
+	if (pool->base) {
+		pdata->hwops.set_shared_base(pool->phys_base);
+		return 0;
+	}
+
+	len	= ((gfx_videomem_len + PAGE_SIZE - 1) & PAGE_MASK)
+#ifdef CONFIG_FB_PDP_VID
+		+ ((vid_videomem_len + PAGE_SIZE - 1) & PAGE_MASK)
+#endif
+		;
+	videomem = kzalloc(len, GFP_KERNEL);
+	if (!videomem) {
+		dev_err(priv->device, "unable to allocate video memory\n");
+		return -ENOMEM;
+	}
+	pool->base = videomem;
+	pool->phys_base = __pa(videomem);
+	pool->size = len;
+	pool->flags = PDPFB_MEMPOOLF_KERNEL;
+	pdata->hwops.set_shared_base(pool->phys_base);
+
+	return 0;
+}
+#else
+static inline int pdpfb_init_mem(struct pdpfb_priv *priv,
+				 struct pdp_info *pdata)
+{
+	return 0;
+}
+#endif
+
+static void pdpfb_free_pools(struct pdpfb_priv *priv)
+{
+	int i;
+	for (i = 0; i < PDPFB_MEMPOOL_NR_POOLS; ++i) {
+		if (!priv->pools[i].base)
+			continue;
+		if (priv->pools[i].flags & PDPFB_MEMPOOLF_KERNEL)
+			kfree(priv->pools[i].base);
+		else
+			iounmap((void __force __iomem *)priv->pools[i].base);
+	}
+}
+
+static int pdpfb_init_mempool(struct pdpfb_priv *priv, int type,
+			      unsigned long phys, unsigned long len)
+{
+	struct pdpfb_mem_pool *pool = &priv->pools[type];
+	/* First come first served */
+	if (unlikely(pool->base))
+		return -EBUSY;
+	pool->base = (void __force *)ioremap_cached(phys, len);
+	if (unlikely(!pool->base)) {
+		dev_err(priv->device, "cannot ioremap %s @0x%08lx:0x%lx\n",
+			pdpfb_mem_pool_names[type], phys, len);
+		return -ENOMEM;
+	}
+	dev_info(priv->device, "%s @0x%08lx:0x%lx (ioremapped to 0x%p)\n",
+			pdpfb_mem_pool_names[type],
+			phys, len, pool->base);
+	priv->pools[type].phys_base = phys;
+	priv->pools[type].size = len;
+	priv->pools[type].flags = 0;
+	/* Clear the memory */
+	memset(priv->pools[type].base, 0, priv->pools[type].size);
+	return 0;
+}
+
+static int pdpfb_probe_params(struct platform_device *pdev,
+			      struct pdpfb_priv *priv)
+{
+	if (videomem_base > 0 && videomem_len > 0)
+		pdpfb_init_mempool(priv, PDPFB_MEMPOOL_MEM,
+				   videomem_base, videomem_len);
+#ifndef PDP_SHARED_BASE
+	if (gfx_videomem_base > 0 && gfx_videomem_len > 0)
+		pdpfb_init_mempool(priv, PDPFB_MEMPOOL_GFXMEM,
+				   gfx_videomem_base, gfx_videomem_len);
+#ifdef CONFIG_FB_PDP_VID
+	if (vid_videomem_base > 0 && vid_videomem_len > 0)
+		pdpfb_init_mempool(priv, PDPFB_MEMPOOL_VIDMEM,
+				   vid_videomem_base, vid_videomem_len);
+#endif
+#endif
+	return 0;
+}
+
+static int pdpfb_probe_state(struct pdpfb_priv *priv)
+{
+	u32 sync_ctrl;
+	unsigned long pix_freq;
+
+	/* try and determine the current state */
+	sync_ctrl = pdpfb_read(priv, PDP_SYNCCTRL);
+	if (!GET_FIELD(sync_ctrl, PDP_SYNCCTRL_SYNCACTIVE))
+		goto done;
+
+	/* sync is active, what about the pixel clock? */
+	pix_freq = clk_get_rate(priv->pixel_clk);
+	if (!pix_freq)
+		goto done;
+	dev_dbg(priv->device, "initial pixel frequency = %lu Hz\n", pix_freq);
+
+	/* pixel clock active, try and extract the sync timings */
+	if (pdpfb_extract_sync(priv))
+		goto done;
+	dev_dbg(priv->device, "initial mode %ux%u sync=(%u,%u) margins=(%u,%u),(%u,%u)\n",
+		priv->mode.xres, priv->mode.yres,
+		priv->mode.vsync_len, priv->mode.hsync_len,
+		priv->mode.upper_margin, priv->mode.left_margin,
+		priv->mode.lower_margin, priv->mode.right_margin);
+
+	priv->mode.pixclock = KHZ2PICOS(pix_freq/1000);
+	pdpfb_pixel_clk_enable(priv);
+	if (GET_FIELD(sync_ctrl, PDP_SYNCCTRL_POWERDN))
+		priv->power = PDPFB_POWER_SYNC_ENABLED;
+	else
+		priv->power = PDPFB_POWER_BACKLIT;
+
+done:
+	dev_dbg(priv->device, "initial power = %d\n", priv->power);
+
+	return 0;
+}
+
+static int pdpfb_probe_mems(struct platform_device *pdev,
+			    struct pdpfb_priv *priv)
+{
+	struct resource *res_mem;
+	int i;
+	int type;
+	unsigned long len;
+	for (i = 0; ; ++i) {
+		res_mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res_mem)
+			break;
+
+		type = res_mem->flags & IORESOURCE_BITS;
+		len = res_mem->end - res_mem->start;
+		if (type < PDPFB_MEMPOOL_NR_POOLS) {
+			/* Videomem pool */
+#ifdef PDP_SHARED_BASE
+			if (unlikely(type != PDPFB_MEMPOOL_MEM))
+				continue;
+#endif
+#ifndef CONFIG_FB_PDP_VID
+			if (unlikely(type == PDPFB_MEMPOOL_VIDMEM))
+				continue;
+#endif
+			pdpfb_init_mempool(priv, type,
+					   res_mem->start, len);
+		} else if (type == PDPFB_IORES_PDP) {
+			/* PDP register region */
+			if (priv->base) {
+				dev_warn(&pdev->dev,
+					"multiple pdp register ioresources\n");
+				continue;
+			}
+			priv->base = ioremap_nocache(res_mem->start, len);
+			if (!priv->base)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int pdpfb_probe(struct platform_device *pdev)
+{
+	struct pdpfb_priv *priv;
+	struct pdp_info *pdata;
+	struct resource *res_irq;
+	int error;
+
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "no platform data defined\n");
+		error = -EINVAL;
+		goto err0;
+	}
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res_irq == NULL) {
+		dev_err(&pdev->dev, "cannot find IRQ resource\n");
+		error = -ENOENT;
+		goto err0;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		error = -ENOMEM;
+		goto err0;
+	}
+	spin_lock_init(&priv->upd.lock);
+	spin_lock_init(&priv->irq_lock);
+	pdpfb_lut_init(priv);
+
+	platform_set_drvdata(pdev, priv);
+	pdata = pdev->dev.platform_data;
+	priv->mode = pdata->lcd_cfg;
+	priv->power = PDPFB_POWER_FULLY_OFF;
+	priv->final_blank = -1;
+
+	priv->device = &pdev->dev;
+
+	error = pdpfb_probe_params(pdev, priv);
+	if (error)
+		goto err1;
+
+	error = pdpfb_probe_mems(pdev, priv);
+	if (error)
+		goto err1;
+	if (!priv->base) {
+		dev_err(&pdev->dev, "no pdp register ioresource\n");
+		error = -ENOMEM;
+		goto err1;
+	}
+
+	priv->pdp_clk = clk_get(priv->device, "pdp");
+	if (IS_ERR(priv->pdp_clk)) {
+		dev_err(&pdev->dev, "could not get pdp clock resource\n");
+		error = PTR_ERR(priv->pdp_clk);
+		goto err1;
+	}
+	clk_prepare(priv->pdp_clk);
+
+	priv->pixel_clk = clk_get(priv->device, "pixel");
+	if (IS_ERR(priv->pixel_clk)) {
+		dev_err(&pdev->dev, "could not get pixel clock resource\n");
+		error = PTR_ERR(priv->pixel_clk);
+		goto err2;
+	}
+
+	pdpfb_pdp_clk_enable(priv);
+
+	pdpfb_probe_state(priv);
+
+	error = pdpfb_init_mem(priv, pdata);
+	if (error)
+		goto err3;
+
+	priv->irq = res_irq->start;
+	error = request_irq(priv->irq, pdpfb_interrupt, 0, "pdp", priv);
+	if (error) {
+		dev_err(&pdev->dev, "cannot register IRQ %d (%d)\n",
+				priv->irq,
+				error);
+		error = -EIO;
+		goto err3;
+	}
+
+	pdpfb_probe_caps(priv);
+
+	pdpfb_priv = priv;
+	/* don't bother counting vsync interrupts with Pdump to reduce noise */
+#ifndef CONFIG_FB_PDP_PDUMP
+	pdpfb_register_isr(pdpfb_vsync_isr, priv, PDPFB_IRQ_VEVENT0);
+#endif
+
+	priv->streams[0] = pdpfb_gfx_get_stream();
+	priv->streams[0]->mode_master = pdata->lcd_size_cfg.dynamic_mode;
+#ifdef CONFIG_FB_PDP_VID
+	priv->streams[1] = pdpfb_vid_get_stream();
+#endif
+
+	/*
+	 * We need to be able to work out the pixel clock that we can achieve
+	 * prior to registering the framebuffer, else fbcon can try and set the
+	 * mode and reset the frequency.
+	 */
+	pdpfb_update_refresh_rate(priv);
+
+	if (pdpfb_probe_streams(priv, pdev))
+		goto err4;
+
+	/*
+	 * If sync is already enabled, update each stream's margins since it
+	 * won't get done by pdpfb_configure
+	 */
+	if (priv->power <= PDPFB_POWER_SYNC_ENABLED)
+		pdpfb_update_all_margins(priv);
+
+	/* Turn everything on in the right sequence */
+	console_lock();
+	pdpfb_configure(priv, PDPFB_POWER_BACKLIT);
+	console_unlock();
+
+	_pdpfb_set_irqs(priv);
+
+	pdpfb_ident(priv);
+
+	return 0;
+err4:
+#ifndef CONFIG_FB_PDP_PDUMP
+	pdpfb_unregister_isr(pdpfb_vsync_isr, priv, PDPFB_IRQ_VEVENT0);
+#endif
+	pdpfb_priv = NULL;
+	free_irq(priv->irq, priv);
+err3:
+	pdpfb_pixel_clk_disable(priv);
+	pdpfb_pdp_clk_disable(priv);
+	clk_put(priv->pixel_clk);
+err2:
+	clk_unprepare(priv->pdp_clk);
+	clk_put(priv->pdp_clk);
+err1:
+	if (priv->base)
+		iounmap(priv->base);
+	pdpfb_free_pools(priv);
+
+	kfree(priv);
+err0:
+	return error;
+}
+
+static int pdpfb_remove(struct platform_device *pdev)
+{
+	struct pdpfb_priv *priv = platform_get_drvdata(pdev);
+
+	console_lock();
+	pdpfb_stop(priv);
+	console_unlock();
+	pdpfb_pdp_clk_disable(priv);
+
+#ifndef CONFIG_FB_PDP_PDUMP
+	pdpfb_unregister_isr(pdpfb_vsync_isr, priv, PDPFB_IRQ_VEVENT0);
+#endif
+	pdpfb_priv = NULL;
+	pdpfb_remove_streams(priv, pdev);
+	free_irq(priv->irq, priv);
+	clk_put(priv->pixel_clk);
+	clk_unprepare(priv->pdp_clk);
+	clk_put(priv->pdp_clk);
+	if (priv->base)
+		iounmap(priv->base);
+	pdpfb_free_pools(priv);
+
+	kfree(priv);
+	return 0;
+}
+
+static void pdpfb_shutdown(struct platform_device *pdev)
+{
+	struct pdpfb_priv *priv = platform_get_drvdata(pdev);
+
+	console_lock();
+	pdpfb_stop(priv);
+	console_unlock();
+}
+
+#ifdef CONFIG_PM
+static int pdpfb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct pdpfb_priv *priv = platform_get_drvdata(pdev);
+	int i;
+
+	console_lock();
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		if (!stream || !stream->probed)
+			continue;
+		fb_set_suspend(&stream->info, 1);
+	}
+	pdpfb_stop(priv);
+	pdpfb_pdp_clk_disable(priv);
+	console_unlock();
+
+	return 0;
+}
+
+static int pdpfb_resume(struct platform_device *pdev)
+{
+	struct pdpfb_priv *priv = platform_get_drvdata(pdev);
+#ifdef PDP_SHARED_BASE
+	struct pdp_info *pdata = priv->device->platform_data;
+#endif
+	int i;
+
+	console_lock();
+	pdpfb_pdp_clk_enable(priv);
+#ifdef PDP_SHARED_BASE
+	/* restore shared base pointer */
+	if (pdata->hwops.set_shared_base) {
+		struct pdpfb_mem_pool *pool;
+		if (priv->pools[PDPFB_MEMPOOL_USER].base)
+			pool = &priv->pools[PDPFB_MEMPOOL_USER];
+		else
+			pool = &priv->pools[PDPFB_MEMPOOL_MEM];
+		pdata->hwops.set_shared_base(pool->phys_base);
+	}
+#endif
+	pdpfb_configure(priv, PDPFB_POWER_BACKLIT);
+	for (i = 0; i < PDPFB_STREAM_NR; ++i) {
+		struct pdpfb_stream *stream = priv->streams[i];
+		if (!stream || !stream->probed)
+			continue;
+		fb_set_suspend(&stream->info, 0);
+	}
+	console_unlock();
+
+	return 0;
+}
+#else
+#define pdpfb_suspend NULL
+#define pdpfb_resume NULL
+#endif	/* CONFIG_PM */
+
+static struct platform_driver pdpfb_driver = {
+	.driver		= {
+		.name		= "pdpfb",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= pdpfb_probe,
+	.remove		= pdpfb_remove,
+	.shutdown	= pdpfb_shutdown,
+	.suspend	= pdpfb_suspend,
+	.resume		= pdpfb_resume,
+};
+
+static int pdpfb_init(void)
+{
+	return platform_driver_register(&pdpfb_driver);
+}
+
+static void pdpfb_exit(void)
+{
+	platform_driver_unregister(&pdpfb_driver);
+}
+
+module_init(pdpfb_init);
+module_exit(pdpfb_exit);
+
+MODULE_DESCRIPTION("PDP Framebuffer driver");
+MODULE_AUTHOR("Will Newton <will.newton@imgtec.com>");
+MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/pdpfb.h b/drivers/video/pdpfb.h
new file mode 100644
index 0000000..998379c
--- /dev/null
+++ b/drivers/video/pdpfb.h
@@ -0,0 +1,116 @@
+/*
+ * PDP Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _PDPFB_H
+#define _PDPFB_H
+
+#include <linux/fb.h>
+#include <video/pdpfb.h>
+
+/* display colour palettes on mode changes */
+/*#define PDP_REMODE_COLOUR_PALETTES*/
+
+#define PDP_PALETTE_NR 256
+
+/* Stream interface */
+
+struct platform_device;
+struct pdp_info;
+
+struct pdpfb_priv;
+struct pdpfb_stream;
+
+struct pdpfb_stream_ops {
+	int (*probe)(struct pdpfb_priv *priv, struct platform_device *pdev,
+		     const struct fb_videomode *mode);
+	int (*remove)(struct pdpfb_priv *priv, struct platform_device *pdev);
+	int (*check_geom)(struct pdpfb_priv *priv, struct pdpfb_geom *geom);
+	int (*set_geom)(struct pdpfb_priv *priv);
+	int (*configure)(struct pdpfb_priv *priv);
+	int (*configure_addr)(struct pdpfb_priv *priv);
+};
+
+struct pdpfb_stream_regs {
+	u16 surf;
+	u16 blend;
+	u16 blend2;
+	u16 ctrl;
+	u16 posn;
+	u16 addr[3];
+	u16 gamma;
+	u16 gamma_stride;
+};
+
+struct pdpfb_stream_caps {
+	unsigned int gamma;	/* number of gamma entries */
+};
+
+struct pdpfb_stream {
+	struct fb_info info;
+	int probed;
+
+	unsigned char mem_pool;
+	void *videomem;
+#ifdef PDP_SHARED_BASE
+	unsigned long videomem_offset;
+#endif
+	unsigned long videomem_len;
+
+	int enable;
+	int ckey_en;
+	int ckey_src;	/* see PDP_STRXCTRL_CKEYSRC_* */
+	struct pdpfb_ckey ckey;
+	u32 blend_mode;
+	u8 global_alpha;
+	struct pdpfb_geom geom;
+	int mode_master; /* framebuffer controls screen mode */
+
+	struct pdpfb_stream_ops ops;
+	struct pdpfb_stream_regs regs;
+	struct pdpfb_stream_caps caps;
+};
+
+struct pdp_info *pdpfb_get_platform_data(struct pdpfb_priv *priv);
+u32 *pdpfb_get_pseudo_palette(struct pdpfb_priv *priv);
+void pdpfb_enable_palette(struct pdpfb_priv *priv);
+void pdpfb_disable_palette(struct pdpfb_priv *priv);
+
+void pdpfb_str_videomem_free(struct pdpfb_stream *stream);
+int pdpfb_str_videomem_alloc(struct pdpfb_priv *priv,
+				struct pdpfb_stream *stream);
+
+void pdpfb_update_margins(struct pdpfb_priv *priv,
+			struct pdpfb_stream *stream);
+int pdpfb_str_ioctl(struct pdpfb_priv *priv, struct pdpfb_stream *stream,
+			unsigned int cmd, unsigned long arg);
+
+int pdpfb_setcolreg(u_int regno,
+		u_int red, u_int green, u_int blue,
+		u_int trans, struct fb_info *info);
+int pdpfb_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+int pdpfb_blank(int blank, struct fb_info *info);
+
+unsigned long pdpfb_get_line_length(int xres_virtual, int bpp);
+int pdpfb_configure_sync(struct pdpfb_priv *priv);
+void pdpfb_set_mode(struct pdpfb_priv *priv, struct fb_var_screeninfo *var);
+int pdpfb_update_pixclock(struct pdpfb_priv *priv, unsigned int pixclock);
+
+/* Register access */
+
+void pdpfb_clock_write(struct pdpfb_priv *priv,
+		      unsigned int reg_offs, unsigned int data);
+unsigned int pdpfb_clock_read(struct pdpfb_priv *priv,
+			     unsigned int reg_offs);
+void pdpfb_write(struct pdpfb_priv *priv,
+		      unsigned int reg_offs, unsigned int data);
+unsigned int pdpfb_read(struct pdpfb_priv *priv,
+			     unsigned int reg_offs);
+
+#endif
diff --git a/drivers/video/pdpfb_gfx.c b/drivers/video/pdpfb_gfx.c
new file mode 100644
index 0000000..109e761
--- /dev/null
+++ b/drivers/video/pdpfb_gfx.c
@@ -0,0 +1,594 @@
+/*
+ * PDP Desktop Graphics Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+
+#include <asm/soc-chorus2/pdp.h>
+#include "pdpfb.h"
+#include "pdpfb_regs.h"
+#include "pdpfb_gfx.h"
+
+#define THIS_STREAM (&pdpfb_gfx_stream.stream)
+#define THIS_PRIV (&pdpfb_gfx_stream)
+
+static struct {
+	/* information for setting up pdp */
+	u32 id;
+	int uselut;
+	/* information for matching */
+	int bpp, grayscale;
+	struct fb_bitfield a, r, g, b;
+} pixfmts[] = {
+	{
+		.id = PDP_STR1SURF_PIXFMT_RGB8,
+		.uselut = 1,
+		.bpp = 8,
+		.r = { .offset = 0,  .length = 8 },
+		.g = { .offset = 0,  .length = 8 },
+		.b = { .offset = 0,  .length = 8 },
+	},
+	{
+		.id = PDP_STR1SURF_PIXFMT_RGB8,
+		.bpp = 8,
+		.grayscale = 1,
+		.r = { .offset = 0,  .length = 8 },
+		.g = { .offset = 0,  .length = 8 },
+		.b = { .offset = 0,  .length = 8 },
+	},
+	{
+		.id = PDP_STR1SURF_PIXFMT_RGB565,
+		.bpp = 16,
+		.r = { .offset = 11, .length = 5 },
+		.g = { .offset = 5,  .length = 6 },
+		.b = { .offset = 0,  .length = 5 },
+	},
+	{
+		.id = PDP_STR1SURF_PIXFMT_ARGB4444,
+		.bpp = 16,
+		.a = { .offset = 12, .length = 4 },
+		.r = { .offset = 8,  .length = 4 },
+		.g = { .offset = 4,  .length = 4 },
+		.b = { .offset = 0,  .length = 4 },
+	},
+	{
+		.id = PDP_STR1SURF_PIXFMT_ARGB1555,
+		.bpp = 16,
+		.a = { .offset = 15, .length = 1 },
+		.r = { .offset = 10, .length = 5 },
+		.g = { .offset = 5,  .length = 5 },
+		.b = { .offset = 0,  .length = 5 },
+	},
+#if PDP_REV >= 0x010001
+	{
+		.id = PDP_STR1SURF_PIXFMT_RGB888,
+		.bpp = 24,
+		.r = { .offset = 16, .length = 8 },
+		.g = { .offset = 8,  .length = 8 },
+		.b = { .offset = 0,  .length = 8 },
+	},
+	{
+		.id = PDP_STR1SURF_PIXFMT_ARGB8888,
+		.bpp = 32,
+		.a = { .offset = 24, .length = 8 },
+		.r = { .offset = 16, .length = 8 },
+		.g = { .offset = 8,  .length = 8 },
+		.b = { .offset = 0,  .length = 8 },
+	},
+#endif
+};
+
+static struct pdpfb_gfx_stream_priv {
+	struct pdpfb_stream stream;
+	int pixfmt; /* index into pixfmts */
+	/* fields to update on vevent */
+#ifdef PDP_SHARED_BASE
+	unsigned int base_addr;
+#endif
+} pdpfb_gfx_stream;
+
+enum {
+	pixfmt_match_none = 0,
+	pixfmt_match_bpp,
+	pixfmt_match_alpha,
+	pixfmt_match_bitsorgray,
+	pixfmt_match_exact,
+};
+
+static int pdpfb_gfx_match_var(struct pdpfb_priv *priv,
+				struct fb_var_screeninfo *var)
+{
+	int i;
+	int best_pixfmt = -1;
+	int best_rating = pixfmt_match_none;
+
+	if (var->bits_per_pixel <= 8)
+		var->bits_per_pixel = 8;
+#if PDP_REV < 0x010001
+	else
+		var->bits_per_pixel = 16;
+#else
+	else if (var->bits_per_pixel <= 16)
+		var->bits_per_pixel = 16;
+	else if (var->bits_per_pixel <= 24)
+		var->bits_per_pixel = 24;
+	else
+		var->bits_per_pixel = 32;
+#endif
+
+#define PIXFMT_MATCH(rating, cond) \
+		if (!(cond)) \
+			continue; \
+		if (best_rating < (rating)) { \
+			best_rating = (rating); \
+			best_pixfmt = i; \
+			if ((rating) == pixfmt_match_exact) \
+				break; \
+		}
+
+	for (i = 0; i < ARRAY_SIZE(pixfmts); ++i) {
+		int match_bits, match_gray;
+
+		PIXFMT_MATCH(pixfmt_match_bpp,
+			var->bits_per_pixel == pixfmts[i].bpp);
+		PIXFMT_MATCH(pixfmt_match_alpha,
+			(var->transp.length != 0)
+			== (pixfmts[i].a.length != 0));
+
+		match_bits = (var->red.offset == pixfmts[i].r.offset &&
+			var->red.length == pixfmts[i].r.length &&
+			var->green.offset == pixfmts[i].g.offset &&
+			var->green.length == pixfmts[i].g.length &&
+			var->blue.offset == pixfmts[i].b.offset &&
+			var->blue.length == pixfmts[i].b.length &&
+			var->transp.offset == pixfmts[i].a.offset &&
+			var->transp.length == pixfmts[i].a.length);
+		match_gray = (var->grayscale == pixfmts[i].grayscale);
+
+		PIXFMT_MATCH(pixfmt_match_bitsorgray, match_bits || match_gray);
+		PIXFMT_MATCH(pixfmt_match_exact, match_bits && match_gray);
+	}
+
+#undef PIXFMT_MATCH
+
+	var->bits_per_pixel = pixfmts[best_pixfmt].bpp;
+	var->red = pixfmts[best_pixfmt].r;
+	var->green = pixfmts[best_pixfmt].g;
+	var->blue = pixfmts[best_pixfmt].b;
+	var->transp = pixfmts[best_pixfmt].a;
+	var->grayscale = pixfmts[best_pixfmt].grayscale;
+	return best_pixfmt;
+}
+
+static int pdpfb_gfx_check_geom(struct pdpfb_priv *priv,
+				struct pdpfb_geom *geom)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+
+	/* only doubling and halving */
+	if (geom->w) {
+		if (geom->w*4 <= info->var.xres*3)
+			geom->w = info->var.xres / 2;
+		else if (geom->w*2 >= info->var.xres*3)
+			geom->w = info->var.xres * 2;
+		else
+			geom->w = 0;
+	}
+	if (geom->h) {
+		if (geom->h*4 <= info->var.yres*3)
+			geom->h = info->var.yres / 2;
+		else if (geom->h*2 >= info->var.yres*3)
+			geom->h = info->var.yres * 2;
+		else
+			geom->h = 0;
+	}
+	return 0;
+}
+
+static int pdpfb_gfx_check_var(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+	const struct fb_videomode *mode;
+	u_long line_length;
+
+	if (!var->xres)
+		var->xres = 1;
+	if (!var->yres)
+		var->yres = 1;
+
+	/* if not fixed to native res, don't mode match unless mode master */
+#ifndef CONFIG_FB_PDP_GFX_FIX_NATIVE_RES
+	if (THIS_STREAM->mode_master)
+#endif
+	{
+		mode = fb_find_best_mode(var, &info->modelist);
+		if (mode) {
+			unsigned int xres_virtual = var->xres_virtual;
+			unsigned int yres_virtual = var->yres_virtual;
+			unsigned int xoffset = var->xoffset;
+			unsigned int yoffset = var->yoffset;
+			unsigned int pixclock = var->pixclock;
+			fb_videomode_to_var(var, mode);
+			var->xres_virtual = xres_virtual;
+			var->yres_virtual = yres_virtual;
+			var->xoffset = xoffset;
+			var->yoffset = yoffset;
+			/* on a fixed screen, stick to the requested pixclock */
+			if (!THIS_STREAM->mode_master)
+				var->pixclock = pixclock;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	if (var->xres > var->xres_virtual)
+		var->xres_virtual = var->xres;
+	if (var->yres > var->yres_virtual)
+		var->yres_virtual = var->yres;
+
+	if (var->xres_virtual < var->xoffset + var->xres)
+		var->xres_virtual = var->xoffset + var->xres;
+	if (var->yres_virtual < var->yoffset + var->yres)
+		var->yres_virtual = var->yoffset + var->yres;
+
+	pdpfb_gfx_match_var(priv, var);
+
+	/* Memory limit */
+	line_length = pdpfb_get_line_length(var->xres_virtual,
+						var->bits_per_pixel);
+	if (line_length * var->yres_virtual > THIS_STREAM->videomem_len)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int pdpfb_gfx_ioctl(struct fb_info *info, unsigned int cmd,
+				unsigned long arg)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+	return pdpfb_str_ioctl(priv, THIS_STREAM, cmd, arg);
+}
+
+#ifdef PDP_REMODE_COLOUR_PALETTES
+/*
+ * Fill the whole virtual framebuffer with a colour test pattern so the
+ * palette/pixel format can be inspected after a remode: a 16x16 grid of
+ * all 256 palette indices at 8bpp (index = x | y<<4), or a 256x256
+ * two-byte gradient at 16bpp.  Other depths are left untouched.
+ */
+static void pdpfb_gfx_draw_colour_palette(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+
+	if (info->var.bits_per_pixel == 8) {
+		u8 *buf = (u8 *)THIS_STREAM->videomem;
+		int i, j;
+		for (j = 0; j < info->var.yres_virtual; ++j) {
+			for (i = 0; i < info->var.xres_virtual; ++i) {
+				int x, y;
+				x = 16*i/info->var.xres_virtual;
+				y = 16*j/info->var.yres_virtual;
+				/* rows step through the high nibble */
+				buf[j*info->fix.line_length/sizeof(buf[0]) + i]
+					= x|(y<<4);
+			}
+		}
+	} else if (info->var.bits_per_pixel == 16) {
+		u16 *buf = (u16 *)THIS_STREAM->videomem;
+		int i, j;
+		for (j = 0; j < info->var.yres_virtual; ++j) {
+			for (i = 0; i < info->var.xres_virtual; ++i) {
+				int x, y;
+				x = 256*i/info->var.xres_virtual;
+				y = 256*j/info->var.yres_virtual;
+				/* high byte from x, low byte from y */
+				buf[j*info->fix.line_length/sizeof(buf[0]) + i]
+					= (x<<8) | y;
+			}
+		}
+	}
+}
+#endif
+
+/*
+ * Apply the current var to the fixed screeninfo and hardware:
+ * recompute the line length, program the source stride into STR1POSN
+ * (the SRCSTRIDE field is in 16-byte units, minus one), and select the
+ * FB visual: palette-based at 8bpp, true colour otherwise.
+ * Always returns 0.
+ */
+static int pdpfb_gfx_change_mode(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 str1posn;
+
+	info->fix.line_length = pdpfb_get_line_length(info->var.xres_virtual,
+						info->var.bits_per_pixel);
+	str1posn = pdpfb_read(priv, PDP_STR1POSN);
+	SET_FIELD(str1posn, PDP_STRXPOSN_SRCSTRIDE,
+			info->fix.line_length / 16 - 1);
+	pdpfb_write(priv, PDP_STR1POSN, str1posn);
+
+	if (info->var.bits_per_pixel == 8)
+		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+	else
+		info->fix.visual = FB_VISUAL_TRUECOLOR;
+
+	return 0;
+}
+
+/*
+ * Program the pixel/line doubling and halving bits in the stream's
+ * BLEND2 register so the output is scaled towards the configured
+ * geometry.  A zero geom width/height leaves that axis unscaled.
+ * Always returns 0.
+ */
+static int pdpfb_gfx_configure_scale(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	struct pdpfb_geom *geom = &THIS_STREAM->geom;
+	u32 pix_double = 0, pix_halve = 0;
+	u32 line_double = 0, line_halve = 0;
+	u32 blend2;
+
+	if (geom->w) {
+		pix_double = (geom->w > info->var.xres) ? 1 : 0;
+		pix_halve = (geom->w < info->var.xres) ? 1 : 0;
+	}
+	if (geom->h) {
+		line_double = (geom->h > info->var.yres) ? 1 : 0;
+		line_halve = (geom->h < info->var.yres) ? 1 : 0;
+	}
+
+	blend2 = pdpfb_read(priv, THIS_STREAM->regs.blend2);
+	SET_FIELD(blend2, PDP_STRXBLEND2_PIXDOUBLE, pix_double);
+	SET_FIELD(blend2, PDP_STRXBLEND2_PIXHALVE, pix_halve);
+	SET_FIELD(blend2, PDP_STRXBLEND2_LINEDOUBLE, line_double);
+	SET_FIELD(blend2, PDP_STRXBLEND2_LINEHALVE, line_halve);
+	pdpfb_write(priv, THIS_STREAM->regs.blend2, blend2);
+
+	return 0;
+}
+
+#ifdef PDP_SHARED_BASE
+/*
+ * VEVENT0 interrupt handler: apply the base address latched by
+ * pdpfb_gfx_configure_addr().  It unregisters itself first so it runs
+ * exactly once per request, then writes bits 25:4 of the address into
+ * the BASEADDR field of STR1CTRL.
+ */
+static void pdpfb_gfx_apply_addr(void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = (struct pdpfb_priv *)arg;
+	u32 str_ctrl;
+
+	pdpfb_unregister_isr(pdpfb_gfx_apply_addr, priv,
+			     PDPFB_IRQ_VEVENT0);
+
+	str_ctrl = pdpfb_read(priv, PDP_STR1CTRL);
+	SET_FIELD(str_ctrl, PDP_STRXCTRL_BASEADDR, THIS_PRIV->base_addr >> 4);
+	pdpfb_write(priv, PDP_STR1CTRL, str_ctrl);
+}
+#endif
+
+/*
+ * Program the framebuffer start address for the current panning
+ * offsets.  Without PDP_SHARED_BASE the absolute address is written
+ * directly to STR1ADDR; with PDP_SHARED_BASE the (pool-relative)
+ * address is latched and applied from the VEVENT0 interrupt, i.e.
+ * inside the safe update region.  Always returns 0.
+ */
+static int pdpfb_gfx_configure_addr(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 buf_offset, buf_start;
+
+	/* byte offset of the first visible pixel within video memory */
+	buf_offset = info->var.yoffset * info->fix.line_length
+		+ info->var.xoffset * info->var.bits_per_pixel / 8;
+	buf_start = buf_offset;
+#ifndef PDP_SHARED_BASE
+	buf_start += info->fix.smem_start;
+	pdpfb_write(priv, PDP_STR1ADDR, buf_start >> 4);
+#else
+	buf_start += THIS_STREAM->videomem_offset;
+	THIS_PRIV->base_addr = buf_start;
+	pdpfb_register_isr(pdpfb_gfx_apply_addr, priv,
+			   PDPFB_IRQ_VEVENT0);
+#endif
+
+	return 0;
+}
+
+/*
+ * Configure the STR1 surface for the current mode: width/height, pixel
+ * format and LUT usage.  Palette updates are switched on or off only
+ * when the USELUT bit actually changes.  Finishes by updating stride /
+ * visual and the scaling bits.  Always returns 0.
+ */
+static int pdpfb_gfx_configure(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 str1surf;
+	int pixfmt = THIS_PRIV->pixfmt;
+
+	str1surf = pdpfb_read(priv, PDP_STR1SURF);
+	/* enable/disable palette updates if uselut is changing */
+	if (pixfmts[pixfmt].uselut) {
+		if (!GET_FIELD(str1surf, PDP_STR1SURF_USELUT))
+			pdpfb_enable_palette(priv);
+	} else if (GET_FIELD(str1surf, PDP_STR1SURF_USELUT)) {
+		pdpfb_disable_palette(priv);
+	}
+	/* hardware fields hold size - 1 */
+	SET_FIELD(str1surf, PDP_STRXSURF_WIDTH, info->var.xres - 1);
+	SET_FIELD(str1surf, PDP_STRXSURF_HEIGHT, info->var.yres - 1);
+	SET_FIELD(str1surf, PDP_STR1SURF_USELUT, pixfmts[pixfmt].uselut);
+	SET_FIELD(str1surf, PDP_STR1SURF_PIXFMT, pixfmts[pixfmt].id);
+	pdpfb_write(priv, PDP_STR1SURF, str1surf);
+
+	pdpfb_gfx_change_mode(priv);
+	pdpfb_gfx_configure_scale(priv);
+
+#ifdef PDP_REMODE_COLOUR_PALETTES
+	pdpfb_gfx_draw_colour_palette(priv);
+#endif
+
+	return 0;
+}
+
+/*
+ * fb_pan_display handler: validate the requested panning offsets
+ * against the virtual resolution, then reprogram the stream base
+ * address for the new offsets.
+ */
+static int pdpfb_gfx_pan_display(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+
+	/* the panned viewport must stay inside the virtual area */
+	if (var->xoffset + info->var.xres > info->var.xres_virtual)
+		return -EINVAL;
+	if (var->yoffset + info->var.yres > info->var.yres_virtual)
+		return -EINVAL;
+
+	info->var.xoffset = var->xoffset;
+	info->var.yoffset = var->yoffset;
+	pdpfb_gfx_configure_addr(priv);
+
+	return 0;
+}
+
+/*
+ * fb_set_par handler for the graphics stream.
+ *
+ * Re-matches the pixel format, then either hands the whole mode to
+ * pdpfb_set_mode() (when this stream owns the display timing) or only
+ * reprograms the address/surface registers and adjusts the pixel
+ * clock.  When a scaled geometry is configured, the dot clock is
+ * scaled by xres*yres/(w*h) - presumably to keep the effective refresh
+ * rate constant under pixel/line doubling (TODO confirm).
+ */
+static int pdpfb_gfx_set_par(struct fb_info *info)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+	int xres, yres, w, h;
+	u32 pix_clock;
+
+	THIS_PRIV->pixfmt = pdpfb_gfx_match_var(priv, &info->var);
+	if (THIS_STREAM->mode_master) {
+		pdpfb_set_mode(priv, &info->var);
+	} else {
+		pdpfb_gfx_configure_addr(priv);
+		pdpfb_gfx_configure(priv);
+
+		/*
+		 * Update pixel clock if it's changed, otherwise just update
+		 * margins.
+		 */
+		pix_clock = info->var.pixclock;
+		if (THIS_STREAM->geom.w || THIS_STREAM->geom.h) {
+			xres = info->var.xres;
+			yres = info->var.yres;
+			w = (THIS_STREAM->geom.w ? THIS_STREAM->geom.w : xres);
+			h = (THIS_STREAM->geom.h ? THIS_STREAM->geom.h : yres);
+			pix_clock = pix_clock * xres * yres / (w * h);
+		}
+		if (!pdpfb_update_pixclock(priv, pix_clock))
+			pdpfb_update_margins(priv, THIS_STREAM);
+	}
+
+	/* panning steps in 16-byte units: 16*8/bpp pixels per step */
+	info->fix.xpanstep = 16 * 8 / info->var.bits_per_pixel;
+
+	return 0;
+}
+
+
+/*
+ * Template fixed screeninfo for the graphics framebuffer; xpanstep is
+ * recomputed from the bit depth at probe/set_par time.
+ */
+static struct fb_fix_screeninfo pdpfb_gfx_fix  = {
+	.id =		"pdp",
+	.type =		FB_TYPE_PACKED_PIXELS,
+	.xpanstep =	8,
+	.ypanstep =	1,
+	.accel =	FB_ACCEL_IMG_PDP_1,
+};
+
+/*
+ * fb_ops for the graphics stream.  Drawing uses the generic software
+ * cfb_* helpers; panning, mode and ioctl handling are stream-specific.
+ */
+static struct fb_ops pdpfb_gfx_ops = {
+	.fb_setcolreg	= pdpfb_setcolreg,
+	.fb_setcmap	= pdpfb_setcmap,
+	.fb_blank	= pdpfb_blank,
+	.fb_pan_display	= pdpfb_gfx_pan_display,
+	.fb_check_var	= pdpfb_gfx_check_var,
+	.fb_set_par	= pdpfb_gfx_set_par,
+	.fb_ioctl	= pdpfb_gfx_ioctl,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+};
+
+/*
+ * Initialise and register the graphics framebuffer.
+ *
+ * Fills in var/fix from the platform data and the initial @mode,
+ * validates the pixel format, allocates the stream's video memory
+ * (checking the mode actually fits in it) and registers the
+ * framebuffer.  Returns 0 on success or a negative errno, releasing
+ * everything acquired so far on failure.
+ */
+static int pdpfb_gfx_probe(struct pdpfb_priv *priv,
+				struct platform_device *pdev,
+				const struct fb_videomode *mode)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	struct pdp_info *pdata = pdpfb_get_platform_data(priv);
+	int error;
+	u_long line_length;
+
+	info->device = &pdev->dev;
+	info->fbops = &pdpfb_gfx_ops;
+	info->var.xres = info->var.xres_virtual = mode->xres;
+	info->var.yres = info->var.yres_virtual = mode->yres;
+	info->var.width = pdata->lcd_size_cfg.width;
+	info->var.height = pdata->lcd_size_cfg.height;
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->var.bits_per_pixel = pdata->bpp;
+	/* a negative match result means the format is unsupported */
+	error = THIS_PRIV->pixfmt = pdpfb_gfx_match_var(priv, &info->var);
+	if (error < 0)
+		goto err0;
+
+	info->var.pixclock = mode->pixclock;
+	info->var.hsync_len = mode->hsync_len;
+	info->var.left_margin = mode->left_margin;
+	info->var.right_margin = mode->right_margin;
+	info->var.vsync_len = mode->vsync_len;
+	info->var.upper_margin = mode->upper_margin;
+	info->var.lower_margin = mode->lower_margin;
+
+	info->fix = pdpfb_gfx_fix;
+	/* panning steps in 16-byte units: 16*8/bpp pixels per step */
+	info->fix.xpanstep = 16 * 8 / info->var.bits_per_pixel;
+
+	error = pdpfb_str_videomem_alloc(priv, THIS_STREAM);
+	if (error)
+		goto err0;
+
+	/* make sure the initial mode fits in the allocated memory */
+	line_length = pdpfb_get_line_length(info->var.xres_virtual,
+					info->var.bits_per_pixel);
+	if (line_length * info->var.yres_virtual > THIS_STREAM->videomem_len) {
+		error = -ENOMEM;
+		goto err1;
+	}
+
+	info->pseudo_palette = pdpfb_get_pseudo_palette(priv);
+	info->flags = FBINFO_FLAG_DEFAULT
+			| FBINFO_HWACCEL_XPAN
+			| FBINFO_HWACCEL_YPAN;
+
+#ifdef PDP_GAMMA
+	THIS_STREAM->caps.gamma = PDP_GAMMA;
+#endif
+
+	error = register_framebuffer(info);
+	if (error < 0)
+		goto err1;
+
+	dev_info(&pdev->dev, "registered graphics framebuffer (len=0x%lx)\n",
+		 THIS_STREAM->videomem_len);
+
+	return 0;
+err1:
+	pdpfb_str_videomem_free(THIS_STREAM);
+err0:
+	return error;
+}
+
+/*
+ * Tear down the graphics framebuffer: deregister it from the fb core
+ * and release its video memory.  Always returns 0.
+ */
+static int pdpfb_gfx_remove(struct pdpfb_priv *priv,
+				struct platform_device *pdev)
+{
+	unregister_framebuffer(&THIS_STREAM->info);
+	pdpfb_str_videomem_free(THIS_STREAM);
+
+	return 0;
+}
+
+/*
+ * Static instance of the graphics (STR1) stream: default blending
+ * state, per-stream callbacks and the STR1 register map.
+ */
+static struct pdpfb_gfx_stream_priv pdpfb_gfx_stream = {
+	.stream = {
+		.mem_pool = PDPFB_MEMPOOL_GFXMEM,
+		.videomem_len = 0,
+		.enable = 1,
+		/* colour key defaults: key black, compare all 24 bits */
+		.ckey = {
+			.ckey = 0x000000,
+			.mask = 0xFFFFFF,
+		},
+		.global_alpha = 0xFF,	/* fully opaque */
+		.ops = {
+			.probe = pdpfb_gfx_probe,
+			.remove = pdpfb_gfx_remove,
+			.check_geom = pdpfb_gfx_check_geom,
+			.configure = pdpfb_gfx_configure,
+			.configure_addr = pdpfb_gfx_configure_addr,
+		},
+		.regs = {
+			.surf = PDP_STR1SURF,
+			.blend = PDP_STR1BLEND,
+			.blend2 = PDP_STR1BLEND2,
+			.ctrl = PDP_STR1CTRL,
+			.posn = PDP_STR1POSN,
+#if PDP_REV >= 0x010001
+			.gamma = PDP_RGBGAMMA0,
+			.gamma_stride = PDP_RGBGAMMA_STRIDE,
+#endif
+		},
+	},
+};
+
+/* Accessor used by the core driver to obtain the graphics stream */
+struct pdpfb_stream *pdpfb_gfx_get_stream(void)
+{
+	return THIS_STREAM;
+}
diff --git a/drivers/video/pdpfb_gfx.h b/drivers/video/pdpfb_gfx.h
new file mode 100644
index 0000000..fdc9cf1
--- /dev/null
+++ b/drivers/video/pdpfb_gfx.h
@@ -0,0 +1,18 @@
+/*
+ * PDP Desktop Graphics Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _PDPFB_GFX_H
+#define _PDPFB_GFX_H
+
+#include "pdpfb.h"
+
+struct pdpfb_stream *pdpfb_gfx_get_stream(void);	/* graphics stream accessor */
+
+#endif /* _PDPFB_GFX_H */
diff --git a/drivers/video/pdpfb_regs.h b/drivers/video/pdpfb_regs.h
new file mode 100644
index 0000000..45a4391
--- /dev/null
+++ b/drivers/video/pdpfb_regs.h
@@ -0,0 +1,464 @@
+/*
+ * PDP Framebuffer
+ *
+ * Copyright (c) 2008-2012 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _PDPFB_REGS_H
+#define _PDPFB_REGS_H
+
+#include <video/pdpfb.h>
+
+/*
+ * Bitfield operations
+ * For each argument field, the following preprocessor macros must exist
+ * field##_BITS - the number of bits in the bit field
+ * field##_OFFSET - offset from the first bit
+ */
+
+#define GET_MASK(bits) \
+	((0x1U<<(bits))-1)	/* unsigned: 1<<31 would be signed-shift UB */
+
+#define PLACE_FIELD(field, val) \
+	(((u32)(val) & GET_MASK(field##_BITS)) << field##_OFFSET)
+
+#define ADJ_FIELD(x, field, val) \
+	(((x) & ~(GET_MASK(field##_BITS) << field##_OFFSET)) \
+	| PLACE_FIELD(field, val))
+
+#define SET_FIELD(x, field, val) \
+	(x) = ADJ_FIELD(x, field, val)
+
+#define GET_FIELD(x, field) \
+	(((x) >> (field##_OFFSET)) & GET_MASK(field##_BITS))
+
+/* Keeps most significant bits */
+#define MOVE_FIELD(x, o1, l1, o2, l2) \
+	(((x) >> ((o1) + (l1) - (l2))) << (o2))
+
+/* PDP 1.0.0 */
+#define PDP_STR1SURF		0x0000
+#define PDP_STR2SURF		0x0004
+#define PDP_STR1BLEND		0x0020
+#define PDP_STR2BLEND		0x0024
+#define PDP_STR1BLEND2		0x0040
+#define PDP_STR2BLEND2		0x0044
+#define PDP_STR1CTRL		0x0060
+#define PDP_STR2CTRL		0x0064
+#define PDP_STR1ADDR		0x0068
+#define PDP_STR2ADDR		0x006C
+#define PDP_STR2UADDR		0x0084
+#define PDP_STR2VADDR		0x00A4
+#define PDP_STR1POSN		0x00C0
+#define PDP_STR2POSN		0x00C4
+#define PDP_TILEPARAM1		0x00D0
+#define PDP_TILEPARAM2		0x00D4
+#define PDP_TILEPARAM3		0x00D8
+#define PDP_TILEPARAM4		0x00DC
+#define PDP_TILEPARAM5		0x00E0
+#define PDP_PALETTE1		0x014C
+#define PDP_PALETTE2		0x0150
+#define PDP_SYNCCTRL		0x0154
+#define PDP_HSYNC1		0x0158
+#define PDP_HSYNC2		0x015C
+#define PDP_HSYNC3		0x0160
+#define PDP_VSYNC1		0x0164
+#define PDP_VSYNC2		0x0168
+#define PDP_VSYNC3		0x016C
+#define PDP_BORDCOL		0x0170
+#define PDP_BGNDCOL		0x0174
+#define PDP_INTSTAT		0x0178
+#define PDP_INTENAB		0x017C
+#define PDP_INTCTRL		0x0180
+#define PDP_SIGNAT		0x0184
+#define PDP_MEMCTRL		0x0188
+#define PDP_SKIPCTRL		0x018C
+#define PDP_SCALECTRL		0x0190
+#define PDP_MEMSTRCTRL1		0x0194
+#define PDP_MEMSTRCTRL2		0x0198
+#define PDP_PGSZMASK		0x019c
+#define PDP_HSINIT		0x01B0
+#define PDP_HSCOEFF0		0x01B4
+#define PDP_HSCOEFF1		0x01B8
+#define PDP_HSCOEFF2		0x01BC
+#define PDP_HSCOEFF3		0x01C0
+#define PDP_HSCOEFF4		0x01C4
+#define PDP_HSCOEFF5		0x01C8
+#define PDP_HSCOEFF6		0x01CC
+#define PDP_HSCOEFF7		0x01D0
+#define PDP_HSCOEFF8		0x01D4
+#define PDP_SCALESIZE		0x01D8
+#define PDP_REGLD_SPC		0x02FC
+#define PDP_REGLD_STAT		0x0300
+#define PDP_REGLD_CTRL		0x0304
+#define PDP_LINESTAT		0x0308
+#define PDP_UPDCTRL		0x030C
+#define PDP_VEVENT		0x0310
+#define PDP_HDECTRL		0x0314
+#define PDP_VDECTRL		0x0318
+#define PDP_OPMASK		0x031C
+#define PDP_DBGCTRL		0x0320
+#define PDP_DBGDATA		0x0324
+#define PDP_DBGSIDE		0x0328
+#if PDP_REV < 0x010001
+#define PDP_INTCLR		0x032C
+#else
+#define PDP_INTCLR		0x01AC
+#endif
+#define PDP_CSCCOEFF0		0x0330
+#define PDP_CSCCOEFF1		0x0334
+#define PDP_CSCCOEFF2		0x0338
+#define PDP_CSCCOEFF3		0x033C
+#define PDP_CSCCOEFF4		0x0340
+#define PDP_CORE_ID		0x04E0
+#define PDP_CORE_REV		0x04F0
+
+/* PDP 1.0.1 */
+#if PDP_REV >= 0x010001
+#define PDP_CURS1ADDR		0x0100
+#define PDP_CURS1SIZE		0x0110
+#define PDP_CURS1POSN		0x0120
+#define PDP_CURS1BLND		0x0130
+#define PDP_CURS1BLND2		0x0140
+#define PDP_VSCOEFF0		0x0198
+#define PDP_VSCOEFF1		0x019C
+#define PDP_VSCOEFF2		0x01A0
+#define PDP_VSCOEFF3		0x01A4
+#define PDP_VSCOEFF4		0x01A8
+#define PDP_VSINIT		0x0194
+#define PDP_YUVGAMMA0		0x0200
+#define PDP_YUVGAMMA_STRIDE	4
+#define PDP_RGBGAMMA0		0x0250
+#define PDP_RGBGAMMA_STRIDE	4
+#endif
+
+/* Pixel format field information */
+
+#define RGB565_RED_BITS		5
+#define RGB565_RED_OFFSET	11
+#define RGB565_GREEN_BITS	6
+#define RGB565_GREEN_OFFSET	5
+#define RGB565_BLUE_BITS	5
+#define RGB565_BLUE_OFFSET	0
+#define PLACE_RGB565(r, g, b) \
+	(PLACE_FIELD(RGB565_RED, r) \
+	|PLACE_FIELD(RGB565_GREEN, g) \
+	|PLACE_FIELD(RGB565_BLUE, b))
+
+/* Register field information */
+
+#define PDP_STR1SURF_USELUT_BITS	1	/* colour lookup table */
+#define PDP_STR1SURF_USELUT_OFFSET	31
+#define PDP_STR1SURF_PIXFMT_BITS	4	/* graphics pixel format */
+#define PDP_STR1SURF_PIXFMT_OFFSET	27
+/* PDP 1.0.0 */
+#define PDP_STR1SURF_PIXFMT_RGB8	0x0
+#define PDP_STR1SURF_PIXFMT_ARGB4444	0x4
+#define PDP_STR1SURF_PIXFMT_ARGB1555	0x5
+#if PDP_REV < 0x010001
+#define PDP_STR1SURF_PIXFMT_RGB565	0x6
+/* PDP 1.0.1 */
+#else
+#define PDP_STR1SURF_PIXFMT_RGB888	0x6
+#define PDP_STR1SURF_PIXFMT_RGB565	0x7
+#define PDP_STR1SURF_PIXFMT_ARGB8888	0x8
+#endif
+
+#define PDP_STR2SURF_PIXFMT_BITS	4	/* video pixel format */
+#define PDP_STR2SURF_PIXFMT_OFFSET	27
+#define PDP_STR2SURF_USECSC_BITS	1
+#define PDP_STR2SURF_USECSC_OFFSET	25
+#define PDP_STR2SURF_COSITED_BITS	1
+#define PDP_STR2SURF_COSITED_OFFSET	22
+/* PDP 1.0.0 */
+#define PDP_STR2SURF_PIXFMT_420_PL8		0x9
+#if PDP_REV < 0x010001
+#define PDP_STR2SURF_PIXFMT_422_UY0VY1_8888	0xA
+#define PDP_STR2SURF_PIXFMT_422_VY0UY1_8888	0xB
+#define PDP_STR2SURF_PIXFMT_422_Y0UY1V_8888	0xC
+#define PDP_STR2SURF_PIXFMT_422_Y0VY1U_8888	0xD
+#define PDP_STR2SURF_PIXFMT_420_T88CP		0xE
+#define PDP_STR2SURF_PIXFMT_422_T88CP		0xF
+/* PDP 1.0.1 */
+#else
+#define PDP_STR2SURF_PIXFMT_420_PL8IVU		0xA
+#define PDP_STR2SURF_PIXFMT_420_PL8IUV		0xB
+#define PDP_STR2SURF_PIXFMT_422_UY0VY1_8888	0xC
+#define PDP_STR2SURF_PIXFMT_422_VY0UY1_8888	0xD
+#define PDP_STR2SURF_PIXFMT_422_Y0UY1V_8888	0xE
+#define PDP_STR2SURF_PIXFMT_422_Y0VY1U_8888	0xF
+#endif
+
+#define PDP_STRXSURF_WIDTH_BITS		11	/* width - 1 */
+#define PDP_STRXSURF_WIDTH_OFFSET	11
+#define PDP_STRXSURF_HEIGHT_BITS	11	/* height - 1 */
+#define PDP_STRXSURF_HEIGHT_OFFSET	0
+
+#define PDP_STRXBLEND_GLOBALALPHA_BITS		8	/* global alpha */
+#define PDP_STRXBLEND_GLOBALALPHA_OFFSET	24
+#define PDP_STRXBLEND_COLKEY_BITS		24	/* colour key */
+#define PDP_STRXBLEND_COLKEY_OFFSET		0
+
+#define PDP_STRXBLEND2_PIXDOUBLE_BITS		1	/* pixel doubling */
+#define PDP_STRXBLEND2_PIXDOUBLE_OFFSET		31
+#define PDP_STRXBLEND2_PIXHALVE_BITS		1	/* pixel halving */
+#define PDP_STRXBLEND2_PIXHALVE_OFFSET		30
+#define PDP_STRXBLEND2_LINEDOUBLE_BITS		1	/* line doubling */
+#define PDP_STRXBLEND2_LINEDOUBLE_OFFSET	29
+#define PDP_STRXBLEND2_LINEHALVE_BITS		1	/* line halving */
+#define PDP_STRXBLEND2_LINEHALVE_OFFSET		28
+#define PDP_STRXBLEND2_COLKEYMASK_BITS		24	/* colour key mask */
+#define PDP_STRXBLEND2_COLKEYMASK_OFFSET	0
+
+#define PDP_STRXCTRL_STREAMEN_BITS		1	/* stream enable */
+#define PDP_STRXCTRL_STREAMEN_OFFSET		31
+#define PDP_STRXCTRL_CKEYEN_BITS		1	/* colour key enable */
+#define PDP_STRXCTRL_CKEYEN_OFFSET		30
+#define PDP_STRXCTRL_CKEYSRC_BITS		1	/* colour key source */
+#define PDP_STRXCTRL_CKEYSRC_OFFSET		29
+#define PDP_STRXCTRL_CKEYSRC_PREV		0x0
+#define PDP_STRXCTRL_CKEYSRC_CUR		0x1
+#define PDP_STRXCTRL_BLENDMODE_BITS		2	/* blend mode */
+#define PDP_STRXCTRL_BLENDMODE_OFFSET		27
+#define PDP_STRXCTRL_BLENDPOS_BITS		3	/* plane position */
+#define PDP_STRXCTRL_BLENDPOS_OFFSET		24
+#define PDP_STRXCTRL_BASEADDR_BITS		22	/* 25:4 of base addr */
+#define PDP_STRXCTRL_BASEADDR_OFFSET		0
+#define PDP_STR2CTRL_UVHALFSTR_BITS		1	/* UV half stride */
+#define PDP_STR2CTRL_UVHALFSTR_OFFSET		0
+
+#define PDP_STR2UADDR_UVHALFSTR_BITS		1	/* UV half stride */
+#define PDP_STR2UADDR_UVHALFSTR_OFFSET		31
+#define PDP_STR2UADDR_UBASEADDR_BITS		23	/* U plane base addr */
+#define PDP_STR2UADDR_UBASEADDR_OFFSET		0
+
+#define PDP_STR2VADDR_VBASEADDR_BITS		23	/* V plane base addr */
+#define PDP_STR2VADDR_VBASEADDR_OFFSET		0
+
+#define PDP_SYNCCTRL_SYNCACTIVE_BITS		1	/* starts sync generator */
+#define PDP_SYNCCTRL_SYNCACTIVE_OFFSET		31
+#define PDP_SYNCCTRL_DISPRST_BITS		1	/* software reset */
+#define PDP_SYNCCTRL_DISPRST_OFFSET		29
+#define PDP_SYNCCTRL_POWERDN_BITS		1	/* power down mode */
+#define PDP_SYNCCTRL_POWERDN_OFFSET		28
+#define PDP_SYNCCTRL_UPDSYNCCTRL_BITS		1	/* display update sync control */
+#define PDP_SYNCCTRL_UPDSYNCCTRL_OFFSET		26
+#define PDP_SYNCCTRL_UPDINTCTRL_BITS		1	/* display update interrupt control */
+#define PDP_SYNCCTRL_UPDINTCTRL_OFFSET		25
+#define PDP_SYNCCTRL_UPDCTRL_BITS		1	/* display update control */
+#define PDP_SYNCCTRL_UPDCTRL_OFFSET		24
+#define PDP_SYNCCTRL_UPDWAIT_BITS		5	/* fields to wait before updating */
+#define PDP_SYNCCTRL_UPDWAIT_OFFSET		16
+#define PDP_SYNCCTRL_CSYNCEN_BITS		1	/* composite output enable */
+#define PDP_SYNCCTRL_CSYNCEN_OFFSET		12
+#define PDP_SYNCCTRL_CLKPOL_BITS		1	/* pixel clock polarity */
+#define PDP_SYNCCTRL_CLKPOL_OFFSET		11
+#define PDP_SYNCCTRL_VSSLAVE_BITS		1	/* vsync master/slave */
+#define PDP_SYNCCTRL_VSSLAVE_OFFSET		7
+#define PDP_SYNCCTRL_HSSLAVE_BITS		1	/* hsync master/slave */
+#define PDP_SYNCCTRL_HSSLAVE_OFFSET		6
+#define PDP_SYNCCTRL_BLNKPOL_BITS		1	/* blank signal polarity */
+#define PDP_SYNCCTRL_BLNKPOL_OFFSET		5
+#define PDP_SYNCCTRL_BLNKDIS_BITS		1	/* blank signal disable */
+#define PDP_SYNCCTRL_BLNKDIS_OFFSET		4
+#define PDP_SYNCCTRL_VSPOL_BITS			1	/* vertical sync polarity */
+#define PDP_SYNCCTRL_VSPOL_OFFSET		3
+#define PDP_SYNCCTRL_VSDIS_BITS			1	/* vertical sync disable */
+#define PDP_SYNCCTRL_VSDIS_OFFSET		2
+#define PDP_SYNCCTRL_HSPOL_BITS			1	/* horizontal sync polarity */
+#define PDP_SYNCCTRL_HSPOL_OFFSET		1
+#define PDP_SYNCCTRL_HSDIS_BITS			1	/* horizontal sync disable */
+#define PDP_SYNCCTRL_HSDIS_OFFSET		0
+
+#define PDP_HSYNC1_HBPS_BITS		12	/* horizontal back porch start */
+#define PDP_HSYNC1_HBPS_OFFSET		16
+#define PDP_HSYNC1_HT_BITS		12	/* horizontal total */
+#define PDP_HSYNC1_HT_OFFSET		0
+
+#define PDP_HSYNC2_HAS_BITS		12	/* horizontal active start */
+#define PDP_HSYNC2_HAS_OFFSET		16
+#define PDP_HSYNC2_HLBS_BITS		12	/* horizontal left border start */
+#define PDP_HSYNC2_HLBS_OFFSET		0
+
+#define PDP_HSYNC3_HFPS_BITS		12	/* horizontal front porch start */
+#define PDP_HSYNC3_HFPS_OFFSET		16
+#define PDP_HSYNC3_HRBS_BITS		12	/* horizontal right border start */
+#define PDP_HSYNC3_HRBS_OFFSET		0
+
+#define PDP_VSYNC1_VBPS_BITS		12	/* vertical back porch start */
+#define PDP_VSYNC1_VBPS_OFFSET		16
+#define PDP_VSYNC1_VT_BITS		12	/* vertical total */
+#define PDP_VSYNC1_VT_OFFSET		0
+
+#define PDP_VSYNC2_VAS_BITS		12	/* vertical active start */
+#define PDP_VSYNC2_VAS_OFFSET		16
+#define PDP_VSYNC2_VTBS_BITS		12	/* vertical top border start */
+#define PDP_VSYNC2_VTBS_OFFSET		0
+
+#define PDP_VSYNC3_VFPS_BITS		12	/* vertical front porch start */
+#define PDP_VSYNC3_VFPS_OFFSET		16
+#define PDP_VSYNC3_VBBS_BITS		12	/* vertical bottom border start */
+#define PDP_VSYNC3_VBBS_OFFSET		0
+
+#define PDP_VEVENT_VEVENT_BITS		12	/* vertical event start */
+#define PDP_VEVENT_VEVENT_OFFSET	16
+#define PDP_VEVENT_VFETCH_BITS		12	/* vertical fetch start */
+#define PDP_VEVENT_VFETCH_OFFSET	0
+
+#define PDP_HDECTRL_HDES_BITS		12	/* horizontal data enable start */
+#define PDP_HDECTRL_HDES_OFFSET		16
+#define PDP_HDECTRL_HDEF_BITS		12	/* horizontal data enable finish */
+#define PDP_HDECTRL_HDEF_OFFSET		0
+
+#define PDP_VDECTRL_VDES_BITS		12	/* vertical data enable start */
+#define PDP_VDECTRL_VDES_OFFSET		16
+#define PDP_VDECTRL_VDEF_BITS		12	/* vertical data enable finish */
+#define PDP_VDECTRL_VDEF_OFFSET		0
+
+#define PDP_OPMASK_MASKLEVEL_BITS	1	/* masked output bit level */
+#define PDP_OPMASK_MASKLEVEL_OFFSET	31
+#define PDP_OPMASK_BLANKLEVEL_BITS	1	/* data disable output bit level */
+#define PDP_OPMASK_BLANKLEVEL_OFFSET	30
+#define PDP_OPMASK_MASKB_BITS		8	/* output data mask for blue channel */
+#define PDP_OPMASK_MASKB_OFFSET		16
+#define PDP_OPMASK_MASKG_BITS		8	/* output data mask for green channel */
+#define PDP_OPMASK_MASKG_OFFSET		8
+#define PDP_OPMASK_MASKR_BITS		8	/* output data mask for red channel */
+#define PDP_OPMASK_MASKR_OFFSET		0
+
+#define PDP_STRXPOSN_SRCSTRIDE_BITS	10	/* stride of surface in 16byte words - 1 */
+#define PDP_STRXPOSN_SRCSTRIDE_OFFSET	22
+#define PDP_STRXPOSN_XSTART_BITS	11	/* x coordinate of top left corner */
+#define PDP_STRXPOSN_XSTART_OFFSET	11
+#define PDP_STRXPOSN_YSTART_BITS	11	/* y coordinate of top left corner */
+#define PDP_STRXPOSN_YSTART_OFFSET	0
+
+#define PDP_PALETTE1_LUTADDR_BITS	8	/* set LUT address to read and write */
+#define PDP_PALETTE1_LUTADDR_OFFSET	24
+#define PDP_PALETTE2_LUTDATA_BITS	18	/* data to read or write to LUT in RGB666 */
+#define PDP_PALETTE2_LUTDATA_OFFSET	0
+
+#define PDP_INT_VEVENT0_BITS		1	/* start of safe update region */
+#define PDP_INT_VEVENT0_OFFSET		2
+#define PDP_INT_HBLNK0_BITS		1	/* start of horizontal blanking */
+#define PDP_INT_HBLNK0_OFFSET		0
+
+#define PDP_INTCTRL_HBLNKLINE_BITS	1	/* horizontal blanking interrupt line */
+#define PDP_INTCTRL_HBLNKLINE_OFFSET	16
+#define PDP_INTCTRL_HBLNKLINE_ALL	0
+#define PDP_INTCTRL_HBLNKLINE_SPECIFIC	1
+#define PDP_INTCTRL_HBLNKLINENO_BITS	12	/* horizontal line number to interrupt */
+#define PDP_INTCTRL_HBLNKLINENO_OFFSET	0
+
+#define PDP_MEMCTRL_MEMREFRESH_BITS	2	/* memory refresh control */
+#define PDP_MEMCTRL_MEMREFRESH_OFFSET	30
+#define PDP_MEMCTRL_MEMREFRESH_ALWAYS	0x0
+#define PDP_MEMCTRL_MEMREFRESH_HBLNK	0x1
+#define PDP_MEMCTRL_MEMREFRESH_VBLNK	0x2
+#define PDP_MEMCTRL_MEMREFRESH_BOTH	0x3
+
+#define PDP_SKIPCTRL_XCLIP_BITS		4	/* video pixels to remove after scaling */
+#define PDP_SKIPCTRL_XCLIP_OFFSET	28
+#define PDP_SKIPCTRL_HSKIP_BITS		11	/* video pixels to remove before scaling */
+#define PDP_SKIPCTRL_HSKIP_OFFSET	16
+#if PDP_REV >= 0x010001
+#define PDP_SKIPCTRL_YCLIP_BITS		4	/* video pixels to remove after scaling */
+#define PDP_SKIPCTRL_YCLIP_OFFSET	12
+#define PDP_SKIPCTRL_VSKIP_BITS		11	/* video pixels to remove before scaling */
+#define PDP_SKIPCTRL_VSKIP_OFFSET	0
+#endif
+
+#define PDP_SCALECTRL_HSCALEBP_BITS	1	/* video hscale bypass */
+#define PDP_SCALECTRL_HSCALEBP_OFFSET	31
+#define PDP_SCALECTRL_VSCALEBP_BITS	1	/* video hscale bypass */
+#define PDP_SCALECTRL_VSCALEBP_OFFSET	30
+#define PDP_SCALECTRL_HSBEFOREVS_BITS	1	/* hscale/vscale order */
+#define PDP_SCALECTRL_HSBEFOREVS_OFFSET	29
+#define PDP_SCALECTRL_VSURUNCTRL_BITS	1	/* vscale under-run control */
+#define PDP_SCALECTRL_VSURUNCTRL_OFFSET	27
+#define PDP_SCALECTRL_VORDER_BITS	2	/* vertical filter order */
+#define PDP_SCALECTRL_VORDER_OFFSET	16
+#define PDP_SCALECTRL_VORDER_1TAP	0x0	/* 1 tap (decim/replic) */
+#define PDP_SCALECTRL_VORDER_2TAP	0x1	/* 2 tap (bilinear) */
+#define PDP_SCALECTRL_VORDER_4TAP	0x2	/* 4 tap */
+#define PDP_SCALECTRL_VPITCH_BITS	16	/* Vertical pitch, 5.11 fixpt */
+#define PDP_SCALECTRL_VPITCH_OFFSET	0
+
+#define PDP_HSINIT_HINITIAL_BITS	16	/* HQ vid filter initial hpos, 5.11 fixed pt */
+#define PDP_HSINIT_HINITIAL_OFFSET	16
+#define PDP_HSINIT_HINITIAL_FIX		11
+#define PDP_HSINIT_HDECIM_BITS		1	/* pixel halving prior to scaling */
+#define PDP_HSINIT_HDECIM_OFFSET	15
+#define PDP_HSINIT_HPITCH_BITS		15	/* horizontal scale pitch, 4.11 fixed pt */
+#define PDP_HSINIT_HPITCH_OFFSET	0
+#define PDP_HSINIT_HPITCH_FIX		11
+
+#define PDP_VSINIT_INITIAL1_BITS	16	/* Initial pos of field 1 */
+#define PDP_VSINIT_INITIAL1_OFFSET	16
+#define PDP_VSINIT_INITIAL0_BITS	16	/* Initial pos of field 0 */
+#define PDP_VSINIT_INITIAL0_OFFSET	0
+
+#define PDP_SCALESIZE_SCALEDWIDTH_BITS		11	/* width after scaling - 1 in px */
+#define PDP_SCALESIZE_SCALEDWIDTH_OFFSET	16
+#define PDP_SCALESIZE_SCALEDHEIGHT_BITS		11	/* height after scaling - 1 in px */
+#define PDP_SCALESIZE_SCALEDHEIGHT_OFFSET	0
+
+#define PDP_CSCCOEFF0_RU_BITS		11	/* U CSC coefficient for R channel */
+#define PDP_CSCCOEFF0_RU_OFFSET		11
+#define PDP_CSCCOEFF0_RY_BITS		11	/* Y CSC coefficient for R channel */
+#define PDP_CSCCOEFF0_RY_OFFSET		0
+
+#define PDP_CSCCOEFF1_GY_BITS		11	/* Y CSC coefficient for G channel */
+#define PDP_CSCCOEFF1_GY_OFFSET		11
+#define PDP_CSCCOEFF1_RV_BITS		11	/* V CSC coefficient for R channel */
+#define PDP_CSCCOEFF1_RV_OFFSET		0
+
+#define PDP_CSCCOEFF2_GV_BITS		11	/* V CSC coefficient for G channel */
+#define PDP_CSCCOEFF2_GV_OFFSET		11
+#define PDP_CSCCOEFF2_GU_BITS		11	/* U CSC coefficient for G channel */
+#define PDP_CSCCOEFF2_GU_OFFSET		0
+
+#define PDP_CSCCOEFF3_BU_BITS		11	/* U CSC coefficient for B channel */
+#define PDP_CSCCOEFF3_BU_OFFSET		11
+#define PDP_CSCCOEFF3_BY_BITS		11	/* Y CSC coefficient for B channel */
+#define PDP_CSCCOEFF3_BY_OFFSET		0
+
+#define PDP_CSCCOEFF4_BV_BITS		11	/* V CSC coefficient for B channel */
+#define PDP_CSCCOEFF4_BV_OFFSET		0
+
+#define PDP_LINESTAT_LINENO_STAT_BITS	12	/* Current line number. */
+#define PDP_LINESTAT_LINENO_STAT_OFFSET	0
+
+/* info about video plane addresses */
+#ifndef PDP_SHARED_BASE
+#define PDP_YADDR_BITS		28
+#define PDP_YADDR_ALIGN		4
+#define PDP_UADDR_BITS		28
+#define PDP_UADDR_ALIGN		4
+#define PDP_VADDR_BITS		28
+#define PDP_VADDR_ALIGN		4
+#else
+#define PDP_YADDR_BITS		PDP_STRXCTRL_BASEADDR_BITS
+#define PDP_YADDR_ALIGN		4
+#define PDP_UADDR_BITS		PDP_STR2UADDR_UBASEADDR_BITS
+#define PDP_UADDR_ALIGN		3
+#define PDP_VADDR_BITS		PDP_STR2VADDR_VBASEADDR_BITS
+#define PDP_VADDR_ALIGN		3
+#endif
+#define PDP_YSTRIDE_BITS	PDP_STRXPOSN_SRCSTRIDE_BITS
+#define PDP_YSTRIDE_ALIGN	4
+
+#define PDP_YADDR_MAX		(((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN)
+#define PDP_UADDR_MAX		(((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN)
+#define PDP_VADDR_MAX		(((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN)
+#define PDP_YSTRIDE_MAX		((1 << PDP_YSTRIDE_BITS) << PDP_YSTRIDE_ALIGN)
+#define PDP_YADDR_ALIGNMASK	((1 << PDP_YADDR_ALIGN) - 1)
+#define PDP_UADDR_ALIGNMASK	((1 << PDP_UADDR_ALIGN) - 1)
+#define PDP_VADDR_ALIGNMASK	((1 << PDP_VADDR_ALIGN) - 1)
+#define PDP_YSTRIDE_ALIGNMASK	((1 << PDP_YSTRIDE_ALIGN) - 1)
+
+#endif
diff --git a/drivers/video/pdpfb_vid.c b/drivers/video/pdpfb_vid.c
new file mode 100644
index 0000000..c730cab
--- /dev/null
+++ b/drivers/video/pdpfb_vid.c
@@ -0,0 +1,1393 @@
+/*
+ * PDP Scaled Video Framebuffer
+ *
+ * Copyright (c) 2008-2012 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include <asm/soc-chorus2/pdp.h>
+#include <video/pdpfb.h>
+#include "pdpfb.h"
+#include "pdpfb_regs.h"
+#include "pdpfb_vid.h"
+
+/* warn if scale coefficients aren't normalised */
+/*#define CHECK_SCALE_COEFFS*/
+/*#define DEBUG_SCALE_COEFFS*/
+
+#define THIS_STREAM (&pdpfb_vid_stream.stream)
+#define THIS_PRIV (&pdpfb_vid_stream)
+
+static struct pdpfb_vid_stream_priv {
+	struct pdpfb_stream stream;
+	/* horizontal scaler */
+	int hdecimation;
+	int hs_taps;
+	int hscoeffs[33];
+	int hs_oldsize, hs_oldres;
+	/* vertical scaler */
+	int vs_taps;
+	int vscoeffs[17];
+	int vs_oldsize, vs_oldres;
+	/* pixel format info */
+	struct pdpfb_vid_csc csc;
+	int planar_override;
+	int planar_ov_virtres[2];
+	int planar_ov_pixfmt;
+	struct pdpfb_vid_planar planar;
+	int pixfmt;
+	int nonstd2pixfmt[PDP_VID_PIXFMT_MAX];
+	/* fields to update on vevent */
+#ifdef PDP_SHARED_BASE
+	spinlock_t base_addr_lock;
+	unsigned int base_addr_y;
+	unsigned int base_addr_u;
+	unsigned int base_addr_v;
+	unsigned int hskip;
+	unsigned int vskip;
+#endif
+} pdpfb_vid_stream;
+
+static struct pdpfb_vid_csc_coefs pdpfb_vid_csc_presets[] = {
+	[PDP_VID_CSCPRESET_HDTV] = {
+		.ry = 298,	.rv = 459,	.ru = 0,
+		.gy = 298,	.gv = -137,	.gu = -55,
+		.by = 298,	.bv = 0,	.bu = 541,
+	},
+	[PDP_VID_CSCPRESET_SDTV] = {
+		.ry = 298,	.rv = 409,	.ru = 0,
+		.gy = 298,	.gv = -208,	.gu = -100,
+		.by = 298,	.bv = 0,	.bu = 517,
+	},
+	[PDP_VID_CSCPRESET_LEGACYHDTV] = {
+		.ry = 298,	.rv = 459,	.ru = 0,
+		.gy = 298,	.gv = -139,	.gu = -66,
+		.by = 298,	.bv = 0,	.bu = 532,
+	},
+	[PDP_VID_CSCPRESET_LEGACYSDTV] = {
+		.ry = 298,	.rv = 409,	.ru = 0,
+		.gy = 298,	.gv = -207,	.gu = -97,
+		.by = 298,	.bv = 0,	.bu = 519,
+	},
+};
+
+/* fixed point arithmetic */
+
+static inline int fix(int p, int a)
+{
+	return a << p;
+}
+
+static inline int fix_div(int p, int n, int d)
+{
+	return (n << p) / d;
+}
+
+static inline int fix_divl(int p, int n, int d)
+{
+	return ((u64)n << p) / d;
+}
+
+static inline int fix_mul(int p, int a, int b)
+{
+	return (a * b) >> p;
+}
+
+static inline int fix_sin(int p, int x)
+{
+	/* Taylor expansion */
+	int pow;
+	int sum;
+
+	/* in [-pi,pi] */
+	int pi2 = fix_divl(p, 6283185, 1000000);
+	int pi = pi2/2;
+	x = x % pi2;
+	if (x > pi)
+		x -= pi2;
+	else if (x < -pi)
+		x += pi2;
+
+	sum = x;
+
+	/* x represents x^2 from now on */
+	x = fix_mul(p, x, x);
+
+	/* -x^3/3! */
+	pow = fix_mul(p, sum, x);
+	sum -= pow/6;
+	/* +x^5/5! */
+	pow = fix_mul(p, pow, x);
+	sum += pow/120;
+	/* -x^7/7! */
+	pow = fix_mul(p, pow, x);
+	sum -= pow/5040;
+	/* +x^9/9! */
+	pow = fix_mul(p, pow, x);
+	sum += pow/362880;
+
+	return sum;
+}
+
+static inline int fix_cos(int p, int x)
+{
+	/* Taylor expansion */
+	int pow;
+	int sum = fix(p, 1);
+
+	/* in [-pi,pi] */
+	int pi2 = fix_divl(p, 6283185, 1000000);
+	int pi = pi2/2;
+	x = x % pi2;
+	if (x > pi)
+		x -= pi2;
+	else if (x < -pi)
+		x += pi2;
+
+	/* x represents x^2 from now on */
+	x = fix_mul(p, x, x);
+
+	/* -x^2/2! */
+	pow = x;
+	sum -= pow/2;
+	/* +x^4/4! */
+	pow = fix_mul(p, pow, x);
+	sum += pow/24;
+	/* -x^6/6! */
+	pow = fix_mul(p, pow, x);
+	sum -= pow/720;
+	/* +x^8/8! */
+	pow = fix_mul(p, pow, x);
+	sum += pow/40320;
+	/* -x^10/10! */
+	pow = fix_mul(p, pow, x);
+	sum -= pow/3628800;
+
+	return sum;
+}
+
+/* returns negative for invalid nonstd */
+static int pdpfb_vid_nonstd_to_pixfmt(u32 nonstd)
+{
+	if (nonstd <= 0 || nonstd >= ARRAY_SIZE(THIS_PRIV->nonstd2pixfmt))
+		return -1;
+	return THIS_PRIV->nonstd2pixfmt[nonstd];
+}
+
+static int pdpfb_vid_scale_coeffs_stale(u32 new_res,  u32 old_res,
+					u32 new_size, u32 old_size,
+					int new_T,    int old_T,
+					int new_I,    int old_I)
+{
+	/* has number of taps/interpolation points changed? */
+	if (new_T != old_T || new_I != old_I)
+		return 1;
+	/* scale factor clamped to max of 1 */
+	if (new_size >= new_res && old_size >= old_res)
+		return 0;
+	/* has scale factor changed? */
+	return new_size * old_res != old_size * new_res;
+}
+
+static int pdpfb_vid_calc_scale_coeffs(u32 res, u32 size,
+				       int T,	/* taps */
+				       int I,	/* interpolation points */
+				       int *coeffs, int count)
+{
+	int midpoint = T*I/2;
+#define SCALE_PRECISION 8
+#define RESULT_PRECISION 6
+#define DIV(a, b)	fix_div(SCALE_PRECISION, a, b)
+#define DIVL(a, b)	fix_divl(SCALE_PRECISION, a, b)
+#define MUL(a, b)	fix_mul(SCALE_PRECISION, a, b)
+#define SIN(a)		fix_sin(SCALE_PRECISION, a)
+#define COS(a)		fix_cos(SCALE_PRECISION, a)
+#define FIX(a)		fix(SCALE_PRECISION, a)
+#define RESULT(a)	((a) >> (SCALE_PRECISION - RESULT_PRECISION))
+	int fpi = DIVL(3141593, 1000000);
+	int fS = min(FIX(1), /* scale factor clamped to 1 */
+		DIV(size, res));
+	int fA = DIV(54, 100);
+	int c, i;
+
+	int fpiS = MUL(fpi, fS);
+	int f2piSoT = fpiS*2/T;
+	int f1mA = FIX(1) - fA;
+	int fT = FIX(T);
+	int ftotal = 0;
+
+	/* Calculate coefficients */
+	for (c = 0; c <= midpoint; ++c) {
+		/* t + i/I */
+		int ftpioI = FIX(c)/I;
+		int fx = MUL(fpiS, fT/2 - ftpioI);
+		int fco = fA - MUL(f1mA, COS(MUL(f2piSoT, ftpioI)));
+		if (fx > DIV(1, 10))
+			fco = MUL(DIV(SIN(fx), fx), fco);
+		ftotal += fco;
+		coeffs[c] = fco;
+	}
+
+	/*
+	 * Interpolation points I/2+1 to I-1 have same values as 1 to I/2-1,
+	 * but in opposite order, so we only need to normalise the first half
+	 * of the interpolation points taking mirroring into account.
+	 */
+	for (i = 0; i <= I/2; ++i) {
+		int sum, err, dir = 0;
+
+		/*
+		 * Normalise coefficients in each interpolation point and
+		 * convert into result format.
+		 */
+		sum = 0;
+		for (c = i; c <= midpoint; c += I)
+			sum += coeffs[c];
+		for (; c < midpoint*2; c += I)
+			sum += coeffs[midpoint*2 - c];
+		/* Careful not to modify a coefficient twice */
+		for (c = i; c <= midpoint; c += I)
+			coeffs[c] = RESULT(DIV(coeffs[c], sum));
+		if (i & (I/2 - 1))
+			for (c = I-i; c <= midpoint; c += I)
+				coeffs[c] = RESULT(DIV(coeffs[c], sum));
+
+		/*
+		 * Find fixed point error from normalisation.
+		 */
+		sum = 0;
+		for (c = i; c <= midpoint; c += I)
+			sum += coeffs[c];
+		for (; c < midpoint*2; c += I)
+			sum += coeffs[midpoint*2 - c];
+		err = sum - (1 << RESULT_PRECISION);
+		if (err > 0)
+			dir = -1;
+		else if (err < 0)
+			dir = 1;
+		else
+			continue;
+
+#ifdef DEBUG_SCALE_COEFFS
+		printk(KERN_DEBUG "pdp: i=%d, err=%d\n", i, err);
+#define SCALE_DEBUG(C, N) \
+		printk(KERN_DEBUG "pdp:   adj*%d [%d] to 0x%x (err=%d)\n", \
+		       (N), (C), coeffs[(C)], err)
+#else
+#define SCALE_DEBUG(C, N) do {} while (0)
+#endif
+
+		/*
+		 * Distribute the error over the tap coefficients, preferring
+		 * to change central coefficients.
+		 */
+		if (i == 0) {
+			/*
+			 * Special case: contains midpoint. Work out from
+			 * center until no error or an odd value (which can be
+			 * fixed by adjusting midpoint again).
+			 */
+			while (err > 1 || err < -1) {
+				coeffs[midpoint] += dir;
+				err += dir;
+				SCALE_DEBUG(midpoint, 1);
+				for (c = midpoint - I;
+				     (err > 1 || err < -1) && c >= I;
+				     c -= I) {
+					coeffs[c] += dir;
+					err += dir*2;
+					SCALE_DEBUG(c, 2);
+				}
+			}
+			if (err & 1) {
+				coeffs[midpoint] += dir;
+				err += dir;
+				SCALE_DEBUG(midpoint, 1);
+			}
+		} else if (i == I/2) {
+			/*
+			 * Special case: tap coeffs mirror. This also means err
+			 * is always even with this interpolation point.
+			 */
+			int last = i + I*(T/2-1);
+#ifdef CHECK_SCALE_COEFFS
+			WARN(err & 1, "err (=%d) should be even for i=%d\n",
+					err, i);
+#endif
+			while (err) {
+				for (c = last; err && c >= 0; c -= I) {
+					coeffs[c] += dir;
+					err += dir*2;
+					SCALE_DEBUG(c, 2);
+				}
+			}
+		} else {
+			/*
+			 * No tap coefficient mirrors another. Work out from
+			 * center adjusting values.
+			 */
+			int last = midpoint - i;
+			int offset = i*2 - I;
+			while (err) {
+				for (c = last; err && c >= 0; c -= I) {
+					coeffs[c] += dir;
+					err += dir;
+					SCALE_DEBUG(c, 1);
+					if (!err)
+						break;
+					coeffs[c+offset] += dir;
+					err += dir;
+					SCALE_DEBUG(c+offset, 1);
+				}
+			}
+		}
+#ifdef CHECK_SCALE_COEFFS
+		sum = 0;
+		for (c = i; c <= midpoint; c += I)
+			sum += coeffs[c];
+		for (; c < midpoint*2; c += I)
+			sum += coeffs[midpoint*2 - c];
+		err = sum - (1 << RESULT_PRECISION);
+		WARN_ONCE(err, "Scale coefficients not normalised"
+			       " (i=%d, err=%d)\n", i, err);
+#endif
+	}
+
+	/* reflect about midpoint in spare coeffs */
+	if (count > midpoint*2)
+		count = midpoint*2;
+	for (c = midpoint+1; c < count; ++c)
+		coeffs[c] = coeffs[2*midpoint - c];
+
+	return 0;
+#undef DIV
+#undef MUL
+#undef SIN
+#undef COS
+#undef FIX
+#undef RESULT
+}
+
+static int pdpfb_vid_calc_hscale(struct pdpfb_priv *priv)
+{
+	u32 geomw = THIS_STREAM->geom.w;
+	if (!geomw)
+		geomw = THIS_STREAM->info.var.xres;
+
+	/* don't recalculate coefficients unless something has changed */
+	if (!pdpfb_vid_scale_coeffs_stale(THIS_STREAM->info.var.xres,
+					  THIS_PRIV->hs_oldres,
+					  geomw,
+					  THIS_PRIV->hs_oldsize,
+					  8, THIS_PRIV->hs_taps,
+					  8, 8))
+		return 0;
+
+	THIS_PRIV->hs_taps = 8;
+	THIS_PRIV->hs_oldres = THIS_STREAM->info.var.xres;
+	THIS_PRIV->hs_oldsize = geomw;
+	return pdpfb_vid_calc_scale_coeffs(THIS_STREAM->info.var.xres,
+					geomw,
+					THIS_PRIV->hs_taps,
+					8,
+					THIS_PRIV->hscoeffs,
+					ARRAY_SIZE(THIS_PRIV->hscoeffs));
+}
+
+#ifdef PDP_VID_VSCALE
+static int pdpfb_vid_calc_vscale(struct pdpfb_priv *priv)
+{
+	struct pdp_info *pdata = pdpfb_get_platform_data(priv);
+	int vs_taps;
+
+	/* 2-tap (bilinear) filtering when scaling down beyond threshold */
+	if ((u32)THIS_STREAM->geom.h * pdata->vpitch_bilinear_threshold
+	    < ((u32)THIS_STREAM->info.var.yres << PDPFB_PDATA_FIX_SHIFT))
+		vs_taps = 2;
+	else
+		vs_taps = 4;
+
+	/* don't recalculate coefficients unless something has changed */
+	if (!pdpfb_vid_scale_coeffs_stale(THIS_STREAM->info.var.yres,
+						THIS_PRIV->vs_oldres,
+					  THIS_STREAM->geom.h,
+						THIS_PRIV->vs_oldsize,
+					  vs_taps, THIS_PRIV->vs_taps,
+					  8, 8))
+		return 0;
+
+	THIS_PRIV->vs_taps = vs_taps;
+	THIS_PRIV->vs_oldres = THIS_STREAM->info.var.yres;
+	THIS_PRIV->vs_oldsize = THIS_STREAM->geom.h;
+	return pdpfb_vid_calc_scale_coeffs(THIS_STREAM->info.var.yres,
+					THIS_STREAM->geom.h,
+					vs_taps,
+					8,
+					THIS_PRIV->vscoeffs,
+					ARRAY_SIZE(THIS_PRIV->vscoeffs));
+}
+#else
+static inline int pdpfb_vid_calc_vscale(struct pdpfb_priv *priv)
+{
+	return 0;
+}
+#endif
+
+static int pdpfb_vid_set_bpp(struct fb_var_screeninfo *var)
+{
+	if (pdpfb_vid_nonstd_to_pixfmt(var->nonstd) < 0)
+		var->nonstd = 0;
+
+	switch (var->nonstd) {
+	case PDP_VID_PIXFMT_420_PL8:
+	case PDP_VID_PIXFMT_420_PL8IVU:
+	case PDP_VID_PIXFMT_420_PL8IUV:
+		var->bits_per_pixel = 12;
+		break;
+	case PDP_VID_PIXFMT_422_UY0VY1_8888:
+	case PDP_VID_PIXFMT_422_VY0UY1_8888:
+	case PDP_VID_PIXFMT_422_Y0UY1V_8888:
+	case PDP_VID_PIXFMT_422_Y0VY1U_8888:
+		var->bits_per_pixel = 16;
+		break;
+	default:
+		var->bits_per_pixel = 16;
+		var->nonstd = PDP_VID_PIXFMT_422_UY0VY1_8888;
+		break;
+	}
+
+	var->red.offset = 0;
+	var->red.length = 0;
+	var->green.offset = 0;
+	var->green.length = 0;
+	var->blue.offset = 0;
+	var->blue.length = 0;
+	var->transp.offset = 0;
+	var->transp.length = 0;
+	var->red.msb_right = 0;
+	var->green.msb_right = 0;
+	var->blue.msb_right = 0;
+	var->transp.msb_right = 0;
+	return 0;
+}
+
+static int pdpfb_vid_check_geom(struct pdpfb_priv *priv,
+				struct pdpfb_geom *geom)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+
+	if (!geom->w) {
+		THIS_PRIV->hdecimation = 0;
+	} else if (geom->w == info->var.xres) {
+		geom->w = 0;
+	} else if (geom->w > info->var.xres*8) {
+		THIS_PRIV->hdecimation = 0;
+		geom->w = info->var.xres*8;
+	} else if (geom->w*4 < info->var.xres) {
+		THIS_PRIV->hdecimation = 1;
+		if (geom->w*8 < info->var.xres)
+			geom->w = (info->var.xres+7)/8;
+#if PDP_REV < 0x010001
+		++geom->w; /* round up */
+#endif
+	} else {
+		THIS_PRIV->hdecimation = 0;
+	}
+#if PDP_REV < 0x010001
+	/* geom->w must be even because it's in YUV */
+	geom->w &= ~0x1;
+	/* pan granularity depends on horizontal decimation */
+	if (THIS_PRIV->hdecimation)
+		info->fix.xpanstep = 4;
+	else
+		info->fix.xpanstep = 2;
+#else
+	info->fix.xpanstep = 1;
+#endif
+
+	/* avoid HSKIP problems by restricting pan step */
+	if (info->var.nonstd == PDP_VID_PIXFMT_420_PL8IVU ||
+	    info->var.nonstd == PDP_VID_PIXFMT_420_PL8IUV)
+		info->fix.xpanstep = 16;
+
+	if (geom->h) {
+#ifndef PDP_VID_VSCALE
+		/* only doubling and halving */
+		if (geom->h*4 <= info->var.yres*3)
+			geom->h = info->var.yres / 2;
+		else if (geom->h*2 >= info->var.yres*3)
+			geom->h = info->var.yres * 2;
+		else
+			geom->h = 0;
+#else
+		if (geom->h == info->var.yres)
+			geom->h = 0;
+		else if (geom->h > info->var.yres*8)
+			geom->h = info->var.yres*8;
+		else if (geom->h*8 < info->var.yres)
+			geom->h = (info->var.yres+7)/8;
+#endif
+	}
+	return 0;
+}
+
+static int pdpfb_vid_set_geom(struct pdpfb_priv *priv)
+{
+	struct pdpfb_geom *geom = &THIS_STREAM->geom;
+
+	pdpfb_vid_calc_hscale(priv);
+	if (geom->h)
+		pdpfb_vid_calc_vscale(priv);
+	return 0;
+}
+
+static unsigned long pdpfb_vid_required_mem(struct fb_var_screeninfo *var)
+{
+	switch (var->nonstd) {
+	case PDP_VID_PIXFMT_420_PL8:
+	case PDP_VID_PIXFMT_420_PL8IVU:
+	case PDP_VID_PIXFMT_420_PL8IUV:
+		return 3 * pdpfb_get_line_length(var->xres_virtual/2, 8)
+			* var->yres_virtual;
+	default:
+		return pdpfb_get_line_length(var->xres_virtual,
+						var->bits_per_pixel)
+			* var->yres_virtual;
+	};
+}
+
+static int pdpfb_vid_check_var(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	if (!var->xres)
+		var->xres = 1;
+	if (!var->yres)
+		var->yres = 1;
+
+#if PDP_REV < 0x010001
+	/* xres must be even, round up */
+	var->xres = (var->xres + 1) & ~0x1;
+#endif
+
+	if (var->xres > var->xres_virtual)
+		var->xres_virtual = var->xres;
+	if (var->yres > var->yres_virtual)
+		var->yres_virtual = var->yres;
+
+	if (var->xres_virtual < var->xoffset + var->xres)
+		var->xres_virtual = var->xoffset + var->xres;
+	if (var->yres_virtual < var->yoffset + var->yres)
+		var->yres_virtual = var->yoffset + var->yres;
+
+	pdpfb_vid_set_bpp(var);
+
+	/* Memory limit */
+	if (pdpfb_vid_required_mem(var) > THIS_STREAM->videomem_len)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int pdpfb_vid_configure_csc(struct pdpfb_priv *priv)
+{
+	struct pdpfb_vid_csc_coefs *coefs = &THIS_PRIV->csc.coefs;
+
+	pdpfb_write(priv, PDP_CSCCOEFF0,
+		PLACE_FIELD(PDP_CSCCOEFF0_RU, coefs->ru) |
+		PLACE_FIELD(PDP_CSCCOEFF0_RY, coefs->ry));
+	pdpfb_write(priv, PDP_CSCCOEFF1,
+		PLACE_FIELD(PDP_CSCCOEFF1_GY, coefs->gy) |
+		PLACE_FIELD(PDP_CSCCOEFF1_RV, coefs->rv));
+	pdpfb_write(priv, PDP_CSCCOEFF2,
+		PLACE_FIELD(PDP_CSCCOEFF2_GV, coefs->gv) |
+		PLACE_FIELD(PDP_CSCCOEFF2_GU, coefs->gu));
+	pdpfb_write(priv, PDP_CSCCOEFF3,
+		PLACE_FIELD(PDP_CSCCOEFF3_BU, coefs->bu) |
+		PLACE_FIELD(PDP_CSCCOEFF3_BY, coefs->by));
+	pdpfb_write(priv, PDP_CSCCOEFF4,
+		PLACE_FIELD(PDP_CSCCOEFF4_BV, coefs->bv));
+
+	return 0;
+}
+
+static int pdpfb_vid_check_csc(struct pdpfb_vid_csc_coefs *csc)
+{
+#define CSC_COEF_INVALID(x) (((x) >= (1<<10)) || ((x) < (-1<<10)))
+	return	CSC_COEF_INVALID(csc->ry) ||
+		CSC_COEF_INVALID(csc->rv) ||
+		CSC_COEF_INVALID(csc->ru) ||
+		CSC_COEF_INVALID(csc->gy) ||
+		CSC_COEF_INVALID(csc->gv) ||
+		CSC_COEF_INVALID(csc->gu) ||
+		CSC_COEF_INVALID(csc->by) ||
+		CSC_COEF_INVALID(csc->bv) ||
+		CSC_COEF_INVALID(csc->bu);
+#undef CSC_COEF_INVALID
+}
+
+static int pdpfb_vid_check_planar(struct fb_info *info,
+				  struct pdpfb_vid_planar *pl)
+{
+	int extra;
+
+	/* check alignment */
+	if (unlikely(pl->y_offset & PDP_YADDR_ALIGNMASK)) {
+		pl->y_offset &= ~PDP_YADDR_ALIGNMASK;
+		return -EINVAL;
+	}
+	if (unlikely(pl->u_offset & PDP_UADDR_ALIGNMASK)) {
+		pl->u_offset &= ~PDP_UADDR_ALIGNMASK;
+		return -EINVAL;
+	}
+	if (unlikely(pl->v_offset & PDP_VADDR_ALIGNMASK)) {
+		pl->v_offset &= ~PDP_VADDR_ALIGNMASK;
+		return -EINVAL;
+	}
+	if (unlikely(pl->y_line_length & PDP_YSTRIDE_ALIGNMASK)) {
+		/* round up */
+		pl->y_line_length = pdpfb_get_line_length(pl->y_line_length, 8);
+		return -EINVAL;
+	}
+	/* u/v strides must be the same */
+	if (unlikely(pl->u_line_length != pl->v_line_length)) {
+		pl->v_line_length = pl->u_line_length;
+		return -EINVAL;
+	}
+	/* strides must be in range */
+	if (unlikely(pl->y_line_length > PDP_YSTRIDE_MAX)) {
+		pl->y_line_length = PDP_YSTRIDE_MAX;
+		return -EINVAL;
+	}
+	/* u/v strides must be half or equal to y stride */
+	if (unlikely(pl->u_line_length != pl->y_line_length &&
+		     pl->u_line_length*2 != pl->y_line_length)) {
+		if (pl->u_line_length < pl->y_line_length)
+			pl->u_line_length = pl->y_line_length/2;
+		else
+			pl->u_line_length = pl->y_line_length;
+		pl->v_line_length = pl->u_line_length;
+		return -EINVAL;
+	}
+
+	/* offsets must be in range (don't want to overflow later) */
+	if (unlikely(pl->y_offset > PDP_YADDR_MAX ||
+		     pl->y_offset >= THIS_STREAM->videomem_len)) {
+		pl->y_offset = 0;
+		return -ENOMEM;
+	}
+	if (unlikely(pl->u_offset > PDP_UADDR_MAX ||
+		     pl->u_offset >= THIS_STREAM->videomem_len)) {
+		pl->u_offset = 0;
+		return -ENOMEM;
+	}
+	if (unlikely(pl->v_offset > PDP_VADDR_MAX ||
+		     pl->v_offset >= THIS_STREAM->videomem_len)) {
+		pl->v_offset = 0;
+		return -ENOMEM;
+	}
+	/* does y plane fit in memory? */
+	extra = info->var.xres_virtual - pl->y_line_length;
+	if (extra < 0)
+		extra = 0;
+	if (pl->y_offset + pl->y_line_length*info->var.yres_virtual + extra
+						> THIS_STREAM->videomem_len) {
+		pl->y_offset = 0;
+		return -ENOMEM;
+	}
+	/* does u plane fit in memory? */
+	if (info->var.nonstd == PDP_VID_PIXFMT_420_PL8)
+		extra = info->var.xres_virtual/2 - pl->u_line_length;
+	else /* byte interleaved chroma formats */
+		extra = info->var.xres_virtual - pl->u_line_length;
+	if (extra < 0)
+		extra = 0;
+	if (pl->u_offset + pl->u_line_length*info->var.yres_virtual/2 + extra
+						> THIS_STREAM->videomem_len) {
+		pl->u_offset = 0;
+		return -ENOMEM;
+	}
+	/* does v plane fit in memory? (same extra as u plane) */
+	if (pl->v_offset + pl->v_line_length*info->var.yres_virtual/2 + extra
+						> THIS_STREAM->videomem_len) {
+		pl->v_offset = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void pdpfb_vid_update_planar(struct pdpfb_priv *priv,
+				    struct fb_info *info)
+{
+	int uv_planar = 1;
+	/* handle planar YUV */
+	switch (info->var.nonstd) {
+	case PDP_VID_PIXFMT_420_PL8IVU:
+	case PDP_VID_PIXFMT_420_PL8IUV:
+		uv_planar = 0;
+		/* fall through */
+	case PDP_VID_PIXFMT_420_PL8:
+	/* The following 2 pixel formats are unimplemented */
+	case PDP_VID_PIXFMT_420_T88CP:
+	case PDP_VID_PIXFMT_422_T88CP:
+		if (THIS_PRIV->planar_override) {
+			/* if overridden planar setup is still ok, leave it */
+			if (THIS_PRIV->planar_ov_virtres[0]
+					== info->var.xres_virtual &&
+			    THIS_PRIV->planar_ov_virtres[1]
+					== info->var.yres_virtual &&
+			    THIS_PRIV->planar_ov_pixfmt == info->var.nonstd)
+				break;
+		}
+
+		THIS_PRIV->planar_override = 0;
+		THIS_PRIV->planar.y_line_length
+			= pdpfb_get_line_length(info->var.xres_virtual, 8);
+		THIS_PRIV->planar.u_line_length
+			= THIS_PRIV->planar.v_line_length
+			= info->var.xres_virtual >> uv_planar;
+		THIS_PRIV->planar.y_offset = 0;
+		THIS_PRIV->planar.u_offset = THIS_PRIV->planar.y_offset
+			+ THIS_PRIV->planar.y_line_length
+				* info->var.yres_virtual;
+		if (uv_planar)
+			THIS_PRIV->planar.v_offset = THIS_PRIV->planar.u_offset
+				+ THIS_PRIV->planar.u_line_length
+					* info->var.yres_virtual/2;
+		else
+			THIS_PRIV->planar.v_offset = THIS_PRIV->planar.u_offset;
+		break;
+
+	case PDP_VID_PIXFMT_422_UY0VY1_8888:
+	case PDP_VID_PIXFMT_422_VY0UY1_8888:
+	case PDP_VID_PIXFMT_422_Y0UY1V_8888:
+	case PDP_VID_PIXFMT_422_Y0VY1U_8888:
+	default:
+		THIS_PRIV->planar_override = 0;
+		THIS_PRIV->planar.y_line_length
+			= THIS_PRIV->planar.u_line_length
+			= THIS_PRIV->planar.v_line_length
+			= pdpfb_get_line_length(info->var.xres_virtual,
+						info->var.bits_per_pixel);
+		THIS_PRIV->planar.y_offset = 0;
+		THIS_PRIV->planar.u_offset = 0;
+		THIS_PRIV->planar.v_offset = 0;
+		break;
+	}
+}
+
+static int pdpfb_vid_change_mode(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 str2posn;
+
+	/* handle planar YUV */
+	switch (info->var.nonstd) {
+	case PDP_VID_PIXFMT_420_PL8:
+	case PDP_VID_PIXFMT_420_PL8IVU:
+	case PDP_VID_PIXFMT_420_PL8IUV:
+	/* The following 2 pixel formats are unimplemented */
+	case PDP_VID_PIXFMT_420_T88CP:
+	case PDP_VID_PIXFMT_422_T88CP:
+		info->fix.type = FB_TYPE_PLANES;
+		break;
+
+	case PDP_VID_PIXFMT_422_UY0VY1_8888:
+	case PDP_VID_PIXFMT_422_VY0UY1_8888:
+	case PDP_VID_PIXFMT_422_Y0UY1V_8888:
+	case PDP_VID_PIXFMT_422_Y0VY1U_8888:
+	default:
+		info->fix.type = FB_TYPE_PACKED_PIXELS;
+		break;
+	}
+	info->fix.line_length = THIS_PRIV->planar.y_line_length;
+	str2posn = pdpfb_read(priv, PDP_STR2POSN);
+	SET_FIELD(str2posn, PDP_STRXPOSN_SRCSTRIDE,
+			(info->fix.line_length >> PDP_YSTRIDE_ALIGN) - 1);
+	pdpfb_write(priv, PDP_STR2POSN, str2posn);
+
+	return 0;
+}
+
+static int pdpfb_vid_configure_hscale(struct pdpfb_priv *priv)
+{
+	u32 i;
+	u32 scale_ctrl, hsinit, scale_size;
+	u32 xres = THIS_STREAM->info.var.xres;
+	u32 geomw = THIS_STREAM->geom.w;
+	u32 hpitch;
+	int en = (geomw && geomw != xres);
+
+#if PDP_REV >= 0x010001
+	/* we enable the scaler even with 1:1 to get fine control over HSKIP */
+	en = 1;
+#endif
+
+	scale_ctrl = pdpfb_read(priv, PDP_SCALECTRL);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_HSCALEBP, !en);
+	pdpfb_write(priv, PDP_SCALECTRL, scale_ctrl);
+
+	if (!geomw)
+		geomw = xres;
+	scale_size = pdpfb_read(priv, PDP_SCALESIZE);
+	SET_FIELD(scale_size, PDP_SCALESIZE_SCALEDWIDTH, geomw - 1);
+	pdpfb_write(priv, PDP_SCALESIZE, scale_size);
+
+	if (!en)
+		return 0;
+
+	hpitch = (fix_div(12, xres, geomw) + 1) >> 1;
+	if (THIS_PRIV->hdecimation)
+		hpitch /= 2;
+	hsinit = pdpfb_read(priv, PDP_HSINIT);
+	SET_FIELD(hsinit, PDP_HSINIT_HINITIAL,
+			fix_div(11, THIS_PRIV->hs_taps, 2));
+	SET_FIELD(hsinit, PDP_HSINIT_HDECIM, THIS_PRIV->hdecimation);
+	SET_FIELD(hsinit, PDP_HSINIT_HPITCH, hpitch);
+	pdpfb_write(priv, PDP_HSINIT, hsinit);
+
+	for (i = 0; i < 8; ++i) {
+		u32 val =  (0xFF & THIS_PRIV->hscoeffs[i<<2])
+			| ((0xFF & THIS_PRIV->hscoeffs[(i<<2) + 1]) << 8)
+			| ((0xFF & THIS_PRIV->hscoeffs[(i<<2) + 2]) << 16)
+			| ((0xFF & THIS_PRIV->hscoeffs[(i<<2) + 3]) << 24);
+		pdpfb_write(priv, PDP_HSCOEFF0 + (i<<2), val);
+	}
+#if PDP_REV < 0x010001
+	/* the odd one out, index 32 goes in most significant byte */
+	pdpfb_write(priv, PDP_HSCOEFF8,
+			(0xFF & THIS_PRIV->hscoeffs[i<<2]) << 24);
+#else
+	pdpfb_write(priv, PDP_HSCOEFF8, 0xFF & THIS_PRIV->hscoeffs[i<<2]);
+#endif
+	return 0;
+}
+
+#ifndef PDP_VID_VSCALE
+static int pdpfb_vid_configure_vscale(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	struct pdpfb_geom *geom = &THIS_STREAM->geom;
+
+	u32 blend2;
+	u32 line_double = 0;
+	u32 line_halve = 0;
+
+	if (geom->h) {
+		line_double = (geom->h > info->var.yres);
+		line_halve = (geom->h < info->var.yres);
+	}
+
+	blend2 = pdpfb_read(priv, THIS_STREAM->regs.blend2);
+	SET_FIELD(blend2, PDP_STRXBLEND2_LINEDOUBLE, line_double);
+	SET_FIELD(blend2, PDP_STRXBLEND2_LINEHALVE, line_halve);
+	pdpfb_write(priv, THIS_STREAM->regs.blend2, blend2);
+
+	return 0;
+}
+
+#else
+static int pdpfb_vid_configure_vscale(struct pdpfb_priv *priv)
+{
+	struct pdp_info *pdata = pdpfb_get_platform_data(priv);
+	u32 i;
+	u32 scale_ctrl, scale_size, vsinit;
+	u32 yres = THIS_STREAM->info.var.yres;
+	u32 geomh = THIS_STREAM->geom.h;
+	u32 vpitch = fix_div(11, yres, geomh);
+	int en = (geomh && geomh != yres);
+	int hs_before_vs = (THIS_STREAM->info.var.yres > pdata->linestore_len);
+
+	scale_ctrl = pdpfb_read(priv, PDP_SCALECTRL);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_VSCALEBP, !en);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_HSBEFOREVS, hs_before_vs);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_VSURUNCTRL, 1);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_VORDER, THIS_PRIV->vs_taps-1);
+	SET_FIELD(scale_ctrl, PDP_SCALECTRL_VPITCH, vpitch);
+	pdpfb_write(priv, PDP_SCALECTRL, scale_ctrl);
+
+	if (!geomh)
+		geomh = yres;
+	scale_size = pdpfb_read(priv, PDP_SCALESIZE);
+	SET_FIELD(scale_size, PDP_SCALESIZE_SCALEDHEIGHT, geomh - 1);
+	pdpfb_write(priv, PDP_SCALESIZE, scale_size);
+
+	if (!en)
+		return 0;
+
+	vsinit = pdpfb_read(priv, PDP_VSINIT);
+	SET_FIELD(vsinit, PDP_VSINIT_INITIAL1,
+			fix_div(11, THIS_PRIV->vs_taps, 2));
+	pdpfb_write(priv, PDP_VSINIT, vsinit);
+
+	for (i = 0; i < 4; ++i) {
+		u32 val =  (0xFF & THIS_PRIV->vscoeffs[i<<2])
+			| ((0xFF & THIS_PRIV->vscoeffs[(i<<2) + 1]) << 8)
+			| ((0xFF & THIS_PRIV->vscoeffs[(i<<2) + 2]) << 16)
+			| ((0xFF & THIS_PRIV->vscoeffs[(i<<2) + 3]) << 24);
+		pdpfb_write(priv, PDP_VSCOEFF0 + (i<<2), val);
+	}
+	pdpfb_write(priv, PDP_VSCOEFF4, 0xFF & THIS_PRIV->vscoeffs[i<<2]);
+	return 0;
+}
+#endif
+
+#ifdef PDP_SHARED_BASE
+static void pdpfb_vid_apply_addr(void *arg, u32 mask)
+{
+	struct pdpfb_priv *priv = (struct pdpfb_priv *)arg;
+	u32 tmp;
+
+	spin_lock(&THIS_PRIV->base_addr_lock);
+
+	tmp = pdpfb_read(priv, PDP_STR2CTRL);
+	SET_FIELD(tmp, PDP_STRXCTRL_BASEADDR,
+				THIS_PRIV->base_addr_y >> PDP_YADDR_ALIGN);
+	pdpfb_write(priv, PDP_STR2CTRL, tmp);
+
+	tmp = pdpfb_read(priv, PDP_STR2UADDR);
+	SET_FIELD(tmp, PDP_STR2UADDR_UBASEADDR,
+				THIS_PRIV->base_addr_u >> PDP_UADDR_ALIGN);
+	pdpfb_write(priv, PDP_STR2UADDR, tmp);
+
+	tmp = pdpfb_read(priv, PDP_STR2VADDR);
+	SET_FIELD(tmp, PDP_STR2VADDR_VBASEADDR,
+				THIS_PRIV->base_addr_v >> PDP_VADDR_ALIGN);
+	pdpfb_write(priv, PDP_STR2VADDR, tmp);
+
+	pdpfb_unregister_isr(pdpfb_vid_apply_addr, priv,
+			     PDPFB_IRQ_VEVENT0);
+
+	tmp = pdpfb_read(priv, PDP_SKIPCTRL);
+	SET_FIELD(tmp, PDP_SKIPCTRL_HSKIP, THIS_PRIV->hskip);
+#if PDP_REV >= 0x010001
+	SET_FIELD(tmp, PDP_SKIPCTRL_VSKIP, THIS_PRIV->vskip);
+#endif
+	pdpfb_write(priv, PDP_SKIPCTRL, tmp);
+
+	spin_unlock(&THIS_PRIV->base_addr_lock);
+}
+#endif
+
+static int pdpfb_vid_configure_addr(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 ybuf_offset, ybuf_start;
+	u32 ubuf_offset, ubuf_start;
+	u32 vbuf_offset, vbuf_start;
+	u32 hskip, xoff, vskip, yoff;
+	u32 str_ctrl;
+	u32 uv_half_stride = THIS_PRIV->planar.u_line_length
+				< THIS_PRIV->planar.y_line_length;
+#ifdef PDP_SHARED_BASE
+	u32 u_addr;
+	unsigned long flags;
+#else
+	u32 skip_ctrl;
+#endif
+
+	str_ctrl = pdpfb_read(priv, PDP_STR2CTRL);
+#ifndef PDP_SHARED_BASE
+	SET_FIELD(str_ctrl, PDP_STR2CTRL_UVHALFSTR, uv_half_stride);
+#endif
+	pdpfb_write(priv, PDP_STR2CTRL, str_ctrl);
+
+	switch (info->var.nonstd) {
+	case PDP_VID_PIXFMT_420_PL8:
+		hskip = info->var.xoffset & 0x1f;
+		xoff = info->var.xoffset - hskip;
+		vskip = info->var.yoffset & 0x1;
+		yoff = info->var.yoffset - vskip;
+
+		ybuf_offset = xoff;
+		ubuf_offset = xoff >> 1;
+		vbuf_offset = ubuf_offset;
+
+		ybuf_offset += THIS_PRIV->planar.y_line_length * yoff ;
+		ubuf_offset += THIS_PRIV->planar.u_line_length * (yoff >> 1);
+		vbuf_offset += THIS_PRIV->planar.v_line_length * (yoff >> 1);
+		break;
+	case PDP_VID_PIXFMT_420_PL8IVU:
+	case PDP_VID_PIXFMT_420_PL8IUV:
+		hskip = info->var.xoffset & 0xf;
+		xoff = info->var.xoffset - hskip;
+		vskip = info->var.yoffset & 0x1;
+		yoff = info->var.yoffset - vskip;
+
+		ybuf_offset = xoff + THIS_PRIV->planar.y_line_length * yoff;
+		ubuf_offset = xoff +
+				THIS_PRIV->planar.y_line_length * (yoff >> 1);
+		vbuf_offset = ubuf_offset;
+		break;
+	default:
+		xoff = info->var.xoffset * info->var.bits_per_pixel / 8;
+		hskip = (xoff & 0xf) * 8 / info->var.bits_per_pixel;
+		vskip = 0;
+		yoff = info->var.yoffset;
+
+		ybuf_offset = xoff + info->fix.line_length * yoff;
+		ubuf_offset = ybuf_offset;
+		vbuf_offset = ybuf_offset;
+		break;
+	}
+	ybuf_start = THIS_PRIV->planar.y_offset + ybuf_offset;
+	ubuf_start = THIS_PRIV->planar.u_offset + ubuf_offset;
+	vbuf_start = THIS_PRIV->planar.v_offset + vbuf_offset;
+
+#if PDP_REV < 0x010001
+	if (THIS_PRIV->hdecimation)
+		hskip &= ~0x3;
+	else
+		hskip &= ~0x1;
+#endif
+
+#ifndef PDP_SHARED_BASE
+	ybuf_start += info->fix.smem_start;
+	ubuf_start += info->fix.smem_start;
+	vbuf_start += info->fix.smem_start;
+	pdpfb_write(priv, PDP_STR2ADDR, ybuf_start >> PDP_YADDR_ALIGN);
+	pdpfb_write(priv, PDP_STR2UADDR, ubuf_start >> PDP_UADDR_ALIGN);
+	pdpfb_write(priv, PDP_STR2VADDR, vbuf_start >> PDP_VADDR_ALIGN);
+
+	skip_ctrl = pdpfb_read(priv, PDP_SKIPCTRL);
+	SET_FIELD(skip_ctrl, PDP_SKIPCTRL_HSKIP, hskip);
+#if PDP_REV >= 0x010001
+	SET_FIELD(skip_ctrl, PDP_SKIPCTRL_VSKIP, vskip);
+#endif
+	pdpfb_write(priv, PDP_SKIPCTRL, skip_ctrl);
+
+#else
+	ybuf_start += THIS_STREAM->videomem_offset;
+	ubuf_start += THIS_STREAM->videomem_offset;
+	vbuf_start += THIS_STREAM->videomem_offset;
+
+	spin_lock_irqsave(&THIS_PRIV->base_addr_lock, flags);
+
+	u_addr = pdpfb_read(priv, PDP_STR2UADDR);
+	SET_FIELD(u_addr, PDP_STR2UADDR_UVHALFSTR, uv_half_stride);
+	pdpfb_write(priv, PDP_STR2UADDR, u_addr);
+
+	THIS_PRIV->base_addr_y = ybuf_start;
+	THIS_PRIV->base_addr_u = ubuf_start;
+	THIS_PRIV->base_addr_v = vbuf_start;
+	THIS_PRIV->hskip = hskip;
+	THIS_PRIV->vskip = vskip;
+	pdpfb_register_isr(pdpfb_vid_apply_addr, priv,
+			   PDPFB_IRQ_VEVENT0);
+
+	spin_unlock_irqrestore(&THIS_PRIV->base_addr_lock, flags);
+#endif
+
+	return 0;
+}
+
+static int pdpfb_vid_configure(struct pdpfb_priv *priv)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	u32 str2surf;
+
+	str2surf = pdpfb_read(priv, PDP_STR2SURF);
+	SET_FIELD(str2surf, PDP_STRXSURF_WIDTH, info->var.xres - 1);
+	SET_FIELD(str2surf, PDP_STRXSURF_HEIGHT, info->var.yres - 1);
+	SET_FIELD(str2surf, PDP_STR2SURF_PIXFMT, THIS_PRIV->pixfmt);
+	SET_FIELD(str2surf, PDP_STR2SURF_USECSC, THIS_PRIV->csc.enable);
+	SET_FIELD(str2surf, PDP_STR2SURF_COSITED, THIS_PRIV->csc.cosited);
+	pdpfb_write(priv, PDP_STR2SURF, str2surf);
+
+	pdpfb_vid_change_mode(priv);
+
+	pdpfb_vid_configure_hscale(priv);
+	pdpfb_vid_configure_vscale(priv);
+
+	return 0;
+}
+
+/*
+ * Handle video-stream specific ioctls (CSC and planar YUV setup).
+ * Unrecognised commands are forwarded to the generic stream handler.
+ * Returns 0 on success or a negative errno.
+ */
+static int pdpfb_vid_ioctl(struct fb_info *info, unsigned int cmd,
+				unsigned long arg)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+	void __user *argp = (void __user *)arg;
+	struct pdpfb_vid_csc csc;
+	struct pdpfb_vid_planar pl;
+	int err;
+
+	switch (cmd) {
+	/* get colour space conversion settings */
+	case PDPIO_GETCSC:
+		if (copy_to_user(argp, &THIS_PRIV->csc,
+				sizeof(THIS_PRIV->csc)))
+			return -EFAULT;
+		return 0;
+	/* set colour space conversion settings */
+	case PDPIO_SETCSC:
+		if (copy_from_user(&csc, argp, sizeof(csc)))
+			return -EFAULT;
+		/* normalise flags to 0/1 */
+		csc.enable = (csc.enable != 0);
+		csc.cosited = (csc.cosited != 0);
+		if (csc.preset) {
+			/* a preset overrides any user supplied coefficients */
+			if (csc.preset < ARRAY_SIZE(pdpfb_vid_csc_presets))
+				csc.coefs = pdpfb_vid_csc_presets[csc.preset];
+			else
+				return -EINVAL;
+		} else if (pdpfb_vid_check_csc(&csc.coefs))
+			return -EINVAL;
+		THIS_PRIV->csc = csc;
+		pdpfb_vid_configure_csc(priv);
+		/* copy back so the caller sees the expanded preset coefs */
+		if (copy_to_user(argp, &THIS_PRIV->csc,
+				sizeof(THIS_PRIV->csc)))
+			return -EFAULT;
+		return 0;
+	/* get planar YUV information */
+	case PDPIO_GETPLANAR:
+		if (copy_to_user(argp, &THIS_PRIV->planar,
+				sizeof(THIS_PRIV->planar)))
+			return -EFAULT;
+		return 0;
+	/* set planar YUV information */
+	case PDPIO_SETPLANAR:
+		if (copy_from_user(&pl, argp, sizeof(pl)))
+			return -EFAULT;
+		/* pixel format must be planar */
+		if (unlikely(info->fix.type != FB_TYPE_PLANES))
+			return -EINVAL;
+
+		/* copy back even on failure so caller sees adjusted values */
+		err = pdpfb_vid_check_planar(info, &pl);
+		if (copy_to_user(argp, &pl, sizeof(pl)))
+			return -EFAULT;
+		if (err < 0)
+			return err;
+
+		/* override the planar setup */
+		THIS_PRIV->planar = pl;
+		THIS_PRIV->planar_override = 1;
+		THIS_PRIV->planar_ov_virtres[0] = info->var.xres_virtual;
+		THIS_PRIV->planar_ov_virtres[1] = info->var.yres_virtual;
+		THIS_PRIV->planar_ov_pixfmt = info->var.nonstd;
+
+		/* start using the new setup */
+		pdpfb_vid_update_planar(priv, info);
+		pdpfb_vid_change_mode(priv);
+		pdpfb_vid_configure_addr(priv);
+		return 0;
+	default:
+		return pdpfb_str_ioctl(priv, THIS_STREAM, cmd, arg);
+	}
+}
+
+/*
+ * fb_pan_display: move the visible window within the virtual framebuffer.
+ * Returns -EINVAL if the requested offset would show memory outside the
+ * virtual resolution.
+ */
+static int pdpfb_vid_pan_display(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	struct pdpfb_priv *priv;
+
+	if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+	    var->yoffset + info->var.yres > info->var.yres_virtual)
+		return -EINVAL;
+
+	info->var.xoffset = var->xoffset;
+	info->var.yoffset = var->yoffset;
+
+	/* reprogram the stream base address for the new offset */
+	priv = dev_get_drvdata(info->device);
+	pdpfb_vid_configure_addr(priv);
+
+	return 0;
+}
+
+/*
+ * fb_set_par: apply the (already validated) mode in info->var to the
+ * hardware, refreshing geometry, pixel format, planar layout and margins.
+ */
+static int pdpfb_vid_set_par(struct fb_info *info)
+{
+	struct pdpfb_priv *priv = dev_get_drvdata(info->device);
+
+	/* resolution may have changed, so scaling may need alteration */
+	pdpfb_vid_check_geom(priv, &THIS_STREAM->geom);
+	pdpfb_vid_set_geom(priv);
+
+	/* pixel format maps from nonstd, so may need updating */
+	THIS_PRIV->pixfmt = pdpfb_vid_nonstd_to_pixfmt(info->var.nonstd);
+
+	pdpfb_vid_update_planar(priv, info);
+	pdpfb_vid_configure(priv);
+	pdpfb_vid_configure_addr(priv);
+	pdpfb_update_margins(priv, THIS_STREAM);
+
+	return 0;
+}
+
+/* Fixed screen info template for the video stream framebuffer. */
+static struct fb_fix_screeninfo pdpfb_vid_fix  = {
+	.id =		"pdp_vid",
+	.type =		FB_TYPE_PACKED_PIXELS,
+	.visual =	FB_VISUAL_TRUECOLOR,
+#if PDP_REV < 0x010001
+	/* older hardware can only pan in units of 2 pixels/lines */
+	.xpanstep =	2,
+	.ypanstep =	2,
+#else
+	.xpanstep =	1,
+	.ypanstep =	1,
+#endif
+	.accel =	FB_ACCEL_IMG_PDP_1,
+};
+
+/* fbdev operations for the video stream; drawing uses the cfb helpers */
+static struct fb_ops pdpfb_vid_ops = {
+	.fb_setcolreg	= pdpfb_setcolreg,
+	.fb_blank	= pdpfb_blank,
+	.fb_pan_display	= pdpfb_vid_pan_display,
+	.fb_check_var	= pdpfb_vid_check_var,
+	.fb_set_par	= pdpfb_vid_set_par,
+	.fb_ioctl	= pdpfb_vid_ioctl,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+};
+
+/*
+ * Initialise capability information and the nonstd -> hardware pixel
+ * format lookup table.  Entries set to -1 mark formats unsupported on
+ * this hardware revision.
+ */
+static void pdpfb_vid_probe_caps(struct pdpfb_priv *priv)
+{
+	int *m = THIS_PRIV->nonstd2pixfmt;
+
+#ifdef PDP_GAMMA
+	THIS_STREAM->caps.gamma = PDP_GAMMA;
+#endif
+
+	/* pixfmt values vary between revisions */
+	m[0] = -1;
+	m[PDP_VID_PIXFMT_420_PL8] = PDP_STR2SURF_PIXFMT_420_PL8;
+	m[PDP_VID_PIXFMT_422_UY0VY1_8888] = PDP_STR2SURF_PIXFMT_422_UY0VY1_8888;
+	m[PDP_VID_PIXFMT_422_VY0UY1_8888] = PDP_STR2SURF_PIXFMT_422_VY0UY1_8888;
+	m[PDP_VID_PIXFMT_422_Y0UY1V_8888] = PDP_STR2SURF_PIXFMT_422_Y0UY1V_8888;
+	m[PDP_VID_PIXFMT_422_Y0VY1U_8888] = PDP_STR2SURF_PIXFMT_422_Y0VY1U_8888;
+#if PDP_REV < 0x010001
+	/* old revs support twiddled chroma-packed but not interleaved */
+	m[PDP_VID_PIXFMT_420_PL8IVU] = -1;
+	m[PDP_VID_PIXFMT_420_PL8IUV] = -1;
+	m[PDP_VID_PIXFMT_420_T88CP] = PDP_STR2SURF_PIXFMT_420_T88CP;
+	m[PDP_VID_PIXFMT_422_T88CP] = PDP_STR2SURF_PIXFMT_422_T88CP;
+#else
+	/* newer revs are the other way around */
+	m[PDP_VID_PIXFMT_420_PL8IVU] = PDP_STR2SURF_PIXFMT_420_PL8IVU;
+	m[PDP_VID_PIXFMT_420_PL8IUV] = PDP_STR2SURF_PIXFMT_420_PL8IUV;
+	m[PDP_VID_PIXFMT_420_T88CP] = -1;
+	m[PDP_VID_PIXFMT_422_T88CP] = -1;
+#endif
+}
+
+/*
+ * Probe the video stream: initialise the fb_info from the default video
+ * mode (at half resolution), allocate video memory, and register the
+ * framebuffer.  Returns 0 on success or a negative errno.
+ */
+static int pdpfb_vid_probe(struct pdpfb_priv *priv,
+			struct platform_device *pdev,
+			const struct fb_videomode *mode)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+	struct pdp_info *pdata = pdpfb_get_platform_data(priv);
+	int error;
+
+	pdpfb_vid_probe_caps(priv);
+
+	/* default to a quarter-area (half width/height) overlay */
+	info->device = &pdev->dev;
+	info->fbops = &pdpfb_vid_ops;
+	info->var.xres = info->var.xres_virtual = (mode->xres/2);
+	info->var.yres = info->var.yres_virtual = (mode->yres/2);
+	info->var.width = pdata->lcd_size_cfg.width;
+	info->var.height = pdata->lcd_size_cfg.height;
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->var.nonstd = PDP_VID_PIXFMT_422_Y0VY1U_8888;
+	error = pdpfb_vid_set_bpp(&info->var);
+	if (error)
+		goto err0;
+	THIS_PRIV->pixfmt = pdpfb_vid_nonstd_to_pixfmt(info->var.nonstd);
+	pdpfb_vid_update_planar(priv, info);
+
+	/* copy timing information from the requested mode */
+	info->var.hsync_len = mode->hsync_len;
+	info->var.left_margin = mode->left_margin;
+	info->var.right_margin = mode->right_margin;
+	info->var.vsync_len = mode->vsync_len;
+	info->var.upper_margin = mode->upper_margin;
+	info->var.lower_margin = mode->lower_margin;
+
+	info->fix = pdpfb_vid_fix;
+
+
+	error = pdpfb_str_videomem_alloc(priv, THIS_STREAM);
+	if (error)
+		goto err0;
+	if (pdpfb_vid_required_mem(&info->var) > THIS_STREAM->videomem_len) {
+		error = -ENOMEM;
+		goto err1;
+	}
+
+	info->pseudo_palette = pdpfb_get_pseudo_palette(priv);
+	info->flags = FBINFO_FLAG_DEFAULT
+			| FBINFO_HWACCEL_XPAN
+			| FBINFO_HWACCEL_YPAN;
+
+	/* default colour space conversion is the SDTV preset */
+	THIS_PRIV->csc.coefs = pdpfb_vid_csc_presets[PDP_VID_CSCPRESET_SDTV];
+
+	error = register_framebuffer(info);
+	if (error < 0)
+		goto err1;
+
+	dev_info(&pdev->dev, "registered video framebuffer (len=0x%lx)\n",
+		 THIS_STREAM->videomem_len);
+
+	return 0;
+err1:
+	pdpfb_str_videomem_free(THIS_STREAM);
+err0:
+	return error;
+}
+
+/* Tear down the video stream: unregister the fb and free video memory. */
+static int pdpfb_vid_remove(struct pdpfb_priv *priv,
+			struct platform_device *pdev)
+{
+	struct fb_info *info = &THIS_STREAM->info;
+
+	unregister_framebuffer(info);
+
+	pdpfb_str_videomem_free(THIS_STREAM);
+	return 0;
+}
+
+/*
+ * Static instance of the video stream state: stream 2 of the PDP, with
+ * default colour keying (match black), full global alpha, and the SDTV
+ * colour space conversion preset enabled.
+ */
+static struct pdpfb_vid_stream_priv pdpfb_vid_stream = {
+	.stream = {
+		.mem_pool = PDPFB_MEMPOOL_VIDMEM,
+		.videomem_len = 0,
+		.enable = 0,
+		.ckey = {
+			.ckey = 0x000000,
+			.mask = 0xFFFFFF,
+		},
+		.global_alpha = 0xFF,
+		.ops = {
+			.probe = pdpfb_vid_probe,
+			.remove = pdpfb_vid_remove,
+			.check_geom = pdpfb_vid_check_geom,
+			.set_geom = pdpfb_vid_set_geom,
+			.configure = pdpfb_vid_configure,
+			.configure_addr = pdpfb_vid_configure_addr,
+		},
+		.regs = {
+			.surf = PDP_STR2SURF,
+			.blend = PDP_STR2BLEND,
+			.blend2 = PDP_STR2BLEND2,
+			.ctrl = PDP_STR2CTRL,
+			.posn = PDP_STR2POSN,
+#if PDP_REV >= 0x010001
+			.gamma = PDP_YUVGAMMA0,
+			.gamma_stride = PDP_YUVGAMMA_STRIDE,
+#endif
+		},
+	},
+	.csc = {
+		.enable = 1,
+		.preset = PDP_VID_CSCPRESET_SDTV,
+		.cosited = 1,
+	},
+#ifdef PDP_SHARED_BASE
+	.base_addr_lock = __SPIN_LOCK_UNLOCKED(pdpfb_vid_stream.base_addr_lock),
+#endif
+};
+
+/* Accessor used by the core driver to obtain the video stream object. */
+struct pdpfb_stream *pdpfb_vid_get_stream(void)
+{
+	return THIS_STREAM;
+}
diff --git a/drivers/video/pdpfb_vid.h b/drivers/video/pdpfb_vid.h
new file mode 100644
index 0000000..d43274c
--- /dev/null
+++ b/drivers/video/pdpfb_vid.h
@@ -0,0 +1,22 @@
+/*
+ * PDP Scaled Video Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _PDPFB_VID_H
+#define _PDPFB_VID_H
+
+#ifdef CONFIG_FB_PDP_VID
+
+#include "pdpfb.h"
+
+struct pdpfb_stream *pdpfb_vid_get_stream(void);
+
+#endif
+
+#endif
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index e89fc31..7403ddd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -972,6 +972,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called m54xx_wdt.
 
+# METAG Architecture
+
+config IMGPDC_WDT
+	tristate "ImgTec PDC Watchdog Timer"
+	depends on METAG
+	help
+	  Hardware driver for the ImgTec PowerDown Controller Watchdog Timer
+	  as found in Meta SoCs such as Comet.
+
 # MicroBlaze Architecture
 
 config XILINX_WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a300b94..33f5df0 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -116,6 +116,9 @@
 # M68K Architecture
 obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
 
+# METAG Architecture
+obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
+
 # MicroBlaze Architecture
 obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
 
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
new file mode 100644
index 0000000..2674684
--- /dev/null
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -0,0 +1,624 @@
+/*
+ * ImgTec PowerDown Controller Watchdog Timer as found in Meta SoCs.
+ *
+ * Copyright 2010-2012 Imagination Technologies Ltd.
+ *
+ * Parts derived from mpcore_wdt.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/* registers (byte offsets from the watchdog register base) */
+
+#define PDC_WD_SW_RESET			0x000
+#define PDC_WD_CONFIG			0x004
+#define PDC_WD_TICKLE1			0x008
+#define PDC_WD_TICKLE2			0x00c
+#define PDC_WD_IRQ_STATUS		0x010
+#define PDC_WD_IRQ_CLEAR		0x014
+#define PDC_WD_IRQ_EN			0x018
+
+/* field masks */
+
+#define PDC_WD_CONFIG_ENABLE		0x80000000
+#define PDC_WD_CONFIG_REMIND		0x00001f00
+#define PDC_WD_CONFIG_REMIND_SHIFT	8
+#define PDC_WD_CONFIG_DELAY		0x0000001f
+#define PDC_WD_CONFIG_DELAY_SHIFT	0
+#define PDC_WD_TICKLE_NEW		0x00000010
+#define PDC_WD_TICKLE_STATUS		0x00000007
+#define PDC_WD_TICKLE_STATUS_SHIFT	0
+#define PDC_WD_IRQ_REMIND		0x00000001
+
+/* constants */
+
+#define PDC_WD_TICKLE1_MAGIC		0xabcd1234
+#define PDC_WD_TICKLE2_MAGIC		0x4321dcba
+#define PDC_WD_TICKLE_STATUS_HRESET	0x0	/* Hard reset */
+#define PDC_WD_TICKLE_STATUS_TIMEOUT	0x1	/* Timeout */
+#define PDC_WD_TICKLE_STATUS_TICKLE	0x2	/* Tickled incorrectly */
+#define PDC_WD_TICKLE_STATUS_SRESET	0x3	/* Soft reset */
+#define PDC_WD_TICKLE_STATUS_USER	0x4	/* User reset */
+
+/*
+ * Timeouts are log2 seconds; the 5 bit hardware fields encode
+ * (shift - TIMER_MIN), giving a range of 2^-14 .. 2^17 seconds.
+ */
+#define TIMER_MIN	(-14)
+#define TIMER_MAX	17
+
+#define TIMER_DELAY_SHIFT		6	/* 64 seconds */
+static int delay_shift = TIMER_DELAY_SHIFT;
+module_param(delay_shift, int, 0);
+MODULE_PARM_DESC(delay_shift,
+	"Log2 of PDC watchdog timer delay in seconds ("
+	"-14 <= delay_shift <= 17, "
+	"default=" __MODULE_STRING(TIMER_DELAY_SHIFT) ")");
+
+#define TIMER_REMIND_SHIFT		TIMER_MAX	/* disabled */
+static int remind_shift = TIMER_REMIND_SHIFT;
+module_param(remind_shift, int, 0);
+MODULE_PARM_DESC(remind_shift,
+	"Log2 of PDC watchdog timer remind in seconds ("
+	"-14 <= remind_shift <= 17, "
+	"default=" __MODULE_STRING(TIMER_REMIND_SHIFT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+	"Watchdog cannot be stopped once started (default="
+				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/* per-device state; there is a single watchdog instance per SoC */
+struct pdc_wdt_priv {
+	struct device *dev;
+	int irq;
+	void __iomem *reg_base;
+	unsigned long timer_alive;	/* bit 0: /dev/watchdog is open */
+	char expect_close;		/* 42 after 'V' written (magic close) */
+	char card_reset;	/* whether last reset by the WD */
+	spinlock_t lock;	/* serialises register read-modify-writes */
+};
+
+/* the single platform device, needed by the misc-device open() path */
+static struct platform_device *pdc_wdt_dev;
+
+/* Hardware access */
+
+/* MMIO register write helper. */
+static void pdc_wdt_write(struct pdc_wdt_priv *wdt,
+			  unsigned int reg_offs, unsigned int data)
+{
+	iowrite32(data, wdt->reg_base + reg_offs);
+}
+
+/* MMIO register read helper. */
+static unsigned int pdc_wdt_read(struct pdc_wdt_priv *wdt,
+				 unsigned int reg_offs)
+{
+	return ioread32(wdt->reg_base + reg_offs);
+}
+
+/*
+ * Tickle the watchdog by writing the two magic values; the lock keeps
+ * the pair of writes from interleaving with another tickle sequence.
+ */
+static void pdc_wdt_keepalive(struct pdc_wdt_priv *wdt)
+{
+	spin_lock(&wdt->lock);
+	pdc_wdt_write(wdt, PDC_WD_TICKLE1, PDC_WD_TICKLE1_MAGIC);
+	pdc_wdt_write(wdt, PDC_WD_TICKLE2, PDC_WD_TICKLE2_MAGIC);
+	spin_unlock(&wdt->lock);
+}
+
+/* Start the watchdog timer (delay should already be set) */
+static void pdc_wdt_start(struct pdc_wdt_priv *wdt)
+{
+	u32 config;
+
+	spin_lock(&wdt->lock);
+	config = pdc_wdt_read(wdt, PDC_WD_CONFIG);
+	config |= PDC_WD_CONFIG_ENABLE;
+	pdc_wdt_write(wdt, PDC_WD_CONFIG, config);
+	spin_unlock(&wdt->lock);
+}
+
+/* Safely stop the watchdog timer */
+static void pdc_wdt_stop(struct pdc_wdt_priv *wdt)
+{
+	u32 config;
+
+	spin_lock(&wdt->lock);
+	config = pdc_wdt_read(wdt, PDC_WD_CONFIG);
+	config &= ~PDC_WD_CONFIG_ENABLE;
+	pdc_wdt_write(wdt, PDC_WD_CONFIG, config);
+	spin_unlock(&wdt->lock);
+
+	/* Must tickle to finish the stop */
+	pdc_wdt_keepalive(wdt);
+}
+
+/* Find whether the watchdog hardware is enabled (returns 0 or 1). */
+static int pdc_wdt_get_started(struct pdc_wdt_priv *wdt)
+{
+	return !!(pdc_wdt_read(wdt, PDC_WD_CONFIG) & PDC_WD_CONFIG_ENABLE);
+}
+
+/*
+ * Program the main timeout as log2 seconds (caller must pass a value in
+ * [TIMER_MIN, TIMER_MAX]); also records it in the module parameter.
+ */
+static void pdc_wdt_set_delay_shift(struct pdc_wdt_priv *wdt, int delay_sh)
+{
+	u32 config;
+
+	delay_shift = delay_sh;
+	spin_lock(&wdt->lock);
+	config = pdc_wdt_read(wdt, PDC_WD_CONFIG);
+	/* number of 32.768KHz clocks, 2^(n+1) (14 is 1 sec) */
+	config &= ~PDC_WD_CONFIG_DELAY;
+	config |= (delay_shift-TIMER_MIN) << PDC_WD_CONFIG_DELAY_SHIFT;
+	pdc_wdt_write(wdt, PDC_WD_CONFIG, config);
+	spin_unlock(&wdt->lock);
+}
+
+/* As above but for the remind (pre-timeout) interrupt period. */
+static void pdc_wdt_set_remind_shift(struct pdc_wdt_priv *wdt, int remind_sh)
+{
+	u32 config;
+
+	remind_shift = remind_sh;
+	spin_lock(&wdt->lock);
+	config = pdc_wdt_read(wdt, PDC_WD_CONFIG);
+	/* number of 32.768KHz clocks, 2^(n+1) (14 is 1 sec) */
+	config &= ~PDC_WD_CONFIG_REMIND;
+	config |= (remind_shift-TIMER_MIN) << PDC_WD_CONFIG_REMIND_SHIFT;
+	pdc_wdt_write(wdt, PDC_WD_CONFIG, config);
+	spin_unlock(&wdt->lock);
+}
+
+/* Map a PDC_WD_TICKLE1 status field value to a human readable string. */
+static const char *pdc_wdt_tickle_status_str(u32 status)
+{
+	switch (status) {
+	case PDC_WD_TICKLE_STATUS_HRESET:	return "hard reset";
+	case PDC_WD_TICKLE_STATUS_TIMEOUT:	return "timeout";
+	case PDC_WD_TICKLE_STATUS_TICKLE:	return "incorrect tickle";
+	case PDC_WD_TICKLE_STATUS_SRESET:	return "soft reset";
+	case PDC_WD_TICKLE_STATUS_USER:		return "user reset";
+	default:				return "unknown";
+	}
+}
+
+/*
+ * Initialise the watchdog hardware: enable remind interrupts, make sure
+ * the timer is stopped, clamp and program the module-parameter timeouts,
+ * and record whether the previous reset was caused by the watchdog.
+ */
+static void pdc_wdt_setup(struct pdc_wdt_priv *wdt)
+{
+	u32 status;
+
+	/* Enable remind interrupts */
+	pdc_wdt_write(wdt, PDC_WD_IRQ_EN, PDC_WD_IRQ_REMIND);
+	/* Ensure the watchdog is stopped */
+	pdc_wdt_stop(wdt);
+
+	/* Set the timeouts (clamp module parameters into range first) */
+	if (delay_shift < TIMER_MIN)
+		delay_shift = TIMER_MIN;
+	if (delay_shift > TIMER_MAX)
+		delay_shift = TIMER_MAX;
+	pdc_wdt_set_delay_shift(wdt, delay_shift);
+
+	if (remind_shift < TIMER_MIN)
+		remind_shift = TIMER_MIN;
+	if (remind_shift > TIMER_MAX)
+		remind_shift = TIMER_MAX;
+	pdc_wdt_set_remind_shift(wdt, remind_shift);
+
+	/* Find what caused the last reset */
+	status = pdc_wdt_read(wdt, PDC_WD_TICKLE1);
+	status = (status & PDC_WD_TICKLE_STATUS) >> PDC_WD_TICKLE_STATUS_SHIFT;
+	dev_info(wdt->dev,
+		 "Watchdog module loaded (last reset due to %s)\n",
+		 pdc_wdt_tickle_status_str(status));
+
+	/* Was it the watchdog? (userland may want to know) */
+	switch (status) {
+	case PDC_WD_TICKLE_STATUS_TICKLE:
+	case PDC_WD_TICKLE_STATUS_TIMEOUT:
+		wdt->card_reset = 1;
+		break;
+	default:
+		wdt->card_reset = 0;
+	}
+}
+
+/* round up to power of 2 (seconds -> log2 shift for the delay field) */
+static inline int pdc_wdt_delay_to_shift(int secs)
+{
+	return order_base_2(secs);
+}
+
+/* round down to power of 2 (seconds -> log2 shift for the remind field) */
+static inline int pdc_wdt_remind_to_shift(int secs)
+{
+	return ilog2(secs);
+}
+
+/* log2 shift -> whole seconds; sub-second shifts are reported as 1s */
+static inline int pdc_wdt_shift_to_secs(int shift)
+{
+	if (shift >= 0)
+		return 1 << shift;
+	else
+		return 1;
+}
+
+/* Remind (pre-timeout) interrupt handler: currently just acks the IRQ. */
+static irqreturn_t pdc_wdt_isr(int irq, void *dev_id)
+{
+	struct pdc_wdt_priv *wdt = dev_id;
+	u32 stat = pdc_wdt_read(wdt, PDC_WD_IRQ_STATUS);
+
+	/*
+	 * The behaviour of the remind interrupt should depend on what userland
+	 * asks for, either do nothing, panic the system or inform userland.
+	 * Unfortunately this is not part of the main linux interface, and is
+	 * currently implemented only in the ipmi watchdog driver (in
+	 * drivers/char). This could be added at a later time.
+	 */
+
+	pdc_wdt_write(wdt, PDC_WD_IRQ_CLEAR, stat);
+	return IRQ_HANDLED;
+}
+
+/* /dev/watchdog handling */
+
+/*
+ * /dev/watchdog open: single-open semantics via the timer_alive bit;
+ * opening the device starts the watchdog.
+ */
+static int pdc_wdt_open(struct inode *inode, struct file *file)
+{
+	struct pdc_wdt_priv *wdt = platform_get_drvdata(pdc_wdt_dev);
+
+	/* one at a time */
+	if (test_and_set_bit(0, &wdt->timer_alive))
+		return -EBUSY;
+	/* don't unload, there's no way out */
+	if (nowayout)
+		__module_get(THIS_MODULE);
+
+	file->private_data = wdt;
+	pdc_wdt_start(wdt);
+
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * /dev/watchdog close: only stops the timer after a magic close
+ * (expect_close == 42, set by writing 'V'); otherwise the dog keeps
+ * running and will reset the system if userland died.
+ */
+static int pdc_wdt_release(struct inode *inode, struct file *file)
+{
+	struct pdc_wdt_priv *wdt = file->private_data;
+
+	/*
+	 * Shut off the timer.
+	 * Lock it in if it's a module and we set nowayout
+	 */
+	if (wdt->expect_close == 42)
+		pdc_wdt_stop(wdt);
+	else if (pdc_wdt_get_started(wdt)) {
+		dev_crit(wdt->dev,
+			 "unexpected close, not stopping watchdog!\n");
+		pdc_wdt_keepalive(wdt);
+	}
+	clear_bit(0, &wdt->timer_alive);
+	wdt->expect_close = 0;
+	return 0;
+}
+
+/*
+ * /dev/watchdog write: any write tickles the dog; a 'V' anywhere in the
+ * data arms the magic close (unless nowayout is set).
+ */
+static ssize_t pdc_wdt_fwrite(struct file *file, const char __user *data,
+			      size_t len, loff_t *ppos)
+{
+	struct pdc_wdt_priv *wdt = file->private_data;
+
+	/* Refresh the timer. */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* In case it was set long ago */
+			wdt->expect_close = 0;
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					wdt->expect_close = 42;
+			}
+		}
+		pdc_wdt_keepalive(wdt);
+	}
+	return len;
+}
+
+/* Capabilities reported by WDIOC_GETSUPPORT. */
+static struct watchdog_info pdc_wdt_ident = {
+	.options		= WDIOF_SETTIMEOUT |
+				  WDIOF_PRETIMEOUT |
+				  WDIOF_CARDRESET |
+				  WDIOF_MAGICCLOSE,
+	.identity = "PDC Watchdog",
+};
+
+/*
+ * Standard watchdog ioctl interface.  Arguments are staged through the
+ * local 'uarg' union: copied in for _IOC_WRITE commands, copied back out
+ * for _IOC_READ commands that succeeded.  Timeouts are rounded to powers
+ * of two because the hardware only supports log2-second periods.
+ */
+static long pdc_wdt_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct pdc_wdt_priv *wdt = file->private_data;
+	int ret;
+	int delay;
+	union {
+		struct watchdog_info ident;
+		int i;
+	} uarg;
+
+	if (_IOC_DIR(cmd) && _IOC_SIZE(cmd) > sizeof(uarg))
+		return -ENOTTY;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		ret = copy_from_user(&uarg, (void __user *)arg, _IOC_SIZE(cmd));
+		if (ret)
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		uarg.ident = pdc_wdt_ident;
+		ret = 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		uarg.i = 0;
+		ret = 0;
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		/* report whether the last reboot was a watchdog reset */
+		uarg.i = 0;
+		if (wdt->card_reset)
+			uarg.i |= WDIOF_CARDRESET;
+		ret = 0;
+		break;
+
+	case WDIOC_SETOPTIONS:
+		/*
+		 * Work around bad definition of WDIOC_SETOPTIONS until it's
+		 * fixed. WDIOC_SETOPTIONS is a writing ioctl.
+		 */
+		if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
+			ret = copy_from_user(&uarg, (void __user *)arg,
+					     _IOC_SIZE(cmd));
+			if (ret)
+				return -EFAULT;
+		}
+		ret = -EINVAL;
+		if (uarg.i & WDIOS_DISABLECARD) {
+			pdc_wdt_stop(wdt);
+			ret = 0;
+		}
+		if (uarg.i & WDIOS_ENABLECARD) {
+			pdc_wdt_start(wdt);
+			ret = 0;
+		}
+		break;
+
+	case WDIOC_KEEPALIVE:
+		pdc_wdt_keepalive(wdt);
+		ret = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		if (uarg.i <= 0 || uarg.i > pdc_wdt_shift_to_secs(TIMER_MAX))
+			return -EINVAL;
+		/* round up to the next power-of-two shift and program it */
+		uarg.i = pdc_wdt_delay_to_shift(uarg.i);
+		pdc_wdt_set_delay_shift(wdt, uarg.i);
+		/* fallthrough */
+	case WDIOC_GETTIMEOUT:
+		/* report the (possibly rounded) timeout actually in effect */
+		uarg.i = pdc_wdt_shift_to_secs(delay_shift);
+		ret = 0;
+		break;
+
+	case WDIOC_SETPRETIMEOUT:
+		/*
+		 * Pretimeout is measured in seconds before main timeout.
+		 * Subtract and round it once, and it will effectively change
+		 * if the main timeout is changed.
+		 */
+		delay = pdc_wdt_shift_to_secs(delay_shift);
+		if (!uarg.i)
+			uarg.i = TIMER_MAX;
+		else if (uarg.i > 0 && uarg.i < delay)
+			uarg.i = pdc_wdt_remind_to_shift(delay - uarg.i);
+		else
+			return -EINVAL;
+		pdc_wdt_set_remind_shift(wdt, uarg.i);
+		/* fallthrough */
+	case WDIOC_GETPRETIMEOUT:
+		/*
+		 * NOTE(review): this tests '>= TIMER_MAX' while
+		 * pdc_wdt_remind_enabled() tests '!= TIMER_MAX'; both agree
+		 * after setup clamps, but the conditions should match.
+		 */
+		if (remind_shift >= TIMER_MAX)
+			uarg.i = 0;
+		else
+			uarg.i = pdc_wdt_shift_to_secs(delay_shift) -
+				pdc_wdt_shift_to_secs(remind_shift);
+		ret = 0;
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (ret == 0 && (_IOC_DIR(cmd) & _IOC_READ) && cmd != WDIOC_SETOPTIONS) {
+		ret = copy_to_user((void __user *)arg, &uarg, _IOC_SIZE(cmd));
+		if (ret)
+			ret = -EFAULT;
+	}
+	return ret;
+}
+
+/* Kernel interface */
+
+/* File operations for the /dev/watchdog misc device. */
+static const struct file_operations pdc_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= pdc_wdt_fwrite,
+	.unlocked_ioctl	= pdc_wdt_ioctl,
+	.open		= pdc_wdt_open,
+	.release	= pdc_wdt_release,
+};
+
+/* Registered on the standard watchdog minor. */
+static struct miscdevice pdc_wdt_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &pdc_wdt_fops,
+};
+
+/*
+ * Platform probe: map the registers, initialise the hardware, register
+ * the misc device and the remind interrupt handler.
+ * Returns 0 on success or a negative errno.
+ */
+static int pdc_wdt_probe(struct platform_device *pdev)
+{
+	struct pdc_wdt_priv *wdt;
+	struct resource *res_regs;
+	int irq, error;
+
+	/* Get resources from platform device */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ resource\n");
+		error = irq;
+		goto err_pdata;
+	}
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_regs == NULL) {
+		dev_err(&pdev->dev, "cannot find registers resource\n");
+		error = -ENOENT;
+		goto err_pdata;
+	}
+
+	/* Private driver data */
+	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt) {
+		dev_err(&pdev->dev, "cannot allocate device data\n");
+		error = -ENOMEM;
+		goto err_dev;
+	}
+	platform_set_drvdata(pdev, wdt);
+	pdc_wdt_dev = pdev;
+	wdt->dev = &pdev->dev;
+	spin_lock_init(&wdt->lock);
+
+	/*
+	 * Ioremap the registers.  Use resource_size(): a resource spans
+	 * start..end inclusive, so "end - start" is one byte short.
+	 */
+	wdt->reg_base = devm_ioremap(&pdev->dev, res_regs->start,
+				     resource_size(res_regs));
+	if (!wdt->reg_base) {
+		error = -EIO;
+		goto err_regs;
+	}
+
+	/* Set timeouts before userland has a chance to start the timer */
+	pdc_wdt_setup(wdt);
+
+	device_set_wakeup_capable(&pdev->dev, 1);
+	pdc_wdt_miscdev.parent = &pdev->dev;
+	error = misc_register(&pdc_wdt_miscdev);
+	if (error) {
+		dev_err(&pdev->dev,
+			"cannot register miscdev on minor=%d (err=%d)\n",
+			WATCHDOG_MINOR, error);
+		goto err_misc;
+	}
+
+	wdt->irq = irq;
+	/* propagate the real error rather than clobbering it with -EIO */
+	error = devm_request_irq(&pdev->dev, wdt->irq, pdc_wdt_isr, 0,
+				 "pdc-wdt", wdt);
+	if (error) {
+		dev_err(&pdev->dev, "cannot register IRQ %u\n",
+			wdt->irq);
+		goto err_irq;
+	}
+
+	return 0;
+
+err_irq:
+	misc_deregister(&pdc_wdt_miscdev);
+err_misc:
+err_regs:
+	pdc_wdt_dev = NULL;
+err_dev:
+err_pdata:
+	return error;
+}
+
+/* Platform remove: unregister the misc device (devm frees the rest). */
+static int pdc_wdt_remove(struct platform_device *pdev)
+{
+	pdc_wdt_dev = NULL;
+	misc_deregister(&pdc_wdt_miscdev);
+
+	return 0;
+}
+
+/*
+ * System shutdown handler.  Turn off the watchdog if we're
+ * restarting or halting the system.
+ */
+/* Stop the watchdog on an orderly restart/halt so it can't fire mid-shutdown. */
+static void pdc_wdt_shutdown(struct platform_device *pdev)
+{
+	struct pdc_wdt_priv *wdt = platform_get_drvdata(pdev);
+
+	if (system_state == SYSTEM_RESTART || system_state == SYSTEM_HALT)
+		pdc_wdt_stop(wdt);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Whether the remind interrupt is in use (TIMER_MAX means disabled).
+ * NOTE(review): uses '!=' where the GETPRETIMEOUT path uses '>='; they
+ * agree after pdc_wdt_setup() clamps remind_shift, but should match.
+ */
+static int pdc_wdt_remind_enabled(struct pdc_wdt_priv *wdt)
+{
+	return remind_shift != TIMER_MAX;
+}
+
+/*
+ * During suspend we don't want the watchdog to think we've crashed, so
+ * stop the watchdog until resume.
+ */
+static int pdc_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct pdc_wdt_priv *wdt = platform_get_drvdata(pdev);
+
+	/* Only wake if the remind is enabled. */
+	if (device_may_wakeup(&pdev->dev) && pdc_wdt_remind_enabled(wdt))
+		enable_irq_wake(wdt->irq);
+	else if (wdt->timer_alive)
+		pdc_wdt_stop(wdt);
+	return 0;
+}
+
+/* Undo pdc_wdt_suspend(): restart the dog if it was stopped for suspend. */
+static int pdc_wdt_resume(struct platform_device *pdev)
+{
+	struct pdc_wdt_priv *wdt = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev) && pdc_wdt_remind_enabled(wdt))
+		disable_irq_wake(wdt->irq);
+	else if (wdt->timer_alive)
+		pdc_wdt_start(wdt);
+	return 0;
+}
+#else
+#define pdc_wdt_suspend NULL
+#define pdc_wdt_resume NULL
+#endif	/* CONFIG_PM */
+
+/* Device tree match table. */
+static const struct of_device_id pdc_wdt_match[] = {
+	{ .compatible = "img,pdc-wdt" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pdc_wdt_match);
+
+static struct platform_driver pdc_wdt_driver = {
+	.driver = {
+		.name = "imgpdc-wdt",
+		.owner	= THIS_MODULE,
+		.of_match_table	= pdc_wdt_match,
+	},
+	.probe = pdc_wdt_probe,
+	.remove = pdc_wdt_remove,
+	.shutdown = pdc_wdt_shutdown,
+	.suspend = pdc_wdt_suspend,
+	.resume = pdc_wdt_resume,
+	/* pdc_wdt has shutdown handler too */
+};
+
+module_platform_driver(pdc_wdt_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("ImgTec PDC WDT");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/fs/Kconfig b/fs/Kconfig
index c229f82..839ef10 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -212,6 +212,7 @@
 source "fs/exofs/Kconfig"
 source "fs/f2fs/Kconfig"
 source "fs/efivarfs/Kconfig"
+source "fs/imgdafs/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/Makefile b/fs/Makefile
index 4fe6df3..25a93c7 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -126,3 +126,4 @@
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
 obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+obj-$(CONFIG_IMGDAFS_FS)	+= imgdafs/
diff --git a/fs/imgdafs/Kconfig b/fs/imgdafs/Kconfig
new file mode 100644
index 0000000..844e6cf
--- /dev/null
+++ b/fs/imgdafs/Kconfig
@@ -0,0 +1,6 @@
+config IMGDAFS_FS
+	tristate "Meta DA filesystem support"
+	depends on METAG_DA
+	help
+	  This enables the DA filesystem, which allows Linux to
+	  access files on a system attached via a debug adapter.
diff --git a/fs/imgdafs/Makefile b/fs/imgdafs/Makefile
new file mode 100644
index 0000000..169a3c6
--- /dev/null
+++ b/fs/imgdafs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for DAfs the Debug Adapter filesystem.
+#
+
+obj-$(CONFIG_IMGDAFS_FS) += imgdafs.o
+
+imgdafs-objs := inode.o
diff --git a/fs/imgdafs/imgdafs.h b/fs/imgdafs/imgdafs.h
new file mode 100644
index 0000000..0350220
--- /dev/null
+++ b/fs/imgdafs/imgdafs.h
@@ -0,0 +1,82 @@
+#ifndef _IMGDAFS_H_
+#define _IMGDAFS_H_
+
+#include <linux/types.h>
+
+/* Debug adapter file operation codes, passed to fscall(). */
+#define DA_OP_OPEN 0
+#define DA_OP_CREAT 1
+#define DA_OP_READ 2
+#define DA_OP_WRITE 3
+#define DA_OP_CLOSE 4
+#define DA_OP_LINK 5
+#define DA_OP_LSEEK 6
+#define DA_OP_UNLINK 7
+#define DA_OP_ISATTY 8
+#define DA_OP_FCNTL 9
+#define DA_OP_STAT 10
+#define DA_OP_FSTAT 11
+#define DA_OP_GETCWD 12
+#define DA_OP_CHDIR 13
+#define DA_OP_MKDIR 14
+#define DA_OP_RMDIR 15
+#define DA_OP_FINDFIRST 16
+#define DA_OP_FINDNEXT 17
+#define DA_OP_FINDCLOSE 18
+#define DA_OP_CHMOD 19
+#define DA_OP_PREAD 20
+#define DA_OP_PWRITE 21
+
+/* File type codes as reported by the host side. */
+#define OS_TYPE_FILE 1
+#define OS_TYPE_DIR 2
+#define OS_TYPE_SYMLINK 3
+#define OS_TYPE_CHARDEV 4
+#define OS_TYPE_BLOCKDEV 5
+#define OS_TYPE_FIFO 6
+#define OS_TYPE_SOCK 7
+
+/* Open flags in the debug adapter's encoding (not Linux O_* values). */
+#define DA_O_RDONLY 0
+#define DA_O_WRONLY 1
+#define DA_O_RDWR 2
+#define DA_O_APPEND 8
+#define DA_O_CREAT 0x0200
+#define DA_O_TRUNC 0x0400
+#define DA_O_EXCL 0x0800
+
+/* Hardware-thread affinity bits for a file handle (one bit per thread). */
+#define DA_O_AFFINITY_THREAD_0 0x10000
+#define DA_O_AFFINITY_THREAD_1 0x20000
+#define DA_O_AFFINITY_THREAD_2 0x40000
+#define DA_O_AFFINITY_THREAD_3 0x80000
+#define DA_O_AFFINITY_SHIFT 16
+
+#define DA_S_IWUSR	0200	/* 0x80 */
+#define DA_S_IRUSR	0400	/* 0x100 */
+
+/* stat structure in the layout expected by the debug adapter host. */
+struct da_stat {
+	s16 st_dev;
+	u16 st_ino;
+	u32 st_mode;
+	u16 st_nlink;
+	u16 st_uid;
+	u16 st_gid;
+	s16 st_rdev;
+	s32 st_size;
+	s32 st_atime;
+	s32 st_spare1;
+	s32 st_mtime;
+	s32 st_spare2;
+	s32 st_ctime;
+	s32 st_spare3;
+	s32 st_blksize;
+	s32 st_blocks;
+	s32 st_spare4[2];
+};
+
+/* attrib bit marking a directory entry in da_finddata */
+#define _A_SUBDIR 0x10
+
+/* One directory entry returned by DA_OP_FINDFIRST/FINDNEXT. */
+struct da_finddata {
+	u32 size;
+	u32 attrib;
+	u8 name[260];
+};
+
+#endif
diff --git a/fs/imgdafs/inode.c b/fs/imgdafs/inode.c
new file mode 100644
index 0000000..31f8e3e
--- /dev/null
+++ b/fs/imgdafs/inode.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2008-2013 Imagination Technologies Ltd.
+ * Licensed under the GPL
+ *
+ * Based on hostfs for UML.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+
+#include <asm/da.h>
+#include <asm/hwthread.h>
+
+#include "imgdafs.h"
+
+/*
+ * Issue one file operation to the attached debug adapter.  Arguments are
+ * marshalled into the Meta argument registers and pushed onto the A0
+ * stack; the SWITCH instruction then transfers control to the debug
+ * adapter (NOTE(review): the magic operand 0x0C00208 presumably selects
+ * the DA filesystem service — confirm against the Meta DA protocol docs).
+ * The result comes back in D0Re0 and an errno-style code in D1Re0, which
+ * is stored through *fserrno when non-NULL.
+ */
+static int fscall(int in_system_call, int in_arg1, int in_arg2, int in_arg3,
+		  int in_arg4, int in_arg5, int *fserrno)
+{
+	register int arg1            asm("D1Ar1") = in_arg1;
+	register int arg2            asm("D0Ar2") = in_arg2;
+	register int arg3            asm("D1Ar3") = in_arg3;
+	register int arg4            asm("D0Ar4") = in_arg4;
+	register int arg5            asm("D1Ar5") = in_arg5;
+	register int system_call     asm("D0Ar6") = in_system_call;
+	register int result          asm("D0Re0");
+	register int errno           asm("D1Re0");
+
+	asm volatile (
+		"MSETL	[A0StP++], %7,%5,%3\n\t"
+		"ADD	A0StP, A0StP, #8\n\t"
+		"SWITCH	#0x0C00208\n\t"
+		"GETL	%0, %1, [A0StP+#-8]\n\t"
+		"SUB	A0StP, A0StP, #(4*6)+8\n\t"
+		: "=r" (result),
+		  "=r" (errno)
+		: "r" (arg1),
+		  "r" (arg2),
+		  "r" (arg3),
+		  "r" (arg4),
+		  "r" (arg5),
+		  "r" (system_call)
+		: "memory");
+
+	if (fserrno)
+		*fserrno = errno;
+
+	return result;
+}
+
+/* Per-inode state: the DA-side file descriptor and the open mode. */
+struct dafs_inode_info {
+	int fd;			/* DA file handle, -1 when not open */
+	fmode_t mode;		/* FMODE_READ/FMODE_WRITE already granted */
+	struct inode vfs_inode;
+};
+
+/* Container-of accessor from a VFS inode to our private info. */
+static inline struct dafs_inode_info *DAFS_I(struct inode *inode)
+{
+	return container_of(inode, struct dafs_inode_info, vfs_inode);
+}
+
+#define FILE_DAFS_I(file) DAFS_I(file_inode(file))
+
+/* Always drop dentries so the host filesystem is re-consulted. */
+static int dafs_d_delete(const struct dentry *dentry)
+{
+	return 1;
+}
+
+static const struct dentry_operations dafs_dentry_ops = {
+	.d_delete		= dafs_d_delete,
+};
+
+static const struct inode_operations dafs_dir_iops;
+
+/*
+ * Build the full host-side path for a dentry into 'name' (a PATH_MAX
+ * buffer from __getname()): the mount root prefix from s_fs_info
+ * followed by the dentry path.  On failure the buffer is released and
+ * NULL is returned; on success the caller must __putname() it.
+ */
+static char *__dentry_name(struct dentry *dentry, char *name)
+{
+	char *p = dentry_path_raw(dentry, name, PATH_MAX);
+	char *root;
+	size_t len;
+
+	root = dentry->d_sb->s_fs_info;
+	len = strlen(root);
+	if (IS_ERR(p)) {
+		__putname(name);
+		return NULL;
+	}
+
+	strlcpy(name, root, PATH_MAX);
+	if (len > p - name) {
+		/* root prefix would overlap the path written at the tail */
+		__putname(name);
+		return NULL;
+	}
+	if (p > name + len) {
+		/* slide the dentry path down to follow the root prefix */
+		char *s = name + len;
+		while ((*s++ = *p++) != '\0')
+			;
+	}
+	return name;
+}
+
+/* Allocate a path buffer and fill it via __dentry_name(). */
+static char *dentry_name(struct dentry *dentry)
+{
+	char *name = __getname();
+	if (!name)
+		return NULL;
+
+	return __dentry_name(dentry, name);
+}
+
+/*
+ * Stat a host file either by DA descriptor (fd >= 0) or by path.
+ * Fills *p and returns 0, or a negative errno from the debug adapter.
+ */
+static int stat_file(const char *path, struct da_stat *p, int fd)
+{
+	int ret;
+	int fserrno;
+
+	memset(p, 0, sizeof(*p));
+
+	if (fd >= 0)
+		ret = fscall(DA_OP_FSTAT, fd, (int)p, 0, 0, 0, &fserrno);
+	else
+		ret = fscall(DA_OP_STAT, (int)path, (int)p, strlen(path), 0, 0,
+			     &fserrno);
+	if (ret < 0) {
+		/* Some versions of Codescape do not fill out errno. */
+		if (fserrno == 0)
+			fserrno = ENOENT;
+		return -fserrno;
+	}
+
+	return 0;
+}
+
+/* Allocate a fresh VFS inode for this superblock. */
+static struct inode *dafs_iget(struct super_block *sb)
+{
+	struct inode *inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	return inode;
+}
+
+/* super_operations.alloc_inode: allocate our wrapper around the inode. */
+static struct inode *dafs_alloc_inode(struct super_block *sb)
+{
+	struct dafs_inode_info *hi;
+
+	hi = kmalloc(sizeof(*hi), GFP_KERNEL);
+	if (hi == NULL)
+		return NULL;
+
+	hi->fd = -1;
+	hi->mode = 0;
+	inode_init_once(&hi->vfs_inode);
+	return &hi->vfs_inode;
+}
+
+/* Close the DA file handle referenced through 'stream'. */
+static void close_file(void *stream)
+{
+	int fd = *((int *) stream);
+
+	fscall(DA_OP_CLOSE, fd, 0, 0, 0, 0, NULL);
+}
+
+/* super_operations.evict_inode: drop pages and close any DA handle. */
+static void dafs_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+	if (DAFS_I(inode)->fd != -1) {
+		close_file(&DAFS_I(inode)->fd);
+		DAFS_I(inode)->fd = -1;
+	}
+}
+
+/* RCU callback freeing the inode wrapper after a grace period. */
+static void dafs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	kfree(DAFS_I(inode));
+}
+
+static void dafs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, dafs_i_callback);
+}
+
+static const struct super_operations dafs_sbops = {
+	.alloc_inode	= dafs_alloc_inode,
+	.drop_inode	= generic_delete_inode,
+	.evict_inode	= dafs_evict_inode,
+	.destroy_inode	= dafs_destroy_inode,
+};
+
+/*
+ * Begin a host-side directory listing: append "/*" (or "*" if the path
+ * already ends in '/') and issue DA_OP_FINDFIRST.  Returns the find
+ * handle, or -1 with *fserrno set on failure.
+ */
+static int open_dir(char *path, struct da_finddata *finddata, int *fserrno)
+{
+	int len = strlen(path);
+	char *buf;
+	int ret;
+
+	/* room for path + "/*" + NUL */
+	buf = kmalloc(len + 3, GFP_KERNEL);
+	if (!buf) {
+		*fserrno = ENOMEM;
+		return -1;
+	}
+
+	strcpy(buf, path);
+	/* guard len == 0: buf[-1] would be an out-of-bounds read */
+	if (len == 0 || buf[len - 1] != '/')
+		strcat(buf, "/*");
+	else
+		strcat(buf, "*");
+
+	ret = fscall(DA_OP_FINDFIRST, (int)buf, (int)finddata, 0, 0, 0,
+		     fserrno);
+	kfree(buf);
+	return ret;
+}
+
+/* Finish a host-side directory listing. */
+static void close_dir(int handle)
+{
+	fscall(DA_OP_FINDCLOSE, handle, 0, 0, 0, 0, NULL);
+}
+
+/* Fetch the next directory entry; non-zero return means end/error. */
+static int read_dir(int handle, struct da_finddata *finddata)
+{
+	return fscall(DA_OP_FINDNEXT, handle, (int)finddata, 0, 0, 0, NULL);
+}
+
+static int dafs_readdir(struct file *file, void *ent, filldir_t filldir)
+{
+	struct inode *inode = file_inode(file);
+	struct super_block *sb = inode->i_sb;
+	char *name;
+	int handle;
+	int fserrno;
+	unsigned long long next, ino;
+	int error = 0;
+	struct da_finddata finddata;
+
+	name = dentry_name(file->f_path.dentry);
+	if (name == NULL)
+		return -ENOMEM;
+	handle = open_dir(name, &finddata, &fserrno);
+	__putname(name);
+	if (handle == -1)
+		return -fserrno;
+
+	next = 1;
+
+	if (file->f_pos == 0) {
+		error = (*filldir)(ent, ".", file->f_pos + 1,
+				   file->f_pos, inode->i_ino,
+				   DT_DIR);
+		if (error < 0)
+			goto out;
+		file->f_pos++;
+	}
+
+	while (1) {
+		error = read_dir(handle, &finddata);
+		if (error)
+			break;
+
+		if (next >= file->f_pos) {
+			size_t len = strlen(finddata.name);
+			ino = iunique(sb, 100);
+			error = (*filldir)(ent, finddata.name, len,
+					   file->f_pos, ino,
+					   (finddata.attrib & _A_SUBDIR) ?
+					    DT_DIR : DT_REG);
+			if (error)
+				break;
+			file->f_pos++;
+		}
+		next++;
+	}
+out:
+	close_dir(handle);
+	return 0;
+}
+
+static int dafs_file_open(struct inode *ino, struct file *file)
+{
+	static DEFINE_MUTEX(open_mutex);
+	char *name;
+	fmode_t mode, fmode;
+	int flags = 0, r = 0, w = 0, fd;
+	int cpu;
+
+	fmode = file->f_mode & (FMODE_READ | FMODE_WRITE);
+	if ((fmode & DAFS_I(ino)->mode) == fmode)
+		return 0;
+
+	mode = ino->i_mode & (DA_S_IWUSR | DA_S_IRUSR);
+
+	mode |= DAFS_I(ino)->mode;
+
+	DAFS_I(ino)->mode |= fmode;
+	if (DAFS_I(ino)->mode & FMODE_READ)
+		r = 1;
+	if (DAFS_I(ino)->mode & FMODE_WRITE) {
+		w = 1;
+		r = 1;
+	}
+
+retry:
+	if (r && !w)
+		flags |= DA_O_RDONLY;
+	else if (!r && w)
+		flags |= DA_O_WRONLY;
+	else if (r && w)
+		flags |= DA_O_RDWR;
+
+	if (file->f_flags & O_CREAT)
+		flags |= DA_O_CREAT;
+
+	if (file->f_flags & O_TRUNC)
+		flags |= DA_O_TRUNC;
+
+	/*
+	 * Set the affinity for this file handle to all CPUs. If we
+	 * don't do this then, if the process that opened the file
+	 * migrates to a different cpu, the FileServer will not accept
+	 * the file handle.
+	 */
+	for_each_possible_cpu(cpu) {
+		u8 hwthread = cpu_2_hwthread_id[cpu];
+		flags |= (1 << (DA_O_AFFINITY_SHIFT + hwthread));
+	}
+
+	name = dentry_name(file->f_path.dentry);
+	if (name == NULL)
+		return -ENOMEM;
+
+	fd = fscall(DA_OP_OPEN, (int)name, flags, mode, strlen(name), 0, NULL);
+	__putname(name);
+	if (fd < 0)
+		return fd;
+
+	mutex_lock(&open_mutex);
+	/* somebody else had handled it first? */
+	if ((mode & DAFS_I(ino)->mode) == mode) {
+		mutex_unlock(&open_mutex);
+		return 0;
+	}
+	if ((mode | DAFS_I(ino)->mode) != mode) {
+		mode |= DAFS_I(ino)->mode;
+		mutex_unlock(&open_mutex);
+		close_file(&fd);
+		goto retry;
+	}
+	DAFS_I(ino)->fd = fd;
+	DAFS_I(ino)->mode = mode;
+	mutex_unlock(&open_mutex);
+
+	return 0;
+}
+
+static const struct file_operations dafs_file_fops = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.splice_read	= generic_file_splice_read,
+	.aio_read	= generic_file_aio_read,
+	.aio_write	= generic_file_aio_write,
+	.write		= do_sync_write,
+	.mmap		= generic_file_mmap,
+	.open		= dafs_file_open,
+	.release	= NULL,
+};
+
+static const struct file_operations dafs_dir_fops = {
+	.llseek		= generic_file_llseek,
+	.readdir	= dafs_readdir,
+	.read		= generic_read_dir,
+};
+
+static int read_file(int fd, unsigned long long *offset, const char *buf,
+		     int len)
+{
+	int n;
+	int fserrno;
+
+	n = fscall(DA_OP_PREAD, fd, (int)buf, len, (int)*offset, 0, &fserrno);
+
+	if (n < 0)
+		return -fserrno;
+
+	return n;
+}
+
+static int write_file(int fd, unsigned long long *offset, const char *buf,
+		      int len)
+{
+	int n;
+	int fserrno;
+
+	n = fscall(DA_OP_PWRITE, fd, (int)buf, len, (int)*offset, 0, &fserrno);
+
+	if (n < 0)
+		return -fserrno;
+
+	return n;
+}
+
+static int dafs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	char *buffer;
+	unsigned long long base;
+	int count = PAGE_CACHE_SIZE;
+	int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	int err;
+
+	if (page->index >= end_index)
+		count = inode->i_size & (PAGE_CACHE_SIZE-1);
+
+	buffer = kmap(page);
+	base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
+
+	err = write_file(DAFS_I(inode)->fd, &base, buffer, count);
+	if (err != count) {
+		ClearPageUptodate(page);
+		goto out;
+	}
+
+	if (base > inode->i_size)
+		inode->i_size = base;
+
+	if (PageError(page))
+		ClearPageError(page);
+	err = 0;
+
+ out:
+	kunmap(page);
+
+	unlock_page(page);
+	return err;
+}
+
+static int dafs_readpage(struct file *file, struct page *page)
+{
+	char *buffer;
+	long long start;
+	int err = 0;
+
+	start = (long long) page->index << PAGE_CACHE_SHIFT;
+	buffer = kmap(page);
+	err = read_file(FILE_DAFS_I(file)->fd, &start, buffer,
+			PAGE_CACHE_SIZE);
+	if (err < 0)
+		goto out;
+
+	memset(&buffer[err], 0, PAGE_CACHE_SIZE - err);
+
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	if (PageError(page))
+		ClearPageError(page);
+	err = 0;
+ out:
+	kunmap(page);
+	unlock_page(page);
+	return err;
+}
+
+static int dafs_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata)
+{
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+	*pagep = grab_cache_page_write_begin(mapping, index, flags);
+	if (!*pagep)
+		return -ENOMEM;
+	return 0;
+}
+
+static int dafs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+	void *buffer;
+	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	int err;
+
+	buffer = kmap(page);
+	err = write_file(FILE_DAFS_I(file)->fd, &pos, buffer + from, copied);
+	kunmap(page);
+
+	if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
+		SetPageUptodate(page);
+
+	/*
+	 * If err > 0, write_file has added err to pos, so we are comparing
+	 * i_size against the last byte written.
+	 */
+	if (err > 0 && (pos > inode->i_size))
+		inode->i_size = pos;
+	unlock_page(page);
+	page_cache_release(page);
+
+	return err;
+}
+
+static const struct address_space_operations dafs_aops = {
+	.writepage	= dafs_writepage,
+	.readpage	= dafs_readpage,
+	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.write_begin	= dafs_write_begin,
+	.write_end	= dafs_write_end,
+};
+
+static int read_name(struct inode *ino, char *name)
+{
+	dev_t rdev;
+	struct da_stat st;
+	int err = stat_file(name, &st, -1);
+	if (err)
+		return err;
+
+	/* The DA provides no valid major/minor device numbers. */
+	rdev = MKDEV(0, 0);
+
+	switch (st.st_mode & S_IFMT) {
+	case S_IFDIR:
+		ino->i_op = &dafs_dir_iops;
+		ino->i_fop = &dafs_dir_fops;
+		break;
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
+		init_special_inode(ino, st.st_mode & S_IFMT, rdev);
+		break;
+
+	case S_IFLNK:
+	default:
+		ino->i_fop = &dafs_file_fops;
+		ino->i_mapping->a_ops = &dafs_aops;
+	}
+
+	ino->i_ino = st.st_ino;
+	ino->i_mode = st.st_mode;
+	set_nlink(ino, st.st_nlink);
+
+	i_uid_write(ino, st.st_uid);
+	i_gid_write(ino, st.st_gid);
+	ino->i_atime.tv_sec = st.st_atime;
+	ino->i_atime.tv_nsec = 0;
+	ino->i_mtime.tv_sec = st.st_mtime;
+	ino->i_mtime.tv_nsec = 0;
+	ino->i_ctime.tv_sec = st.st_ctime;
+	ino->i_ctime.tv_nsec = 0;
+	ino->i_size = st.st_size;
+	ino->i_blocks = st.st_blocks;
+	return 0;
+}
+
+static int dafs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		       bool excl)
+{
+	struct inode *inode;
+	char *name;
+	int error, fd;
+	int damode;
+	int creat_flags = DA_O_TRUNC | DA_O_CREAT | DA_O_WRONLY;
+	int cpu;
+
+	inode = dafs_iget(dir->i_sb);
+	if (IS_ERR(inode)) {
+		error = PTR_ERR(inode);
+		goto out;
+	}
+
+	damode = mode & (DA_S_IWUSR | DA_S_IRUSR);
+
+	error = -ENOMEM;
+	name = dentry_name(dentry);
+	if (name == NULL)
+		goto out_put;
+
+	/*
+	 * creat() will only create text mode files on a Windows host
+	 * at present.  Replicate the creat() functionality with an
+	 * open() call, which always creates binary files. Set the
+	 * affinity to all hardware threads.
+	 */
+	for_each_possible_cpu(cpu) {
+		u8 hwthread = cpu_2_hwthread_id[cpu];
+		creat_flags |= (1 << (DA_O_AFFINITY_SHIFT + hwthread));
+	}
+
+	fd = fscall(DA_OP_OPEN, (int)name, creat_flags, damode, strlen(name),
+		    0, NULL);
+	if (fd < 0)
+		error = fd;
+	else
+		error = read_name(inode, name);
+
+	kfree(name);
+	if (error)
+		goto out_put;
+
+	DAFS_I(inode)->fd = fd;
+	DAFS_I(inode)->mode = FMODE_READ | FMODE_WRITE;
+	d_instantiate(dentry, inode);
+	return 0;
+
+ out_put:
+	iput(inode);
+ out:
+	return error;
+}
+
+static struct dentry *dafs_lookup(struct inode *ino, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct inode *inode;
+	char *name;
+	int err;
+
+	inode = dafs_iget(ino->i_sb);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out;
+	}
+
+	err = -ENOMEM;
+	name = dentry_name(dentry);
+	if (name == NULL)
+		goto out_put;
+
+	err = read_name(inode, name);
+
+	__putname(name);
+	if (err == -ENOENT) {
+		iput(inode);
+		inode = NULL;
+	} else if (err)
+		goto out_put;
+
+	d_add(dentry, inode);
+	return NULL;
+
+ out_put:
+	iput(inode);
+ out:
+	return ERR_PTR(err);
+}
+
+static int dafs_link(struct dentry *to, struct inode *ino, struct dentry *from)
+{
+	char *from_name, *to_name;
+	int err;
+
+	from_name = dentry_name(from);
+	if (from_name == NULL)
+		return -ENOMEM;
+	to_name = dentry_name(to);
+	if (to_name == NULL) {
+		__putname(from_name);
+		return -ENOMEM;
+	}
+	err = -EINVAL;
+	__putname(from_name);
+	__putname(to_name);
+	return err;
+}
+
+static int dafs_unlink(struct inode *ino, struct dentry *dentry)
+{
+	char *file;
+	int err;
+	int fserrno;
+
+	file = dentry_name(dentry);
+	if (file == NULL)
+		return -ENOMEM;
+
+	err = fscall(DA_OP_UNLINK, (int)file, 0, 0, 0, 0, &fserrno);
+	__putname(file);
+	if (err)
+		return -fserrno;
+	return 0;
+}
+
+static int do_mkdir(const char *file, int mode)
+{
+	int err;
+	int fserrno;
+
+	err = fscall(DA_OP_MKDIR, (int)file, mode, strlen(file), 0, 0,
+		     &fserrno);
+	if (err)
+		return -fserrno;
+	return 0;
+}
+
+static int dafs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode)
+{
+	char *file;
+	int err;
+
+	file = dentry_name(dentry);
+	if (file == NULL)
+		return -ENOMEM;
+	err = do_mkdir(file, mode);
+	__putname(file);
+	return err;
+}
+
+static int do_rmdir(const char *file)
+{
+	int err;
+	int fserrno;
+
+	err = fscall(DA_OP_RMDIR, (int)file, strlen(file), 0, 0, 0, &fserrno);
+	if (err)
+		return -fserrno;
+	return 0;
+}
+
+static int dafs_rmdir(struct inode *ino, struct dentry *dentry)
+{
+	char *file;
+	int err;
+
+	file = dentry_name(dentry);
+	if (file == NULL)
+		return -ENOMEM;
+	err = do_rmdir(file);
+	__putname(file);
+	return err;
+}
+
+static int dafs_rename(struct inode *from_ino, struct dentry *from,
+		  struct inode *to_ino, struct dentry *to)
+{
+	char *from_name, *to_name;
+	int err;
+
+	from_name = dentry_name(from);
+	if (from_name == NULL)
+		return -ENOMEM;
+	to_name = dentry_name(to);
+	if (to_name == NULL) {
+		__putname(from_name);
+		return -ENOMEM;
+	}
+	err = -EINVAL;
+	__putname(from_name);
+	__putname(to_name);
+	return err;
+}
+
+static const struct inode_operations dafs_dir_iops = {
+	.create		= dafs_create,
+	.lookup		= dafs_lookup,
+	.link		= dafs_link,
+	.unlink		= dafs_unlink,
+	.mkdir		= dafs_mkdir,
+	.rmdir		= dafs_rmdir,
+	.rename		= dafs_rename,
+};
+
+static char *host_root_path = ".";
+
+static int dafs_fill_sb_common(struct super_block *sb, void *d, int silent)
+{
+	struct inode *root_inode;
+	int err;
+
+	sb->s_blocksize = 1024;
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = IMGDAFS_SUPER_MAGIC;
+	sb->s_op = &dafs_sbops;
+	sb->s_d_op = &dafs_dentry_ops;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+	err = -ENOMEM;
+
+	root_inode = new_inode(sb);
+	if (!root_inode)
+		goto out;
+
+	err = read_name(root_inode, host_root_path);
+	if (err)
+		goto out_put;
+
+	err = -ENOMEM;
+	sb->s_fs_info = host_root_path;
+	sb->s_root = d_make_root(root_inode);
+	if (sb->s_root == NULL)
+		goto out;
+
+	return 0;
+
+out_put:
+	iput(root_inode);
+out:
+	return err;
+}
+
+static struct dentry *dafs_read_sb(struct file_system_type *type,
+				   int flags, const char *dev_name,
+				   void *data)
+{
+	if (!metag_da_enabled())
+		return ERR_PTR(-ENODEV);
+	return mount_nodev(type, flags, data, dafs_fill_sb_common);
+}
+
+static struct file_system_type dafs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "imgdafs",
+	.mount		= dafs_read_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags	= 0,
+};
+MODULE_ALIAS_FS("imgdafs");
+
+static int __init init_dafs(void)
+{
+	return register_filesystem(&dafs_type);
+}
+
+static void __exit exit_dafs(void)
+{
+	unregister_filesystem(&dafs_type);
+}
+
+module_init(init_dafs)
+module_exit(exit_dafs)
+MODULE_LICENSE("GPL");
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index dd7adff..8138c94 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -33,8 +33,11 @@
 	const char		**parent_names;
 	struct clk		**parents;
 	u8			num_parents;
+	u8			new_parent_index;
 	unsigned long		rate;
 	unsigned long		new_rate;
+	struct clk		*new_parent;
+	struct clk		*new_child;
 	unsigned long		flags;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1186098..dd272a3 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -27,6 +27,7 @@
 #define CLK_IS_ROOT		BIT(4) /* root clk, has no parent */
 #define CLK_IS_BASIC		BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE	BIT(6) /* do not use the cached clk rate */
+#define CLK_SET_RATE_REMUX	BIT(7) /* find best parent for rate change */
 
 struct clk_hw;
 
@@ -79,6 +80,10 @@
  * @round_rate:	Given a target rate as input, returns the closest rate actually
  * 		supported by the clock.
  *
+ * @determine_rate: Given a target rate as input, returns the closest rate
+ *		actually supported by the clock, and optionally the parent clock
+ *		that should be used to provide the clock rate.
+ *
  * @get_parent:	Queries the hardware to determine the parent of a clock.  The
  * 		return value is a u8 which specifies the index corresponding to
  * 		the parent clock.  This index can be applied to either the
@@ -126,6 +131,9 @@
 					unsigned long parent_rate);
 	long		(*round_rate)(struct clk_hw *hw, unsigned long,
 					unsigned long *);
+	long		(*determine_rate)(struct clk_hw *hw, unsigned long rate,
+					unsigned long *best_parent_rate,
+					struct clk **best_parent_clk);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
 	u8		(*get_parent)(struct clk_hw *hw);
 	int		(*set_rate)(struct clk_hw *hw, unsigned long,
@@ -196,6 +204,43 @@
 void of_fixed_clk_setup(struct device_node *np);
 
 /**
+ * struct clk_specified_rate_entry - a single possible specified rate
+ * @value:	value to match in config register
+ * @rate:	rate to use when config register matches @value
+ */
+struct clk_specified_rate_entry {
+	u32		value;
+	unsigned long	rate;
+};
+
+/**
+ * struct clk_specified_rate - specified-rate clock
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	register containing rate specifier field
+ * @shift:	shift to rate specifier field
+ * @width:	width of rate specifier field
+ * @rates:	mapping of specified frequencies
+ * @num_rates:	number of rates in array
+ */
+struct clk_specified_rate {
+	struct		clk_hw hw;
+	void __iomem	*reg;
+	u8		shift;
+	u8		width;
+	struct clk_specified_rate_entry	*rates;
+	unsigned int	num_rates;
+};
+
+extern const struct clk_ops clk_specified_rate_ops;
+struct clk *clk_register_specified_rate(struct device *dev, const char *name,
+		const char *parent_names, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		struct clk_specified_rate_entry *rates,
+		unsigned long num_rates);
+
+void of_specified_clk_setup(struct device_node *np);
+
+/**
  * struct clk_gate - gating clock
  *
  * @hw:		handle between common and hardware-specific interfaces
@@ -257,6 +302,7 @@
  *	Some hardware implementations gracefully handle this case and allow a
  *	zero divisor by not modifying their input clock
  *	(divide by one / bypass).
+ * CLK_DIVIDER_READ_ONLY - don't allow modification of divide value
  */
 struct clk_divider {
 	struct clk_hw	hw;
@@ -271,6 +317,7 @@
 #define CLK_DIVIDER_ONE_BASED		BIT(0)
 #define CLK_DIVIDER_POWER_OF_TWO	BIT(1)
 #define CLK_DIVIDER_ALLOW_ZERO		BIT(2)
+#define CLK_DIVIDER_READ_ONLY		BIT(3)
 
 extern const struct clk_ops clk_divider_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
@@ -283,6 +330,8 @@
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
 
+void of_divider_clk_setup(struct device_node *node);
+
 /**
  * struct clk_mux - multiplexer clock
  *
@@ -403,6 +452,7 @@
 struct clk_hw *__clk_get_hw(struct clk *clk);
 u8 __clk_get_num_parents(struct clk *clk);
 struct clk *__clk_get_parent(struct clk *clk);
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
 unsigned int __clk_get_enable_count(struct clk *clk);
 unsigned int __clk_get_prepare_count(struct clk *clk);
 unsigned long __clk_get_rate(struct clk *clk);
@@ -410,6 +460,9 @@
 bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *best_parent_rate,
+			      struct clk **best_parent_p);
 
 /*
  * FIXME clock api without lock protection
diff --git a/include/linux/img_lcd.h b/include/linux/img_lcd.h
new file mode 100644
index 0000000..919e800
--- /dev/null
+++ b/include/linux/img_lcd.h
@@ -0,0 +1,25 @@
+/*
+ * IMG LCD controller driver.
+ *
+ * Copyright (C) 2006, 2007, 2008, 2012 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+#ifndef __IMG_LCD_H
+#define __IMG_LCD_H
+
+#include <uapi/linux/img_lcd.h>
+
+struct img_lcd_board {
+	/* methods for handling CS line */
+	void (*enable_cs)(void);
+	void (*disable_cs)(void);
+};
+#endif
diff --git a/include/linux/img_mdc_dma.h b/include/linux/img_mdc_dma.h
new file mode 100644
index 0000000..800429b
--- /dev/null
+++ b/include/linux/img_mdc_dma.h
@@ -0,0 +1,480 @@
+/*
+ * IMG Meta DMA Controller (MDC) specific DMA code.
+ *
+ * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
+ */
+
+#ifndef __MDC_API_H__
+#define __MDC_API_H__
+
+struct dma_chan;
+struct dma_slave_config;
+struct platform_device;
+
+enum mdc_dma_tx_flags {
+	MDC_PRIORITY = (1 << 0),
+	MDC_NO_CALLBACK = (1 << 1),
+	MDC_ACCESS_DELAY = (1 << 2),
+	MDC_NEED_THREAD = (1 << 3),
+};
+
+enum mdc_dma_thread_type {
+	MDC_THREAD_FAST = (1 << 0),
+	MDC_THREAD_SLOW = (1 << 1),
+};
+
+struct mdc_dma_tx_control {
+	enum mdc_dma_tx_flags flags;
+	unsigned int prio;
+	unsigned int access_delay;
+	enum mdc_dma_thread_type thread_type;
+};
+
+/*
+ * MDC DMA cookie
+ * @req_channel: Channel to request or -1 for the first available one.
+ * On return, it contains the channel that will be allocated by the MDC
+ * DMA device
+ * @periph: Number of peripheral device requesting the channel.
+ */
+struct mdc_dma_cookie {
+	int req_channel;
+	unsigned int periph;
+};
+
+/* Platform data for SOC DMA callbacks */
+struct img_mdc_soc_callbacks {
+	int (*allocate) (int, unsigned int); /* allocate a DMA channel */
+	int (*free) (int); /* free a DMA channel */
+	/*
+	 * SOC DMA specific callbacks for suspend_noirq and resume_noirq.
+	 * Both executed in atomic context.
+	 */
+	void *(*suspend) (void);
+	void (*resume) (void *);
+};
+
+bool mdc_dma_filter_fn(struct dma_chan *, void *);
+
+int mdc_dma_probe(struct platform_device *pdev,
+		  const struct img_mdc_soc_callbacks *c);
+/* Legacy API */
+#include <linux/dma-mapping.h>
+
+#define MDC_CONTEXT_OFFSET		(0x40)
+
+/* -------------------- Register MDC_GENERAL_CONFIG -------------------- */
+
+#define _MDC_GENERAL_CONFIG_OFFSET	(0x000)
+
+#define _MDC_LIST_IEN_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_LIST_IEN_SHIFT		(31)
+#define _MDC_LIST_IEN_MASK		(0x80000000)
+#define _MDC_LIST_IEN_LENGTH		(1)
+
+#define _MDC_BSWAP_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_BSWAP_SHIFT		(30)
+#define _MDC_BSWAP_MASK			(0x40000000)
+#define _MDC_BSWAP_LENGTH		(1)
+
+#define _MDC_IEN_OFFSET			_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_IEN_SHIFT			(29)
+#define _MDC_IEN_MASK			(0x20000000)
+#define _MDC_IEN_LENGTH			(1)
+
+#define _MDC_LEVEL_INT_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_LEVEL_INT_SHIFT		(28)
+#define _MDC_LEVEL_INT_MASK		(0x10000000)
+#define _MDC_LEVEL_INT_LENGTH		(1)
+
+#define _MDC_CHANNEL_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_CHANNEL_SHIFT		(20)
+#define _MDC_CHANNEL_MASK		(0x03F00000)
+#define _MDC_CHANNEL_LENGTH		(6)
+
+#define _MDC_ACC_DEL_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_ACC_DEL_SHIFT		(16)
+#define _MDC_ACC_DEL_MASK		(0x00070000)
+#define _MDC_ACC_DEL_LENGTH		(3)
+
+#define _MDC_WAIT_UNPACK_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_WAIT_UNPACK_SHIFT		(13)
+#define _MDC_WAIT_UNPACK_MASK		(0x00002000)
+#define _MDC_WAIT_UNPACK_LENGTH		(1)
+
+#define _MDC_INC_W_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_INC_W_SHIFT		(12)
+#define _MDC_INC_W_MASK			(0x00001000)
+#define _MDC_INC_W_LENGTH		(1)
+
+#define _MDC_WAIT_PACK_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_WAIT_PACK_SHIFT		(9)
+#define _MDC_WAIT_PACK_MASK		(0x00000200)
+#define _MDC_WAIT_PACK_LENGTH		(1)
+
+#define _MDC_INC_R_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_INC_R_SHIFT		(8)
+#define _MDC_INC_R_MASK			(0x00000100)
+#define _MDC_INC_R_LENGTH		(1)
+
+#define _MDC_PHYSICAL_W_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_PHYSICAL_W_SHIFT		(7)
+#define _MDC_PHYSICAL_W_MASK		(0x00000080)
+#define _MDC_PHYSICAL_W_LENGTH		(1)
+
+#define _MDC_WIDTH_W_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_WIDTH_W_SHIFT		(4)
+#define _MDC_WIDTH_W_MASK		(0x00000070)
+#define _MDC_WIDTH_W_LENGTH		(3)
+
+#define _MDC_PHYSICAL_R_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_PHYSICAL_R_SHIFT		(3)
+#define _MDC_PHYSICAL_R_MASK		(0x00000008)
+#define _MDC_PHYSICAL_R_LENGTH		(1)
+
+#define _MDC_WIDTH_R_OFFSET		_MDC_GENERAL_CONFIG_OFFSET
+#define _MDC_WIDTH_R_SHIFT		(0)
+#define _MDC_WIDTH_R_MASK		(0x00000007)
+#define _MDC_WIDTH_R_LENGTH		(3)
+
+/* -------------------- Register MDC_READ_PORT_CONFIG -------------------- */
+
+#define _MDC_READ_PORT_CONFIG_OFFSET	(0x004)
+
+#define _MDC_STHREAD_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_STHREAD_SHIFT		(28)
+#define _MDC_STHREAD_MASK		(0xF0000000)
+#define _MDC_STHREAD_LENGTH		(4)
+
+#define _MDC_RTHREAD_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_RTHREAD_SHIFT		(24)
+#define _MDC_RTHREAD_MASK		(0x0F000000)
+#define _MDC_RTHREAD_LENGTH		(4)
+
+#define _MDC_PRIORITY_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_PRIORITY_SHIFT		(20)
+#define _MDC_PRIORITY_MASK		(0x00F00000)
+#define _MDC_PRIORITY_LENGTH		(4)
+
+#define _MDC_WTHREAD_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_WTHREAD_SHIFT		(16)
+#define _MDC_WTHREAD_MASK		(0x000F0000)
+#define _MDC_WTHREAD_LENGTH		(4)
+
+#define _MDC_HOLD_OFF_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_HOLD_OFF_SHIFT		(12)
+#define _MDC_HOLD_OFF_MASK		(0x0000F000)
+#define _MDC_HOLD_OFF_LENGTH		(4)
+
+#define _MDC_BURST_SIZE_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_BURST_SIZE_SHIFT		(4)
+#define _MDC_BURST_SIZE_MASK		(0x00000FF0)
+#define _MDC_BURST_SIZE_LENGTH		(8)
+
+#define _MDC_DREQ_ENABLE_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_DREQ_ENABLE_SHIFT		(1)
+#define _MDC_DREQ_ENABLE_MASK		(0x00000002)
+#define _MDC_DREQ_ENABLE_LENGTH		(1)
+
+#define _MDC_READBACK_OFFSET		_MDC_READ_PORT_CONFIG_OFFSET
+#define _MDC_READBACK_SHIFT		(0)
+#define _MDC_READBACK_MASK		(0x00000001)
+#define _MDC_READBACK_LENGTH		(1)
+
+/* -------------------- Register MDC_READ_ADDRESS -------------------- */
+
+#define _MDC_READ_ADDRESS_OFFSET	(0x008)
+
+#define _MDC_ADDR_R_OFFSET		_MDC_READ_ADDRESS_OFFSET
+#define _MDC_ADDR_R_SHIFT		(0)
+#define _MDC_ADDR_MASK			(0xFFFFFFFF)
+#define _MDC_ADDR_LENGTH		(32)
+
+/* -------------------- Register MDC_WRITE_ADDRESS -------------------- */
+
+#define _MDC_WRITE_ADDRESS_OFFSET	(0x00C)
+
+#define _MDC_ADDR_W_OFFSET		_MDC_WRITE_ADDRESS_OFFSET
+#define _MDC_ADDR_W_SHIFT		(0)
+#define _MDC_ADDR_W_MASK		(0xFFFFFFFF)
+#define _MDC_ADDR_W_LENGTH		(32)
+
+/* -------------------- Register MDC_TRANSFER_SIZE -------------------- */
+
+#define _MDC_TRANSFER_SIZE_OFFSET	(0x010)
+
+#define _MDC_CNT_OFFSET			_MDC_TRANSFER_SIZE_OFFSET
+#define _MDC_CNT_SHIFT			(0)
+#define _MDC_CNT_MASK			(0x00FFFFFF)
+#define _MDC_CNT_LENGTH			(24)
+
+/* -------------------- Register MDC_LIST_NODE_ADDRESS -------------------- */
+
+#define _MDC_LIST_NODE_ADDRESS_OFFSET	(0x014)
+
+#define _MDC_ADDR_S_OFFSET		_MDC_LIST_NODE_ADDRESS_OFFSET
+#define _MDC_ADDR_S_SHIFT		(0)
+#define _MDC_ADDR_S_MASK		(0xFFFFFFFF)
+#define _MDC_ADDR_S_LENGTH		(32)
+
+/* -------------------- Register MDC_CMDS_PROCESSED -------------------- */
+
+#define _MDC_CMDS_PROCESSED_OFFSET	(0x018)
+
+#define _MDC_CMD_PROCESSED_OFFSET	_MDC_CMDS_PROCESSED_OFFSET
+#define _MDC_CMD_PROCESSED_SHIFT	(16)
+#define _MDC_CMD_PROCESSED_MASK		(0x003F0000)
+#define _MDC_CMD_PROCESSED_LENGTH	(6)
+
+#define _MDC_INT_ACTIVE_OFFSET		_MDC_CMDS_PROCESSED_OFFSET
+#define _MDC_INT_ACTIVE_SHIFT		(8)
+#define _MDC_INT_ACTIVE_MASK		(0x00000100)
+#define _MDC_INT_ACTIVE_LENGTH		(1)
+
+#define _MDC_CMDS_DONE_OFFSET		_MDC_CMDS_PROCESSED_OFFSET
+#define _MDC_CMDS_DONE_SHIFT		(0)
+#define _MDC_CMDS_DONE_MASK		(0x0000003F)
+#define _MDC_CMDS_DONE_LENGTH		(6)
+
+/* -------------------- Register MDC_CONTROL_AND_STATUS -------------------- */
+
+#define _MDC_CONTROL_AND_STATUS_OFFSET	(0x01C)
+
+#define _MDC_TAG_OFFSET			_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_TAG_SHIFT			(24)
+#define _MDC_TAG_MASK			(0x0F000000)
+#define _MDC_TAG_LENGTH			(4)
+
+#define _MDC_CANCEL_OFFSET		_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_CANCEL_SHIFT		(20)
+#define _MDC_CANCEL_MASK		(0x00100000)
+#define _MDC_CANCEL_LENGTH		(1)
+
+#define _MDC_DREQ_OFFSET		_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_DREQ_SHIFT			(16)
+#define _MDC_DREQ_MASK			(0x00010000)
+#define _MDC_DREQ_LENGTH		(1)
+
+#define _MDC_FIFO_DEPTH_OFFSET 		_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_FIFO_DEPTH_SHIFT		(8)
+#define _MDC_FIFO_DEPTH_MASK		(0x00000100)
+#define _MDC_FIFO_DEPTH_LENGTH 		(1)
+
+#define _MDC_LIST_EN_OFFSET		_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_LIST_EN_SHIFT		(4)
+#define _MDC_LIST_EN_MASK		(0x00000010)
+#define _MDC_LIST_EN_LENGTH		(1)
+
+#define _MDC_EN_OFFSET			_MDC_CONTROL_AND_STATUS_OFFSET
+#define _MDC_EN_SHIFT			(0)
+#define _MDC_EN_MASK			(0x00000001)
+#define _MDC_EN_LENGTH			(1)
+
+/* -------------------- Register MDC_GLOBAL_CONFA -------------------------- */
+
+#define _MDC_GLOBAL_CONFA_OFFSET	(0x900)
+
+#define _MDC_THREADID_WIDTH_OFFSET	_MDC_GLOBAL_CONFA_OFFSET
+#define _MDC_THREADID_WIDTH_SHIFT	(16)
+#define _MDC_THREADID_WIDTH_MASK	(0x00FF0000)
+#define _MDC_THREADID_WIDTH_LENGTH	(8)
+
+#define _MDC_NUM_CONTEXTS_OFFSET	_MDC_GLOBAL_CONFA_OFFSET
+#define _MDC_NUM_CONTEXTS_SHIFT		(8)
+#define _MDC_NUM_CONTEXTS_MASK		(0x0000FF00)
+#define _MDC_NUM_CONTEXTS_LENGTH	(8)
+
+#define _MDC_SYS_DATA_WIDTH_OFFSET	_MDC_GLOBAL_CONFA_OFFSET
+#define _MDC_SYS_DATA_WIDTH_SHIFT	(0)
+#define _MDC_SYS_DATA_WIDTH_MASK	(0x000000FF)
+#define _MDC_SYS_DATA_WIDTH_LENGTH	(8)
+
+
+/* -------------------- Register MDC_GLOBAL_CONFB -------------------------- */
+
+#define _MDC_GLOBAL_CONFB_OFFSET	(0x904)
+
+
+/* -------------------- Register MDC_GLOBAL_STATUS -------------------------- */
+
+#define _MDC_GLOBAL_STATUS_OFFSET	(0x908)
+
+
+/* Helper macros (don't use outside of this file) */
+#define _REG_ADDRESS(REG) _##REG##_OFFSET
+#define _REG_MASK(REG) _##REG##_MASK
+#define _REG_SHIFT(REG) _##REG##_SHIFT
+
+#define _MDC_WRITE_REG(base, context, REG, value)	\
+	iowrite32(value,(void *)((base + (context * MDC_CONTEXT_OFFSET) \
+			+ _REG_ADDRESS(REG))))
+
+#define _MDC_READ_REG(base, context, REG)	\
+	ioread32(base + (void *)((context * MDC_CONTEXT_OFFSET) \
+			+ _REG_ADDRESS(REG)))
+
+#define _MDC_READ_REG_FIELD(base, ctext, REG) \
+	((_MDC_READ_REG(base, ctext, REG) & _REG_MASK(REG)) >> _REG_SHIFT(REG))
+
+#define _MDC_READ_GLOBAL_REG(base, address)\
+	ioread32((void *)(base + address))
+
+#define _MDC_READ_GLOBAL_REG_FIELD(base, REG) \
+	((_MDC_READ_GLOBAL_REG(base, _REG_ADDRESS(REG)) \
+			& _REG_MASK(REG)) >> _REG_SHIFT(REG))
+
+/* Helper to be used externally */
+#define MDC_SET_FIELD(data, FIELD, value) \
+{\
+	data &= ~_REG_MASK(FIELD);\
+	data |=  value << _REG_SHIFT(FIELD);\
+}
+
+
+
+
+static inline void MDC_REG_RESET_CONTEXT(u32 base, int context)
+{
+	_MDC_WRITE_REG(base, context, MDC_CONTROL_AND_STATUS, (1 << 20));
+	wmb();
+	_MDC_WRITE_REG(base, context, MDC_GENERAL_CONFIG, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_READ_PORT_CONFIG, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_READ_ADDRESS, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_WRITE_ADDRESS, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_LIST_NODE_ADDRESS, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_TRANSFER_SIZE, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_CMDS_PROCESSED, 0x00000000);
+	_MDC_WRITE_REG(base, context, MDC_CONTROL_AND_STATUS, 0x00000000);
+	wmb();
+}
+
+
+
+static inline void MDC_RSET_GENERAL_CONFIG(u32 base, int context, u32 config)
+{
+	_MDC_WRITE_REG(base,  context, MDC_GENERAL_CONFIG, config);
+}
+
+static inline u32 MDC_RGET_GENERAL_CONFIG(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_GENERAL_CONFIG);
+}
+
+static inline void MDC_RSET_READ_PORT_CONFIG(u32 base, int context, u32 config)
+{
+	_MDC_WRITE_REG(base, context, MDC_READ_PORT_CONFIG, config);
+}
+
+static inline u32 MDC_RGET_READ_PORT_CONFIG(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_READ_PORT_CONFIG);
+}
+
+static inline void MDC_RSET_READ_ADDRESS(u32 base, int context, u32 address)
+{
+	_MDC_WRITE_REG(base, context, MDC_READ_ADDRESS, address);
+}
+
+static inline u32 MDC_RGET_READ_ADDRESS(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_READ_ADDRESS);
+}
+
+static inline void MDC_RSET_WRITE_ADDRESS(u32 base, int context, u32 address)
+{
+	_MDC_WRITE_REG(base, context, MDC_WRITE_ADDRESS, address);
+}
+
+static inline u32 MDC_RGET_WRITE_ADDRESS(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_WRITE_ADDRESS);
+}
+
+static inline void MDC_RSET_TRANSFER_SIZE(u32 base, int context, u32 size)
+{
+	_MDC_WRITE_REG(base, context, MDC_TRANSFER_SIZE, size);
+}
+
+static inline void MDC_RSET_LIST_NODE_ADDR(u32 base, int context, u32 address)
+{
+	_MDC_WRITE_REG(base, context, MDC_ADDR_S, address);
+}
+
+static inline u32 MDC_RGET_LIST_NODE_ADDR(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_ADDR_S);
+}
+
+
+static inline u32 MDC_RGET_CMDS_PROCESSED(u32 base, int context)
+{
+	return _MDC_READ_REG(base, context, MDC_CMDS_PROCESSED);
+}
+
+static inline void MDC_RSET_CMDS_PROCESSED(u32 base, int context, u32 val)
+{
+	_MDC_WRITE_REG(base, context, MDC_CMDS_PROCESSED, val);
+}
+
+static inline void MDC_REG_ENABLE(u32 base, int context)
+{
+	_MDC_WRITE_REG(base, context, MDC_EN,
+		_MDC_READ_REG(base, context, MDC_EN) | _MDC_EN_MASK);
+}
+
+static inline void MDC_LIST_ENABLE(u32 base, int context)
+{
+	_MDC_WRITE_REG(base, context, MDC_LIST_EN,
+		_MDC_READ_REG(base, context, MDC_LIST_EN) | _MDC_LIST_EN_MASK);
+}
+
+static inline void MDC_CANCEL(u32 base, int context)
+{
+	_MDC_WRITE_REG(base, context, MDC_CANCEL, _MDC_CANCEL_MASK);
+}
+
+static inline int MDC_REG_IS_BUSY(u32 base, int context)
+{
+	return _MDC_READ_REG_FIELD(base, context, MDC_EN);
+}
+
+/*List Support: */
+
+struct img_dma_mdc_list {
+	volatile u32 gen_conf;
+	volatile u32 readport_conf;
+	volatile u32 read_addr;
+	volatile u32 write_addr;
+	volatile u32 xfer_size;
+	volatile u32 node_addr;
+	volatile u32 cmds_done;
+	volatile u32 ctrl_status;
+};
+
+enum img_dma_priority {
+	IMG_DMA_PRIO_BULK = 0,
+	IMG_DMA_PRIO_REALTIME,
+};
+
+enum img_dma_direction {
+	IMG_DMA_INVALID_DIR,
+	IMG_DMA_TO_PERIPHERAL,
+	IMG_DMA_FROM_PERIPHERAL,
+	IMG_DMA_MEM2MEM,
+};
+
+enum img_dma_channel_state {
+	IMG_DMA_CHANNEL_RESERVED,
+	IMG_DMA_CHANNEL_AVAILABLE,
+	IMG_DMA_CHANNEL_INUSE,
+};
+
+enum img_dma_width {
+	IMG_DMA_WIDTH_8 = 0,
+	IMG_DMA_WIDTH_16,
+	IMG_DMA_WIDTH_32,
+	IMG_DMA_WIDTH_64,
+	IMG_DMA_WIDTH_128,
+};
+
+#endif /* __MDC_API_H__ */
diff --git a/include/linux/input/ts_qt5480.h b/include/linux/input/ts_qt5480.h
new file mode 100644
index 0000000..1964a30
--- /dev/null
+++ b/include/linux/input/ts_qt5480.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2009,2010 Imagination Technologies Limited.
+ *
+ * Quantum TouchScreen Controller driver.
+ */
+
+#ifndef _INPUT_TS_QT5480_H
+#define _INPUT_TS_QT5480_H
+
+#include <linux/ioctl.h>
+
+/* Our IOCTL family group */
+#define QT5480_IOCTL	'G'
+
+/* Send a Calibrate Command */
+#define QT5480_CALIBRATE		_IOW(QT5480_IOCTL, 1, int)
+
+/* Switch ON / OFF the device */
+#define QT5480_POWER			_IOW(QT5480_IOCTL, 2, int)
+
+/* Enable / Disable Debug tracking in the PDP Memory (TFT) */
+#define QT5480_DEBUG			_IOW(QT5480_IOCTL, 3, int)
+
+/* Raw register access - not normally for general use! */
+
+/* Read a single register */
+#define QT5480_GETREG			_IOWR(QT5480_IOCTL, 4, struct ts_qt5480_frame)
+/* Write a single register */
+#define QT5480_SETREG			_IOWR(QT5480_IOCTL, 5, struct ts_qt5480_frame)
+
+/* touch-screen mapping data */
+typedef struct {
+	unsigned short x_sensor_res;
+	unsigned short x_screen_res;
+	unsigned short x_flip;
+	unsigned short x_sensor_size;
+	unsigned short x_screen_size;
+	short x_sensor_offset;
+
+	unsigned short y_sensor_res;
+	unsigned short y_screen_res;
+	unsigned short y_flip;
+	unsigned short y_sensor_size;
+	unsigned short y_screen_size;
+	short y_sensor_offset;
+
+} ts_qt5480_mapping_t;
+
+/* register entry in the QT5480 configuration table */
+typedef struct {
+	unsigned char set;
+	unsigned char value;
+} ts_qt5480_conf_reg_t;
+
+/* QT5xx0 registers */
+enum {
+	QT_CHIP_ID = 0,
+	QT_CODE_VERSION,
+	QT_CALIBRATE,
+	QT_RESET,
+	QT_BACKUP_REQUEST,
+	QT_ADDRESS_PTR,
+	QT_EEPROM_CHKSUM,
+	QT_KEY_STATUS_0 = 8,
+	QT_KEY_STATUS_4 = 12,
+	QT_GENERAL_STATUS_1 = 14,
+	QT_GENERAL_STATUS_2,
+	QT_TOUCHSCR_0_X,
+	QT_TOUCHSCR_0_Y = 18,
+	QT_TOUCHSCR_1_X = 20,
+	QT_SLIDER_0 = 20,
+	QT_TOUCHSCR_1_Y = 22,
+	QT_SLIDER_4 = 24,
+	QT_FORCE_SNS = 26,
+	QT_KEY_GATE_STATUS,
+	QT_TOUCH_0_GESTURE = 28,
+	QT_TOUCH_1_GESTURE = 32,
+	QT_RESERVED_1,
+	QT_CHAN_1_DELTA = 256,
+	QT_CHAN_1_REF = 352,
+	QT_RESERVED_2 = 448,
+	QT_KEY_CONTROL = 512,
+	QT_THRESHOLD = 560,
+	QT_BL = 608,
+	QT_LP_MODE = 656,
+	QT_MIN_CYC_TIME,
+	QT_AWAKE_TIMEOUT,
+	QT_TRIGGER_CONTROL,
+	QT_GUARD_KEY_ENABLE,
+	QT_TOUCHSCR_SETUP,
+	QT_TOUCHSCR_LEN,
+	QT_SLIDER_1_LEN = 662,
+	QT_TOUCHSCR_HYST = 668,
+	QT_SLIDER_1_HYST = 668,
+	QT_GPO_CONTROL = 674,
+	QT_NDRIFT,
+	QT_PDRIFT,
+	QT_NDIL,
+	QT_SDIL,
+	QT_NRD,
+	QT_DHT,
+	QT_FORCE_THRESH,
+	QT_CLIP_LIMIT_X,
+	QT_CLIP_LIMIT_Y,
+	QT_LIN_OFFSET_X,
+	QT_LIN_TABLE_X = 686,
+	QT_LIN_OFFSET_Y = 702,
+	QT_LIN_TABLE_Y = 704,
+	QT_BURST_CONTROL = 720,
+	QT_STATUS_MASK,
+	QT_POSITION_FILTER,
+	QT_TOUCH_SIZE_RES,
+	QT_TOUCHSCR_PLATEAU,
+	QT_SLEW_RATE,
+	QT_MED_FILT_LEN,
+	QT_SIG_IIR_CONTROL,
+	QT_TOUCHDOWN_POS_HYST,
+	QT_GEST_CONFIG = 734,
+	QT_TAP_TIMEOUT,
+	QT_DRAG_TIMEOUT,
+	QT_FLICK_TIMEOUT,
+	QT_PRESS_SHORT_TIMEOUT,
+	QT_PRESS_LONG_TIMEOUT,
+	QT_PRESS_RPT_TIMEOUT,
+	QT_FLICK_THR_LSB = 742,
+	QT_FLICK_THR_MSB,
+	QT_DRAG_THR_LSB,
+	QT_DRAG_THR_MSB,
+
+	QT_MAX_REG = 748,
+};
+
+/* touch-screen frame */
+struct ts_qt5480_frame {
+	__le16 addr;
+	unsigned char data[5];
+	int stat;
+};
+
+struct qt5480_platform_data {
+	/* Function to poll change status. */
+	int (*poll_status)(void);
+	/* Physical mapping of the touch sensor. */
+	ts_qt5480_mapping_t *phy_map;
+	/* Touch screen configuration. */
+	ts_qt5480_conf_reg_t *config;
+};
+
+#endif
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
index 697af0f..f01812e0 100644
--- a/include/linux/irqchip/metag-ext.h
+++ b/include/linux/irqchip/metag-ext.h
@@ -5,12 +5,18 @@
 #ifndef _LINUX_IRQCHIP_METAG_EXT_H_
 #define _LINUX_IRQCHIP_METAG_EXT_H_
 
+#include <linux/compiler.h>
+
 struct irq_data;
 struct platform_device;
 
 /* called from core irq code at init */
 int init_external_IRQ(void);
 
+/* DEPRECATED use devicetree instead */
+/* map an external IRQ to a virtual IRQ number */
+int __deprecated external_irq_map(unsigned int hw);
+
 /*
  * called from SoC init_irq() callback to dynamically indicate the lack of
  * HWMASKEXT registers.
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 198f0fa..bfc17cc 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -209,6 +209,10 @@
 #define DW_MCI_QUIRK_HIGHSPEED			BIT(2)
 /* Unreliable card detection */
 #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION	BIT(3)
+/* Capable of unlocking via GPIO clk_pin */
+#define DW_MCI_QUIRK_GPIO_UNLOCK		BIT(4)
+/* Capable of bit-banging reset */
+#define DW_MCI_QUIRK_BIT_BANG			BIT(5)
 
 /* Slot level quirks */
 /* This slot has no write protect */
@@ -230,6 +234,8 @@
 
 	u32 quirks; /* Workaround / Quirk flags */
 	unsigned int bus_hz; /* Clock speed at the cclk_in pad */
+	unsigned int clk_pin; /* GPIO of CLK for clear lock/bit-bang reset */
+	unsigned int cmd_pin; /* GPIO of CMD for bit-bang reset */
 
 	u32 caps;	/* Capabilities */
 	u32 caps2;	/* More capabilities */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 6aa2380..ac05b3c 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -29,6 +29,11 @@
  *	if for example some other pin is going to drive the signal connected
  *	to it for a while. Pins used for input are usually always high
  *	impedance.
+ * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
+ *	weakly drives the last value on a tristate bus, also known as a "bus
+ *	holder", "bus keeper" or "repeater". This allows another device on the
+ *	bus to change the value by driving the bus high or low and switching to
+ *	tristate. The argument is ignored.
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *	impedance to VDD). If the argument is != 0 pull-up is enabled,
  *	if it is 0, pull-up is disabled.
@@ -78,6 +83,7 @@
 enum pin_config_param {
 	PIN_CONFIG_BIAS_DISABLE,
 	PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
+	PIN_CONFIG_BIAS_BUS_HOLD,
 	PIN_CONFIG_BIAS_PULL_UP,
 	PIN_CONFIG_BIAS_PULL_DOWN,
 	PIN_CONFIG_DRIVE_PUSH_PULL,
diff --git a/include/linux/sgx2d.h b/include/linux/sgx2d.h
new file mode 100644
index 0000000..dd7cfd2
--- /dev/null
+++ b/include/linux/sgx2d.h
@@ -0,0 +1,27 @@
+/*
+ * PowerVR SGX 2D block driver.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+#ifndef _IMG_SGX2D_H_
+#define _IMG_SGX2D_H_
+
+#include <uapi/linux/sgx2d.h>
+
+
+/* private registers */
+#define SGX2D_REG_PRIVATE	0x80000000
+#define SGX2D_REG_SRST		(SGX2D_REG_PRIVATE | 0x06)	/* soft reset */
+
+struct sgx2d_pdata_reg {
+	struct sgx2d_reg reg;	/* offset into specified region */
+	unsigned int region;	/* memory io resource number */
+};
+struct sgx2d_pdata {
+	struct sgx2d_vers vers;
+	struct sgx2d_pdata_reg *regs;	/* register information */
+	unsigned int reg_count;		/* ARRAY_SIZE(regs) */
+	int unmappable;			/* set if the iomem isn't mappable */
+};
+
+#endif /* _IMG_SGX2D_H_ */
diff --git a/include/linux/spi/spi_img.h b/include/linux/spi/spi_img.h
new file mode 100644
index 0000000..ab5e63c
--- /dev/null
+++ b/include/linux/spi/spi_img.h
@@ -0,0 +1,27 @@
+#ifndef _IMG_SPI_H_
+#define _IMG_SPI_H_
+
+/* Maximum possible transfer size */
+#define IMG_SPI_MAX_TRANSFER 4096
+
+/* Platform data for SPI controller devices */
+struct img_spi_master {
+	u16 num_chipselect;
+	/* MDC needs channel number */
+	int tx_dma_channel_num;
+	/* DMAC needs peripheral number */
+	int tx_dma_peripheral_num;
+	int rx_dma_channel_num;
+	int rx_dma_peripheral_num;
+	/* Clock rate (0 if it shouldn't be changed) */
+	unsigned long clk_rate;
+};
+
+/* Controller data for SPI slave devices, passed as part of platform data */
+struct img_spi_chip {
+	u8 cs_setup;
+	u8 cs_hold;
+	u8 cs_delay;
+};
+
+#endif
diff --git a/include/linux/uccp.h b/include/linux/uccp.h
new file mode 100644
index 0000000..d1c9ab0
--- /dev/null
+++ b/include/linux/uccp.h
@@ -0,0 +1,36 @@
+/*
+ * IMG Universal Communications Core Platform (UCCP) driver interface.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+#ifndef _IMG_UCCP_H_
+#define _IMG_UCCP_H_
+
+#include <uapi/linux/uccp.h>
+
+
+/* platform resource numbering in flags */
+#define UCCP_RES_HOSTSYSBUS	0x0
+#define UCCP_RES_MCREQ		0x1
+#define UCCP_RES_UCCP(res)	(((res) & 0xf0) >> 4)
+#define UCCP_RES_TYPE(res)	((res) & 0x0f)
+#define UCCP_RES(uccp, res)	(((uccp) << 4) | (res))
+
+struct uccp_core {
+	struct uccp_region *regions;
+	unsigned int num_regions;
+	unsigned int num_mc_req;
+
+	struct device *device;
+	struct resource *host_sys_bus;
+	struct resource *mc_req;
+};
+
+struct uccp_pdata {
+	struct uccp_core *cores;
+	unsigned int num_cores;
+
+	struct uccp_region *regions;
+	unsigned int num_regions;
+};
+#endif /* _IMG_UCCP_H_ */
diff --git a/include/linux/usb/dwc_otg_platform.h b/include/linux/usb/dwc_otg_platform.h
new file mode 100644
index 0000000..3b0cb73
--- /dev/null
+++ b/include/linux/usb/dwc_otg_platform.h
@@ -0,0 +1,27 @@
+/*
+ * dwc_otg_platform.h
+ *
+ * (c) 2010 Imagination Technologies
+ */
+
+#ifndef DWC_OTG_PLATFORM_H_
+#define DWC_OTG_PLATFORM_H_
+
+/**
+ * This structure is used to pass board specific information via platform data
+ */
+struct dwc_otg_board {
+	/* methods for enabling / disabling Vbus at the SoC Level*/
+	void (*enable_vbus)(void);
+	void (*disable_vbus)(void);
+	/**
+	 * Control whether host communication is permitted, to allow the device
+	 * to complete its power-up and initialisation without the host getting
+	 * confused.
+	 * @normal 1: normal.
+	 *         0: prevent host communication.
+	 */
+	void (*vbus_valid)(int normal);
+};
+
+#endif /* DWC_OTG_PLATFORM_H_ */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 5d5d3a3..19c471b 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -30,6 +30,7 @@
 	RC_TYPE_RC6_6A_24	= 15,	/* Philips RC6-6A-24 protocol */
 	RC_TYPE_RC6_6A_32	= 16,	/* Philips RC6-6A-32 protocol */
 	RC_TYPE_RC6_MCE		= 17,	/* MCE (Philips RC6-6A-32 subtype) protocol */
+	RC_TYPE_SHARP		= 18,	/* Sharp protocol */
 };
 
 #define RC_BIT_NONE		0
@@ -51,6 +52,7 @@
 #define RC_BIT_RC6_6A_24	(1 << RC_TYPE_RC6_6A_24)
 #define RC_BIT_RC6_6A_32	(1 << RC_TYPE_RC6_6A_32)
 #define RC_BIT_RC6_MCE		(1 << RC_TYPE_RC6_MCE)
+#define RC_BIT_SHARP		(1 << RC_TYPE_SHARP)
 
 #define RC_BIT_ALL	(RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \
 			 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
@@ -58,7 +60,7 @@
 			 RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
 			 RC_BIT_NEC | RC_BIT_SANYO | RC_BIT_MCE_KBD | \
 			 RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
-			 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)
+			 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP)
 
 struct rc_map_table {
 	u32	scancode;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 66216c1..3beaef3 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -198,7 +198,7 @@
 	__u8 additional_cdb_length;         /* total cdb length - 8 */
 	__be16 service_action;
 	/* service specific data follows */
-};
+} __packed;
 
 static inline unsigned
 scsi_varlen_cdb_length(const void *hdr)
diff --git a/include/sound/tansen.h b/include/sound/tansen.h
new file mode 100644
index 0000000..be4ab9c
--- /dev/null
+++ b/include/sound/tansen.h
@@ -0,0 +1,22 @@
+/*
+ * sound/tansen.h -- GTI port API for the tansen codec
+ *
+ * Copyright 2013, Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TANSEN_GTI_API
+#define __TANSEN_GTI_API
+
+#define GTI_MAKE_CTRL_REG(a, b, c, d) \
+	(((a & 1) << 3) | ((b & 1) << 2) | ((c & 1) << 1) | ((d & 1) << 0))
+
+u32 gti_read(void __iomem *port, unsigned long reg);
+void gti_write(void __iomem *port, unsigned long reg,
+		unsigned long value);
+void gti_reset(void __iomem *port, int reset);
+
+#endif /* __TANSEN_GTI_API */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index bdc6e87..abe203b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -176,6 +176,8 @@
 header-y += if_vlan.h
 header-y += if_x25.h
 header-y += igmp.h
+header-y += img_event_timer.h
+header-y += img_lcd.h
 header-y += in.h
 header-y += in6.h
 header-y += in_route.h
@@ -343,6 +345,7 @@
 header-y += serial_core.h
 header-y += serial_reg.h
 header-y += serio.h
+header-y += sgx2d.h
 header-y += shm.h
 header-y += signal.h
 header-y += signalfd.h
@@ -378,6 +381,7 @@
 header-y += tty.h
 header-y += tty_flags.h
 header-y += types.h
+header-y += uccp.h
 header-y += udf_fs_i.h
 header-y += udp.h
 header-y += uhid.h
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index fb795c3..dc076b60 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -132,6 +132,7 @@
 #define FB_ACCEL_NEOMAGIC_NM2360 97	/* NeoMagic NM2360              */
 #define FB_ACCEL_NEOMAGIC_NM2380 98	/* NeoMagic NM2380              */
 #define FB_ACCEL_PXA3XX		 99	/* PXA3xx			*/
+#define FB_ACCEL_IMG_PDP_1	100	/* ImgTec PDP version 1		*/
 
 #define FB_ACCEL_SAVAGE4        0x80	/* S3 Savage4                   */
 #define FB_ACCEL_SAVAGE3D       0x81	/* S3 Savage3D                  */
diff --git a/include/uapi/linux/img_event_timer.h b/include/uapi/linux/img_event_timer.h
new file mode 100644
index 0000000..1e3bf82
--- /dev/null
+++ b/include/uapi/linux/img_event_timer.h
@@ -0,0 +1,81 @@
+/*
+ * IMG Event Timer Driver interface
+ *
+ * Copyright (C) 2011  Imagination Technologies
+ */
+#ifndef EVENT_TIMER_H_
+#define EVENT_TIMER_H_
+
+#include <linux/types.h>
+
+
+#define EVT_CLOCK_32K	0
+#define EVT_CLOCK_SCP0	1
+#define EVT_CLOCK_SCP1	2
+
+struct evt_clock {
+	__u32 src;
+};
+
+#define EVT_MAX_COUNTERS	6
+
+#define EVT_SRC_UCC0		0
+#define EVT_SRC_UCC1		1
+#define EVT_SRC_TFT_HSYNC	2
+#define EVT_SRC_TFT_VSYNC	3
+#define EVT_SRC_TFT_2D		4
+#define EVT_SRC_HEP		5
+#define EVT_SRC_SCB0		6
+#define EVT_SRC_SCB1		7
+#define EVT_SRC_SCB2		8
+#define EVT_SRC_SDIO		9
+#define EVT_SRC_UART0		10
+#define EVT_SRC_UART1		11
+#define EVT_SRC_SPIM0		12
+#define EVT_SRC_SPIS		13
+#define EVT_SRC_SPIM1		14
+#define EVT_SRC_I2SOUT0		15
+#define EVT_SRC_I2SOUT1		16
+#define EVT_SRC_I2SOUT2		17
+#define EVT_SRC_I2SIN		18
+#define EVT_SRC_GPIO0		19
+#define EVT_SRC_GPIO1		20
+#define EVT_SRC_GPIO2		21
+#define EVT_SRC_SOCIF		22
+#define EVT_SRC_LCD		23
+#define EVT_SRC_PDC		24
+#define EVT_SRC_USB		25
+#define EVT_SRC_SDHOST		26
+#define EVT_SRC_MDC0		27
+#define EVT_SRC_MDC1		28
+#define EVT_SRC_MDC2		29
+#define EVT_SRC_MDC3		30
+#define EVT_SRC_MDC4		31
+#define EVT_SRC_MDC5		32
+#define EVT_SRC_MDC6		33
+#define EVT_SRC_MDC7		34
+#define EVT_SRC_PDC_IR		35
+#define EVT_SRC_PDC_RTC		36
+#define EVT_SRC_PDC_WD		37
+
+struct evt_event {
+	__u32 counter;
+	__u32 source;
+	__u32 timestamp;
+	__u32 txtimer;
+	__u32 timeofday_sec;
+	__u32 timeofday_ns;
+};
+
+#define EVTIO 0xF2
+
+/*set clock source*/
+#define EVTIO_SETCLOCK		_IOW(EVTIO, 0x40, struct evt_clock)
+/*set the event to count*/
+#define EVTIO_SETEVENTSRC	_IOW(EVTIO, 0x41, struct evt_event)
+/*get event timestamp*/
+#define EVTIO_GETEVTS		_IOWR(EVTIO, 0x42, struct evt_event)
+
+
+
+#endif /* EVENT_TIMER_H_ */
diff --git a/include/uapi/linux/img_lcd.h b/include/uapi/linux/img_lcd.h
new file mode 100644
index 0000000..d74dfef
--- /dev/null
+++ b/include/uapi/linux/img_lcd.h
@@ -0,0 +1,55 @@
+/*
+ * IMG LCD controller driver.
+ *
+ * Copyright (C) 2006, 2007, 2008, 2012 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+#ifndef _UAPI__IMG_LCD_H
+#define _UAPI__IMG_LCD_H
+
+/* Our IOCTL family group */
+#define ALPHA_IOCTL	'A'
+
+/* Write an instruction byte */
+#define ALPHA_INSTR _IOW(ALPHA_IOCTL, 1, char)
+
+/* Write a data byte */
+#define ALPHA_DATA _IOW(ALPHA_IOCTL, 2, char)
+
+struct alpha_width_struct {
+	unsigned char width:4;		/* Transmit width of 1:4:8 bits */
+	unsigned char msb:1;		/* msb or lsb first (or nibble) */
+};
+
+/* Set the interface width (use the ENUMs!) */
+#define ALPHA_WIDTH _IOW(ALPHA_IOCTL, 3, struct alpha_width_struct *)
+
+struct alpha_speed_struct {
+	unsigned short d_period;
+	unsigned short p_h_width:4;
+	unsigned short p_h_delay:4;
+	unsigned short t_div:2;
+};
+
+/* Set the interface speed for data or command phases */
+#define ALPHA_DATA_SPEED _IOW(ALPHA_IOCTL, 4, struct alpha_speed_struct *)
+#define ALPHA_COMMAND_SPEED _IOW(ALPHA_IOCTL, 5, struct alpha_speed_struct *)
+
+struct alpha_data_block {
+	unsigned len;
+	char *p;
+};
+
+#define ALPHA_DATA_BLOCK _IOW(ALPHA_IOCTL, 6, struct alpha_data_block *)
+#define ALPHA_COMMAND_BLOCK _IOW(ALPHA_IOCTL, 7, struct alpha_data_block *)
+
+
+#endif /* _UAPI__IMG_LCD_H */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 2944278..0acdf2a 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -31,6 +31,7 @@
 #define PSTOREFS_MAGIC		0x6165676C
 #define EFIVARFS_MAGIC		0xde5e81e4
 #define HOSTFS_SUPER_MAGIC	0x00c0ffee
+#define IMGDAFS_SUPER_MAGIC	0xdadadaf5
 
 #define MINIX_SUPER_MAGIC	0x137F		/* minix v1 fs, 14 char names */
 #define MINIX_SUPER_MAGIC2	0x138F		/* minix v1 fs, 30 char names */
diff --git a/include/uapi/linux/sgx2d.h b/include/uapi/linux/sgx2d.h
new file mode 100644
index 0000000..7ea7556
--- /dev/null
+++ b/include/uapi/linux/sgx2d.h
@@ -0,0 +1,58 @@
+/*
+ * PowerVR SGX 2D block driver.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+
+#ifndef _UAPI_IMG_SGX2D_H_
+#define _UAPI_IMG_SGX2D_H_
+
+#include <linux/types.h>
+
+/* architecture families */
+#define SGX2D_FAM_UNSPECIFIED	0x00
+#define SGX2D_FAM_COMET		0x01
+
+/* architecture capabilities */
+#define SGX2D_CAP_FIFOFREE	0x00000001	/* Fifo freespace available */
+
+/* registers */
+#define SGX2D_REG_INVALID	0x00
+#define SGX2D_REG_SLAVEPORT	0x01
+#define SGX2D_REG_FIFOFREE	0x02	/* space left in slave port */
+#define SGX2D_REG_BLTCOUNT	0x03	/* number of completed blits */
+#define SGX2D_REG_BUSY		0x04	/* busy status */
+#define SGX2D_REG_IDLE		0x05	/* idle status */
+#define SGX2D_REG_BASEADDR	0x06	/* memory base address */
+
+struct sgx2d_vers {
+	__u32 sgx_vers;
+	__u32 arch_fam;		/* SGX2D_FAM_* */
+	__u32 arch_vers;
+	__u32 caps;		/* SGX2D_CAP_* */
+};
+
+struct sgx2d_reg {
+	__u32 id;		/* in, SGX2D_REG_* */
+	__u32 offset;		/* offset into mmio memory */
+	__u32 mask;		/* register mask */
+	__u8 shift;
+};
+
+/* what to pass to mmap */
+struct sgx2d_meminfo {
+	void *addr;		/* address to use/pass to mmap */
+	unsigned int len;	/* length of memory area */
+	int flags;		/* mmap flags (0 indicates unmappable) */
+};
+
+#define SGX2DIO 0xF2
+
+#define SGX2DIO_WAITIDLE	_IO(SGX2DIO,  0xC0)
+#define SGX2DIO_SOFTRST		_IO(SGX2DIO,  0xC1)
+#define SGX2DIO_GETVERS		_IOR(SGX2DIO,  0xC2, struct sgx2d_vers)
+#define SGX2DIO_GETREG		_IOWR(SGX2DIO, 0xC3, struct sgx2d_reg)
+#define SGX2DIO_GETMEM		_IOR(SGX2DIO,  0xC4, struct sgx2d_meminfo)
+
+
+#endif /* _UAPI_IMG_SGX2D_H_ */
diff --git a/include/uapi/linux/uccp.h b/include/uapi/linux/uccp.h
new file mode 100644
index 0000000..22fb8f1
--- /dev/null
+++ b/include/uapi/linux/uccp.h
@@ -0,0 +1,58 @@
+/*
+ * IMG Universal Communications Core Platform (UCCP) driver interface.
+ *
+ * Copyright (C) 2010  Imagination Technologies
+ */
+
+#ifndef _UAPI_IMG_UCCP_H_
+#define _UAPI_IMG_UCCP_H_
+
+#include <linux/types.h>
+
+#define UCCP_REGION_ALL			0x01
+#define UCCP_REGION_SYS_INTERNAL	0x02
+#define UCCP_REGION_MTX			0x10
+#define UCCP_REGION_MCP_16_BIT		0x20
+#define UCCP_REGION_MCP_24_BIT		0x21
+
+struct uccp_region {
+	__u32 type;	/* UCCP_REGION_* */
+	__u32 physical;	/* Physical address of region */
+	__u32 offset;	/* Offset within device file */
+	__u32 size;	/* Size of region */
+};
+
+#define UCCP_REG_DIRECT		0x00
+#define UCCP_REG_INDIRECT	0x01
+#define UCCP_REG_MCPPERIP	0x02	/* MCP peripheral memory */
+#define UCCP_REG_MCPPERIP_PACK	0x03	/* MCP packed peripheral memory */
+
+struct uccp_reg {
+	__u32 op;	/* UCCP_REG_* */
+	__u32 reg;	/* Register id to read/write */
+	__u32 val;	/* Value of register */
+};
+
+struct uccp_mcreq {
+	__u32 index;	/* Index into mcreq table */
+	__u32 bulk;	/* Bulk (MTX) address */
+	__u32 size;	/* Size in bytes */
+	__u32 physical;	/* Physical address */
+};
+
+#define UCCPIO 0xF2
+
+/* get info about a memory region by type */
+#define UCCPIO_GETREGION	_IOWR(UCCPIO, 0x80, struct uccp_region)
+/* read/write to a register on the host bus */
+#define UCCPIO_WRREG		_IOW(UCCPIO, 0x81, struct uccp_reg)
+#define UCCPIO_RDREG		_IOWR(UCCPIO, 0x81, struct uccp_reg)
+/* clear/get/set an MC REQ entry */
+#define UCCPIO_CLRMCREQ		_IOW(UCCPIO, 0x82, struct uccp_mcreq)
+#define UCCPIO_SETMCREQ		_IOW(UCCPIO, 0x83, struct uccp_mcreq)
+#define UCCPIO_GETMCREQ		_IOWR(UCCPIO, 0x83, struct uccp_mcreq)
+/* soft reset the UCCP */
+#define UCCPIO_SRST		_IO(UCCPIO, 0x84)
+
+
+#endif /* _UAPI_IMG_UCCP_H_ */
diff --git a/include/uapi/video/Kbuild b/include/uapi/video/Kbuild
index ac7203b..7b68be8 100644
--- a/include/uapi/video/Kbuild
+++ b/include/uapi/video/Kbuild
@@ -1,4 +1,5 @@
 # UAPI Header export list
 header-y += edid.h
+header-y += pdpfb.h
 header-y += sisfb.h
 header-y += uvesafb.h
diff --git a/include/uapi/video/pdpfb.h b/include/uapi/video/pdpfb.h
new file mode 100644
index 0000000..a90ea92
--- /dev/null
+++ b/include/uapi/video/pdpfb.h
@@ -0,0 +1,123 @@
+/*
+ * PDP Desktop Graphics Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_VIDEO_PDPFB_H
+#define _UAPI_VIDEO_PDPFB_H
+
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct pdpfb_geom {
+	__u16 x, y;
+	__u16 w, h;	/* 0 for no scaling */
+};
+
+struct pdpfb_ckey {
+	__u32 ckey;	/* rgb888 */
+	__u32 mask;	/* mask of ckey */
+};
+
+/* YUV->RGB colour space conversion */
+struct pdpfb_vid_csc_coefs {
+	__s32 ry, rv, ru;
+	__s32 gy, gv, gu;
+	__s32 by, bv, bu;
+};
+
+struct pdpfb_vid_csc {
+	__u32 enable;
+	__u32 preset;	/* 0 for custom coefficients */
+	__u32 cosited;
+
+	struct pdpfb_vid_csc_coefs coefs;
+};
+
+/* Planar pixel formats */
+struct pdpfb_vid_planar {
+	/* offsets of planes inside framebuffer */
+	__u32 y_offset;
+	__u32 u_offset;
+	__u32 v_offset;
+	/* line strides for each plane */
+	__u32 y_line_length;
+	__u32 u_line_length;
+	__u32 v_line_length;
+};
+
+/* User provided memory */
+struct pdpfb_usermem {
+	__u32 phys;
+	__u32 len;
+	__u16 flags;
+};
+
+#define PDP_CKEYMODE_DISABLE	0x0
+#define PDP_CKEYMODE_PREVIOUS	0x1	/* compare ckey with previous plane */
+#define PDP_CKEYMODE_CURRENT	0x2	/* compare ckey with current plane */
+
+#define PDP_BLENDMODE_NOALPHA	0x0
+#define PDP_BLENDMODE_INVERT	0x1
+#define PDP_BLENDMODE_GLOBAL	0x2
+#define PDP_BLENDMODE_PIXEL	0x3
+
+/* non standard pixel formats */
+#define PDP_VID_PIXFMT_420_PL8		1	/* YV12 */
+#define PDP_VID_PIXFMT_420_PL8IVU	2	/* interleaved chroma (v lsb) */
+#define PDP_VID_PIXFMT_420_PL8IUV	3	/* interleaved chroma (u lsb) */
+#define PDP_VID_PIXFMT_422_UY0VY1_8888	4	/* U lsb */
+#define PDP_VID_PIXFMT_422_VY0UY1_8888	5	/* V lsb */
+#define PDP_VID_PIXFMT_422_Y0UY1V_8888	6	/* Y0 lsb */
+#define PDP_VID_PIXFMT_422_Y0VY1U_8888	7	/* Y0 lsb */
+#define PDP_VID_PIXFMT_420_T88CP	8	/* unimplemented */
+#define PDP_VID_PIXFMT_422_T88CP	9	/* unimplemented */
+#define PDP_VID_PIXFMT_MAX		10
+
+#define PDP_VID_CSCPRESET_HDTV		0x1
+#define PDP_VID_CSCPRESET_SDTV		0x2	/* default */
+#define PDP_VID_CSCPRESET_LEGACYHDTV	0x3
+#define PDP_VID_CSCPRESET_LEGACYSDTV	0x4
+
+/* PDPIO_SETUSERMEM flags */
+#define PDP_USERMEM_ALLPLANES	0x1
+
+#define PDPIO 0xF2
+
+/* general (0x00 - 0x0F) */
+#define PDPIO_GETBGND		_IOR(PDPIO, 0x00, __u32)
+#define PDPIO_SETBGND		_IOW(PDPIO, 0x00, __u32)
+#define PDPIO_GETSCRGEOM	_IOR(PDPIO, 0x01, struct pdpfb_geom)
+#define PDPIO_SETUSERMEM	_IOW(PDPIO, 0x02, struct pdpfb_usermem)
+
+/* general plane (0x10 - 0x1F) */
+#define PDPIO_GETEN		_IOR(PDPIO, 0x10, int)
+#define PDPIO_SETEN		_IOW(PDPIO, 0x10, int)
+#define PDPIO_GETPLANEPOS	_IOR(PDPIO, 0x11, int)
+#define PDPIO_SETPLANEPOS	_IOWR(PDPIO, 0x11, int)
+#define PDPIO_GETGEOM		_IOR(PDPIO, 0x12, struct pdpfb_geom)
+#define PDPIO_SETGEOM		_IOWR(PDPIO, 0x12, struct pdpfb_geom)
+#define PDPIO_GETCKEYMODE	_IOR(PDPIO, 0x13, __u32)
+#define PDPIO_SETCKEYMODE	_IOW(PDPIO, 0x13, __u32)
+#define PDPIO_GETCKEY		_IOR(PDPIO, 0x14, struct pdpfb_ckey)
+#define PDPIO_SETCKEY		_IOW(PDPIO, 0x14, struct pdpfb_ckey)
+#define PDPIO_GETBLENDMODE	_IOR(PDPIO, 0x15, __u32)
+#define PDPIO_SETBLENDMODE	_IOW(PDPIO, 0x15, __u32)
+#define PDPIO_GETGALPHA		_IOR(PDPIO, 0x16, __u32)
+#define PDPIO_SETGALPHA		_IOWR(PDPIO, 0x16, __u32)
+
+/* graphics plane (0x20 - 0x2F) */
+
+/* video plane (0x30 - 0x3F) */
+#define PDPIO_GETCSC		_IOR(PDPIO, 0x30, struct pdpfb_vid_csc)
+#define PDPIO_SETCSC		_IOWR(PDPIO, 0x30, struct pdpfb_vid_csc)
+#define PDPIO_GETPLANAR		_IOR(PDPIO, 0x31, struct pdpfb_vid_planar)
+#define PDPIO_SETPLANAR		_IOWR(PDPIO, 0x31, struct pdpfb_vid_planar)
+
+#endif /* _UAPI_VIDEO_PDPFB_H */
diff --git a/include/video/imgpdi_lcd.h b/include/video/imgpdi_lcd.h
new file mode 100644
index 0000000..b7dcdeb
--- /dev/null
+++ b/include/video/imgpdi_lcd.h
@@ -0,0 +1,42 @@
+/*
+ * Imagination Technologies Panel Display Interface (PDI).
+ *
+ * Copyright (C) 2012 Imagination Technologies
+ *
+ * Based on platform_lcd.h:
+ * Copyright 2008 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _IMGPDI_LCD_H_
+#define _IMGPDI_LCD_H_
+
+struct imgpdi_lcd_pdata;
+struct fb_info;
+
+struct imgpdi_lcd_timings {
+	unsigned int pwrsvgd;	/* HSYNC to PWRSV,GD */
+	unsigned int ls;	/* HSYNC to LS (> HAS) */
+	unsigned int pwrsvgd2;	/* LS to end of PWRSV,GD */
+	unsigned int nl;	/* HSYNC to NL */
+	unsigned int acb;	/* NL to end of ACB */
+
+	unsigned int gatedriver_en:1;
+	unsigned int newframe_en:1;
+	unsigned int blanking_en:1;
+	unsigned int blanking_level:1;
+};
+
+struct imgpdi_lcd_pdata {
+	int	(*match_fb)(struct imgpdi_lcd_pdata *, struct fb_info *);
+
+	/* active mode timings (NULL for bypass) */
+	struct imgpdi_lcd_timings *active;
+};
+
+#endif /* _IMGPDI_LCD_H_ */
diff --git a/include/video/pdpfb.h b/include/video/pdpfb.h
new file mode 100644
index 0000000..26275588
--- /dev/null
+++ b/include/video/pdpfb.h
@@ -0,0 +1,121 @@
+/*
+ * PDP Desktop Graphics Framebuffer
+ *
+ * Copyright (c) 2008 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _VIDEO_PDPFB_H
+#define _VIDEO_PDPFB_H
+
+#include <uapi/video/pdpfb.h>
+
+
+/* Determine PDP revision from SOC */
+
+#ifdef CONFIG_SOC_CHORUS2
+#define PDP_REV 0x010000
+#endif
+
+#ifdef CONFIG_SOC_TZ1090
+#define PDP_REV 0x010001
+#endif
+
+#ifndef PDP_REV
+#error PDP revision unknown
+#endif
+
+/* Capabilities of the PDP */
+
+#if PDP_REV >= 0x010001
+#define PDP_SHARED_BASE
+#define PDP_GAMMA		16
+#define PDP_VID_VSCALE
+#endif
+
+/*
+ * Indicates that colour key fields for stream x actually refer to the stream
+ * at blend position x, rather than graphics at stream 1 and video at stream 2.
+ */
+#define PDP_CKEY_BY_BLENDPOS
+
+#include <linux/fb.h>
+
+struct pdp_lcd_size_cfg { /* width and height of panel in mm */
+	int dynamic_mode;	/* the screen accepts different modes */
+	unsigned long width;
+	unsigned long height;
+};
+
+#define PDP_ACTIVE_LOW		1
+#define PDP_ACTIVE_HIGH		0
+#define PDP_CLOCK_NOT_INVERTED	0
+#define PDP_CLOCK_INVERTED	1
+
+struct pdp_sync_cfg {
+	unsigned int force_vsyncs:1;
+	unsigned int hsync_dis:1;	/* sync_ctrl.hsdis */
+	unsigned int vsync_dis:1;	/* sync_ctrl.vsdis */
+	unsigned int blank_dis:1;	/* sync_ctrl.blnkdis */
+	unsigned int blank_pol:1;	/* sync_ctrl.blnkpol */
+	unsigned int clock_pol:1;	/* sync_ctrl.clkpol */
+	unsigned int sync_slave:1;	/* sync_ctrl.[hv]sslave */
+};
+
+struct pdp_hwops {
+	void (*set_screen_power)(int pa);
+#ifdef PDP_SHARED_BASE
+	void (*set_shared_base)(unsigned long pa);
+#endif
+};
+
+#define PDPFB_PDATA_FIX_SHIFT 11
+struct pdp_info {
+	int bpp;
+	struct fb_videomode lcd_cfg;
+	struct pdp_lcd_size_cfg lcd_size_cfg;
+	struct pdp_sync_cfg sync_cfg;
+	struct pdp_hwops hwops;
+#ifdef PDP_VID_VSCALE
+	int linestore_len;
+	/*
+	 * Vertical pitch threshold to switch to bilinear (2-tap) scaling.
+	 * In .PDPFB_PDATA_FIX_SHIFT binary fixed point.
+	 */
+	unsigned int vpitch_bilinear_threshold;
+#endif
+};
+
+/* Video memory pools */
+#define PDPFB_MEMPOOL_MEM	0x00	/* Combined video memory */
+#define PDPFB_MEMPOOL_GFXMEM	0x01	/* Graphics plane memory */
+#define PDPFB_MEMPOOL_VIDMEM	0x02	/* Video plane memory */
+#define PDPFB_MEMPOOL_USER	0x03	/* User provided memory */
+#define PDPFB_MEMPOOL_NR_POOLS	4
+#define PDPFB_MEMPOOL_USERPLANE	0xfd	/* User provided memory for one plane */
+#define PDPFB_MEMPOOL_KERNEL	0xfe	/* Kernel allocated memory */
+#define PDPFB_MEMPOOL_NONE	0xff
+
+/* Platform resource numbering in flags */
+#define PDPFB_IORES_MEM		PDPFB_MEMPOOL_MEM
+#define PDPFB_IORES_GFXMEM	PDPFB_MEMPOOL_GFXMEM
+#define PDPFB_IORES_VIDMEM	PDPFB_MEMPOOL_VIDMEM
+#define PDPFB_IORES_PDP		0xf0
+
+/* Internal interface for graphics drivers */
+
+#define PDPFB_IRQ_VEVENT0	0x00004	/* safe update for gfx stream */
+#define PDPFB_IRQ_HBLNK0	0x00001	/* blanking for frame/gfx stream */
+
+typedef void (*pdpfb_isr_t) (void *arg, u32 mask);
+
+int pdpfb_register_isr(pdpfb_isr_t isr, void *arg, u32 mask);
+int pdpfb_unregister_isr(pdpfb_isr_t isr, void *arg, u32 mask);
+int pdpfb_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
+int pdpfb_wait_for_irq_interruptible_timeout(u32 irqmask,
+						unsigned long timeout);
+int pdpfb_wait_vsync(void);
+
+#endif
diff --git a/include/video/tz1090_auxdac_bl.h b/include/video/tz1090_auxdac_bl.h
new file mode 100644
index 0000000..7b73f26
--- /dev/null
+++ b/include/video/tz1090_auxdac_bl.h
@@ -0,0 +1,26 @@
+/*
+ * TZ1090 Aux-DAC based Backlight Driver
+ *
+ * Copyright (C) 2012 Imagination Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _TZ1090_AUXDAC_BL_H_
+#define _TZ1090_AUXDAC_BL_H_
+
+struct tz1090_auxdac_bl_pdata;
+struct fb_info;
+
+struct tz1090_auxdac_bl_pdata {
+	int	(*match_fb)(struct tz1090_auxdac_bl_pdata *, struct fb_info *);
+	void	(*set_bl_power)(int power); /* FB_BLANK_* */
+
+	const char *name;
+	int default_intensity;
+};
+
+#endif /* _TZ1090_AUXDAC_BL_H_ */
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 145bb4d..8592e1c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -364,7 +364,7 @@
 	return ret;
 }
 
-#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#ifdef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 #define TASKSTATS_NEEDS_PADDING 1
 #endif
 
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 9c22317..8c1e631 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -249,7 +249,7 @@
 		strcmp(".spinlock.text", txtname) == 0 ||
 		strcmp(".irqentry.text", txtname) == 0 ||
 		strcmp(".kprobes.text", txtname) == 0 ||
-		strcmp(".text.unlikely", txtname) == 0;
+		strncmp(".text.", txtname, 6) == 0;
 }
 
 /* 32 bit and 64 bit are very similar */
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 858966a..28e0955 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -135,9 +135,13 @@
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
      ".kprobes.text" => 1,
-     ".text.unlikely" => 1,
 );
 
+sub is_valid_section
+{
+    return defined($text_sections{$1}) || $1 =~ m/^\.text\./;
+}
+
 # Note: we are nice to C-programmers here, thus we skip the '||='-idiom.
 $objdump = 'objdump' if (!$objdump);
 $objcopy = 'objcopy' if (!$objcopy);
@@ -490,7 +494,7 @@
 	$read_headers = 0;
 
 	# Only record text sections that we know are safe
-	$read_function = defined($text_sections{$1});
+	$read_function = is_valid_section($1);
 	# print out any recorded offsets
 	update_funcs();
 
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 9e675c7..28c624b 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -37,6 +37,7 @@
 source "sound/soc/atmel/Kconfig"
 source "sound/soc/au1x/Kconfig"
 source "sound/soc/blackfin/Kconfig"
+source "sound/soc/chorus2/Kconfig"
 source "sound/soc/cirrus/Kconfig"
 source "sound/soc/davinci/Kconfig"
 source "sound/soc/dwc/Kconfig"
@@ -52,6 +53,7 @@
 source "sound/soc/s6000/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/tegra/Kconfig"
+source "sound/soc/tz1090/Kconfig"
 source "sound/soc/txx9/Kconfig"
 source "sound/soc/ux500/Kconfig"
 
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 197b6ae..2a066a3 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_SND_SOC)	+= atmel/
 obj-$(CONFIG_SND_SOC)	+= au1x/
 obj-$(CONFIG_SND_SOC)	+= blackfin/
+obj-$(CONFIG_SND_SOC)	+= chorus2/
 obj-$(CONFIG_SND_SOC)	+= cirrus/
 obj-$(CONFIG_SND_SOC)	+= davinci/
 obj-$(CONFIG_SND_SOC)	+= dwc/
@@ -30,5 +31,6 @@
 obj-$(CONFIG_SND_SOC)	+= s6000/
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= tegra/
+obj-$(CONFIG_SND_SOC)	+= tz1090/
 obj-$(CONFIG_SND_SOC)	+= txx9/
 obj-$(CONFIG_SND_SOC)	+= ux500/
diff --git a/sound/soc/chorus2/Kconfig b/sound/soc/chorus2/Kconfig
new file mode 100644
index 0000000..2e81376
--- /dev/null
+++ b/sound/soc/chorus2/Kconfig
@@ -0,0 +1,18 @@
+config SND_CHORUS2_SOC
+	tristate "SoC Audio for the Frontier Silicon Chorus2 Chip"
+	depends on SOC_CHORUS2
+	help
+	  Say Y or M if you want to add support for codecs attached to the
+	  Chorus2 I2S Audio out interface.
+
+config SND_CHORUS2_SOC_I2S
+	tristate
+
+config SND_CHORUS2_SOC_ATP_DP
+	tristate "SoC Audio support for ATP-DP Base Layer"
+	depends on SOC_CHORUS2
+	select SND_CHORUS2_SOC
+	select SND_CHORUS2_SOC_I2S
+	select SND_SOC_WM8727
+	help
+	  Say Y if you want Audio support for your Metamorph ATP-DP board.
diff --git a/sound/soc/chorus2/Makefile b/sound/soc/chorus2/Makefile
new file mode 100644
index 0000000..e1d3e32
--- /dev/null
+++ b/sound/soc/chorus2/Makefile
@@ -0,0 +1,13 @@
+#Chorus2 Platform support
+snd-soc-chorus2-objs := chorus2-pcm.o
+snd-soc-chorus2-i2s-objs := chorus2-i2s.o
+
+obj-$(CONFIG_SND_CHORUS2_SOC) += snd-soc-chorus2.o
+obj-$(CONFIG_SND_CHORUS2_SOC_I2S) += snd-soc-chorus2-i2s.o
+
+#Chorus2 Machine support
+snd-soc-atp-dp-objs := atp-dp.o
+
+obj-$(CONFIG_SND_CHORUS2_SOC_ATP_DP) += snd-soc-atp-dp.o
+
+
diff --git a/sound/soc/chorus2/atp-dp.c b/sound/soc/chorus2/atp-dp.c
new file mode 100644
index 0000000..d353f23
--- /dev/null
+++ b/sound/soc/chorus2/atp-dp.c
@@ -0,0 +1,140 @@
+/*
+ *  atp_dp.c
+ *
+ *  atp_dp (Metamorph) ASOC Audio Support
+ *
+ *  Copyright:	(C) 2009 Imagination Technologies
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "chorus2-pcm.h"
+#include "chorus2-i2s.h"
+
+
+
+static int atp_dp_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+
+
+	/* set codec DAI configuration */
+
+		/*Simple codec (non configurable) */
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+
+		/*Simple codec (non configurable) */
+
+	/* set the I2S system clock  */
+
+	/*  if we support clock setup methods ...
+	ret = snd_soc_dai_set_sysclk(cpu_dai, ...);
+	if (ret < 0)
+		return ret;
+	*/
+
+	return 0;
+}
+
+static struct snd_soc_ops atp_dp_ops = {
+	.hw_params = atp_dp_hw_params,
+};
+
+
+
+/*
+ * ATP-DP digital audio interface glue - connects codec <--> CPU
+ *
+ * Note the codec used on the ATP-DP base board (WM8727) is a simple I2S
+ * Based DAC, without a configuration interface.
+ */
+static struct snd_soc_dai_link atp_dp_dai = {
+	.name = "wm8727",
+	.stream_name = "Playback",
+	.cpu_dai_name = "chorus2-i2s.0",
+	.codec_name = "wm8727-codec.0",
+	.codec_dai_name = "wm8727-hifi",
+	.platform_name = "chorus2-pcm-audio",
+	.ops = &atp_dp_ops,
+};
+
+/*ATP-DP audio machine driver */
+static struct snd_soc_card snd_soc_atp_dp = {
+	.name = "ATP-DP-Audio",
+	.dai_link = &atp_dp_dai,
+	.num_links = 1,
+};
+
+
+/* Chorus2 i2s platform device */
+static struct platform_device chorus2_i2s_platform = {
+	.name           = "chorus2-i2s",
+};
+
+/* wm8727 platform device */
+static struct platform_device wm8727_platform = {
+	.name           = "wm8727-codec",
+};
+
+static struct platform_device *audio_devices[] __initdata = {
+	&chorus2_i2s_platform,
+	&wm8727_platform,
+
+};
+
+static struct platform_device *atp_dp_snd_device;
+
+static int __init atp_dp_init(void)
+{
+	int ret;
+
+	ret = platform_add_devices(audio_devices,
+					    ARRAY_SIZE(audio_devices));
+	if (ret)
+		return ret;
+
+	atp_dp_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!atp_dp_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(atp_dp_snd_device, &snd_soc_atp_dp);
+	ret = platform_device_add(atp_dp_snd_device);
+
+	if (ret)
+		platform_device_put(atp_dp_snd_device);
+
+	return ret;
+}
+
+static void __exit atp_dp_exit(void)
+{
+	platform_device_unregister(atp_dp_snd_device);
+}
+
+module_init(atp_dp_init);
+module_exit(atp_dp_exit);
+
+/* Module information */
+MODULE_AUTHOR("Neil Jones");
+MODULE_DESCRIPTION("ALSA SoC ATP-DP");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/chorus2/chorus2-i2s.c b/sound/soc/chorus2/chorus2-i2s.c
new file mode 100644
index 0000000..d9a1a7d
--- /dev/null
+++ b/sound/soc/chorus2/chorus2-i2s.c
@@ -0,0 +1,414 @@
+/*
+ *
+ * ALSA SoC I2S  Audio Layer for Frontier Silicon Chorus2 processor
+ *
+ * Author:      Neil Jones
+ * Copyright:   (C) 2009 Imagination Technologies
+ *
+ * based on pxa2xx i2S ASoC driver
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "chorus2-pcm.h"
+#include "chorus2-i2s.h"
+
+#define I2S_OUT_PERIP_NUM		16
+#define SYS_CLK_CONTROL			0x02024024
+#define I2S_CLOCK_CONTROL		0x02000098
+
+
+static struct chorus2_pcm_dma_params chorus2_pcm_stereo_out[] =
+{
+	[0] = {
+		.name = "I2S PCM Stereo Out Chan 0",
+		.peripheral_num = I2S_OUT_PERIP_NUM,
+		.peripheral_address = I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET
+					+ 0 * _I2S_OUT_CHANS_STRIDE,
+	},
+	[1] = {
+		.name = "I2S PCM Stereo Out Chan 1",
+		.peripheral_num = I2S_OUT_PERIP_NUM,
+		.peripheral_address = I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET
+				      + 1 * _I2S_OUT_CHANS_STRIDE,
+	},
+	[2] = {
+		.name = "I2S PCM Stereo Out Chan 2",
+		.peripheral_num = I2S_OUT_PERIP_NUM,
+		.peripheral_address = I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET
+				      + 2 * _I2S_OUT_CHANS_STRIDE,
+	},
+	[3] = {
+		.name = "I2S PCM Stereo Out Chan 3",
+		.peripheral_num = I2S_OUT_PERIP_NUM,
+		.peripheral_address = I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET
+				      + 3 * _I2S_OUT_CHANS_STRIDE,
+	},
+};
+
+
+static int chorus2_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+
+	int id = cpu_dai->id;
+	int ret = 0;
+	u32 temp;
+
+	temp = I2S_OUT_RGET_CHAN_CONTROL(id);
+
+	/* Disable module before changing registers */
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_ENABLE, 0);
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+	I2S_OUT_RSET_CHAN_CONTROL(id, temp);
+	wmb();
+
+
+	/* Setup Hardware Formats: */
+	temp = I2S_OUT_RGET_CHAN_CONTROL(id);
+
+	/*left just. bit must be set to 1*/
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_MUSTB1, 1);
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	/*I2S Mode (Philips Mode)
+		1 bit clock delay from left edge of word sel*/
+	case SND_SOC_DAIFMT_I2S:
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_PH_NSY, 1);
+		break;
+	/*Right Justified Mode, data aligned with right edge of word sel.*/
+	case SND_SOC_DAIFMT_RIGHT_J:
+		ret = -EINVAL;
+		break;
+	/*
+	 *  Left justified (sony) mode data aligned to left edge of word select
+	 *  ie no delay
+	 */
+	case SND_SOC_DAIFMT_LEFT_J:
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_PH_NSY, 0);
+		break;
+
+	case SND_SOC_DAIFMT_DSP_A:	/* L data msb after FRM LRC */
+	case SND_SOC_DAIFMT_DSP_B:	/* L data msb during FRM LRC */
+	case SND_SOC_DAIFMT_AC97:	/* AC97 */
+		ret = -EINVAL;
+		break;
+
+	default:
+		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		goto out;
+
+	I2S_OUT_RSET_CHAN_CONTROL(id, temp);
+
+	/*
+	 * Setup Clocking scheme
+	 */
+
+	temp = I2S_OUT_RGET_MAIN_CONTROL();
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:	/*Interface clk & frame slave*/
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_MASTER, 0);
+		break;
+	case SND_SOC_DAIFMT_CBS_CFM:	/*Interface clk master, frame slave*/
+	case SND_SOC_DAIFMT_CBM_CFS:	/*Interface clk slave, frame master*/
+		ret = -EINVAL;
+		break;
+
+	case SND_SOC_DAIFMT_CBS_CFS:	/*Interface clk & frame master */
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_MASTER, 1);
+		break;
+	default:
+		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+
+	I2S_OUT_RSET_MAIN_CONTROL(temp);
+	wmb();
+
+	/*clock inversion options*/
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:	/* normal bit clock + frame */
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_BCLK_POL, 1);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_LEFT_POL, 0);
+		break;
+	case SND_SOC_DAIFMT_NB_IF:	/* normal BCLK + inv FRM */
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_BCLK_POL, 1);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_LEFT_POL, 1);
+		break;
+	case SND_SOC_DAIFMT_IB_NF:	/* invert BCLK + nor FRM */
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_BCLK_POL, 0);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_LEFT_POL, 0);
+		break;
+	case SND_SOC_DAIFMT_IB_IF:	/* invert BCLK + FRM */
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_BCLK_POL, 0);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_LEFT_POL, 1);
+		break;
+	}
+	/*Re-enable module - but dont set channel run bit yet*/
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_ENABLE, 1);
+
+out:
+	return ret;
+}
+
+static int chorus2_i2s_startup(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *dai)
+{
+
+	return 0;
+}
+
+static int chorus2_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+			      struct snd_soc_dai *dai)
+{
+	int ret = 0;
+	u32 temp = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			temp = I2S_OUT_RGET_CHAN_CONTROL(dai->id);
+			I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 1);
+			I2S_OUT_RSET_CHAN_CONTROL(dai->id, temp);
+			wmb();
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			temp = I2S_OUT_RGET_CHAN_CONTROL(dai->id);
+			I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+			I2S_OUT_RSET_CHAN_CONTROL(dai->id, temp);
+			wmb();
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int chorus2_i2s_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int id = cpu_dai->id;
+	u32 temp;
+
+	snd_soc_dai_set_dma_data(cpu_dai, substream,
+				 &chorus2_pcm_stereo_out[id]);
+
+	temp = I2S_OUT_RGET_CHAN_CONTROL(id);
+
+	/* Disable module before changing registers */
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_ENABLE, 0);
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+	I2S_OUT_RSET_CHAN_CONTROL(id, temp);
+	wmb();
+
+	/*Set Format*/
+	switch (params_format(params)) {
+
+	case SNDRV_PCM_FORMAT_S16_LE:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_FRAME, 2);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_PACKED, 1);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_PACKED, 1);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_FORMAT, 0);
+
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_FRAME, 2);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_PACKED, 0);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_PACKED, 0);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_FORMAT, 4);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_FRAME, 2);
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_PACKED, 0);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_PACKED, 0);
+		I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_FORMAT, 8);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: Unknown PCM format\n", __func__);
+		return -EINVAL;
+	}
+
+
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_LRDATA_POL, 0);
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_LRFORCE_DIS, 1);
+	I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_LOCK_DIS, 1);
+
+	/*write back channel settings*/
+	I2S_OUT_RSET_CHAN_CONTROL(id, temp);
+	/*enable bit clock */
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_BCLK_EN, 1);
+	/* we only support 256fs */
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_ACLK_SEL, 0);
+	/* Set Sample Rate */
+	switch (params_rate(params)) {
+
+	case 32000:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_ACLK_SEL, 1);
+		iowrite32(1, (void *)SYS_CLK_CONTROL); /*12.288MHz*/
+		iowrite32(4, (void *)I2S_CLOCK_CONTROL); /*12.288MHz*/
+		break;
+	case 48000:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_ACLK_SEL, 0);
+		iowrite32(1, (void *)SYS_CLK_CONTROL); /*12.288MHz*/
+		iowrite32(4, (void *)I2S_CLOCK_CONTROL); /*12.288MHz*/
+		break;
+	case 64000:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_ACLK_SEL, 1);
+		iowrite32(0, (void *)SYS_CLK_CONTROL);/*24.576MHz*/
+		iowrite32(5, (void *)I2S_CLOCK_CONTROL); /*24.576MHz*/
+		break;
+	case 96000:
+		I2S_OUT_SET_REG_FIELD(I2S_OUT_ACLK_SEL, 0);
+		iowrite32(0, (void *)SYS_CLK_CONTROL);/*24.576MHz*/
+		iowrite32(5, (void *)I2S_CLOCK_CONTROL); /*24.576MHz*/
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	wmb();
+
+	/*Re-enable module - but dont set channel run bit yet*/
+	I2S_OUT_SET_REG_FIELD(I2S_OUT_ENABLE, 1);
+	wmb();
+
+
+	return 0;
+}
+
+static void chorus2_i2s_shutdown(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+
+}
+
+
+#ifdef CONFIG_PM
+static int chorus2_i2s_suspend(struct snd_soc_dai *dai)
+{
+	/* TODO:
+	 *	It should be possible to disable the clocks to
+	 *	the I2S OUT module
+	 */
+	return 0;
+}
+
+static int chorus2_i2s_resume(struct snd_soc_dai *dai)
+{
+	/* TODO*/
+	return 0;
+}
+#endif
+
+#define CHORUS2_I2S_RATES (SNDRV_PCM_RATE_32000 |   \
+				SNDRV_PCM_RATE_48000 |  \
+				SNDRV_PCM_RATE_64000 |  \
+				SNDRV_PCM_RATE_96000)
+
+#define CHORUS2_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops chorus2_i2s_dai_ops = {
+	.startup	= chorus2_i2s_startup,
+	.shutdown	= chorus2_i2s_shutdown,
+	.trigger	= chorus2_i2s_trigger,
+	.hw_params	= chorus2_i2s_hw_params,
+	.set_fmt	= chorus2_i2s_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver chorus2_i2s_dai = {
+	.name = "chorus2-i2s",
+	.id = 0,
+#ifdef CONFIG_PM
+	.suspend = chorus2_i2s_suspend,
+	.resume = chorus2_i2s_resume,
+#endif
+	.playback = {
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = CHORUS2_I2S_RATES,
+		.formats = CHORUS2_FORMATS,},
+	.ops = &chorus2_i2s_dai_ops,
+};
+EXPORT_SYMBOL_GPL(chorus2_i2s_dai);
+
+
+static int chorus2_i2s_platform_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = snd_soc_register_dai(&pdev->dev, &chorus2_i2s_dai);
+
+	return ret;
+}
+
+static int chorus2_i2s_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver chorus2_i2s_driver = {
+	.probe = chorus2_i2s_platform_probe,
+	.remove = chorus2_i2s_platform_remove,
+
+	.driver = {
+		.name = "chorus2-i2s",
+		.owner = THIS_MODULE,
+	},
+};
+
+
+static int __init chorus_i2s_init(void)
+{
+	return platform_driver_register(&chorus2_i2s_driver);
+}
+module_init(chorus_i2s_init);
+
+static void __exit chorus2_i2s_exit(void)
+{
+	platform_driver_unregister(&chorus2_i2s_driver);
+}
+module_exit(chorus2_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Neil Jones");
+MODULE_DESCRIPTION("I2S driver for Frontier Silicon Chorus2 Soc");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/chorus2/chorus2-i2s.h b/sound/soc/chorus2/chorus2-i2s.h
new file mode 100644
index 0000000..bd480eb
--- /dev/null
+++ b/sound/soc/chorus2/chorus2-i2s.h
@@ -0,0 +1,168 @@
+/*
+ * chorus2-i2s.h
+ *
+ */
+#ifndef CHORUS2I2S_H_
+#define CHORUS2I2S_H_
+
+#include <linux/io.h>
+
+extern struct snd_soc_dai_driver chorus2_i2s_dai;
+
+#define I2S_OUT_BASE_ADDR		0x02017000
+
+#define _I2S_OUT_INTERLEAVE_DATA_OFFSET 0x00
+
+#define	_I2S_OUT_CONTROL_OFFSET		0x04
+
+#define _I2S_OUT_ACTIVE_CHAN_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ACTIVE_CHAN_SHIFT	12
+#define _I2S_OUT_ACTIVE_CHAN_MASK	0x0000F000
+
+#define _I2S_OUT_FRAME_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_FRAME_SHIFT		7
+#define _I2S_OUT_FRAME_MASK		0x00000180
+
+#define _I2S_OUT_MASTER_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_MASTER_SHIFT		6
+#define _I2S_OUT_MASTER_MASK		0x00000040
+
+#define _I2S_OUT_ACLK_SEL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ACLK_SEL_SHIFT		5
+#define _I2S_OUT_ACLK_SEL_MASK		0x00000020
+
+#define _I2S_OUT_BCLK_EN_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_BCLK_EN_SHIFT		4
+#define _I2S_OUT_BCLK_EN_MASK		0x00000010
+
+#define _I2S_OUT_LEFT_POL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_LEFT_POL_SHIFT		3
+#define _I2S_OUT_LEFT_POL_MASK		0x00000008
+
+#define _I2S_OUT_BCLK_POL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_BCLK_POL_SHIFT		2
+#define _I2S_OUT_BCLK_POL_MASK		0x00000004
+
+#define _I2S_OUT_PACKED_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_PACKED_SHIFT		1
+#define _I2S_OUT_PACKED_MASK		0x00000002
+
+#define _I2S_OUT_ENABLE_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ENABLE_SHIFT		0
+#define _I2S_OUT_ENABLE_MASK		0x00000001
+
+#define _I2S_OUT_SOFT_RESET_OFFSET	0x08
+#define _I2S_OUT_SOFT_RESET_SHIFT	0
+#define _I2S_OUT_SOFT_RESET_MASK	0x00000001
+
+#define _I2S_OUT_CHANS_OFFSET		0x80
+#define _I2S_OUT_CHANS_STRIDE		0x20
+
+#define _I2S_OUT_CHAN_DATA_OFFSET	0x00
+
+#define _I2S_OUT_CHAN_CTRL_OFFSET	0x04
+
+#define _I2S_OUT_CHAN_LRDATA_POL_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LRDATA_POL_SHIFT	12
+#define _I2S_OUT_CHAN_LRDATA_POL_MASK	0x00001000
+
+#define _I2S_OUT_CHAN_LRFORCE_DIS_OFFSET _I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LRFORCE_DIS_SHIFT	11
+#define _I2S_OUT_CHAN_LRFORCE_DIS_MASK	0x00000800
+
+#define _I2S_OUT_CHAN_LOCK_DIS_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LOCK_DIS_SHIFT	10
+#define _I2S_OUT_CHAN_LOCK_DIS_MASK	0x00000400
+
+#define _I2S_OUT_CHAN_REPEAT_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_REPEAT_SHIFT	9
+#define _I2S_OUT_CHAN_REPEAT_MASK	0x00000200
+
+#define _I2S_OUT_CHAN_PACKED_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_PACKED_SHIFT	8
+#define _I2S_OUT_CHAN_PACKED_MASK	0x00000100
+
+#define _I2S_OUT_CHAN_FORMAT_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_FORMAT_SHIFT	7
+#define _I2S_OUT_CHAN_FORMAT_MASK	0x000000F0
+
+#define _I2S_OUT_CHAN_MUSTB1_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_MUSTB1_SHIFT	3
+#define _I2S_OUT_CHAN_MUSTB1_MASK	0x00000008
+
+#define _I2S_OUT_CHAN_FLUSH_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_FLUSH_SHIFT	2
+#define _I2S_OUT_CHAN_FLUSH_MASK	0x00000004
+
+#define _I2S_OUT_CHAN_PH_NSY_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_PH_NSY_SHIFT	1
+#define _I2S_OUT_CHAN_PH_NSY_MASK	0x00000002
+
+#define _I2S_OUT_CHAN_RUN_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_RUN_SHIFT		0
+#define _I2S_OUT_CHAN_RUN_MASK		0x00000001
+
+#define _I2S_OUT_CHAN_I_STATUS_OFFSET	0x08
+
+#define _I2S_OUT_CHAN_I_ENABLE_OFFSET	0x0C
+
+#define _I2S_OUT_CHAN_I_CLEAR_OFFSET	0x10
+
+#define _I2S_OUT_SAMPLE_COUNT_OFFSET 	0x1C
+
+
+/*Helper Macros (dont use outside of this file) */
+#define _REG_ADDRESS(REG) _##REG##_OFFSET
+#define _REG_MASK(REG) _##REG##_MASK
+#define _REG_SHIFT(REG) _##REG##_SHIFT
+
+#define I2S_OUT_WRITE_REG(REG, value)	\
+	iowrite32(value, (void *)I2S_OUT_BASE_ADDR + _REG_ADDRESS(REG))
+
+#define I2S_OUT_READ_REG(REG)	\
+	ioread32((void *)I2S_OUT_BASE_ADDR + _REG_ADDRESS(REG))
+
+#define I2S_OUT_GET_REG_FIELD(REG) \
+	((I2S_OUT_READ_REG(REG) & _REG_MASK(REG)) >> _REG_SHIFT(REG))
+
+
+/* Helper to be used externally */
+#define I2S_OUT_SET_REG_FIELD(REG, value) \
+{\
+	u32 temp = I2S_OUT_READ_REG(REG); \
+	I2S_OUT_SET_FIELD(temp, REG, value);\
+	I2S_OUT_WRITE_REG(REG, temp);\
+}
+
+#define I2S_OUT_SET_FIELD(data, FIELD, value) \
+{\
+	data &= ~_REG_MASK(FIELD);\
+	data |=  value << _REG_SHIFT(FIELD);\
+}
+
+static inline void I2S_OUT_RSET_CHAN_CONTROL(int channel, u32 value)
+{
+	iowrite32(value, (void *)I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET +
+			channel * _I2S_OUT_CHANS_STRIDE +
+			_REG_ADDRESS(I2S_OUT_CHAN_CTRL));
+}
+
+static inline u32 I2S_OUT_RGET_CHAN_CONTROL(int channel)
+{
+	return ioread32((void *)I2S_OUT_BASE_ADDR + _I2S_OUT_CHANS_OFFSET +
+			channel * _I2S_OUT_CHANS_STRIDE +
+			_REG_ADDRESS(I2S_OUT_CHAN_CTRL));
+}
+static inline void I2S_OUT_RSET_MAIN_CONTROL(u32 value)
+{
+	iowrite32(value, (void *)I2S_OUT_BASE_ADDR +
+			_REG_ADDRESS(I2S_OUT_CONTROL));
+}
+
+static inline u32 I2S_OUT_RGET_MAIN_CONTROL(void)
+{
+	return ioread32((void *)I2S_OUT_BASE_ADDR +
+			_REG_ADDRESS(I2S_OUT_CONTROL));
+}
+
+#endif /* CHORUS2I2S_H_ */
diff --git a/sound/soc/chorus2/chorus2-pcm.c b/sound/soc/chorus2/chorus2-pcm.c
new file mode 100644
index 0000000..6958af3
--- /dev/null
+++ b/sound/soc/chorus2/chorus2-pcm.c
@@ -0,0 +1,540 @@
+/*
+ * chorus2-pcm.c
+ *
+ * ALSA PCM interface for the Frontier Silicon Chorus2 chip
+ *
+ * Copyright:	(C) 2009 Imagination Technologies
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include <asm/img_dma.h>
+#include <asm/img_dmac.h>
+#include <asm/soc-chorus2/dma.h>
+
+#include "chorus2-pcm.h"
+
+#define BURST_SIZE 2
+#define DMA_LIST_COUNT_REG 0x02001044
+
+
+static const struct snd_pcm_hardware chorus2_pcm_hardware = {
+	.info			= SNDRV_PCM_INFO_MMAP |
+				  SNDRV_PCM_INFO_MMAP_VALID |
+				  SNDRV_PCM_INFO_INTERLEAVED |
+				  SNDRV_PCM_INFO_PAUSE |
+				  SNDRV_PCM_INFO_RESUME,
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+				  SNDRV_PCM_FMTBIT_S24_LE,
+
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.period_bytes_min	= 32,
+	.period_bytes_max	= 8192,
+	.periods_min		= 1,
+	.periods_max		= PAGE_SIZE/sizeof(struct img_dmac_desc),
+	.buffer_bytes_max	= 128 * 1024,
+};
+
+static int __chorus2_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd = runtime->private_data;
+	struct img_dmac_desc *dma_desc;
+	dma_addr_t dma_buff_phys, next_desc_phys;
+	u32 dma_perip_addr = (crtd->params->peripheral_address & 0xFFFFFF) >> 2;
+
+	/*total buffer size*/
+	size_t totsize = params_buffer_bytes(params);
+	/*amount to transfer between ints (ie size per each list node)*/
+	size_t period = params_period_bytes(params);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	runtime->dma_bytes = totsize;
+
+	dma_desc = (struct img_dmac_desc *)crtd->dma_list; /*base address of dma list*/
+	next_desc_phys = crtd->dma_list_phys;
+	dma_buff_phys = runtime->dma_addr;
+
+
+
+	/*fill out dma list*/
+	do {
+		next_desc_phys += sizeof(struct img_dmac_desc);
+
+		dma_desc->perip_setup = DMAC_LIST_PW_32;
+		dma_desc->len_ints = DMAC_LIST_INT_BIT | (period/4);
+		dma_desc->perip_address = dma_perip_addr;
+		dma_desc->burst =  (BURST_SIZE << DMAC_LIST_BURST_S) |
+				   (0 << DMAC_LIST_ACC_DELAY_S);
+		dma_desc->twod = 0;
+		dma_desc->twod_addr = 0;
+		BUG_ON(dma_buff_phys & 0x7);
+		dma_desc->data_addr = dma_buff_phys;
+		dma_desc->next = next_desc_phys;
+
+		if (period > totsize)
+			period = totsize;
+
+		dma_desc++;
+		dma_buff_phys += period;
+	} while (totsize -= period);
+
+	/*point the last list node back to the start to get an infinite loop*/
+	dma_desc[-1].next = crtd->dma_list_phys;
+
+	/*setup list base pointer*/
+	img_dma_set_list_addr(crtd->dma_ch, crtd->dma_list_phys);
+
+	return 0;
+}
+
+
+static int chorus2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct chorus2_runtime_data *crtd = substream->runtime->private_data;
+	int ret = 0;
+
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtd->lock, flags);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		/* Reset DMA list */
+		img_dma_set_list_addr(crtd->dma_ch, crtd->dma_list_phys);
+		crtd->period_index = 0;
+		crtd->start_delay = 1;
+		/* Start DMA */
+		img_dma_start_list(crtd->dma_ch);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		/*Stop DMA*/
+		img_dma_stop_list(crtd->dma_ch);
+		break;
+
+	case SNDRV_PCM_TRIGGER_RESUME:
+		img_dma_start_list(crtd->dma_ch);
+		/* Restart DMA*/
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/*Reset DMA list*/
+		/*Restart DMA*/
+		img_dma_start_list(crtd->dma_ch);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&crtd->lock, flags);
+
+	return ret;
+}
+
+/*
+ *  This callback is called when the PCM middle layer inquires the current
+ *  hardware position on the buffer. The position must be returned in frames,
+ *  ranging from 0 to buffer_size - 1. This is called usually from the
+ *  buffer-update routine in the pcm middle layer, which is invoked when
+ *  snd_pcm_period_elapsed() is called in the interrupt routine. Then the pcm
+ *  middle layer updates the position and calculates the available space, and
+ *  wakes up the sleeping poll threads, etc.
+ */
+static snd_pcm_uframes_t
+chorus2_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd = runtime->private_data;
+	snd_pcm_uframes_t offset;
+	size_t base_offset;
+
+	/*
+	 * calculate the offset based on which period is active
+	 */
+	base_offset = frames_to_bytes(runtime,
+			crtd->period_index  * runtime->period_size);
+
+	offset = bytes_to_frames(runtime, base_offset);
+
+
+	return offset;
+}
+
+/*
+ * This is called when the PCM is "prepared", formats + sample rates can be set
+ * here, it differs to hw_params as id called whilst playing/recording used to
+ * recover from underruns etc.
+ *
+ * NB THIS MUST BE ATOMIC
+ */
+static int chorus2_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+
+static irqreturn_t chorus2_dma_irq(int irq_num, void *dev_id)
+{
+	struct snd_pcm_substream *substream = dev_id;
+	struct chorus2_runtime_data *crtd = substream->runtime->private_data;
+	unsigned long flags;
+	u32 int_status;
+
+	img_dma_get_int_status(crtd->dma_ch, &int_status);
+
+	/*Sometime we get an irq with no status when stopping a dma list*/
+	if (!int_status)
+		return IRQ_HANDLED;
+
+	if (int_status & (1<<20)) {
+		int_status = 0;
+		img_dma_set_int_status(crtd->dma_ch, int_status);
+
+		/*
+		 * note the interrupt is generated when a list node is loaded
+		 * not when its finished thus we get an interrupt at the start
+		 * before we have transfered anything, we must ignore this or
+		 * our buffer calculation will be out.
+		 */
+
+		if (crtd->start_delay)
+			crtd->start_delay = 0;
+		else {
+			spin_lock_irqsave(&crtd->lock, flags);
+			if (crtd->period_index >= 0) {
+				if (++crtd->period_index == substream->runtime->periods)
+					crtd->period_index = 0;
+			}
+			spin_unlock_irqrestore(&crtd->lock, flags);
+
+			/* Callback to PCM middle layer */
+			snd_pcm_period_elapsed(substream);
+		}
+	} else {
+		printk(KERN_ERR "%s: DMA error on channel %d "
+			"Bad IRQ status on IRQ %d\n",
+			crtd->params->name, crtd->dma_ch, irq_num);
+		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+	}
+
+
+	return IRQ_HANDLED;
+}
+
+
+static unsigned int rates[] = {32000, 48000, 64000, 96000};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,
+ };
+
+
+/*Called on stream open */
+static int chorus2_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd;
+	int ret;
+
+	snd_soc_set_runtime_hwparams(substream, &chorus2_pcm_hardware);
+
+	/*
+	 * Setup any constraints / rules here,
+	 */
+
+	ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
+						SNDRV_PCM_HW_PARAM_RATE,
+						&constraints_rates);
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+						SNDRV_PCM_HW_PARAM_PERIODS);
+
+	/* allocate room for private data */
+	ret = -ENOMEM;
+	crtd = kzalloc(sizeof(*crtd), GFP_KERNEL);
+	if (!crtd)
+		goto out;
+
+	spin_lock_init(&crtd->lock);
+
+	/* Create mappings to DMA descriptors */
+	crtd->dma_list =
+		dma_alloc_coherent(substream->pcm->card->dev, PAGE_SIZE,
+					&crtd->dma_list_phys, GFP_KERNEL);
+	if (!crtd->dma_list)
+		goto err1;
+
+
+	/* Set our private data field */
+	runtime->private_data = crtd;
+
+	return 0;
+
+ err1:
+	kfree(crtd);
+ out:
+	return ret;
+}
+
+/*Called on stream close */
+static int chorus2_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd = runtime->private_data;
+
+	/*Free mapping to DMA descriptor list */
+	dma_free_coherent(substream->pcm->card->dev, PAGE_SIZE,
+			crtd->dma_list, crtd->dma_list_phys);
+	kfree(crtd);
+	return 0;
+}
+
+
+static int chorus2_pcm_mmap(struct snd_pcm_substream *substream,
+	struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+				     runtime->dma_area,
+				     runtime->dma_addr,
+				     runtime->dma_bytes);
+}
+
+static int chorus2_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+	struct snd_dma_buffer *buf = &substream->dma_buffer;
+	size_t size = chorus2_pcm_hardware.buffer_bytes_max;
+	buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	buf->dev.dev = pcm->card->dev;
+	buf->private_data = NULL;
+	buf->area = dma_alloc_coherent(pcm->card->dev, size,
+					   &buf->addr, GFP_KERNEL);
+	if (!buf->area)
+		return -ENOMEM;
+	buf->bytes = size;
+	return 0;
+}
+
+
+static void chorus2_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+	struct snd_pcm_substream *substream;
+	struct snd_dma_buffer *buf;
+	int stream;
+
+	for (stream = 0; stream < 2; stream++) {
+		substream = pcm->streams[stream].substream;
+		if (!substream)
+			continue;
+		buf = &substream->dma_buffer;
+		if (!buf->area)
+			continue;
+		dma_free_coherent(pcm->card->dev, buf->bytes,
+				      buf->area, buf->addr);
+		buf->area = NULL;
+	}
+}
+
+
+
+static int initialise_dma_hw(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd = runtime->private_data;
+	u32 dma_perip_addr = (crtd->params->peripheral_address & 0xFFFFFF);
+
+	int chan, ret;
+
+	chan = img_request_dma(-1, crtd->params->peripheral_num);
+	if (chan < 0) {
+		ret = chan;
+		goto out;
+	}
+	crtd->dma_ch = chan;
+
+	crtd->irq_num = img_dma_get_irq(chan) + META_IRQS;
+
+	ret = request_irq(crtd->irq_num, chorus2_dma_irq, 0,
+				"pcm-dma", substream);
+	if (ret < 0)
+		goto out;
+
+	ret = chan;
+
+	img_dma_set_io_address(crtd->dma_ch, dma_perip_addr, BURST_SIZE);
+
+
+out:
+	return ret;
+}
+
+/*
+ * This is called when hw_params is setup by an app.
+ * hence buffer sizes, period and formats should be setup, so
+ * we can setup our buffers and DMA
+ */
+static int chorus2_pcm_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct chorus2_runtime_data *crtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct chorus2_pcm_dma_params *dma;
+	int ret = 0;
+
+	dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+	/* return if this is a bufferless transfer */
+	if (!dma)
+		goto out;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	runtime->dma_bytes = params_buffer_bytes(params);
+
+	/* this may get called several times by oss emulation
+	 * with different params */
+	if (!crtd->params) {
+		crtd->params = dma;
+		ret = initialise_dma_hw(substream);
+		if (ret < 0)
+			goto out;
+		crtd->dma_ch = ret;
+
+	} else if (crtd->params != dma) {
+		img_free_dma(crtd->dma_ch);
+		crtd->params = dma;
+		ret = initialise_dma_hw(substream);
+		if (ret < 0)
+			goto out;
+		crtd->dma_ch = ret;
+	}
+
+	ret = 0;
+	return __chorus2_pcm_hw_params(substream, params);
+out:
+	return ret;
+}
+
+/*release resources allocated in hw_params*/
+static int chorus2_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	struct chorus2_runtime_data *crtd = substream->runtime->private_data;
+
+	img_dma_reset(crtd->dma_ch);
+
+	snd_pcm_set_runtime_buffer(substream, NULL);
+
+	if (crtd->dma_ch >= 0) {
+		img_free_dma(crtd->dma_ch);
+		crtd->dma_ch = -1;
+		crtd->params = 0;
+	}
+
+	if (crtd->irq_num > 0) {
+		free_irq(crtd->irq_num, substream);
+		crtd->irq_num = -1;
+	}
+
+	return 0;
+}
+
+static struct snd_pcm_ops chorus2_pcm_ops = {
+	.open		= chorus2_pcm_open,
+	.close		= chorus2_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= chorus2_pcm_hw_params,
+	.hw_free	= chorus2_pcm_hw_free,
+	.prepare	= chorus2_pcm_prepare,
+	.trigger	= chorus2_pcm_trigger,
+	.pointer	= chorus2_pcm_pointer,
+	.mmap		= chorus2_pcm_mmap,
+};
+
+static u64 chorus2_pcm_dmamask = DMA_BIT_MASK(64);
+
+static int chorus2_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	struct snd_pcm *pcm = rtd->pcm;
+	int ret = 0;
+
+	if (!card->dev->dma_mask)
+		card->dev->dma_mask = &chorus2_pcm_dmamask;
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ret = chorus2_pcm_preallocate_dma_buffer(pcm,
+				SNDRV_PCM_STREAM_PLAYBACK);
+		if (ret)
+			goto out;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		dev_err(card->dev, "Audio Capture (Recording) not supported");
+		ret = -EINVAL;
+		goto out;
+	}
+ out:
+	return ret;
+}
+
+struct snd_soc_platform_driver chorus2_soc_platform = {
+	.ops		= &chorus2_pcm_ops,
+	.pcm_new	= chorus2_soc_pcm_new,
+	.pcm_free	= chorus2_pcm_free_dma_buffers,
+};
+EXPORT_SYMBOL_GPL(chorus2_soc_platform);
+
+static int chorus2_soc_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev, &chorus2_soc_platform);
+}
+
+static int chorus2_soc_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver chorus2_pcm_driver = {
+	.driver = {
+			.name = "chorus2-pcm-audio",
+			.owner = THIS_MODULE,
+	},
+
+	.probe = chorus2_soc_platform_probe,
+	.remove = chorus2_soc_platform_remove,
+};
+static int __init chorus2_soc_platform_init(void)
+{
+	return platform_driver_register(&chorus2_pcm_driver);
+}
+module_init(chorus2_soc_platform_init);
+
+static void __exit chorus2_soc_platform_exit(void)
+{
+	platform_driver_unregister(&chorus2_pcm_driver);
+}
+module_exit(chorus2_soc_platform_exit);
+
+MODULE_AUTHOR("Neil Jones");
+MODULE_DESCRIPTION("Frontier Silicon Chorus2 PCM Module");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/chorus2/chorus2-pcm.h b/sound/soc/chorus2/chorus2-pcm.h
new file mode 100644
index 0000000..e647cc7
--- /dev/null
+++ b/sound/soc/chorus2/chorus2-pcm.h
@@ -0,0 +1,31 @@
+/*
+ * chorus2-pcm.h
+ */
+
+#ifndef CHORUS2PCM_H_
+#define CHORUS2PCM_H_
+
+#include <asm/soc-chorus2/dma.h>
+#include <linux/spinlock.h>
+
+extern struct snd_soc_platform_driver chorus2_soc_platform;
+
+struct chorus2_pcm_dma_params {
+	const char *name;
+	int peripheral_num;
+	int peripheral_address;
+};
+
+struct chorus2_runtime_data {
+	int dma_ch;
+	int irq_num;
+	struct chorus2_pcm_dma_params *params;
+	struct chorus2_dma_desc *dma_list;
+	dma_addr_t dma_list_phys;
+	int period_index;
+	int start_delay;
+	spinlock_t lock;
+
+};
+
+#endif /* CHORUS2PCM_H_ */
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 2f45f00..cb8a086 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -64,6 +64,7 @@
 	select SND_SOC_STA32X if I2C
 	select SND_SOC_STA529 if I2C
 	select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
+	select SND_SOC_TANSEN if SOC_TZ1090
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TLV320AIC23 if I2C
 	select SND_SOC_TLV320AIC26 if SPI_MASTER
@@ -325,6 +326,9 @@
 config SND_SOC_STAC9766
 	tristate
 
+config SND_SOC_TANSEN
+	tristate
+
 config SND_SOC_TAS5086
 	tristate
 
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b9e41c9..7ff5d12 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -56,6 +56,7 @@
 snd-soc-sta32x-objs := sta32x.o
 snd-soc-sta529-objs := sta529.o
 snd-soc-stac9766-objs := stac9766.o
+snd-soc-tansen-objs := tansen.o tansen_gti.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tlv320aic23-objs := tlv320aic23.o
 snd-soc-tlv320aic26-objs := tlv320aic26.o
@@ -180,6 +181,7 @@
 obj-$(CONFIG_SND_SOC_STA32X)   += snd-soc-sta32x.o
 obj-$(CONFIG_SND_SOC_STA529)   += snd-soc-sta529.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
+obj-$(CONFIG_SND_SOC_TANSEN)	+= snd-soc-tansen.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23)	+= snd-soc-tlv320aic23.o
 obj-$(CONFIG_SND_SOC_TLV320AIC26)	+= snd-soc-tlv320aic26.o
diff --git a/sound/soc/codecs/tansen.c b/sound/soc/codecs/tansen.c
new file mode 100644
index 0000000..1933f0e
--- /dev/null
+++ b/sound/soc/codecs/tansen.c
@@ -0,0 +1,550 @@
+/*
+ * tansen.c - CosmicCircuits internal DAC
+ *
+ * Copyright (C) 2010-2013 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tansen.h>
+#include <sound/tlv.h>
+
+#include "tansen.h"
+#include "tansen_gti.h"
+
+struct tansen_priv {
+	void __iomem *ctrl;
+};
+
+static const DECLARE_TLV_DB_SCALE(mic_in, -3300, 300, 0);
+static const DECLARE_TLV_DB_SCALE(line_in, -2100, 300, 0);
+
+static const struct snd_kcontrol_new tansen_snd_controls[] = {
+SOC_DOUBLE_R("Master Playback Volume",
+	     TANSEN_TM_OP_TOP6, TANSEN_TM_OP_TOP7,
+	     0, 0xFF, 1
+	     ),
+
+SOC_DOUBLE_TLV("Line/IPOD-In Gain", TANSEN_TM_IP_TOP5, 5, 2, 0x7, 1, line_in),
+SOC_DOUBLE_TLV("Mic-In Gain", TANSEN_TM_IP_TOP4, 0, 4, 0xF, 1, mic_in),
+};
+
+static const char *tansen_input_select[] = {
+	"Mic In",
+	"Line In",
+	"Ipod In",
+};
+
+static const struct soc_enum tansen_ip_enum =
+SOC_ENUM_SINGLE(TANSEN_TM_IP_TOP2, 4, 3, tansen_input_select);
+
+/* Input mux */
+static const struct snd_kcontrol_new tansen_input_mux_controls =
+SOC_DAPM_ENUM("Input Select", tansen_ip_enum);
+
+
+static const struct snd_soc_dapm_widget tansen_dapm_widgets[] = {
+		/*All Power Control now removed*/
+SND_SOC_DAPM_DAC("DAC_L", "Playback", SND_SOC_NOPM /*TANSEN_TM_OP_TOP3*/, 5, 0),
+SND_SOC_DAPM_DAC("DAC_R", "Playback", SND_SOC_NOPM /*TANSEN_TM_OP_TOP3*/, 4, 0),
+SND_SOC_DAPM_OUTPUT("LHPOUT"),
+SND_SOC_DAPM_OUTPUT("RHPOUT"),
+SND_SOC_DAPM_ADC("ADC_L", "Capture", SND_SOC_NOPM /*TANSEN_TM_IP_TOP2*/, 2, 0),
+SND_SOC_DAPM_ADC("ADC_R", "Capture", SND_SOC_NOPM /*TANSEN_TM_IP_TOP2*/, 0, 0),
+SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &tansen_input_mux_controls),
+SND_SOC_DAPM_PGA("PGA_L", SND_SOC_NOPM /*TANSEN_TM_L_PGA3*/, 1, 1, NULL, 0),
+SND_SOC_DAPM_PGA("PGA_R", SND_SOC_NOPM /*TANSEN_TM_R_PGA3*/, 1, 1, NULL, 0),
+SND_SOC_DAPM_MICBIAS("Mic Bias", TANSEN_TM_MICBIAS_1, 4, 1),
+SND_SOC_DAPM_MIXER("Line Input", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Ipod Input", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_INPUT("MICIN"),
+SND_SOC_DAPM_INPUT("RLINEIN"),
+SND_SOC_DAPM_INPUT("LLINEIN"),
+SND_SOC_DAPM_INPUT("RIPODIN"),
+SND_SOC_DAPM_INPUT("LIPODIN")
+};
+
+
+static const struct snd_soc_dapm_route intercon[] = {
+
+	/* outputs */
+	{"RHPOUT", NULL, "DAC_L"},
+	{"LHPOUT", NULL, "DAC_R"},
+
+	/* input mux */
+	{"Input Mux", "Line In",  "Line Input"},
+	{"Input Mux", "Mic In",   "Mic Bias"},
+	{"Input Mux", "Ipod In",  "Ipod Input"},
+	{"ADC_L", NULL, "PGA_L"},
+	{"ADC_R", NULL, "PGA_R"},
+	{"PGA_L", NULL, "Input Mux"},
+	{"PGA_R", NULL, "Input Mux"},
+
+	/* inputs */
+	{"Line Input", NULL, "LLINEIN"},
+	{"Line Input", NULL, "RLINEIN"},
+	{"Ipod Input", NULL, "LIPODIN"},
+	{"Ipod Input", NULL, "RIPODIN"},
+	{"Mic Bias", NULL, "MICIN"},
+};
+
+/* these regs don't readback correctly */
+static int cache_tm_ip_top2;
+static int cache_tm_ip_top4;
+static int cache_tm_ip_top5;
+
+static int tansen_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	u32 bitwidth = 0, reg_val;
+	u32 rx_s1_regs[] = { TANSEN_TM_PWM1_S1, TANSEN_TM_PWM2_S1,
+			     TANSEN_TM_PWM3_S1 };
+	int i;
+
+	/* bit size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/*
+		 * Note this should be 3 but the Comet I2S-IN block
+		 * doesn't seem to work properly with a 16 bit input
+		 * use 24 bits and let the I2S block trim the lower
+		 * order bits.
+		 */
+		bitwidth = 1;
+		break;
+
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		bitwidth = 0;
+		break;
+
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bitwidth = 1;
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		bitwidth = 2;
+		break;
+	}
+
+	/* Update TX bitwidth (inputs) */
+	reg_val = snd_soc_read(codec, TANSEN_TM_IP_TOP6);
+	reg_val = (reg_val & ~0x3) | bitwidth ;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP6, reg_val);
+
+	/* Update RX bitwidths (outputs) */
+	for (i = 0; i < ARRAY_SIZE(rx_s1_regs);  i++) {
+		reg_val = snd_soc_read(codec, rx_s1_regs[i]);
+		reg_val = (reg_val & ~0xc0) | (bitwidth << 6);
+		snd_soc_write(codec, rx_s1_regs[i], reg_val);
+	}
+	reg_val = snd_soc_read(codec, TANSEN_TM_IP_TOP7);
+	reg_val = (reg_val & ~0x3) | bitwidth;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP7, reg_val);
+
+	return 0;
+}
+
+static int tansen_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	/*struct snd_soc_codec *codec = codec_dai->codec;*/
+
+	switch (freq) {
+	case  8192000:
+	case 12288000:
+	case 24576000:
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int tansen_set_dai_fmt(struct snd_soc_dai *codec_dai,
+			      unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u32 tx_value, rx_value, pwm_rx_value1, pwm_rx_value2, pwm_rx_value3;
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* clock and frame slave only */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	tx_value  = snd_soc_read(codec, TANSEN_TM_IP_TOP6);
+	rx_value = snd_soc_read(codec, TANSEN_TM_IP_TOP7);
+	pwm_rx_value1 = snd_soc_read(codec, TANSEN_TM_PWM1_S1);
+	pwm_rx_value2 = snd_soc_read(codec, TANSEN_TM_PWM2_S1);
+	pwm_rx_value3 = snd_soc_read(codec, TANSEN_TM_PWM3_S1);
+	tx_value &= ~0x30;
+	rx_value &= ~0x30;
+	pwm_rx_value1 &= ~0xC;
+	pwm_rx_value2 &= ~0xC;
+	pwm_rx_value3 &= ~0xC;
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		tx_value |= (0 << 4);
+		rx_value |= (0 << 4);
+		pwm_rx_value1 |= (0 << 2);
+		pwm_rx_value2 |= (0 << 2);
+		pwm_rx_value3 |= (0 << 2);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		/*
+		 * Note these two values should be set to 2 for right justify
+		 * but right justify mode for I2S In seems to be broken
+		 * so even though we are using right justify mode for I2S
+		 * out we set in to Phillips mode for in, Weirdly the top level
+		 * rx register needs Phillips mode as well, even though we are
+		 * sending right justified data!
+		 */
+		tx_value |= (0 << 4);
+		rx_value |= (0 << 4);
+
+		pwm_rx_value1 |= (2 << 2);
+		pwm_rx_value2 |= (2 << 2);
+		pwm_rx_value3 |= (2 << 2);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		tx_value |= (3 << 4);
+		rx_value |= (3 << 4);
+		pwm_rx_value1 |= (3 << 2);
+		pwm_rx_value2 |= (3 << 2);
+		pwm_rx_value3 |= (3 << 2);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_write(codec, TANSEN_TM_PWM1_S1, pwm_rx_value1);
+	snd_soc_write(codec, TANSEN_TM_PWM2_S1, pwm_rx_value2);
+	snd_soc_write(codec, TANSEN_TM_PWM3_S1, pwm_rx_value3);
+
+	pwm_rx_value1 = snd_soc_read(codec, TANSEN_TM_PWM1_S2);
+	pwm_rx_value2 = snd_soc_read(codec, TANSEN_TM_PWM2_S2);
+	pwm_rx_value3 = snd_soc_read(codec, TANSEN_TM_PWM3_S2);
+
+	rx_value &= ~0xC0;
+	pwm_rx_value1 &= ~0x3;
+	pwm_rx_value2 &= ~0x3;
+	pwm_rx_value3 &= ~0x3;
+
+	/* clock inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF: /*normal bit normal frame*/
+		rx_value |= (0 << 6);
+		pwm_rx_value1 |= 0;
+		pwm_rx_value2 |= 0;
+		pwm_rx_value3 |= 0;
+		break;
+	case SND_SOC_DAIFMT_IB_IF: /*inv. bit inv. frame*/
+		rx_value |= (3 << 6);
+		pwm_rx_value1 |= 3;
+		pwm_rx_value2 |= 3;
+		pwm_rx_value3 |= 3;
+		break;
+	case SND_SOC_DAIFMT_IB_NF: /*inv. bit normal frame*/
+		rx_value |= (2 << 6);
+		pwm_rx_value1 |= 2;
+		pwm_rx_value2 |= 2;
+		pwm_rx_value3 |= 2;
+		break;
+	case SND_SOC_DAIFMT_NB_IF: /*normal bit inv. frame*/
+		rx_value |= (1 << 6);
+		pwm_rx_value1 |= 1;
+		pwm_rx_value2 |= 1;
+		pwm_rx_value3 |= 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_write(codec, TANSEN_TM_PWM1_S2, pwm_rx_value1);
+	snd_soc_write(codec, TANSEN_TM_PWM2_S2, pwm_rx_value2);
+	snd_soc_write(codec, TANSEN_TM_PWM3_S2, pwm_rx_value3);
+	snd_soc_write(codec, TANSEN_TM_IP_TOP6, tx_value);
+	snd_soc_write(codec, TANSEN_TM_IP_TOP7, rx_value);
+
+	return 0;
+}
+
+
+static void tansen_gti_set_overides(unsigned int reg, unsigned int *value)
+{
+	/* some of the overrides get dropped when components get powered down */
+	switch (reg) {
+	case  TANSEN_TM_IP_TOP2:
+		*value |= 0x80;
+		break;
+	case TANSEN_TM_MICBIAS_1:
+		*value |= 0x20;
+		break;
+	case TANSEN_TM_OP_TOP2:
+		*value |= 0x80;
+		break;
+	case TANSEN_TM_OP_TOP3:
+		*value |= 0x40;
+		break;
+	case TANSEN_TM_R_PGA3:
+		*value |= 0x01;
+		break;
+	case TANSEN_TM_L_PGA3:
+		*value |= 0x01;
+		break;
+	case TANSEN_TM_IP_TOP3:
+		*value |= 0xF0;
+		break;
+	}
+}
+
+static unsigned int
+tansen_gti_read(struct snd_soc_codec *codec, unsigned int reg)
+{
+	switch (reg) {
+	case TANSEN_TM_IP_TOP2:
+		return cache_tm_ip_top2;
+	case TANSEN_TM_IP_TOP4:
+		return cache_tm_ip_top4;
+	case TANSEN_TM_IP_TOP5:
+		return cache_tm_ip_top5;
+	default:
+		return gti_read(((struct tansen_priv *)
+				 codec->control_data)->ctrl,
+				reg);
+	}
+}
+
+static int
+tansen_gti_write(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int value)
+{
+	tansen_gti_set_overides(reg, &value);
+
+	switch (reg) {
+	case TANSEN_TM_IP_TOP2:
+		cache_tm_ip_top2 = value;
+		break;
+	case TANSEN_TM_IP_TOP4:
+		cache_tm_ip_top4 = value;
+		break;
+	case TANSEN_TM_IP_TOP5:
+		cache_tm_ip_top5 = value;
+		break;
+	}
+
+	gti_write(((struct tansen_priv *)codec->control_data)->ctrl,
+		  reg, value);
+
+	return 0;
+}
+
+
+#define TANSEN_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
+			 SNDRV_PCM_FMTBIT_S24_LE | \
+			 SNDRV_PCM_FMTBIT_S32_LE)
+/*
+ * The DAC supports 128,256 & 512 sample rates but our I2S h/w only support 256
+ * or 384 fs, so we can only support (@ 256fs):
+ *   48KHz @ 12.288MHz MCLK
+ *   96KHz @ 24.576MHz MCLK
+ *   32KHz @ 8.192MHz MCLK
+ */
+#define TANSEN_RATES (SNDRV_PCM_RATE_32000 \
+		     | SNDRV_PCM_RATE_48000 \
+		     | SNDRV_PCM_RATE_96000)
+
+static struct snd_soc_dai_ops tansen_dai_ops = {
+	.hw_params	= tansen_hw_params,
+	.set_sysclk	= tansen_set_dai_sysclk,
+	.set_fmt	= tansen_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver tansen_dai = {
+	.name = "tansen",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 6,
+		.rates = TANSEN_RATES,
+		.formats = TANSEN_FORMATS,
+		},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = TANSEN_RATES,
+		.formats = TANSEN_FORMATS,
+		},
+
+	.ops = &tansen_dai_ops,
+};
+
+
+static int tansen_soc_probe(struct snd_soc_codec *codec)
+{
+	u32 val;
+
+	codec->control_data = dev_get_drvdata(codec->dev);
+
+	/*
+	 * Allow overriding of SoC Registers by GTI i/f for control of
+	 * Input mux, Mic bias, Input + Output gain.
+	 */
+
+	val = snd_soc_read(codec, TANSEN_TM_IP_TOP2);
+	val |= 0x90;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP2, val);
+
+
+	val = snd_soc_read(codec, TANSEN_TM_OP_TOP2);
+	val |= 0x80;
+	snd_soc_write(codec, TANSEN_TM_OP_TOP2, val);
+
+	val = snd_soc_read(codec, TANSEN_TM_IP_TOP3);
+	val |= 0xF0;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP3, val);
+
+	/* Program I2S Routing:
+	 *	 Data0 -> Chan2
+	 *	 Data1 -> Chan1
+	 *	 Data2 -> Chan3
+	 * We do this presumably because the headphone Amp is on PDM C+D
+	 */
+
+	val = snd_soc_read(codec, TANSEN_TM_PWM1_S1);
+	val &= ~0x3;
+	val |= 2;
+	snd_soc_write(codec, TANSEN_TM_PWM1_S1, val);
+	val = snd_soc_read(codec, TANSEN_TM_PWM2_S1);
+	val &= ~0x3;
+	val |= 1;
+	snd_soc_write(codec, TANSEN_TM_PWM2_S1, val);
+	val = snd_soc_read(codec, TANSEN_TM_PWM3_S1);
+	val &= ~0x3;
+	val |= 3;
+	snd_soc_write(codec, TANSEN_TM_PWM3_S1, val);
+
+	val = snd_soc_read(codec, TANSEN_TM_IP_TOP6);
+	val &= ~0x80;
+	val |= 0;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP6, val);
+
+	val = snd_soc_read(codec, TANSEN_TM_IP_TOP7);
+	val &= ~0x80;
+	val |= 0;
+	snd_soc_write(codec, TANSEN_TM_IP_TOP7, val);
+
+
+	return 0;
+}
+
+static int tansen_soc_remove(struct snd_soc_codec *codec)
+{
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tansen = {
+	.probe		= tansen_soc_probe,
+	.remove		= tansen_soc_remove,
+	.write		= tansen_gti_write,
+	.read		= tansen_gti_read,
+
+	.controls	= tansen_snd_controls,
+	.num_controls	= ARRAY_SIZE(tansen_snd_controls),
+	.dapm_widgets	= tansen_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(tansen_dapm_widgets),
+	.dapm_routes = intercon,
+	.num_dapm_routes = ARRAY_SIZE(intercon),
+
+};
+
+static const struct of_device_id tansen_of_match[] = {
+	{ .compatible = "cosmic,tansen", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, tansen_of_match);
+
+static int tansen_platform_probe(struct platform_device *pdev)
+{
+	struct resource *mem;
+	struct tansen_priv *tansen;
+	int ret;
+
+	tansen = kzalloc(sizeof(*tansen), GFP_KERNEL);
+	if (!tansen)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tansen->ctrl = devm_request_and_ioremap(&pdev->dev, mem);
+
+	if (!tansen->ctrl) {
+		dev_err(&pdev->dev, "unable to ioremap registers\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	platform_set_drvdata(pdev, (void *)tansen);
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tansen,
+				     &tansen_dai, 1);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to register CODEC: %d\n", ret);
+		goto out;
+	}
+
+	return 0;
+
+out:
+	kfree(tansen);
+	return ret;
+}
+
+static int tansen_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver tansen_codec_driver = {
+	.driver = {
+			.name = "tansen-codec",
+			.owner = THIS_MODULE,
+			.of_match_table = tansen_of_match,
+	},
+
+	.probe = tansen_platform_probe,
+	.remove = tansen_platform_remove,
+};
+
+module_platform_driver(tansen_codec_driver);
+
+MODULE_DESCRIPTION("ASoC CosmicCircuits Tansen audio driver");
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tansen.h b/sound/soc/codecs/tansen.h
new file mode 100644
index 0000000..f58ff84
--- /dev/null
+++ b/sound/soc/codecs/tansen.h
@@ -0,0 +1,187 @@
+/*
+ * tansen.h - CosmicCircuits internal DAC
+ *
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#ifndef TANSEN_H_
+#define TANSEN_H_
+
+#define TANSEN_TM_PWM1_L1	0x820
+#define TANSEN_TM_PWM1_L2	0x821
+#define TANSEN_TM_PWM1_L3	0x822
+#define TANSEN_TM_PWM1_R1	0x823
+#define TANSEN_TM_PWM1_R2	0x824
+#define TANSEN_TM_PWM1_R4	0x825
+#ifdef CONFIG_SOC_COMET_ES1
+#define TANSEN_TM_PWM1_S1	0x860 /*Wrong in data sheet not 0x826*/
+#else
+#define TANSEN_TM_PWM1_S1	0x826 /*now fixed*/
+#endif
+#define TANSEN_TM_PWM1_S2	0x827
+#define TANSEN_TM_PWM2_L1	0x8a0
+#define TANSEN_TM_PWM2_L2	0x8a1
+#define TANSEN_TM_PWM2_L3	0x8a2
+#define TANSEN_TM_PWM2_L4	0x8a3
+#define TANSEN_TM_PWM2_L5	0x8a4
+#define TANSEN_TM_PWM2_R1	0x8a5
+#define TANSEN_TM_PWM2_R2	0x8a6
+#define TANSEN_TM_PWM2_R3	0x8a7
+#define TANSEN_TM_PWM2_R4	0x8a8
+#define TANSEN_TM_PWM2_R5	0x8a9
+#ifdef CONFIG_SOC_COMET_ES1
+#define TANSEN_TM_PWM2_S1	0x8e0 /*Wrong in datasheet not 0x8aa */
+#else
+#define TANSEN_TM_PWM2_S1	0x8aa
+#endif
+#define TANSEN_TM_PWM2_S2	0x8ab
+#define TANSEN_TM_PWM3_L1	0x920
+#define TANSEN_TM_PWM3_L2	0x921
+#define TANSEN_TM_PWM3_L3	0x922
+#define TANSEN_TM_PWM3_R1	0x923
+#define TANSEN_TM_PWM3_R2	0x924
+#define TANSEN_TM_PWM3_R4	0x925
+#ifdef CONFIG_SOC_COMET_ES1
+#define TANSEN_TM_PWM3_S1	0x960 /*Wrong in datasheet not 0x926 */
+#else
+#define TANSEN_TM_PWM3_S1	0x926
+#endif
+#define TANSEN_TM_PWM3_S2	0x927
+#define TANSEN_TM_BG_OP1	0x980
+#define TANSEN_TM_BG_OP2	0x981
+#define TANSEN_TM_BG_OP3	0x982
+#define TANSEN_TM_BG_OP4	0x983
+#define TANSEN_TM_BG_OP5	0x984
+#define TANSEN_TM_BG_OP6	0x985
+#define TANSEN_TM_BG_OP7	0x986
+#define TANSEN_TM_HP_L_1	0xA20
+#define TANSEN_TM_HP_L_2	0xA21
+#define TANSEN_TM_HP_L_3	0xA22
+#define TANSEN_TM_HP_L_4	0xA23
+#define TANSEN_TM_HP_L_5	0xA24
+#define TANSEN_TM_HP_L_6	0xA25
+#define TANSEN_TM_HP_L_7	0xA26
+#define TANSEN_TM_HP_L_8	0xA27
+#define TANSEN_TM_HP_L_9	0xA28
+#define TANSEN_TM_HP_L_10	0xA29
+#define TANSEN_TM_HP_L_11	0xA2A
+#define TANSEN_TM_HP_L_12	0xA2B
+#define TANSEN_TM_HP_L_13	0xA2C
+#define TANSEN_TM_HP_L_14	0xA2D
+#define TANSEN_TM_HP_L_15	0xA2E
+#define TANSEN_TM_HP_L_16	0xA2F
+#define TANSEN_TM_HP_L_17	0xA30
+#define TANSEN_TM_HP_L_18	0xA31
+#define TANSEN_TM_HP_L_19	0xA32
+#define TANSEN_TM_HP_L_20	0xA33
+#define TANSEN_TM_HP_L_21	0xA34
+#define TANSEN_TM_HP_L_22	0xA35
+#define TANSEN_TM_HP_L_23	0xA36
+#define TANSEN_TM_HP_L_24	0xA37
+#define TANSEN_TM_HP_L_25	0xA38
+#define TANSEN_TM_HP_L_26	0xA39
+#define TANSEN_TM_HP_R_1	0xA40
+#define TANSEN_TM_HP_R_2	0xA41
+#define TANSEN_TM_HP_R_3	0xA42
+#define TANSEN_TM_HP_R_4	0xA43
+#define TANSEN_TM_HP_R_5	0xA44
+#define TANSEN_TM_HP_R_6	0xA45
+#define TANSEN_TM_HP_R_7	0xA46
+#define TANSEN_TM_HP_R_8	0xA47
+#define TANSEN_TM_HP_R_9	0xA48
+#define TANSEN_TM_HP_R_10	0xA49
+#define TANSEN_TM_HP_R_11	0xA4A
+#define TANSEN_TM_HP_R_12	0xA4B
+#define TANSEN_TM_HP_R_13	0xA4C
+#define TANSEN_TM_HP_R_14	0xA4D
+#define TANSEN_TM_HP_R_15	0xA4E
+#define TANSEN_TM_HP_R_16	0xA4f
+#define TANSEN_TM_HP_R_17	0xA50
+#define TANSEN_TM_HP_R_18	0xA51
+#define TANSEN_TM_HP_R_19	0xA52
+#define TANSEN_TM_HP_R_20	0xA53
+#define TANSEN_TM_HP_R_21	0xA54
+#define TANSEN_TM_HP_R_22	0xA55
+#define TANSEN_TM_HP_R_23	0xA56
+#define TANSEN_TM_HP_R_24	0xA57
+#define TANSEN_TM_HP_R_25	0xA58
+#define TANSEN_TM_HP_R_26	0xA59
+#define TANSEN_TM_PLL_1		0xA80
+#define TANSEN_TM_PLL_2		0xA81
+#define TANSEN_TM_PLL_3		0xA82
+#define TANSEN_TM_PLL_4		0xA83
+#define TANSEN_TM_PLL_5		0xA84
+#define TANSEN_TM_PLL_6		0xA85
+#define TANSEN_TM_PLL_7		0xA86
+#define TANSEN_TM_PLL_8		0xA87
+#define TANSEN_TM_PLL_9		0xA88
+#define TANSEN_TM_PLL_10	0xA89
+#define TANSEN_TM_OP_TOP1	0xB00
+#define TANSEN_TM_OP_TOP2	0xB01
+#define TANSEN_TM_OP_TOP3	0xB02
+#define TANSEN_TM_OP_TOP4	0xB03
+#define TANSEN_TM_OP_TOP5	0xB04
+#define TANSEN_TM_OP_TOP6	0xB05
+#define TANSEN_TM_OP_TOP7	0xB06
+#define TANSEN_TM_OP_TOP8	0xB07
+#define TANSEN_TM_OP_TOP9	0xB08
+#define TANSEN_TM_OP_TOP10	0xB09
+#define TANSEN_TM_OP_TOP11	0xB0A
+#define TANSEN_TM_OP_TOP12	0xB0B
+#define TANSEN_TM_OP_TOP13	0xB0C
+#define TANSEN_TM_L_PGA1	0xC20
+#define TANSEN_TM_L_PGA2	0xC21
+#define TANSEN_TM_L_PGA3	0xC22
+#define TANSEN_TM_L_PGA4	0xC23
+#define TANSEN_TM_L_PGA5	0xC24
+#define TANSEN_TM_R_PGA1	0xC40
+#define TANSEN_TM_R_PGA2	0xC41
+#define TANSEN_TM_R_PGA3	0xC42
+#define TANSEN_TM_R_PGA4	0xC43
+#define TANSEN_TM_R_PGA5	0xC44
+#define TANSEN_TM_L_ADC1	0xCA0
+#define TANSEN_TM_L_ADC2	0xCA1
+#define TANSEN_TM_L_ADC3	0xCA2
+#define TANSEN_TM_L_ADC4	0xCA3
+#define TANSEN_TM_L_ADC5	0xCA4
+#define TANSEN_TM_L_ADC6	0xCA5
+#define TANSEN_TM_L_ADC7	0xCA6
+#define TANSEN_TM_L_ADC8	0xCA7
+#define TANSEN_TM_L_ADC9	0xCA8
+#define TANSEN_TM_L_ADC10	0xCA9
+#define TANSEN_TM_R_ADC1	0xCC0
+#define TANSEN_TM_R_ADC2	0xCC1
+#define TANSEN_TM_R_ADC3	0xCC2
+#define TANSEN_TM_R_ADC4	0xCC3
+#define TANSEN_TM_R_ADC5	0xCC4
+#define TANSEN_TM_R_ADC6	0xCC5
+#define TANSEN_TM_R_ADC7	0xCC6
+#define TANSEN_TM_R_ADC8	0xCC7
+#define TANSEN_TM_R_ADC9	0xCC8
+#define TANSEN_TM_R_ADC10	0xCCA /*typo in datasheet ? should be 0xCC9 ? */
+#define TANSEN_TM_DECFILT_L1	0xD20
+#define TANSEN_TM_DECFILT_L2	0xD21
+#define TANSEN_TM_DECFILT_R1	0xD40
+#define TANSEN_TM_DECFILT_R2	0xD41
+#define TANSEN_TM_BG_IP_1	0xD80
+#define TANSEN_TM_BG_IP_2	0xD81
+#define TANSEN_TM_BG_IP_3	0xD82
+#define TANSEN_TM_BG_IP_4	0xD83
+#define TANSEN_TM_BG_IP_5	0xD84
+#define TANSEN_TM_BG_IP_6	0xD85
+#define TANSEN_TM_BG_IP_7	0xD86
+#define TANSEN_TM_MICBIAS_1	0xE00
+#define TANSEN_TM_IP_TOP1	0xE80
+#define TANSEN_TM_IP_TOP2	0xE81
+#define TANSEN_TM_IP_TOP3	0xE82
+#define TANSEN_TM_IP_TOP4	0xE83
+#define TANSEN_TM_IP_TOP5	0xE84
+#define TANSEN_TM_IP_TOP6	0xE85
+#define TANSEN_TM_IP_TOP7	0xE86
+
+#endif /* TANSEN_H_ */
diff --git a/sound/soc/codecs/tansen_gti.c b/sound/soc/codecs/tansen_gti.c
new file mode 100644
index 0000000..01d7849
--- /dev/null
+++ b/sound/soc/codecs/tansen_gti.c
@@ -0,0 +1,101 @@
+/*
+ *  tansen_gti.c - GTI port support for the Tansen Codec.
+ *
+ *  Copyright (C) 2010-2013 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <sound/tansen.h>
+#include "tansen_gti.h"
+
+__u32 gti_read(void __iomem *port, unsigned long reg)
+{
+	int i;
+	unsigned long data_in = 0, data_out = 0, ctrl, bit, ret;
+
+	/* Construct the data input to the port */
+	data_in |= ((reg & GTI_CTRL_REG_ADDR_MASK) << GTI_CTRL_REG_ADDR_SHIFT);
+	data_in |= ((0 & GTI_CTRL_DATA_MASK) << GTI_CTRL_DATA_SHIFT);
+	data_in |= ((GTI_CTRL_OP_READ & GTI_CTRL_OP_MASK) << GTI_CTRL_OP_SHIFT);
+
+	/* Ensure GTI port is in the right state */
+	ctrl = GTI_MAKE_CTRL_REG(0, GTI_CLK_LO, GTI_NOT_IN_RESET,
+				 GTI_NOT_TEST_MODE);
+	iowrite32(ctrl, port);
+	udelay(1);
+
+	/* Clock out & in the bits, starting with the MSB */
+	for (i = 0; i < 32; i++) {
+		bit = (data_in & 0x80000000) >> 31;
+		data_in <<= 1;
+
+		ctrl = GTI_MAKE_CTRL_REG(bit, GTI_CLK_HI, GTI_NOT_IN_RESET,
+					 GTI_NOT_TEST_MODE);
+		iowrite32(ctrl, port);
+		udelay(1);
+		ctrl = GTI_MAKE_CTRL_REG(bit, GTI_CLK_LO, GTI_NOT_IN_RESET,
+					 GTI_NOT_TEST_MODE);
+		iowrite32(ctrl, port);
+		udelay(1);
+
+		bit = ioread32(port + 4);
+		data_out = (data_out << 1) | bit;
+	}
+
+	ret = (data_out >> GTI_CTRL_DATA_SHIFT) & GTI_CTRL_DATA_MASK;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gti_read);
+
+void gti_write(void __iomem *port, unsigned long reg, unsigned long value)
+{
+	int i;
+	unsigned long data_in = 0, ctrl, bit;
+
+	/* Construct the data input to the port */
+	data_in |= ((reg & GTI_CTRL_REG_ADDR_MASK) << GTI_CTRL_REG_ADDR_SHIFT);
+	data_in |= ((value & GTI_CTRL_DATA_MASK) << GTI_CTRL_DATA_SHIFT);
+	data_in |= ((GTI_CTRL_OP_WRITE & GTI_CTRL_OP_MASK)
+		    << GTI_CTRL_OP_SHIFT);
+
+	/* Ensure GTI port is in the right state */
+	ctrl = GTI_MAKE_CTRL_REG(0, GTI_CLK_LO, GTI_NOT_IN_RESET,
+				 GTI_NOT_TEST_MODE);
+	iowrite32(ctrl, port);
+	udelay(1);
+
+	/* Clock out & in the bits, starting with the MSB */
+	for (i = 0; i < 32; i++) {
+		bit = (data_in & 0x80000000) >> 31;
+		data_in <<= 1;
+
+		ctrl = GTI_MAKE_CTRL_REG(bit, GTI_CLK_HI, GTI_NOT_IN_RESET,
+					 GTI_NOT_TEST_MODE);
+		iowrite32(ctrl, port);
+		udelay(1);
+		ctrl = GTI_MAKE_CTRL_REG(bit, GTI_CLK_LO, GTI_NOT_IN_RESET,
+					 GTI_NOT_TEST_MODE);
+		iowrite32(ctrl, port);
+		udelay(1);
+	}
+}
+EXPORT_SYMBOL_GPL(gti_write);
+
+void gti_reset(void __iomem *port, int reset)
+{
+	unsigned long ctrl;
+	ctrl = GTI_MAKE_CTRL_REG(0, GTI_CLK_LO,
+				 reset ? GTI_IN_RESET : GTI_NOT_IN_RESET,
+				 GTI_NOT_TEST_MODE);
+	iowrite32(ctrl, port);
+}
+EXPORT_SYMBOL_GPL(gti_reset);
diff --git a/sound/soc/codecs/tansen_gti.h b/sound/soc/codecs/tansen_gti.h
new file mode 100644
index 0000000..c5529a1
--- /dev/null
+++ b/sound/soc/codecs/tansen_gti.h
@@ -0,0 +1,21 @@
+#ifndef __TANSEN_GTI_H__
+#define __TANSEN_GTI_H__
+
+#define GTI_CTRL_REG_ADDR_SHIFT		20
+#define GTI_CTRL_REG_ADDR_MASK		0x00000FFF		/* 12 bits */
+#define GTI_CTRL_OP_SHIFT		19
+#define GTI_CTRL_OP_MASK		0x00000001		/* 1 bit */
+#define GTI_CTRL_DATA_SHIFT		8
+#define GTI_CTRL_DATA_MASK		0x000000FF		/* 8 bits */
+
+#define GTI_CTRL_OP_WRITE		0x00000001
+#define GTI_CTRL_OP_READ		0x00000000
+
+#define GTI_CLK_HI			1
+#define GTI_CLK_LO			0
+#define GTI_NOT_IN_RESET		1
+#define GTI_IN_RESET			0
+#define GTI_NOT_TEST_MODE		0
+#define GTI_TEST_MODE			1
+
+#endif /* __TANSEN_GTI_H__ */
diff --git a/sound/soc/tz1090/01xx.c b/sound/soc/tz1090/01xx.c
new file mode 100644
index 0000000..5120ea8
--- /dev/null
+++ b/sound/soc/tz1090/01xx.c
@@ -0,0 +1,438 @@
+/*
+ *  01xx.c
+ *
+ *  01SP/01TT TZ1090 Metamorph/Minimorph ASoC Audio Support using tansen codec.
+ *
+ *  Copyright:	(C) 2009-2013 Imagination Technologies
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tansen.h>
+#include <asm/soc-tz1090/hdmi-audio.h>
+#include <asm/soc-tz1090/audiocodec.h>
+#include <asm/soc-tz1090/defs.h>
+
+#include "../codecs/tansen_gti.h"
+#include "tz1090-pcm.h"
+#include "tz1090-i2s.h"
+
+struct zero1xx_priv {
+	void __iomem *adc_ctrl;
+	void __iomem *hp_ctrl;
+	void __iomem *gti_ctrl;
+};
+
+static int zero1xx_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+
+	/*
+	 * Note: as the Comet I2S out block expects 24 bit data left aligned
+	 * from the DMA and ALSA provides 24 bit data right aligned we
+	 * configure the I2S out block in 32 bit mode we then get 24 bit data
+	 * out of the I2S right aligned, which conforms to the Sony
+	 * right aligned I2S timings.
+	 */
+
+	/*
+	 * set codec DAI configuration:
+	 *	Sony Right Justify Timing
+	 *	normal bit clock normal frame clock
+	 *	bit clock and frame clock slave
+	 *
+	 */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_RIGHT_J |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * set cpu DAI configuration:
+	 *	Sony Right Justify Timing
+	 *	normal bit clock normal frame clock
+	 *	bit clock and frame clock master
+	 */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_RIGHT_J |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops zero1xx_ops = {
+	.hw_params = zero1xx_hw_params,
+};
+
+static int __init zero1xx_tansen_init(struct zero1xx_priv *zero1xx)
+{
+	u32 audio_hp_ctrl;
+	u32 audio_adc_ctrl;
+	/*
+	 * Brings DAC out of Reset
+	 * All other config handled by DAC driver
+	 */
+
+	u32 hpctrl, adcctrl;
+
+	hpctrl = ioread32(zero1xx->hp_ctrl);
+	adcctrl = ioread32(zero1xx->adc_ctrl);
+
+	/* GTI port must be in reset for power up to work */
+	gti_reset(zero1xx->gti_ctrl, 1);
+
+	/* Power up bandgap */
+	hpctrl &= ~(AUDIO_PWDN_BG_OP | AUDIO_PWDN_BG_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(100);
+
+	/* Reset bandgap filter */
+	hpctrl |= (AUDIO_RST_BG_OP | AUDIO_RST_BG_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(250);
+
+	hpctrl &= ~(AUDIO_RST_BG_OP | AUDIO_RST_BG_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(100);
+
+	/* Power up PLL */
+	hpctrl &= ~AUDIO_PWDN_PLL;
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(100);
+
+	/* Take digital blocks out of reset */
+	hpctrl &= ~(AUDIO_RSTB_DIG_OP | AUDIO_RSTB_DIG_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(100);
+
+	/* Power up outputs only need PWM_C (Left) and PWM_D (right) */
+	hpctrl |= (/*AUDIO_PSCNT_PWM_F | AUDIO_PSCNT_PWM_E |*/ AUDIO_PSCNT_PWM_D |
+		   AUDIO_PSCNT_PWM_C /*| AUDIO_PSCNT_PWM_B | AUDIO_PSCNT_PWM_A*/);
+	hpctrl |= (AUDIO_PSCNTHP_R | AUDIO_PSCNTHP_L);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+
+	adcctrl |= (AUDIO_PSCNTADC_R | AUDIO_PSCNTADC_L);
+	iowrite32(adcctrl, zero1xx->adc_ctrl);
+
+	udelay(100);
+
+	/* Reset analogue filters */
+	hpctrl |= (AUDIO_RSTB_ANA_OP | AUDIO_RSTB_ANA_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(250);
+
+	hpctrl &= ~(AUDIO_RSTB_ANA_OP | AUDIO_RSTB_ANA_IP);
+	iowrite32(hpctrl, zero1xx->hp_ctrl);
+	udelay(250);
+
+	/* Take the GTI port out of reset */
+	gti_reset(zero1xx->gti_ctrl, 0);
+
+	/* power up */
+
+	/* Get initial state of control registers */
+	audio_hp_ctrl = ioread32(zero1xx->hp_ctrl);
+	audio_adc_ctrl = ioread32(zero1xx->adc_ctrl);
+
+	/* Need to put the GTI interface into reset for the power up to work. */
+	gti_reset(zero1xx->gti_ctrl, true);
+
+	/*
+	 *	Step 1.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PWDN_BG_OP	=	0
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PWDN_BG_IP	=	0
+	 *	DELAY x us
+	 */
+	audio_hp_ctrl &= ~(CR_AUDIO_PWDN_BG_OP_MASK <<
+			   CR_AUDIO_PWDN_BG_OP_SHIFT);
+	audio_adc_ctrl &= ~(CR_AUDIO_PWDN_BG_IP_MASK <<
+			    CR_AUDIO_PWDN_BG_IP_SHIFT);
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(100);
+
+	/*
+	 *	Step 2.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RST_BG_OP	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RST_BG_IP	=	1
+	 *	DELAY > 250 us
+	 */
+	audio_hp_ctrl |= (CR_AUDIO_RST_BG_OP_MASK << CR_AUDIO_RST_BG_OP_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_RST_BG_IP_MASK << CR_AUDIO_RST_BG_IP_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(250);
+
+	/*
+	 *	Step 3.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RST_BG_OP	=	0
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RST_BG_IP	=	0
+	 *	DELAY x us
+	 */
+	audio_hp_ctrl &= ~(CR_AUDIO_RST_BG_OP_MASK << CR_AUDIO_RST_BG_OP_SHIFT);
+	audio_hp_ctrl &= ~(CR_AUDIO_RST_BG_IP_MASK << CR_AUDIO_RST_BG_IP_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(100);
+
+	/*
+	 *	Step 4.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PWDN_PLL	=	0
+	 *	DELAY > 100 us
+	 */
+	audio_hp_ctrl &= ~(CR_AUDIO_PWDN_PLL_MASK << CR_AUDIO_PWDN_PLL_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(100);
+
+	/*
+	 *	Step 5.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_DIG_OP	=	0
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_DIG_IP	=	0
+	 *	DELAY x us
+	 */
+	audio_hp_ctrl &= ~(CR_AUDIO_RSTB_DIG_OP_MASK <<
+			   CR_AUDIO_RSTB_DIG_OP_SHIFT);
+	audio_hp_ctrl &= ~(CR_AUDIO_RSTB_DIG_IP_MASK <<
+			   CR_AUDIO_RSTB_DIG_IP_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(100);
+
+	/*
+	 *	Step 6.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_F	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_E	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_D	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_C	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_B	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNT_PWM_A	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNTHP_R	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_PSCNTHP_L	=	1
+	 *
+	 *	CR_AUDIO_ADC_CTRL	CR_AUDIO_PSCNTADC_R	=	1
+	 *	CR_AUDIO_ADC_CTRL	CR_AUDIO_PSCNTADC_L	=	1
+	 *	DELAY x us
+	 */
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_F_MASK <<
+			  CR_AUDIO_PSCNT_PWM_F_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_E_MASK <<
+			  CR_AUDIO_PSCNT_PWM_E_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_D_MASK <<
+			  CR_AUDIO_PSCNT_PWM_D_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_C_MASK <<
+			  CR_AUDIO_PSCNT_PWM_C_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_B_MASK <<
+			  CR_AUDIO_PSCNT_PWM_B_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNT_PWM_A_MASK <<
+			  CR_AUDIO_PSCNT_PWM_A_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNTHP_R_MASK <<
+			  CR_AUDIO_PSCNTHP_R_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_PSCNTHP_L_MASK <<
+			  CR_AUDIO_PSCNTHP_L_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_I2S_EXT_MASK <<
+			  CR_AUDIO_I2S_EXT_SHIFT);
+	audio_hp_ctrl &= ~(CR_AUDIO_PGA_MODE_MASK <<
+			   CR_AUDIO_PGA_MODE_SHIFT);
+	audio_hp_ctrl |= 0x01 << CR_AUDIO_PGA_MODE_SHIFT;
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	audio_adc_ctrl |= (CR_AUDIO_PSCNTADC_R_MASK <<
+			   CR_AUDIO_PSCNTADC_R_SHIFT);
+	audio_adc_ctrl |= (CR_AUDIO_PSCNTADC_L_MASK <<
+			   CR_AUDIO_PSCNTADC_L_SHIFT);
+	iowrite32(audio_adc_ctrl, zero1xx->adc_ctrl);
+
+	udelay(100);
+
+	/*
+	 *	Step 7.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_ANA_OP	=	1
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_ANA_IP	=	1
+	 *	DELAY >250 us
+	 */
+	audio_hp_ctrl |= (CR_AUDIO_RSTB_ANA_OP_MASK <<
+			  CR_AUDIO_RSTB_ANA_OP_SHIFT);
+	audio_hp_ctrl |= (CR_AUDIO_RSTB_ANA_IP_MASK <<
+			  CR_AUDIO_RSTB_ANA_IP_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(250);
+
+	/*
+	 *	Step 8.
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_ANA_OP	=	0
+	 *	CR_AUDIO_HP_CTRL	CR_AUDIO_RSTB_ANA_IP	=	0
+	 *	DELAY >250 us
+	 */
+	audio_hp_ctrl &= ~(CR_AUDIO_RSTB_ANA_OP_MASK <<
+			   CR_AUDIO_RSTB_ANA_OP_SHIFT);
+	audio_hp_ctrl &= ~(CR_AUDIO_RSTB_ANA_IP_MASK <<
+			   CR_AUDIO_RSTB_ANA_IP_SHIFT);
+
+	iowrite32(audio_hp_ctrl, zero1xx->hp_ctrl);
+
+	udelay(250);
+
+
+	/* Take the GTI interface out of reset. */
+	gti_reset(zero1xx->gti_ctrl, false);
+
+	return 0;
+}
+
+/*
+ * 01SP/01TT digital audio interface glue - connects codec <--> CPU
+ *
+ * The codec used on the 01SP/01TT is the Cosmic circuits DAC internal
+ * to the SoC.
+ *
+ * Note: As we now only support 1 fixed audio configuration, HDMI audio
+ * just piggybacks on the back of the DAC config (when
+ * CONFIG_TZ1090_01XX_HDMI_AUDIO is set) and no longer has its own codec driver
+ */
+
+static struct snd_soc_dai_link zero1xx_dai = {
+	.name = "tansen",
+	.stream_name = "Playback",
+	.codec_dai_name = "tansen",
+	.platform_name = "comet-pcm-audio",
+	.ops = &zero1xx_ops,
+};
+
+
+/*01SP/01TT audio machine driver */
+static struct snd_soc_card snd_soc_zero1xx = {
+	.name = "01SP/01TT-Tansen-Audio",
+	.owner = THIS_MODULE,
+	.dai_link = &zero1xx_dai,
+	.num_links = 1,
+};
+
+static int zero1xx_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct snd_soc_card *card = &snd_soc_zero1xx;
+	struct zero1xx_priv zero1xx;
+	struct resource *mem;
+	int ret;
+
+	zero1xx_dai.codec_name = NULL;
+	zero1xx_dai.codec_of_node = of_parse_phandle(np,
+				"img,audio-codec", 0);
+	if (!zero1xx_dai.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'img,audio-codec' not found\n");
+		return -EINVAL;
+	}
+
+	zero1xx_dai.cpu_dai_name = NULL;
+	zero1xx_dai.cpu_of_node = of_parse_phandle(np,
+				"img,i2s-controller", 0);
+	if (!zero1xx_dai.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'img,i2s-controller' not found\n");
+		return -EINVAL;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	zero1xx.adc_ctrl = devm_request_and_ioremap(&pdev->dev, mem);
+	if (!zero1xx.adc_ctrl) {
+		dev_err(&pdev->dev,
+			"Unable to ioremap the adc ctrl registers\n");
+		return -ENOMEM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	zero1xx.hp_ctrl = devm_request_and_ioremap(&pdev->dev, mem);
+	if (!zero1xx.hp_ctrl) {
+		dev_err(&pdev->dev,
+			"Unable to ioremap the hp ctrl registers\n");
+		return -ENOMEM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+
+	if (!mem) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		return -ENODEV;
+	}
+
+	zero1xx.gti_ctrl = ioremap_nocache(mem->start,
+					   mem->end - mem->start + 1);
+	if (!zero1xx.gti_ctrl) {
+		dev_err(&pdev->dev,
+			"Unable to ioremap the gti ctrl registers\n");
+		return -ENOMEM;
+	}
+
+	zero1xx_tansen_init(&zero1xx);
+
+	card->dev = &pdev->dev;
+
+	ret = snd_soc_register_card(card);
+
+	if (ret)
+		dev_err(&pdev->dev,
+			"snd_soc_register_card() failed: %d\n",
+			ret);
+
+
+	return ret;
+}
+
+static int zero1xx_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+	snd_soc_unregister_card(card);
+
+	return 0;
+}
+
+static const struct of_device_id tz1090_audio_of_match[] = {
+	{ .compatible = "img,tz1090-audio", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, tz1090_audio_of_match);
+
+static struct platform_driver zero1xx_driver = {
+	.driver = {
+		.name   = "zero1xx-audio",
+		.owner  = THIS_MODULE,
+		.of_match_table = tz1090_audio_of_match,
+	},
+	.probe          = zero1xx_probe,
+	.remove         = zero1xx_remove,
+};
+
+module_platform_driver(zero1xx_driver);
+
+
+/* Module information */
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("ALSA SoC 01SP/01TT");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:zero1xx-audio");
diff --git a/sound/soc/tz1090/Kconfig b/sound/soc/tz1090/Kconfig
new file mode 100644
index 0000000..01eaf37
--- /dev/null
+++ b/sound/soc/tz1090/Kconfig
@@ -0,0 +1,19 @@
+config SND_TZ1090_SOC
+	tristate "SoC Audio for Toumaz Xenif TZ1090 (Comet)"
+	depends on SOC_TZ1090 && TZ1090_MDC_DMA
+	select SND_SOC_DMAENGINE_PCM
+	help
+	  Say Y or M if you want to add support for codecs attached to the
+	  TZ1090 I2S Audio out interface.
+
+config SND_TZ1090_SOC_I2S
+	tristate
+
+config SND_TZ1090_SOC_TZ1090_01XX
+	tristate "SoC Audio support for 01SP/01TT Base Layer"
+	depends on TZ1090_01XX || COMET_BUB
+	select SND_TZ1090_SOC
+	select SND_TZ1090_SOC_I2S
+	select SND_SOC_TANSEN
+	help
+	  Say Y if you want Audio support for the Comet Metamorph/Minimorph/BuB.
diff --git a/sound/soc/tz1090/Makefile b/sound/soc/tz1090/Makefile
new file mode 100644
index 0000000..e108e5f
--- /dev/null
+++ b/sound/soc/tz1090/Makefile
@@ -0,0 +1,12 @@
+#TZ1090 Platform support
+snd-soc-tz1090-objs := tz1090-pcm.o
+snd-soc-tz1090-i2s-objs := tz1090-i2s.o
+obj-$(CONFIG_SND_TZ1090_SOC) += snd-soc-tz1090.o
+obj-$(CONFIG_SND_TZ1090_SOC_I2S) += snd-soc-tz1090-i2s.o
+
+#TZ1090 Machine support
+snd-soc-01xx-objs := 01xx.o
+
+obj-$(CONFIG_SND_TZ1090_SOC_TZ1090_01XX) += snd-soc-01xx.o
+
+
diff --git a/sound/soc/tz1090/tz1090-i2s.c b/sound/soc/tz1090/tz1090-i2s.c
new file mode 100644
index 0000000..358ba52
--- /dev/null
+++ b/sound/soc/tz1090/tz1090-i2s.c
@@ -0,0 +1,625 @@
+/*
+ *
+ * ALSA SoC I2S Audio Layer for TZ1090
+ *
+ * Author:      Neil Jones
+ * Copyright:   (C) 2009-2012 Imagination Technologies
+ *
+ * based on pxa2xx i2S ASoC driver
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <linux/of.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "tz1090-pcm.h"
+#include "tz1090-i2s.h"
+
+#define DRV_NAME "tz1090-i2s"
+
+static int comet_i2s_set_dai_fmt_out(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	int ret = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+	case SND_SOC_DAIFMT_AC97:
+		ret = -EINVAL;
+		break;
+	default:
+		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		goto out;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:	/*Interface clk & frame master */
+	case SND_SOC_DAIFMT_CBS_CFM:	/*Interface clk master, frame slave*/
+	case SND_SOC_DAIFMT_CBM_CFS:	/*Interface clk slave, frame master*/
+		ret = -EINVAL;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:	/*Interface clk & frame slave*/
+		break;
+	default:
+		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+
+
+	/*clock inversion options*/
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:	/* normal bit clock + frame */
+		break;
+	case SND_SOC_DAIFMT_NB_IF:	/* normal BCLK + inv FRM */
+	case SND_SOC_DAIFMT_IB_NF:	/* invert BCLK + nor FRM */
+	case SND_SOC_DAIFMT_IB_IF:	/* invert BCLK + FRM */
+		ret = -EINVAL;
+		break;
+	}
+out:
+	return ret;
+}
+
+static int comet_i2s_set_dai_fmt_in(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	int ret = 0;
+
+	/*Set Data Format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+
+	case SND_SOC_DAIFMT_RIGHT_J:	/*Right Justified Mode */
+		break;
+
+	case SND_SOC_DAIFMT_I2S:/*I2S Mode (Philips Mode)*/
+	case SND_SOC_DAIFMT_LEFT_J:	/*left Justified Mode */
+	case SND_SOC_DAIFMT_DSP_A:	/* L data msb after FRM LRC */
+	case SND_SOC_DAIFMT_DSP_B:	/* L data msb during FRM LRC */
+	case SND_SOC_DAIFMT_AC97:	/* AC97 */
+		ret = -EINVAL;
+		break;
+
+	default:
+		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+
+	/*clocking scheme*/
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:	/*Codec clk & frame slave*/
+		/*the only supported mode */
+		break;
+	case SND_SOC_DAIFMT_CBS_CFM:	/*Codec clk master, frame slave*/
+	case SND_SOC_DAIFMT_CBM_CFS:	/*Codec clk slave, frame master*/
+	case SND_SOC_DAIFMT_CBS_CFS:	/*Codec clk & frame master */
+		ret = -EINVAL;
+		break;
+
+	default:
+		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+
+	/*clock inversion options*/
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:	/* normal bit clock + frame */
+		break;
+	case SND_SOC_DAIFMT_NB_IF:	/* normal BCLK + inv FRM */
+	case SND_SOC_DAIFMT_IB_NF:	/* invert BCLK + nor FRM */
+	case SND_SOC_DAIFMT_IB_IF:	/* invert BCLK + FRM */
+		ret = -EINVAL;
+	}
+out:
+	return ret;
+}
+
+static int comet_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	int ret = comet_i2s_set_dai_fmt_out(cpu_dai, fmt);
+	if (ret)
+		return ret;
+	else
+		return comet_i2s_set_dai_fmt_in(cpu_dai, fmt);
+}
+
+static int comet_i2s_startup(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+	struct tz1090_i2s *tzi2s = snd_soc_dai_get_drvdata(dai);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_dai_set_dma_data(cpu_dai, substream,
+					 &tzi2s->stereo_out);
+	else
+		snd_soc_dai_set_dma_data(cpu_dai, substream,
+					 &tzi2s->stereo_in);
+
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 1);
+
+	return 0;
+}
+
+static int comet_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+			      struct snd_soc_dai *dai)
+{
+	int ret = 0;
+	u32 temp = 0;
+	struct tz1090_i2s *tzi2s = snd_soc_dai_get_drvdata(dai);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 0);
+			I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 1);
+			I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 0, temp);
+			if (substream->runtime->channels > 2) {
+				temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 1);
+				I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 1);
+				I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 1, temp);
+			}
+			if (substream->runtime->channels > 4) {
+				temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 2);
+				I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 1);
+				I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 2, temp);
+			}
+
+		} else { /* Record */
+			temp = I2S_IN_RGET_CONTROL(tzi2s);
+			I2S_IN_SET_FIELD(temp, I2S_IN_ENABLE, 1);
+			I2S_IN_RSET_CONTROL(tzi2s, temp);
+		}
+
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 0);
+			I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+			I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 0, temp);
+			if (substream->runtime->channels > 2) {
+				temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 1);
+				I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+				I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 1, temp);
+			}
+			if (substream->runtime->channels > 4) {
+				temp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, 2);
+				I2S_OUT_SET_FIELD(temp, I2S_OUT_CHAN_RUN, 0);
+				I2S_OUT_RSET_CHAN_CONTROL(tzi2s, 2, temp);
+			}
+		} else { /* Record */
+			temp = I2S_IN_RGET_CONTROL(tzi2s);
+			I2S_IN_SET_FIELD(temp, I2S_IN_ENABLE, 0);
+			I2S_IN_RSET_CONTROL(tzi2s, temp);
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int comet_i2s_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct tz1090_i2s *tzi2s = snd_soc_dai_get_drvdata(dai);
+	int ch;
+	struct clk *i2s_clk = NULL;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		/* Set the number of channels in use */
+		I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 0);
+		ch = params_channels(params) / 2 - 1;
+		I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ACTIVE_CHAN, ch);
+		I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 1);
+
+		/*Set Format*/
+		switch (params_format(params)) {
+
+		case SNDRV_PCM_FORMAT_S24_LE:
+			break;
+		case SNDRV_PCM_FORMAT_S16_LE:
+		case SNDRV_PCM_FORMAT_S32_LE:
+			return -EINVAL;
+			break;
+		default:
+			printk(KERN_ERR "%s: Unknown PCM format\n", __func__);
+			return -EINVAL;
+		}
+
+	} else {
+		/*Set Format*/
+		switch (params_format(params)) {
+
+		case SNDRV_PCM_FORMAT_S24_LE:
+			break;
+
+		case SNDRV_PCM_FORMAT_S16_LE:
+		case SNDRV_PCM_FORMAT_S32_LE:
+			return -EINVAL;
+			break;
+
+		default:
+			printk(KERN_ERR "%s: Unknown PCM format\n", __func__);
+			return -EINVAL;
+		}
+
+	}
+
+	/* Set Sample Rate */
+	i2s_clk = clk_get(substream->pcm->dev, "i2s");
+	if (IS_ERR(i2s_clk))
+		return PTR_ERR(i2s_clk);
+
+	switch (params_rate(params)) {
+	case 32000:
+		clk_set_rate(i2s_clk, 8192000);
+		break;
+	case 48000:
+		clk_set_rate(i2s_clk, 12288000);
+		break;
+	case 96000:
+		clk_set_rate(i2s_clk, 24576000);
+		break;
+	default:
+		return -EINVAL;
+	}
+	clk_put(i2s_clk);
+
+	return 0;
+}
+
+static void comet_i2s_shutdown(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct tz1090_i2s *tzi2s = snd_soc_dai_get_drvdata(dai);
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 0);
+	I2S_OUT_WRITE_REG(tzi2s, I2S_OUT_SOFT_RESET, 1);
+	udelay(1000);
+	I2S_OUT_WRITE_REG(tzi2s, I2S_OUT_SOFT_RESET, 0);
+}
+
+#ifdef CONFIG_PM
+static int comet_i2s_suspend(struct snd_soc_dai *dai)
+{
+	/* TODO:
+	 *	It should be possible to disable the clocks to
+	 *	the I2S OUT module
+	 */
+	return 0;
+}
+
+static int comet_i2s_resume(struct snd_soc_dai *dai)
+{
+	/* TODO*/
+	return 0;
+}
+#endif
+
+#define COMET_I2S_RATES (SNDRV_PCM_RATE_32000 \
+			| SNDRV_PCM_RATE_48000 \
+			| SNDRV_PCM_RATE_96000)
+
+#define COMET_FORMATS_PLAYBACK SNDRV_PCM_FMTBIT_S24_LE
+
+#define COMET_FORMATS_RECORD SNDRV_PCM_FMTBIT_S24_LE
+
+static struct snd_soc_dai_ops comet_i2s_dai_ops = {
+	.startup	= comet_i2s_startup,
+	.shutdown	= comet_i2s_shutdown,
+	.trigger	= comet_i2s_trigger,
+	.hw_params	= comet_i2s_hw_params,
+	.set_fmt	= comet_i2s_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver comet_i2s_dai = {
+	.id = -1,
+#ifdef CONFIG_PM
+	.suspend = comet_i2s_suspend,
+	.resume = comet_i2s_resume,
+#endif
+	.playback = {
+		.channels_min = 2,
+		.channels_max = 6,
+		.rates = COMET_I2S_RATES,
+		.formats = COMET_FORMATS_PLAYBACK,
+	},
+	.capture = {
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = COMET_I2S_RATES,
+		.formats = COMET_FORMATS_RECORD,
+	},
+	.ops = &comet_i2s_dai_ops,
+	.symmetric_rates = 1,
+
+};
+EXPORT_SYMBOL_GPL(comet_i2s_dai);
+
+static const struct snd_soc_component_driver comet_i2s_component = {
+	.name = "comet-i2s",
+};
+
+static struct of_device_id img_i2s_of_match[] = {
+	{ .compatible = "img,i2s", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, img_i2s_of_match);
+
+static struct platform_device comet_audio_pcm = {
+	.name = "comet-pcm-audio",
+	.id = -1,
+};
+
+static struct platform_device *audio_devices[] __initdata = {
+	&comet_audio_pcm,
+};
+
+static int comet_i2s_platform_probe(struct platform_device *pdev)
+{
+	struct clk *i2s;
+	int ret, i;
+	struct tz1090_i2s *tzi2s;
+	u32 of_dma[6], outtemp, intemp;
+	struct resource *mem;
+	struct device *dev = &pdev->dev;
+
+	ret = platform_add_devices(audio_devices, ARRAY_SIZE(audio_devices));
+	if (ret)
+		return ret;
+
+	/*
+	 * although the hardware can support various modes they must match
+	 * between i2s in and i2s out also when disabling the i2s block
+	 * to change the in parameters this would stop the out block (playback)
+	 * and vice versa.
+	 *
+	 * thus we only support 1 configuration for both in and out
+	 * and set it at startup.
+	 *
+	 * 24bit
+	 * normal bit clock normal frame clock
+	 * bit clock and frame clock slave
+	 *
+	 * Note1: as the Comet I2S out block expects 24 bit data left aligned
+	 * from the DMA and ALSA provides 24 bit data right aligned we
+	 * configure the I2S out block in 32 bit mode we then get 24 bit data
+	 * out of the I2S right aligned, which conforms to the Sony
+	 * right aligned I2S timings.
+	 *
+	 * Note2: I2S Audio In is broken in Comet in 16 bit mode, it does not
+	 * work in 16+16 frame or 32+32 frame with any timing configuration
+	 * (all have been tried)
+	 *
+	 * Note3: I2S Audio In is broken in Comet in 24bit right justify mode.
+	 * So even though in the machine driver we select right justify mode
+	 * and use this for I2S out, we use Philips mode for I2S In.
+	 */
+
+	tzi2s = devm_kzalloc(dev, sizeof(struct tz1090_i2s), GFP_KERNEL);
+	if (!tzi2s) {
+		dev_err(dev, "Can't allocate memory for tz1090 i2s");
+		return -ENOMEM;
+	}
+
+	dev_set_drvdata(dev, tzi2s);
+
+	tzi2s->dai = comet_i2s_dai;
+	tzi2s->dai.name = dev_name(&pdev->dev);
+	/* Get DMA addresses for audio out/in */
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tzi2s->audio_out = devm_ioremap_resource(dev, mem);
+
+	if (IS_ERR(tzi2s->audio_out)) {
+		ret = PTR_ERR(tzi2s->audio_out);
+		goto err_resource;
+	}
+	tzi2s->stereo_out.peripheral_address = (dma_addr_t)mem->start +
+		_REG_ADDRESS(I2S_OUT_INTERLEAVE_DATA);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tzi2s->audio_in = devm_ioremap_resource(dev, mem);
+
+	if (IS_ERR(tzi2s->audio_in)) {
+		ret = PTR_ERR(tzi2s->audio_in);
+		goto err_resource;
+	}
+	tzi2s->stereo_in.peripheral_address = (dma_addr_t)mem->start +
+		_REG_ADDRESS(I2S_IN_DATA);
+
+	if (of_property_read_u32_array(dev->of_node, "dmas",
+				       of_dma, 6)) {
+		dev_err(dev, "dmas property not found");
+		ret = -ENODEV;
+		goto err_resource;
+	}
+
+	/* We ignore the dma phandles for now */
+	tzi2s->stereo_out.name = "I2S PCM Stereo Out Chan 0";
+	tzi2s->stereo_out.peripheral_num = of_dma[1];
+	tzi2s->stereo_out.dma_channel = of_dma[2];
+	tzi2s->stereo_in.name = "I2S PCM Stereo In";
+	tzi2s->stereo_in.peripheral_num = of_dma[4];
+	tzi2s->stereo_in.dma_channel = of_dma[5];
+
+	/* Disable I2S In and Out */
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 0);
+	intemp = I2S_IN_RGET_CONTROL(tzi2s);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_ENABLE, 0);
+	I2S_IN_RSET_CONTROL(tzi2s, intemp);
+
+
+	/* I2S Out Setup */
+	for (i = 0; i < 3; i++) {
+		outtemp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, i);
+		/*
+		 * Disable module before changing registers
+		 */
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_RUN, 0);
+		I2S_OUT_RSET_CHAN_CONTROL(tzi2s, i, outtemp);
+		/*
+		 * Setup Hardware Formats:
+		 */
+		outtemp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, i);
+		/*left just. bit must be set to 1*/
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_MUSTB1, 1);
+		/* Philips mode */
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_PH_NSY, 0);
+		I2S_OUT_RSET_CHAN_CONTROL(tzi2s, i, outtemp);
+
+		/*Set Format*/
+		outtemp = I2S_OUT_RGET_CHAN_CONTROL(tzi2s, i);
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_PACKED, 0);
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_FORMAT, 8);
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_LRDATA_POL, 0);
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_LRFORCE_DIS, 1);
+		I2S_OUT_SET_FIELD(outtemp, I2S_OUT_CHAN_LOCK_DIS, 1);
+
+		/*write back channel settings*/
+		I2S_OUT_RSET_CHAN_CONTROL(tzi2s, i, outtemp);
+	}
+
+	/* Set common frame format */
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_FRAME, 2);
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_PACKED, 0);
+	/*
+	 * Setup Clocking scheme
+	 */
+	outtemp = I2S_OUT_RGET_MAIN_CONTROL(tzi2s);
+	/*Interface clk & frame slave*/
+	I2S_OUT_SET_FIELD(outtemp, I2S_OUT_MASTER, 1);
+	I2S_OUT_RSET_MAIN_CONTROL(tzi2s, outtemp);
+	/* normal bit clock + frame */
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_BCLK_POL, 1);
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_LEFT_POL, 1);
+	/*enable bit clock */
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_BCLK_EN, 1);
+	/* we only support 256fs */
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ACLK_SEL, 0);
+
+	/*I2S In Setup*/
+
+	/*Philips I2S mode*/
+	I2S_IN_SET_FIELD(intemp, I2S_IN_RJUST, 0);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_LRDLY, 1);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_ALIGN, 1);
+	/* Normal bit clock + frame */
+	I2S_IN_SET_FIELD(intemp, I2S_IN_BCLKPOL, 0);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_LR_DATA_POL, 0);
+	/* 24 bit LE data */
+	I2S_IN_SET_FIELD(intemp, I2S_IN_FRAME_PACK, 0);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_SAMPLE_WIDTH, 1);
+	I2S_IN_SET_FIELD(intemp, I2S_IN_FRAME_WIDTH, 1);
+	/*output data word to DMA is left aligned*/
+	I2S_IN_SET_FIELD(intemp, I2S_IN_PACKH, 0);
+
+	I2S_IN_RSET_CONTROL(tzi2s, intemp);
+
+	/*Re-enable module - but don't set channel run bit yet*/
+	I2S_OUT_SET_REG_FIELD(tzi2s, I2S_OUT_ENABLE, 1);
+
+
+	ret = snd_soc_register_component(dev, &comet_i2s_component, &tzi2s->dai, 1);
+	if (ret) {
+		dev_err(dev, "Could not register DAI: %d\n", ret);
+		ret = -ENOMEM;
+		goto err_dai;
+	}
+
+	i2s = devm_clk_get(dev, NULL);
+	if (IS_ERR(i2s)) {
+		ret = PTR_ERR(i2s);
+		goto err_dai;
+	}
+
+	clk_prepare_enable(i2s);
+	clk_put(i2s);
+
+	dev_info(dev, "TZ1090 I2S probed successfully");
+
+
+	return 0;
+
+err_dai:
+	snd_soc_unregister_component(dev);
+err_resource:
+	return ret;
+}
+
+static int comet_i2s_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver comet_i2s_driver = {
+	.probe = comet_i2s_platform_probe,
+	.remove = comet_i2s_platform_remove,
+
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = img_i2s_of_match,
+	},
+};
+
+static int __init comet_i2s_init(void)
+{
+	return platform_driver_register(&comet_i2s_driver);
+}
+module_init(comet_i2s_init);
+
+static void __exit comet_i2s_exit(void)
+{
+	platform_driver_unregister(&comet_i2s_driver);
+}
+module_exit(comet_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION("I2S driver for TZ1090 SoC");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/tz1090/tz1090-i2s.h b/sound/soc/tz1090/tz1090-i2s.h
new file mode 100644
index 0000000..f7c4b11
--- /dev/null
+++ b/sound/soc/tz1090/tz1090-i2s.h
@@ -0,0 +1,280 @@
+/*
+ * tz1090-i2s.h
+ *
+ * Register layout and accessor helpers for the TZ1090 I2S in/out
+ * hardware blocks, plus the driver's per-device state.
+ */
+#ifndef TZ1090I2S_H_
+#define TZ1090I2S_H_
+
+#include <linux/io.h>
+#include "tz1090-pcm.h"
+#include <asm/soc-tz1090/defs.h>
+
+/* Per-device driver state shared between the DAI and PCM layers. */
+struct tz1090_i2s {
+	struct snd_soc_dai_driver dai;	/* DAI descriptor registered with ASoC */
+	void __iomem *audio_out;	/* mapped I2S-out register block */
+	void __iomem *audio_in;		/* mapped I2S-in register block */
+	int periph_out;			/* playback DMA peripheral id -- TODO confirm against DT binding */
+	int periph_in;			/* capture DMA peripheral id -- TODO confirm against DT binding */
+	struct comet_pcm_dma_params stereo_out;	/* playback DMA parameters */
+	struct comet_pcm_dma_params stereo_in;	/* capture DMA parameters */
+};
+/*Helper Macros (dont use outside of this file) */
+/* _REG_ADDRESS/_REG_MASK/_REG_SHIFT paste a register token into the
+ * matching _<REG>_OFFSET/_MASK/_SHIFT #define further down. */
+#define _REG_ADDRESS(REG) _##REG##_OFFSET
+#define _REG_MASK(REG) _##REG##_MASK
+#define _REG_SHIFT(REG) _##REG##_SHIFT
+
+/**************************** I2S OUT *****************************************/
+/*
+ * Register map for the I2S-out block: a shared control area at offsets
+ * 0x00..0x08, then per-channel register sets starting at 0x80 with a
+ * 0x20 stride.  Each field gets an _OFFSET/_SHIFT/_MASK triple consumed
+ * by the _REG_* paste macros above.
+ */
+
+#define _I2S_OUT_INTERLEAVE_DATA_OFFSET 0x00
+
+#define	_I2S_OUT_CONTROL_OFFSET		0x04
+
+#define _I2S_OUT_ACTIVE_CHAN_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ACTIVE_CHAN_SHIFT	13
+#define _I2S_OUT_ACTIVE_CHAN_MASK	0x0001E000
+
+#define _I2S_OUT_CLOCK_SELECT_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_CLOCK_SELECT_SHIFT	12
+#define _I2S_OUT_CLOCK_SELECT_MASK	0x00001000
+
+#define _I2S_OUT_FRAME_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_FRAME_SHIFT		7
+#define _I2S_OUT_FRAME_MASK		0x00000180
+
+#define _I2S_OUT_MASTER_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_MASTER_SHIFT		6
+#define _I2S_OUT_MASTER_MASK		0x00000040
+
+#define _I2S_OUT_ACLK_SEL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ACLK_SEL_SHIFT		5
+#define _I2S_OUT_ACLK_SEL_MASK		0x00000020
+
+#define _I2S_OUT_BCLK_EN_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_BCLK_EN_SHIFT		4
+#define _I2S_OUT_BCLK_EN_MASK		0x00000010
+
+#define _I2S_OUT_LEFT_POL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_LEFT_POL_SHIFT		3
+#define _I2S_OUT_LEFT_POL_MASK		0x00000008
+
+#define _I2S_OUT_BCLK_POL_OFFSET	_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_BCLK_POL_SHIFT		2
+#define _I2S_OUT_BCLK_POL_MASK		0x00000004
+
+#define _I2S_OUT_PACKED_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_PACKED_SHIFT		1
+#define _I2S_OUT_PACKED_MASK		0x00000002
+
+#define _I2S_OUT_ENABLE_OFFSET		_I2S_OUT_CONTROL_OFFSET
+#define _I2S_OUT_ENABLE_SHIFT		0
+#define _I2S_OUT_ENABLE_MASK		0x00000001
+
+#define _I2S_OUT_SOFT_RESET_OFFSET	0x08
+#define _I2S_OUT_SOFT_RESET_SHIFT	0
+#define _I2S_OUT_SOFT_RESET_MASK	0x00000001
+
+/* Per-channel register banks: base 0x80, one bank of 0x20 bytes each. */
+#define _I2S_OUT_CHANS_OFFSET		0x80
+#define _I2S_OUT_CHANS_STRIDE		0x20
+
+#define _I2S_OUT_CHAN_DATA_OFFSET	0x00
+
+#define _I2S_OUT_CHAN_CTRL_OFFSET	0x04
+
+#define _I2S_OUT_CHAN_LRDATA_POL_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LRDATA_POL_SHIFT	12
+#define _I2S_OUT_CHAN_LRDATA_POL_MASK	0x00001000
+
+#define _I2S_OUT_CHAN_LRFORCE_DIS_OFFSET _I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LRFORCE_DIS_SHIFT	11
+#define _I2S_OUT_CHAN_LRFORCE_DIS_MASK	0x00000800
+
+#define _I2S_OUT_CHAN_LOCK_DIS_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_LOCK_DIS_SHIFT	10
+#define _I2S_OUT_CHAN_LOCK_DIS_MASK	0x00000400
+
+#define _I2S_OUT_CHAN_REPEAT_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_REPEAT_SHIFT	9
+#define _I2S_OUT_CHAN_REPEAT_MASK	0x00000200
+
+#define _I2S_OUT_CHAN_PACKED_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_PACKED_SHIFT	8
+#define _I2S_OUT_CHAN_PACKED_MASK	0x00000100
+
+#define _I2S_OUT_CHAN_FORMAT_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_FORMAT_SHIFT	4
+#define _I2S_OUT_CHAN_FORMAT_MASK	0x000000F0
+
+/* NOTE(review): the OFFSET macro says MUSTBE1 while SHIFT/MASK say
+ * MUSTB1 -- the _REG_* paste macros need a consistent spelling, so this
+ * field cannot be used through I2S_OUT_SET_FIELD() as-is; unify the
+ * names before first use. */
+#define _I2S_OUT_CHAN_MUSTBE1_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_MUSTB1_SHIFT	3
+#define _I2S_OUT_CHAN_MUSTB1_MASK	0x00000008
+
+#define _I2S_OUT_CHAN_FLUSH_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_FLUSH_SHIFT	2
+#define _I2S_OUT_CHAN_FLUSH_MASK	0x00000004
+
+#define _I2S_OUT_CHAN_PH_NSY_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_PH_NSY_SHIFT	1
+#define _I2S_OUT_CHAN_PH_NSY_MASK	0x00000002
+
+#define _I2S_OUT_CHAN_RUN_OFFSET	_I2S_OUT_CHAN_CTRL_OFFSET
+#define _I2S_OUT_CHAN_RUN_SHIFT		0
+#define _I2S_OUT_CHAN_RUN_MASK		0x00000001
+
+/* NOTE(review): duplicate -- _I2S_OUT_SOFT_RESET_OFFSET is already
+ * defined (with SHIFT/MASK) above; drop one of the two definitions. */
+#define _I2S_OUT_SOFT_RESET_OFFSET		0x08
+
+#define _I2S_OUT_CHAN_I_STATUS_OFFSET	0x08
+
+#define _I2S_OUT_CHAN_I_ENABLE_OFFSET	0x0C
+
+#define _I2S_OUT_CHAN_I_CLEAR_OFFSET	0x10
+
+#define _I2S_OUT_SAMPLE_COUNT_OFFSET 	0x1C
+/* Helper to be used externally */
+#define I2S_OUT_WRITE_REG(base, REG, value)	\
+	iowrite32(value, (base)->audio_out + _REG_ADDRESS(REG))
+
+#define I2S_OUT_SET_REG_FIELD(base, REG, value) \
+{\
+	u32 temp = ioread32((base)->audio_out + _REG_ADDRESS(REG)); \
+	I2S_OUT_SET_FIELD(temp, REG, value); \
+	iowrite32(temp, (base)->audio_out + _REG_ADDRESS(REG)); \
+}
+
+#define I2S_OUT_SET_FIELD(data, FIELD, value) \
+{\
+	data &= ~_REG_MASK(FIELD);\
+	data |=  value << _REG_SHIFT(FIELD);\
+}
+
+/* Write the per-channel control register of I2S-out @channel. */
+static inline void I2S_OUT_RSET_CHAN_CONTROL(struct tz1090_i2s *base,
+					     int channel,
+					     u32 value)
+{
+	iowrite32(value, base->audio_out + _I2S_OUT_CHANS_OFFSET +
+			channel * _I2S_OUT_CHANS_STRIDE +
+			_REG_ADDRESS(I2S_OUT_CHAN_CTRL));
+}
+
+/* Read back the per-channel control register of I2S-out @channel. */
+static inline u32 I2S_OUT_RGET_CHAN_CONTROL(struct tz1090_i2s *base,
+					    int channel)
+{
+	return ioread32(base->audio_out + _I2S_OUT_CHANS_OFFSET +
+			channel * _I2S_OUT_CHANS_STRIDE +
+			_REG_ADDRESS(I2S_OUT_CHAN_CTRL));
+}
+
+/* Write the whole I2S-out main control register. */
+static inline void I2S_OUT_RSET_MAIN_CONTROL(struct tz1090_i2s *base,
+					     u32 value)
+{
+	iowrite32(value, base->audio_out + _REG_ADDRESS(I2S_OUT_CONTROL));
+}
+
+/* Read back the I2S-out main control register. */
+static inline u32 I2S_OUT_RGET_MAIN_CONTROL(struct tz1090_i2s *base)
+{
+	return ioread32(base->audio_out + _REG_ADDRESS(I2S_OUT_CONTROL));
+}
+
+/********************************** I2S IN ************************************/
+/* Register map for the I2S-in block: data FIFO at 0x00, a single
+ * control register at 0x04 whose fields follow, then status/interrupt/
+ * count registers. */
+#define _I2S_IN_DATA_OFFSET		0x00
+
+#define _I2S_IN_CONTROL_OFFSET		0x04
+
+#define _I2S_IN_RJUST_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_RJUST_SHIFT		18
+#define _I2S_IN_RJUST_MASK		0x00040000
+
+#define _I2S_IN_CCDEL_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_CCDEL_SHIFT		15
+#define _I2S_IN_CCDEL_MASK		0x00038000
+
+#define _I2S_IN_FEN_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_FEN_SHIFT		14
+#define _I2S_IN_FEN_MASK		0x00004000
+
+#define _I2S_IN_FMODE_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_FMODE_SHIFT		13
+#define _I2S_IN_FMODE_MASK		0x00002000
+
+#define _I2S_IN_FRAME_PACK_OFFSET	_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_FRAME_PACK_SHIFT	12
+#define _I2S_IN_FRAME_PACK_MASK		0x00001000
+
+#define _I2S_IN_LR_DATA_POL_OFFSET	_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_LR_DATA_POL_SHIFT	11
+#define _I2S_IN_LR_DATA_POL_MASK	0x00000800
+
+#define _I2S_IN_ALIGN_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_ALIGN_SHIFT		10
+#define _I2S_IN_ALIGN_MASK		0x00000400
+
+#define _I2S_IN_PACKH_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_PACKH_SHIFT		9
+#define _I2S_IN_PACKH_MASK		0x00000200
+
+#define _I2S_IN_LRDLY_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_LRDLY_SHIFT		8
+#define _I2S_IN_LRDLY_MASK		0x00000100
+
+#define _I2S_IN_BCLKPOL_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_BCLKPOL_SHIFT		7
+#define _I2S_IN_BCLKPOL_MASK		0x00000080
+
+#define _I2S_IN_FLUSHFIFO_OFFSET	_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_FLUSHFIFO_SHIFT		6
+#define _I2S_IN_FLUSHFIFO_MASK		0x00000040
+
+#define _I2S_IN_BOC_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_BOC_SHIFT		5
+#define _I2S_IN_BOC_MASK		0x00000020
+
+#define _I2S_IN_CET_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_CET_SHIFT		4
+#define _I2S_IN_CET_MASK		0x00000010
+
+#define _I2S_IN_LRD_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_LRD_SHIFT		3
+#define _I2S_IN_LRD_MASK		0x00000008
+
+#define _I2S_IN_FRAME_WIDTH_OFFSET	_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_FRAME_WIDTH_SHIFT	2
+#define _I2S_IN_FRAME_WIDTH_MASK	0x00000004
+
+#define _I2S_IN_SAMPLE_WIDTH_OFFSET	_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_SAMPLE_WIDTH_SHIFT	1
+#define _I2S_IN_SAMPLE_WIDTH_MASK	0x00000002
+
+#define _I2S_IN_ENABLE_OFFSET		_I2S_IN_CONTROL_OFFSET
+#define _I2S_IN_ENABLE_SHIFT		0
+#define _I2S_IN_ENABLE_MASK		0x00000001
+
+#define _I2S_IN_STATUS_OFFSET		0x08
+
+#define _I2S_IN_INT_EN_OFFSET		0x0c
+
+#define _I2S_IN_SAMPLE_CNT_OFFSET	0x10
+
+/* NOTE(review): "SOFT_REST" is presumably a typo for "SOFT_RESET";
+ * renaming would break any out-of-view users, so only flagging it. */
+#define _I2S_IN_SOFT_REST_OFFSET	0x14
+
+/* NOTE(review): "REVISON" is presumably "REVISION" -- same caveat. */
+#define _I2S_IN_REVISON_OFFSET		0x1c
+/* Helper to be used externally */
+#define I2S_IN_SET_FIELD(data, FIELD, value) \
+{\
+	data &= ~_REG_MASK(FIELD);\
+	data |=  value << _REG_SHIFT(FIELD);\
+}
+
+
+
+/* Write the whole I2S-in control register. */
+static inline void I2S_IN_RSET_CONTROL(struct tz1090_i2s *base,
+				       u32 value)
+{
+	iowrite32(value, base->audio_in + _REG_ADDRESS(I2S_IN_CONTROL));
+}
+
+/* Read back the I2S-in control register. */
+static inline u32 I2S_IN_RGET_CONTROL(struct tz1090_i2s *base)
+{
+	return ioread32(base->audio_in + _REG_ADDRESS(I2S_IN_CONTROL));
+}
+
+#endif /* TZ1090I2S_H_ */
diff --git a/sound/soc/tz1090/tz1090-pcm.c b/sound/soc/tz1090/tz1090-pcm.c
new file mode 100644
index 0000000..6d860a2
--- /dev/null
+++ b/sound/soc/tz1090/tz1090-pcm.c
@@ -0,0 +1,347 @@
+/*
+ * tz1090-pcm.c
+ *
+ * ALSA PCM interface for the TZ1090 SoC
+ *
+ * Copyright:	(C) 2010-2012 Imagination Technologies
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/img_mdc_dma.h>
+
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "tz1090-pcm.h"
+
+#define BURST_SIZE 2
+
+/* Capabilities advertised to userspace; copied into runtime->hw at
+ * stream-open time via snd_soc_set_runtime_hwparams(). */
+static const struct snd_pcm_hardware comet_pcm_hardware = {
+	.info			= SNDRV_PCM_INFO_MMAP |
+				  SNDRV_PCM_INFO_MMAP_VALID |
+				  SNDRV_PCM_INFO_INTERLEAVED |
+				  SNDRV_PCM_INFO_PAUSE |
+				  SNDRV_PCM_INFO_RESUME,
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+				  SNDRV_PCM_FMTBIT_S32_LE |
+				  SNDRV_PCM_FMTBIT_S24_LE,
+
+	.channels_min		= 2,
+	.channels_max		= 6,
+	.period_bytes_min	= 32,
+	.period_bytes_max	= 8192,
+	.periods_min		= 1,
+	/* One MDC descriptor per period; the whole list must fit a page. */
+	.periods_max		= PAGE_SIZE/sizeof(struct img_dma_mdc_list),
+	.buffer_bytes_max	= 512 * 1024,
+};
+
+/*
+ * This is called when the PCM is "prepared"; formats and sample rates
+ * could be applied here.  It differs from hw_params in that it is also
+ * called while playing/recording to recover from underruns etc.
+ * Nothing is needed for this hardware, so it is a no-op.
+ */
+static int comet_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+/* Discrete sample rates accepted by this PCM; exposed through
+ * SNDRV_PCM_RATE_KNOT plus the constraint list below. */
+static unsigned int rates[] = {32000, 48000, 96000};
+
+static struct snd_pcm_hw_constraint_list c_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,	/* no rate-bit mask: the list alone constrains */
+};
+
+/*
+ * Called on stream open: install the hardware capabilities and rate
+ * constraints, then request the MDC dmaengine channel described by the
+ * DAI's DMA data (if any).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int comet_pcm_open(struct snd_pcm_substream *substream)
+{
+	int ret;
+	struct comet_pcm_dma_params *dma;
+	struct mdc_dma_cookie *cookie;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct device *dev = rtd->platform->dev;
+
+	/*
+	 * Base capabilities first: snd_soc_set_runtime_hwparams() copies
+	 * comet_pcm_hardware over runtime->hw, so the rate tweaks must
+	 * come after it.  (The original code set rate_min/rate_max/rates
+	 * before the copy, which silently discarded them.)
+	 */
+	snd_soc_set_runtime_hwparams(substream, &comet_pcm_hardware);
+
+	runtime->hw.rate_min = c_rates.list[0];
+	runtime->hw.rate_max = c_rates.list[c_rates.count - 1];
+	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
+
+	snd_pcm_hw_constraint_list(substream->runtime, 0,
+				   SNDRV_PCM_HW_PARAM_RATE,
+				   &c_rates);
+
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+	/* return if this is a bufferless transfer */
+	if (!dma)
+		return 0;
+
+	/* Temporary cookie describing the MDC channel/peripheral wanted. */
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return -ENOMEM;
+	cookie->req_channel = dma->dma_channel;
+	cookie->periph = dma->peripheral_num;
+
+	ret = snd_dmaengine_pcm_open_request_chan(substream, &mdc_dma_filter_fn,
+						  cookie);
+	/* The filter only consumes the cookie during the request (the
+	 * original freed it on both paths too), so free it here. */
+	kfree(cookie);
+	if (ret)
+		dev_err(dev,
+			"dmaengine pcm open failed with err %d\n", ret);
+
+	return ret;
+}
+
+/* Stream close: release the dmaengine channel acquired at open time. */
+static int comet_pcm_close(struct snd_pcm_substream *substream)
+{
+	snd_dmaengine_pcm_close(substream);
+
+	return 0;
+}
+
+/* Map the stream's write-combined DMA buffer into user space. */
+static int comet_pcm_mmap(struct snd_pcm_substream *substream,
+	struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *rt = substream->runtime;
+	struct device *dev = substream->pcm->card->dev;
+
+	return dma_mmap_writecombine(dev, vma, rt->dma_area, rt->dma_addr,
+				     rt->dma_bytes);
+}
+
+/* Allocate the coherent DMA buffer for one stream direction (sized to
+ * the advertised maximum).  Returns 0 or -ENOMEM. */
+static int comet_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+	const size_t sz = comet_pcm_hardware.buffer_bytes_max;
+	struct snd_pcm_substream *ss = pcm->streams[stream].substream;
+	struct snd_dma_buffer *buf = &ss->dma_buffer;
+
+	buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	buf->dev.dev = pcm->card->dev;
+	buf->private_data = NULL;
+	buf->area = dma_alloc_coherent(pcm->card->dev, sz, &buf->addr,
+				       GFP_KERNEL);
+	if (!buf->area)
+		return -ENOMEM;
+
+	buf->bytes = sz;
+	return 0;
+}
+
+/* Release the coherent buffers created by
+ * comet_pcm_preallocate_dma_buffer() for both directions. */
+static void comet_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+	int stream;
+
+	for (stream = 0; stream < 2; stream++) {
+		struct snd_pcm_substream *ss = pcm->streams[stream].substream;
+		struct snd_dma_buffer *buf;
+
+		if (!ss)
+			continue;
+
+		buf = &ss->dma_buffer;
+		if (!buf->area)
+			continue;
+
+		dma_free_coherent(pcm->card->dev, buf->bytes, buf->area,
+				  buf->addr);
+		buf->area = NULL;	/* guard against double free */
+	}
+}
+
+/*
+ * An interrupt fires up in MDC when the list is loaded so we must not
+ * execute the call back. Therefore we pass the MDC_NO_CALLBACK flag to
+ * the channel
+ */
+static int comet_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	/*
+	 * NOTE(review): tx_control lives on this stack frame but its
+	 * address is parked in chan->private below; if the MDC dmaengine
+	 * driver dereferences it after this function returns, that is a
+	 * dangling pointer -- confirm the driver consumes it
+	 * synchronously during snd_dmaengine_pcm_trigger().
+	 */
+	struct mdc_dma_tx_control tx_control = {
+		.flags = MDC_NO_CALLBACK,
+	};
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_START:
+	{
+		struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+		/* Hand the MDC_NO_CALLBACK flags to the channel. */
+		chan->private = (void *)&tx_control;
+		return snd_dmaengine_pcm_trigger(substream,
+					SNDRV_PCM_TRIGGER_START);
+	}
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return snd_dmaengine_pcm_trigger(substream,
+						 SNDRV_PCM_TRIGGER_STOP);
+	default:
+		return -EINVAL;
+	}
+	/* unreachable: every case above returns */
+	return 0;
+}
+
+/*
+ * Called when hw_params is set up by an application: buffer sizes,
+ * periods and formats are known, so attach the preallocated buffer and
+ * configure the dmaengine slave for this stream's direction.
+ *
+ * Returns 0 on success or a negative errno from dmaengine.
+ */
+static int comet_pcm_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	/* Zero-initialised: dmaengine_slave_config() must not see stack
+	 * garbage in the fields not set below (the original left conf
+	 * uninitialised). */
+	struct dma_slave_config conf = {0};
+	struct comet_pcm_dma_params *dma;
+	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+	int burst_size, ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct device *dev = rtd->platform->dev;
+
+	dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+	/* return if this is a bufferless transfer */
+	if (!dma)
+		goto out;
+
+	/* Attach the preallocated DMA buffer (was done twice before). */
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	runtime->dma_bytes = params_buffer_bytes(params);
+
+	/* channels * 4 * 2 - 1: presumably the hardware encodes burst
+	 * length as N-1 -- TODO confirm against the MDC documentation. */
+	burst_size = params_channels(params) * 4 * 2 - 1;
+
+	/* Set I/O address for the direction in use. */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		conf.direction = DMA_MEM_TO_DEV;
+		conf.dst_addr = dma->peripheral_address;
+		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		conf.dst_maxburst = burst_size;
+	} else {
+		conf.direction = DMA_DEV_TO_MEM;
+		conf.src_addr = dma->peripheral_address;
+		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		conf.src_maxburst = burst_size;
+	}
+
+	ret = dmaengine_slave_config(chan, &conf);
+	if (ret < 0)
+		dev_err(dev, "dma slave config failed with err %d\n", ret);
+
+out:
+	return ret;
+}
+
+/* Undo hw_params: detach the runtime buffer reference. */
+static int comet_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	snd_pcm_set_runtime_buffer(substream, NULL);
+
+	return 0;
+}
+
+/* PCM ops table: pointer/trigger plumbing comes straight from the
+ * generic dmaengine PCM helpers. */
+static struct snd_pcm_ops comet_pcm_ops = {
+	.open		= comet_pcm_open,
+	.close		= comet_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= comet_pcm_hw_params,
+	.hw_free	= comet_pcm_hw_free,
+	.prepare	= comet_pcm_prepare,
+	.trigger	= comet_pcm_trigger,
+	.pointer	= snd_dmaengine_pcm_pointer,
+	.mmap		= comet_pcm_mmap,
+};
+
+/* NOTE(review): a 64-bit DMA mask looks generous for this SoC --
+ * confirm the MDC DMA engine really addresses 64 bits. */
+static u64 comet_pcm_dmamask = DMA_BIT_MASK(64);
+
+/* PCM construction hook: install default DMA masks on the card device
+ * and preallocate buffers for whichever directions exist. */
+static int comet_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	struct snd_pcm *pcm = rtd->pcm;
+	int ret = 0;
+
+	if (!card->dev->dma_mask)
+		card->dev->dma_mask = &comet_pcm_dmamask;
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+		ret = comet_pcm_preallocate_dma_buffer(pcm,
+				SNDRV_PCM_STREAM_PLAYBACK);
+		if (ret)
+			goto out;
+	}
+
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+		ret = comet_pcm_preallocate_dma_buffer(pcm,
+				SNDRV_PCM_STREAM_CAPTURE);
+		if (ret)
+			goto out;
+	}
+ out:
+	return ret;
+}
+
+/* The ASoC platform exported for DAI/machine drivers to reference
+ * (declared extern in tz1090-pcm.h). */
+struct snd_soc_platform_driver comet_soc_platform = {
+	.ops		= &comet_pcm_ops,
+	.pcm_new	= comet_soc_pcm_new,
+	.pcm_free	= comet_pcm_free_dma_buffers,
+};
+EXPORT_SYMBOL_GPL(comet_soc_platform);
+
+/* Bind: register the COMET ASoC platform (PCM ops) with the core. */
+static int comet_soc_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev, &comet_soc_platform);
+}
+
+/* Unbind: drop the platform registration again. */
+static int comet_soc_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+
+	return 0;
+}
+
+/* Platform driver glue; bound by name ("comet-pcm-audio"). */
+static struct platform_driver comet_pcm_driver = {
+	.driver = {
+			.name = "comet-pcm-audio",
+			.owner = THIS_MODULE,
+	},
+
+	.probe = comet_soc_platform_probe,
+	.remove = comet_soc_platform_remove,
+};
+
+/*
+ * The init/exit pair did nothing but register/unregister the platform
+ * driver; module_platform_driver() expands to exactly that boilerplate.
+ */
+module_platform_driver(comet_pcm_driver);
+
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("Comet PCM Module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/tz1090/tz1090-pcm.h b/sound/soc/tz1090/tz1090-pcm.h
new file mode 100644
index 0000000..a6318ff
--- /dev/null
+++ b/sound/soc/tz1090/tz1090-pcm.h
@@ -0,0 +1,22 @@
+/*
+ * tz1090-pcm.h
+ *
+ * Shared definitions between the TZ1090 PCM platform driver and the
+ * I2S DAI driver: the exported ASoC platform and the per-stream DMA
+ * parameter block.
+ */
+
+#ifndef TZ1090PCM_H_
+#define TZ1090PCM_H_
+
+#include <linux/spinlock.h>
+#include <linux/dmaengine.h>
+#include <linux/img_mdc_dma.h>
+
+/* ASoC platform (PCM ops) implemented in tz1090-pcm.c. */
+extern struct snd_soc_platform_driver comet_soc_platform;
+
+/* Per-direction DMA description handed from the DAI driver to the PCM
+ * layer (read back via snd_soc_dai_get_dma_data() in tz1090-pcm.c). */
+struct comet_pcm_dma_params {
+	const char *name;		/* stream name; usage not visible here */
+	int peripheral_num;		/* MDC peripheral number for the cookie */
+	int dma_channel;		/* requested MDC channel for the cookie */
+	dma_addr_t peripheral_address;	/* FIFO bus address used as DMA src/dst */
+	bool alloced;			/* NOTE(review): spelling kept -- part of the API */
+};
+
+#endif /* TZ1090PCM_H_ */